Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions eng/pipelines/libraries/run-test-job.yml
Original file line number Diff line number Diff line change
Expand Up @@ -191,4 +191,8 @@ jobs:
- fullpgo_random_gdv
- fullpgo_random_edge
- fullpgo_random_gdv_edge
- jitosr
- jitosr_stress
- jitosr_stress_random
- jitosr_pgo

25 changes: 20 additions & 5 deletions src/coreclr/jit/codegencommon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6385,16 +6385,31 @@ void CodeGen::genEnregisterOSRArgsAndLocals()

void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
{
// For OSR the original method has set this up for us.
assert(compiler->compGeneratingProlog);

const bool reportArg = compiler->lvaReportParamTypeArg();

if (compiler->opts.IsOSR())
{
PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
if (reportArg)
{
// OSR method will use Tier0 slot to report context arg.
//
assert(ppInfo->HasGenericContextArgOffset());
JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n");
}
else if (compiler->lvaKeepAliveAndReportThis())
{
// OSR method will use Tier0 slot to report `this` as context.
//
assert(ppInfo->HasKeptAliveThis());
JITDUMP("OSR method will use Tier0 frame slot for generics context `this`.\n");
}

return;
}

assert(compiler->compGeneratingProlog);

bool reportArg = compiler->lvaReportParamTypeArg();

// We should report either generic context arg or "this" when used so.
if (!reportArg)
{
Expand Down
42 changes: 25 additions & 17 deletions src/coreclr/jit/compiler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6396,45 +6396,53 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
// Honor the config setting that tells the jit to
// always optimize methods with loops.
//
// If that's not set, and OSR is enabled, the jit may still
// If neither of those apply, and OSR is enabled, the jit may still
// decide to optimize, if there's something in the method that
// OSR currently cannot handle.
// OSR currently cannot handle, or we're optionally suppressing
// OSR by method hash.
//
const char* reason = nullptr;

if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
{
reason = "tail.call and not BBINSTR";
}
else if ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)
else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0))
{
if (compHasBackwardJump)
{
reason = "loop";
}
reason = "loop";
}
else if (JitConfig.TC_OnStackReplacement() > 0)

if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0))
{
const bool patchpointsOK = compCanHavePatchpoints(&reason);
assert(patchpointsOK || (reason != nullptr));
const char* noPatchpointReason = nullptr;
bool canEscapeViaOSR = compCanHavePatchpoints(&reason);

#ifdef DEBUG
// Optionally disable OSR by method hash.
//
if (patchpointsOK && compHasBackwardJump)
if (canEscapeViaOSR)
{
// Optionally disable OSR by method hash. This will force any
// method that might otherwise get trapped in Tier0 to be optimized.
//
static ConfigMethodRange JitEnableOsrRange;
JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange());
const unsigned hash = impInlineRoot()->info.compMethodHash();
if (!JitEnableOsrRange.Contains(hash))
{
JITDUMP("Disabling OSR -- Method hash 0x%08x not within range ", hash);
JITDUMPEXEC(JitEnableOsrRange.Dump());
JITDUMP("\n");
reason = "OSR disabled by JitEnableOsrRange";
canEscapeViaOSR = false;
reason = "OSR disabled by JitEnableOsrRange";
}
}
#endif

if (canEscapeViaOSR)
{
JITDUMP("\nOSR enabled for this method\n");
}
else
{
JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason);
assert(reason != nullptr);
}
}

if (reason != nullptr)
Expand Down
18 changes: 15 additions & 3 deletions src/coreclr/jit/compiler.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -1986,13 +1986,18 @@ inline bool Compiler::lvaKeepAliveAndReportThis()
// the VM requires us to keep the generics context alive or it is used in a look-up.
// We keep it alive in the lookup scenario, even when the VM didn't ask us to,
// because collectible types need the generics context when gc-ing.
//
// Methods that can inspire OSR methods must always report context as live
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

typo: Methoods

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks. Will fix in subsequent PR.

//
if (genericsContextIsThis)
{
const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
const bool hasPatchpoint = doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints();

if (lvaGenericsContextInUse || mustKeep)
if (lvaGenericsContextInUse || mustKeep || hasPatchpoint)
{
JITDUMP("Reporting this as generic context: %s\n", mustKeep ? "must keep" : "referenced");
JITDUMP("Reporting this as generic context: %s\n",
mustKeep ? "must keep" : (hasPatchpoint ? "patchpoints" : "referenced"));
return true;
}
}
Expand Down Expand Up @@ -2024,6 +2029,13 @@ inline bool Compiler::lvaReportParamTypeArg()
{
return true;
}

// Methods that have patchpoints always report context as live
//
if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints())
{
return true;
}
}

// Otherwise, we don't need to report it -- the generics context parameter is unused.
Expand Down
28 changes: 22 additions & 6 deletions src/coreclr/jit/importer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9540,6 +9540,10 @@ var_types Compiler::impImportCall(OPCODE opcode,
// Take care to pass raw IL offset here as the 'debug info' might be different for
// inlinees.
rawILOffset);

// Devirtualization may change which method gets invoked. Update our local cache.
//
methHnd = callInfo->hMethod;
}

if (impIsThis(obj))
Expand Down Expand Up @@ -11803,9 +11807,21 @@ void Compiler::impImportBlockCode(BasicBlock* block)

#ifdef FEATURE_ON_STACK_REPLACEMENT

// Is OSR enabled?
bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0);

#ifdef DEBUG

// Optionally suppress patchpoints by method hash
//
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
static ConfigMethodRange JitEnablePatchpointRange;
JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
const unsigned hash = impInlineRoot()->info.compMethodHash();
const bool inRange = JitEnablePatchpointRange.Contains(hash);
enablePatchpoints &= inRange;

#endif // DEBUG

if (enablePatchpoints)
{
// We don't inline at Tier0, if we do, we may need rethink our approach.
// Could probably support inlines that don't introduce flow.
Expand All @@ -11820,13 +11836,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
//
if (!compTailPrefixSeen)
{
assert(compCanHavePatchpoints());

// The normal policy is only to add patchpoints to the targets of lexically
// backwards branches.
//
if (compHasBackwardJump)
{
assert(compCanHavePatchpoints());

// Is the start of this block a suitable patchpoint?
//
if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0))
Expand Down Expand Up @@ -11857,8 +11873,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
const bool tryOffsetOSR = offsetOSR >= 0;
const bool tryRandomOSR = randomOSR > 0;

if ((tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() &&
((block->bbFlags & BBF_PATCHPOINT) == 0))
if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) &&
!block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0))
{
// Block start can have a patchpoint. See if we should add one.
//
Expand Down
12 changes: 10 additions & 2 deletions src/coreclr/jit/jitconfigvalues.h
Original file line number Diff line number Diff line change
Expand Up @@ -471,8 +471,16 @@ CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1
#endif // debug

#if defined(DEBUG)
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // Enable osr for only some methods
#endif // debug
// EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape
// from Tier0 code. Methods outside the range that would normally be jitted at Tier0
// and have patchpoints will instead be switched to optimized.
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange"))
// EnablePatchpointRange allows you to limit the set of Tier0 methods that
// will have patchpoints, and hence control which methods will create OSR methods.
// Unlike EnableOsrRange, it will not alter the optimization setting for methods
// outside the enabled range.
CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange"))
#endif

// Profile instrumentation options
CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1)
Expand Down
5 changes: 3 additions & 2 deletions src/coreclr/jit/phase.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -167,10 +167,11 @@ void Phase::PostPhase(PhaseStatus status)
// clang-format off

static Phases s_allowlist[] = {
PHASE_INCPROFILE,
PHASE_IBCPREP,
PHASE_IMPORTATION,
PHASE_PATCHPOINTS,
PHASE_IBCINSTR,
PHASE_IBCPREP,
PHASE_INCPROFILE,
PHASE_INDXCALL,
PHASE_MORPH_INLINE,
PHASE_ALLOCATE_OBJECTS,
Expand Down