diff --git a/eng/pipelines/libraries/run-test-job.yml b/eng/pipelines/libraries/run-test-job.yml
index d864487760908b..0f2070a49bfbf7 100644
--- a/eng/pipelines/libraries/run-test-job.yml
+++ b/eng/pipelines/libraries/run-test-job.yml
@@ -191,4 +191,8 @@ jobs:
           - fullpgo_random_gdv
           - fullpgo_random_edge
           - fullpgo_random_gdv_edge
+          - jitosr
+          - jitosr_stress
+          - jitosr_stress_random
+          - jitosr_pgo
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 1c9eda6f63afc1..fe8b2d82dcaec9 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -6385,16 +6385,31 @@ void CodeGen::genEnregisterOSRArgsAndLocals()
 void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
 {
-    // For OSR the original method has set this up for us.
+    assert(compiler->compGeneratingProlog);
+
+    const bool reportArg = compiler->lvaReportParamTypeArg();
+
     if (compiler->opts.IsOSR())
     {
+        PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
+        if (reportArg)
+        {
+            // OSR method will use the Tier0 slot to report the context arg.
+            //
+            assert(ppInfo->HasGenericContextArgOffset());
+            JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n");
+        }
+        else if (compiler->lvaKeepAliveAndReportThis())
+        {
+            // OSR method will use the Tier0 slot to report `this` as the context.
+            //
+            assert(ppInfo->HasKeptAliveThis());
+            JITDUMP("OSR method will use Tier0 frame slot for generics context `this`.\n");
+        }
+
         return;
     }
 
-    assert(compiler->compGeneratingProlog);
-
-    bool reportArg = compiler->lvaReportParamTypeArg();
-
     // We should report either generic context arg or "this" when used so.
     if (!reportArg)
     {
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index e85192d1905938..f6299e5bf35619 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -6396,9 +6396,10 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
     // Honor the config setting that tells the jit to
     // always optimize methods with loops.
     //
-    // If that's not set, and OSR is enabled, the jit may still
+    // If neither of those applies, and OSR is enabled, the jit may still
     // decide to optimize, if there's something in the method that
-    // OSR currently cannot handle.
+    // OSR currently cannot handle, or if we're optionally suppressing
+    // OSR by method hash.
     //
     const char* reason = nullptr;
@@ -6406,35 +6407,41 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
     {
         reason = "tail.call and not BBINSTR";
     }
-    else if ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)
+    else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0))
     {
-        if (compHasBackwardJump)
-        {
-            reason = "loop";
-        }
+        reason = "loop";
     }
-    else if (JitConfig.TC_OnStackReplacement() > 0)
+
+    if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0))
     {
-        const bool patchpointsOK = compCanHavePatchpoints(&reason);
-        assert(patchpointsOK || (reason != nullptr));
+        bool canEscapeViaOSR = compCanHavePatchpoints(&reason);
 
 #ifdef DEBUG
-        // Optionally disable OSR by method hash.
-        //
-        if (patchpointsOK && compHasBackwardJump)
+        if (canEscapeViaOSR)
        {
+            // Optionally disable OSR by method hash. This will force any
+            // method that might otherwise get trapped in Tier0 to be optimized.
+            //
             static ConfigMethodRange JitEnableOsrRange;
             JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange());
             const unsigned hash = impInlineRoot()->info.compMethodHash();
             if (!JitEnableOsrRange.Contains(hash))
             {
-                JITDUMP("Disabling OSR -- Method hash 0x%08x not within range ", hash);
-                JITDUMPEXEC(JitEnableOsrRange.Dump());
-                JITDUMP("\n");
-                reason = "OSR disabled by JitEnableOsrRange";
+                canEscapeViaOSR = false;
+                reason          = "OSR disabled by JitEnableOsrRange";
             }
         }
 #endif
+
+        if (canEscapeViaOSR)
+        {
+            JITDUMP("\nOSR enabled for this method\n");
+        }
+        else
+        {
+            assert(reason != nullptr);
+            JITDUMP("\nOSR disabled for this method: %s\n", reason);
+        }
     }
 
     if (reason != nullptr)
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 5a7b17f776f2c6..464cafeb72750e 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -1986,13 +1986,18 @@ inline bool Compiler::lvaKeepAliveAndReportThis()
     // the VM requires us to keep the generics context alive or it is used in a look-up.
     // We keep it alive in the lookup scenario, even when the VM didn't ask us to,
     // because collectible types need the generics context when gc-ing.
+    //
+    // Methods that can inspire OSR methods must always report the context as live.
+    //
     if (genericsContextIsThis)
     {
-        const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
+        const bool mustKeep      = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
+        const bool hasPatchpoint = doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints();
 
-        if (lvaGenericsContextInUse || mustKeep)
+        if (lvaGenericsContextInUse || mustKeep || hasPatchpoint)
         {
-            JITDUMP("Reporting this as generic context: %s\n", mustKeep ? "must keep" : "referenced");
+            JITDUMP("Reporting this as generic context: %s\n",
+                    mustKeep ? "must keep" : (hasPatchpoint ? "patchpoints" : "referenced"));
             return true;
         }
     }
@@ -2024,6 +2029,13 @@ inline bool Compiler::lvaReportParamTypeArg()
         {
             return true;
         }
+
+        // Methods that have patchpoints always report the context as live.
+        //
+        if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints())
+        {
+            return true;
+        }
     }
 
     // Otherwise, we don't need to report it -- the generics context parameter is unused.
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 9668b22c5ebf87..ca697139a8904c 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -9540,6 +9540,10 @@ var_types Compiler::impImportCall(OPCODE opcode,
                                 // Take care to pass raw IL offset here as the 'debug info' might be different for
                                 // inlinees.
                                 rawILOffset);
+
+            // Devirtualization may change which method gets invoked. Update our local cache.
+            //
+            methHnd = callInfo->hMethod;
         }
 
         if (impIsThis(obj))
@@ -11803,9 +11807,21 @@ void Compiler::impImportBlockCode(BasicBlock* block)
 
 #ifdef FEATURE_ON_STACK_REPLACEMENT
 
-    // Is OSR enabled?
+    bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0);
+
+#ifdef DEBUG
+
+    // Optionally suppress patchpoints by method hash.
     //
-    if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
+    static ConfigMethodRange JitEnablePatchpointRange;
+    JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
+    const unsigned hash    = impInlineRoot()->info.compMethodHash();
+    const bool     inRange = JitEnablePatchpointRange.Contains(hash);
+    enablePatchpoints &= inRange;
+
+#endif // DEBUG
+
+    if (enablePatchpoints)
     {
         // We don't inline at Tier0, if we do, we may need rethink our approach.
         // Could probably support inlines that don't introduce flow.
@@ -11820,13 +11836,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
         //
         if (!compTailPrefixSeen)
         {
-            assert(compCanHavePatchpoints());
-
             // The normaly policy is only to add patchpoints to the targets of lexically
             // backwards branches.
             //
             if (compHasBackwardJump)
             {
+                assert(compCanHavePatchpoints());
+
                 // Is the start of this block a suitable patchpoint?
                 //
                 if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0))
@@ -11857,8 +11873,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 const bool tryOffsetOSR = offsetOSR >= 0;
                 const bool tryRandomOSR = randomOSR > 0;
 
-                if ((tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() &&
-                    ((block->bbFlags & BBF_PATCHPOINT) == 0))
+                if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) &&
+                    !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0))
                 {
                     // Block start can have a patchpoint. See if we should add one.
                     //
diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h
index 4fc4594c6fb2b9..1346c2069da170 100644
--- a/src/coreclr/jit/jitconfigvalues.h
+++ b/src/coreclr/jit/jitconfigvalues.h
@@ -471,8 +471,16 @@ CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1
 #endif // debug
 
 #if defined(DEBUG)
-CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // Enable osr for only some methods
-#endif // debug
+// JitEnableOsrRange limits the set of methods that will rely on OSR to escape
+// from Tier0 code. Methods outside the range that would normally be jitted at Tier0
+// and have patchpoints will instead be switched to optimized.
+CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange"))
+// JitEnablePatchpointRange limits the set of Tier0 methods that will have
+// patchpoints, and hence controls which methods will create OSR methods.
+// Unlike JitEnableOsrRange, it does not alter the optimization setting for
+// methods outside the enabled range.
+CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange"))
+#endif // debug
 
 // Profile instrumentation options
 CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1)
diff --git a/src/coreclr/jit/phase.cpp b/src/coreclr/jit/phase.cpp
index 7bcc372cb0cf82..5f2e2172c6f4a5 100644
--- a/src/coreclr/jit/phase.cpp
+++ b/src/coreclr/jit/phase.cpp
@@ -167,10 +167,11 @@ void Phase::PostPhase(PhaseStatus status)
         // clang-format off
 
         static Phases s_allowlist[] = {
+            PHASE_INCPROFILE,
+            PHASE_IBCPREP,
             PHASE_IMPORTATION,
+            PHASE_PATCHPOINTS,
             PHASE_IBCINSTR,
-            PHASE_IBCPREP,
-            PHASE_INCPROFILE,
             PHASE_INDXCALL,
             PHASE_MORPH_INLINE,
             PHASE_ALLOCATE_OBJECTS,
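
Note on the two DEBUG-only range knobs above: JitEnableOsrRange and JitEnablePatchpointRange both take a method-hash range spec, which is what makes it possible to bisect an OSR failure down to a single method. Below is a minimal standalone sketch of how a hash-range filter of this kind gates a feature. It is illustrative only, not the JIT's actual ConfigMethodRange implementation, and every name in it (HashRange, ParseRanges, Contains) is invented for the example; the spec is assumed well-formed.

    // Illustrative sketch only: NOT the JIT's ConfigMethodRange implementation.
    // A range spec holds inclusive hex ranges; a method participates only if
    // its hash lands inside one of them.
    #include <cstdint>
    #include <cstdio>
    #include <sstream>
    #include <string>
    #include <vector>

    struct HashRange
    {
        uint32_t lo;
        uint32_t hi;
    };

    // Parse a spec like "0x1000-0x1FFF,0xA000". A single value acts as a
    // one-element range; an empty spec means "enabled for every method".
    static std::vector<HashRange> ParseRanges(const std::string& spec)
    {
        std::vector<HashRange> ranges;
        std::stringstream ss(spec);
        std::string       item;
        while (std::getline(ss, item, ','))
        {
            const size_t dash = item.find('-');
            HashRange    r;
            r.lo = static_cast<uint32_t>(std::stoul(item.substr(0, dash), nullptr, 16));
            r.hi = (dash == std::string::npos)
                       ? r.lo
                       : static_cast<uint32_t>(std::stoul(item.substr(dash + 1), nullptr, 16));
            ranges.push_back(r);
        }
        return ranges;
    }

    static bool Contains(const std::vector<HashRange>& ranges, uint32_t hash)
    {
        if (ranges.empty())
        {
            return true; // no filter configured
        }
        for (const HashRange& r : ranges)
        {
            if ((hash >= r.lo) && (hash <= r.hi))
            {
                return true;
            }
        }
        return false;
    }

    int main()
    {
        const std::vector<HashRange> ranges = ParseRanges("0x1000-0x1FFF");
        printf("0x1234 gets patchpoints: %d\n", Contains(ranges, 0x1234)); // 1
        printf("0x9999 gets patchpoints: %d\n", Contains(ranges, 0x9999)); // 0
        return 0;
    }

In a checked JIT the real knobs are read from the environment (COMPlus_ / DOTNET_ prefix, depending on runtime version), with whatever spec syntax ConfigMethodRange parses. Per the jitconfigvalues.h comments above, restricting JitEnablePatchpointRange leaves out-of-range methods at their normal Tier0 behavior, while restricting JitEnableOsrRange switches out-of-range methods with patchpoints to optimized code.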