From 561b80825dfcb314a9336c2a4fb2b8e0a6285d26 Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Thu, 18 Nov 2021 19:08:40 -0800
Subject: [PATCH] Enable QJFL and OSR by default for x64 and arm64

Change these default values when the jit targets x64 or arm64:

* COMPlus_TC_QuickJitForLoops=1
* COMPlus_TC_OnStackReplacement=1

The upshot is that on x64/arm64 more methods will be jitted at Tier0,
and we will rely on OSR to get out of long-running Tier0 methods.

Other architectures continue to use the old behavior for now, as OSR
is not yet supported for x86 or arm.
---
 src/coreclr/inc/clrconfigvalues.h | 4 ++++
 src/coreclr/jit/jitconfigvalues.h | 6 +++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h
index f4cdfc58b1851c..4187fcf8005057 100644
--- a/src/coreclr/inc/clrconfigvalues.h
+++ b/src/coreclr/inc/clrconfigvalues.h
@@ -585,7 +585,11 @@ RETAIL_CONFIG_DWORD_INFO(INTERNAL_HillClimbing_GainExponent,
 #endif // _DEBUG
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TieredCompilation, W("TieredCompilation"), 1, "Enables tiered compilation")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_QuickJit, W("TC_QuickJit"), 1, "For methods that would be jitted, enable using quick JIT when appropriate.")
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 1, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
+#else // !(defined(TARGET_AMD64) || defined(TARGET_ARM64))
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 0, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
+#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_AggressiveTiering, W("TC_AggressiveTiering"), 0, "Transition through tiers aggressively.")
 RETAIL_CONFIG_DWORD_INFO(INTERNAL_TC_BackgroundWorkerTimeoutMs, W("TC_BackgroundWorkerTimeoutMs"), TC_BackgroundWorkerTimeoutMs, "How long in milliseconds the background worker thread may remain idle before exiting.")
 RETAIL_CONFIG_DWORD_INFO(INTERNAL_TC_CallCountThreshold, W("TC_CallCountThreshold"), TC_CallCountThreshold, "Number of times a method must be called in tier 0 after which it is promoted to the next tier.")
diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h
index 3d45952a0de663..fe65b551aec988 100644
--- a/src/coreclr/jit/jitconfigvalues.h
+++ b/src/coreclr/jit/jitconfigvalues.h
@@ -461,8 +461,12 @@ CONFIG_STRING(JitGuardedDevirtualizationRange, W("JitGuardedDevirtualizationRang
 CONFIG_INTEGER(JitRandomGuardedDevirtualization, W("JitRandomGuardedDevirtualization"), 0)
 #endif // DEBUG
 
-// Enable insertion of patchpoints into Tier0 methods with loops.
+// Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed.
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 1)
+#else
 CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0)
+#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
 // Initial patchpoint counter value used by jitted code
 CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000)
 // Enable partial compilation for Tier0 methods
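
Note (not part of the patch): both headers express the arch-dependent default by duplicating the full
config line under #if/#else, matching the existing style of these files. A minimal equivalent sketch,
assuming a hypothetical helper macro TC_QUICK_JIT_FOR_LOOPS_DEFAULT, hoists only the default value so
the config entry itself appears once; the same pattern would apply to TC_OnStackReplacement in
jitconfigvalues.h:

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
#define TC_QUICK_JIT_FOR_LOOPS_DEFAULT 1 // QJFL on by default where OSR is available
#else
#define TC_QUICK_JIT_FOR_LOOPS_DEFAULT 0 // keep the old default on x86/arm (no OSR yet)
#endif
RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), TC_QUICK_JIT_FOR_LOOPS_DEFAULT, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")

Either form can still be overridden at run time via the COMPlus_TC_QuickJitForLoops and
COMPlus_TC_OnStackReplacement settings named in the commit message.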