From d2b8729f859fafbf252f2712f2120027dbb39cfa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomek=20Sowi=C5=84ski?=
Date: Fri, 4 Jul 2025 11:58:58 +0200
Subject: [PATCH 1/5] Lower const single bit test, variable WiP

---
 src/coreclr/jit/lower.cpp                 |  97 +++++++++++++++++--
 src/tests/JIT/Directed/BitTest/BitTest.cs | 108 ++++++++++++++++++++++
 2 files changed, 195 insertions(+), 10 deletions(-)

diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 3b8b03c975c45f..c6ebe0e2ea2313 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4147,7 +4147,16 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
     GenTree*       op1 = cmp->gtGetOp1();
     GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
 
-#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
+#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+
+    auto isVariableSingleBit = [](GenTree*& testedOp, GenTree*& bitOp) -> bool {
+        if (!bitOp->OperIs(GT_LSH))
+        {
+            std::swap(bitOp, testedOp);
+        }
+        return bitOp->OperIs(GT_LSH) && varTypeIsIntOrI(bitOp) && bitOp->gtGetOp1()->IsIntegralConst(1);
+    };
+
     ssize_t op2Value = op2->IconValue();
 
 #ifdef TARGET_XARCH
@@ -4242,6 +4251,77 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
         }
 
+#ifdef TARGET_RISCV64
+        GenTree* testedOp    = andOp1;
+        GenTree* bitOp       = andOp2;
+        bool     isSingleBit = bitOp->IsIntegralConstUnsignedPow2() || isVariableSingleBit(testedOp, bitOp);
+        if ((op2Value == 0) && !bitOp->IsIntegralConst(1) && isSingleBit)
+        {
+            LIR::Use cmpUse;
+            bool     isUserJtrue = BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE);
+            if (bitOp->IsIntegralConst() && (isUserJtrue ? !bitOp->isContained() : cmp->OperIs(GT_NE)))
+            {
+                // Shift the tested single bit into the sign bit, then check if negative/positive.
+                INT64 bit = bitOp->AsIntConCommon()->IntegralValue();
+                if (bit > 0)
+                {
+                    int shiftAmount = genTypeSize(op1) * 8 - BitOperations::Log2((UINT64)bit) - 1;
+                    assert(shiftAmount > 0);
+                    op1->SetOperRaw(GT_LSH);
+                    bitOp->AsIntConCommon()->SetIntegralValue(shiftAmount);
+                    bitOp->SetContained();
+                }
+                else
+                {
+                    // The tested single bit is the sign bit, just remove the AND
+                    assert(bit == INT_MIN || bit == LONG_MIN);
+                    cmp->AsOp()->gtOp1 = testedOp;
+                    BlockRange().Remove(bitOp);
+                    BlockRange().Remove(op1);
+                }
+
+                cmp->SetOperRaw(cmp->OperIs(GT_NE) ? GT_LT : GT_GE);
+                cmp->ClearUnsigned();
+                op2->SetContained();
+
+                return cmp->gtNext;
+            }
+            else
+            {
+                return cmp->gtNext;
+                // TODO: debug
+
+                // Transform (a & bit) into ((a >> log2(bit)) & 1)
+                // The "!=/== 0" is folded below if necessary.
+                GenTree* shiftAmount = nullptr;
+                GenTree* one         = nullptr;
+                if (bitOp->IsIntegralConst()) // a & constBit
+                {
+                    INT64 bit  = bitOp->AsIntConCommon()->IntegralValue();
+                    int   log2 = BitOperations::Log2((UINT64)bit);
+                    shiftAmount = comp->gtNewIconNode(log2);
+                    BlockRange().InsertAfter(testedOp, shiftAmount);
+
+                    bitOp->AsIntConCommon()->SetIntegralValue(1);
+                    one = bitOp;
+                }
+                else // a & (1 << varBit)
+                {
+                    assert(bitOp->OperIs(GT_LSH));
+                    BlockRange().Remove(bitOp);
+                    shiftAmount = bitOp->gtGetOp2();
+                    one         = bitOp->gtGetOp1();
+                }
+                assert(one->IsIntegralConst(1));
+
+                GenTree* shiftRight = comp->gtNewOperNode(GT_RSH, testedOp->TypeGet(), testedOp, shiftAmount);
+                BlockRange().InsertAfter(testedOp, shiftRight);
+                op1->AsOp()->gtOp1 = shiftRight;
+                op1->AsOp()->gtOp2 = one;
+            }
+        }
+#endif // TARGET_RISCV64
+
         // Optimizes (X & 1) != 0 to (X & 1)
         // Optimizes (X & 1) == 0 to ((NOT X) & 1)
         // (== 1 or != 1) cases are transformed to (!= 0 or == 0) above
@@ -4275,6 +4355,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             }
         }
 
+#ifndef TARGET_RISCV64
         if (op2Value == 0)
        {
             BlockRange().Remove(op1);
@@ -4321,7 +4402,9 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             }
 #endif
         }
-        else if (andOp2->IsIntegralConst() && GenTree::Compare(andOp2, op2))
+        else
+#endif // !TARGET_RISCV64
+        if (andOp2->IsIntegralConst() && GenTree::Compare(andOp2, op2))
         {
             //
             // Transform EQ|NE(AND(x, y), y) into EQ|NE(AND(NOT(x), y), 0) when y is a constant.
@@ -4350,13 +4433,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
 
         GenTree* lsh = cmp->AsOp()->gtOp1;
         GenTree* op  = cmp->AsOp()->gtOp2;
-
-        if (!lsh->OperIs(GT_LSH))
-        {
-            std::swap(lsh, op);
-        }
-
-        if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh) && lsh->gtGetOp1()->IsIntegralConst(1))
+        if (isVariableSingleBit(op, lsh))
         {
             cmp->SetOper(cmp->OperIs(GT_TEST_EQ) ? GT_BITTEST_EQ : GT_BITTEST_NE);
 
@@ -4371,7 +4448,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         }
     }
 #endif // TARGET_XARCH
-#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
+#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
 
     // Optimize EQ/NE(relop/SETCC, 0) into (maybe reversed) cond.
     if (cmp->OperIs(GT_EQ, GT_NE) && op2->IsIntegralConst(0) && (op1->OperIsCompare() || op1->OperIs(GT_SETCC)))
diff --git a/src/tests/JIT/Directed/BitTest/BitTest.cs b/src/tests/JIT/Directed/BitTest/BitTest.cs
index fc6fcb587c0f89..34d2febf7ba1fb 100644
--- a/src/tests/JIT/Directed/BitTest/BitTest.cs
+++ b/src/tests/JIT/Directed/BitTest/BitTest.cs
@@ -38,6 +38,51 @@ public class Program
     [MethodImpl(MethodImplOptions.NoInlining)]
     static bool I8_BT_mem_reg(ref long x, int y) => (x & (1L << y)) != 0;
 
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I1_BT_reg_min(sbyte x) => (x & (1 << 7)) != 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static sbyte I1_BT_reg_min_JCC(sbyte x) => (sbyte)((x & (1 << 7)) == 0 ? (x + 1) : (x - 1));
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I2_BT_reg_min(short x) => (x & (1 << 15)) != 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I4_BT_reg_min(int x) => (x & (1 << 31)) != 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I4_BT_reg_min_EQ(int x) => (x & (1 << 31)) == 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static int I4_BT_reg_min_JCC(int x) => (x & (1 << 31)) == 0 ? (x + 1) : (x - 1);
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I8_BT_reg_min(long x) => (x & (1L << 63)) != 0;
+
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I1_BT_reg_min_1(sbyte x) => (x & (1 << 6)) != 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static sbyte I1_BT_reg_min_1_JCC(sbyte x) => (sbyte)((x & (1 << 6)) == 0 ? (x + 1) : (x - 1));
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I2_BT_reg_min_1(short x) => (x & (1 << 14)) != 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I4_BT_reg_min_1(int x) => (x & (1 << 30)) != 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I4_BT_reg_min_1_EQ(int x) => (x & (1 << 30)) == 0;
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static int I4_BT_reg_min_1_JCC(int x) => (x & (1 << 30)) == 0 ? (x + 1) : (x - 1);
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool I8_BT_reg_min_1(long x) => (x & (1L << 62)) != 0;
+
+
     [Fact]
     public static int TestEntryPoint()
     {
@@ -107,6 +152,69 @@ public static int TestEntryPoint()
         pass &= I8_BT_mem_reg(ref i8one, 64);
         pass &= !I8_BT_mem_reg(ref i8two, 0);
 
+        pass &= I1_BT_reg_min(sbyte.MinValue);
+        Assert.True(pass);
+        pass &= !I1_BT_reg_min(sbyte.MaxValue);
+        Assert.True(pass);
+        pass &= !I1_BT_reg_min_1(sbyte.MinValue);
+        Assert.True(pass);
+        pass &= I1_BT_reg_min_1(sbyte.MaxValue);
+        Assert.True(pass);
+
+        pass &= I1_BT_reg_min_JCC(sbyte.MinValue) == sbyte.MaxValue;
+        Assert.True(pass);
+        pass &= I1_BT_reg_min_JCC(sbyte.MaxValue) == sbyte.MinValue;
+        Assert.True(pass);
+        pass &= I1_BT_reg_min_1_JCC(sbyte.MinValue) == (sbyte.MinValue + 1);
+        Assert.True(pass);
+        pass &= I1_BT_reg_min_1_JCC(sbyte.MaxValue) == (sbyte.MaxValue - 1);
+        Assert.True(pass);
+
+        pass &= I2_BT_reg_min(short.MinValue);
+        Assert.Equal(0, pass ? 0 : 159);
+        pass &= !I2_BT_reg_min(short.MaxValue);
+        Assert.Equal(0, pass ? 0 : 161);
+        pass &= !I2_BT_reg_min_1(short.MinValue);
+        Assert.Equal(0, pass ? 0 : 163);
+        pass &= I2_BT_reg_min_1(short.MaxValue);
+        Assert.Equal(0, pass ? 0 : 165);
+
+        pass &= I4_BT_reg_min(int.MinValue);
+        Assert.Equal(0, pass ? 0 : 168);
+        pass &= !I4_BT_reg_min(int.MaxValue);
+        Assert.Equal(0, pass ? 0 : 170);
+        pass &= !I4_BT_reg_min_1(int.MinValue);
+        Assert.Equal(0, pass ? 0 : 172);
+        pass &= I4_BT_reg_min_1(int.MaxValue);
+        Assert.Equal(0, pass ? 0 : 174);
+
+        pass &= !I4_BT_reg_min_EQ(int.MinValue);
+        Assert.Equal(0, pass ? 0 : 177);
+        pass &= I4_BT_reg_min_EQ(int.MaxValue);
+        Assert.Equal(0, pass ? 0 : 179);
+        pass &= I4_BT_reg_min_1_EQ(int.MinValue);
+        Assert.Equal(0, pass ? 0 : 181);
+        pass &= !I4_BT_reg_min_1_EQ(int.MaxValue);
+        Assert.Equal(0, pass ? 0 : 183);
+
+        pass &= I4_BT_reg_min_JCC(int.MinValue) == int.MaxValue;
+        Assert.Equal(0, pass ? 0 : 186);
+        pass &= I4_BT_reg_min_JCC(int.MaxValue) == int.MinValue;
+        Assert.Equal(0, pass ? 0 : 188);
+        pass &= I4_BT_reg_min_1_JCC(int.MinValue) == (int.MinValue + 1);
+        Assert.Equal(0, pass ? 0 : 190);
+        pass &= I4_BT_reg_min_1_JCC(int.MaxValue) == (int.MaxValue - 1);
+        Assert.Equal(0, pass ? 0 : 192);
+
+        pass &= I8_BT_reg_min(long.MinValue);
+        Assert.Equal(0, pass ? 0 : 195);
+        pass &= !I8_BT_reg_min(long.MaxValue);
+        Assert.Equal(0, pass ? 0 : 197);
+        pass &= !I8_BT_reg_min_1(long.MinValue);
+        Assert.Equal(0, pass ? 0 : 199);
+        pass &= I8_BT_reg_min_1(long.MaxValue);
+        Assert.Equal(0, pass ? 0 : 201);
+
         if (pass)
         {
             Console.WriteLine("PASSED");
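
The lowering in PATCH 1/5 rests on two single-bit-test identities: a constant single-bit mask can be tested by shifting the tested bit into the sign bit and doing a signed compare against zero, and any single-bit test can instead be rewritten as a right shift followed by an AND with 1 (the variable-index path above is still work in progress, hence the early `return cmp->gtNext;`). A minimal C# sketch of the two identities, verified by brute force; the class and method names are illustrative and not from the patch:

    using System;

    static class BitTestIdentities
    {
        // (x & (1L << k)) != 0  <=>  (x << (63 - k)) < 0 : the tested bit becomes the sign bit.
        static bool ViaSignBit(long x, int k) => (x << (63 - k)) < 0;

        // (x & (1L << k)) != 0  <=>  ((x >> k) & 1) != 0 : the tested bit becomes the lowest bit.
        static bool ViaLowBit(long x, int k) => ((x >> k) & 1) != 0;

        static void Main()
        {
            var rng = new Random(42);
            for (int i = 0; i < 1_000_000; i++)
            {
                long x = rng.NextInt64();
                int  k = rng.Next(64);
                bool expected = (x & (1L << k)) != 0;
                if (ViaSignBit(x, k) != expected || ViaLowBit(x, k) != expected)
                    throw new Exception($"mismatch: x=0x{x:x}, k={k}");
            }
            Console.WriteLine("identities hold");
        }
    }

Both forms avoid materializing the `1 << bitIndex` mask in a register, which matters on RISC-V: base RV64I has no single-instruction bit test (that is roughly what the Zbs `bext` instruction adds, when available), so plain shifts and signed compares are the cheapest building blocks.
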
From 6686029c8a8780c5f8284fc641437b32f53053e3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomek=20Sowi=C5=84ski?=
Date: Fri, 4 Jul 2025 15:59:04 +0200
Subject: [PATCH 2/5] Reuse xarch code for extracting bit index

---
 src/coreclr/jit/codegenriscv64.cpp        |   1 +
 src/coreclr/jit/lower.cpp                 | 130 ++++++++++------------
 src/tests/JIT/Directed/BitTest/BitTest.cs |  30 +----
 3 files changed, 62 insertions(+), 99 deletions(-)

diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index ada2c9268d527a..d4e27f61f1cd24 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -5021,6 +5021,7 @@ void CodeGen::genCodeForShift(GenTree* tree)
     }
     else
     {
+        assert(isImmed(tree));
         instruction ins = genGetInsForOper(tree);
 
         unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal;
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index c6ebe0e2ea2313..925bc1e0c9e490 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4149,12 +4149,33 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
 
 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
 
-    auto isVariableSingleBit = [](GenTree*& testedOp, GenTree*& bitOp) -> bool {
-        if (!bitOp->OperIs(GT_LSH))
+    // If 'test' is a single bit test, leaves the tested expr in the left op, the bit index in the right op, and returns
+    // true. Otherwise, returns false.
+    auto tryReduceSingleBitTestOps = [this](GenTreeOp* test) -> bool {
+        assert(test->OperIs(GT_AND, GT_TEST_EQ, GT_TEST_NE));
+        GenTree* testedOp = test->gtOp1;
+        GenTree* bitOp    = test->gtOp2;
+#ifdef TARGET_RISCV64
+        if (bitOp->IsIntegralConstUnsignedPow2())
         {
+            INT64 bit  = bitOp->AsIntConCommon()->IntegralValue();
+            int   log2 = BitOperations::Log2((UINT64)bit);
+            bitOp->AsIntConCommon()->SetIntegralValue(log2);
+            return true;
+        }
+#endif
+        if (!bitOp->OperIs(GT_LSH))
             std::swap(bitOp, testedOp);
+
+        if (bitOp->OperIs(GT_LSH) && varTypeIsIntOrI(bitOp) && bitOp->gtGetOp1()->IsIntegralConst(1))
+        {
+            BlockRange().Remove(bitOp->gtGetOp1());
+            BlockRange().Remove(bitOp);
+            test->gtOp1 = testedOp;
+            test->gtOp2 = bitOp->gtGetOp2();
+            return true;
         }
-        return bitOp->OperIs(GT_LSH) && varTypeIsIntOrI(bitOp) && bitOp->gtGetOp1()->IsIntegralConst(1);
+        return false;
     };
 
     ssize_t op2Value = op2->IconValue();
@@ -4252,73 +4273,53 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         }
 
 #ifdef TARGET_RISCV64
-        GenTree* testedOp    = andOp1;
-        GenTree* bitOp       = andOp2;
-        bool     isSingleBit = bitOp->IsIntegralConstUnsignedPow2() || isVariableSingleBit(testedOp, bitOp);
-        if ((op2Value == 0) && !bitOp->IsIntegralConst(1) && isSingleBit)
+        if (op2Value == 0 && !andOp2->isContained() && tryReduceSingleBitTestOps(op1->AsOp()))
         {
+            GenTree* testedOp   = op1->gtGetOp1();
+            GenTree* bitIndexOp = op1->gtGetOp2();
+
+            if (bitIndexOp->IsIntegralConst())
+                bitIndexOp->SetContained();
+
             LIR::Use cmpUse;
-            bool     isUserJtrue = BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE);
-            if (bitOp->IsIntegralConst() && (isUserJtrue ? !bitOp->isContained() : cmp->OperIs(GT_NE)))
-            {
-                // Shift the tested single bit into the sign bit, then check if negative/positive.
-                INT64 bit = bitOp->AsIntConCommon()->IntegralValue();
-                if (bit > 0)
+            bool isUserJtrue = BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE);
+            if (bitIndexOp->IsIntegralConst() && (cmp->OperIs(GT_NE) || isUserJtrue))
+            {
+                // Shift the tested bit into the sign bit, then check if negative/positive.
+                // Work on whole registers because comparisons and compressed shifts are full-register only.
+                INT64 bitIndex     = bitIndexOp->AsIntConCommon()->IntegralValue();
+                INT64 signBitIndex = genTypeSize(TYP_I_IMPL) * 8 - 1;
+                if (bitIndex < signBitIndex)
                 {
-                    int shiftAmount = genTypeSize(op1) * 8 - BitOperations::Log2((UINT64)bit) - 1;
-                    assert(shiftAmount > 0);
+                    bitIndexOp->AsIntConCommon()->SetIntegralValue(signBitIndex - bitIndex);
                     op1->SetOperRaw(GT_LSH);
-                    bitOp->AsIntConCommon()->SetIntegralValue(shiftAmount);
-                    bitOp->SetContained();
+                    op1->gtType = TYP_I_IMPL;
                 }
                 else
                 {
-                    // The tested single bit is the sign bit, just remove the AND
-                    assert(bit == INT_MIN || bit == LONG_MIN);
-                    cmp->AsOp()->gtOp1 = testedOp;
-                    BlockRange().Remove(bitOp);
+                    // The tested bit is the sign bit, remove "AND bitIndex" and only check if negative/positive
+                    assert(bitIndex == signBitIndex);
+                    assert(genActualType(testedOp) == TYP_I_IMPL);
+                    BlockRange().Remove(bitIndexOp);
                     BlockRange().Remove(op1);
+                    cmp->AsOp()->gtOp1 = testedOp;
                 }
 
+                op2->gtType = TYP_I_IMPL;
                 cmp->SetOperRaw(cmp->OperIs(GT_NE) ? GT_LT : GT_GE);
                 cmp->ClearUnsigned();
-                op2->SetContained();
 
-                return cmp->gtNext;
+                return cmp;
             }
-            else
-            {
-                return cmp->gtNext;
-                // TODO: debug
 
-                // Transform (a & bit) into ((a >> log2(bit)) & 1)
-                // The "!=/== 0" is folded below if necessary.
-                GenTree* shiftAmount = nullptr;
-                GenTree* one         = nullptr;
-                if (bitOp->IsIntegralConst()) // a & constBit
-                {
-                    INT64 bit  = bitOp->AsIntConCommon()->IntegralValue();
-                    int   log2 = BitOperations::Log2((UINT64)bit);
-                    shiftAmount = comp->gtNewIconNode(log2);
-                    BlockRange().InsertAfter(testedOp, shiftAmount);
-
-                    bitOp->AsIntConCommon()->SetIntegralValue(1);
-                    one = bitOp;
-                }
-                else // a & (1 << varBit)
-                {
-                    assert(bitOp->OperIs(GT_LSH));
-                    BlockRange().Remove(bitOp);
-                    shiftAmount = bitOp->gtGetOp2();
-                    one         = bitOp->gtGetOp1();
-                }
-                assert(one->IsIntegralConst(1));
-
-                GenTree* shiftRight = comp->gtNewOperNode(GT_RSH, testedOp->TypeGet(), testedOp, shiftAmount);
-                BlockRange().InsertAfter(testedOp, shiftRight);
-                op1->AsOp()->gtOp1 = shiftRight;
-                op1->AsOp()->gtOp2 = one;
-            }
+            // Shift the tested bit into the lowest bit, then AND with 1.
+            // The "EQ|NE 0" comparison is folded below as necessary.
+            var_types type = genActualType(testedOp);
+            op1->AsOp()->gtOp1 = andOp1 = comp->gtNewOperNode(GT_RSH, type, testedOp, bitIndexOp);
+            op1->AsOp()->gtOp2 = andOp2 = comp->gtNewIconNode(1, type);
+            BlockRange().InsertBefore(op1, andOp1);
+            BlockRange().InsertBefore(op1, andOp2);
+            andOp2->SetContained();
         }
 #endif // TARGET_RISCV64
@@ -4355,9 +4356,9 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             }
         }
 
-#ifndef TARGET_RISCV64
         if (op2Value == 0)
         {
+#ifndef TARGET_RISCV64
             BlockRange().Remove(op1);
             BlockRange().Remove(op2);
@@ -4401,10 +4402,9 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             }
         }
 #endif
-        }
-        else
-#endif // !TARGET_RISCV64
-        if (andOp2->IsIntegralConst() && GenTree::Compare(andOp2, op2))
+        }
+        else if (andOp2->IsIntegralConst() && GenTree::Compare(andOp2, op2))
         {
             //
             // Transform EQ|NE(AND(x, y), y) into EQ|NE(AND(NOT(x), y), 0) when y is a constant.
@@ -4430,20 +4430,10 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         // Note that BT has the same behavior as LSH when the bit index exceeds the
         // operand bit size - it uses (bit_index MOD bit_size).
         //
-
-        GenTree* lsh = cmp->AsOp()->gtOp1;
-        GenTree* op  = cmp->AsOp()->gtOp2;
-        if (isVariableSingleBit(op, lsh))
+        if (tryReduceSingleBitTestOps(cmp->AsOp()))
         {
             cmp->SetOper(cmp->OperIs(GT_TEST_EQ) ? GT_BITTEST_EQ : GT_BITTEST_NE);
-
-            BlockRange().Remove(lsh->gtGetOp1());
-            BlockRange().Remove(lsh);
-
-            cmp->AsOp()->gtOp1 = op;
-            cmp->AsOp()->gtOp2 = lsh->gtGetOp2();
             cmp->gtGetOp2()->ClearContained();
-
             return cmp->gtNext;
         }
     }
diff --git a/src/tests/JIT/Directed/BitTest/BitTest.cs b/src/tests/JIT/Directed/BitTest/BitTest.cs
index 34d2febf7ba1fb..0c2b7511585238 100644
--- a/src/tests/JIT/Directed/BitTest/BitTest.cs
+++ b/src/tests/JIT/Directed/BitTest/BitTest.cs
@@ -41,7 +41,7 @@ public class Program
 
     [MethodImpl(MethodImplOptions.NoInlining)]
     static bool I1_BT_reg_min(sbyte x) => (x & (1 << 7)) != 0;
-
+
     [MethodImpl(MethodImplOptions.NoInlining)]
     static sbyte I1_BT_reg_min_JCC(sbyte x) => (sbyte)((x & (1 << 7)) == 0 ? (x + 1) : (x - 1));
 
@@ -153,67 +153,39 @@ public static int TestEntryPoint()
         pass &= !I8_BT_mem_reg(ref i8two, 0);
 
         pass &= I1_BT_reg_min(sbyte.MinValue);
-        Assert.True(pass);
         pass &= !I1_BT_reg_min(sbyte.MaxValue);
-        Assert.True(pass);
         pass &= !I1_BT_reg_min_1(sbyte.MinValue);
-        Assert.True(pass);
         pass &= I1_BT_reg_min_1(sbyte.MaxValue);
-        Assert.True(pass);
 
         pass &= I1_BT_reg_min_JCC(sbyte.MinValue) == sbyte.MaxValue;
-        Assert.True(pass);
         pass &= I1_BT_reg_min_JCC(sbyte.MaxValue) == sbyte.MinValue;
-        Assert.True(pass);
         pass &= I1_BT_reg_min_1_JCC(sbyte.MinValue) == (sbyte.MinValue + 1);
-        Assert.True(pass);
         pass &= I1_BT_reg_min_1_JCC(sbyte.MaxValue) == (sbyte.MaxValue - 1);
-        Assert.True(pass);
 
         pass &= I2_BT_reg_min(short.MinValue);
-        Assert.Equal(0, pass ? 0 : 159);
         pass &= !I2_BT_reg_min(short.MaxValue);
-        Assert.Equal(0, pass ? 0 : 161);
         pass &= !I2_BT_reg_min_1(short.MinValue);
-        Assert.Equal(0, pass ? 0 : 163);
         pass &= I2_BT_reg_min_1(short.MaxValue);
-        Assert.Equal(0, pass ? 0 : 165);
 
         pass &= I4_BT_reg_min(int.MinValue);
-        Assert.Equal(0, pass ? 0 : 168);
         pass &= !I4_BT_reg_min(int.MaxValue);
-        Assert.Equal(0, pass ? 0 : 170);
         pass &= !I4_BT_reg_min_1(int.MinValue);
-        Assert.Equal(0, pass ? 0 : 172);
         pass &= I4_BT_reg_min_1(int.MaxValue);
-        Assert.Equal(0, pass ? 0 : 174);
 
         pass &= !I4_BT_reg_min_EQ(int.MinValue);
-        Assert.Equal(0, pass ? 0 : 177);
         pass &= I4_BT_reg_min_EQ(int.MaxValue);
-        Assert.Equal(0, pass ? 0 : 179);
         pass &= I4_BT_reg_min_1_EQ(int.MinValue);
-        Assert.Equal(0, pass ? 0 : 181);
         pass &= !I4_BT_reg_min_1_EQ(int.MaxValue);
-        Assert.Equal(0, pass ? 0 : 183);
 
         pass &= I4_BT_reg_min_JCC(int.MinValue) == int.MaxValue;
-        Assert.Equal(0, pass ? 0 : 186);
         pass &= I4_BT_reg_min_JCC(int.MaxValue) == int.MinValue;
-        Assert.Equal(0, pass ? 0 : 188);
         pass &= I4_BT_reg_min_1_JCC(int.MinValue) == (int.MinValue + 1);
-        Assert.Equal(0, pass ? 0 : 190);
         pass &= I4_BT_reg_min_1_JCC(int.MaxValue) == (int.MaxValue - 1);
-        Assert.Equal(0, pass ? 0 : 192);
 
         pass &= I8_BT_reg_min(long.MinValue);
-        Assert.Equal(0, pass ? 0 : 195);
         pass &= !I8_BT_reg_min(long.MaxValue);
-        Assert.Equal(0, pass ? 0 : 197);
         pass &= !I8_BT_reg_min_1(long.MinValue);
-        Assert.Equal(0, pass ? 0 : 199);
         pass &= I8_BT_reg_min_1(long.MaxValue);
-        Assert.Equal(0, pass ? 0 : 201);
 
         if (pass)
         {
             Console.WriteLine("PASSED");
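
PATCH 2/5 folds the RISC-V path into the existing xarch helper (renamed `tryReduceSingleBitTestOps`), which reduces the test to a bare bit index whether the mask came from a constant power of two or from `1 << y`, and the lowering then operates on the whole 64-bit register (`TYP_I_IMPL`), since per the patch comment RISC-V comparisons and compressed shifts are full-register only. A hedged C# sketch of why widening is sound for a narrow input, under the assumption (RV64 convention) that a 32-bit value sits sign-extended in its register; names are illustrative, not from the patch:

    using System;

    static class WholeRegisterBitTest
    {
        // A 32-bit value is assumed sign-extended into 64 bits. For k in [0, 31],
        // bit k of the sign-extended value equals bit k of the original int, so
        // shifting it into bit 63 and testing the sign is still a valid bit test.
        static bool ViaSignBit32(int x, int k) => ((long)x << (63 - k)) < 0;

        static void Main()
        {
            var rng = new Random(42);
            for (int i = 0; i < 1_000_000; i++)
            {
                int x = rng.Next(int.MinValue, int.MaxValue);
                int k = rng.Next(32);
                if (ViaSignBit32(x, k) != ((x & (1 << k)) != 0))
                    throw new Exception($"mismatch: x=0x{x:x}, k={k}");
            }
            Console.WriteLine("whole-register form agrees");
        }
    }

On RV64 a branch on such a test should then reduce to something like a `slli` followed by `bltz`/`bgez`, though the exact instruction selection is up to codegen, not this lowering.
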
From 514fa0e8a7a13f2a0c974f50959b21721e4b1646 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomek=20Sowi=C5=84ski?=
Date: Thu, 10 Jul 2025 09:27:19 +0200
Subject: [PATCH 3/5] Disable removeCast

---
 src/coreclr/jit/lower.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 925bc1e0c9e490..fed0e1520000bb 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4215,6 +4215,8 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         bool removeCast =
 #ifdef TARGET_ARM64
             (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) && !castOp->isContained() &&
+#elif defined(TARGET_RISCV64)
+            false && // disable, comparisons and bit operations are full-register only
 #endif
             (castOp->OperIs(GT_LCL_VAR, GT_CALL, GT_OR, GT_XOR, GT_AND)
 #ifdef TARGET_XARCH

From 4a074d576245c89fd647f784965e3e4a098b6ce6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomek=20Sowi=C5=84ski?=
Date: Tue, 16 Sep 2025 10:05:26 +0200
Subject: [PATCH 4/5] Don't check JTRUE context for constant bit index
 lowering

---
 src/coreclr/jit/lower.cpp | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 2442f86c29bee3..06166428d9e609 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4261,11 +4261,6 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             GenTree* bitIndexOp = op1->gtGetOp2();
 
             if (bitIndexOp->IsIntegralConst())
-                bitIndexOp->SetContained();
-
-            LIR::Use cmpUse;
-            bool isUserJtrue = BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE);
-            if (bitIndexOp->IsIntegralConst() && (cmp->OperIs(GT_NE) || isUserJtrue))
             {
                 // Shift the tested bit into the sign bit, then check if negative/positive.
                 // Work on whole registers because comparisons and compressed shifts are full-register only.
@@ -4274,6 +4269,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
                 if (bitIndex < signBitIndex)
                 {
                     bitIndexOp->AsIntConCommon()->SetIntegralValue(signBitIndex - bitIndex);
+                    bitIndexOp->SetContained();
                     op1->SetOperRaw(GT_LSH);
                     op1->gtType = TYP_I_IMPL;
                 }

From 44366a35d34242651a4f15e8514fd7e223f806d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomek=20Sowi=C5=84ski?=
Date: Tue, 16 Sep 2025 10:13:19 +0200
Subject: [PATCH 5/5] cleanup

---
 src/coreclr/jit/lower.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 06166428d9e609..147c7ec1af3284 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4295,8 +4295,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
             var_types type = genActualType(testedOp);
             op1->AsOp()->gtOp1 = andOp1 = comp->gtNewOperNode(GT_RSH, type, testedOp, bitIndexOp);
             op1->AsOp()->gtOp2 = andOp2 = comp->gtNewIconNode(1, type);
-            BlockRange().InsertBefore(op1, andOp1);
-            BlockRange().InsertBefore(op1, andOp2);
+            BlockRange().InsertBefore(op1, andOp1, andOp2);
             andOp2->SetContained();
         }
 #endif // TARGET_RISCV64
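
After PATCH 3-5 the net shape of the RISC-V lowering is: a constant bit index always takes the sign-bit path (with the AND dropped entirely when the tested bit already is the sign bit), a variable bit index becomes shift-right-and-mask, and cast removal stays disabled because comparisons and bit operations are full-register only. A compact C# mirror of that decision tree for `(x & (1L << k)) != 0`; this is an illustrative sketch of the semantics, not JIT code:

    using System;

    static class FinalLoweringShape
    {
        // Constant bit index: slide the tested bit up into the sign bit; when the
        // tested bit already is the sign bit, only the "x < 0" check remains.
        static bool ConstIndex(long x, int k) => k == 63 ? x < 0 : (x << (63 - k)) < 0;

        // Variable bit index: shift the tested bit down into the lowest bit, mask with 1.
        static bool VarIndex(long x, int k) => ((x >> k) & 1) != 0;

        static void Main()
        {
            for (int k = 0; k < 64; k++)
            {
                long only   = 1L << k; // exactly bit k set
                long allBut = ~only;   // every bit except k set
                if (!ConstIndex(only, k) || !VarIndex(only, k) ||
                    ConstIndex(allBut, k) || VarIndex(allBut, k))
                    throw new Exception($"mismatch at k={k}");
            }
            Console.WriteLine("final shape agrees");
        }
    }

The `EQ` variants and the `_JCC` tests in BitTest.cs exercise the reversed condition (`GT_GE` instead of `GT_LT`) and the branch form respectively, which is why both a compare-into-register and a compare-and-branch consumer appear in the test matrix.
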