Merged

Changes from 12 commits
2 changes: 1 addition & 1 deletion src/coreclr/jit/assertionprop.cpp
@@ -5268,7 +5268,7 @@ class AssertionPropFlowCallback
{
// Scenario where next block and conditional block, both point to the same block.
// In such case, intersect the assertions present on both the out edges of predBlock.
assert(predBlock->bbNext == block);
assert(predBlock->NextIs(block));
BitVecOps::IntersectionD(apTraits, pAssertionOut, predBlock->bbAssertionOut);

if (VerboseDataflow())
27 changes: 21 additions & 6 deletions src/coreclr/jit/block.cpp
@@ -133,7 +133,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
// these cannot cause transfer to the handler...)
// TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
// something like:
// for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->bbNext; bb = bb->bbNext)
// for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->Next(); bb = bb->Next())
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
@@ -160,7 +160,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
if (enclosingDsc->HasFilter())
{
for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg;
filterBlk = filterBlk->bbNext)
filterBlk = filterBlk->Next())
{
res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res);

@@ -186,6 +186,21 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
return res;
}

//------------------------------------------------------------------------
// IsLastHotBlock: see if this is the last block before the cold section
//
// Arguments:
//    compiler - current compiler instance
//
// Returns:
//    true if the next block is fgFirstColdBlock
//    (if fgFirstColdBlock is null, this call is equivalent to IsLast())
//
bool BasicBlock::IsLastHotBlock(Compiler* compiler) const
{
    return (bbNext == compiler->fgFirstColdBlock);
}

Contributor: Should we have a corresponding IsFirstColdBlock(Compiler*)?

Contributor Author: I can add that in for consistency.
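A possible shape for that counterpart, sketched purely for illustration (it is not part of this diff, and the exact semantics would be up to the author):

// Hypothetical sketch only (not in this PR): an IsFirstColdBlock helper
// mirroring IsLastHotBlock, assuming it should report whether this block
// is the first block of the cold section.
bool BasicBlock::IsFirstColdBlock(Compiler* compiler) const
{
    return (this == compiler->fgFirstColdBlock);
}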

//------------------------------------------------------------------------
// checkPredListOrder: see if pred list is properly ordered
//
@@ -1509,10 +1524,10 @@ bool BasicBlock::isBBCallAlwaysPair() const
assert(!(this->bbFlags & BBF_RETLESS_CALL));
#endif
// Some asserts that the next block is a BBJ_ALWAYS of the proper form.
assert(this->bbNext != nullptr);
assert(this->bbNext->KindIs(BBJ_ALWAYS));
assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS);
assert(this->bbNext->isEmpty());
assert(!this->IsLast());
assert(this->Next()->KindIs(BBJ_ALWAYS));
assert(this->Next()->bbFlags & BBF_KEEP_BBJ_ALWAYS);
assert(this->Next()->isEmpty());

return true;
}
111 changes: 76 additions & 35 deletions src/coreclr/jit/block.h
@@ -508,10 +508,49 @@ struct BasicBlock : private LIR::Range
{
friend class LIR;

private:
BasicBlock* bbNext; // next BB in ascending PC offset order
BasicBlock* bbPrev;

void setNext(BasicBlock* next)
BBjumpKinds bbJumpKind; // jump (if any) at the end of this block

public:
BBjumpKinds GetBBJumpKind() const
{
return bbJumpKind;
}

void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* compiler))
{
#ifdef DEBUG
// BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
// TODO: Change assert to check if compiler is in appropriate optimization phase to use BBJ_NONE
// (right now, this assertion does the null check to avoid unused variable warnings)
assert((kind != BBJ_NONE) || (compiler != nullptr));
#endif // DEBUG
bbJumpKind = kind;
}
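// (Illustrative usage sketch, not part of this diff: in DEBUG builds a call site
// inside a Compiler member function would pass the compiler via the DEBUG_ARG
// macro, e.g. block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); in release
// builds the DEBUG_ARG argument compiles away.)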

BasicBlock* Prev() const
{
return bbPrev;
}

void SetPrev(BasicBlock* prev)
{
bbPrev = prev;
if (prev)
{
prev->bbNext = this;
}
}

BasicBlock* Next() const
{
return bbNext;
}

void SetNext(BasicBlock* next)
{
bbNext = next;
if (next)
@@ -520,6 +559,35 @@ struct BasicBlock : private LIR::Range
}
}

bool IsFirst() const
{
return (bbPrev == nullptr);
}

bool IsLast() const
{
return (bbNext == nullptr);
}

bool PrevIs(BasicBlock* block) const
{
return (bbPrev == block);
}

bool NextIs(BasicBlock* block) const
{
return (bbNext == block);
}

bool IsLastHotBlock(Compiler* compiler) const;

/* The following union describes the jump target(s) of this block */
union {
unsigned bbJumpOffs; // PC offset (temporary only)
BasicBlock* bbJumpDest; // basic block
BBswtDesc* bbJumpSwt; // switch descriptor
};

BasicBlockFlags bbFlags;

static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
@@ -702,33 +770,6 @@ struct BasicBlock : private LIR::Range
// a block corresponding to an exit from the try of a try/finally.
bool isBBCallAlwaysPairTail() const;

private:
BBjumpKinds bbJumpKind; // jump (if any) at the end of this block

public:
BBjumpKinds GetBBJumpKind() const
{
return bbJumpKind;
}

void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp))
{
#ifdef DEBUG
// BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
// TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE
// (right now, this assertion does the null check to avoid unused variable warnings)
assert((kind != BBJ_NONE) || (comp != nullptr));
#endif // DEBUG
bbJumpKind = kind;
}

/* The following union describes the jump target(s) of this block */
union {
unsigned bbJumpOffs; // PC offset (temporary only)
BasicBlock* bbJumpDest; // basic block
BBswtDesc* bbJumpSwt; // switch descriptor
};

bool KindIs(BBjumpKinds kind) const
{
return bbJumpKind == kind;
@@ -1435,10 +1476,10 @@ class BasicBlockIterator
{
assert(m_block != nullptr);
// Check that we haven't been spliced out of the list.
assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block));
assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block));
assert((m_block->IsLast()) || m_block->Next()->PrevIs(m_block));
assert((m_block->IsFirst()) || m_block->Prev()->NextIs(m_block));
Contributor: With the function, we can remove an unnecessary set of parens.

Suggested change:
- assert((m_block->IsLast()) || m_block->Next()->PrevIs(m_block));
- assert((m_block->IsFirst()) || m_block->Prev()->NextIs(m_block));
+ assert(m_block->IsLast() || m_block->Next()->PrevIs(m_block));
+ assert(m_block->IsFirst() || m_block->Prev()->NextIs(m_block));

Contributor Author: Thanks for catching all these nits (sorry, these kinds of PRs seem especially tedious to review).

m_block = m_block->bbNext;
m_block = m_block->Next();
return *this;
}

@@ -1501,7 +1542,7 @@ class BasicBlockRangeList

BasicBlockIterator end() const
{
return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block
return BasicBlockIterator(m_end->Next()); // walk until we see the block *following* the `m_end` block
}
};

@@ -1596,18 +1637,18 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
break;

case BBJ_NONE:
m_succs[0] = block->bbNext;
m_succs[0] = block->Next();
m_begin = &m_succs[0];
m_end = &m_succs[1];
break;

case BBJ_COND:
m_succs[0] = block->bbNext;
m_succs[0] = block->Next();
m_begin = &m_succs[0];

// If both fall-through and branch successors are identical, then only include
// them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
if (block->bbJumpDest == block->bbNext)
if (block->NextIs(block->bbJumpDest))
{
m_end = &m_succs[1];
}
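Taken together, the new accessors let the rest of the JIT avoid touching bbNext/bbPrev directly. A minimal sketch of the resulting idiom (illustrative only; the helper below is hypothetical and not code from this PR):

// Hypothetical helper, for illustration only: walking the block list with the
// new accessors instead of dereferencing bbNext directly.
void VisitAllBlocks(BasicBlock* firstBlock)
{
    for (BasicBlock* block = firstBlock; block != nullptr; block = block->Next())
    {
        // block->IsLast() replaces the old (block->bbNext == nullptr) check.
        if (block->IsLast())
        {
            // no successor in program order
        }
    }
}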
4 changes: 2 additions & 2 deletions src/coreclr/jit/clrjit.natvis
@@ -105,7 +105,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
<Exec>varIndex++</Exec>
<Exec>bbLiveInMap = bbLiveInMap >> 1</Exec>
</Loop>
<Exec>block = block->bbNext</Exec>
<Exec>block = block->Next()</Exec>
</Loop>
</CustomListItems>
<Item Name="outVarToRegMaps">"OutVarToRegMaps"</Item>
@@ -124,7 +124,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
<Exec>varIndex++</Exec>
<Exec>bbLiveInMap = bbLiveInMap >> 1</Exec>
</Loop>
<Exec>block = block->bbNext</Exec>
<Exec>block = block->Next()</Exec>
</Loop>
</CustomListItems>
<Item Name="AvailableRegs mask">this-&gt;m_AvailableRegs</Item>
12 changes: 6 additions & 6 deletions src/coreclr/jit/codegenarm.cpp
@@ -123,12 +123,12 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// we would have otherwise created retless calls.
assert(block->isBBCallAlwaysPair());

assert(block->bbNext != NULL);
assert(block->bbNext->KindIs(BBJ_ALWAYS));
assert(block->bbNext->bbJumpDest != NULL);
assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
assert(!block->IsLast());
assert(block->Next()->KindIs(BBJ_ALWAYS));
assert(block->Next()->bbJumpDest != NULL);
assert(block->Next()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);

bbFinallyRet = block->bbNext->bbJumpDest;
bbFinallyRet = block->Next()->bbJumpDest;

// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
@@ -143,7 +143,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// block is RETLESS.
assert(!(block->bbFlags & BBF_RETLESS_CALL));
assert(block->isBBCallAlwaysPair());
return block->bbNext;
return block->Next();
}

//------------------------------------------------------------------------
4 changes: 2 additions & 2 deletions src/coreclr/jit/codegenarm64.cpp
@@ -2160,7 +2160,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
}
GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);

BasicBlock* const nextBlock = block->bbNext;
BasicBlock* const nextBlock = block->Next();

if (block->bbFlags & BBF_RETLESS_CALL)
{
@@ -2184,7 +2184,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
BasicBlock* const jumpDest = nextBlock->bbJumpDest;

// Now go to where the finally funclet needs to return to.
if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
{
// Fall-through.
// TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
32 changes: 16 additions & 16 deletions src/coreclr/jit/codegencommon.cpp
@@ -402,10 +402,10 @@ void CodeGen::genMarkLabelsForCodegen()
{
// For callfinally thunks, we need to mark the block following the callfinally/always pair,
// as that's needed for identifying the range of the "duplicate finally" region in EH data.
BasicBlock* bbToLabel = block->bbNext;
BasicBlock* bbToLabel = block->Next();
if (block->isBBCallAlwaysPair())
{
bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS
bbToLabel = bbToLabel->Next(); // skip the BBJ_ALWAYS
}
if (bbToLabel != nullptr)
{
@@ -446,16 +446,16 @@ void CodeGen::genMarkLabelsForCodegen()
JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum);
JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum);

if (HBtab->ebdTryLast->bbNext != nullptr)
if (!HBtab->ebdTryLast->IsLast())
{
HBtab->ebdTryLast->bbNext->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->bbNext->bbNum);
HBtab->ebdTryLast->Next()->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->Next()->bbNum);
}

if (HBtab->ebdHndLast->bbNext != nullptr)
if (!HBtab->ebdHndLast->IsLast())
{
HBtab->ebdHndLast->bbNext->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->bbNext->bbNum);
HBtab->ebdHndLast->Next()->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->Next()->bbNum);
}

if (HBtab->HasFilter())
@@ -2302,9 +2302,9 @@ void CodeGen::genReportEH()
hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg);

tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext);
: compiler->ehCodeOffset(HBtab->ebdTryLast->Next());
hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext);
: compiler->ehCodeOffset(HBtab->ebdHndLast->Next());

if (HBtab->HasFilter())
{
@@ -2524,9 +2524,9 @@ void CodeGen::genReportEH()
hndBeg = compiler->ehCodeOffset(bbHndBeg);

tryEnd = (bbTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(bbTryLast->bbNext);
: compiler->ehCodeOffset(bbTryLast->Next());
hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(bbHndLast->bbNext);
: compiler->ehCodeOffset(bbHndLast->Next());

if (encTab->HasFilter())
{
@@ -2590,10 +2590,10 @@ void CodeGen::genReportEH()

// How big is it? The BBJ_ALWAYS has a null bbEmitCookie! Look for the block after, which must be
// a label or jump target, since the BBJ_CALLFINALLY doesn't fall through.
BasicBlock* bbLabel = block->bbNext;
BasicBlock* bbLabel = block->Next();
if (block->isBBCallAlwaysPair())
{
bbLabel = bbLabel->bbNext; // skip the BBJ_ALWAYS
bbLabel = bbLabel->Next(); // skip the BBJ_ALWAYS
}
if (bbLabel == nullptr)
{
@@ -5210,7 +5210,7 @@ void CodeGen::genReserveEpilog(BasicBlock* block)

assert(block != nullptr);
const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars);
bool last = (block->bbNext == nullptr);
bool last = (block->IsLast());
Contributor:

Suggested change:
- bool last = (block->IsLast());
+ bool last = block->IsLast();

or just substitute it in the call below and eliminate the last var.

GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
}
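A hedged sketch of the reviewer's second option, shown only for illustration (not part of this diff):

// Hypothetical: eliminate the local and pass the predicate straight through.
GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg,
                                      block->IsLast());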

@@ -5257,7 +5257,7 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block)

JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum);

bool last = (block->bbNext == nullptr);
bool last = (block->IsLast());
Contributor: ditto

GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, last);
}