Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 9 additions & 9 deletions src/sv2/template_provider.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ void Sv2TemplateProvider::Interrupt()
LOCK(m_tp_mutex);
try {
for (auto& t : GetBlockTemplates()) {
t.second->interruptWait();
t.second.second->interruptWait();
}
} catch (const ipc::Exception& e) {
// Bitcoin Core v30 does not yet implement interruptWait(), fall back
Expand Down Expand Up @@ -266,6 +266,7 @@ void Sv2TemplateProvider::ThreadSv2ClientHandler(size_t client_id)
};

while (!m_flag_interrupt_sv2) {
uint256 prev_hash;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hey @Sjors, I think there's an issue with the prev_hash variable in ThreadSv2ClientHandler.

Right now, prev_hash is declared at the top of the while loop but only gets assigned when block_template is null (inside that first if block). This works fine on the first iteration, but on subsequent iterations block_template won't be null anymore (since it gets updated at line 341), so the assignment never happens. That means the comparison if (new_prev_hash != prev_hash) ends up comparing against a freshly default-constructed (all-zero) uint256 rather than the previous template's prevhash, so it can spuriously report a tip change on every iteration.

I think the fix is pretty straightforward - just move prev_hash outside the loop and update it after each waitNext() call:

uint256 prev_hash;  // Move this before the while loop

while (!m_flag_interrupt_sv2) {
    if (!block_template) {
        // ... create template ...
        prev_hash = block_template->getBlockHeader().hashPrevBlock;
        m_block_template_cache.insert({template_id, std::make_pair(prev_hash, block_template)});
    }

    std::shared_ptr<BlockTemplate> tmpl = block_template->waitNext(options);
    if (tmpl) {
        block_template = tmpl;
        uint256 new_prev_hash{block_template->getBlockHeader().hashPrevBlock};
        
        if (new_prev_hash != prev_hash) {
            future_template = true;
            // ...
        }
        
        prev_hash = new_prev_hash;  // Add this line to update for next iteration
        m_block_template_cache.insert({m_template_id, std::make_pair(new_prev_hash, block_template)});
    }
}

This also has the nice side effect of removing the getBlockHeader() call that previously ran on every iteration (the old old_prev_hash line), so it's slightly more efficient as well.

if (!block_template) {
LogPrintLevel(BCLog::SV2, BCLog::Level::Trace, "Generate initial block template for client id=%zu\n",
client_id);
Expand All @@ -288,7 +289,7 @@ void Sv2TemplateProvider::ThreadSv2ClientHandler(size_t client_id)
LogPrintLevel(BCLog::SV2, BCLog::Level::Trace, "Assemble template: %.2fms\n",
Ticks<MillisecondsDouble>(SteadyClock::now() - time_start));

uint256 prev_hash{block_template->getBlockHeader().hashPrevBlock};
prev_hash = block_template->getBlockHeader().hashPrevBlock;
{
LOCK(m_tp_mutex);
if (prev_hash != m_best_prev_hash) {
Expand All @@ -299,7 +300,7 @@ void Sv2TemplateProvider::ThreadSv2ClientHandler(size_t client_id)

// Add template to cache before sending it, to prevent race
// condition: https://github.com/stratum-mining/stratum/issues/1773
m_block_template_cache.insert({template_id,block_template});
m_block_template_cache.insert({template_id,std::make_pair(prev_hash, block_template)});
}

{
Expand Down Expand Up @@ -346,7 +347,6 @@ void Sv2TemplateProvider::ThreadSv2ClientHandler(size_t client_id)
client_id);
}

uint256 old_prev_hash{block_template->getBlockHeader().hashPrevBlock};
std::shared_ptr<BlockTemplate> tmpl = block_template->waitNext(options);
// The client may have disconnected during the wait, check now to avoid
// a spurious IPC call and confusing log statements.
Expand All @@ -362,7 +362,7 @@ void Sv2TemplateProvider::ThreadSv2ClientHandler(size_t client_id)

{
LOCK(m_tp_mutex);
if (new_prev_hash != old_prev_hash) {
if (new_prev_hash != prev_hash) {
LogPrintLevel(BCLog::SV2, BCLog::Level::Trace, "Tip changed, client id=%zu\n",
client_id);
future_template = true;
Expand All @@ -375,7 +375,7 @@ void Sv2TemplateProvider::ThreadSv2ClientHandler(size_t client_id)

// Add template to cache before sending it, to prevent race
// condition: https://github.com/stratum-mining/stratum/issues/1773
m_block_template_cache.insert({m_template_id,block_template});
m_block_template_cache.insert({m_template_id, std::make_pair(new_prev_hash,block_template)});
}

{
Expand Down Expand Up @@ -422,7 +422,7 @@ void Sv2TemplateProvider::RequestTransactionData(Sv2Client& client, node::Sv2Req

return;
}
block = (*cached_block->second).getBlock();
block = (*cached_block->second.second).getBlock();
}

{
Expand Down Expand Up @@ -497,7 +497,7 @@ void Sv2TemplateProvider::SubmitSolution(node::Sv2SubmitSolutionMsg solution)
* on the network. In case of a reorg the node will be able to switch
* faster because it already has (but not fully validated) the block.
*/
block_template = cached_block_template->second;
block_template = cached_block_template->second.second;
}

// Submit the solution to construct and process the block
Expand Down Expand Up @@ -555,7 +555,7 @@ void Sv2TemplateProvider::PruneBlockTemplateCache()
// If the blocks prevout is not the tip's prevout, delete it.
uint256 prev_hash = m_best_prev_hash;
std::erase_if(m_block_template_cache, [prev_hash] (const auto& kv) {
if (kv.second->getBlockHeader().hashPrevBlock != prev_hash) {
if (kv.second.first != prev_hash) {
return true;
}
return false;
Expand Down
5 changes: 3 additions & 2 deletions src/sv2/template_provider.h
Original file line number Diff line number Diff line change
Expand Up @@ -104,9 +104,10 @@ class Sv2TemplateProvider : public Sv2EventsInterface
std::chrono::nanoseconds m_last_block_time GUARDED_BY(m_tp_mutex);

/**
* A cache that maps ids used in NewTemplate messages and its associated block template.
* A cache that maps ids used in NewTemplate messages and its associated
* <prevhash,block template>.
*/
using BlockTemplateCache = std::map<uint64_t, std::shared_ptr<BlockTemplate>>;
using BlockTemplateCache = std::map<uint64_t, std::pair<uint256, std::shared_ptr<BlockTemplate>>>;
BlockTemplateCache m_block_template_cache GUARDED_BY(m_tp_mutex);

public:
Expand Down
Loading