Skip to content

Commit c893488

Browse files
committed
eth: pre-process downloader responses on the peer reader thread
1 parent 721c572 commit c893488

File tree

10 files changed

+207
-85
lines changed

10 files changed

+207
-85
lines changed

eth/downloader/downloader.go

Lines changed: 45 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,13 @@ var (
8585
// peerDropFn is a callback type for dropping a peer detected as malicious.
8686
type peerDropFn func(id string)
8787

88+
// headerTask is a set of downloaded headers to queue along with their precomputed
89+
// hashes to avoid constant rehashing.
90+
type headerTask struct {
91+
headers []*types.Header
92+
hashes []common.Hash
93+
}
94+
8895
type Downloader struct {
8996
mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
9097
mux *event.TypeMux // Event multiplexer to announce sync operation events
@@ -116,7 +123,7 @@ type Downloader struct {
116123
ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
117124

118125
// Channels
119-
headerProcCh chan []*types.Header // Channel to feed the header processor new tasks
126+
headerProcCh chan *headerTask // Channel to feed the header processor new tasks
120127

121128
// State sync
122129
pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
@@ -210,7 +217,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
210217
blockchain: chain,
211218
lightchain: lightchain,
212219
dropPeer: dropPeer,
213-
headerProcCh: make(chan []*types.Header, 1),
220+
headerProcCh: make(chan *headerTask, 1),
214221
quitCh: make(chan struct{}),
215222
SnapSyncer: snap.NewSyncer(stateDb),
216223
stateSyncStart: make(chan *stateSync),
@@ -626,7 +633,7 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
626633
if mode == SnapSync {
627634
fetch = 2 // head + pivot headers
628635
}
629-
headers, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true)
636+
headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true)
630637
if err != nil {
631638
return nil, nil, err
632639
}
@@ -645,7 +652,7 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
645652
if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
646653
return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
647654
}
648-
p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash())
655+
p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0])
649656
return head, nil, nil
650657
}
651658
// At this point we have 2 headers in total and the first is the
@@ -784,7 +791,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re
784791
from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
785792

786793
p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
787-
headers, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false)
794+
headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false)
788795
if err != nil {
789796
return 0, err
790797
}
@@ -811,7 +818,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re
811818
continue
812819
}
813820
// Otherwise check if we already know the header or not
814-
h := headers[i].Hash()
821+
h := hashes[i]
815822
n := headers[i].Number.Uint64()
816823

817824
var known bool
@@ -854,7 +861,7 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
854861
// Split our chain interval in two, and request the hash to cross check
855862
check := (start + end) / 2
856863

857-
headers, err := d.fetchHeadersByNumber(p, check, 1, 0, false)
864+
headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false)
858865
if err != nil {
859866
return 0, err
860867
}
@@ -864,7 +871,7 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
864871
return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
865872
}
866873
// Modify the search interval based on the response
867-
h := headers[0].Hash()
874+
h := hashes[0]
868875
n := headers[0].Number.Uint64()
869876

870877
var known bool
@@ -923,6 +930,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
923930
// - Full header retrieval if we're near the chain head
924931
var (
925932
headers []*types.Header
933+
hashes []common.Hash
926934
err error
927935
)
928936
switch {
@@ -932,15 +940,15 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
932940
d.pivotLock.RUnlock()
933941

934942
p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
935-
headers, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
943+
headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
936944

937945
case skeleton:
938946
p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
939-
headers, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
947+
headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
940948

941949
default:
942950
p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
943-
headers, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false)
951+
headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false)
944952
}
945953
switch err {
946954
case nil:
@@ -1038,12 +1046,14 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
10381046
// If we received a skeleton batch, resolve internals concurrently
10391047
var progressed bool
10401048
if skeleton {
1041-
filled, proced, err := d.fillHeaderSkeleton(from, headers)
1049+
filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers)
10421050
if err != nil {
10431051
p.log.Debug("Skeleton chain invalid", "err", err)
10441052
return fmt.Errorf("%w: %v", errInvalidChain, err)
10451053
}
10461054
headers = filled[proced:]
1055+
hashes = hashset[proced:]
1056+
10471057
progressed = proced > 0
10481058
from += uint64(proced)
10491059
} else {
@@ -1079,6 +1089,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
10791089
delay = n
10801090
}
10811091
headers = headers[:n-delay]
1092+
hashes = hashes[:n-delay]
10821093
}
10831094
}
10841095
}
@@ -1098,7 +1109,10 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
10981109
if len(headers) > 0 {
10991110
p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
11001111
select {
1101-
case d.headerProcCh <- headers:
1112+
case d.headerProcCh <- &headerTask{
1113+
headers: headers,
1114+
hashes: hashes,
1115+
}:
11021116
case <-d.cancelCh:
11031117
return errCanceled
11041118
}
@@ -1121,19 +1135,19 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
11211135
//
11221136
// The method returns the entire filled skeleton and also the number of headers
11231137
// already forwarded for processing.
1124-
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
1138+
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) {
11251139
log.Debug("Filling up skeleton", "from", from)
11261140
d.queue.ScheduleSkeleton(from, skeleton)
11271141

11281142
err := d.concurrentFetch((*headerQueue)(d))
11291143
if err != nil {
11301144
log.Debug("Skeleton fill failed", "err", err)
11311145
}
1132-
filled, proced := d.queue.RetrieveHeaders()
1146+
filled, hashes, proced := d.queue.RetrieveHeaders()
11331147
if err == nil {
11341148
log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced)
11351149
}
1136-
return filled, proced, err
1150+
return filled, hashes, proced, err
11371151
}
11381152

11391153
// fetchBodies iteratively downloads the scheduled block bodies, taking any
@@ -1199,9 +1213,9 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
11991213
rollbackErr = errCanceled
12001214
return errCanceled
12011215

1202-
case headers := <-d.headerProcCh:
1216+
case task := <-d.headerProcCh:
12031217
// Terminate header processing if we synced up
1204-
if len(headers) == 0 {
1218+
if task == nil || len(task.headers) == 0 {
12051219
// Notify everyone that headers are fully processed
12061220
for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
12071221
select {
@@ -1245,6 +1259,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
12451259
return nil
12461260
}
12471261
// Otherwise split the chunk of headers into batches and process them
1262+
headers, hashes := task.headers, task.hashes
1263+
12481264
gotHeaders = true
12491265
for len(headers) > 0 {
12501266
// Terminate if something failed in between processing chunks
@@ -1259,7 +1275,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
12591275
if limit > len(headers) {
12601276
limit = len(headers)
12611277
}
1262-
chunk := headers[:limit]
1278+
chunkHeaders := headers[:limit]
1279+
chunkHashes := hashes[:limit]
12631280

12641281
// In case of header only syncing, validate the chunk immediately
12651282
if mode == SnapSync || mode == LightSync {
@@ -1273,22 +1290,22 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
12731290
d.pivotLock.RUnlock()
12741291

12751292
frequency := fsHeaderCheckFrequency
1276-
if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
1293+
if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
12771294
frequency = 1
12781295
}
1279-
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
1296+
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
12801297
rollbackErr = err
12811298

12821299
// If some headers were inserted, track them as uncertain
12831300
if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
1284-
rollback = chunk[0].Number.Uint64()
1301+
rollback = chunkHeaders[0].Number.Uint64()
12851302
}
1286-
log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
1303+
log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
12871304
return fmt.Errorf("%w: %v", errInvalidChain, err)
12881305
}
12891306
// All verifications passed, track all headers within the allotted limits
12901307
if mode == SnapSync {
1291-
head := chunk[len(chunk)-1].Number.Uint64()
1308+
head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
12921309
if head-rollback > uint64(fsHeaderSafetyNet) {
12931310
rollback = head - uint64(fsHeaderSafetyNet)
12941311
} else {
@@ -1308,13 +1325,14 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
13081325
}
13091326
}
13101327
// Otherwise insert the headers for content retrieval
1311-
inserts := d.queue.Schedule(chunk, origin)
1312-
if len(inserts) != len(chunk) {
1313-
rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
1328+
inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
1329+
if len(inserts) != len(chunkHeaders) {
1330+
rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders))
13141331
return fmt.Errorf("%w: stale headers", errBadPeer)
13151332
}
13161333
}
13171334
headers = headers[limit:]
1335+
hashes = hashes[limit:]
13181336
origin += uint64(limit)
13191337
}
13201338
// Update the highest block number we know if a higher one is found.

eth/downloader/downloader_test.go

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -177,13 +177,18 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
177177
}
178178
}
179179
}
180+
hashes := make([]common.Hash, len(headers))
181+
for i, header := range headers {
182+
hashes[i] = header.Hash()
183+
}
180184
// Deliver the headers to the downloader
181185
req := &eth.Request{
182186
Peer: dlp.id,
183187
}
184188
res := &eth.Response{
185189
Req: req,
186190
Res: (*eth.BlockHeadersPacket)(&headers),
191+
Meta: hashes,
187192
Time: 1,
188193
Done: make(chan error, 1), // Ignore the returned status
189194
}
@@ -216,13 +221,18 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int,
216221
}
217222
}
218223
}
224+
hashes := make([]common.Hash, len(headers))
225+
for i, header := range headers {
226+
hashes[i] = header.Hash()
227+
}
219228
// Deliver the headers to the downloader
220229
req := &eth.Request{
221230
Peer: dlp.id,
222231
}
223232
res := &eth.Response{
224233
Req: req,
225234
Res: (*eth.BlockHeadersPacket)(&headers),
235+
Meta: hashes,
226236
Time: 1,
227237
Done: make(chan error, 1), // Ignore the returned status
228238
}
@@ -243,12 +253,22 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
243253
bodies[i] = new(eth.BlockBody)
244254
rlp.DecodeBytes(blob, bodies[i])
245255
}
256+
var (
257+
txsHashes = make([]common.Hash, len(bodies))
258+
uncleHashes = make([]common.Hash, len(bodies))
259+
)
260+
hasher := trie.NewStackTrie(nil)
261+
for i, body := range bodies {
262+
txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
263+
uncleHashes[i] = types.CalcUncleHash(body.Uncles)
264+
}
246265
req := &eth.Request{
247266
Peer: dlp.id,
248267
}
249268
res := &eth.Response{
250269
Req: req,
251270
Res: (*eth.BlockBodiesPacket)(&bodies),
271+
Meta: [][]common.Hash{txsHashes, uncleHashes},
252272
Time: 1,
253273
Done: make(chan error, 1), // Ignore the returned status
254274
}
@@ -268,12 +288,18 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *
268288
for i, blob := range blobs {
269289
rlp.DecodeBytes(blob, &receipts[i])
270290
}
291+
hasher := trie.NewStackTrie(nil)
292+
hashes = make([]common.Hash, len(receipts))
293+
for i, receipt := range receipts {
294+
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
295+
}
271296
req := &eth.Request{
272297
Peer: dlp.id,
273298
}
274299
res := &eth.Response{
275300
Req: req,
276301
Res: (*eth.ReceiptsPacket)(&receipts),
302+
Meta: hashes,
277303
Time: 1,
278304
Done: make(chan error, 1), // Ignore the returned status
279305
}

0 commit comments

Comments (0)