 // peerDropFn is a callback type for dropping a peer detected as malicious.
 type peerDropFn func(id string)
 
+// headerTask is a set of downloaded headers to queue along with their precomputed
+// hashes to avoid constant rehashing.
+type headerTask struct {
+	headers []*types.Header
+	hashes  []common.Hash
+}
+
 type Downloader struct {
 	mode uint32         // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
 	mux  *event.TypeMux // Event multiplexer to announce sync operation events
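
The comment on headerTask carries the rationale for the whole change: types.Header.Hash() RLP-encodes and keccak-hashes the header on every call, so computing each hash once at download time and carrying it alongside the header removes that repeated work from ancestor lookup, logging and queue scheduling. Below is a minimal illustrative sketch of the pattern, not the geth implementation; in the real code the hashes come back from the fetch helpers rather than being derived by a helper like the hypothetical newHeaderTask shown here.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// headerTask mirrors the struct added above: headers plus their hashes,
// aligned index by index.
type headerTask struct {
	headers []*types.Header
	hashes  []common.Hash
}

// newHeaderTask is a hypothetical helper that hashes every header exactly once,
// so later stages can read hashes[i] instead of calling headers[i].Hash() again.
func newHeaderTask(headers []*types.Header) *headerTask {
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash() // RLP-encode + keccak, done once per header
	}
	return &headerTask{headers: headers, hashes: hashes}
}

func main() {
	task := newHeaderTask([]*types.Header{{Number: common.Big1}})
	fmt.Println("header 0 hash:", task.hashes[0])
}
```
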
@@ -116,7 +123,7 @@ type Downloader struct {
 	ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
 
 	// Channels
-	headerProcCh chan []*types.Header // Channel to feed the header processor new tasks
+	headerProcCh chan *headerTask // Channel to feed the header processor new tasks
 
 	// State sync
 	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
@@ -210,7 +217,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
 		blockchain:     chain,
 		lightchain:     lightchain,
 		dropPeer:       dropPeer,
-		headerProcCh:   make(chan []*types.Header, 1),
+		headerProcCh:   make(chan *headerTask, 1),
 		quitCh:         make(chan struct{}),
 		SnapSyncer:     snap.NewSyncer(stateDb),
 		stateSyncStart: make(chan *stateSync),
@@ -626,7 +633,7 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
 	if mode == SnapSync {
 		fetch = 2 // head + pivot headers
 	}
-	headers, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true)
+	headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -645,7 +652,7 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
 		if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
 			return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
 		}
-		p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash())
+		p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0])
 		return head, nil, nil
 	}
 	// At this point we have 2 headers in total and the first is the
@@ -784,7 +791,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re
 	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
 
 	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
-	headers, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false)
+	headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false)
 	if err != nil {
 		return 0, err
 	}
@@ -811,7 +818,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re
 			continue
 		}
 		// Otherwise check if we already know the header or not
-		h := headers[i].Hash()
+		h := hashes[i]
 		n := headers[i].Number.Uint64()
 
 		var known bool
@@ -854,7 +861,7 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
 		// Split our chain interval in two, and request the hash to cross check
 		check := (start + end) / 2
 
-		headers, err := d.fetchHeadersByNumber(p, check, 1, 0, false)
+		headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false)
 		if err != nil {
 			return 0, err
 		}
@@ -864,7 +871,7 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
 			return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
 		}
 		// Modify the search interval based on the response
-		h := headers[0].Hash()
+		h := hashes[0]
 		n := headers[0].Number.Uint64()
 
 		var known bool
@@ -923,6 +930,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
 		// - Full header retrieval if we're near the chain head
 		var (
 			headers []*types.Header
+			hashes  []common.Hash
 			err     error
 		)
 		switch {
@@ -932,15 +940,15 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
 			d.pivotLock.RUnlock()
 
 			p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
-			headers, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
+			headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
 
 		case skeleton:
 			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
-			headers, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
+			headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
 
 		default:
 			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
-			headers, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false)
+			headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false)
 		}
 		switch err {
 		case nil:
@@ -1038,12 +1046,14 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
 		// If we received a skeleton batch, resolve internals concurrently
 		var progressed bool
 		if skeleton {
-			filled, proced, err := d.fillHeaderSkeleton(from, headers)
+			filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers)
 			if err != nil {
 				p.log.Debug("Skeleton chain invalid", "err", err)
 				return fmt.Errorf("%w: %v", errInvalidChain, err)
 			}
 			headers = filled[proced:]
+			hashes = hashset[proced:]
+
 			progressed = proced > 0
 			from += uint64(proced)
 		} else {
@@ -1079,6 +1089,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
 						delay = n
 					}
 					headers = headers[:n-delay]
+					hashes = hashes[:n-delay]
 				}
 			}
 		}
@@ -1098,7 +1109,10 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
 		if len(headers) > 0 {
 			p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
 			select {
-			case d.headerProcCh <- headers:
+			case d.headerProcCh <- &headerTask{
+				headers: headers,
+				hashes:  hashes,
+			}:
 			case <-d.cancelCh:
 				return errCanceled
 			}
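
With the channel now carrying *headerTask, the scheduling site above bundles the two aligned slices into a single message while still honouring cancellation. The following is a hedged, self-contained sketch of that producer pattern; procCh, cancel and the errCanceled value are stand-ins for d.headerProcCh, d.cancelCh and the downloader's own sentinel error, not the real fields.

```go
package sketch

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// headerTask as in the diff above: headers and hashes aligned index by index.
type headerTask struct {
	headers []*types.Header
	hashes  []common.Hash
}

// errCanceled is a stand-in for the downloader's cancellation error.
var errCanceled = errors.New("syncing canceled (requested)")

// scheduleHeaders is a hypothetical helper showing the send side: the whole
// batch is handed to the processor as one task, or the send is abandoned when
// the sync is cancelled.
func scheduleHeaders(procCh chan<- *headerTask, cancel <-chan struct{},
	headers []*types.Header, hashes []common.Hash) error {

	if len(headers) == 0 {
		return nil // nothing to schedule
	}
	select {
	case procCh <- &headerTask{headers: headers, hashes: hashes}:
		return nil
	case <-cancel:
		return errCanceled
	}
}
```
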
@@ -1121,19 +1135,19 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
 //
 // The method returns the entire filled skeleton and also the number of headers
 // already forwarded for processing.
-func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
+func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) {
 	log.Debug("Filling up skeleton", "from", from)
 	d.queue.ScheduleSkeleton(from, skeleton)
 
 	err := d.concurrentFetch((*headerQueue)(d))
 	if err != nil {
 		log.Debug("Skeleton fill failed", "err", err)
 	}
-	filled, proced := d.queue.RetrieveHeaders()
+	filled, hashes, proced := d.queue.RetrieveHeaders()
 	if err == nil {
 		log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced)
 	}
-	return filled, proced, err
+	return filled, hashes, proced, err
 }
 
 // fetchBodies iteratively downloads the scheduled block bodies, taking any
@@ -1199,9 +1213,9 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 			rollbackErr = errCanceled
 			return errCanceled
 
-		case headers := <-d.headerProcCh:
+		case task := <-d.headerProcCh:
 			// Terminate header processing if we synced up
-			if len(headers) == 0 {
+			if task == nil || len(task.headers) == 0 {
 				// Notify everyone that headers are fully processed
 				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
 					select {
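
On the receive side, a nil task or an empty header batch now plays the role an empty []*types.Header used to play: it tells processHeaders that header download has finished. A small sketch of that consumer loop follows, under the same assumptions as the previous snippet (it reuses the headerTask type and errCanceled stand-in defined there; the names are hypothetical, not the downloader's actual fields).

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// consumeHeaders drains header tasks until the producer signals completion
// with a nil task or an empty batch, handing each aligned batch to process.
func consumeHeaders(procCh <-chan *headerTask, cancel <-chan struct{},
	process func(headers []*types.Header, hashes []common.Hash) error) error {

	for {
		select {
		case <-cancel:
			return errCanceled
		case task := <-procCh:
			if task == nil || len(task.headers) == 0 {
				return nil // synced up: no more header batches
			}
			if err := process(task.headers, task.hashes); err != nil {
				return err
			}
		}
	}
}
```
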
@@ -1245,6 +1259,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				return nil
 			}
 			// Otherwise split the chunk of headers into batches and process them
+			headers, hashes := task.headers, task.hashes
+
 			gotHeaders = true
 			for len(headers) > 0 {
 				// Terminate if something failed in between processing chunks
@@ -1259,7 +1275,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if limit > len(headers) {
 					limit = len(headers)
 				}
-				chunk := headers[:limit]
+				chunkHeaders := headers[:limit]
+				chunkHashes := hashes[:limit]
 
 				// In case of header only syncing, validate the chunk immediately
 				if mode == SnapSync || mode == LightSync {
@@ -1273,22 +1290,22 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 					d.pivotLock.RUnlock()
 
 					frequency := fsHeaderCheckFrequency
-					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
+					if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
 						frequency = 1
 					}
-					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
+					if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
 						rollbackErr = err
 
 						// If some headers were inserted, track them as uncertain
 						if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
-							rollback = chunk[0].Number.Uint64()
+							rollback = chunkHeaders[0].Number.Uint64()
 						}
-						log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
+						log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
 						return fmt.Errorf("%w: %v", errInvalidChain, err)
 					}
 					// All verifications passed, track all headers within the alloted limits
 					if mode == SnapSync {
-						head := chunk[len(chunk)-1].Number.Uint64()
+						head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
 						if head-rollback > uint64(fsHeaderSafetyNet) {
 							rollback = head - uint64(fsHeaderSafetyNet)
 						} else {
@@ -1308,13 +1325,14 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 						}
 					}
 					// Otherwise insert the headers for content retrieval
-					inserts := d.queue.Schedule(chunk, origin)
-					if len(inserts) != len(chunk) {
-						rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
+					inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
+					if len(inserts) != len(chunkHeaders) {
+						rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders))
 						return fmt.Errorf("%w: stale headers", errBadPeer)
 					}
 				}
 				headers = headers[limit:]
+				hashes = hashes[limit:]
 				origin += uint64(limit)
 			}
 			// Update the highest block number we know if a higher one is found.
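
Throughout processHeaders the two slices are now advanced in lockstep: every window applied to headers (chunking by limit, advancing by limit, trimming by proced or delay) is applied to hashes as well, so index i always refers to the same header in both. The compact sketch below captures that invariant; it is illustrative only and processInChunks is a hypothetical helper, not part of the downloader.

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// processInChunks walks headers and their precomputed hashes in batches of at
// most limit items, slicing both slices identically so they stay aligned.
func processInChunks(headers []*types.Header, hashes []common.Hash, limit int,
	handle func([]*types.Header, []common.Hash) error) error {

	for len(headers) > 0 {
		n := limit
		if n > len(headers) {
			n = len(headers)
		}
		if err := handle(headers[:n], hashes[:n]); err != nil {
			return err
		}
		headers, hashes = headers[n:], hashes[n:]
	}
	return nil
}
```
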