@@ -34,27 +34,27 @@ const (
 // * live request delivery with or without checkback
 // * (live/non-live historical) chunk syncing per proximity bin
 type SwarmSyncerServer struct {
-	po    uint8
-	store chunk.FetchStore
-	quit  chan struct{}
+	po       uint8
+	netStore *storage.NetStore
+	quit     chan struct{}
 }
 
 // NewSwarmSyncerServer is a constructor for SwarmSyncerServer
-func NewSwarmSyncerServer(po uint8, syncChunkStore chunk.FetchStore) (*SwarmSyncerServer, error) {
+func NewSwarmSyncerServer(po uint8, netStore *storage.NetStore) (*SwarmSyncerServer, error) {
 	return &SwarmSyncerServer{
-		po:    po,
-		store: syncChunkStore,
-		quit:  make(chan struct{}),
+		po:       po,
+		netStore: netStore,
+		quit:     make(chan struct{}),
 	}, nil
 }
 
-func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore chunk.FetchStore) {
+func RegisterSwarmSyncerServer(streamer *Registry, netStore *storage.NetStore) {
 	streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
 		po, err := ParseSyncBinKey(t)
 		if err != nil {
 			return nil, err
 		}
-		return NewSwarmSyncerServer(po, syncChunkStore)
+		return NewSwarmSyncerServer(po, netStore)
 	})
 	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
 	// 	return NewOutgoingProvableSwarmSyncer(po, db)
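For orientation, here is a minimal wiring sketch of how the two registration hooks touched by this diff would be called from node setup code. The setupSyncStreams name is hypothetical, and constructing the Registry and NetStore is assumed to happen elsewhere:

func setupSyncStreams(streamer *Registry, netStore *storage.NetStore) {
	// serve outgoing "SYNC" streams: one SwarmSyncerServer per proximity bin key
	RegisterSwarmSyncerServer(streamer, netStore)
	// handle incoming "SYNC" streams with SwarmSyncerClient instances
	RegisterSwarmSyncerClient(streamer, netStore)
}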
@@ -68,7 +68,7 @@ func (s *SwarmSyncerServer) Close() {
 
 // GetData retrieves the actual chunk from netstore
 func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
-	ch, err := s.store.Get(ctx, chunk.ModeGetSync, storage.Address(key))
+	ch, err := s.netStore.Get(ctx, chunk.ModeGetSync, storage.Address(key))
 	if err != nil {
 		return nil, err
 	}
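The rest of the method body is elided by the diff. As a self-contained sketch, the lookup amounts to the following; chunkData is an illustrative name, and chunk.Chunk exposing Data() is assumed from its use elsewhere in this package:

// chunkData mirrors GetData as a free function: fetch a chunk from the
// NetStore using the syncing read mode and return its raw payload.
func chunkData(ctx context.Context, netStore *storage.NetStore, key []byte) ([]byte, error) {
	ch, err := netStore.Get(ctx, chunk.ModeGetSync, storage.Address(key))
	if err != nil {
		return nil, err
	}
	return ch.Data(), nil
}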
@@ -77,7 +77,7 @@ func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, er
 
 // SessionIndex returns the current storage bin (po) index.
 func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
-	return s.store.LastPullSubscriptionBinID(s.po)
+	return s.netStore.LastPullSubscriptionBinID(s.po)
 }
 
 // SetNextBatch retrieves the next batch of hashes from the localstore.
@@ -88,7 +88,7 @@ func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
 // are added in batchTimeout period, the batch will be returned. This function
 // will block until new chunks are received from localstore pull subscription.
 func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
-	descriptors, stop := s.store.SubscribePull(context.Background(), s.po, from, to)
+	descriptors, stop := s.netStore.SubscribePull(context.Background(), s.po, from, to)
 	defer stop()
 
 	const batchTimeout = 2 * time.Second
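The loop that follows (elided by the diff) implements the timeout batching described in the doc comment. A condensed sketch of just that pattern, assuming SubscribePull yields chunk.Descriptor values, and leaving out the bin ID bookkeeping, batch size cap, and quit handling of the real method:

// collectBatch drains the pull subscription into a flat hash slice,
// flushing when no new chunk arrives within batchTimeout.
func collectBatch(descriptors <-chan chunk.Descriptor, batchTimeout time.Duration) (hashes []byte) {
	timer := time.NewTimer(batchTimeout)
	defer timer.Stop()
	for {
		select {
		case d, ok := <-descriptors:
			if !ok {
				return hashes // subscription closed
			}
			hashes = append(hashes, d.Address...)
			// a chunk arrived: restart the flush window
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(batchTimeout)
		case <-timer.C:
			return hashes // no new chunks within batchTimeout
		}
	}
}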
@@ -118,7 +118,7 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 			// This is the most naive approach to label the chunk as synced
 			// allowing it to be garbage collected. A proper way requires
 			// validating that the chunk is successfully stored by the peer.
-			err := s.store.Set(context.Background(), chunk.ModeSetSync, d.Address)
+			err := s.netStore.Set(context.Background(), chunk.ModeSetSync, d.Address)
 			if err != nil {
 				return nil, 0, 0, nil, err
 			}
@@ -158,67 +158,31 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 
 // SwarmSyncerClient
 type SwarmSyncerClient struct {
-	store  chunk.FetchStore
-	peer   *Peer
-	stream Stream
+	netStore *storage.NetStore
+	peer     *Peer
+	stream   Stream
 }
 
 // NewSwarmSyncerClient is a constructor for provable data exchange syncer
-func NewSwarmSyncerClient(p *Peer, store chunk.FetchStore, stream Stream) (*SwarmSyncerClient, error) {
+func NewSwarmSyncerClient(p *Peer, netStore *storage.NetStore, stream Stream) (*SwarmSyncerClient, error) {
 	return &SwarmSyncerClient{
-		store:  store,
-		peer:   p,
-		stream: stream,
+		netStore: netStore,
+		peer:     p,
+		stream:   stream,
 	}, nil
 }
 
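As a hypothetical call site (not part of this diff): subscribing code constructs one client per sync stream, with the proximity bin encoded in the stream key. FormatSyncBinKey is assumed here to be the inverse of the ParseSyncBinKey used in the server registration above:

// a live SYNC client for proximity bin 3 on peer p
client, err := NewSwarmSyncerClient(p, netStore, NewStream("SYNC", FormatSyncBinKey(3), true))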
-// // NewIncomingProvableSwarmSyncer is a contructor for provable data exchange syncer
-// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
-// 	retrieveC := make(storage.Chunk, chunksCap)
-// 	RunChunkRequestor(p, retrieveC)
-// 	storeC := make(storage.Chunk, chunksCap)
-// 	RunChunkStorer(store, storeC)
-// 	s := &SwarmSyncerClient{
-// 		po:            po,
-// 		priority:      priority,
-// 		sessionAt:     sessionAt,
-// 		start:         index,
-// 		end:           index,
-// 		nextC:         make(chan struct{}, 1),
-// 		intervals:     intervals,
-// 		sessionRoot:   sessionRoot,
-// 		sessionReader: chunker.Join(sessionRoot, retrieveC),
-// 		retrieveC:     retrieveC,
-// 		storeC:        storeC,
-// 	}
-// 	return s
-// }
-
-// // StartSyncing is called on the Peer to start the syncing process
-// // the idea is that it is called only after kademlia is close to healthy
-// func StartSyncing(s *Streamer, peerId enode.ID, po uint8, nn bool) {
-// 	lastPO := po
-// 	if nn {
-// 		lastPO = maxPO
-// 	}
-//
-// 	for i := po; i <= lastPO; i++ {
-// 		s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
-// 		s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
-// 	}
-// }
-
 // RegisterSwarmSyncerClient registers the client constructor function
 // to handle incoming sync streams
-func RegisterSwarmSyncerClient(streamer *Registry, store chunk.FetchStore) {
+func RegisterSwarmSyncerClient(streamer *Registry, netStore *storage.NetStore) {
 	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
-		return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
+		return NewSwarmSyncerClient(p, netStore, NewStream("SYNC", t, live))
 	})
 }
 
 // NeedData
 func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
-	return s.store.FetchFunc(ctx, key)
+	return s.netStore.FetchFunc(ctx, key)
 }
 
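NeedData simply forwards to NetStore.FetchFunc. A hedged caller sketch, assuming FetchFunc's contract of returning nil when the chunk is already stored locally, and otherwise a wait function that blocks until delivery:

// hypothetical caller, not part of this diff
if wait := client.NeedData(ctx, key); wait != nil {
	// chunk is not yet local: block until it is delivered or ctx is done
	if err := wait(ctx); err != nil {
		return err
	}
}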
 // BatchDone