This repository was archived by the owner on Aug 2, 2021. It is now read-only.
Pull request status: Closed
46 commits
df7d887  swarm/storage/localstore: add hasser (janos, Feb 27, 2019)
c0a4b1b  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 4, 2019)
8381e38  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 4, 2019)
5ac0f79  cmd/swarm, swarm: integrate localstore partially (janos, Mar 4, 2019)
a96aeeb  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 5, 2019)
44633ce  swarm/network/stream: start of localstore integration (janos, Mar 5, 2019)
3e6e03a  swarm/storage/localstore: add TestExportImport and fix Export function (janos, Mar 5, 2019)
6ce3481  cmd/swarm: re-enable TestCLISwarmExportImport (janos, Mar 5, 2019)
5e69e3d  swarm: extend chunk.Store (janos, Mar 5, 2019)
5d7f169  swarm/storage/feed: fix TestValidatorInStore (janos, Mar 5, 2019)
f0a8d3a  swarm: rename ModeGetFeedLookup to ModeGetLookup (janos, Mar 5, 2019)
5474d30  swarm: minor updates to chunk, storage and localstore packages (janos, Mar 6, 2019)
9ca71f4  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 11, 2019)
83b64ce  swarm/shed: add vector uint64 field (janos, Mar 12, 2019)
547f30b  cmd/swarm: fix merge issue (janos, Mar 12, 2019)
970b305  swarm: use BinID for pull syncing index (janos, Mar 12, 2019)
5a669c6  swarm/network, swarm/storage/localstore: SetNextBatch pull subscription (janos, Mar 12, 2019)
4594130  swarm/network/stream: fix TestSyncerSimulation (janos, Mar 13, 2019)
4af1d7a  swarm: change localstore SubscribePull function signature (janos, Mar 13, 2019)
4df51ee  swarm/storage/localstore: fix SubscribePull comment (janos, Mar 14, 2019)
804301e  swarm/network/stream: return errors in roundRobinStore (janos, Mar 14, 2019)
6aa0d0e  swarm/storage/localstore: SubscribePull with both range ends inclusive (janos, Mar 14, 2019)
c16dcd1  swarm/network/stream: close intervals store in newStreamerTester (janos, Mar 15, 2019)
14758a4  swarm/storage/localstore: improve TestDB_SubscribePull_since (janos, Mar 15, 2019)
be1a360  swarm/network/stream: implement new SetNextBatch (janos, Mar 15, 2019)
36a0d96  swarm/network/stream: update handleChunkDeliveryMsg function (janos, Mar 15, 2019)
3b3d2b0  swarm/storage: remove unused code (janos, Mar 15, 2019)
4b8a5c8  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 15, 2019)
277ecf9  swarm: minor code cleanups (janos, Mar 15, 2019)
682eebd  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 15, 2019)
5ee9143  Merge branch 'master' into localstore-storage-integration (nonsense, Mar 21, 2019)
c3a76d1  lint (nonsense, Mar 21, 2019)
2d20bf9  swarm/storage/localstore: add tags to push index (acud, Mar 25, 2019)
b828a34  swarm/storage, swarm/shed: add support for persisting push tags (acud, Mar 26, 2019)
d8c648b  swarm/storage/localstore: TestDB_pushIndex_Tags (janos, Mar 26, 2019)
e295dc8  swarm: address pr review comments (janos, Mar 26, 2019)
c9d679d  swarm: merge branch 'master' into localstore-storage-integration (janos, Mar 26, 2019)
2fe747e  swarm/storage/localstore: implement backward compatible Import function (janos, Mar 26, 2019)
3e82234  swamr/network/stream: fix handleChunkDeliveryMsg (janos, Mar 27, 2019)
4c14a09  swarm/storage/localstore: remove getters, setters and putters (acud, Mar 27, 2019)
31f4b0d  swarm/network/stream: update handleChunkDeliveryMsg (janos, Mar 27, 2019)
2e24e99  swarm/storage/localstore: only one item in pull and push indexes for … (janos, Mar 29, 2019)
9c79e26  Revert "swarm/storage/localstore: TestDB_pushIndex_Tags" (acud, Apr 8, 2019)
ca7dcc2  Revert "swarm/storage, swarm/shed: add support for persisting push tags" (acud, Apr 8, 2019)
e4d723c  swarm/chunk, swarm/storage: adjust Store.Put interface to return bool… (acud, Apr 9, 2019)
904df7a  cmd/swarm: new localstore database migration (#1297) (acud, Apr 10, 2019)
6 changes: 3 additions & 3 deletions cmd/swarm/config.go
@@ -252,15 +252,15 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
 	}

 	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
-		currentConfig.LocalStoreParams.ChunkDbPath = storePath
+		currentConfig.ChunkDbPath = storePath
 	}

 	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
-		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+		currentConfig.DbCapacity = storeCapacity
 	}

 	if ctx.GlobalIsSet(SwarmStoreCacheCapacity.Name) {
-		currentConfig.LocalStoreParams.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
+		currentConfig.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
 	}

 	if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
4 changes: 2 additions & 2 deletions cmd/swarm/config_test.go
@@ -447,8 +447,8 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
 		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.LocalStoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.DbCapacity)
 	}

 	if info.HiveParams.KeepAliveInterval != 6000000000 {
118 changes: 105 additions & 13 deletions cmd/swarm/db.go
@@ -17,6 +17,10 @@
 package main

 import (
+	"archive/tar"
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
 	"fmt"
 	"io"
 	"os"
@@ -25,10 +29,22 @@ import (
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
 	"gopkg.in/urfave/cli.v1"
 )

+var legacyKeyIndex = byte(0)
+var keyData = byte(6)
+
+type dpaDBIndex struct {
+	Idx    uint64
+	Access uint64
+}
+
 var dbCommand = cli.Command{
 	Name:               "db",
 	CustomHelpTemplate: helpTemplate,
@@ -67,6 +83,9 @@ The import may be quite large, consider piping the input through the Unix
 pv(1) tool to get a progress bar:

     pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
+			Flags: []cli.Flag{
+				SwarmLegacyFlag,
+			},
 		},
 	},
 }
@@ -77,12 +96,6 @@ func dbExport(ctx *cli.Context) {
 		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
 	}

-	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
-	if err != nil {
-		utils.Fatalf("error opening local chunk database: %s", err)
-	}
-	defer store.Close()
-
 	var out io.Writer
 	if args[1] == "-" {
 		out = os.Stdout
@@ -95,6 +108,23 @@
 		out = f
 	}

+	isLegacy := localstore.IsLegacyDatabase(args[0])
+	if isLegacy {
+		count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out)
+		if err != nil {
+			utils.Fatalf("error exporting legacy local chunk database: %s", err)
+		}
+
+		log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count))
+		return
+	}
+
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
+	if err != nil {
+		utils.Fatalf("error opening local chunk database: %s", err)
+	}
+	defer store.Close()
+
 	count, err := store.Export(out)
 	if err != nil {
 		utils.Fatalf("error exporting local chunk database: %s", err)
@@ -109,6 +139,8 @@ func dbImport(ctx *cli.Context) {
 		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
 	}

+	legacy := ctx.IsSet(SwarmLegacyFlag.Name)
+
 	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
@@ -127,21 +159,81 @@
 		in = f
 	}

-	count, err := store.Import(in)
+	count, err := store.Import(in, legacy)
 	if err != nil {
 		utils.Fatalf("error importing local chunk database: %s", err)
 	}

 	log.Info(fmt.Sprintf("successfully imported %d chunks", count))
 }

-func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
+func openLDBStore(path string, basekey []byte) (*localstore.DB, error) {
[inline review comment on openLDBStore]
Member: could this not be in the localstore pkg as one of the constructors?
Member Author: Actually, I am not sure that we need it. It only checks if goleveldb data is present in a directory. Is it really necessary to have that check for export and import?
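
For reference, a minimal sketch of what the reviewer's suggestion could look like: a constructor-style helper inside the localstore package that folds in the same goleveldb presence check before opening the database. The NewAtPath name and the *Options parameter are assumptions for illustration, not part of this PR:

	// Hypothetical sketch only: a constructor in package localstore that
	// performs the same "CURRENT" file check as openLDBStore in cmd/swarm/db.go.
	package localstore

	import (
		"fmt"
		"os"
		"path/filepath"
	)

	// NewAtPath (hypothetical) refuses to open a directory that does not
	// already contain a goleveldb database, then delegates to New.
	func NewAtPath(path string, basekey []byte, o *Options) (*DB, error) {
		if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
			return nil, fmt.Errorf("invalid chunkdb path: %s", err)
		}
		return New(path, basekey, o)
	}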

 	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
 		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
 	}

-	storeparams := storage.NewDefaultStoreParams()
-	ldbparams := storage.NewLDBStoreParams(storeparams, path)
-	ldbparams.BaseKey = basekey
-	return storage.NewLDBStore(ldbparams)
+	return localstore.New(path, basekey, nil)
 }

+func decodeIndex(data []byte, index *dpaDBIndex) error {
+	dec := rlp.NewStream(bytes.NewReader(data), 0)
+	return dec.Decode(index)
+}
+
+func getDataKey(idx uint64, po uint8) []byte {
+	key := make([]byte, 10)
+	key[0] = keyData
+	key[1] = po
+	binary.BigEndian.PutUint64(key[2:], idx)
+
+	return key
+}
+
+func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) {
+	tw := tar.NewWriter(out)
+	defer tw.Close()
+	db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128})
+	if err != nil {
+		return 0, err
+	}
+	defer db.Close()
+
+	it := db.NewIterator(nil, nil)
+	defer it.Release()
+	var count int64
+	for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() {
+		key := it.Key()
+		if (key == nil) || (key[0] != legacyKeyIndex) {
+			break
+		}
+
+		var index dpaDBIndex
+
+		hash := key[1:]
+		decodeIndex(it.Value(), &index)
+
+		po := uint8(chunk.Proximity(basekey, hash))
+
+		datakey := getDataKey(index.Idx, po)
+		data, err := db.Get(datakey, nil)
+		if err != nil {
+			log.Crit(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey))
+			continue
+		}
+
+		hdr := &tar.Header{
+			Name: hex.EncodeToString(hash),
+			Mode: 0644,
+			Size: int64(len(data)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return count, err
+		}
+		if _, err := tw.Write(data); err != nil {
+			return count, err
+		}
+		count++
+	}
+
+	return count, nil
+}
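
Taken together, these helpers document the legacy LDBStore layout: index entries live under prefix byte 0 (legacyKeyIndex), keyed by chunk hash and holding an RLP-encoded dpaDBIndex, while chunk payloads live under prefix byte 6 (keyData), keyed by proximity order plus a big-endian index counter. A self-contained sketch of the data-key layout, mirroring getDataKey above (the values are illustrative only):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// Mirrors getDataKey from cmd/swarm/db.go: a legacy chunk-data key is
	// [keyData (0x06) | proximity order (1 byte) | index (8 bytes, big-endian)].
	func legacyDataKey(idx uint64, po uint8) []byte {
		key := make([]byte, 10)
		key[0] = 6 // keyData prefix
		key[1] = po
		binary.BigEndian.PutUint64(key[2:], idx)
		return key
	}

	func main() {
		// The 42nd chunk at proximity order 3:
		fmt.Printf("% x\n", legacyDataKey(42, 3))
		// Output: 06 03 00 00 00 00 00 00 00 2a
	}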