
cmd: Added support for downloading to another DB instance #14785

Merged (2 commits), Oct 11, 2017
Changes from 1 commit
cmd, eth: separate out FakePeer for future reuse
karalabe committed Oct 10, 2017
commit 35767dfd0c468872acf99c3fccc3a206ce709128
163 changes: 18 additions & 145 deletions cmd/geth/chaincmd.go
@@ -19,7 +19,6 @@ package main
import (
"encoding/json"
"fmt"
"math/big"
"os"
"runtime"
"strconv"
@@ -97,13 +96,15 @@ if already existing.`,
copydbCommand = cli.Command{
Action: utils.MigrateFlags(copyDb),
Name: "copydb",
Usage: "Copy from one chain DB into another using the downloader",
ArgsUsage: "<filename>",
Usage: "Create a local chain from a target chaindata folder",
ArgsUsage: "<sourceChaindataDir>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.SyncModeFlag,
utils.FakePoWFlag,
Review comment (Member):

I think we're going to need

			utils.TestnetFlag,
			utils.RinkebyFlag,

too to allow copying over the testnet chains.

utils.TestnetFlag,
utils.RinkebyFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -286,172 +287,44 @@ func exportChain(ctx *cli.Context) error {
return nil
}

type localPeer struct {
chainDb ethdb.Database
hc *core.HeaderChain
dl *downloader.Downloader
}

func (lp *localPeer) Head() (common.Hash, *big.Int) {
header := lp.hc.CurrentHeader()
return header.Hash(), header.Number
}

func (lp *localPeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
unknown bool
)

for !unknown && len(headers) < amount {
origin := lp.hc.GetHeaderByHash(hash)
if origin == nil {
break
}

number := origin.Number.Uint64()
headers = append(headers, origin)
if reverse {
for i := 0; i < int(skip)+1; i++ {
if header := lp.hc.GetHeader(hash, number); header != nil {
hash = header.ParentHash
number--
} else {
unknown = true
break
}
}
} else {
var (
current = origin.Number.Uint64()
next = current + uint64(skip) + 1
)
if header := lp.hc.GetHeaderByNumber(next); header != nil {
if lp.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
}
}

lp.dl.DeliverHeaders("local", headers)
return nil
}

func (lp *localPeer) RequestHeadersByNumber(num uint64, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
unknown bool
)

for !unknown && len(headers) < amount {
origin := lp.hc.GetHeaderByNumber(num)
if origin == nil {
break
}

if reverse {
if num >= uint64(skip+1) {
num -= uint64(skip + 1)
} else {
unknown = true
}
} else {
num += uint64(skip + 1)
}
headers = append(headers, origin)
}

lp.dl.DeliverHeaders("local", headers)
return nil
}

func (lp *localPeer) RequestBodies(hashes []common.Hash) error {
var (
transactions [][]*types.Transaction
uncles [][]*types.Header
)

for _, hash := range hashes {
block := core.GetBlock(lp.chainDb, hash, lp.hc.GetBlockNumber(hash))
transactions = append(transactions, block.Transactions())
uncles = append(uncles, block.Uncles())
}

lp.dl.DeliverBodies("local", transactions, uncles)
return nil
}

func (lp *localPeer) RequestReceipts(hashes []common.Hash) error {
var receipts [][]*types.Receipt

for _, hash := range hashes {
receipts = append(receipts, core.GetBlockReceipts(lp.chainDb, hash, lp.hc.GetBlockNumber(hash)))
}

lp.dl.DeliverReceipts("local", receipts)
return nil
}

func (lp *localPeer) RequestNodeData(hashes []common.Hash) error {
var data [][]byte

for _, hash := range hashes {
if entry, err := lp.chainDb.Get(hash.Bytes()); err == nil {
data = append(data, entry)
}
}

lp.dl.DeliverNodeData("local", data)
return nil
}

func copyDb(ctx *cli.Context) error {
// Ensure we have a source chain directory to copy
if len(ctx.Args()) != 1 {
utils.Fatalf("This command requires an argument.")
utils.Fatalf("Source chaindata directory path argument missing")
}

// Initialize a new chain for the running node to sync into
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
start := time.Now()

syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
mux := new(event.TypeMux)
dl := downloader.New(syncmode, chainDb, mux, chain, nil, nil)
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

var err error
filename := ctx.Args().First()
cache := ctx.GlobalInt(utils.CacheFlag.Name)
handles := 256
localdb, err := ethdb.NewLDBDatabase(filename, cache, handles)
// Create a source peer to satisfy downloader requests from
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
if err != nil {
return err
}

hc, err := core.NewHeaderChain(localdb, chain.Config(), chain.Engine(), func() bool { return false })
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
if err != nil {
return err
}

peer := &localPeer{localdb, hc, dl}
if err := dl.RegisterPeer("local", 63, peer); err != nil {
peer := downloader.NewFakePeer("local", db, hc, dl)
if err = dl.RegisterPeer("local", 63, peer); err != nil {
return err
}
// Synchronise with the simulated peer
start := time.Now()

currentHeader := hc.CurrentHeader()
if err := dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
return err
}
for dl.Synchronising() {
time.Sleep(10 * time.Millisecond)
}
fmt.Printf("Database copy done in %v\n", time.Since(start))

fmt.Printf("Database copy done in %v", time.Since(start))

// Compact the entire database to remove any sync overhead
start = time.Now()
fmt.Println("Compacting entire database...")
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
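
For context, the localPeer type deleted above answered the downloader's header, body, receipt and node-data requests straight from the source database; this commit moves that role into a reusable downloader.NewFakePeer. The sketch below is not part of the diff: it is a hypothetical distillation of the request surface the removed methods covered, with signatures copied from the deleted code, and the concrete interface in the downloader package may be named or factored differently.

	// Hypothetical package name; illustration only, not part of this change.
	package chaincopy

	import (
		"math/big"

		"github.com/ethereum/go-ethereum/common"
	)

	// syncPeer distils the contract the deleted localPeer satisfied and that
	// downloader.NewFakePeer is assumed to reproduce. Signatures mirror the
	// removed methods above.
	type syncPeer interface {
		Head() (common.Hash, *big.Int) // current head hash and header number
		RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error
		RequestHeadersByNumber(num uint64, amount int, skip int, reverse bool) error
		RequestBodies(hashes []common.Hash) error   // transactions and uncles per requested block
		RequestReceipts(hashes []common.Hash) error // receipt list per requested block
		RequestNodeData(hashes []common.Hash) error // raw trie nodes for fast sync
	}
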
21 changes: 14 additions & 7 deletions cmd/utils/flags.go
@@ -31,6 +31,8 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
@@ -1086,17 +1088,22 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
var err error
chainDb = MakeChainDatabase(ctx, stack)

engine := ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
var engine consensus.Engine
if config.Clique != nil {
engine = clique.New(config.Clique, chainDb)
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
if err != nil {
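
The MakeChain change above is what lets copydb verify headers copied from clique (proof-of-authority) networks such as Rinkeby, which ties in with the review comment about adding utils.TestnetFlag and utils.RinkebyFlag. A hypothetical condensation of that decision follows; the real code additionally honours FakePoWFlag and builds a fully configured ethash engine.

	// Hypothetical helper; illustration only, not part of the diff.
	package chaincopy

	import (
		"github.com/ethereum/go-ethereum/consensus"
		"github.com/ethereum/go-ethereum/consensus/clique"
		"github.com/ethereum/go-ethereum/consensus/ethash"
		"github.com/ethereum/go-ethereum/ethdb"
		"github.com/ethereum/go-ethereum/params"
	)

	// selectEngine picks the consensus engine from the genesis chain config:
	// a Clique section selects proof-of-authority, otherwise the ethash path
	// from the diff applies (shown here only as a faker placeholder).
	func selectEngine(config *params.ChainConfig, db ethdb.Database) consensus.Engine {
		if config.Clique != nil {
			return clique.New(config.Clique, db) // e.g. Rinkeby
		}
		return ethash.NewFaker() // placeholder for the full ethash branch in MakeChain
	}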