17 | 17 | package main |
18 | 18 |
19 | 19 | import ( |
| 20 | + "bytes" |
20 | 21 | "errors" |
21 | 22 | "fmt" |
22 | 23 | "os" |
| 24 | + "os/signal" |
23 | 25 | "path/filepath" |
24 | 26 | "sort" |
25 | 27 | "strconv" |
| 28 | + "strings" |
| 29 | + "syscall" |
26 | 30 | "time" |
27 | 31 |
28 | 32 | "github.com/ethereum/go-ethereum/cmd/utils" |
@@ -63,6 +67,8 @@ Remove blockchain and state databases`, |
63 | 67 | dbPutCmd, |
64 | 68 | dbGetSlotsCmd, |
65 | 69 | dbDumpFreezerIndex, |
| 70 | + dbImportCmd, |
| 71 | + dbExportCmd, |
66 | 72 | }, |
67 | 73 | } |
68 | 74 | dbInspectCmd = cli.Command{ |
@@ -172,6 +178,36 @@ WARNING: This is a low-level operation which may cause database corruption!`, |
172 | 178 | }, |
173 | 179 | Description: "This command displays information about the freezer index.", |
174 | 180 | } |
| 181 | + dbImportCmd = cli.Command{ |
| 182 | + Action: utils.MigrateFlags(importLDBdata), |
| 183 | + Name: "import", |
| 184 | + Usage: "Imports leveldb-data from an exported RLP dump.", |
| 185 | + ArgsUsage: "<dumpfile> <start (optional)>", |
| 186 | + Flags: []cli.Flag{ |
| 187 | + utils.DataDirFlag, |
| 188 | + utils.SyncModeFlag, |
| 189 | + utils.MainnetFlag, |
| 190 | + utils.RopstenFlag, |
| 191 | + utils.RinkebyFlag, |
| 192 | + utils.GoerliFlag, |
| 193 | + }, |
| 194 | + Description: "The import command imports the specified chain data from an RLP-encoded stream.", |
| 195 | + } |
| 196 | + dbExportCmd = cli.Command{ |
| 197 | + Action: utils.MigrateFlags(exportChaindata), |
| 198 | + Name: "export", |
| 199 | + Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has a .gz suffix, gzip compression will be used.", |
| 200 | + ArgsUsage: "<type> <dumpfile>", |
| 201 | + Flags: []cli.Flag{ |
| 202 | + utils.DataDirFlag, |
| 203 | + utils.SyncModeFlag, |
| 204 | + utils.MainnetFlag, |
| 205 | + utils.RopstenFlag, |
| 206 | + utils.RinkebyFlag, |
| 207 | + utils.GoerliFlag, |
| 208 | + }, |
| 209 | + Description: "Exports the specified chain data to an RLP-encoded stream, optionally gzip-compressed.", |
| 210 | + } |
175 | 211 | ) |
176 | 212 |
177 | 213 | func removeDB(ctx *cli.Context) error { |
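The two new subcommands slot into the existing `geth db` command set, so an export/import round trip would look roughly like `geth db export preimage preimages.rlp.gz` followed by `geth db import preimages.rlp.gz` (illustrative file names). As a minimal, self-contained sketch of how such a subcommand is wired, assuming urfave/cli v1 (`gopkg.in/urfave/cli.v1`), which this file appears to use via its `cli` import; the `demoExport` action and the standalone app below are illustrative only and not part of geth:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

// demoExport mirrors the argument handling of exportChaindata: two positional
// arguments, <type> and <dumpfile>, read from the parsed cli context.
func demoExport(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	kind, dumpfile := ctx.Args().Get(0), ctx.Args().Get(1)
	fmt.Printf("would export %q to %s\n", kind, dumpfile)
	return nil
}

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{{
		Name:      "export",
		Usage:     "Exports the chain data into an RLP dump.",
		ArgsUsage: "<type> <dumpfile>",
		Action:    demoExport,
	}}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

In the real commands, `utils.MigrateFlags` additionally copies flag values given at the subcommand level up to the global context before the action runs; the sketch omits that step.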
@@ -494,3 +530,133 @@ func parseHexOrString(str string) ([]byte, error) { |
494 | 530 | } |
495 | 531 | return b, err |
496 | 532 | } |
| 533 | + |
| 534 | +func importLDBdata(ctx *cli.Context) error { |
| 535 | + start := 0 |
| 536 | + switch ctx.NArg() { |
| 537 | + case 1: |
| 538 | + break |
| 539 | + case 2: |
| 540 | + s, err := strconv.Atoi(ctx.Args().Get(1)) |
| 541 | + if err != nil { |
| 542 | + return fmt.Errorf("second arg must be an integer: %v", err) |
| 543 | + } |
| 544 | + start = s |
| 545 | + default: |
| 546 | + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) |
| 547 | + } |
| 548 | + var ( |
| 549 | + fName = ctx.Args().Get(0) |
| 550 | + stack, _ = makeConfigNode(ctx) |
| 551 | + interrupt = make(chan os.Signal, 1) |
| 552 | + stop = make(chan struct{}) |
| 553 | + ) |
| 554 | + defer stack.Close() |
| 555 | + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) |
| 556 | + defer signal.Stop(interrupt) |
| 557 | + defer close(interrupt) |
| 558 | + go func() { |
| 559 | + if _, ok := <-interrupt; ok { |
| 560 | + log.Info("Interrupted during ldb import, stopping at next batch") |
| 561 | + } |
| 562 | + close(stop) |
| 563 | + }() |
| 564 | + db := utils.MakeChainDatabase(ctx, stack, false) |
| 565 | + return utils.ImportLDBData(db, fName, int64(start), stop) |
| 566 | +} |
| 567 | + |
| 568 | +type preimageIterator struct { |
| 569 | + iter ethdb.Iterator |
| 570 | +} |
| 571 | + |
| 572 | +func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) { |
| 573 | + for iter.iter.Next() { |
| 574 | + key := iter.iter.Key() |
| 575 | + if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) { |
| 576 | + return utils.OpBatchAdd, key, iter.iter.Value(), true |
| 577 | + } |
| 578 | + } |
| 579 | + return 0, nil, nil, false |
| 580 | +} |
| 581 | + |
| 582 | +func (iter *preimageIterator) Release() { |
| 583 | + iter.iter.Release() |
| 584 | +} |
| 585 | + |
| 586 | +type snapshotIterator struct { |
| 587 | + init bool |
| 588 | + account ethdb.Iterator |
| 589 | + storage ethdb.Iterator |
| 590 | +} |
| 591 | + |
| 592 | +func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) { |
| 593 | + if !iter.init { |
| 594 | + iter.init = true |
| 595 | + return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true |
| 596 | + } |
| 597 | + for iter.account.Next() { |
| 598 | + key := iter.account.Key() |
| 599 | + if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) { |
| 600 | + return utils.OpBatchAdd, key, iter.account.Value(), true |
| 601 | + } |
| 602 | + } |
| 603 | + for iter.storage.Next() { |
| 604 | + key := iter.storage.Key() |
| 605 | + if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) { |
| 606 | + return utils.OpBatchAdd, key, iter.storage.Value(), true |
| 607 | + } |
| 608 | + } |
| 609 | + return 0, nil, nil, false |
| 610 | +} |
| 611 | + |
| 612 | +func (iter *snapshotIterator) Release() { |
| 613 | + iter.account.Release() |
| 614 | + iter.storage.Release() |
| 615 | +} |
| 616 | + |
| 617 | +// chainExporters defines the export scheme for all exportable chain data. |
| 618 | +var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{ |
| 619 | + "preimage": func(db ethdb.Database) utils.ChainDataIterator { |
| 620 | + iter := db.NewIterator(rawdb.PreimagePrefix, nil) |
| 621 | + return &preimageIterator{iter: iter} |
| 622 | + }, |
| 623 | + "snapshot": func(db ethdb.Database) utils.ChainDataIterator { |
| 624 | + account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) |
| 625 | + storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil) |
| 626 | + return &snapshotIterator{account: account, storage: storage} |
| 627 | + }, |
| 628 | +} |
| 629 | + |
| 630 | +func exportChaindata(ctx *cli.Context) error { |
| 631 | + if ctx.NArg() < 2 { |
| 632 | + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) |
| 633 | + } |
| 634 | + // Parse the required chain data type, make sure it's supported. |
| 635 | + kind := ctx.Args().Get(0) |
| 636 | + kind = strings.ToLower(strings.Trim(kind, " ")) |
| 637 | + exporter, ok := chainExporters[kind] |
| 638 | + if !ok { |
| 639 | + var kinds []string |
| 640 | + for kind := range chainExporters { |
| 641 | + kinds = append(kinds, kind) |
| 642 | + } |
| 643 | + return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", ")) |
| 644 | + } |
| 645 | + var ( |
| 646 | + stack, _ = makeConfigNode(ctx) |
| 647 | + interrupt = make(chan os.Signal, 1) |
| 648 | + stop = make(chan struct{}) |
| 649 | + ) |
| 650 | + defer stack.Close() |
| 651 | + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) |
| 652 | + defer signal.Stop(interrupt) |
| 653 | + defer close(interrupt) |
| 654 | + go func() { |
| 655 | + if _, ok := <-interrupt; ok { |
| 656 | + log.Info("Interrupted during db export, stopping at next batch") |
| 657 | + } |
| 658 | + close(stop) |
| 659 | + }() |
| 660 | + db := utils.MakeChainDatabase(ctx, stack, true) |
| 661 | + return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop) |
| 662 | +} |
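The exporter hook is deliberately small: anything that can enumerate key/value records as (op, key, value, ok) tuples and release its resources can be registered in `chainExporters`. Below is a hedged sketch of what a further entry might look like, assuming `utils.ChainDataIterator` is satisfied by the `Next`/`Release` method set used above; the `"code"` kind, `codeIterator`, and its registration are hypothetical and not part of this change.

```go
// codeIterator walks the contract-code bucket and emits every well-formed
// entry as an OpBatchAdd record (hypothetical example, not in this PR).
type codeIterator struct {
	iter ethdb.Iterator
}

func (it *codeIterator) Next() (byte, []byte, []byte, bool) {
	for it.iter.Next() {
		key := it.iter.Key()
		// Only keys of the form CodePrefix + 32-byte code hash are exported.
		if bytes.HasPrefix(key, rawdb.CodePrefix) && len(key) == len(rawdb.CodePrefix)+common.HashLength {
			return utils.OpBatchAdd, key, it.iter.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (it *codeIterator) Release() {
	it.iter.Release()
}

// Hypothetical registration, mirroring the existing map entries:
//
//	chainExporters["code"] = func(db ethdb.Database) utils.ChainDataIterator {
//		return &codeIterator{iter: db.NewIterator(rawdb.CodePrefix, nil)}
//	}
```

Whatever iterator is registered gets drained by `utils.ExportChaindata` with the same stop channel, so a SIGINT/SIGTERM still lets the export stop at the next batch boundary, matching the interrupt handling in `importLDBdata` and `exportChaindata` above.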