diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 40572eac8d5..9d7a95aaf59 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -43,6 +43,7 @@ import ( "github.com/ledgerwatch/erigon/eth/filters" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/event" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" @@ -95,7 +96,7 @@ func NewSimulatedBackendWithConfig(alloc core.GenesisAlloc, config *params.Chain m: m, prependBlock: m.Genesis, getHeader: func(hash common.Hash, number uint64) (h *types.Header) { - if err := m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { h = rawdb.ReadHeader(tx, hash, number) return nil }); err != nil { @@ -104,7 +105,7 @@ func NewSimulatedBackendWithConfig(alloc core.GenesisAlloc, config *params.Chain return h }, } - backend.checkTEVM = ethdb.GetCheckTEVM(kv.NewObjectDatabase(m.DB)) + backend.checkTEVM = ethdb.GetCheckTEVM(olddb.NewObjectDatabase(m.DB)) backend.events = filters.NewEventSystem(&filterBackend{m.DB, backend}) backend.emptyPendingBlock() return backend @@ -119,7 +120,7 @@ func NewSimulatedBackend(t *testing.T, alloc core.GenesisAlloc, gasLimit uint64) return b } -func (b *SimulatedBackend) DB() ethdb.RwKV { +func (b *SimulatedBackend) DB() kv.RwDB { return b.m.DB } @@ -166,12 +167,12 @@ func (b *SimulatedBackend) emptyPendingBlock() { b.pendingReceipts = chain.Receipts[0] b.pendingHeader = chain.Headers[0] b.gasPool = new(core.GasPool).AddGas(b.pendingHeader.GasLimit) - b.pendingReader = state.NewPlainStateReader(kv.NewObjectDatabase(b.m.DB)) + b.pendingReader = state.NewPlainStateReader(olddb.NewObjectDatabase(b.m.DB)) b.pendingState = state.New(b.pendingReader) } // stateByBlockNumber retrieves a state by a given blocknumber. 
-func (b *SimulatedBackend) stateByBlockNumber(db ethdb.Tx, blockNumber *big.Int) *state.IntraBlockState { +func (b *SimulatedBackend) stateByBlockNumber(db kv.Tx, blockNumber *big.Int) *state.IntraBlockState { if blockNumber == nil || blockNumber.Cmp(b.pendingBlock.Number()) == 0 { return state.New(state.NewPlainState(db, b.pendingBlock.NumberU64())) } @@ -490,7 +491,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM return nil, errBlockNumberUnsupported } var res *core.ExecutionResult - if err := b.m.DB.View(context.Background(), func(tx ethdb.Tx) (err error) { + if err := b.m.DB.View(context.Background(), func(tx kv.Tx) (err error) { s := state.New(state.NewPlainStateReader(tx)) res, err = b.callContract(ctx, call, b.pendingBlock, s) if err != nil { @@ -846,7 +847,7 @@ func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList } // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. 
type filterBackend struct { - db ethdb.RwKV + db kv.RwDB b *SimulatedBackend } diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index a96079f61b5..ddd569c8034 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -37,7 +37,7 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" ) @@ -140,7 +140,7 @@ func TestNewSimulatedBackend(t *testing.T) { defer tx.Rollback() var num uint64 - if err := sim.m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := sim.m.DB.View(context.Background(), func(tx kv.Tx) error { num = rawdb.ReadCurrentHeader(tx).Number.Uint64() return nil }); err != nil { diff --git a/cmd/cons/commands/clique.go b/cmd/cons/commands/clique.go index 89c10f821d0..2b96f0d53bb 100644 --- a/cmd/cons/commands/clique.go +++ b/cmd/cons/commands/clique.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/consensus/clique" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" "github.com/ledgerwatch/erigon/params" @@ -50,11 +50,12 @@ var cliqueCmd = &cobra.Command{ Short: "Run clique consensus engine", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - return cliqueEngine(ctx) + logger := log.New() + return cliqueEngine(ctx, logger) }, } -func cliqueEngine(ctx context.Context) error { +func cliqueEngine(ctx context.Context, logger log.Logger) error { var server *CliqueServerImpl var err error if config == "test" { @@ -85,7 +86,7 @@ func cliqueEngine(ctx context.Context) error { return err } } - server.db = 
openDatabase(filepath.Join(datadir, "clique", "db")) + server.db = openDB(filepath.Join(datadir, "clique", "db"), logger) server.c = clique.New(server.chainConfig, ¶ms.SnapshotConfig{}, server.db) <-ctx.Done() return nil @@ -151,7 +152,7 @@ type CliqueServerImpl struct { genesis *core.Genesis chainConfig *params.ChainConfig c *clique.Clique - db ethdb.RwKV + db kv.RwDB } func NewCliqueServer(_ context.Context) *CliqueServerImpl { diff --git a/cmd/cons/commands/root.go b/cmd/cons/commands/root.go index 339d2b616ee..88d2a2f7197 100644 --- a/cmd/cons/commands/root.go +++ b/cmd/cons/commands/root.go @@ -6,9 +6,10 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/paths" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/internal/debug" + "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" ) @@ -62,14 +63,6 @@ func withConfig(cmd *cobra.Command) { cmd.Flags().StringVar(&config, "config", "", "`file:` to specify config file in file system, `embed:` to use embedded file, `test` to register test interface and receive config from test driver") } -func openDatabase(path string) ethdb.RwKV { - return openKV(path, false) -} - -func openKV(path string, exclusive bool) ethdb.RwKV { - opts := kv.NewMDBX().Path(path) - if exclusive { - opts = opts.Exclusive() - } - return opts.MustOpen() +func openDB(path string, logger log.Logger) kv.RwDB { + return mdbx.NewMDBX(logger).Path(path).MustOpen() } diff --git a/cmd/erigon/main.go b/cmd/erigon/main.go index a348ad84eb1..309933e95d5 100644 --- a/cmd/erigon/main.go +++ b/cmd/erigon/main.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" 
"github.com/ledgerwatch/erigon/params" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" @@ -19,7 +19,6 @@ import ( func main() { defer debug.LogPanic() - // creating a erigon-api app with all defaults app := erigoncli.MakeApp(runErigon, erigoncli.DefaultFlags) if err := app.Run(os.Args); err != nil { fmt.Fprintln(os.Stderr, err) @@ -28,15 +27,16 @@ func main() { } func runErigon(cliCtx *cli.Context) { + logger := log.New() // initializing the node and providing the current git commit there - log.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) + logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) nodeCfg := node.NewNodConfigUrfave(cliCtx) ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg) if cliCtx.GlobalIsSet(utils.DataDirFlag.Name) { // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. 
- chaindb := utils.MakeChainDatabase(nodeCfg) - if err := chaindb.View(context.Background(), func(tx ethdb.Tx) error { + chaindb := utils.MakeChainDatabase(logger, nodeCfg) + if err := chaindb.View(context.Background(), func(tx kv.Tx) error { h, err := rawdb.ReadCanonicalHash(tx, 0) if err != nil { panic(err) @@ -51,7 +51,7 @@ func runErigon(cliCtx *cli.Context) { chaindb.Close() } - err := node.New(nodeCfg, ethCfg).Serve() + err := node.New(nodeCfg, ethCfg, logger).Serve() if err != nil { log.Error("error while serving a Erigon node", "err", err) } diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index c61076dcde3..571b2b1da1d 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -30,8 +30,8 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -90,7 +90,7 @@ type stEnvMarshaling struct { // Apply applies a set of transactions to a pre-state func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (ethdb.RwKV, *ExecutionResult, error) { + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (kv.RwDB, *ExecutionResult, error) { // Capture errors for BLOCKHASH operation, if we haven't been supplied the // required blockhashes @@ -106,7 +106,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } return h } - db := kv.NewMemKV() + db := memdb.New() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -265,7 +265,7 @@ func (pre *Prestate) Apply(vmConfig 
vm.Config, chainConfig *params.ChainConfig, return db, execRs, nil } -func MakePreState(chainRules params.Rules, tx ethdb.RwTx, accounts core.GenesisAlloc) *state.IntraBlockState { +func MakePreState(chainRules params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) *state.IntraBlockState { var blockNr uint64 = 0 r, _ := state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) statedb := state.New(r) diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 5efd30b6513..5759b264016 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -30,7 +30,7 @@ import ( "time" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/urfave/cli" "github.com/ledgerwatch/erigon/cmd/evm/internal/compiler" @@ -135,7 +135,7 @@ func runCmd(ctx *cli.Context) error { } else { debugLogger = vm.NewStructLogger(logconfig) } - db := kv.NewMemKV() + db := memdb.New() if ctx.GlobalString(GenesisFlag.Name) != "" { gen := readGenesis(ctx.GlobalString(GenesisFlag.Name)) gen.MustCommit(db) diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 92ced4a2717..eadd7c8c7d7 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -27,7 +27,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/tests" @@ -98,7 +98,7 @@ func stateTestCmd(ctx *cli.Context) error { Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), } results := make([]StatetestResult, 0, len(tests)) - db := kv.NewMemKV() + db := memdb.New() defer db.Close() tx, txErr := db.BeginRw(context.Background()) diff --git a/cmd/hack/db/lmdb.go b/cmd/hack/db/lmdb.go index 6a87a7dc46b..4b478cc099f 100644 --- a/cmd/hack/db/lmdb.go +++ 
b/cmd/hack/db/lmdb.go @@ -5,25 +5,24 @@ import ( "bytes" "context" "encoding/binary" - // "errors" "fmt" "io" "io/ioutil" "math" "os" - "os/exec" "path" "strings" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" ) +var logger = log.New() + const ( PageSize = 4096 MdbxMagic uint64 = 0x59659DBDEF4C11 @@ -479,12 +478,12 @@ func (n *mdbx_node) getKV() (key string, value string) { /* ----------------------- DB generator functions ----------------------- */ // Generates an empty database and returns the file name -func nothing(kv ethdb.RwKV, _ ethdb.RwTx) (bool, error) { +func nothing(kv kv.RwDB, _ kv.RwTx) (bool, error) { return true, nil } // Generates a database with single table and two key-value pair in "t" DBI, and returns the file name -func generate2(tx ethdb.RwTx, entries int) error { +func generate2(tx kv.RwTx, entries int) error { c, err := tx.RwCursor("t") if err != nil { return err @@ -500,10 +499,10 @@ func generate2(tx ethdb.RwTx, entries int) error { } // Generates a database with 100 (maximum) of DBIs to produce branches in MAIN_DBI -func generate3(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func generate3(_ kv.RwDB, tx kv.RwTx) (bool, error) { for i := 0; i < 61; i++ { k := fmt.Sprintf("table_%05d", i) - if err := tx.(ethdb.BucketMigrator).CreateBucket(k); err != nil { + if err := tx.(kv.BucketMigrator).CreateBucket(k); err != nil { return false, err } } @@ -511,7 +510,7 @@ func generate3(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { } // Generates a database with one table, containing 1 short and 1 long (more than one page) values -func generate4(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func generate4(_ kv.RwDB, tx kv.RwTx) (bool, error) { c, err := tx.RwCursor("t") if err != nil { return 
false, err @@ -527,7 +526,7 @@ func generate4(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { } // Generates a database with one table, containing some DupSort values -func generate5(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func generate5(_ kv.RwDB, tx kv.RwTx) (bool, error) { c, err := tx.RwCursorDupSort("t") if err != nil { return false, err @@ -555,7 +554,7 @@ func generate5(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { } // Generate a database with one table, containing lots of dupsort values -func generate6(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func generate6(_ kv.RwDB, tx kv.RwTx) (bool, error) { c, err := tx.RwCursorDupSort("t") if err != nil { return false, err @@ -579,14 +578,14 @@ func generate6(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, nil } -func dropT(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - if err := tx.(ethdb.BucketMigrator).ClearBucket("t"); err != nil { +func dropT(_ kv.RwDB, tx kv.RwTx) (bool, error) { + if err := tx.(kv.BucketMigrator).ClearBucket("t"); err != nil { return false, err } return true, nil } -func generate7(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func generate7(_ kv.RwDB, tx kv.RwTx) (bool, error) { c1, err := tx.RwCursor("t1") if err != nil { return false, err @@ -609,33 +608,33 @@ func generate7(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, nil } -func dropT1(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - if err := tx.(ethdb.BucketMigrator).ClearBucket("t1"); err != nil { +func dropT1(_ kv.RwDB, tx kv.RwTx) (bool, error) { + if err := tx.(kv.BucketMigrator).ClearBucket("t1"); err != nil { return false, err } return true, nil } -func dropT2(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - if err := tx.(ethdb.BucketMigrator).ClearBucket("t2"); err != nil { +func dropT2(_ kv.RwDB, tx kv.RwTx) (bool, error) { + if err := tx.(kv.BucketMigrator).ClearBucket("t2"); err != nil { return false, err } return true, nil } // Generates a database with 100 (maximum) of DBIs to produce branches in 
MAIN_DBI -func generate8(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func generate8(_ kv.RwDB, tx kv.RwTx) (bool, error) { for i := 0; i < 100; i++ { k := fmt.Sprintf("table_%05d", i) - if err := tx.(ethdb.BucketMigrator).CreateBucket(k); err != nil { + if err := tx.(kv.BucketMigrator).CreateBucket(k); err != nil { return false, err } } return false, nil } -func generate9(tx ethdb.RwTx, entries int) error { - var cs []ethdb.RwCursor +func generate9(tx kv.RwTx, entries int) error { + var cs []kv.RwCursor for i := 0; i < 100; i++ { k := fmt.Sprintf("table_%05d", i) c, err := tx.RwCursor(k) @@ -656,10 +655,10 @@ func generate9(tx ethdb.RwTx, entries int) error { return nil } -func dropAll(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func dropAll(_ kv.RwDB, tx kv.RwTx) (bool, error) { for i := 0; i < 100; i++ { k := fmt.Sprintf("table_%05d", i) - if err := tx.(ethdb.BucketMigrator).DropBucket(k); err != nil { + if err := tx.(kv.BucketMigrator).DropBucket(k); err != nil { return false, err } } @@ -667,12 +666,12 @@ func dropAll(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { } // dropGradually drops every other table in its own transaction -func dropGradually(kv ethdb.RwKV, tx ethdb.RwTx) (bool, error) { +func dropGradually(db kv.RwDB, tx kv.RwTx) (bool, error) { tx.Rollback() for i := 0; i < 100; i += 2 { k := fmt.Sprintf("table_%05d", i) - if err := kv.Update(context.Background(), func(tx1 ethdb.RwTx) error { - return tx1.(ethdb.BucketMigrator).DropBucket(k) + if err := db.Update(context.Background(), func(tx1 kv.RwTx) error { + return tx1.DropBucket(k) }); err != nil { return false, err } @@ -680,7 +679,7 @@ func dropGradually(kv ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, nil } -func change1(tx ethdb.RwTx) (bool, error) { +func change1(tx kv.RwTx) (bool, error) { c, err := tx.RwCursor("t") if err != nil { return false, err @@ -695,7 +694,7 @@ func change1(tx ethdb.RwTx) (bool, error) { return true, nil } -func change2(tx ethdb.RwTx) (bool, error) { +func 
change2(tx kv.RwTx) (bool, error) { c, err := tx.RwCursor("t") if err != nil { return false, err @@ -710,7 +709,7 @@ func change2(tx ethdb.RwTx) (bool, error) { return true, nil } -func change3(tx ethdb.RwTx) (bool, error) { +func change3(tx kv.RwTx) (bool, error) { c, err := tx.RwCursor("t") if err != nil { return false, err @@ -725,7 +724,7 @@ func change3(tx ethdb.RwTx) (bool, error) { return true, nil } -func launchReader(kv ethdb.RwKV, tx ethdb.Tx, expectVal string, startCh chan struct{}, errorCh chan error) (bool, error) { +func launchReader(kv kv.RwDB, tx kv.Tx, expectVal string, startCh chan struct{}, errorCh chan error) (bool, error) { tx.Rollback() tx1, err1 := kv.BeginRo(context.Background()) if err1 != nil { @@ -759,13 +758,13 @@ func launchReader(kv ethdb.RwKV, tx ethdb.Tx, expectVal string, startCh chan str return false, nil } -func startReader(tx ethdb.Tx, startCh chan struct{}) (bool, error) { +func startReader(tx kv.Tx, startCh chan struct{}) (bool, error) { tx.Rollback() startCh <- struct{}{} return false, nil } -func checkReader(tx ethdb.Tx, errorCh chan error) (bool, error) { +func checkReader(tx kv.Tx, errorCh chan error) (bool, error) { tx.Rollback() if err := <-errorCh; err != nil { return false, err @@ -773,14 +772,14 @@ func checkReader(tx ethdb.Tx, errorCh chan error) (bool, error) { return false, nil } -func defragSteps(filename string, bucketsCfg dbutils.BucketsCfg, generateFs ...func(ethdb.RwKV, ethdb.RwTx) (bool, error)) error { +func defragSteps(filename string, bucketsCfg kv.TableCfg, generateFs ...func(kv.RwDB, kv.RwTx) (bool, error)) error { dir, err := ioutil.TempDir(".", "db-vis") if err != nil { return fmt.Errorf("creating temp dir for db visualisation: %w", err) } defer os.RemoveAll(dir) - var db ethdb.RwKV - db, err = kv2.NewMDBX().Path(dir).WithBucketsConfig(func(dbutils.BucketsCfg) dbutils.BucketsCfg { + var db kv.RwDB + db, err = kv2.NewMDBX(logger).Path(dir).WithBucketsConfig(func(kv.TableCfg) kv.TableCfg { return 
bucketsCfg }).Open() if err != nil { @@ -789,7 +788,7 @@ func defragSteps(filename string, bucketsCfg dbutils.BucketsCfg, generateFs ...f defer db.Close() for gi, generateF := range generateFs { var display bool - if err = db.Update(context.Background(), func(tx ethdb.RwTx) error { + if err = db.Update(context.Background(), func(tx kv.RwTx) error { var err1 error //nolint:scopelint display, err1 = generateF(db, tx) @@ -826,16 +825,16 @@ func defragSteps(filename string, bucketsCfg dbutils.BucketsCfg, generateFs ...f } func Defrag() error { - emptyBucketCfg := make(dbutils.BucketsCfg) + emptyBucketCfg := make(kv.TableCfg) fmt.Println("------------------- 1 -------------------") if err := defragSteps("vis1", emptyBucketCfg, nothing); err != nil { return err } - oneBucketCfg := make(dbutils.BucketsCfg) - oneBucketCfg["t"] = dbutils.BucketConfigItem{} + oneBucketCfg := make(kv.TableCfg) + oneBucketCfg["t"] = kv.TableConfigItem{} fmt.Println("------------------- 2 -------------------") - if err := defragSteps("vis2", oneBucketCfg, func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, generate2(tx, 2) }); err != nil { + if err := defragSteps("vis2", oneBucketCfg, func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return true, generate2(tx, 2) }); err != nil { return err } fmt.Println("------------------- 3 -------------------") @@ -843,15 +842,15 @@ func Defrag() error { return err } fmt.Println("------------------- 4 -------------------") - if err := defragSteps("vis4", oneBucketCfg, func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, generate2(tx, 200) }); err != nil { + if err := defragSteps("vis4", oneBucketCfg, func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return true, generate2(tx, 200) }); err != nil { return err } fmt.Println("------------------- 5 -------------------") if err := defragSteps("vis5", oneBucketCfg, generate4); err != nil { return err } - oneDupSortCfg := make(dbutils.BucketsCfg) - oneDupSortCfg["t"] = dbutils.BucketConfigItem{Flags: 
dbutils.DupSort} + oneDupSortCfg := make(kv.TableCfg) + oneDupSortCfg["t"] = kv.TableConfigItem{Flags: kv.DupSort} fmt.Println("------------------- 6 -------------------") if err := defragSteps("vis6", oneDupSortCfg, generate5); err != nil { return err @@ -861,13 +860,13 @@ func Defrag() error { return err } fmt.Println("------------------- 8 -------------------") - if err := defragSteps("vis8", oneDupSortCfg, func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, generate2(tx, 1000) }, dropT); err != nil { + if err := defragSteps("vis8", oneDupSortCfg, func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return true, generate2(tx, 1000) }, dropT); err != nil { return err } - twoBucketCfg := make(dbutils.BucketsCfg) - twoBucketCfg["t1"] = dbutils.BucketConfigItem{} - twoBucketCfg["t2"] = dbutils.BucketConfigItem{} + twoBucketCfg := make(kv.TableCfg) + twoBucketCfg["t1"] = kv.TableConfigItem{} + twoBucketCfg["t2"] = kv.TableConfigItem{} fmt.Println("------------------- 9 -------------------") if err := defragSteps("vis9", twoBucketCfg, generate7); err != nil { return err @@ -880,33 +879,33 @@ func Defrag() error { if err := defragSteps("vis11", twoBucketCfg, generate7, dropT1, dropT2); err != nil { return err } - manyBucketCfg := make(dbutils.BucketsCfg) + manyBucketCfg := make(kv.TableCfg) for i := 0; i < 100; i++ { k := fmt.Sprintf("table_%05d", i) - manyBucketCfg[k] = dbutils.BucketConfigItem{IsDeprecated: true} + manyBucketCfg[k] = kv.TableConfigItem{IsDeprecated: true} } fmt.Println("------------------- 12 -------------------") - if err := defragSteps("vis12", manyBucketCfg, generate8, func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, generate9(tx, 1000) }, dropGradually); err != nil { + if err := defragSteps("vis12", manyBucketCfg, generate8, func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return true, generate9(tx, 1000) }, dropGradually); err != nil { return err } fmt.Println("------------------- 13 -------------------") - if err := 
defragSteps("vis13", manyBucketCfg, generate8, func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return false, generate9(tx, 10000) }, dropAll); err != nil { + if err := defragSteps("vis13", manyBucketCfg, generate8, func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return false, generate9(tx, 10000) }, dropAll); err != nil { return err } fmt.Println("------------------- 14 -------------------") - if err := defragSteps("vis14", manyBucketCfg, generate8, func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return false, generate9(tx, 300000) }, dropGradually); err != nil { + if err := defragSteps("vis14", manyBucketCfg, generate8, func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return false, generate9(tx, 300000) }, dropGradually); err != nil { return err } fmt.Println("------------------- 15 -------------------") if err := defragSteps("vis15", oneBucketCfg, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, generate2(tx, 1000) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change1(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change2(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change3(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change2(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change3(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change2(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change3(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return true, generate2(tx, 1000) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change1(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change2(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change3(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change2(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change3(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change2(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { 
return change3(tx) }, ); err != nil { return err } @@ -916,21 +915,21 @@ func Defrag() error { fmt.Println("------------------- 16 -------------------") if err := defragSteps("vis16", oneBucketCfg, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return true, generate2(tx, 1000) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change1(tx) }, - func(kv ethdb.RwKV, tx ethdb.RwTx) (bool, error) { + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return true, generate2(tx, 1000) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change1(tx) }, + func(kv kv.RwDB, tx kv.RwTx) (bool, error) { return launchReader(kv, tx, "another_short_value_1", readerStartCh, readerErrorCh) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change2(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change3(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change2(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change3(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change2(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { return change3(tx) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change2(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change3(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change2(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change3(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change2(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return change3(tx) }, + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return startReader(tx, readerStartCh) }, - func(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { + func(_ kv.RwDB, tx kv.RwTx) (bool, error) { return checkReader(tx, readerErrorCh) }, ); err != nil { diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 38db47170b9..23b6d71b173 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -24,7 +24,8 @@ import 
( "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/ethdb/cbor" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/params" "github.com/wcharczuk/go-chart" "github.com/wcharczuk/go-chart/util" @@ -48,7 +49,6 @@ import ( "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/trie" - "github.com/torquem-ch/mdbx-go/mdbx" ) var ( @@ -304,44 +304,45 @@ func mychart() { } func bucketStats(chaindata string) error { - ethDb := kv2.MustOpen(chaindata) - defer ethDb.Close() - - var bucketList []string - if err1 := ethDb.View(context.Background(), func(txa ethdb.Tx) error { - if bl, err := txa.(ethdb.BucketMigrator).ListBuckets(); err == nil { - bucketList = bl - } else { - return err - } - return nil - }); err1 != nil { - ethDb.Close() - return err1 - } - - fmt.Printf(",BranchPageN,LeafPageN,OverflowN,Entries\n") - switch kv := ethDb.(type) { - case *kv2.MdbxKV: - type MdbxStat interface { - BucketStat(name string) (*mdbx.Stat, error) + /* + ethDb := mdbx.MustOpen(chaindata) + defer ethDb.Close() + + var bucketList []string + if err1 := ethDb.View(context.Background(), func(txa kv.Tx) error { + if bl, err := txa.(kv.BucketMigrator).ListBuckets(); err == nil { + bucketList = bl + } else { + return err + } + return nil + }); err1 != nil { + ethDb.Close() + return err1 } + fmt.Printf(",BranchPageN,LeafPageN,OverflowN,Entries\n") + switch db := ethDb.(type) { + case *mdbx.MdbxKV: + type MdbxStat interface { + BucketStat(name string) (*mdbx.Stat, error) + } - if err := kv.View(context.Background(), func(tx ethdb.Tx) error { - for _, bucket := range bucketList { - bs, statErr := tx.(MdbxStat).BucketStat(bucket) + if err := db.View(context.Background(), func(tx kv.Tx) error { + for _, bucket := range bucketList { + bs, statErr := 
tx.(MdbxStat).BucketStat(bucket) + tool.Check(statErr) + fmt.Printf("%s,%d,%d,%d,%d\n", bucket, + bs.BranchPages, bs.LeafPages, bs.OverflowPages, bs.Entries) + } + bs, statErr := tx.(MdbxStat).BucketStat("freelist") tool.Check(statErr) - fmt.Printf("%s,%d,%d,%d,%d\n", bucket, - bs.BranchPages, bs.LeafPages, bs.OverflowPages, bs.Entries) + fmt.Printf("%s,%d,%d,%d,%d\n", "freelist", bs.BranchPages, bs.LeafPages, bs.OverflowPages, bs.Entries) + return nil + }); err != nil { + panic(err) } - bs, statErr := tx.(MdbxStat).BucketStat("freelist") - tool.Check(statErr) - fmt.Printf("%s,%d,%d,%d,%d\n", "freelist", bs.BranchPages, bs.LeafPages, bs.OverflowPages, bs.Entries) - return nil - }); err != nil { - panic(err) } - } + */ return nil } @@ -554,9 +555,9 @@ func trieChart() { } func dbSlice(chaindata string, bucket string, prefix []byte) { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() - if err := db.View(context.Background(), func(tx ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { c, err := tx.Cursor(bucket) if err != nil { return err @@ -624,9 +625,9 @@ func printFullNodeRLPs() { // Searches 1000 blocks from the given one to try to find the one with the given state root hash func testBlockHashes(chaindata string, block int, stateRoot common.Hash) { - ethDb := kv2.MustOpen(chaindata) + ethDb := mdbx.MustOpen(chaindata) defer ethDb.Close() - tool.Check(ethDb.View(context.Background(), func(tx ethdb.Tx) error { + tool.Check(ethDb.View(context.Background(), func(tx kv.Tx) error { blocksToSearch := 10000000 for i := uint64(block); i < uint64(block+blocksToSearch); i++ { hash, err := rawdb.ReadCanonicalHash(tx, i) @@ -646,9 +647,9 @@ func testBlockHashes(chaindata string, block int, stateRoot common.Hash) { } func printCurrentBlockNumber(chaindata string) { - ethDb := kv2.MustOpen(chaindata) + ethDb := mdbx.MustOpen(chaindata) defer ethDb.Close() - ethDb.View(context.Background(), func(tx ethdb.Tx) error { 
+ ethDb.View(context.Background(), func(tx kv.Tx) error { hash := rawdb.ReadHeadBlockHash(tx) number := rawdb.ReadHeaderNumber(tx, hash) fmt.Printf("Block number: %d\n", *number) @@ -657,9 +658,9 @@ func printCurrentBlockNumber(chaindata string) { } func printTxHashes() { - db := kv2.MustOpen(paths.DefaultDataDir() + "/geth/chaindata") + db := mdbx.MustOpen(paths.DefaultDataDir() + "/geth/chaindata") defer db.Close() - if err := db.View(context.Background(), func(tx ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { for b := uint64(0); b < uint64(100000); b++ { hash, err := rawdb.ReadCanonicalHash(tx, b) tool.Check(err) @@ -700,7 +701,7 @@ func invTree(wrong, right, diff string, name string) { } func readAccount(chaindata string, account common.Address) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, txErr := db.BeginRo(context.Background()) @@ -717,7 +718,7 @@ func readAccount(chaindata string, account common.Address) error { } fmt.Printf("CodeHash:%x\nIncarnation:%d\n", a.CodeHash, a.Incarnation) - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return err } @@ -734,15 +735,15 @@ func readAccount(chaindata string, account common.Address) error { } func nextIncarnation(chaindata string, addrHash common.Hash) { - ethDb := kv2.MustOpen(chaindata) + ethDb := mdbx.MustOpen(chaindata) defer ethDb.Close() var found bool var incarnationBytes [common.IncarnationLength]byte startkey := make([]byte, common.HashLength+common.IncarnationLength+common.HashLength) var fixedbits = 8 * common.HashLength copy(startkey, addrHash[:]) - tool.Check(ethDb.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.HashedStorageBucket) + tool.Check(ethDb.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.HashedStorage) if err != nil { return err } @@ -762,21 +763,21 @@ func nextIncarnation(chaindata string, 
addrHash common.Hash) { } func repairCurrent() { - historyDb := kv2.MustOpen("/Volumes/tb4/erigon/ropsten/geth/chaindata") + historyDb := mdbx.MustOpen("/Volumes/tb4/erigon/ropsten/geth/chaindata") defer historyDb.Close() - currentDb := kv2.MustOpen("statedb") + currentDb := mdbx.MustOpen("statedb") defer currentDb.Close() - tool.Check(historyDb.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.ClearBucket(dbutils.HashedStorageBucket) + tool.Check(historyDb.Update(context.Background(), func(tx kv.RwTx) error { + return tx.ClearBucket(kv.HashedStorage) })) - tool.Check(historyDb.Update(context.Background(), func(tx ethdb.RwTx) error { - newB, err := tx.RwCursor(dbutils.HashedStorageBucket) + tool.Check(historyDb.Update(context.Background(), func(tx kv.RwTx) error { + newB, err := tx.RwCursor(kv.HashedStorage) if err != nil { return err } count := 0 - if err := currentDb.View(context.Background(), func(ctx ethdb.Tx) error { - c, err := ctx.Cursor(dbutils.HashedStorageBucket) + if err := currentDb.View(context.Background(), func(ctx kv.Tx) error { + c, err := ctx.Cursor(kv.HashedStorage) if err != nil { return err } @@ -799,10 +800,10 @@ func repairCurrent() { } func dumpStorage() { - db := kv2.MustOpen(paths.DefaultDataDir() + "/geth/chaindata") + db := mdbx.MustOpen(paths.DefaultDataDir() + "/geth/chaindata") defer db.Close() - if err := db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.StorageHistoryBucket, nil, func(k, v []byte) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.StorageHistory, nil, func(k, v []byte) error { fmt.Printf("%x %x\n", k, v) return nil }) @@ -812,15 +813,15 @@ func dumpStorage() { } func printBucket(chaindata string) { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() f, err := os.Create("bucket.txt") tool.Check(err) defer f.Close() fb := bufio.NewWriter(f) defer fb.Flush() - if err := 
db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.StorageHistoryBucket) + if err := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.StorageHistory) if err != nil { return err } @@ -837,7 +838,7 @@ func printBucket(chaindata string) { } func ValidateTxLookups2(chaindata string) { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() startTime := time.Now() sigs := make(chan os.Signal, 1) @@ -852,7 +853,7 @@ func ValidateTxLookups2(chaindata string) { log.Info("All done", "duration", time.Since(startTime)) } -func validateTxLookups2(db ethdb.RwKV, startBlock uint64, interruptCh chan bool) { +func validateTxLookups2(db kv.RwDB, startBlock uint64, interruptCh chan bool) { tx, err := db.BeginRo(context.Background()) if err != nil { panic(err) @@ -881,7 +882,7 @@ func validateTxLookups2(db ethdb.RwKV, startBlock uint64, interruptCh chan bool) bn := blockBytes.Bytes() for _, txn := range body.Transactions { - val, err := tx.GetOne(dbutils.TxLookupPrefix, txn.Hash().Bytes()) + val, err := tx.GetOne(kv.TxLookup, txn.Hash().Bytes()) iterations++ if iterations%100000 == 0 { log.Info("Validated", "entries", iterations, "number", blockNum) @@ -898,9 +899,9 @@ func validateTxLookups2(db ethdb.RwKV, startBlock uint64, interruptCh chan bool) func getModifiedAccounts(chaindata string) { // TODO(tjayrush): The call to GetModifiedAccounts needs a database tx fmt.Println("hack - getModiiedAccounts is temporarily disabled.") - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() - tool.Check(db.View(context.Background(), func(tx ethdb.Tx) error { + tool.Check(db.View(context.Background(), func(tx kv.Tx) error { addrs, err := changeset.GetModifiedAccounts(tx, 49300, 49400) tool.Check(err) fmt.Printf("Len(addrs)=%d\n", len(addrs)) @@ -971,7 +972,7 @@ func (r *Receiver) Result() trie.SubTries { } func regenerate(chaindata string) error { - db := kv2.MustOpen(chaindata) 
+ db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -1005,7 +1006,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo storageKeys := []string{} var m runtime.MemStats runtime.ReadMemStats(&m) - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err1 := db.BeginRo(context.Background()) if err1 != nil { @@ -1022,7 +1023,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo ts := dbutils.EncodeBlockNumber(block + 1) accountMap := make(map[string]*accounts.Account) - if err := changeset.Walk(tx, dbutils.AccountChangeSetBucket, ts, 0, func(blockN uint64, address, v []byte) (bool, error) { + if err := changeset.Walk(tx, kv.AccountChangeSet, ts, 0, func(blockN uint64, address, v []byte) (bool, error) { if blockN > *headNumber { return false, nil } @@ -1052,7 +1053,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo log.Info("Constructed account map", "size", len(accountMap), "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) storageMap := make(map[string][]byte) - if err := changeset.Walk(tx, dbutils.StorageChangeSetBucket, ts, 0, func(blockN uint64, address, v []byte) (bool, error) { + if err := changeset.Walk(tx, kv.StorageChangeSet, ts, 0, func(blockN uint64, address, v []byte) (bool, error) { if blockN > *headNumber { return false, nil } @@ -1081,7 +1082,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo if acc != nil { // Fill the code hashes if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { - if codeHash, err1 := tx.GetOne(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix([]byte(ks), acc.Incarnation)); err1 == nil { + if codeHash, err1 := tx.GetOne(kv.ContractCode, dbutils.GenerateStoragePrefix([]byte(ks), acc.Incarnation)); err1 == nil { copy(acc.CodeHash[:], codeHash) } else { return err1 @@ -1146,7 +1147,7 @@ 
func testGetProof(chaindata string, address common.Address, rewind int, regen bo } func dumpState(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() f, err := os.Create("statedump") if err != nil { @@ -1158,8 +1159,8 @@ func dumpState(chaindata string) error { stAccounts := 0 stStorage := 0 var varintBuf [10]byte // Buffer for varint number - if err := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.PlainStateBucket) + if err := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return err } @@ -1199,14 +1200,14 @@ func dumpState(chaindata string) error { } func changeSetStats(chaindata string, block1, block2 uint64) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() fmt.Printf("State stats\n") stAccounts := 0 stStorage := 0 - if err := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.PlainStateBucket) + if err := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return err } @@ -1233,7 +1234,7 @@ func changeSetStats(chaindata string, block1, block2 uint64) error { return err1 } defer tx.Rollback() - if err := changeset.Walk(tx, dbutils.AccountChangeSetBucket, dbutils.EncodeBlockNumber(block1), 0, func(blockN uint64, k, v []byte) (bool, error) { + if err := changeset.Walk(tx, kv.AccountChangeSet, dbutils.EncodeBlockNumber(block1), 0, func(blockN uint64, k, v []byte) (bool, error) { if blockN >= block2 { return false, nil } @@ -1247,7 +1248,7 @@ func changeSetStats(chaindata string, block1, block2 uint64) error { } storage := make(map[string]struct{}) - if err := changeset.Walk(tx, dbutils.StorageChangeSetBucket, dbutils.EncodeBlockNumber(block1), 0, func(blockN uint64, k, v []byte) (bool, error) { + if err := changeset.Walk(tx, kv.StorageChangeSet, 
dbutils.EncodeBlockNumber(block1), 0, func(blockN uint64, k, v []byte) (bool, error) { if blockN >= block2 { return false, nil } @@ -1266,7 +1267,7 @@ func changeSetStats(chaindata string, block1, block2 uint64) error { func searchChangeSet(chaindata string, key []byte, block uint64) error { fmt.Printf("Searching changesets\n") - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err1 := db.BeginRw(context.Background()) if err1 != nil { @@ -1274,7 +1275,7 @@ func searchChangeSet(chaindata string, key []byte, block uint64) error { } defer tx.Rollback() - if err := changeset.Walk(tx, dbutils.AccountChangeSetBucket, dbutils.EncodeBlockNumber(block), 0, func(blockN uint64, k, v []byte) (bool, error) { + if err := changeset.Walk(tx, kv.AccountChangeSet, dbutils.EncodeBlockNumber(block), 0, func(blockN uint64, k, v []byte) (bool, error) { if bytes.Equal(k, key) { fmt.Printf("Found in block %d with value %x\n", blockN, v) } @@ -1287,14 +1288,14 @@ func searchChangeSet(chaindata string, key []byte, block uint64) error { func searchStorageChangeSet(chaindata string, key []byte, block uint64) error { fmt.Printf("Searching storage changesets\n") - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err1 := db.BeginRw(context.Background()) if err1 != nil { return err1 } defer tx.Rollback() - if err := changeset.Walk(tx, dbutils.StorageChangeSetBucket, dbutils.EncodeBlockNumber(block), 0, func(blockN uint64, k, v []byte) (bool, error) { + if err := changeset.Walk(tx, kv.StorageChangeSet, dbutils.EncodeBlockNumber(block), 0, func(blockN uint64, k, v []byte) (bool, error) { if bytes.Equal(k, key) { fmt.Printf("Found in block %d with value %x\n", blockN, v) } @@ -1308,13 +1309,13 @@ func searchStorageChangeSet(chaindata string, key []byte, block uint64) error { func supply(chaindata string) error { startTime := time.Now() - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() count := 0 
supply := uint256.NewInt(0) var a accounts.Account - if err := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.PlainStateBucket) + if err := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return err } @@ -1343,11 +1344,11 @@ func supply(chaindata string) error { } func extractCode(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() var contractCount int - if err1 := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.CodeBucket) + if err1 := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.CodeBucket) if err != nil { return err } @@ -1368,15 +1369,15 @@ func extractCode(chaindata string) error { } func iterateOverCode(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() var contractCount int var contractKeyTotalLength int var contractValTotalLength int var codeHashTotalLength int var codeTotalLength int // Total length of all byte code (just to illustrate iterating) - if err1 := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.PlainContractCodeBucket) + if err1 := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.PlainContractCode) if err != nil { return err } @@ -1388,7 +1389,7 @@ func iterateOverCode(chaindata string) error { contractKeyTotalLength += len(k) contractValTotalLength += len(v) } - c, err = tx.Cursor(dbutils.CodeBucket) + c, err = tx.Cursor(kv.CodeBucket) if err != nil { return err } @@ -1418,7 +1419,7 @@ func mint(chaindata string, block uint64) error { defer f.Close() w := bufio.NewWriter(f) defer w.Flush() - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -1431,7 +1432,7 @@ func mint(chaindata string, block uint64) 
error { gwei.SetUint64(1000000000) blockEncoded := dbutils.EncodeBlockNumber(block) canonical := make(map[common.Hash]struct{}) - c, err := tx.Cursor(dbutils.HeaderCanonicalBucket) + c, err := tx.Cursor(kv.HeaderCanonical) if err != nil { return err } @@ -1448,7 +1449,7 @@ func mint(chaindata string, block uint64) error { } } log.Info("Read canonical hashes", "count", len(canonical)) - c, err = tx.Cursor(dbutils.BlockBodyPrefix) + c, err = tx.Cursor(kv.BlockBody) if err != nil { return err } @@ -1502,7 +1503,7 @@ func mint(chaindata string, block uint64) error { } func extractHashes(chaindata string, blockStep uint64, blockTotal uint64, name string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() f, err := os.Create(fmt.Sprintf("preverified_hashes_%s.go", name)) @@ -1518,7 +1519,7 @@ func extractHashes(chaindata string, blockStep uint64, blockTotal uint64, name s fmt.Fprintf(w, "var %sPreverifiedHashes = []string{\n", name) b := uint64(0) - tool.Check(db.View(context.Background(), func(tx ethdb.Tx) error { + tool.Check(db.View(context.Background(), func(tx kv.Tx) error { for b <= blockTotal { hash, err := rawdb.ReadCanonicalHash(tx, b) if err != nil { @@ -1543,14 +1544,14 @@ func extractHashes(chaindata string, blockStep uint64, blockTotal uint64, name s } func extractHeaders(chaindata string, block uint64) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRo(context.Background()) if err != nil { return err } defer tx.Rollback() - c, err := tx.Cursor(dbutils.HeadersBucket) + c, err := tx.Cursor(kv.Headers) if err != nil { return err } @@ -1572,14 +1573,14 @@ func extractHeaders(chaindata string, block uint64) error { } func extractBodies(chaindata string, block uint64) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRo(context.Background()) if err != nil { return err } defer tx.Rollback() - c, err := 
tx.Cursor(dbutils.BlockBodyPrefix) + c, err := tx.Cursor(kv.BlockBody) if err != nil { return err } @@ -1599,17 +1600,17 @@ func extractBodies(chaindata string, block uint64) error { func fixUnwind(chaindata string) error { contractAddr := common.HexToAddress("0x577a32aa9c40cf4266e49fc1e44c749c356309bd") - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() - tool.Check(db.Update(context.Background(), func(tx ethdb.RwTx) error { - i, err := tx.GetOne(dbutils.IncarnationMapBucket, contractAddr[:]) + tool.Check(db.Update(context.Background(), func(tx kv.RwTx) error { + i, err := tx.GetOne(kv.IncarnationMap, contractAddr[:]) if err != nil { return err } else if i == nil { fmt.Print("Not found\n") var b [8]byte binary.BigEndian.PutUint64(b[:], 1) - if err = tx.Put(dbutils.IncarnationMapBucket, contractAddr[:], b[:]); err != nil { + if err = tx.Put(kv.IncarnationMap, contractAddr[:], b[:]); err != nil { return err } } else { @@ -1621,7 +1622,7 @@ func fixUnwind(chaindata string) error { } func snapSizes(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRo(context.Background()) @@ -1630,7 +1631,7 @@ func snapSizes(chaindata string) error { } defer tx.Rollback() - c, _ := tx.Cursor(dbutils.CliqueSeparateBucket) + c, _ := tx.Cursor(kv.CliqueSeparate) defer c.Close() sizes := make(map[int]int) @@ -1670,14 +1671,14 @@ func snapSizes(chaindata string) error { } func readCallTraces(chaindata string, block uint64) error { - kv := kv2.MustOpen(chaindata) - defer kv.Close() - tx, err := kv.BeginRw(context.Background()) + db := mdbx.MustOpen(chaindata) + defer db.Close() + tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - traceCursor, err1 := tx.RwCursorDupSort(dbutils.CallTraceSet) + traceCursor, err1 := tx.RwCursorDupSort(kv.CallTraceSet) if err1 != nil { return err1 } @@ -1696,7 +1697,7 @@ func readCallTraces(chaindata string, 
block uint64) error { count++ } fmt.Printf("Found %d records\n", count) - idxCursor, err2 := tx.Cursor(dbutils.CallToIndex) + idxCursor, err2 := tx.Cursor(kv.CallToIndex) if err2 != nil { return err2 } @@ -1716,21 +1717,21 @@ func readCallTraces(chaindata string, block uint64) error { } func fixTd(chaindata string) error { - kv := kv2.MustOpen(chaindata) - defer kv.Close() - tx, err := kv.BeginRw(context.Background()) + db := mdbx.MustOpen(chaindata) + defer db.Close() + tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - c, err1 := tx.RwCursor(dbutils.HeadersBucket) + c, err1 := tx.RwCursor(kv.Headers) if err1 != nil { return err1 } defer c.Close() var k, v []byte for k, v, err = c.First(); err == nil && k != nil; k, v, err = c.Next() { - hv, herr := tx.GetOne(dbutils.HeaderTDBucket, k) + hv, herr := tx.GetOne(kv.HeaderTD, k) if herr != nil { return herr } @@ -1747,7 +1748,7 @@ func fixTd(chaindata string) error { binary.BigEndian.PutUint64(parentK[:], header.Number.Uint64()-1) copy(parentK[8:], header.ParentHash[:]) var parentTdRec []byte - if parentTdRec, err = tx.GetOne(dbutils.HeaderTDBucket, parentK[:]); err != nil { + if parentTdRec, err = tx.GetOne(kv.HeaderTD, parentK[:]); err != nil { return fmt.Errorf("reading parentTd Rec for %d: %v", header.Number.Uint64(), err) } var parentTd big.Int @@ -1760,7 +1761,7 @@ func fixTd(chaindata string) error { if newHv, err = rlp.EncodeToBytes(&td); err != nil { return fmt.Errorf("encoding td record for block %d: %v", header.Number.Uint64(), err) } - if err = tx.Put(dbutils.HeaderTDBucket, k, newHv); err != nil { + if err = tx.Put(kv.HeaderTD, k, newHv); err != nil { return err } } @@ -1772,7 +1773,7 @@ func fixTd(chaindata string) error { } func advanceExec(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -1800,7 +1801,7 @@ func advanceExec(chaindata 
string) error { } func backExec(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -1828,14 +1829,14 @@ func backExec(chaindata string) error { } func fixState(chaindata string) error { - kv := kv2.MustOpen(chaindata) - defer kv.Close() - tx, err := kv.BeginRw(context.Background()) + db := mdbx.MustOpen(chaindata) + defer db.Close() + tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - c, err1 := tx.RwCursor(dbutils.HeaderCanonicalBucket) + c, err1 := tx.RwCursor(kv.HeaderCanonical) if err1 != nil { return err1 } @@ -1846,7 +1847,7 @@ func fixState(chaindata string) error { var headerKey [40]byte copy(headerKey[:], k) copy(headerKey[8:], v) - hv, herr := tx.GetOne(dbutils.HeadersBucket, headerKey[:]) + hv, herr := tx.GetOne(kv.Headers, headerKey[:]) if herr != nil { return herr } @@ -1874,23 +1875,23 @@ func fixState(chaindata string) error { } func trimTxs(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - lastTxId, err := tx.ReadSequence(dbutils.EthTx) + lastTxId, err := tx.ReadSequence(kv.EthTx) if err != nil { return err } - txs, err1 := tx.RwCursor(dbutils.EthTx) + txs, err1 := tx.RwCursor(kv.EthTx) if err1 != nil { return err1 } defer txs.Close() - bodies, err2 := tx.Cursor(dbutils.BlockBodyPrefix) + bodies, err2 := tx.Cursor(kv.BlockBody) if err2 != nil { return err } @@ -1957,7 +1958,7 @@ func trimTxs(chaindata string) error { return err } defer tx.Rollback() - txs, err = tx.RwCursor(dbutils.EthTx) + txs, err = tx.RwCursor(kv.EthTx) if err != nil { return err } @@ -1967,14 +1968,14 @@ func trimTxs(chaindata string) error { } func scanTxs(chaindata string) error { - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, 
err := db.BeginRo(context.Background()) if err != nil { return err } defer tx.Rollback() - c, err := tx.Cursor(dbutils.EthTx) + c, err := tx.Cursor(kv.EthTx) if err != nil { return err } @@ -2005,7 +2006,7 @@ func scanTxs(chaindata string) error { } func scanReceipts3(chaindata string, block uint64) error { - dbdb := kv2.MustOpen(chaindata) + dbdb := mdbx.MustOpen(chaindata) defer dbdb.Close() tx, err := dbdb.BeginRw(context.Background()) if err != nil { @@ -2015,7 +2016,7 @@ func scanReceipts3(chaindata string, block uint64) error { var key [8]byte var v []byte binary.BigEndian.PutUint64(key[:], block) - if v, err = tx.GetOne(dbutils.Receipts, key[:]); err != nil { + if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { return err } fmt.Printf("%x\n", v) @@ -2030,7 +2031,7 @@ func scanReceipts2(chaindata string) error { defer f.Close() w := bufio.NewWriter(f) defer w.Flush() - dbdb := kv2.MustOpen(chaindata) + dbdb := mdbx.MustOpen(chaindata) defer dbdb.Close() tx, err := dbdb.BeginRw(context.Background()) if err != nil { @@ -2060,7 +2061,7 @@ func scanReceipts2(chaindata string) error { break } binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(dbutils.Receipts, key[:]); err != nil { + if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { return err } var receipts types.Receipts @@ -2095,7 +2096,7 @@ func scanReceipts(chaindata string, block uint64) error { defer f.Close() w := bufio.NewWriter(f) defer w.Flush() - db := kv2.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -2144,7 +2145,7 @@ func scanReceipts(chaindata string, block uint64) error { break } binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(dbutils.Receipts, key[:]); err != nil { + if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { return err } var receipts types.Receipts @@ -2183,7 +2184,7 @@ func scanReceipts(chaindata string, block uint64) error { if err = 
cbor.Marshal(&buf, receipts); err != nil { return err } - if err = tx.Put(dbutils.Receipts, common.CopyBytes(key[:]), common.CopyBytes(buf.Bytes())); err != nil { + if err = tx.Put(kv.Receipts, common.CopyBytes(key[:]), common.CopyBytes(buf.Bytes())); err != nil { return err } fixedCount++ @@ -2217,7 +2218,7 @@ func scanReceipts(chaindata string, block uint64) error { if err != nil { return fmt.Errorf("encode block receipts for block %d: %v", blockNum, err) } - if err = tx.Put(dbutils.Receipts, key[:], buf.Bytes()); err != nil { + if err = tx.Put(kv.Receipts, key[:], buf.Bytes()); err != nil { return fmt.Errorf("writing receipts for block %d: %v", blockNum, err) } if _, err = w.Write([]byte(fmt.Sprintf("%d\n", blockNum))); err != nil { diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index 41b27fc5745..362cd34f5fe 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -11,30 +11,29 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + mdbx2 "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" "github.com/torquem-ch/mdbx-go/mdbx" ) var stateBuckets = []string{ - dbutils.HashedAccountsBucket, - dbutils.HashedStorageBucket, - dbutils.ContractCodeBucket, - dbutils.PlainStateBucket, - dbutils.AccountChangeSetBucket, - dbutils.StorageChangeSetBucket, - dbutils.PlainContractCodeBucket, - dbutils.IncarnationMapBucket, - dbutils.CodeBucket, - dbutils.TrieOfAccountsBucket, - dbutils.TrieOfStorageBucket, - dbutils.AccountsHistoryBucket, - dbutils.StorageHistoryBucket, - dbutils.TxLookupPrefix, - dbutils.ContractTEVMCodeBucket, + kv.HashedAccounts, + kv.HashedStorage, + kv.ContractCode, + kv.PlainStateBucket, + kv.AccountChangeSet, + kv.StorageChangeSet, + 
kv.PlainContractCode, + kv.IncarnationMap, + kv.CodeBucket, + kv.TrieOfAccounts, + kv.TrieOfStorage, + kv.AccountsHistory, + kv.StorageHistory, + kv.TxLookup, + kv.ContractTEVMCode, } var cmdCompareBucket = &cobra.Command{ @@ -76,7 +75,8 @@ var cmdMdbxToMdbx = &cobra.Command{ Short: "copy data from '--chaindata' to '--chaindata.to'", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - err := mdbxToMdbx(ctx, chaindata, toChaindata) + logger := log.New() + err := mdbxToMdbx(ctx, logger, chaindata, toChaindata) if err != nil { log.Error(err.Error()) return err @@ -90,7 +90,8 @@ var cmdFToMdbx = &cobra.Command{ Short: "copy data from '--chaindata' to '--chaindata.to'", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - err := fToMdbx(ctx, toChaindata) + logger := log.New() + err := fToMdbx(ctx, logger, toChaindata) if err != nil { log.Error(err.Error()) return err @@ -126,14 +127,14 @@ func init() { } func compareStates(ctx context.Context, chaindata string, referenceChaindata string) error { - db := kv.MustOpen(chaindata) + db := mdbx2.MustOpen(chaindata) defer db.Close() - refDB := kv.MustOpen(referenceChaindata) + refDB := mdbx2.MustOpen(referenceChaindata) defer refDB.Close() - if err := db.View(context.Background(), func(tx ethdb.Tx) error { - if err := refDB.View(context.Background(), func(refTX ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { + if err := refDB.View(context.Background(), func(refTX kv.Tx) error { for _, bucket := range stateBuckets { fmt.Printf("\nBucket: %s\n", bucket) if err := compareBuckets(ctx, tx, bucket, refTX, bucket); err != nil { @@ -152,14 +153,14 @@ func compareStates(ctx context.Context, chaindata string, referenceChaindata str return nil } func compareBucketBetweenDatabases(ctx context.Context, chaindata string, referenceChaindata string, bucket string) error { - db := kv.MustOpen(chaindata) + db := mdbx2.MustOpen(chaindata) defer 
db.Close() - refDB := kv.MustOpen(referenceChaindata) + refDB := mdbx2.MustOpen(referenceChaindata) defer refDB.Close() - if err := db.View(context.Background(), func(tx ethdb.Tx) error { - return refDB.View(context.Background(), func(refTX ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { + return refDB.View(context.Background(), func(refTX kv.Tx) error { return compareBuckets(ctx, tx, bucket, refTX, bucket) }) }); err != nil { @@ -169,7 +170,7 @@ func compareBucketBetweenDatabases(ctx context.Context, chaindata string, refere return nil } -func compareBuckets(ctx context.Context, tx ethdb.Tx, b string, refTx ethdb.Tx, refB string) error { +func compareBuckets(ctx context.Context, tx kv.Tx, b string, refTx kv.Tx, refB string) error { count := 0 c, err := tx.Cursor(b) if err != nil { @@ -243,14 +244,14 @@ func compareBuckets(ctx context.Context, tx ethdb.Tx, b string, refTx ethdb.Tx, return nil } -func fToMdbx(ctx context.Context, to string) error { +func fToMdbx(ctx context.Context, logger log.Logger, to string) error { file, err := os.Open(file) if err != nil { panic(err) } defer file.Close() - dst := kv.NewMDBX().Path(to).MustOpen() + dst := mdbx2.NewMDBX(logger).Path(to).MustOpen() dstTx, err1 := dst.BeginRw(ctx) if err1 != nil { return err1 @@ -313,7 +314,7 @@ MainLoop: v := common.CopyBytes(fileScanner.Bytes()) v = common.FromHex(string(v[1:])) - if casted, ok := c.(ethdb.RwCursorDupSort); ok { + if casted, ok := c.(kv.RwCursorDupSort); ok { if err = casted.AppendDup(k, v); err != nil { panic(err) } @@ -343,14 +344,14 @@ MainLoop: return nil } -func mdbxToMdbx(ctx context.Context, from, to string) error { +func mdbxToMdbx(ctx context.Context, logger log.Logger, from, to string) error { _ = os.RemoveAll(to) - src := kv.NewMDBX().Path(from).Flags(func(flags uint) uint { return mdbx.Readonly | mdbx.Accede }).MustOpen() - dst := kv.NewMDBX().Path(to).MustOpen() + src := mdbx2.NewMDBX(logger).Path(from).Flags(func(flags uint) uint 
{ return mdbx.Readonly | mdbx.Accede }).MustOpen() + dst := mdbx2.NewMDBX(logger).Path(to).MustOpen() return kv2kv(ctx, src, dst) } -func kv2kv(ctx context.Context, src, dst ethdb.RwKV) error { +func kv2kv(ctx context.Context, src, dst kv.RwDB) error { srcTx, err1 := src.BeginRo(ctx) if err1 != nil { return err1 @@ -378,7 +379,7 @@ func kv2kv(ctx context.Context, src, dst ethdb.RwKV) error { if err != nil { return err } - casted, isDupsort := c.(ethdb.RwCursorDupSort) + casted, isDupsort := c.(kv.RwCursorDupSort) for k, v, err := srcC.First(); k != nil; k, v, err = srcC.Next() { if err != nil { @@ -412,7 +413,7 @@ func kv2kv(ctx context.Context, src, dst ethdb.RwKV) error { if err != nil { return err } - casted, isDupsort = c.(ethdb.RwCursorDupSort) + casted, isDupsort = c.(kv.RwCursorDupSort) default: } } diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 4d785574776..aa7cbd85a2f 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -7,11 +7,10 @@ import ( "text/tabwriter" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" @@ -22,10 +21,11 @@ var cmdResetState = &cobra.Command{ Short: "Reset StateStages (5,6,7,8,9,10) and buckets", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() - err := resetState(db, ctx) + err := resetState(db, logger, ctx) if err != nil { log.Error(err.Error()) return err @@ -42,51 +42,51 @@ func init() { rootCmd.AddCommand(cmdResetState) } 
-func resetState(kv ethdb.RwKV, ctx context.Context) error { - if err := kv.View(ctx, func(tx ethdb.Tx) error { return printStages(tx) }); err != nil { +func resetState(db kv.RwDB, logger log.Logger, ctx context.Context) error { + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { return err } // don't reset senders here - if err := kv.Update(ctx, stagedsync.ResetHashState); err != nil { + if err := db.Update(ctx, stagedsync.ResetHashState); err != nil { return err } - if err := kv.Update(ctx, stagedsync.ResetIH); err != nil { + if err := db.Update(ctx, stagedsync.ResetIH); err != nil { return err } - if err := kv.Update(ctx, resetHistory); err != nil { + if err := db.Update(ctx, resetHistory); err != nil { return err } - if err := kv.Update(ctx, resetLogIndex); err != nil { + if err := db.Update(ctx, resetLogIndex); err != nil { return err } - if err := kv.Update(ctx, resetCallTraces); err != nil { + if err := db.Update(ctx, resetCallTraces); err != nil { return err } - if err := kv.Update(ctx, resetTxLookup); err != nil { + if err := db.Update(ctx, resetTxLookup); err != nil { return err } - if err := kv.Update(ctx, resetTxPool); err != nil { + if err := db.Update(ctx, resetTxPool); err != nil { return err } - if err := kv.Update(ctx, resetFinish); err != nil { + if err := db.Update(ctx, resetFinish); err != nil { return err } genesis, _ := byChain() - if err := kv.Update(ctx, func(tx ethdb.RwTx) error { return resetExec(tx, genesis) }); err != nil { + if err := db.Update(ctx, func(tx kv.RwTx) error { return resetExec(tx, genesis) }); err != nil { return err } // set genesis after reset all buckets fmt.Printf("After reset: \n") - if err := kv.View(ctx, func(tx ethdb.Tx) error { return printStages(tx) }); err != nil { + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { return err } return nil } -func resetSenders(tx ethdb.RwTx) error { - if err := 
tx.(ethdb.BucketMigrator).ClearBucket(dbutils.Senders); err != nil { +func resetSenders(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.Senders); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.Senders, 0); err != nil { @@ -98,47 +98,47 @@ func resetSenders(tx ethdb.RwTx) error { return nil } -func resetExec(tx ethdb.RwTx, g *core.Genesis) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.HashedAccountsBucket); err != nil { +func resetExec(tx kv.RwTx, g *core.Genesis) error { + if err := tx.ClearBucket(kv.HashedAccounts); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.HashedStorageBucket); err != nil { + if err := tx.ClearBucket(kv.HashedStorage); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.ContractCodeBucket); err != nil { + if err := tx.ClearBucket(kv.ContractCode); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.PlainStateBucket); err != nil { + if err := tx.ClearBucket(kv.PlainStateBucket); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.AccountChangeSetBucket); err != nil { + if err := tx.ClearBucket(kv.AccountChangeSet); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.StorageChangeSetBucket); err != nil { + if err := tx.ClearBucket(kv.StorageChangeSet); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.PlainContractCodeBucket); err != nil { + if err := tx.ClearBucket(kv.PlainContractCode); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.Receipts); err != nil { + if err := tx.ClearBucket(kv.Receipts); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.Log); err != nil { + if err := tx.ClearBucket(kv.Log); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.IncarnationMapBucket); err != nil { + if err := 
tx.ClearBucket(kv.IncarnationMap); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.CodeBucket); err != nil { + if err := tx.ClearBucket(kv.CodeBucket); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.CallTraceSet); err != nil { + if err := tx.ClearBucket(kv.CallTraceSet); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.Epoch); err != nil { + if err := tx.ClearBucket(kv.Epoch); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.PendingEpoch); err != nil { + if err := tx.ClearBucket(kv.PendingEpoch); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.Execution, 0); err != nil { @@ -155,11 +155,11 @@ func resetExec(tx ethdb.RwTx, g *core.Genesis) error { return nil } -func resetHistory(tx ethdb.RwTx) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.AccountsHistoryBucket); err != nil { +func resetHistory(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.AccountsHistory); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.StorageHistoryBucket); err != nil { + if err := tx.ClearBucket(kv.StorageHistory); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.AccountHistoryIndex, 0); err != nil { @@ -178,11 +178,11 @@ func resetHistory(tx ethdb.RwTx) error { return nil } -func resetLogIndex(tx ethdb.RwTx) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.LogAddressIndex); err != nil { +func resetLogIndex(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.LogAddressIndex); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.LogTopicIndex); err != nil { + if err := tx.ClearBucket(kv.LogTopicIndex); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.LogIndex, 0); err != nil { @@ -194,11 +194,11 @@ func resetLogIndex(tx ethdb.RwTx) error { return nil } -func resetCallTraces(tx 
ethdb.RwTx) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.CallFromIndex); err != nil { +func resetCallTraces(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.CallFromIndex); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.CallToIndex); err != nil { + if err := tx.ClearBucket(kv.CallToIndex); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.CallTraces, 0); err != nil { @@ -210,8 +210,8 @@ func resetCallTraces(tx ethdb.RwTx) error { return nil } -func resetTxLookup(tx ethdb.RwTx) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.TxLookupPrefix); err != nil { +func resetTxLookup(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.TxLookup); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.TxLookup, 0); err != nil { @@ -223,7 +223,7 @@ func resetTxLookup(tx ethdb.RwTx) error { return nil } -func resetTxPool(tx ethdb.RwTx) error { +func resetTxPool(tx kv.RwTx) error { if err := stages.SaveStageProgress(tx, stages.TxPool, 0); err != nil { return err } @@ -233,7 +233,7 @@ func resetTxPool(tx ethdb.RwTx) error { return nil } -func resetFinish(tx ethdb.RwTx) error { +func resetFinish(tx kv.RwTx) error { if err := stages.SaveStageProgress(tx, stages.Finish, 0); err != nil { return err } @@ -243,7 +243,7 @@ func resetFinish(tx ethdb.RwTx) error { return nil } -func printStages(db ethdb.KVGetter) error { +func printStages(db kv.Getter) error { var err error var progress uint64 w := new(tabwriter.Writer) diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 8f1a2b6f64e..a55afb43f15 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -4,8 +4,8 @@ import ( "path" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" 
"github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/migrations" @@ -36,9 +36,9 @@ func RootCommand() *cobra.Command { return rootCmd } -func openDB(path string, applyMigrations bool) ethdb.RwKV { - label := ethdb.Chain - db := openKV(label, path, false) +func openDB(path string, logger log.Logger, applyMigrations bool) kv.RwDB { + label := kv.ChainDB + db := openKV(label, logger, path, false) if applyMigrations { has, err := migrations.NewMigrator(label).HasPendingMigrations(db) if err != nil { @@ -47,25 +47,24 @@ func openDB(path string, applyMigrations bool) ethdb.RwKV { if has { log.Info("Re-Opening DB in exclusive mode to apply DB migrations") db.Close() - db = openKV(label, path, true) + db = openKV(label, logger, path, true) if err := migrations.NewMigrator(label).Apply(db, datadir); err != nil { panic(err) } db.Close() - db = openKV(label, path, false) + db = openKV(label, logger, path, false) } } return db } -func openKV(label ethdb.Label, path string, exclusive bool) ethdb.RwKV { - opts := kv2.NewMDBX().Path(path).Label(label) +func openKV(label kv.Label, logger log.Logger, path string, exclusive bool) kv.RwDB { + opts := kv2.NewMDBX(logger).Path(path).Label(label) if exclusive { opts = opts.Exclusive() } if databaseVerbosity != -1 { - opts = opts.DBVerbosity(ethdb.DBVerbosityLvl(databaseVerbosity)) + opts = opts.DBVerbosity(kv.DBVerbosityLvl(databaseVerbosity)) } - kv := opts.MustOpen() - return kv + return opts.MustOpen() } diff --git a/cmd/integration/commands/snapshot_check.go b/cmd/integration/commands/snapshot_check.go index 7f10235ac13..28ca0f22092 100644 --- a/cmd/integration/commands/snapshot_check.go +++ b/cmd/integration/commands/snapshot_check.go @@ -10,13 +10,13 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync" 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/spf13/cobra" @@ -38,8 +38,9 @@ var cmdSnapshotCheck = &cobra.Command{ Example: "go run cmd/integration/main.go snapshot_check --block 11400000 --datadir /media/b00ris/nvme/backup/snapshotsync/ --snapshotDir /media/b00ris/nvme/snapshots/ --snapshotMode s --tmp_db /media/b00ris/nvme/tmp/debug", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() + logger := log.New() //db to provide headers, blocks, senders ... - mainDB, err := kv2.Open(chaindata, true) + mainDB, err := kv2.Open(chaindata, logger, true) if err != nil { return err } @@ -53,11 +54,11 @@ var cmdSnapshotCheck = &cobra.Command{ } stateSnapshotPath := filepath.Join(snapshotDir, "state") - stateSnapshot := kv2.NewMDBX().Path(stateSnapshotPath).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.PlainStateBucket: dbutils.BucketsConfigs[dbutils.PlainStateBucket], - dbutils.PlainContractCodeBucket: dbutils.BucketsConfigs[dbutils.PlainContractCodeBucket], - dbutils.CodeBucket: dbutils.BucketsConfigs[dbutils.CodeBucket], + stateSnapshot := kv2.NewMDBX(logger).Path(stateSnapshotPath).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.PlainStateBucket: kv.BucketsConfigs[kv.PlainStateBucket], + kv.PlainContractCode: kv.BucketsConfigs[kv.PlainContractCode], + kv.CodeBucket: kv.BucketsConfigs[kv.CodeBucket], } }).Readonly().MustOpen() isNew := true @@ -79,24 +80,24 @@ var cmdSnapshotCheck = &cobra.Command{ log.Info("Temp database", "path", path) } }() - tmpDb := 
kv2.NewMDBX().Path(path).MustOpen() - kv := kv2.NewSnapshotKV(). + tmpDb := kv2.NewMDBX(logger).Path(path).MustOpen() + db := snapshotdb.NewSnapshotKV(). DB(tmpDb). //broken - //SnapshotDB([]string{dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, dbutils.HeaderTDBucket, dbutils.BlockBodyPrefix, dbutils.Senders, dbutils.HeadBlockKey, dbutils.HeaderNumberBucket}, mainDB.RwKV()). - //SnapshotDB([]string{dbutils.PlainStateBucket, dbutils.CodeBucket, dbutils.PlainContractCodeBucket}, stateSnapshot). + //SnapshotDB([]string{dbutils.Headers, dbutils.HeaderCanonical, dbutils.HeaderTD, dbutils.BlockBody, dbutils.Senders, dbutils.HeadBlockKey, dbutils.HeaderNumber}, mainDB.RwDB()). + //SnapshotDB([]string{dbutils.PlainStateBucket, dbutils.CodeBucket, dbutils.PlainContractCode}, stateSnapshot). Open() _ = mainDB _ = stateSnapshot if isNew { - if err := kv.Update(ctx, func(tx ethdb.RwTx) error { + if err := db.Update(ctx, func(tx kv.RwTx) error { return prune.SetIfNotExist(tx, prune.DefaultMode) }); err != nil { return err } } - if err := snapshotCheck(ctx, kv, isNew, os.TempDir()); err != nil { + if err := snapshotCheck(ctx, db, isNew, os.TempDir()); err != nil { log.Error("snapshotCheck error", "err", err) return err } @@ -104,12 +105,12 @@ var cmdSnapshotCheck = &cobra.Command{ }, } -func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string) (err error) { +func snapshotCheck(ctx context.Context, db kv.RwDB, isNew bool, tmpDir string) (err error) { pm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil) var snapshotBlock uint64 = 11_000_000 var lastBlockHeaderNumber, blockNum uint64 - if err := db.View(ctx, func(tx ethdb.Tx) error { + if err := db.View(ctx, func(tx kv.Tx) error { blockNum, err = stages.GetStageProgress(tx, stages.Execution) if err != nil { return err @@ -142,7 +143,7 @@ func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string if isNew { log.Info("New tmp db. 
We need to promote hash state.") - if err := db.Update(ctx, func(tx ethdb.RwTx) error { + if err := db.Update(ctx, func(tx kv.RwTx) error { tt := time.Now() err = stagedsync.PromoteHashedStateCleanly("", tx, stagedsync.StageHashStateCfg(db, tmpDir), ctx.Done()) @@ -164,7 +165,7 @@ func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string if isNew { log.Info("Regenerate IH") - if err := db.Update(ctx, func(tx ethdb.RwTx) error { + if err := db.Update(ctx, func(tx kv.RwTx) error { hash, innerErr := rawdb.ReadCanonicalHash(tx, snapshotBlock) if innerErr != nil { return innerErr diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 6d5835ba57a..f29f9245059 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -11,7 +11,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/sentry/download" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" @@ -22,9 +21,9 @@ import ( "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/params" @@ -38,7 +37,8 @@ var cmdStageBodies = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageBodies(db, ctx); err != nil { @@ -53,8 
+53,9 @@ var cmdStageSenders = &cobra.Command{ Use: "stage_senders", Short: "", RunE: func(cmd *cobra.Command, args []string) error { + logger := log.New() ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + db := openDB(chaindata, logger, true) defer db.Close() if err := stageSenders(db, ctx); err != nil { @@ -70,7 +71,8 @@ var cmdStageExec = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageExec(db, ctx); err != nil { @@ -86,7 +88,8 @@ var cmdStageTrie = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageTrie(db, ctx); err != nil { @@ -101,8 +104,9 @@ var cmdStageHashState = &cobra.Command{ Use: "stage_hash_state", Short: "", RunE: func(cmd *cobra.Command, args []string) error { + logger := log.New() ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + db := openDB(chaindata, logger, true) defer db.Close() if err := stageHashState(db, ctx); err != nil { @@ -118,7 +122,8 @@ var cmdStageHistory = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageHistory(db, ctx); err != nil { @@ -134,7 +139,8 @@ var cmdLogIndex = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageLogIndex(db, ctx); err != nil { @@ -150,7 +156,8 @@ var cmdCallTraces = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error 
{ ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageCallTraces(db, ctx); err != nil { @@ -166,7 +173,8 @@ var cmdStageTxLookup = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if err := stageTxLookup(db, ctx); err != nil { @@ -181,7 +189,8 @@ var cmdPrintStages = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, false) + logger := log.New() + db := openDB(chaindata, logger, false) defer db.Close() if err := printAllStages(db, ctx); err != nil { @@ -197,7 +206,8 @@ var cmdPrintMigrations = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, false) + logger := log.New() + db := openDB(chaindata, logger, false) defer db.Close() if err := printAppliedMigrations(db, ctx); err != nil { log.Error("Error", "err", err) @@ -212,7 +222,8 @@ var cmdRemoveMigration = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, false) + logger := log.New() + db := openDB(chaindata, logger, false) defer db.Close() if err := removeMigration(db, ctx); err != nil { log.Error("Error", "err", err) @@ -226,7 +237,8 @@ var cmdRunMigrations = &cobra.Command{ Use: "run_migrations", Short: "", RunE: func(cmd *cobra.Command, args []string) error { - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() // Nothing to do, migrations will be applied automatically return nil @@ -237,7 +249,8 @@ var cmdSetPrune = &cobra.Command{ Use: "set_prune", Short: "Override existing --prune flag value (if you know what you are 
doing)", RunE: func(cmd *cobra.Command, args []string) error { - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() return overrideStorageMode(db) }, @@ -352,8 +365,8 @@ func init() { rootCmd.AddCommand(cmdSetPrune) } -func stageBodies(db ethdb.RwKV, ctx context.Context) error { - return db.Update(ctx, func(tx ethdb.RwTx) error { +func stageBodies(db kv.RwDB, ctx context.Context) error { + return db.Update(ctx, func(tx kv.RwTx) error { if unwind > 0 { progress, err := stages.GetStageProgress(tx, stages.Bodies) if err != nil { @@ -377,7 +390,7 @@ func stageBodies(db ethdb.RwKV, ctx context.Context) error { }) } -func stageSenders(db ethdb.RwKV, ctx context.Context) error { +func stageSenders(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) _, _, chainConfig, _, _, sync, _, _ := newSync(ctx, db, nil) @@ -415,12 +428,12 @@ func stageSenders(db ethdb.RwKV, ctx context.Context) error { return tx.Commit() } -func stageExec(db ethdb.RwKV, ctx context.Context) error { +func stageExec(db kv.RwDB, ctx context.Context) error { pm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil) if reset { genesis, _ := byChain() - if err := db.Update(ctx, func(tx ethdb.RwTx) error { return resetExec(tx, genesis) }); err != nil { + if err := db.Update(ctx, func(tx kv.RwTx) error { return resetExec(tx, genesis) }); err != nil { return err } return nil @@ -473,7 +486,7 @@ func stageExec(db ethdb.RwKV, ctx context.Context) error { return nil } -func stageTrie(db ethdb.RwKV, ctx context.Context) error { +func stageTrie(db kv.RwDB, ctx context.Context) error { pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) tmpdir := path.Join(datadir, etl.TmpDirName) @@ -523,7 +536,7 @@ func stageTrie(db ethdb.RwKV, ctx context.Context) error { return nil } -func stageHashState(db ethdb.RwKV, ctx context.Context) error { +func stageHashState(db kv.RwDB, ctx context.Context) error { tmpdir := 
path.Join(datadir, etl.TmpDirName) pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) @@ -576,7 +589,7 @@ func stageHashState(db ethdb.RwKV, ctx context.Context) error { return tx.Commit() } -func stageLogIndex(db ethdb.RwKV, ctx context.Context) error { +func stageLogIndex(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) @@ -630,7 +643,7 @@ func stageLogIndex(db ethdb.RwKV, ctx context.Context) error { return tx.Commit() } -func stageCallTraces(kv ethdb.RwKV, ctx context.Context) error { +func stageCallTraces(kv kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) pm, _, _, _, _, sync, _, _ := newSync(ctx, kv, nil) @@ -690,7 +703,7 @@ func stageCallTraces(kv ethdb.RwKV, ctx context.Context) error { return tx.Commit() } -func stageHistory(db ethdb.RwKV, ctx context.Context) error { +func stageHistory(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) tx, err := db.BeginRw(ctx) @@ -757,7 +770,7 @@ func stageHistory(db ethdb.RwKV, ctx context.Context) error { return tx.Commit() } -func stageTxLookup(db ethdb.RwKV, ctx context.Context) error { +func stageTxLookup(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) @@ -809,12 +822,12 @@ func stageTxLookup(db ethdb.RwKV, ctx context.Context) error { return tx.Commit() } -func printAllStages(db ethdb.RoKV, ctx context.Context) error { - return db.View(ctx, func(tx ethdb.Tx) error { return printStages(tx) }) +func printAllStages(db kv.RoDB, ctx context.Context) error { + return db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }) } -func printAppliedMigrations(db ethdb.RwKV, ctx context.Context) error { - return db.View(ctx, func(tx ethdb.Tx) error { +func printAppliedMigrations(db kv.RwDB, ctx context.Context) error { + 
return db.View(ctx, func(tx kv.Tx) error { applied, err := migrations.AppliedMigrations(tx, false /* withPayload */) if err != nil { return err @@ -831,9 +844,9 @@ func printAppliedMigrations(db ethdb.RwKV, ctx context.Context) error { }) } -func removeMigration(db ethdb.RwKV, ctx context.Context) error { - return db.Update(ctx, func(tx ethdb.RwTx) error { - return tx.Delete(dbutils.Migrations, []byte(migration), nil) +func removeMigration(db kv.RwDB, ctx context.Context) error { + return db.Update(ctx, func(tx kv.RwTx) error { + return tx.Delete(kv.Migrations, []byte(migration), nil) }) } @@ -864,13 +877,14 @@ func byChain() (*core.Genesis, *params.ChainConfig) { return genesis, chainConfig } -func newSync(ctx context.Context, db ethdb.RwKV, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *params.ChainConfig, *vm.Config, *core.TxPool, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { +func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *params.ChainConfig, *vm.Config, *core.TxPool, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { tmpdir := path.Join(datadir, etl.TmpDirName) snapshotDir = path.Join(datadir, "erigon", "snapshot") + logger := log.New() var pm prune.Mode var err error - if err = db.View(context.Background(), func(tx ethdb.Tx) error { + if err = db.View(context.Background(), func(tx kv.Tx) error { pm, err = prune.Get(tx) if err != nil { return err @@ -886,10 +900,10 @@ func newSync(ctx context.Context, db ethdb.RwKV, miningConfig *params.MiningConf engine = ethash.NewFaker() switch chain { case params.SokolChainName: - engine = ethconfig.CreateConsensusEngine(chainConfig, ¶ms.AuRaConfig{DBPath: path.Join(datadir, "aura")}, nil, false) + engine = ethconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: path.Join(datadir, "aura")}, nil, false) } - events := remotedbserver.NewEvents() + events := privateapi.NewEvents() txPool := 
core.NewTxPool(ethconfig.Defaults.TxPool, chainConfig, db) @@ -926,7 +940,7 @@ func newSync(ctx context.Context, db ethdb.RwKV, miningConfig *params.MiningConf cfg.Miner = *miningConfig } - sync, err := stages2.NewStagedSync2(context.Background(), db, cfg, + sync, err := stages2.NewStagedSync2(context.Background(), logger, db, cfg, downloadServer, tmpdir, txPool, @@ -953,7 +967,7 @@ func newSync(ctx context.Context, db ethdb.RwKV, miningConfig *params.MiningConf return pm, engine, chainConfig, vmConfig, txPool, sync, miningSync, miner } -func progress(tx ethdb.KVGetter, stage stages.SyncStage) uint64 { +func progress(tx kv.Getter, stage stages.SyncStage) uint64 { res, err := stages.GetStageProgress(tx, stage) if err != nil { panic(err) @@ -961,7 +975,7 @@ func progress(tx ethdb.KVGetter, stage stages.SyncStage) uint64 { return res } -func stage(st *stagedsync.Sync, tx ethdb.Tx, db ethdb.RoKV, stage stages.SyncStage) *stagedsync.StageState { +func stage(st *stagedsync.Sync, tx kv.Tx, db kv.RoDB, stage stages.SyncStage) *stagedsync.StageState { res, err := st.StageState(stage, tx, db) if err != nil { panic(err) @@ -969,12 +983,12 @@ func stage(st *stagedsync.Sync, tx ethdb.Tx, db ethdb.RoKV, stage stages.SyncSta return res } -func overrideStorageMode(db ethdb.RwKV) error { +func overrideStorageMode(db kv.RwDB) error { pm, err := prune.FromCli(pruneFlag, pruneH, pruneR, pruneT, pruneC, experiments) if err != nil { return err } - return db.Update(context.Background(), func(tx ethdb.RwTx) error { + return db.Update(context.Background(), func(tx kv.RwTx) error { if err = prune.Override(tx, pm); err != nil { return err } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index f9c5bcbf9cb..7002a464d5e 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -11,6 +11,7 @@ import ( "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon/ethdb/kv" 
"github.com/spf13/cobra" "github.com/ledgerwatch/erigon/cmd/utils" @@ -28,7 +29,6 @@ import ( "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/node" @@ -56,7 +56,8 @@ Examples: erigoncli.ApplyFlagsForEthConfigCobra(cmd.Flags(), ethConfig) miningConfig := params.MiningConfig{} utils.SetupMinerCobra(cmd, &miningConfig) - db := openDB(path.Join(cfg.DataDir, "erigon", "chaindata"), true) + logger := log.New() + db := openDB(path.Join(cfg.DataDir, "erigon", "chaindata"), logger, true) defer db.Close() if err := syncBySmallSteps(db, miningConfig, ctx); err != nil { @@ -78,7 +79,8 @@ var loopIhCmd = &cobra.Command{ Use: "loop_ih", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if unwind == 0 { @@ -97,7 +99,8 @@ var loopExecCmd = &cobra.Command{ Use: "loop_exec", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := utils.RootContext() - db := openDB(chaindata, true) + logger := log.New() + db := openDB(chaindata, logger, true) defer db.Close() if unwind == 0 { unwind = 1 @@ -138,7 +141,7 @@ func init() { rootCmd.AddCommand(loopExecCmd) } -func syncBySmallSteps(db ethdb.RwKV, miningConfig params.MiningConfig, ctx context.Context) error { +func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.Context) error { pm, engine, chainConfig, vmConfig, txPool, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig) tx, err := db.BeginRw(ctx) @@ -178,13 +181,13 @@ func syncBySmallSteps(db ethdb.RwKV, miningConfig params.MiningConfig, ctx conte stages.CreateHeadersSnapshot, stages.CreateBodiesSnapshot, stages.CreateStateSnapshot, - 
stages.TxPool, // TODO: enable TxPool stage + stages.TxPool, // TODO: enable TxPoolDB stage stages.Finish) execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, tmpDir) - execUntilFunc := func(execToBlock uint64) func(firstCycle bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx ethdb.RwTx) error { - return func(firstCycle bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx ethdb.RwTx) error { + execUntilFunc := func(execToBlock uint64) func(firstCycle bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { + return func(firstCycle bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, tx, execToBlock, ctx, execCfg, firstCycle); err != nil { return fmt.Errorf("spawnExecuteBlocksStage: %w", err) } @@ -284,9 +287,6 @@ func syncBySmallSteps(db ethdb.RwKV, miningConfig params.MiningConfig, ctx conte } //receiptsInDB := rawdb.ReadReceiptsByNumber(tx, progress(tx, stages.Execution)+1) - //if err := tx.RollbackAndBegin(context.Background()); err != nil { - // return err - //} if err := tx.Commit(); err != nil { return err } @@ -310,7 +310,7 @@ func syncBySmallSteps(db ethdb.RwKV, miningConfig params.MiningConfig, ctx conte if miner.MiningConfig.Enabled && nextBlock != nil && nextBlock.Header().Coinbase != (common.Address{}) { miner.MiningConfig.Etherbase = nextBlock.Header().Coinbase miner.MiningConfig.ExtraData = nextBlock.Header().Extra - miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx ethdb.RwTx) error { + miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx) error { err = stagedsync.SpawnMiningCreateBlockStage(s, tx, stagedsync.StageMiningCreateBlockCfg(db, miner, @@ -373,7 +373,7 @@ func 
syncBySmallSteps(db ethdb.RwKV, miningConfig params.MiningConfig, ctx conte return nil } -func checkChanges(expectedAccountChanges map[uint64]*changeset.ChangeSet, tx ethdb.Tx, expectedStorageChanges map[uint64]*changeset.ChangeSet, execAtBlock, prunedTo uint64) error { +func checkChanges(expectedAccountChanges map[uint64]*changeset.ChangeSet, tx kv.Tx, expectedStorageChanges map[uint64]*changeset.ChangeSet, execAtBlock, prunedTo uint64) error { checkHistoryFrom := execAtBlock if prunedTo > checkHistoryFrom { checkHistoryFrom = prunedTo @@ -389,10 +389,10 @@ func checkChanges(expectedAccountChanges map[uint64]*changeset.ChangeSet, tx eth delete(expectedStorageChanges, blockN) } - if err := checkHistory(tx, dbutils.AccountChangeSetBucket, checkHistoryFrom); err != nil { + if err := checkHistory(tx, kv.AccountChangeSet, checkHistoryFrom); err != nil { return err } - if err := checkHistory(tx, dbutils.StorageChangeSetBucket, checkHistoryFrom); err != nil { + if err := checkHistory(tx, kv.StorageChangeSet, checkHistoryFrom); err != nil { return err } return nil @@ -413,7 +413,7 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *params.ChainConfig) { } } -func loopIh(db ethdb.RwKV, ctx context.Context, unwind uint64) error { +func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { _, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) tmpdir := path.Join(datadir, etl.TmpDirName) tx, err := db.BeginRw(ctx) @@ -478,7 +478,7 @@ func loopIh(db ethdb.RwKV, ctx context.Context, unwind uint64) error { } } -func loopExec(db ethdb.RwKV, ctx context.Context, unwind uint64) error { +func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { pm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil) tx, err := db.BeginRw(ctx) @@ -501,7 +501,7 @@ func loopExec(db ethdb.RwKV, ctx context.Context, unwind uint64) error { cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpDBPath) // set 
block limit of execute stage - sync.MockExecFunc(stages.Execution, func(firstCycle bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx ethdb.RwTx) error { + sync.MockExecFunc(stages.Execution, func(firstCycle bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, tx, to, ctx, cfg, false); err != nil { return fmt.Errorf("spawnExecuteBlocksStage: %w", err) } @@ -530,10 +530,10 @@ func loopExec(db ethdb.RwKV, ctx context.Context, unwind uint64) error { } } -func checkChangeSet(db ethdb.Tx, blockNum uint64, expectedAccountChanges *changeset.ChangeSet, expectedStorageChanges *changeset.ChangeSet) error { +func checkChangeSet(db kv.Tx, blockNum uint64, expectedAccountChanges *changeset.ChangeSet, expectedStorageChanges *changeset.ChangeSet) error { i := 0 sort.Sort(expectedAccountChanges) - err := changeset.Walk(db, dbutils.AccountChangeSetBucket, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { + err := changeset.Walk(db, kv.AccountChangeSet, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { c := expectedAccountChanges.Changes[i] i++ if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) { @@ -559,7 +559,7 @@ func checkChangeSet(db ethdb.Tx, blockNum uint64, expectedAccountChanges *change i = 0 sort.Sort(expectedStorageChanges) - err = changeset.Walk(db, dbutils.StorageChangeSetBucket, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { + err = changeset.Walk(db, kv.StorageChangeSet, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { c := expectedStorageChanges.Changes[i] i++ if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) { @@ -583,7 +583,7 @@ func checkChangeSet(db ethdb.Tx, blockNum uint64, expectedAccountChanges *change return nil } -func checkHistory(tx ethdb.Tx, changeSetBucket string, 
blockNum uint64) error { +func checkHistory(tx kv.Tx, changeSetBucket string, blockNum uint64) error { indexBucket := changeset.Mapper[changeSetBucket].IndexBucket blockNumBytes := dbutils.EncodeBlockNumber(blockNum) if err := changeset.Walk(tx, changeSetBucket, blockNumBytes, 0, func(blockN uint64, address, v []byte) (bool, error) { diff --git a/cmd/pics/state.go b/cmd/pics/state.go index c38955b8b28..c1b2753bfbf 100644 --- a/cmd/pics/state.go +++ b/cmd/pics/state.go @@ -16,12 +16,11 @@ import ( "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" "github.com/ledgerwatch/erigon/cmd/pics/contracts" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages" @@ -69,35 +68,35 @@ import ( }*/ var bucketLabels = map[string]string{ - dbutils.Receipts: "Receipts", - dbutils.Log: "Event Logs", - dbutils.AccountsHistoryBucket: "History Of Accounts", - dbutils.StorageHistoryBucket: "History Of Storage", - dbutils.HeadersBucket: "Headers", - dbutils.HeaderCanonicalBucket: "Canonical headers", - dbutils.HeaderTDBucket: "Headers TD", - dbutils.BlockBodyPrefix: "Block Bodies", - dbutils.HeaderNumberBucket: "Header Numbers", - dbutils.TxLookupPrefix: "Transaction Index", - dbutils.CodeBucket: "Code Of Contracts", - dbutils.SyncStageProgress: "Sync Progress", - dbutils.PlainStateBucket: "Plain State", - dbutils.HashedAccountsBucket: "Hashed Accounts", - dbutils.HashedStorageBucket: "Hashed Storage", - dbutils.TrieOfAccountsBucket: "Intermediate Hashes Of Accounts", - dbutils.TrieOfStorageBucket: "Intermediate Hashes Of Storage", - dbutils.AccountChangeSetBucket: "Account Changes", - 
dbutils.StorageChangeSetBucket: "Storage Changes", - dbutils.IncarnationMapBucket: "Incarnations", - dbutils.Senders: "Transaction Senders", - dbutils.ContractTEVMCodeBucket: "Contract TEVM code", + kv.Receipts: "Receipts", + kv.Log: "Event Logs", + kv.AccountsHistory: "History Of Accounts", + kv.StorageHistory: "History Of Storage", + kv.Headers: "Headers", + kv.HeaderCanonical: "Canonical headers", + kv.HeaderTD: "Headers TD", + kv.BlockBody: "Block Bodies", + kv.HeaderNumber: "Header Numbers", + kv.TxLookup: "Transaction Index", + kv.CodeBucket: "Code Of Contracts", + kv.SyncStageProgress: "Sync Progress", + kv.PlainStateBucket: "Plain State", + kv.HashedAccounts: "Hashed Accounts", + kv.HashedStorage: "Hashed Storage", + kv.TrieOfAccounts: "Intermediate Hashes Of Accounts", + kv.TrieOfStorage: "Intermediate Hashes Of Storage", + kv.AccountChangeSet: "Account Changes", + kv.StorageChangeSet: "Storage Changes", + kv.IncarnationMap: "Incarnations", + kv.Senders: "Transaction Senders", + kv.ContractTEVMCode: "Contract TEVM code", } -/*dbutils.PlainContractCodeBucket, +/*dbutils.PlainContractCode, dbutils.CodeBucket, -dbutils.AccountsHistoryBucket, -dbutils.StorageHistoryBucket, -dbutils.TxLookupPrefix,*/ +dbutils.AccountsHistory, +dbutils.StorageHistory, +dbutils.TxLookup,*/ func hexPalette() error { filename := "hex_palette.dot" @@ -120,7 +119,7 @@ func hexPalette() error { return nil } -func stateDatabaseComparison(first ethdb.RwKV, second ethdb.RwKV, number int) error { +func stateDatabaseComparison(first kv.RwDB, second kv.RwDB, number int) error { filename := fmt.Sprintf("changes_%d.dot", number) f, err := os.Create(filename) if err != nil { @@ -132,8 +131,8 @@ func stateDatabaseComparison(first ethdb.RwKV, second ethdb.RwKV, number int) er noValues := make(map[int]struct{}) perBucketFiles := make(map[string]*os.File) - if err = second.View(context.Background(), func(readTx ethdb.Tx) error { - return first.View(context.Background(), func(firstTx ethdb.Tx) 
error { + if err = second.View(context.Background(), func(readTx kv.Tx) error { + return first.View(context.Background(), func(firstTx kv.Tx) error { for bucketName := range bucketLabels { bucketName := bucketName if err := readTx.ForEach(bucketName, nil, func(k, v []byte) error { @@ -411,7 +410,7 @@ func initialState1() error { return err } - emptyKv := kv.NewMemKV() + emptyKv := memdb.New() if err = stateDatabaseComparison(emptyKv, m.DB, 0); err != nil { return err } diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 10598021355..0d2842f8b6d 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -11,11 +11,11 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/services" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/paths" - "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/remotedb" + "github.com/ledgerwatch/erigon/ethdb/remotedbserver" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/node" @@ -123,16 +123,16 @@ func RootCommand() (*cobra.Command, *Flags) { return rootCmd, cfg } -func checkDbCompatibility(db ethdb.RoKV) error { +func checkDbCompatibility(db kv.RoDB) error { // DB schema version compatibility check var version []byte var compatErr error - var compatTx ethdb.Tx + var compatTx kv.Tx if compatTx, compatErr = db.BeginRo(context.Background()); compatErr != nil { return fmt.Errorf("open Ro Tx for DB schema compability check: %w", compatErr) } defer compatTx.Rollback() - if version, compatErr = compatTx.GetOne(dbutils.DatabaseInfoBucket, dbutils.DBSchemaVersionKey); compatErr != nil { 
+ if version, compatErr = compatTx.GetOne(kv.DatabaseInfo, kv.DBSchemaVersionKey); compatErr != nil { return fmt.Errorf("read version for DB schema compability check: %w", compatErr) } if len(version) != 12 { @@ -142,7 +142,7 @@ func checkDbCompatibility(db ethdb.RoKV) error { minor := binary.BigEndian.Uint32(version[4:]) patch := binary.BigEndian.Uint32(version[8:]) var compatible bool - dbSchemaVersion := &dbutils.DBSchemaVersion + dbSchemaVersion := &kv.DBSchemaVersion if major != dbSchemaVersion.Major { compatible = false } else if minor != dbSchemaVersion.Minor { @@ -160,35 +160,35 @@ func checkDbCompatibility(db ethdb.RoKV) error { return nil } -func RemoteServices(cfg Flags, rootCancel context.CancelFunc) (kv ethdb.RoKV, eth services.ApiBackend, txPool *services.TxPoolService, mining *services.MiningService, err error) { +func RemoteServices(cfg Flags, logger log.Logger, rootCancel context.CancelFunc) (db kv.RoDB, eth services.ApiBackend, txPool *services.TxPoolService, mining *services.MiningService, err error) { if !cfg.SingleNodeMode && cfg.PrivateApiAddr == "" { return nil, nil, nil, nil, fmt.Errorf("either remote db or local db must be specified") } // Do not change the order of these checks. 
Chaindata needs to be checked first, because PrivateApiAddr has default value which is not "" // If PrivateApiAddr is checked first, the Chaindata option will never work if cfg.SingleNodeMode { - var rwKv ethdb.RwKV - rwKv, err = kv2.NewMDBX().Path(cfg.Chaindata).Readonly().Open() + var rwKv kv.RwDB + rwKv, err = kv2.NewMDBX(logger).Path(cfg.Chaindata).Readonly().Open() if err != nil { return nil, nil, nil, nil, err } if compatErr := checkDbCompatibility(rwKv); compatErr != nil { return nil, nil, nil, nil, compatErr } - kv = rwKv + db = rwKv } else { log.Info("if you run RPCDaemon on same machine with Erigon add --datadir option") } if cfg.PrivateApiAddr != "" { - remoteKv, err := kv2.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion)).Path(cfg.PrivateApiAddr).Open(cfg.TLSCertfile, cfg.TLSKeyFile, cfg.TLSCACert) + remoteKv, err := remotedb.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion), logger).Path(cfg.PrivateApiAddr).Open(cfg.TLSCertfile, cfg.TLSKeyFile, cfg.TLSCACert) if err != nil { return nil, nil, nil, nil, fmt.Errorf("could not connect to remoteKv: %w", err) } remoteEth := services.NewRemoteBackend(remoteKv.GrpcConn()) mining = services.NewMiningService(remoteKv.GrpcConn()) txPool = services.NewTxPoolService(remoteKv.GrpcConn()) - if kv == nil { - kv = remoteKv + if db == nil { + db = remoteKv } eth = remoteEth go func() { @@ -206,7 +206,7 @@ func RemoteServices(cfg Flags, rootCancel context.CancelFunc) (kv ethdb.RoKV, et } }() } - return kv, eth, txPool, mining, err + return db, eth, txPool, mining, err } func StartRpcServer(ctx context.Context, cfg Flags, rpcAPI []rpc.API) error { diff --git a/cmd/rpcdaemon/commands/daemon.go b/cmd/rpcdaemon/commands/daemon.go index 9a04d346ea8..8678712eebd 100644 --- a/cmd/rpcdaemon/commands/daemon.go +++ b/cmd/rpcdaemon/commands/daemon.go @@ -7,12 +7,12 @@ import ( "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/filters" 
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/services" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" ) // APIList describes the list of available RPC apis -func APIList(ctx context.Context, db ethdb.RoKV, eth services.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, filters *filters.Filters, cfg cli.Flags, customAPIList []rpc.API) []rpc.API { +func APIList(ctx context.Context, db kv.RoDB, eth services.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, filters *filters.Filters, cfg cli.Flags, customAPIList []rpc.API) []rpc.API { var defaultAPIList []rpc.API base := NewBaseApi(filters) diff --git a/cmd/rpcdaemon/commands/debug_api.go b/cmd/rpcdaemon/commands/debug_api.go index 49f8170904a..7e208cfcb1a 100644 --- a/cmd/rpcdaemon/commands/debug_api.go +++ b/cmd/rpcdaemon/commands/debug_api.go @@ -16,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/transactions" @@ -35,12 +36,12 @@ type PrivateDebugAPI interface { // PrivateDebugAPIImpl is implementation of the PrivateDebugAPI interface based on remote Db access type PrivateDebugAPIImpl struct { *BaseAPI - db ethdb.RoKV + db kv.RoDB GasCap uint64 } // NewPrivateDebugAPI returns PrivateDebugAPIImpl instance -func NewPrivateDebugAPI(base *BaseAPI, db ethdb.RoKV, gascap uint64) *PrivateDebugAPIImpl { +func NewPrivateDebugAPI(base *BaseAPI, db kv.RoDB, gascap uint64) *PrivateDebugAPIImpl { return &PrivateDebugAPIImpl{ BaseAPI: base, db: db, diff --git a/cmd/rpcdaemon/commands/erigon_api.go b/cmd/rpcdaemon/commands/erigon_api.go index 4c0b798b65f..75a01a21d78 100644 --- a/cmd/rpcdaemon/commands/erigon_api.go +++ b/cmd/rpcdaemon/commands/erigon_api.go 
@@ -5,7 +5,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" ) @@ -31,11 +31,11 @@ type ErigonAPI interface { // ErigonImpl is implementation of the ErigonAPI interface type ErigonImpl struct { *BaseAPI - db ethdb.RoKV + db kv.RoDB } // NewErigonAPI returns ErigonImpl instance -func NewErigonAPI(base *BaseAPI, db ethdb.RoKV) *ErigonImpl { +func NewErigonAPI(base *BaseAPI, db kv.RoDB) *ErigonImpl { return &ErigonImpl{ BaseAPI: base, db: db, diff --git a/cmd/rpcdaemon/commands/erigon_issuance.go b/cmd/rpcdaemon/commands/erigon_issuance.go index a57fe458be7..aa1c8b79fda 100644 --- a/cmd/rpcdaemon/commands/erigon_issuance.go +++ b/cmd/rpcdaemon/commands/erigon_issuance.go @@ -7,7 +7,7 @@ import ( "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" ) @@ -69,7 +69,7 @@ func (api *ErigonImpl) Issuance(ctx context.Context, blockNr rpc.BlockNumber) (I return ret, nil } -func (api *ErigonImpl) getBlockByRPCNumber(tx ethdb.Tx, blockNr rpc.BlockNumber) (*types.Block, error) { +func (api *ErigonImpl) getBlockByRPCNumber(tx kv.Tx, blockNr rpc.BlockNumber) (*types.Block, error) { blockNum, err := getBlockNumber(blockNr, tx) if err != nil { return nil, err diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index 1ec1ce8a728..39cafe30653 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -8,6 +8,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/filters" @@ -18,7 +19,6 @@ 
import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" ethFilters "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" @@ -107,18 +107,18 @@ func NewBaseApi(f *filters.Filters) *BaseAPI { return &BaseAPI{filters: f} } -func (api *BaseAPI) chainConfig(tx ethdb.Tx) (*params.ChainConfig, error) { +func (api *BaseAPI) chainConfig(tx kv.Tx) (*params.ChainConfig, error) { cfg, _, err := api.chainConfigWithGenesis(tx) return cfg, err } // nolint:unused -func (api *BaseAPI) genesis(tx ethdb.Tx) (*types.Block, error) { +func (api *BaseAPI) genesis(tx kv.Tx) (*types.Block, error) { _, genesis, err := api.chainConfigWithGenesis(tx) return genesis, err } -func (api *BaseAPI) chainConfigWithGenesis(tx ethdb.Tx) (*params.ChainConfig, *types.Block, error) { +func (api *BaseAPI) chainConfigWithGenesis(tx kv.Tx) (*params.ChainConfig, *types.Block, error) { if api._chainConfig != nil { return api._chainConfig, api._genesis, nil } @@ -144,7 +144,7 @@ func (api *BaseAPI) pendingBlock() *types.Block { return api.filters.LastPendingBlock() } -func (api *BaseAPI) getBlockByNumber(number rpc.BlockNumber, tx ethdb.Tx) (*types.Block, error) { +func (api *BaseAPI) getBlockByNumber(number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { if number == rpc.PendingBlockNumber { return api.pendingBlock(), nil } @@ -164,12 +164,12 @@ type APIImpl struct { ethBackend services.ApiBackend txPool txpool.TxpoolClient mining txpool.MiningClient - db ethdb.RoKV + db kv.RoDB GasCap uint64 } // NewEthAPI returns APIImpl instance -func NewEthAPI(base *BaseAPI, db ethdb.RoKV, eth services.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, gascap uint64) *APIImpl { +func NewEthAPI(base *BaseAPI, db kv.RoDB, eth services.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, gascap uint64) 
*APIImpl { if gascap == 0 { gascap = uint64(math.MaxUint64 / 2) } diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index cb4ab6d7e4f..1606b573f0c 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -13,7 +13,7 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" @@ -51,7 +51,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas return result.Return(), result.Err } -func HeaderByNumberOrHash(ctx context.Context, tx ethdb.Tx, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { +func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { if blockLabel, ok := blockNrOrHash.Number(); ok { blockNum, err := getBlockNumber(blockLabel, tx) if err != nil { diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index 25fc27481b5..008c69fe18d 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -8,6 +8,7 @@ import ( "math/big" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/RoaringBitmap/roaring" "github.com/ledgerwatch/erigon/common" @@ -28,7 +29,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/transactions" ) -func getReceipts(ctx context.Context, tx ethdb.Tx, chainConfig *params.ChainConfig, block *types.Block, senders []common.Address) (types.Receipts, error) { +func getReceipts(ctx context.Context, tx kv.Tx, chainConfig *params.ChainConfig, block *types.Block, senders []common.Address) (types.Receipts, error) { if cached := rawdb.ReadReceipts(tx, block, senders); cached != nil { return 
cached, nil } @@ -110,7 +111,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ var addrBitmap *roaring.Bitmap for _, addr := range crit.Addresses { - m, err := bitmapdb.Get(tx, dbutils.LogAddressIndex, addr[:], uint32(begin), uint32(end)) + m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], uint32(begin), uint32(end)) if err != nil { return nil, err } @@ -142,7 +143,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ blockNToMatch := uint64(iter.Next()) var logIndex uint var blockLogs types.Logs - if err := tx.ForPrefix(dbutils.Log, dbutils.EncodeBlockNumber(blockNToMatch), func(k, v []byte) error { + if err := tx.ForPrefix(kv.Log, dbutils.EncodeBlockNumber(blockNToMatch), func(k, v []byte) error { var logs types.Logs if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { return fmt.Errorf("receipt unmarshal failed: %w", err) @@ -195,12 +196,12 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ // {{}, {B}} matches any topic in first position AND B in second position // {{A}, {B}} matches topic A in first position AND B in second position // {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position -func getTopicsBitmap(c ethdb.Tx, topics [][]common.Hash, from, to uint32) (*roaring.Bitmap, error) { +func getTopicsBitmap(c kv.Tx, topics [][]common.Hash, from, to uint32) (*roaring.Bitmap, error) { var result *roaring.Bitmap for _, sub := range topics { var bitmapForORing *roaring.Bitmap for _, topic := range sub { - m, err := bitmapdb.Get(c, dbutils.LogTopicIndex, topic[:], from, to) + m, err := bitmapdb.Get(c, kv.LogTopicIndex, topic[:], from, to) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/get_chain_config_test.go b/cmd/rpcdaemon/commands/get_chain_config_test.go index 4f7d0cf636e..c7b136a3f2f 100644 --- a/cmd/rpcdaemon/commands/get_chain_config_test.go +++ 
b/cmd/rpcdaemon/commands/get_chain_config_test.go @@ -5,11 +5,11 @@ import ( "testing" "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" ) func TestGetChainConfig(t *testing.T) { - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config, _, err := core.CommitGenesisBlock(db, core.DefaultGenesisBlock()) if err != nil { t.Fatalf("setting up genensis block: %v", err) diff --git a/cmd/rpcdaemon/commands/rpc_block.go b/cmd/rpcdaemon/commands/rpc_block.go index 79d12785a79..d3a746c25f2 100644 --- a/cmd/rpcdaemon/commands/rpc_block.go +++ b/cmd/rpcdaemon/commands/rpc_block.go @@ -4,11 +4,11 @@ import ( "fmt" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" ) -func getBlockNumber(number rpc.BlockNumber, tx ethdb.Tx) (uint64, error) { +func getBlockNumber(number rpc.BlockNumber, tx kv.Tx) (uint64, error) { var blockNum uint64 var err error if number == rpc.LatestBlockNumber || number == rpc.PendingBlockNumber { @@ -25,7 +25,7 @@ func getBlockNumber(number rpc.BlockNumber, tx ethdb.Tx) (uint64, error) { return blockNum, nil } -func getLatestBlockNumber(tx ethdb.Tx) (uint64, error) { +func getLatestBlockNumber(tx kv.Tx) (uint64, error) { blockNum, err := stages.GetStageProgress(tx, stages.Execution) if err != nil { return 0, fmt.Errorf("getting latest block number: %v", err) diff --git a/cmd/rpcdaemon/commands/send_transaction_test.go b/cmd/rpcdaemon/commands/send_transaction_test.go index c2eec7fc44f..b4f05b693e0 100644 --- a/cmd/rpcdaemon/commands/send_transaction_test.go +++ b/cmd/rpcdaemon/commands/send_transaction_test.go @@ -57,7 +57,7 @@ func TestSendRawTransaction(t *testing.T) { initialCycle := true highestSeenHeader := chain.TopBlock.NumberU64() - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); 
err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index 758ae020de1..b48dcf7c14d 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/stack" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -860,7 +860,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return api.doCallMany(ctx, dbtx, msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */) } -func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx ethdb.Tx, msgs []types.Message, callParams []TraceCallParam, parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header, +func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []types.Message, callParams []TraceCallParam, parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header, gasBailout bool, txIndexNeeded int) ([]*TraceCallResult, error) { chainConfig, err := api.chainConfig(dbtx) if err != nil { diff --git a/cmd/rpcdaemon/commands/trace_adhoc_test.go b/cmd/rpcdaemon/commands/trace_adhoc_test.go index 6c3cb3fedb9..8bcb36b616e 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc_test.go +++ b/cmd/rpcdaemon/commands/trace_adhoc_test.go @@ -10,7 +10,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" 
"github.com/stretchr/testify/require" ) @@ -61,7 +61,7 @@ func TestReplayTransaction(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) api := NewTraceAPI(NewBaseApi(nil), db, &cli.Flags{}) var txnHash common.Hash - if err := db.View(context.Background(), func(tx ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { b, err := rawdb.ReadBlockByNumber(tx, 6) if err != nil { return err diff --git a/cmd/rpcdaemon/commands/trace_api.go b/cmd/rpcdaemon/commands/trace_api.go index db1ce63ecf7..33505fc0792 100644 --- a/cmd/rpcdaemon/commands/trace_api.go +++ b/cmd/rpcdaemon/commands/trace_api.go @@ -8,7 +8,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" ) @@ -31,14 +31,14 @@ type TraceAPI interface { // TraceAPIImpl is implementation of the TraceAPI interface based on remote Db access type TraceAPIImpl struct { *BaseAPI - kv ethdb.RoKV + kv kv.RoDB maxTraces uint64 gasCap uint64 compatibility bool // Bug for bug compatiblity with OpenEthereum } // NewTraceAPI returns NewTraceAPI instance -func NewTraceAPI(base *BaseAPI, kv ethdb.RoKV, cfg *cli.Flags) *TraceAPIImpl { +func NewTraceAPI(base *BaseAPI, kv kv.RoDB, cfg *cli.Flags) *TraceAPIImpl { return &TraceAPIImpl{ BaseAPI: base, kv: kv, diff --git a/cmd/rpcdaemon/commands/trace_filtering.go b/cmd/rpcdaemon/commands/trace_filtering.go index 99dc849abc7..7f12f35eea0 100644 --- a/cmd/rpcdaemon/commands/trace_filtering.go +++ b/cmd/rpcdaemon/commands/trace_filtering.go @@ -7,13 +7,12 @@ import ( "github.com/RoaringBitmap/roaring/roaring64" jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/consensus/ethash" 
"github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" ) @@ -233,7 +232,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str var allBlocks roaring64.Bitmap for _, addr := range req.FromAddress { if addr != nil { - b, err := bitmapdb.Get64(dbtx, dbutils.CallFromIndex, addr.Bytes(), fromBlock, toBlock) + b, err := bitmapdb.Get64(dbtx, kv.CallFromIndex, addr.Bytes(), fromBlock, toBlock) if err != nil { stream.WriteNil() return err @@ -244,7 +243,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str } for _, addr := range req.ToAddress { if addr != nil { - b, err := bitmapdb.Get64(dbtx, dbutils.CallToIndex, addr.Bytes(), fromBlock, toBlock) + b, err := bitmapdb.Get64(dbtx, kv.CallToIndex, addr.Bytes(), fromBlock, toBlock) if err != nil { stream.WriteNil() return err @@ -416,7 +415,7 @@ func filter_trace(pt *ParityTrace, fromAddresses map[common.Address]struct{}, to return false } -func (api *TraceAPIImpl) callManyTransactions(ctx context.Context, dbtx ethdb.Tx, txs []types.Transaction, parentHash common.Hash, parentNo rpc.BlockNumber, header *types.Header, txIndex int, signer *types.Signer) ([]*TraceCallResult, error) { +func (api *TraceAPIImpl) callManyTransactions(ctx context.Context, dbtx kv.Tx, txs []types.Transaction, parentHash common.Hash, parentNo rpc.BlockNumber, header *types.Header, txIndex int, signer *types.Signer) ([]*TraceCallResult, error) { callParams := make([]TraceCallParam, 0, len(txs)) msgs := make([]types.Message, len(txs)) for i, tx := range txs { diff --git a/cmd/rpcdaemon/commands/txpool_api.go b/cmd/rpcdaemon/commands/txpool_api.go index 9d27f2b7ecf..336af072706 100644 --- a/cmd/rpcdaemon/commands/txpool_api.go +++ b/cmd/rpcdaemon/commands/txpool_api.go @@ -9,7 +9,7 @@ import ( 
"github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rlp" ) @@ -22,11 +22,11 @@ type TxPoolAPI interface { type TxPoolAPIImpl struct { *BaseAPI pool proto_txpool.TxpoolClient - db ethdb.RoKV + db kv.RoDB } // NewTxPoolAPI returns NetAPIImplImpl instance -func NewTxPoolAPI(base *BaseAPI, db ethdb.RoKV, pool proto_txpool.TxpoolClient) *TxPoolAPIImpl { +func NewTxPoolAPI(base *BaseAPI, db kv.RoDB, pool proto_txpool.TxpoolClient) *TxPoolAPIImpl { return &TxPoolAPIImpl{ BaseAPI: base, pool: pool, diff --git a/cmd/rpcdaemon/commands/txpool_api_test.go b/cmd/rpcdaemon/commands/txpool_api_test.go index 80c28c367be..41f535569f2 100644 --- a/cmd/rpcdaemon/commands/txpool_api_test.go +++ b/cmd/rpcdaemon/commands/txpool_api_test.go @@ -53,7 +53,7 @@ func TestTxPoolContent(t *testing.T) { initialCycle := true highestSeenHeader := chain.TopBlock.NumberU64() - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } diff --git a/cmd/rpcdaemon/main.go b/cmd/rpcdaemon/main.go index def1ea095e4..c9b6532b274 100644 --- a/cmd/rpcdaemon/main.go +++ b/cmd/rpcdaemon/main.go @@ -17,7 +17,8 @@ func main() { cmd, cfg := cli.RootCommand() rootCtx, rootCancel := utils.RootContext() cmd.RunE = func(cmd *cobra.Command, args []string) error { - db, backend, txPool, mining, err := cli.RemoteServices(*cfg, rootCancel) + logger := log.New() + db, backend, txPool, mining, err := cli.RemoteServices(*cfg, logger, rootCancel) if err != nil { log.Error("Could not connect to DB", "error", err) return nil diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go 
b/cmd/rpcdaemon/rpcdaemontest/test_util.go index 03de80a6cea..dbb5c25a82e 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -17,15 +17,15 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages" "google.golang.org/grpc" "google.golang.org/grpc/test/bufconn" ) -func CreateTestKV(t *testing.T) ethdb.RwKV { +func CreateTestKV(t *testing.T) kv.RwDB { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -214,8 +214,8 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g ethashApi := apis[1].Service.(*ethash.API) server := grpc.NewServer() - txpool.RegisterTxpoolServer(server, remotedbserver.NewTxPoolServer(ctx, m.TxPoolP2PServer.TxPool)) - txpool.RegisterMiningServer(server, remotedbserver.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) + txpool.RegisterTxpoolServer(server, privateapi.NewTxPoolServer(ctx, m.TxPoolP2PServer.TxPool)) + txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) listener := bufconn.Listen(1024 * 1024) dialer := func() func(context.Context, string) (net.Conn, error) { diff --git a/cmd/rpcdaemon/services/eth_backend.go b/cmd/rpcdaemon/services/eth_backend.go index 4fabf30fb03..a7e82b6f703 100644 --- a/cmd/rpcdaemon/services/eth_backend.go +++ b/cmd/rpcdaemon/services/eth_backend.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon/common" - 
"github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/log" "google.golang.org/grpc" "google.golang.org/grpc/status" @@ -37,7 +37,7 @@ type RemoteBackend struct { func NewRemoteBackend(cc grpc.ClientConnInterface) *RemoteBackend { return &RemoteBackend{ remoteEthBackend: remote.NewETHBACKENDClient(cc), - version: gointerfaces.VersionFromProto(remotedbserver.EthBackendAPIVersion), + version: gointerfaces.VersionFromProto(privateapi.EthBackendAPIVersion), log: log.New("remote_service", "eth_backend"), } } diff --git a/cmd/rpcdaemon/services/eth_mining.go b/cmd/rpcdaemon/services/eth_mining.go index 3f4ee155e05..957180740b0 100644 --- a/cmd/rpcdaemon/services/eth_mining.go +++ b/cmd/rpcdaemon/services/eth_mining.go @@ -6,7 +6,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/log" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" @@ -21,7 +21,7 @@ type MiningService struct { func NewMiningService(cc grpc.ClientConnInterface) *MiningService { return &MiningService{ MiningClient: txpool.NewMiningClient(cc), - version: gointerfaces.VersionFromProto(remotedbserver.MiningAPIVersion), + version: gointerfaces.VersionFromProto(privateapi.MiningAPIVersion), log: log.New("remote_service", "mining"), } } diff --git a/cmd/rpcdaemon/services/eth_txpool.go b/cmd/rpcdaemon/services/eth_txpool.go index d0cd93070b3..87b53ada136 100644 --- a/cmd/rpcdaemon/services/eth_txpool.go +++ b/cmd/rpcdaemon/services/eth_txpool.go @@ -6,7 +6,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/privateapi" 
"github.com/ledgerwatch/erigon/log" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" @@ -21,7 +21,7 @@ type TxPoolService struct { func NewTxPoolService(cc grpc.ClientConnInterface) *TxPoolService { return &TxPoolService{ TxpoolClient: txpool.NewTxpoolClient(cc), - version: gointerfaces.VersionFromProto(remotedbserver.TxPoolAPIVersion), + version: gointerfaces.VersionFromProto(privateapi.TxPoolAPIVersion), log: log.New("remote_service", "tx_pool"), } } diff --git a/cmd/rpctest/main.go b/cmd/rpctest/main.go index 6e1617bd6ce..92aeb1c55bc 100644 --- a/cmd/rpctest/main.go +++ b/cmd/rpctest/main.go @@ -235,7 +235,7 @@ func main() { Short: "", Long: ``, Run: func(cmd *cobra.Command, args []string) { - rpctest.CompareAccountRange(erigonURL, gethURL, tmpDataDir, tmpDataDirOrig, blockFrom, notRegenerateGethData) + rpctest.CompareAccountRange(log.New(), erigonURL, gethURL, tmpDataDir, tmpDataDirOrig, blockFrom, notRegenerateGethData) }, } with(compareAccountRange, withErigonUrl, withGethUrl, withBlockNum) diff --git a/cmd/rpctest/rpctest/account_range_verify.go b/cmd/rpctest/rpctest/account_range_verify.go index c49c9a26a72..f5579fca53f 100644 --- a/cmd/rpctest/rpctest/account_range_verify.go +++ b/cmd/rpctest/rpctest/account_range_verify.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "log" "net" "net/http" "os" @@ -13,26 +12,28 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/log" ) -func CompareAccountRange(erigonURL, gethURL, tmpDataDir, gethDataDir string, blockFrom uint64, notRegenerateGethData bool) { +func CompareAccountRange(logger log.Logger, erigonURL, gethURL, tmpDataDir, gethDataDir string, blockFrom uint64, notRegenerateGethData bool) { err 
:= os.RemoveAll(tmpDataDir) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } if !notRegenerateGethData { err = os.RemoveAll(gethDataDir) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } } - resultsKV := kv.NewMDBX().Path(tmpDataDir).MustOpen() - gethKV := kv.NewMDBX().Path(gethDataDir).MustOpen() + resultsKV := mdbx.NewMDBX(logger).Path(tmpDataDir).MustOpen() + gethKV := mdbx.NewMDBX(logger).Path(gethDataDir).MustOpen() var client = &http.Client{ Timeout: time.Minute * 60, @@ -56,7 +57,7 @@ func CompareAccountRange(erigonURL, gethURL, tmpDataDir, gethDataDir string, blo Result state.IteratorDump `json:"result"` } - f := func(url string, db ethdb.RwTx) error { + f := func(url string, db kv.RwTx) error { i := uint64(0) reqGen := &RequestGenerator{ client: client, @@ -83,7 +84,7 @@ func CompareAccountRange(erigonURL, gethURL, tmpDataDir, gethDataDir string, blo if innerErr != nil { return innerErr } - err = db.Put(dbutils.AccountsHistoryBucket, addr.Bytes(), b) + err = db.Put(kv.AccountsHistory, addr.Bytes(), b) if err != nil { return err } @@ -95,49 +96,58 @@ func CompareAccountRange(erigonURL, gethURL, tmpDataDir, gethDataDir string, blo next = ar.Result.Next } } - err = resultsKV.Update(context.Background(), func(tx ethdb.RwTx) error { + err = resultsKV.Update(context.Background(), func(tx kv.RwTx) error { return f(erigonURL, tx) }) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return + } if !notRegenerateGethData { - err = gethKV.Update(context.Background(), func(tx ethdb.RwTx) error { + err = gethKV.Update(context.Background(), func(tx kv.RwTx) error { return f(erigonURL, tx) }) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } } tgTx, err := resultsKV.BeginRo(context.Background()) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } gethTx, err := gethKV.BeginRo(context.Background()) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } - tgCursor, err := 
tgTx.Cursor(dbutils.AccountsHistoryBucket) + tgCursor, err := tgTx.Cursor(kv.AccountsHistory) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } defer tgCursor.Close() - gethCursor, err := gethTx.Cursor(dbutils.AccountsHistoryBucket) + gethCursor, err := gethTx.Cursor(kv.AccountsHistory) if err != nil { - log.Fatal(err) + log.Error(err.Error()) + return } defer gethCursor.Close() tgKey, tgVal, err1 := tgCursor.Next() if err1 != nil { - log.Fatal(err) + log.Error(err.Error()) + return } gethKey, gethVal, err2 := gethCursor.Next() if err2 != nil { - log.Fatal(err) + log.Error(err.Error()) + return } i := 0 @@ -159,23 +169,31 @@ func CompareAccountRange(erigonURL, gethURL, tmpDataDir, gethDataDir string, blo tgKey, tgVal, err1 = tgCursor.Next() if err1 != nil { - log.Fatal(err) + log.Error(err.Error()) + return + } gethKey, gethVal, err2 = gethCursor.Next() if err2 != nil { - log.Fatal(err) + log.Error(err.Error()) + return + } } else if cmp < 0 { gethMissed++ tgKey, tgVal, err1 = tgCursor.Next() if err1 != nil { - log.Fatal(err) + log.Error(err.Error()) + return + } } else if cmp > 0 { tgMissed++ gethKey, gethVal, err2 = gethCursor.Next() if err2 != nil { - log.Fatal(err) + log.Error(err.Error()) + return + } } i++ diff --git a/cmd/sentry/download/downloader.go b/cmd/sentry/download/downloader.go index a6b415479e0..eec5975a491 100644 --- a/cmd/sentry/download/downloader.go +++ b/cmd/sentry/download/downloader.go @@ -19,7 +19,7 @@ import ( "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -253,11 +253,11 @@ type ControlServerImpl struct { forks []uint64 genesisHash common.Hash networkId uint64 - db ethdb.RwKV + db kv.RwDB Engine consensus.Engine } -func NewControlServer(db 
ethdb.RwKV, nodeName string, chainConfig *params.ChainConfig, genesisHash common.Hash, engine consensus.Engine, networkID uint64, sentries []remote.SentryClient, window int) (*ControlServerImpl, error) { +func NewControlServer(db kv.RwDB, nodeName string, chainConfig *params.ChainConfig, genesisHash common.Hash, engine consensus.Engine, networkID uint64, sentries []remote.SentryClient, window int) (*ControlServerImpl, error) { hd := headerdownload.NewHeaderDownload( 512, /* anchorLimit */ 1024*1024, /* linkLimit */ @@ -284,7 +284,7 @@ func NewControlServer(db ethdb.RwKV, nodeName string, chainConfig *params.ChainC cs.genesisHash = genesisHash cs.networkId = networkID var err error - err = db.Update(context.Background(), func(tx ethdb.RwTx) error { + err = db.Update(context.Background(), func(tx kv.RwTx) error { cs.headHeight, cs.headHash, cs.headTd, err = bd.UpdateFromDb(tx) return err }) diff --git a/cmd/sentry/download/sentry_test.go b/cmd/sentry/download/sentry_test.go index a5663d790e0..23818a95669 100644 --- a/cmd/sentry/download/sentry_test.go +++ b/cmd/sentry/download/sentry_test.go @@ -14,14 +14,14 @@ import ( "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/params" "github.com/stretchr/testify/require" ) -func testSentryServer(db ethdb.KVGetter, genesis *core.Genesis, genesisHash common.Hash) *SentryServerImpl { +func testSentryServer(db kv.Getter, genesis *core.Genesis, genesisHash common.Hash) *SentryServerImpl { s := &SentryServerImpl{ ctx: context.Background(), } @@ -64,8 +64,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { EIP158Block: big.NewInt(2), ByzantiumBlock: big.NewInt(3), } - dbNoFork = kv.NewTestKV(t) - dbProFork = kv.NewTestKV(t) + dbNoFork = 
memdb.NewTestDB(t) + dbProFork = memdb.NewTestDB(t) gspecNoFork = &core.Genesis{Config: configNoFork} gspecProFork = &core.Genesis{Config: configProFork} @@ -76,12 +76,12 @@ func testForkIDSplit(t *testing.T, protocol uint) { var s1, s2 *SentryServerImpl - err := dbNoFork.Update(context.Background(), func(tx ethdb.RwTx) error { + err := dbNoFork.Update(context.Background(), func(tx kv.RwTx) error { s1 = testSentryServer(tx, gspecNoFork, genesisNoFork.Hash()) return nil }) require.NoError(t, err) - err = dbProFork.Update(context.Background(), func(tx ethdb.RwTx) error { + err = dbProFork.Update(context.Background(), func(tx kv.RwTx) error { s2 = testSentryServer(tx, gspecProFork, genesisProFork.Hash()) return nil }) diff --git a/cmd/snapshots/debug/debug_test.go b/cmd/snapshots/debug/debug_test.go index fdc73413859..c2168e5bf54 100644 --- a/cmd/snapshots/debug/debug_test.go +++ b/cmd/snapshots/debug/debug_test.go @@ -19,7 +19,10 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" + "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" ) @@ -40,36 +43,36 @@ func TestMatreshkaStream(t *testing.T) { chaindataDir := "/media/b00ris/nvme/fresh_sync/tg/chaindata" tmpDbDir := "/home/b00ris/event_stream" - chaindata, err := kv2.Open(chaindataDir, true) + chaindata, err := mdbx.Open(chaindataDir, log.New(), true) if err != nil { t.Fatal(err) } //tmpDb:=ethdb.NewMemDatabase() os.RemoveAll(tmpDbDir) - kv, err := kv2.NewMDBX().Path(tmpDbDir).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - defaultBuckets[AccountDiff] = dbutils.BucketConfigItem{} - defaultBuckets[StorageDiff] = dbutils.BucketConfigItem{} - defaultBuckets[ContractDiff] = 
dbutils.BucketConfigItem{} + db, err := mdbx.NewMDBX(log.New()).Path(tmpDbDir).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + defaultBuckets[AccountDiff] = kv.TableConfigItem{} + defaultBuckets[StorageDiff] = kv.TableConfigItem{} + defaultBuckets[ContractDiff] = kv.TableConfigItem{} return defaultBuckets }).Open() if err != nil { t.Fatal(err) } - chainConfig, _, genesisErr := core.CommitGenesisBlock(kv, core.DefaultGenesisBlock()) + chainConfig, _, genesisErr := core.CommitGenesisBlock(db, core.DefaultGenesisBlock()) if genesisErr != nil { t.Fatal(err) } - if err := kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.ClearBucket(dbutils.HeadHeaderKey) + if err := db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.ClearBucket(kv.HeadHeaderKey) }); err != nil { t.Fatal(err) } - snkv := kv2.NewSnapshotKV().DB(kv). + snkv := snapshotdb.NewSnapshotKV().DB(db). //broken - //SnapshotDB([]string{dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, dbutils.HeaderTDBucket, dbutils.HeaderNumberBucket, dbutils.BlockBodyPrefix, dbutils.HeadHeaderKey, dbutils.Senders}, chaindata.RwKV()). + //SnapshotDB([]string{dbutils.Headers, dbutils.HeaderCanonical, dbutils.HeaderTD, dbutils.HeaderNumber, dbutils.BlockBody, dbutils.HeadHeaderKey, dbutils.Senders}, chaindata.RwDB()). 
Open() _ = chaindata defer snkv.Close() @@ -84,7 +87,7 @@ func TestMatreshkaStream(t *testing.T) { //if err != nil { // t.Fatal(err) //} - psCursor, err := tx.Cursor(dbutils.PlainStateBucket) + psCursor, err := tx.Cursor(kv.PlainStateBucket) if err != nil { t.Fatal(err) } diff --git a/cmd/snapshots/generator/commands/copy_from_state.go b/cmd/snapshots/generator/commands/copy_from_state.go index f44edada74e..1630ee5af9d 100644 --- a/cmd/snapshots/generator/commands/copy_from_state.go +++ b/cmd/snapshots/generator/commands/copy_from_state.go @@ -6,9 +6,8 @@ import ( "os" "time" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" ) @@ -27,12 +26,12 @@ var copyFromStateSnapshotCmd = &cobra.Command{ Short: "Copy from state snapshot", Example: "go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --datadir /media/b00ris/nvme/backup/snapshotsync", RunE: func(cmd *cobra.Command, args []string) error { - return CopyFromState(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) + return CopyFromState(cmd.Context(), log.New(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, } -func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, block uint64, snapshotDir, snapshotMode string) error { - db, err := kv.Open(dbpath, true) +func CopyFromState(ctx context.Context, logger log.Logger, dbpath string, snapshotPath string, block uint64, snapshotDir, snapshotMode string) error { + db, err := mdbx.Open(dbpath, logger, true) if err != nil { return err } @@ -48,11 +47,11 @@ func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, bloc if err != nil { return err } - snkv := kv.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return 
dbutils.BucketsCfg{ - dbutils.PlainStateBucket: dbutils.BucketsConfigs[dbutils.PlainStateBucket], - dbutils.PlainContractCodeBucket: dbutils.BucketsConfigs[dbutils.PlainContractCodeBucket], - dbutils.CodeBucket: dbutils.BucketsConfigs[dbutils.CodeBucket], + snkv := mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.PlainStateBucket: kv.BucketsConfigs[kv.PlainStateBucket], + kv.PlainContractCode: kv.BucketsConfigs[kv.PlainContractCode], + kv.CodeBucket: kv.BucketsConfigs[kv.CodeBucket], } }).Path(snapshotPath).MustOpen() log.Info("Create snapshot db", "path", snapshotPath) @@ -61,15 +60,15 @@ func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, bloc defer logEvery.Stop() tt := time.Now() - if err = snkv.Update(ctx, func(snTx ethdb.RwTx) error { - return tx.ForEach(dbutils.PlainStateBucket, []byte{}, func(k, v []byte) error { - innerErr := snTx.Put(dbutils.PlainStateBucket, k, v) + if err = snkv.Update(ctx, func(snTx kv.RwTx) error { + return tx.ForEach(kv.PlainStateBucket, []byte{}, func(k, v []byte) error { + innerErr := snTx.Put(kv.PlainStateBucket, k, v) if innerErr != nil { return fmt.Errorf("put state err: %w", innerErr) } select { case <-logEvery.C: - log.Info("progress", "bucket", dbutils.PlainStateBucket, "key", fmt.Sprintf("%x", k)) + log.Info("progress", "bucket", kv.PlainStateBucket, "key", fmt.Sprintf("%x", k)) default: } @@ -82,15 +81,15 @@ func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, bloc log.Info("Copy plain state end", "t", time.Since(tt)) tt = time.Now() - if err = snkv.Update(ctx, func(sntx ethdb.RwTx) error { - return tx.ForEach(dbutils.PlainContractCodeBucket, []byte{}, func(k, v []byte) error { - innerErr := sntx.Put(dbutils.PlainContractCodeBucket, k, v) + if err = snkv.Update(ctx, func(sntx kv.RwTx) error { + return tx.ForEach(kv.PlainContractCode, []byte{}, func(k, v []byte) error { + innerErr := sntx.Put(kv.PlainContractCode, 
k, v) if innerErr != nil { return fmt.Errorf("put contract code err: %w", innerErr) } select { case <-logEvery.C: - log.Info("progress", "bucket", dbutils.PlainContractCodeBucket, "key", fmt.Sprintf("%x", k)) + log.Info("progress", "bucket", kv.PlainContractCode, "key", fmt.Sprintf("%x", k)) default: } return nil @@ -101,15 +100,15 @@ func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, bloc log.Info("Copy contract code end", "t", time.Since(tt)) tt = time.Now() - if err = snkv.Update(ctx, func(sntx ethdb.RwTx) error { - return tx.ForEach(dbutils.CodeBucket, []byte{}, func(k, v []byte) error { - innerErr := sntx.Put(dbutils.CodeBucket, k, v) + if err = snkv.Update(ctx, func(sntx kv.RwTx) error { + return tx.ForEach(kv.CodeBucket, []byte{}, func(k, v []byte) error { + innerErr := sntx.Put(kv.CodeBucket, k, v) if innerErr != nil { return fmt.Errorf("put code err: %w", innerErr) } select { case <-logEvery.C: - log.Info("progress", "bucket", dbutils.CodeBucket, "key", fmt.Sprintf("%x", k)) + log.Info("progress", "bucket", kv.CodeBucket, "key", fmt.Sprintf("%x", k)) default: } return nil @@ -125,5 +124,5 @@ func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, bloc defer func() { log.Info("Verify end", "t", time.Since(tt)) }() - return VerifyStateSnapshot(ctx, dbpath, snapshotPath, block) + return VerifyStateSnapshot(ctx, logger, dbpath, snapshotPath, block) } diff --git a/cmd/snapshots/generator/commands/generate_body_snapshot.go b/cmd/snapshots/generator/commands/generate_body_snapshot.go index c4a1941875e..7c5e69b55ca 100644 --- a/cmd/snapshots/generator/commands/generate_body_snapshot.go +++ b/cmd/snapshots/generator/commands/generate_body_snapshot.go @@ -6,13 +6,13 @@ import ( "os" "time" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/spf13/cobra" "github.com/ledgerwatch/erigon/common" 
"github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/log" ) @@ -29,19 +29,19 @@ var generateBodiesSnapshotCmd = &cobra.Command{ Short: "Generate bodies snapshot", Example: "go run cmd/snapshots/generator/main.go bodies --block 11000000 --datadir /media/b00ris/nvme/snapshotsync/ --snapshotDir /media/b00ris/nvme/snapshotsync/tg/snapshots/ --snapshotMode \"hb\" --snapshot /media/b00ris/nvme/snapshots/bodies_test", RunE: func(cmd *cobra.Command, args []string) error { - return BodySnapshot(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) + return BodySnapshot(cmd.Context(), log.New(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, } -func BodySnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock uint64, snapshotDir string, snapshotMode string) error { - kv := kv2.NewMDBX().Path(dbPath).MustOpen() - snKV := kv2.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{}, +func BodySnapshot(ctx context.Context, logger log.Logger, dbPath, snapshotPath string, toBlock uint64, snapshotDir string, snapshotMode string) error { + db := kv2.NewMDBX(logger).Path(dbPath).MustOpen() + snKV := kv2.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.BlockBody: kv.TableConfigItem{}, } }).Path(snapshotPath).MustOpen() - tx, err := kv.BeginRo(context.Background()) + tx, err := db.BeginRo(context.Background()) if err != nil { return err } @@ -52,7 +52,7 @@ func BodySnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock uint t := time.Now() var hash common.Hash - if err := snKV.Update(ctx, func(sntx ethdb.RwTx) error { + if err := snKV.Update(ctx, func(sntx kv.RwTx) error { for i := uint64(1); i <= toBlock; i++ { if common.IsCanceled(ctx) { return 
common.ErrStopped @@ -63,12 +63,12 @@ func BodySnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock uint return fmt.Errorf("getting canonical hash for block %d: %v", i, err) } body := rawdb.ReadBodyRLP(tx, hash, i) - if err = sntx.Put(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(i, hash), body); err != nil { + if err = sntx.Put(kv.BlockBody, dbutils.BlockBodyKey(i, hash), body); err != nil { return err } select { case <-logEvery.C: - log.Info("progress", "bucket", dbutils.BlockBodyPrefix, "block num", i) + log.Info("progress", "bucket", kv.BlockBody, "block num", i) default: } } diff --git a/cmd/snapshots/generator/commands/generate_header_snapshot.go b/cmd/snapshots/generator/commands/generate_header_snapshot.go index 897b138d3c4..c7fee475a10 100644 --- a/cmd/snapshots/generator/commands/generate_header_snapshot.go +++ b/cmd/snapshots/generator/commands/generate_header_snapshot.go @@ -7,7 +7,8 @@ import ( "os" "time" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/spf13/cobra" "github.com/ledgerwatch/erigon/common" @@ -29,11 +30,11 @@ var generateHeadersSnapshotCmd = &cobra.Command{ Short: "Generate headers snapshot", Example: "go run cmd/snapshots/generator/main.go headers --block 11000000 --datadir /media/b00ris/nvme/snapshotsync/ --snapshotDir /media/b00ris/nvme/snapshotsync/tg/snapshots/ --snapshotMode \"hb\" --snapshot /media/b00ris/nvme/snapshots/headers_test", RunE: func(cmd *cobra.Command, args []string) error { - return HeaderSnapshot(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) + return HeaderSnapshot(cmd.Context(), log.New(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, } -func HeaderSnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock uint64, snapshotDir string, snapshotMode string) error { +func HeaderSnapshot(ctx context.Context, logger log.Logger, dbPath, snapshotPath 
string, toBlock uint64, snapshotDir string, snapshotMode string) error { if snapshotPath == "" { return errors.New("empty snapshot path") } @@ -41,15 +42,15 @@ func HeaderSnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock ui if err != nil { return err } - kv := kv2.NewMDBX().Path(dbPath).MustOpen() + db := kv2.NewMDBX(logger).Path(dbPath).MustOpen() - snKV := kv2.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, + snKV := kv2.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.TableConfigItem{}, } }).Path(snapshotPath).MustOpen() - tx, err := kv.BeginRo(context.Background()) + tx, err := db.BeginRo(context.Background()) if err != nil { return err } @@ -63,7 +64,7 @@ func HeaderSnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock ui t := time.Now() var hash common.Hash var header []byte - c, err := snTx.RwCursor(dbutils.HeadersBucket) + c, err := snTx.RwCursor(kv.Headers) if err != nil { return err } diff --git a/cmd/snapshots/generator/commands/generate_state_snapshot.go b/cmd/snapshots/generator/commands/generate_state_snapshot.go index 1e583a3088b..0f268390c87 100644 --- a/cmd/snapshots/generator/commands/generate_state_snapshot.go +++ b/cmd/snapshots/generator/commands/generate_state_snapshot.go @@ -12,7 +12,9 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/spf13/cobra" ) @@ -31,11 +33,11 @@ var generateStateSnapshotCmd = &cobra.Command{ Short: "Generate state snapshot", Example: "go run ./cmd/state/main.go stateSnapshot 
--block 11000000 --datadir /media/b00ris/nvme/tgstaged/ --snapshot /media/b00ris/nvme/snapshots/state", RunE: func(cmd *cobra.Command, args []string) error { - return GenerateStateSnapshot(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) + return GenerateStateSnapshot(cmd.Context(), log.New(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, } -func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock uint64, snapshotDir string, snapshotMode string) error { +func GenerateStateSnapshot(ctx context.Context, logger log.Logger, dbPath, snapshotPath string, toBlock uint64, snapshotDir string, snapshotMode string) error { if snapshotPath == "" { return errors.New("empty snapshot path") } @@ -44,14 +46,14 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toB if err != nil { return err } - var kv, snkv ethdb.RwKV - - kv = kv2.NewMDBX().Path(dbPath).MustOpen() - snkv = kv2.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.PlainStateBucket: dbutils.BucketConfigItem{}, - dbutils.PlainContractCodeBucket: dbutils.BucketConfigItem{}, - dbutils.CodeBucket: dbutils.BucketConfigItem{}, + var db, snkv kv.RwDB + + db = kv2.NewMDBX(logger).Path(dbPath).MustOpen() + snkv = kv2.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.PlainStateBucket: kv.TableConfigItem{}, + kv.PlainContractCode: kv.TableConfigItem{}, + kv.CodeBucket: kv.TableConfigItem{}, } }).Path(snapshotPath).MustOpen() @@ -61,11 +63,11 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toB } defer writeTx.Rollback() - tx, err := kv.BeginRo(context.Background()) + tx, err := db.BeginRo(context.Background()) if err != nil { return err } - tx2, err := kv.BeginRo(context.Background()) + tx2, err := db.BeginRo(context.Background()) if err != nil { return err } @@ -104,7 
+106,7 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toB j := 0 innerErr := state.WalkAsOfStorage(tx2, common.BytesToAddress(k), acc.Incarnation, common.Hash{}, toBlock+1, func(k1, k2 []byte, vv []byte) (bool, error) { j++ - innerErr1 := writeTx.Put(dbutils.PlainStateBucket, dbutils.PlainGenerateCompositeStorageKey(k1, acc.Incarnation, k2), common.CopyBytes(vv)) + innerErr1 := writeTx.Put(kv.PlainStateBucket, dbutils.PlainGenerateCompositeStorageKey(k1, acc.Incarnation, k2), common.CopyBytes(vv)) if innerErr1 != nil { return false, innerErr1 } @@ -121,19 +123,19 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toB } if acc.IsEmptyCodeHash() { - codeHash, err1 := tx2.GetOne(dbutils.PlainContractCodeBucket, storagePrefix) + codeHash, err1 := tx2.GetOne(kv.PlainContractCode, storagePrefix) if err1 != nil && errors.Is(err1, ethdb.ErrKeyNotFound) { return false, fmt.Errorf("getting code hash for %x: %v", k, err1) } if len(codeHash) > 0 { - code, err1 := tx2.GetOne(dbutils.CodeBucket, codeHash) + code, err1 := tx2.GetOne(kv.CodeBucket, codeHash) if err1 != nil { return false, err1 } - if err1 = writeTx.Put(dbutils.CodeBucket, codeHash, code); err1 != nil { + if err1 = writeTx.Put(kv.CodeBucket, codeHash, code); err1 != nil { return false, err1 } - if err1 = writeTx.Put(dbutils.PlainContractCodeBucket, storagePrefix, codeHash); err1 != nil { + if err1 = writeTx.Put(kv.PlainContractCode, storagePrefix, codeHash); err1 != nil { return false, err1 } } @@ -141,7 +143,7 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toB } newAcc := make([]byte, acc.EncodingLengthForStorage()) acc.EncodeForStorage(newAcc) - innerErr := writeTx.Put(dbutils.PlainStateBucket, common.CopyBytes(k), newAcc) + innerErr := writeTx.Put(kv.PlainStateBucket, common.CopyBytes(k), newAcc) if innerErr != nil { return false, innerErr } @@ -157,5 +159,5 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, 
snapshotPath string, toB } fmt.Println("took", time.Since(t)) - return VerifyStateSnapshot(ctx, dbPath, snapshotFile, block) + return VerifyStateSnapshot(ctx, logger, dbPath, snapshotFile, block) } diff --git a/cmd/snapshots/generator/commands/verify_headers.go b/cmd/snapshots/generator/commands/verify_headers.go index 86bea7a390a..c042a989a3b 100644 --- a/cmd/snapshots/generator/commands/verify_headers.go +++ b/cmd/snapshots/generator/commands/verify_headers.go @@ -6,9 +6,8 @@ import ( "sync/atomic" "time" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -53,8 +52,8 @@ func VerifyHeadersSnapshot(ctx context.Context, snapshotPath string) error { if err != nil { return err } - err = snKV.View(ctx, func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.HeadersBucket) + err = snKV.View(ctx, func(tx kv.Tx) error { + c, err := tx.Cursor(kv.Headers) if err != nil { return err } diff --git a/cmd/snapshots/generator/commands/verify_state_snapshot.go b/cmd/snapshots/generator/commands/verify_state_snapshot.go index 3d112b9ad94..60b286e829d 100644 --- a/cmd/snapshots/generator/commands/verify_state_snapshot.go +++ b/cmd/snapshots/generator/commands/verify_state_snapshot.go @@ -7,11 +7,12 @@ import ( "os" "time" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" + "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" ) @@ -29,29 +30,30 @@ var verifyStateSnapshotCmd = &cobra.Command{ Short: "Verify state snapshot", Example: "go run 
cmd/snapshots/generator/main.go verify_state --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state/ --datadir /media/b00ris/nvme/backup/snapshotsync/", RunE: func(cmd *cobra.Command, args []string) error { - return VerifyStateSnapshot(cmd.Context(), chaindata, snapshotFile, block) + logger := log.New() + return VerifyStateSnapshot(cmd.Context(), logger, chaindata, snapshotFile, block) }, } -func VerifyStateSnapshot(ctx context.Context, dbPath, snapshotPath string, block uint64) error { - var snkv, tmpDB ethdb.RwKV +func VerifyStateSnapshot(ctx context.Context, logger log.Logger, dbPath, snapshotPath string, block uint64) error { + var snkv, tmpDB kv.RwDB tmpPath, err := ioutil.TempDir(os.TempDir(), "vrf*") if err != nil { return err } - snkv = kv.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.PlainStateBucket: dbutils.BucketsConfigs[dbutils.PlainStateBucket], - dbutils.PlainContractCodeBucket: dbutils.BucketsConfigs[dbutils.PlainContractCodeBucket], - dbutils.CodeBucket: dbutils.BucketsConfigs[dbutils.CodeBucket], + snkv = mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.PlainStateBucket: kv.BucketsConfigs[kv.PlainStateBucket], + kv.PlainContractCode: kv.BucketsConfigs[kv.PlainContractCode], + kv.CodeBucket: kv.BucketsConfigs[kv.CodeBucket], } }).Path(snapshotPath).Readonly().MustOpen() - tmpDB = kv.NewMDBX().Path(tmpPath).MustOpen() + tmpDB = mdbx.NewMDBX(logger).Path(tmpPath).MustOpen() defer os.RemoveAll(tmpPath) defer tmpDB.Close() - snkv = kv.NewSnapshotKV().StateSnapshot(snkv).DB(tmpDB).Open() + snkv = snapshotdb.NewSnapshotKV().StateSnapshot(snkv).DB(tmpDB).Open() tx, err := snkv.BeginRw(context.Background()) if err != nil { return err diff --git a/cmd/snapshots/tracker/commands/root.go b/cmd/snapshots/tracker/commands/root.go index 798d758d38d..5d3e1f27e0b 100644 --- 
a/cmd/snapshots/tracker/commands/root.go +++ b/cmd/snapshots/tracker/commands/root.go @@ -18,9 +18,9 @@ import ( "github.com/anacrolix/torrent/tracker" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" @@ -74,7 +74,7 @@ var rootCmd = &cobra.Command{ Args: cobra.ExactArgs(1), ArgAliases: []string{"snapshots dir"}, RunE: func(cmd *cobra.Command, args []string) error { - db := kv.MustOpen(args[0]) + db := mdbx.MustOpen(args[0]) m := http.NewServeMux() m.Handle("/announce", &Tracker{db: db}) m.HandleFunc("/scrape", func(writer http.ResponseWriter, request *http.Request) { @@ -89,8 +89,8 @@ var rootCmd = &cobra.Command{ ih: {}, }} - err := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.SnapshotInfoBucket) + err := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.SnapshotInfo) if err != nil { return err } @@ -145,7 +145,7 @@ var rootCmd = &cobra.Command{ } type Tracker struct { - db ethdb.RwKV + db kv.RwDB } /* @@ -210,8 +210,8 @@ func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) { key := append(req.InfoHash, req.PeerID...) 
if req.Event == tracker.Stopped.String() { - err = t.db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Delete(dbutils.SnapshotInfoBucket, key, nil) + err = t.db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Delete(kv.SnapshotInfo, key, nil) }) if err != nil { log.Error("Json marshal", "err", err) @@ -220,8 +220,8 @@ func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } else { var prevBytes []byte - err = t.db.View(context.Background(), func(tx ethdb.Tx) error { - prevBytes, err = tx.GetOne(dbutils.SnapshotInfoBucket, key) + err = t.db.View(context.Background(), func(tx kv.Tx) error { + prevBytes, err = tx.GetOne(kv.SnapshotInfo, key) return err }) if err != nil { @@ -244,8 +244,8 @@ func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - if err = t.db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.SnapshotInfoBucket, key, peerBytes) + if err = t.db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.SnapshotInfo, key, peerBytes) }); err != nil { log.Error("db.Put", "err", err) WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact) @@ -258,8 +258,8 @@ func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) { TrackerId: trackerID, } - if err := t.db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForPrefix(dbutils.SnapshotInfoBucket, append(req.InfoHash, make([]byte, 20)...), func(k, v []byte) error { + if err := t.db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForPrefix(kv.SnapshotInfo, append(req.InfoHash, make([]byte, 20)...), func(k, v []byte) error { a := AnnounceReqWithTime{} err = json.Unmarshal(v, &a) if err != nil { diff --git a/cmd/snapshots/utils/utils.go b/cmd/snapshots/utils/utils.go index 78d88a92cec..5be3b3e19ca 100644 --- a/cmd/snapshots/utils/utils.go +++ b/cmd/snapshots/utils/utils.go @@ -4,8 +4,9 @@ import ( "errors" "os" - 
"github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/log" ) var ErrUnsupported error = errors.New("unsupported KV type") @@ -14,6 +15,6 @@ func RmTmpFiles(snapshotPath string) error { return os.Remove(snapshotPath + "/mdbx.lck") } -func OpenSnapshotKV(configsFunc kv.BucketConfigsFunc, path string) ethdb.RwKV { - return kv.NewMDBX().WithBucketsConfig(configsFunc).Path(path).MustOpen() +func OpenSnapshotKV(configsFunc mdbx.BucketConfigsFunc, path string) kv.RwDB { + return mdbx.NewMDBX(log.New()).WithBucketsConfig(configsFunc).Path(path).MustOpen() } diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index 396dd887f08..3ba7fcdf335 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -19,7 +19,8 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" ) @@ -43,13 +44,14 @@ var checkChangeSetsCmd = &cobra.Command{ Use: "checkChangeSets", Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets", RunE: func(cmd *cobra.Command, args []string) error { - return CheckChangeSets(genesis, block, chaindata, historyfile, nocheck, writeReceipts) + logger := log.New() + return CheckChangeSets(genesis, logger, block, chaindata, historyfile, nocheck, writeReceipts) }, } // CheckChangeSets re-executes historical transactions in read-only mode // and checks that their outputs match the database ChangeSets. 
-func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, writeReceipts bool) error { +func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, chaindata string, historyfile string, nocheck bool, writeReceipts bool) error { if len(historyfile) == 0 { historyfile = chaindata } @@ -64,11 +66,11 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h interruptCh <- true }() - kv, err := kv2.NewMDBX().Path(chaindata).Open() + db, err := kv2.NewMDBX(logger).Path(chaindata).Open() if err != nil { return err } - chainDb := kv + chainDb := db defer chainDb.Close() historyDb := chainDb if chaindata != historyfile { @@ -161,7 +163,7 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h sort.Sort(accountChanges) i := 0 match := true - err = changeset.Walk(historyTx, dbutils.AccountChangeSetBucket, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { + err = changeset.Walk(historyTx, kv.AccountChangeSet, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { c := accountChanges.Changes[i] if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) { i++ @@ -193,7 +195,7 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h expectedStorageChanges = changeset.NewChangeSet() } sort.Sort(expectedStorageChanges) - err = changeset.Walk(historyTx, dbutils.StorageChangeSetBucket, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { + err = changeset.Walk(historyTx, kv.StorageChangeSet, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) { c := expectedStorageChanges.Changes[i] i++ if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) { diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go index 959b26f7a17..d1759f913f3 100644 --- 
a/cmd/state/commands/global_flags_vars.go +++ b/cmd/state/commands/global_flags_vars.go @@ -1,8 +1,8 @@ package commands import ( - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/paths" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/spf13/cobra" ) @@ -45,9 +45,9 @@ func withStatsfile(cmd *cobra.Command) { } func withCSBucket(cmd *cobra.Command) { - cmd.Flags().StringVar(&changeSetBucket, "changeset-bucket", dbutils.AccountChangeSetBucket, dbutils.AccountChangeSetBucket+" for account and "+dbutils.StorageChangeSetBucket+" for storage") + cmd.Flags().StringVar(&changeSetBucket, "changeset-bucket", kv.AccountChangeSet, kv.AccountChangeSet+" for account and "+kv.StorageChangeSet+" for storage") } func withIndexBucket(cmd *cobra.Command) { - cmd.Flags().StringVar(&indexBucket, "index-bucket", dbutils.AccountsHistoryBucket, dbutils.AccountsHistoryBucket+" for account and "+dbutils.StorageHistoryBucket+" for storage") + cmd.Flags().StringVar(&indexBucket, "index-bucket", kv.AccountsHistory, kv.AccountsHistory+" for account and "+kv.StorageHistory+" for storage") } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index 22b7b97602e..a25657ce3e8 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -23,8 +23,8 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/stack" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/spf13/cobra" @@ -393,7 +393,7 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB ot := NewOpcodeTracer(blockNum, saveOpcodes, saveBblocks) - chainDb := kv.MustOpen(chaindata) + chainDb := mdbx.MustOpen(chaindata) defer chainDb.Close() historyDb := chainDb historyTx, 
err1 := historyDb.BeginRo(context.Background()) @@ -526,7 +526,7 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB blockNumLastReport := blockNum for !interrupt { var block *types.Block - if err := chainDb.View(context.Background(), func(tx ethdb.Tx) (err error) { + if err := chainDb.View(context.Background(), func(tx kv.Tx) (err error) { block, err = rawdb.ReadBlockByNumber(tx, blockNum) return err }); err != nil { diff --git a/cmd/state/commands/verify_headers_snapshot.go b/cmd/state/commands/verify_headers_snapshot.go index 06512b3b083..f2ab66472d0 100644 --- a/cmd/state/commands/verify_headers_snapshot.go +++ b/cmd/state/commands/verify_headers_snapshot.go @@ -2,6 +2,7 @@ package commands import ( "github.com/ledgerwatch/erigon/cmd/state/verify" + "github.com/ledgerwatch/erigon/log" "github.com/spf13/cobra" ) @@ -17,6 +18,7 @@ var verifyHeadersSnapshotCmd = &cobra.Command{ if chaindata == "" && len(args) > 0 { chaindata = args[0] } - return verify.HeadersSnapshot(chaindata) + logger := log.New() + return verify.HeadersSnapshot(logger, chaindata) }, } diff --git a/cmd/state/generate/regenerate_tx_lookup.go b/cmd/state/generate/regenerate_tx_lookup.go index 6047c2265d1..954ef93e095 100644 --- a/cmd/state/generate/regenerate_tx_lookup.go +++ b/cmd/state/generate/regenerate_tx_lookup.go @@ -10,19 +10,20 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" ) func RegenerateTxLookup(chaindata string) error { - db := kv.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - if err := tx.ClearBucket(dbutils.TxLookupPrefix); err != nil { + if err := tx.ClearBucket(kv.TxLookup); err != nil { return err } 
diff --git a/cmd/state/stats/index_stats.go b/cmd/state/stats/index_stats.go index 21b0056bf84..4a02e1097d1 100644 --- a/cmd/state/stats/index_stats.go +++ b/cmd/state/stats/index_stats.go @@ -13,15 +13,15 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" ) func IndexStats(chaindata string, indexBucket string, statsFile string) error { - db := kv.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) startTime := time.Now() lenOfKey := common.AddressLength - if strings.HasPrefix(indexBucket, dbutils.StorageHistoryBucket) { + if strings.HasPrefix(indexBucket, kv.StorageHistory) { lenOfKey = common.AddressLength + common.HashLength + common.IncarnationLength } diff --git a/cmd/state/verify/check_changeset_enc.go b/cmd/state/verify/check_changeset_enc.go index 64d3f0c4100..0a4f634252a 100644 --- a/cmd/state/verify/check_changeset_enc.go +++ b/cmd/state/verify/check_changeset_enc.go @@ -10,9 +10,8 @@ import ( "time" "github.com/ledgerwatch/erigon/common/changeset" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "golang.org/x/sync/errgroup" ) @@ -22,7 +21,7 @@ type Walker interface { } func CheckEnc(chaindata string) error { - db := kv.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() var ( currentSize uint64 @@ -30,9 +29,9 @@ func CheckEnc(chaindata string) error { ) //set test methods - chainDataStorageDecoder := changeset.Mapper[dbutils.StorageChangeSetBucket].Decode - testStorageEncoder := changeset.Mapper[dbutils.StorageChangeSetBucket].Encode - testStorageDecoder := changeset.Mapper[dbutils.StorageChangeSetBucket].Decode + chainDataStorageDecoder := changeset.Mapper[kv.StorageChangeSet].Decode + testStorageEncoder := changeset.Mapper[kv.StorageChangeSet].Encode + testStorageDecoder 
:= changeset.Mapper[kv.StorageChangeSet].Decode startTime := time.Now() ch := make(chan struct { @@ -80,8 +79,8 @@ func CheckEnc(chaindata string) error { defer func() { close(stop) }() - return db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.StorageChangeSetBucket, []byte{}, func(k, v []byte) error { + return db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.StorageChangeSet, []byte{}, func(k, v []byte) error { if i%100_000 == 0 { blockNum := binary.BigEndian.Uint64(k) fmt.Printf("Processed %dK, block number %d, current %d, new %d, time %s\n", diff --git a/cmd/state/verify/check_indexes.go b/cmd/state/verify/check_indexes.go index a79ed576c43..6dccc97e159 100644 --- a/cmd/state/verify/check_indexes.go +++ b/cmd/state/verify/check_indexes.go @@ -9,11 +9,11 @@ import ( "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" ) func CheckIndex(ctx context.Context, chaindata string, changeSetBucket string, indexBucket string) error { - db := kv.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) defer db.Close() tx, err := db.BeginRo(context.Background()) if err != nil { diff --git a/cmd/state/verify/verify_headers_snapshot.go b/cmd/state/verify/verify_headers_snapshot.go index 46298652549..e6a597386d4 100644 --- a/cmd/state/verify/verify_headers_snapshot.go +++ b/cmd/state/verify/verify_headers_snapshot.go @@ -4,23 +4,22 @@ import ( "context" "errors" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" ) -func HeadersSnapshot(snapshotPath string) error { - snKV := 
kv.NewMDBX().Path(snapshotPath).Readonly().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, +func HeadersSnapshot(logger log.Logger, snapshotPath string) error { + snKV := mdbx.NewMDBX(logger).Path(snapshotPath).Readonly().WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.TableConfigItem{}, } }).MustOpen() var prevHeader *types.Header - err := snKV.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.HeadersBucket) + err := snKV.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.Headers) if err != nil { return err } diff --git a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go index fbf849e8952..8cd41078645 100644 --- a/cmd/state/verify/verify_txlookup.go +++ b/cmd/state/verify/verify_txlookup.go @@ -10,14 +10,14 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" ) func ValidateTxLookups(chaindata string) error { - db := kv.MustOpen(chaindata) + db := mdbx.MustOpen(chaindata) tx, err := db.BeginRo(context.Background()) if err != nil { return err @@ -58,7 +58,7 @@ func ValidateTxLookups(chaindata string) error { bn := blockBytes.Bytes() for _, txn := range body.Transactions { - val, err := tx.GetOne(dbutils.TxLookupPrefix, txn.Hash().Bytes()) + val, err := tx.GetOne(kv.TxLookup, txn.Hash().Bytes()) iterations++ if iterations%100000 == 0 { log.Info("Validated", "entries", iterations, "number", blockNum) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index fd6d28e7091..e1d8550c9a5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -31,6 +31,7 @@ import ( "text/template" 
"github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/urfave/cli" @@ -42,7 +43,6 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/gasprice" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/internal/flags" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" @@ -1251,7 +1251,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *node.Config, cfg *ethconfig.Conf cfg.Miner.GasPrice = big.NewInt(1) } default: - Fatalf("Chain name is not recognized: %s", chain) + Fatalf("ChainDB name is not recognized: %s", chain) } } @@ -1285,8 +1285,8 @@ func SplitTagsFlag(tagsFlag string) map[string]string { } // MakeChainDatabase open a database using the flags passed to the client and will hard crash if it fails. -func MakeChainDatabase(cfg *node.Config) ethdb.RwKV { - chainDb, err := node.OpenDatabase(cfg, ethdb.Chain) +func MakeChainDatabase(logger log.Logger, cfg *node.Config) kv.RwDB { + chainDb, err := node.OpenDatabase(cfg, logger, kv.ChainDB) if err != nil { Fatalf("Could not open database: %v", err) } diff --git a/common/changeset/account_changeset.go b/common/changeset/account_changeset.go index ff121477274..02f8ef8b572 100644 --- a/common/changeset/account_changeset.go +++ b/common/changeset/account_changeset.go @@ -7,7 +7,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) type Encoder func(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error @@ -42,7 +42,7 @@ func DecodeAccounts(dbKey, dbValue []byte) (uint64, []byte, []byte) { return blockN, k, v } -func FindAccount(c ethdb.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error) { +func FindAccount(c kv.CursorDupSort, blockNumber uint64, key []byte) 
([]byte, error) { k := dbutils.EncodeBlockNumber(blockNumber) v, err := c.SeekBothRange(k, key) if err != nil { @@ -56,9 +56,9 @@ func FindAccount(c ethdb.CursorDupSort, blockNumber uint64, key []byte) ([]byte, } // GetModifiedAccounts returns a list of addresses that were modified in the block range -func GetModifiedAccounts(db ethdb.Tx, startNum, endNum uint64) ([]common.Address, error) { +func GetModifiedAccounts(db kv.Tx, startNum, endNum uint64) ([]common.Address, error) { changedAddrs := make(map[common.Address]struct{}) - if err := Walk(db, dbutils.AccountChangeSetBucket, dbutils.EncodeBlockNumber(startNum), 0, func(blockN uint64, k, v []byte) (bool, error) { + if err := Walk(db, kv.AccountChangeSet, dbutils.EncodeBlockNumber(startNum), 0, func(blockN uint64, k, v []byte) (bool, error) { if blockN > endNum { return false, nil } diff --git a/common/changeset/account_changeset_test.go b/common/changeset/account_changeset_test.go index 8acee1836bb..02d051b72cb 100644 --- a/common/changeset/account_changeset_test.go +++ b/common/changeset/account_changeset_test.go @@ -7,12 +7,12 @@ import ( "testing" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/stretchr/testify/assert" ) func TestEncodingAccount(t *testing.T) { - bkt := dbutils.AccountChangeSetBucket + bkt := kv.AccountChangeSet m := Mapper[bkt] ch := m.New() diff --git a/common/changeset/changeset.go b/common/changeset/changeset.go index 3e2db9f779e..0b8f542cbf8 100644 --- a/common/changeset/changeset.go +++ b/common/changeset/changeset.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) func NewChangeSet() *ChangeSet { @@ -112,8 +113,8 @@ func FromDBFormat(dbKey, dbValue []byte) (uint64, []byte, []byte) { } } -func AvailableFrom(tx ethdb.Tx) (uint64, error) { - c, 
err := tx.Cursor(dbutils.AccountChangeSetBucket) +func AvailableFrom(tx kv.Tx) (uint64, error) { + c, err := tx.Cursor(kv.AccountChangeSet) if err != nil { return math.MaxUint64, err } @@ -127,8 +128,8 @@ func AvailableFrom(tx ethdb.Tx) (uint64, error) { } return binary.BigEndian.Uint64(k), nil } -func AvailableStorageFrom(tx ethdb.Tx) (uint64, error) { - c, err := tx.Cursor(dbutils.StorageChangeSetBucket) +func AvailableStorageFrom(tx kv.Tx) (uint64, error) { + c, err := tx.Cursor(kv.StorageChangeSet) if err != nil { return math.MaxUint64, err } @@ -143,7 +144,7 @@ func AvailableStorageFrom(tx ethdb.Tx) (uint64, error) { return binary.BigEndian.Uint64(k), nil } -func Walk(db ethdb.Tx, bucket string, startkey []byte, fixedbits int, walker func(blockN uint64, k, v []byte) (bool, error)) error { +func Walk(db kv.Tx, bucket string, startkey []byte, fixedbits int, walker func(blockN uint64, k, v []byte) (bool, error)) error { var blockN uint64 c, err := db.Cursor(bucket) if err != nil { @@ -156,11 +157,11 @@ func Walk(db ethdb.Tx, bucket string, startkey []byte, fixedbits int, walker fun }) } -func Truncate(tx ethdb.RwTx, from uint64) error { +func Truncate(tx kv.RwTx, from uint64) error { keyStart := dbutils.EncodeBlockNumber(from) { - c, err := tx.RwCursorDupSort(dbutils.AccountChangeSetBucket) + c, err := tx.RwCursorDupSort(kv.AccountChangeSet) if err != nil { return err } @@ -176,7 +177,7 @@ func Truncate(tx ethdb.RwTx, from uint64) error { } } { - c, err := tx.RwCursorDupSort(dbutils.StorageChangeSetBucket) + c, err := tx.RwCursorDupSort(kv.StorageChangeSet) if err != nil { return err } @@ -197,21 +198,21 @@ func Truncate(tx ethdb.RwTx, from uint64) error { var Mapper = map[string]struct { IndexBucket string IndexChunkKey func([]byte, uint64) []byte - Find func(cursor ethdb.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error) + Find func(cursor kv.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error) New func() *ChangeSet Encode Encoder Decode 
Decoder }{ - dbutils.AccountChangeSetBucket: { - IndexBucket: dbutils.AccountsHistoryBucket, + kv.AccountChangeSet: { + IndexBucket: kv.AccountsHistory, IndexChunkKey: dbutils.AccountIndexChunkKey, New: NewAccountChangeSet, Find: FindAccount, Encode: EncodeAccounts, Decode: DecodeAccounts, }, - dbutils.StorageChangeSetBucket: { - IndexBucket: dbutils.StorageHistoryBucket, + kv.StorageChangeSet: { + IndexBucket: kv.StorageHistory, IndexChunkKey: dbutils.StorageIndexChunkKey, Find: FindStorage, New: NewStorageChangeSet, diff --git a/common/changeset/storage_changeset.go b/common/changeset/storage_changeset.go index 0339cef3707..632a0d91765 100644 --- a/common/changeset/storage_changeset.go +++ b/common/changeset/storage_changeset.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) const ( @@ -58,7 +59,7 @@ func DecodeStorage(dbKey, dbValue []byte) (uint64, []byte, []byte) { return blockN, k, v } -func FindStorage(c ethdb.CursorDupSort, blockNumber uint64, k []byte) ([]byte, error) { +func FindStorage(c kv.CursorDupSort, blockNumber uint64, k []byte) ([]byte, error) { addWithInc, loc := k[:common.AddressLength+common.IncarnationLength], k[common.AddressLength+common.IncarnationLength:] seek := make([]byte, common.BlockNumberLength+common.AddressLength+common.IncarnationLength) binary.BigEndian.PutUint64(seek, blockNumber) @@ -75,10 +76,10 @@ func FindStorage(c ethdb.CursorDupSort, blockNumber uint64, k []byte) ([]byte, e // RewindDataPlain generates rewind data for all plain buckets between the timestamp // timestapSrc is the current timestamp, and timestamp Dst is where we rewind -func RewindData(db ethdb.Tx, timestampSrc, timestampDst uint64, changes *etl.Collector, quit <-chan struct{}) error { +func RewindData(db kv.Tx, timestampSrc, timestampDst uint64, changes *etl.Collector, quit <-chan struct{}) error { if err 
:= walkAndCollect( changes.Collect, - db, dbutils.AccountChangeSetBucket, + db, kv.AccountChangeSet, timestampDst+1, timestampSrc, quit, ); err != nil { @@ -87,7 +88,7 @@ func RewindData(db ethdb.Tx, timestampSrc, timestampDst uint64, changes *etl.Col if err := walkAndCollect( changes.Collect, - db, dbutils.StorageChangeSetBucket, + db, kv.StorageChangeSet, timestampDst+1, timestampSrc, quit, ); err != nil { @@ -97,7 +98,7 @@ func RewindData(db ethdb.Tx, timestampSrc, timestampDst uint64, changes *etl.Col return nil } -func walkAndCollect(collectorFunc func([]byte, []byte) error, db ethdb.Tx, bucket string, timestampDst, timestampSrc uint64, quit <-chan struct{}) error { +func walkAndCollect(collectorFunc func([]byte, []byte) error, db kv.Tx, bucket string, timestampDst, timestampSrc uint64, quit <-chan struct{}) error { c, err := db.Cursor(bucket) if err != nil { return err diff --git a/common/changeset/storage_changeset_test.go b/common/changeset/storage_changeset_test.go index 0872b762a32..0139e09d5d7 100644 --- a/common/changeset/storage_changeset_test.go +++ b/common/changeset/storage_changeset_test.go @@ -10,14 +10,14 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( - storageTable = dbutils.StorageChangeSetBucket + storageTable = kv.StorageChangeSet defaultIncarnation = 1 ) @@ -198,7 +198,7 @@ func TestEncodingStorageNewWithoutNotDefaultIncarnationWalk(t *testing.T) { func TestEncodingStorageNewWithoutNotDefaultIncarnationFind(t *testing.T) { m := Mapper[storageTable] - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) clear := func() { c, err := tx.RwCursor(storageTable) @@ -221,7 +221,7 @@ func TestEncodingStorageNewWithoutNotDefaultIncarnationFind(t *testing.T) { func 
TestEncodingStorageNewWithoutNotDefaultIncarnationFindWithoutIncarnation(t *testing.T) { bkt := storageTable m := Mapper[bkt] - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) clear := func() { c, err := tx.RwCursor(bkt) @@ -243,8 +243,8 @@ func TestEncodingStorageNewWithoutNotDefaultIncarnationFindWithoutIncarnation(t func doTestFind( t *testing.T, - tx ethdb.RwTx, - findFunc func(ethdb.CursorDupSort, uint64, []byte) ([]byte, error), + tx kv.RwTx, + findFunc func(kv.CursorDupSort, uint64, []byte) ([]byte, error), clear func(), ) { m := Mapper[storageTable] @@ -361,9 +361,9 @@ func formatTestName(elements, keys int) string { } func TestMultipleIncarnationsOfTheSameContract(t *testing.T) { - bkt := dbutils.StorageChangeSetBucket + bkt := kv.StorageChangeSet m := Mapper[bkt] - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) c1, err := tx.CursorDupSort(bkt) require.NoError(t, err) diff --git a/common/etl/buffers.go b/common/etl/buffers.go index 15ab9ea3a31..7ea98ac1db1 100644 --- a/common/etl/buffers.go +++ b/common/etl/buffers.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/c2h5oh/datasize" - "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/ethdb/kv" ) const ( @@ -32,7 +32,7 @@ type Buffer interface { GetEntries() []sortableBufferEntry Sort() CheckFlushSize() bool - SetComparator(cmp dbutils.CmpFunc) + SetComparator(cmp kv.CmpFunc) } type sortableBufferEntry struct { @@ -58,7 +58,7 @@ type sortableBuffer struct { entries []sortableBufferEntry size int optimalSize int - comparator dbutils.CmpFunc + comparator kv.CmpFunc } func (b *sortableBuffer) Put(k, v []byte) { @@ -75,7 +75,7 @@ func (b *sortableBuffer) Len() int { return len(b.entries) } -func (b *sortableBuffer) SetComparator(cmp dbutils.CmpFunc) { +func (b *sortableBuffer) SetComparator(cmp kv.CmpFunc) { b.comparator = cmp } @@ -123,7 +123,7 @@ type appendSortableBuffer struct { size int optimalSize int sortedBuf []sortableBufferEntry - comparator dbutils.CmpFunc + 
comparator kv.CmpFunc } func (b *appendSortableBuffer) Put(k, v []byte) { @@ -137,7 +137,7 @@ func (b *appendSortableBuffer) Put(k, v []byte) { b.entries[ks] = stored } -func (b *appendSortableBuffer) SetComparator(cmp dbutils.CmpFunc) { +func (b *appendSortableBuffer) SetComparator(cmp kv.CmpFunc) { b.comparator = cmp } @@ -196,10 +196,10 @@ type oldestEntrySortableBuffer struct { size int optimalSize int sortedBuf []sortableBufferEntry - comparator dbutils.CmpFunc + comparator kv.CmpFunc } -func (b *oldestEntrySortableBuffer) SetComparator(cmp dbutils.CmpFunc) { +func (b *oldestEntrySortableBuffer) SetComparator(cmp kv.CmpFunc) { b.comparator = cmp } diff --git a/common/etl/collector.go b/common/etl/collector.go index 6618dbea252..d7f5bae7339 100644 --- a/common/etl/collector.go +++ b/common/etl/collector.go @@ -13,8 +13,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ugorji/go/codec" ) @@ -108,7 +107,7 @@ func (c *Collector) Collect(k, v []byte) error { return c.extractNextFunc(k, k, v) } -func (c *Collector) Load(logPrefix string, db ethdb.RwTx, toBucket string, loadFunc LoadFunc, args TransformArgs) error { +func (c *Collector) Load(logPrefix string, db kv.RwTx, toBucket string, loadFunc LoadFunc, args TransformArgs) error { defer func() { if c.autoClean { c.Close(logPrefix) @@ -135,7 +134,7 @@ func (c *Collector) Close(logPrefix string) { } } -func loadFilesIntoBucket(logPrefix string, db ethdb.RwTx, bucket string, bufType int, providers []dataProvider, loadFunc LoadFunc, args TransformArgs) error { +func loadFilesIntoBucket(logPrefix string, db kv.RwTx, bucket string, bufType int, providers []dataProvider, loadFunc LoadFunc, args TransformArgs) error { decoder := codec.NewDecoder(nil, &cbor) var m runtime.MemStats @@ -151,7 +150,7 @@ func 
loadFilesIntoBucket(logPrefix string, db ethdb.RwTx, bucket string, bufType panic(eee) } } - var c ethdb.RwCursor + var c kv.RwCursor currentTable := &currentTableReader{db, bucket} haveSortingGuaranties := isIdentityLoadFunc(loadFunc) // user-defined loadFunc may change ordering @@ -169,7 +168,7 @@ func loadFilesIntoBucket(logPrefix string, db ethdb.RwTx, bucket string, bufType } } var canUseAppend bool - isDupSort := dbutils.BucketsConfigs[bucket].Flags&dbutils.DupSort != 0 && !dbutils.BucketsConfigs[bucket].AutoDupSortKeysConversion + isDupSort := kv.BucketsConfigs[bucket].Flags&kv.DupSort != 0 && !kv.BucketsConfigs[bucket].AutoDupSortKeysConversion logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -217,7 +216,7 @@ func loadFilesIntoBucket(logPrefix string, db ethdb.RwTx, bucket string, bufType } if canUseAppend { if isDupSort { - if err := c.(ethdb.RwCursorDupSort).AppendDup(k, v); err != nil { + if err := c.(kv.RwCursorDupSort).AppendDup(k, v); err != nil { return fmt.Errorf("%s: bucket: %s, appendDup: k=%x, %w", logPrefix, bucket, k, err) } } else { diff --git a/common/etl/etl.go b/common/etl/etl.go index a991c7d7335..0a7ff828f71 100644 --- a/common/etl/etl.go +++ b/common/etl/etl.go @@ -10,8 +10,8 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ugorji/go/codec" ) @@ -56,7 +56,7 @@ func NextKey(key []byte) ([]byte, error) { // loaded from files into a DB // * `key`: last commited key to the database (use etl.NextKey helper to use in LoadStartKey) // * `isDone`: true, if everything is processed -type LoadCommitHandler func(db ethdb.Putter, key []byte, isDone bool) error +type LoadCommitHandler func(db kv.Putter, key []byte, isDone bool) error type AdditionalLogArguments func(k, v []byte) (additionalLogArguments []interface{}) type
TransformArgs struct { @@ -70,12 +70,12 @@ type TransformArgs struct { LogDetailsExtract AdditionalLogArguments LogDetailsLoad AdditionalLogArguments - Comparator dbutils.CmpFunc + Comparator kv.CmpFunc } func Transform( logPrefix string, - db ethdb.RwTx, + db kv.RwTx, fromBucket string, toBucket string, tmpdir string, @@ -105,7 +105,7 @@ func Transform( func extractBucketIntoFiles( logPrefix string, - db ethdb.Tx, + db kv.Tx, bucket string, startkey []byte, endkey []byte, @@ -157,7 +157,7 @@ func extractBucketIntoFiles( } type currentTableReader struct { - getter ethdb.Tx + getter kv.Tx bucket string } diff --git a/common/etl/etl_test.go b/common/etl/etl_test.go index 32b974efab1..d3ce83d2dee 100644 --- a/common/etl/etl_test.go +++ b/common/etl/etl_test.go @@ -9,9 +9,8 @@ import ( "testing" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/assert" "github.com/ugorji/go/codec" ) @@ -81,8 +80,8 @@ func TestNextKeyErr(t *testing.T) { func TestFileDataProviders(t *testing.T) { // test invariant when we go through files (> 1 buffer) - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] generateTestData(t, tx, sourceBucket, 10) @@ -112,8 +111,8 @@ func TestFileDataProviders(t *testing.T) { func TestRAMDataProviders(t *testing.T) { // test invariant when we go through memory (1 buffer) - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] generateTestData(t, tx, sourceBucket, 10) collector := NewCollector("", NewSortableBuffer(BufferOptimalSize)) @@ -131,10 +130,10 @@ func TestRAMDataProviders(t *testing.T) { func TestTransformRAMOnly(t *testing.T) { // test invariant when we only have one buffer and it fits into RAM (exactly 1 
buffer) - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] - destBucket := dbutils.Buckets[1] + sourceBucket := kv.ErigonTables[0] + destBucket := kv.ErigonTables[1] generateTestData(t, tx, sourceBucket, 20) err := Transform( "logPrefix", @@ -151,9 +150,9 @@ func TestTransformRAMOnly(t *testing.T) { } func TestEmptySourceBucket(t *testing.T) { - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] - destBucket := dbutils.Buckets[1] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] + destBucket := kv.ErigonTables[1] err := Transform( "logPrefix", tx, @@ -170,9 +169,9 @@ func TestEmptySourceBucket(t *testing.T) { func TestTransformExtractStartKey(t *testing.T) { // test invariant when we only have one buffer and it fits into RAM (exactly 1 buffer) - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] - destBucket := dbutils.Buckets[1] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] + destBucket := kv.ErigonTables[1] generateTestData(t, tx, sourceBucket, 10) err := Transform( "logPrefix", @@ -190,9 +189,9 @@ func TestTransformExtractStartKey(t *testing.T) { func TestTransformThroughFiles(t *testing.T) { // test invariant when we go through files (> 1 buffer) - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] - destBucket := dbutils.Buckets[1] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] + destBucket := kv.ErigonTables[1] generateTestData(t, tx, sourceBucket, 10) err := Transform( "logPrefix", @@ -212,9 +211,9 @@ func TestTransformThroughFiles(t *testing.T) { func TestTransformDoubleOnExtract(t *testing.T) { // test invariant when extractFunc multiplies the data 2x - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] - destBucket := dbutils.Buckets[1] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] + destBucket := kv.ErigonTables[1] generateTestData(t, tx, sourceBucket, 10) err := Transform( "logPrefix", @@ 
-232,9 +231,9 @@ func TestTransformDoubleOnExtract(t *testing.T) { func TestTransformDoubleOnLoad(t *testing.T) { // test invariant when loadFunc multiplies the data 2x - _, tx := kv.NewTestTx(t) - sourceBucket := dbutils.Buckets[0] - destBucket := dbutils.Buckets[1] + _, tx := memdb.NewTestTx(t) + sourceBucket := kv.ErigonTables[0] + destBucket := kv.ErigonTables[1] generateTestData(t, tx, sourceBucket, 10) err := Transform( "logPrefix", @@ -250,7 +249,7 @@ func TestTransformDoubleOnLoad(t *testing.T) { compareBucketsDouble(t, tx, sourceBucket, destBucket) } -func generateTestData(t *testing.T, db ethdb.Putter, bucket string, count int) { +func generateTestData(t *testing.T, db kv.Putter, bucket string, count int) { for i := 0; i < count; i++ { k := []byte(fmt.Sprintf("%10d-key-%010d", i, i)) v := []byte(fmt.Sprintf("val-%099d", i)) @@ -324,7 +323,7 @@ func testLoadFromMapDoubleFunc(k []byte, v []byte, _ CurrentTableReader, next Lo return next(k, append(k, 0xBB), append(realValue, 0xBB)) } -func compareBuckets(t *testing.T, db ethdb.Tx, b1, b2 string, startKey []byte) { +func compareBuckets(t *testing.T, db kv.Tx, b1, b2 string, startKey []byte) { t.Helper() b1Map := make(map[string]string) err := db.ForEach(b1, startKey, func(k, v []byte) error { @@ -341,7 +340,7 @@ func compareBuckets(t *testing.T, db ethdb.Tx, b1, b2 string, startKey []byte) { assert.Equal(t, b1Map, b2Map) } -func compareBucketsDouble(t *testing.T, db ethdb.Tx, b1, b2 string) { +func compareBucketsDouble(t *testing.T, db kv.Tx, b1, b2 string) { t.Helper() b1Map := make(map[string]string) err := db.ForEach(b1, nil, func(k, v []byte) error { diff --git a/common/etl/heap.go b/common/etl/heap.go index 2637cfa30f7..3e5ff8d378f 100644 --- a/common/etl/heap.go +++ b/common/etl/heap.go @@ -3,7 +3,7 @@ package etl import ( "bytes" - "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/ethdb/kv" ) type HeapElem struct { @@ -13,7 +13,7 @@ type HeapElem struct { } type Heap 
struct { - comparator dbutils.CmpFunc + comparator kv.CmpFunc elems []HeapElem } diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go index 1226e99c57a..cead2e9aa81 100644 --- a/consensus/aura/aura.go +++ b/consensus/aura/aura.go @@ -37,7 +37,7 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -322,7 +322,7 @@ func epochTransitionFor(chain consensus.ChainHeaderReader, e consensus.EpochRead // AuRa //nolint type AuRa struct { - db ethdb.RwKV // Database to store and retrieve snapshot checkpoints + db kv.RwDB // Database to store and retrieve snapshot checkpoints exitCh chan struct{} lock sync.RWMutex // Protects the signer fields @@ -389,7 +389,7 @@ func (pb *GasLimitOverride) Add(hash common.Hash, b *uint256.Int) { pb.cache.ContainsOrAdd(hash, b) } -func NewAuRa(config *params.AuRaConfig, db ethdb.RwKV, ourSigningAddress common.Address, engineParamsJson []byte) (*AuRa, error) { +func NewAuRa(config *params.AuRaConfig, db kv.RwDB, ourSigningAddress common.Address, engineParamsJson []byte) (*AuRa, error) { spec := JsonSpec{} err := json.Unmarshal(engineParamsJson, &spec) if err != nil { diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go index 144381f8f03..51a4d4fce7e 100644 --- a/consensus/aura/aura_test.go +++ b/consensus/aura/aura_test.go @@ -11,8 +11,8 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" ) @@ -92,14 +92,14 @@ import ( */ func TestRewardContract(t 
*testing.T) { t.Skip("not ready yet") - auraDB, require := kv.NewTestKV(t), require.New(t) + auraDB, require := memdb.NewTestDB(t), require.New(t) engine, err := aura.NewAuRa(nil, auraDB, common.Address{}, test.AuthorityRoundBlockRewardContract) require.NoError(err) m := stages.MockWithGenesisEngine(t, core.DefaultSokolGenesisBlock(), engine) m.EnableLogs() var accBefore *accounts.Account - err = auraDB.View(context.Background(), func(tx ethdb.Tx) (err error) { _, err = rawdb.ReadAccount(tx, m.Address, accBefore); return err }) + err = auraDB.View(context.Background(), func(tx kv.Tx) (err error) { _, err = rawdb.ReadAccount(tx, m.Address, accBefore); return err }) require.NoError(err) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, gen *core.BlockGen) { @@ -111,7 +111,7 @@ func TestRewardContract(t *testing.T) { require.NoError(err) var accAfter *accounts.Account - err = auraDB.View(context.Background(), func(tx ethdb.Tx) (err error) { _, err = rawdb.ReadAccount(tx, m.Address, accAfter); return err }) + err = auraDB.View(context.Background(), func(tx kv.Tx) (err error) { _, err = rawdb.ReadAccount(tx, m.Address, accAfter); return err }) require.NoError(err) fmt.Printf("balance: %d\n", accAfter.Balance.Uint64()) diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 36d5d150d29..9773048e749 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -29,6 +29,7 @@ import ( "github.com/goccy/go-json" lru "github.com/hashicorp/golang-lru" + "github.com/ledgerwatch/erigon/ethdb/kv" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common" @@ -40,7 +41,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -175,7 +175,7 @@ type Clique struct { 
chainConfig *params.ChainConfig config *params.CliqueConfig // Consensus engine configuration parameters snapshotConfig *params.SnapshotConfig // Consensus engine configuration parameters - db ethdb.RwKV // Database to store and retrieve snapshot checkpoints + db kv.RwDB // Database to store and retrieve snapshot checkpoints signatures *lru.ARCCache // Signatures of recent blocks to speed up mining recents *lru.ARCCache // Snapshots for recent block to speed up reorgs @@ -194,7 +194,7 @@ type Clique struct { // New creates a Clique proof-of-authority consensus engine with the initial // signers set to the ones provided by the user. -func New(cfg *params.ChainConfig, snapshotConfig *params.SnapshotConfig, cliqueDB ethdb.RwKV) *Clique { +func New(cfg *params.ChainConfig, snapshotConfig *params.SnapshotConfig, cliqueDB kv.RwDB) *Clique { config := cfg.Clique // Set any missing consensus parameters to their defaults @@ -577,7 +577,7 @@ func (c *Clique) snapshots(latest uint64, total int) ([]*Snapshot, error) { } defer tx.Rollback() - cur, err1 := tx.Cursor(dbutils.CliqueSeparateBucket) + cur, err1 := tx.Cursor(kv.CliqueSeparate) if err1 != nil { return nil, err1 } diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go index c0795b82a49..40096c6d093 100644 --- a/consensus/clique/clique_test.go +++ b/consensus/clique/clique_test.go @@ -23,6 +23,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/clique" @@ -30,7 +31,6 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages" ) @@ -44,7 +44,7 @@ import ( func TestReimportMirroredState(t *testing.T) { // Initialize a Clique chain with a single 
signer var ( - cliqueDB = kv.NewTestKV(t) + cliqueDB = memdb.NewTestDB(t) key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key.PublicKey) engine = clique.New(params.AllCliqueProtocolChanges, params.CliqueSnapshot, cliqueDB) @@ -62,7 +62,7 @@ func TestReimportMirroredState(t *testing.T) { // Generate a batch of blocks, each properly signed getHeader := func(hash common.Hash, number uint64) (h *types.Header) { - if err := m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { h = rawdb.ReadHeader(tx, hash, number) return nil }); err != nil { @@ -107,7 +107,7 @@ func TestReimportMirroredState(t *testing.T) { if err := m.InsertChain(chain.Slice(0, 2)); err != nil { t.Fatalf("failed to insert initial blocks: %v", err) } - if err := m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { if head, err1 := rawdb.ReadBlockByHash(tx, rawdb.ReadHeadHeaderHash(tx)); err1 != nil { t.Errorf("could not read chain head: %v", err1) } else if head.NumberU64() != 2 { @@ -124,7 +124,7 @@ func TestReimportMirroredState(t *testing.T) { if err := m.InsertChain(chain.Slice(2, chain.Length)); err != nil { t.Fatalf("failed to insert final block: %v", err) } - if err := m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { if head, err1 := rawdb.ReadBlockByHash(tx, rawdb.ReadHeadHeaderHash(tx)); err1 != nil { t.Errorf("could not read chain head: %v", err1) } else if head.NumberU64() != 3 { diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index 578cf3a209b..cc57d8ce0f6 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -26,11 +26,11 @@ import ( "github.com/goccy/go-json" lru "github.com/hashicorp/golang-lru" + "github.com/ledgerwatch/erigon/ethdb/kv" 
"github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) @@ -91,13 +91,13 @@ func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, s } // loadSnapshot loads an existing snapshot from the database. -func loadSnapshot(config *params.CliqueConfig, db ethdb.RwKV, num uint64, hash common.Hash) (*Snapshot, error) { +func loadSnapshot(config *params.CliqueConfig, db kv.RwDB, num uint64, hash common.Hash) (*Snapshot, error) { tx, err := db.BeginRo(context.Background()) if err != nil { return nil, err } defer tx.Rollback() - blob, err := tx.GetOne(dbutils.CliqueSeparateBucket, SnapshotFullKey(num, hash)) + blob, err := tx.GetOne(kv.CliqueSeparate, SnapshotFullKey(num, hash)) if err != nil { return nil, err } @@ -113,14 +113,14 @@ func loadSnapshot(config *params.CliqueConfig, db ethdb.RwKV, num uint64, hash c var ErrNotFound = errors.New("not found") -func lastSnapshot(db ethdb.RwKV) (uint64, error) { +func lastSnapshot(db kv.RwDB) (uint64, error) { tx, err := db.BeginRo(context.Background()) if err != nil { return 0, err } defer tx.Rollback() - lastEnc, err := tx.GetOne(dbutils.CliqueLastSnapshotBucket, LastSnapshotKey()) + lastEnc, err := tx.GetOne(kv.CliqueLastSnapshot, LastSnapshotKey()) if err != nil { return 0, fmt.Errorf("failed check last clique snapshot: %d", err) } @@ -138,13 +138,13 @@ func lastSnapshot(db ethdb.RwKV) (uint64, error) { } // store inserts the snapshot into the database. 
-func (s *Snapshot) store(db ethdb.RwKV) error { +func (s *Snapshot) store(db kv.RwDB) error { blob, err := json.Marshal(s) if err != nil { return err } - return db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.CliqueSeparateBucket, SnapshotFullKey(s.Number, s.Hash), blob) + return db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.CliqueSeparate, SnapshotFullKey(s.Number, s.Hash), blob) }) } diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index f7b0283b35f..e5550acd982 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -28,7 +28,8 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages" ) @@ -417,7 +418,7 @@ func TestClique(t *testing.T) { Epoch: tt.epoch, } - cliqueDB := kv.NewTestKV(t) + cliqueDB := memdb.NewTestDB(t) engine := clique.New(&config, params.CliqueSnapshot, cliqueDB) engine.FakeDiff = true @@ -503,7 +504,7 @@ func TestClique(t *testing.T) { // No failure was produced or requested, generate the final voting snapshot head := chain.Blocks[len(chain.Blocks)-1] - snap, err := engine.Snapshot(stagedsync.ChainReader{Cfg: config, Db: kv.NewObjectDatabase(m.DB)}, head.NumberU64(), head.Hash(), nil) + snap, err := engine.Snapshot(stagedsync.ChainReader{Cfg: config, Db: olddb.NewObjectDatabase(m.DB)}, head.NumberU64(), head.Hash(), nil) if err != nil { t.Errorf("test %d: failed to retrieve voting snapshot %d(%s): %v", i, head.NumberU64(), head.Hash().Hex(), err) diff --git a/consensus/db/db.go b/consensus/db/db.go index dad772c5b22..17705e902e3 100644 --- a/consensus/db/db.go +++ b/consensus/db/db.go @@ -1,12 +1,13 @@ package db 
import ( - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/log" ) -func OpenDatabase(path string, inmem bool) ethdb.RwKV { - opts := kv.NewMDBX() +func OpenDatabase(path string, logger log.Logger, inmem bool) kv.RwDB { + opts := mdbx.NewMDBX(logger) if inmem { opts = opts.InMem() } else { diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 4688bef72b4..6c7c5ac908f 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -23,7 +23,7 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages" ) @@ -47,10 +47,10 @@ func TestHeaderVerification(t *testing.T) { for j, valid := range []bool{true, false} { if valid { engine := ethash.NewFaker() - err = engine.VerifyHeaders(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: kv.NewObjectDatabase(m.DB)}, []*types.Header{chain.Headers[i]}, []bool{true}) + err = engine.VerifyHeaders(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: olddb.NewObjectDatabase(m.DB)}, []*types.Header{chain.Headers[i]}, []bool{true}) } else { engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeaders(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: kv.NewObjectDatabase(m.DB)}, []*types.Header{chain.Headers[i]}, []bool{true}) + err = engine.VerifyHeaders(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: olddb.NewObjectDatabase(m.DB)}, []*types.Header{chain.Headers[i]}, []bool{true}) } if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) diff --git a/core/chain_makers.go b/core/chain_makers.go index 0a5364752e8..8192b5afddc 100644 --- 
a/core/chain_makers.go +++ b/core/chain_makers.go @@ -22,13 +22,12 @@ import ( "math/big" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/trie" ) @@ -240,7 +239,7 @@ func (cp ChainPack) Slice(i, j int) *ChainPack { // Blocks created by GenerateChain do not contain valid proof of work // values. Inserting them into BlockChain requires use of FakePow or // a similar non-validating proof of work implementation. -func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.RwKV, n int, gen func(int, *BlockGen), +func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db kv.RwDB, n int, gen func(int, *BlockGen), intermediateHashes bool, ) (*ChainPack, error) { if config == nil { @@ -284,19 +283,19 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return nil, nil, fmt.Errorf("call to CommitBlock to plainStateWriter: %w", err) } - if err := tx.ClearBucket(dbutils.HashedAccountsBucket); err != nil { - return nil, nil, fmt.Errorf("clear HashedAccountsBucket bucket: %w", err) + if err := tx.ClearBucket(kv.HashedAccounts); err != nil { + return nil, nil, fmt.Errorf("clear HashedAccounts bucket: %w", err) } - if err := tx.ClearBucket(dbutils.HashedStorageBucket); err != nil { - return nil, nil, fmt.Errorf("clear HashedStorageBucket bucket: %w", err) + if err := tx.ClearBucket(kv.HashedStorage); err != nil { + return nil, nil, fmt.Errorf("clear HashedStorage bucket: %w", err) } - if err := tx.ClearBucket(dbutils.TrieOfAccountsBucket); err != nil { - return nil, 
nil, fmt.Errorf("clear TrieOfAccountsBucket bucket: %w", err) + if err := tx.ClearBucket(kv.TrieOfAccounts); err != nil { + return nil, nil, fmt.Errorf("clear TrieOfAccounts bucket: %w", err) } - if err := tx.ClearBucket(dbutils.TrieOfStorageBucket); err != nil { - return nil, nil, fmt.Errorf("clear TrieOfStorageBucket bucket: %w", err) + if err := tx.ClearBucket(kv.TrieOfStorage); err != nil { + return nil, nil, fmt.Errorf("clear TrieOfStorage bucket: %w", err) } - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return nil, nil, err } @@ -324,11 +323,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse h.Sha.Write(k[common.AddressLength+common.IncarnationLength:]) //nolint:errcheck h.Sha.Read(newK[common.HashLength+common.IncarnationLength:]) - if err = tx.Put(dbutils.HashedStorageBucket, newK, common.CopyBytes(v)); err != nil { + if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil { return nil, nil, fmt.Errorf("insert hashed key: %w", err) } } else { - if err = tx.Put(dbutils.HashedAccountsBucket, newK, common.CopyBytes(v)); err != nil { + if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil { return nil, nil, fmt.Errorf("insert hashed key: %w", err) } } @@ -337,14 +336,14 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse c.Close() if GenerateTrace { fmt.Printf("State after %d================\n", b.header.Number) - if err := tx.ForEach(dbutils.HashedAccountsBucket, nil, func(k, v []byte) error { + if err := tx.ForEach(kv.HashedAccounts, nil, func(k, v []byte) error { fmt.Printf("%x: %x\n", k, v) return nil }); err != nil { return nil, nil, fmt.Errorf("print state: %w", err) } fmt.Printf("..................\n") - if err := tx.ForEach(dbutils.HashedStorageBucket, nil, func(k, v []byte) error { + if err := tx.ForEach(kv.HashedStorage, nil, func(k, v []byte) error { fmt.Printf("%x: %x\n", k, v) return 
nil }); err != nil { diff --git a/core/genesis.go b/core/genesis.go index c30f1d39c2a..f9301bf2141 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -30,15 +30,14 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -170,7 +169,7 @@ func (e *GenesisMismatchError) Error() string { // error is a *params.ConfigCompatError and the new, unwritten config is returned. // // The returned chain configuration is never nil. -func CommitGenesisBlock(db ethdb.RwKV, genesis *Genesis) (*params.ChainConfig, *types.Block, error) { +func CommitGenesisBlock(db kv.RwDB, genesis *Genesis) (*params.ChainConfig, *types.Block, error) { tx, err := db.BeginRw(context.Background()) if err != nil { return nil, nil, err @@ -187,7 +186,7 @@ func CommitGenesisBlock(db ethdb.RwKV, genesis *Genesis) (*params.ChainConfig, * return c, b, nil } -func MustCommitGenesisBlock(db ethdb.RwKV, genesis *Genesis) (*params.ChainConfig, *types.Block) { +func MustCommitGenesisBlock(db kv.RwDB, genesis *Genesis) (*params.ChainConfig, *types.Block) { c, b, err := CommitGenesisBlock(db, genesis) if err != nil { panic(err) @@ -195,7 +194,7 @@ func MustCommitGenesisBlock(db ethdb.RwKV, genesis *Genesis) (*params.ChainConfi return c, b } -func OverrideGenesisBlock(db ethdb.RwTx, genesis *Genesis) (*params.ChainConfig, *types.Block, error) { +func OverrideGenesisBlock(db kv.RwTx, genesis *Genesis) (*params.ChainConfig, *types.Block, error) { stored, 
err := rawdb.ReadCanonicalHash(db, 0) if err != nil { return nil, nil, err @@ -211,7 +210,7 @@ func OverrideGenesisBlock(db ethdb.RwTx, genesis *Genesis) (*params.ChainConfig, return WriteGenesisBlock(db, genesis) } -func WriteGenesisBlock(db ethdb.RwTx, genesis *Genesis) (*params.ChainConfig, *types.Block, error) { +func WriteGenesisBlock(db kv.RwTx, genesis *Genesis) (*params.ChainConfig, *types.Block, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, nil, ErrGenesisNoConfig } @@ -324,7 +323,7 @@ func (g *Genesis) ToBlock() (*types.Block, *state.IntraBlockState, error) { wg.Add(1) go func() { // we may run inside write tx, can't open 2nd write tx in same goroutine defer wg.Done() - tmpDB := kv.NewMDBX().InMem().MustOpen() + tmpDB := mdbx.NewMDBX(log.New()).InMem().MustOpen() defer tmpDB.Close() tx, err := tmpDB.BeginRw(context.Background()) if err != nil { @@ -405,7 +404,7 @@ func (g *Genesis) ToBlock() (*types.Block, *state.IntraBlockState, error) { return types.NewBlock(head, nil, nil, nil), statedb, nil } -func (g *Genesis) WriteGenesisState(tx ethdb.RwTx) (*types.Block, *state.IntraBlockState, error) { +func (g *Genesis) WriteGenesisState(tx kv.RwTx) (*types.Block, *state.IntraBlockState, error) { block, statedb, err := g.ToBlock() if err != nil { return nil, nil, err @@ -415,7 +414,7 @@ func (g *Genesis) WriteGenesisState(tx ethdb.RwTx) (*types.Block, *state.IntraBl // Special case for weird tests - inaccessible storage var b [8]byte binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) - if err := tx.Put(dbutils.IncarnationMapBucket, addr[:], b[:]); err != nil { + if err := tx.Put(kv.IncarnationMap, addr[:], b[:]); err != nil { return nil, nil, err } } @@ -439,7 +438,7 @@ func (g *Genesis) WriteGenesisState(tx ethdb.RwTx) (*types.Block, *state.IntraBl return block, statedb, nil } -func (g *Genesis) MustWrite(tx ethdb.RwTx, history bool) (*types.Block, *state.IntraBlockState) { +func (g *Genesis) 
MustWrite(tx kv.RwTx, history bool) (*types.Block, *state.IntraBlockState) { b, s, err := g.Write(tx) if err != nil { panic(err) @@ -449,7 +448,7 @@ func (g *Genesis) MustWrite(tx ethdb.RwTx, history bool) (*types.Block, *state.I // Write writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func (g *Genesis) Write(tx ethdb.RwTx) (*types.Block, *state.IntraBlockState, error) { +func (g *Genesis) Write(tx kv.RwTx) (*types.Block, *state.IntraBlockState, error) { block, statedb, err2 := g.WriteGenesisState(tx) if err2 != nil { return block, statedb, err2 @@ -487,7 +486,7 @@ func (g *Genesis) Write(tx ethdb.RwTx) (*types.Block, *state.IntraBlockState, er // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. -func (g *Genesis) MustCommit(db ethdb.RwKV) *types.Block { +func (g *Genesis) MustCommit(db kv.RwDB) *types.Block { tx, err := db.BeginRw(context.Background()) if err != nil { panic(err) @@ -502,7 +501,7 @@ func (g *Genesis) MustCommit(db ethdb.RwKV) *types.Block { } // GenesisBlockForTesting creates and writes a block in which addr has the given wei balance. 
-func GenesisBlockForTesting(db ethdb.RwKV, addr common.Address, balance *big.Int) *types.Block { +func GenesisBlockForTesting(db kv.RwDB, addr common.Address, balance *big.Int) *types.Block { g := Genesis{Alloc: GenesisAlloc{addr: {Balance: balance}}, Config: params.TestChainConfig} block := g.MustCommit(db) return block @@ -513,7 +512,7 @@ type GenAccount struct { Balance *big.Int } -func GenesisWithAccounts(db ethdb.RwKV, accs []GenAccount) *types.Block { +func GenesisWithAccounts(db kv.RwDB, accs []GenAccount) *types.Block { g := Genesis{Config: params.TestChainConfig} allocs := make(map[common.Address]GenesisAccount) for _, acc := range accs { diff --git a/core/rawdb/accessors_account.go b/core/rawdb/accessors_account.go index 0bbe72f4ea8..4edd926cdd5 100644 --- a/core/rawdb/accessors_account.go +++ b/core/rawdb/accessors_account.go @@ -18,14 +18,13 @@ package rawdb import ( "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) -func ReadAccount(db ethdb.Tx, addrHash common.Address, acc *accounts.Account) (bool, error) { +func ReadAccount(db kv.Tx, addrHash common.Address, acc *accounts.Account) (bool, error) { addrHashBytes := addrHash[:] - enc, err := db.GetOne(dbutils.PlainStateBucket, addrHashBytes) + enc, err := db.GetOne(kv.PlainStateBucket, addrHashBytes) if err != nil { return false, err } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index b8d6b94ec57..af74d94280d 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -26,15 +26,15 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/cbor" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" 
"github.com/ledgerwatch/erigon/rlp" ) // ReadCanonicalHash retrieves the hash assigned to a canonical block number. -func ReadCanonicalHash(db ethdb.KVGetter, number uint64) (common.Hash, error) { - data, err := db.GetOne(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(number)) +func ReadCanonicalHash(db kv.Getter, number uint64) (common.Hash, error) { + data, err := db.GetOne(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number)) if err != nil { return common.Hash{}, fmt.Errorf("failed ReadCanonicalHash: %w, number=%d", err, number) } @@ -45,24 +45,24 @@ func ReadCanonicalHash(db ethdb.KVGetter, number uint64) (common.Hash, error) { } // WriteCanonicalHash stores the hash assigned to a canonical block number. -func WriteCanonicalHash(db ethdb.Putter, hash common.Hash, number uint64) error { - if err := db.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(number), hash.Bytes()); err != nil { +func WriteCanonicalHash(db kv.Putter, hash common.Hash, number uint64) error { + if err := db.Put(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number), hash.Bytes()); err != nil { return fmt.Errorf("failed to store number to hash mapping: %w", err) } return nil } // DeleteCanonicalHash removes the number to hash canonical mapping. -func DeleteCanonicalHash(db ethdb.Deleter, number uint64) error { - if err := db.Delete(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(number), nil); err != nil { +func DeleteCanonicalHash(db kv.Deleter, number uint64) error { + if err := db.Delete(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number), nil); err != nil { return fmt.Errorf("failed to delete number to hash mapping: %w", err) } return nil } // ReadHeaderNumber returns the header number assigned to a hash. 
-func ReadHeaderNumber(db ethdb.KVGetter, hash common.Hash) *uint64 { - data, err := db.GetOne(dbutils.HeaderNumberBucket, hash.Bytes()) +func ReadHeaderNumber(db kv.Getter, hash common.Hash) *uint64 { + data, err := db.GetOne(kv.HeaderNumber, hash.Bytes()) if err != nil { log.Error("ReadHeaderNumber failed", "err", err) } @@ -78,23 +78,23 @@ func ReadHeaderNumber(db ethdb.KVGetter, hash common.Hash) *uint64 { } // WriteHeaderNumber stores the hash->number mapping. -func WriteHeaderNumber(db ethdb.Putter, hash common.Hash, number uint64) { +func WriteHeaderNumber(db kv.Putter, hash common.Hash, number uint64) { enc := dbutils.EncodeBlockNumber(number) - if err := db.Put(dbutils.HeaderNumberBucket, hash[:], enc); err != nil { + if err := db.Put(kv.HeaderNumber, hash[:], enc); err != nil { log.Crit("Failed to store hash to number mapping", "err", err) } } // DeleteHeaderNumber removes hash->number mapping. -func DeleteHeaderNumber(db ethdb.Deleter, hash common.Hash) { - if err := db.Delete(dbutils.HeaderNumberBucket, hash[:], nil); err != nil { +func DeleteHeaderNumber(db kv.Deleter, hash common.Hash) { + if err := db.Delete(kv.HeaderNumber, hash[:], nil); err != nil { log.Crit("Failed to delete hash to number mapping", "err", err) } } // ReadHeadHeaderHash retrieves the hash of the current canonical head header. -func ReadHeadHeaderHash(db ethdb.KVGetter) common.Hash { - data, err := db.GetOne(dbutils.HeadHeaderKey, []byte(dbutils.HeadHeaderKey)) +func ReadHeadHeaderHash(db kv.Getter) common.Hash { + data, err := db.GetOne(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey)) if err != nil { log.Error("ReadHeadHeaderHash failed", "err", err) } @@ -105,16 +105,16 @@ func ReadHeadHeaderHash(db ethdb.KVGetter) common.Hash { } // WriteHeadHeaderHash stores the hash of the current canonical head header. 
-func WriteHeadHeaderHash(db ethdb.Putter, hash common.Hash) error { - if err := db.Put(dbutils.HeadHeaderKey, []byte(dbutils.HeadHeaderKey), hash.Bytes()); err != nil { +func WriteHeadHeaderHash(db kv.Putter, hash common.Hash) error { + if err := db.Put(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey), hash.Bytes()); err != nil { return fmt.Errorf("failed to store last header's hash: %w", err) } return nil } // ReadHeadBlockHash retrieves the hash of the current canonical head block. -func ReadHeadBlockHash(db ethdb.KVGetter) common.Hash { - data, err := db.GetOne(dbutils.HeadBlockKey, []byte(dbutils.HeadBlockKey)) +func ReadHeadBlockHash(db kv.Getter) common.Hash { + data, err := db.GetOne(kv.HeadBlockKey, []byte(kv.HeadBlockKey)) if err != nil { log.Error("ReadHeadBlockHash failed", "err", err) } @@ -125,15 +125,15 @@ func ReadHeadBlockHash(db ethdb.KVGetter) common.Hash { } // WriteHeadBlockHash stores the head block's hash. -func WriteHeadBlockHash(db ethdb.Putter, hash common.Hash) { - if err := db.Put(dbutils.HeadBlockKey, []byte(dbutils.HeadBlockKey), hash.Bytes()); err != nil { +func WriteHeadBlockHash(db kv.Putter, hash common.Hash) { + if err := db.Put(kv.HeadBlockKey, []byte(kv.HeadBlockKey), hash.Bytes()); err != nil { log.Crit("Failed to store last block's hash", "err", err) } } // ReadHeaderRLP retrieves a block header in its raw RLP database encoding. -func ReadHeaderRLP(db ethdb.KVGetter, hash common.Hash, number uint64) rlp.RawValue { - data, err := db.GetOne(dbutils.HeadersBucket, dbutils.HeaderKey(number, hash)) +func ReadHeaderRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue { + data, err := db.GetOne(kv.Headers, dbutils.HeaderKey(number, hash)) if err != nil { log.Error("ReadHeaderRLP failed", "err", err) } @@ -141,15 +141,15 @@ func ReadHeaderRLP(db ethdb.KVGetter, hash common.Hash, number uint64) rlp.RawVa } // HasHeader verifies the existence of a block header corresponding to the hash. 
-func HasHeader(db ethdb.Has, hash common.Hash, number uint64) bool { - if has, err := db.Has(dbutils.HeadersBucket, dbutils.HeaderKey(number, hash)); !has || err != nil { +func HasHeader(db kv.Has, hash common.Hash, number uint64) bool { + if has, err := db.Has(kv.Headers, dbutils.HeaderKey(number, hash)); !has || err != nil { return false } return true } // ReadHeader retrieves the block header corresponding to the hash. -func ReadHeader(db ethdb.KVGetter, hash common.Hash, number uint64) *types.Header { +func ReadHeader(db kv.Getter, hash common.Hash, number uint64) *types.Header { data := ReadHeaderRLP(db, hash, number) if len(data) == 0 { return nil @@ -162,12 +162,12 @@ func ReadHeader(db ethdb.KVGetter, hash common.Hash, number uint64) *types.Heade return header } -func ReadCurrentBlockNumber(db ethdb.KVGetter) *uint64 { +func ReadCurrentBlockNumber(db kv.Getter) *uint64 { headHash := ReadHeadHeaderHash(db) return ReadHeaderNumber(db, headHash) } -func ReadCurrentHeader(db ethdb.KVGetter) *types.Header { +func ReadCurrentHeader(db kv.Getter) *types.Header { headHash := ReadHeadHeaderHash(db) headNumber := ReadHeaderNumber(db, headHash) if headNumber == nil { @@ -176,7 +176,7 @@ func ReadCurrentHeader(db ethdb.KVGetter) *types.Header { return ReadHeader(db, headHash, *headNumber) } -func ReadCurrentBlock(db ethdb.Tx) *types.Block { +func ReadCurrentBlock(db kv.Tx) *types.Block { headHash := ReadHeadBlockHash(db) headNumber := ReadHeaderNumber(db, headHash) if headNumber == nil { @@ -185,9 +185,9 @@ func ReadCurrentBlock(db ethdb.Tx) *types.Block { return ReadBlock(db, headHash, *headNumber) } -func ReadHeadersByNumber(db ethdb.Tx, number uint64) ([]*types.Header, error) { +func ReadHeadersByNumber(db kv.Tx, number uint64) ([]*types.Header, error) { var res []*types.Header - c, err := db.Cursor(dbutils.HeadersBucket) + c, err := db.Cursor(kv.Headers) if err != nil { return nil, err } @@ -212,13 +212,13 @@ func ReadHeadersByNumber(db ethdb.Tx, number uint64) 
([]*types.Header, error) { // WriteHeader stores a block header into the database and also stores the hash- // to-number mapping. -func WriteHeader(db ethdb.Putter, header *types.Header) { +func WriteHeader(db kv.Putter, header *types.Header) { var ( hash = header.Hash() number = header.Number.Uint64() encoded = dbutils.EncodeBlockNumber(number) ) - if err := db.Put(dbutils.HeaderNumberBucket, hash[:], encoded); err != nil { + if err := db.Put(kv.HeaderNumber, hash[:], encoded); err != nil { log.Crit("Failed to store hash to number mapping", "err", err) } // Write the encoded header @@ -226,23 +226,23 @@ func WriteHeader(db ethdb.Putter, header *types.Header) { if err != nil { log.Crit("Failed to RLP encode header", "err", err) } - if err := db.Put(dbutils.HeadersBucket, dbutils.HeaderKey(number, hash), data); err != nil { + if err := db.Put(kv.Headers, dbutils.HeaderKey(number, hash), data); err != nil { log.Crit("Failed to store header", "err", err) } } // DeleteHeader removes all block header data associated with a hash. -func DeleteHeader(db ethdb.Deleter, hash common.Hash, number uint64) { - if err := db.Delete(dbutils.HeadersBucket, dbutils.HeaderKey(number, hash), nil); err != nil { +func DeleteHeader(db kv.Deleter, hash common.Hash, number uint64) { + if err := db.Delete(kv.Headers, dbutils.HeaderKey(number, hash), nil); err != nil { log.Crit("Failed to delete header", "err", err) } - if err := db.Delete(dbutils.HeaderNumberBucket, hash.Bytes(), nil); err != nil { + if err := db.Delete(kv.HeaderNumber, hash.Bytes(), nil); err != nil { log.Crit("Failed to delete hash to number mapping", "err", err) } } // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. 
-func ReadBodyRLP(db ethdb.Tx, hash common.Hash, number uint64) rlp.RawValue { +func ReadBodyRLP(db kv.Tx, hash common.Hash, number uint64) rlp.RawValue { body := ReadBody(db, hash, number) bodyRlp, err := rlp.EncodeToBytes(body) if err != nil { @@ -251,15 +251,15 @@ func ReadBodyRLP(db ethdb.Tx, hash common.Hash, number uint64) rlp.RawValue { return bodyRlp } -func ReadStorageBodyRLP(db ethdb.KVGetter, hash common.Hash, number uint64) rlp.RawValue { - bodyRlp, err := db.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(number, hash)) +func ReadStorageBodyRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue { + bodyRlp, err := db.GetOne(kv.BlockBody, dbutils.BlockBodyKey(number, hash)) if err != nil { log.Error("ReadBodyRLP failed", "err", err) } return bodyRlp } -func ReadTransactions(db ethdb.KVGetter, baseTxId uint64, amount uint32) ([]types.Transaction, error) { +func ReadTransactions(db kv.Getter, baseTxId uint64, amount uint32) ([]types.Transaction, error) { if amount == 0 { return []types.Transaction{}, nil } @@ -270,7 +270,7 @@ func ReadTransactions(db ethdb.KVGetter, baseTxId uint64, amount uint32) ([]type binary.BigEndian.PutUint64(txIdKey, baseTxId) i := uint32(0) - if err := db.ForAmount(dbutils.EthTx, txIdKey, amount, func(k, v []byte) error { + if err := db.ForAmount(kv.EthTx, txIdKey, amount, func(k, v []byte) error { var decodeErr error reader.Reset(v) stream.Reset(reader, 0) @@ -286,10 +286,10 @@ func ReadTransactions(db ethdb.KVGetter, baseTxId uint64, amount uint32) ([]type return txs, nil } -func WriteTransactions(db ethdb.RwTx, txs []types.Transaction, baseTxId uint64) error { +func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) error { txId := baseTxId buf := bytes.NewBuffer(nil) - c, err := db.RwCursor(dbutils.EthTx) + c, err := db.RwCursor(kv.EthTx) if err != nil { return err } @@ -313,9 +313,9 @@ func WriteTransactions(db ethdb.RwTx, txs []types.Transaction, baseTxId uint64) return nil } -func 
WriteRawTransactions(db ethdb.RwTx, txs [][]byte, baseTxId uint64) error { +func WriteRawTransactions(db kv.RwTx, txs [][]byte, baseTxId uint64) error { txId := baseTxId - c, err := db.RwCursor(dbutils.EthTx) + c, err := db.RwCursor(kv.EthTx) if err != nil { return err } @@ -334,21 +334,21 @@ func WriteRawTransactions(db ethdb.RwTx, txs [][]byte, baseTxId uint64) error { } // WriteBodyRLP stores an RLP encoded block body into the database. -func WriteBodyRLP(db ethdb.Putter, hash common.Hash, number uint64, rlp rlp.RawValue) { - if err := db.Put(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(number, hash), rlp); err != nil { +func WriteBodyRLP(db kv.Putter, hash common.Hash, number uint64, rlp rlp.RawValue) { + if err := db.Put(kv.BlockBody, dbutils.BlockBodyKey(number, hash), rlp); err != nil { log.Crit("Failed to store block body", "err", err) } } // HasBody verifies the existence of a block body corresponding to the hash. -func HasBody(db ethdb.Has, hash common.Hash, number uint64) bool { - if has, err := db.Has(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(number, hash)); !has || err != nil { +func HasBody(db kv.Has, hash common.Hash, number uint64) bool { + if has, err := db.Has(kv.BlockBody, dbutils.BlockBodyKey(number, hash)); !has || err != nil { return false } return true } -func ReadBodyByNumber(db ethdb.Tx, number uint64) (*types.Body, uint64, uint32, error) { +func ReadBodyByNumber(db kv.Tx, number uint64) (*types.Body, uint64, uint32, error) { hash, err := ReadCanonicalHash(db, number) if err != nil { return nil, 0, 0, fmt.Errorf("failed ReadCanonicalHash: %w", err) @@ -360,7 +360,7 @@ func ReadBodyByNumber(db ethdb.Tx, number uint64) (*types.Body, uint64, uint32, return body, baseTxId, txAmount, nil } -func ReadBody(db ethdb.KVGetter, hash common.Hash, number uint64) *types.Body { +func ReadBody(db kv.Getter, hash common.Hash, number uint64) *types.Body { body, baseTxId, txAmount := ReadBodyWithoutTransactions(db, hash, number) if body == nil { return 
nil @@ -374,7 +374,7 @@ func ReadBody(db ethdb.KVGetter, hash common.Hash, number uint64) *types.Body { return body } -func ReadBodyWithoutTransactions(db ethdb.KVGetter, hash common.Hash, number uint64) (*types.Body, uint64, uint32) { +func ReadBodyWithoutTransactions(db kv.Getter, hash common.Hash, number uint64) (*types.Body, uint64, uint32) { data := ReadStorageBodyRLP(db, hash, number) if len(data) == 0 { return nil, 0, 0 @@ -390,8 +390,8 @@ func ReadBodyWithoutTransactions(db ethdb.KVGetter, hash common.Hash, number uin return body, bodyForStorage.BaseTxId, bodyForStorage.TxAmount } -func ReadSenders(db ethdb.KVGetter, hash common.Hash, number uint64) ([]common.Address, error) { - data, err := db.GetOne(dbutils.Senders, dbutils.BlockBodyKey(number, hash)) +func ReadSenders(db kv.Getter, hash common.Hash, number uint64) ([]common.Address, error) { + data, err := db.GetOne(kv.Senders, dbutils.BlockBodyKey(number, hash)) if err != nil { return nil, fmt.Errorf("readSenders failed: %w", err) } @@ -402,8 +402,8 @@ func ReadSenders(db ethdb.KVGetter, hash common.Hash, number uint64) ([]common.A return senders, nil } -func WriteRawBody(db ethdb.RwTx, hash common.Hash, number uint64, body *types.RawBody) error { - baseTxId, err := db.IncrementSequence(dbutils.EthTx, uint64(len(body.Transactions))) +func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) error { + baseTxId, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))) if err != nil { return err } @@ -423,10 +423,10 @@ func WriteRawBody(db ethdb.RwTx, hash common.Hash, number uint64, body *types.Ra return nil } -func WriteBody(db ethdb.RwTx, hash common.Hash, number uint64, body *types.Body) error { +func WriteBody(db kv.RwTx, hash common.Hash, number uint64, body *types.Body) error { // Pre-processing body.SendersFromTxs() - baseTxId, err := db.IncrementSequence(dbutils.EthTx, uint64(len(body.Transactions))) + baseTxId, err := db.IncrementSequence(kv.EthTx, 
uint64(len(body.Transactions))) if err != nil { return err } @@ -446,27 +446,27 @@ func WriteBody(db ethdb.RwTx, hash common.Hash, number uint64, body *types.Body) return nil } -func WriteSenders(db ethdb.RwTx, hash common.Hash, number uint64, senders []common.Address) error { +func WriteSenders(db kv.RwTx, hash common.Hash, number uint64, senders []common.Address) error { data := make([]byte, common.AddressLength*len(senders)) for i, sender := range senders { copy(data[i*common.AddressLength:], sender[:]) } - if err := db.Put(dbutils.Senders, dbutils.BlockBodyKey(number, hash), data); err != nil { + if err := db.Put(kv.Senders, dbutils.BlockBodyKey(number, hash), data); err != nil { return fmt.Errorf("failed to store block senders: %w", err) } return nil } // DeleteBody removes all block body data associated with a hash. -func DeleteBody(db ethdb.Deleter, hash common.Hash, number uint64) { - if err := db.Delete(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(number, hash), nil); err != nil { +func DeleteBody(db kv.Deleter, hash common.Hash, number uint64) { + if err := db.Delete(kv.BlockBody, dbutils.BlockBodyKey(number, hash), nil); err != nil { log.Crit("Failed to delete block body", "err", err) } } // ReadTd retrieves a block's total difficulty corresponding to the hash. 
-func ReadTd(db ethdb.KVGetter, hash common.Hash, number uint64) (*big.Int, error) { - data, err := db.GetOne(dbutils.HeaderTDBucket, dbutils.HeaderKey(number, hash)) +func ReadTd(db kv.Getter, hash common.Hash, number uint64) (*big.Int, error) { + data, err := db.GetOne(kv.HeaderTD, dbutils.HeaderKey(number, hash)) if err != nil { return nil, fmt.Errorf("failed ReadTd: %w", err) } @@ -480,7 +480,7 @@ func ReadTd(db ethdb.KVGetter, hash common.Hash, number uint64) (*big.Int, error return td, nil } -func ReadTdByHash(db ethdb.KVGetter, hash common.Hash) (*big.Int, error) { +func ReadTdByHash(db kv.Getter, hash common.Hash) (*big.Int, error) { headNumber := ReadHeaderNumber(db, hash) if headNumber == nil { return nil, nil @@ -489,20 +489,20 @@ func ReadTdByHash(db ethdb.KVGetter, hash common.Hash) (*big.Int, error) { } // WriteTd stores the total difficulty of a block into the database. -func WriteTd(db ethdb.Putter, hash common.Hash, number uint64, td *big.Int) error { +func WriteTd(db kv.Putter, hash common.Hash, number uint64, td *big.Int) error { data, err := rlp.EncodeToBytes(td) if err != nil { return fmt.Errorf("failed to RLP encode block total difficulty: %w", err) } - if err := db.Put(dbutils.HeaderTDBucket, dbutils.HeaderKey(number, hash), data); err != nil { + if err := db.Put(kv.HeaderTD, dbutils.HeaderKey(number, hash), data); err != nil { return fmt.Errorf("failed to store block total difficulty: %w", err) } return nil } // DeleteTd removes all block total difficulty data associated with a hash. 
-func DeleteTd(db ethdb.Deleter, hash common.Hash, number uint64) error { - if err := db.Delete(dbutils.HeaderTDBucket, dbutils.HeaderKey(number, hash), nil); err != nil { +func DeleteTd(db kv.Deleter, hash common.Hash, number uint64) error { + if err := db.Delete(kv.HeaderTD, dbutils.HeaderKey(number, hash), nil); err != nil { return fmt.Errorf("failed to delete block total difficulty: %w", err) } return nil @@ -510,8 +510,8 @@ func DeleteTd(db ethdb.Deleter, hash common.Hash, number uint64) error { // HasReceipts verifies the existence of all the transaction receipts belonging // to a block. -func HasReceipts(db ethdb.Has, hash common.Hash, number uint64) bool { - if has, err := db.Has(dbutils.Receipts, dbutils.ReceiptsKey(number)); !has || err != nil { +func HasReceipts(db kv.Has, hash common.Hash, number uint64) bool { + if has, err := db.Has(kv.Receipts, dbutils.ReceiptsKey(number)); !has || err != nil { return false } return true @@ -520,9 +520,9 @@ func HasReceipts(db ethdb.Has, hash common.Hash, number uint64) bool { // ReadRawReceipts retrieves all the transaction receipts belonging to a block. // The receipt metadata fields are not guaranteed to be populated, so they // should not be used. Use ReadReceipts instead if the metadata is needed. 
-func ReadRawReceipts(db ethdb.Tx, blockNum uint64) types.Receipts { +func ReadRawReceipts(db kv.Tx, blockNum uint64) types.Receipts { // Retrieve the flattened receipt slice - data, err := db.GetOne(dbutils.Receipts, dbutils.ReceiptsKey(blockNum)) + data, err := db.GetOne(kv.Receipts, dbutils.ReceiptsKey(blockNum)) if err != nil { log.Error("ReadRawReceipts failed", "err", err) } @@ -537,7 +537,7 @@ func ReadRawReceipts(db ethdb.Tx, blockNum uint64) types.Receipts { prefix := make([]byte, 8) binary.BigEndian.PutUint64(prefix, blockNum) - if err := db.ForPrefix(dbutils.Log, prefix, func(k, v []byte) error { + if err := db.ForPrefix(kv.Log, prefix, func(k, v []byte) error { var logs types.Logs if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { return fmt.Errorf("receipt unmarshal failed: %w", err) @@ -560,7 +560,7 @@ func ReadRawReceipts(db ethdb.Tx, blockNum uint64) types.Receipts { // The current implementation populates these metadata fields by reading the receipts' // corresponding block body, so if the block body is not found it will return nil even // if the receipt itself is stored. -func ReadReceipts(db ethdb.Tx, block *types.Block, senders []common.Address) types.Receipts { +func ReadReceipts(db kv.Tx, block *types.Block, senders []common.Address) types.Receipts { if block == nil { return nil } @@ -577,7 +577,7 @@ func ReadReceipts(db ethdb.Tx, block *types.Block, senders []common.Address) typ return receipts } -func ReadReceiptsByHash(db ethdb.Tx, hash common.Hash) (types.Receipts, error) { +func ReadReceiptsByHash(db kv.Tx, hash common.Hash) (types.Receipts, error) { b, s, err := ReadBlockByHashWithSenders(db, hash) if err != nil { return nil, err @@ -593,7 +593,7 @@ func ReadReceiptsByHash(db ethdb.Tx, hash common.Hash) (types.Receipts, error) { } // WriteReceipts stores all the transaction receipts belonging to a block. 
-func WriteReceipts(tx ethdb.Putter, number uint64, receipts types.Receipts) error { +func WriteReceipts(tx kv.Putter, number uint64, receipts types.Receipts) error { buf := bytes.NewBuffer(make([]byte, 0, 1024)) for txId, r := range receipts { if len(r.Logs) == 0 { @@ -606,7 +606,7 @@ func WriteReceipts(tx ethdb.Putter, number uint64, receipts types.Receipts) erro return fmt.Errorf("encode block logs for block %d: %v", number, err) } - if err = tx.Put(dbutils.Log, dbutils.LogKey(number, uint32(txId)), buf.Bytes()); err != nil { + if err = tx.Put(kv.Log, dbutils.LogKey(number, uint32(txId)), buf.Bytes()); err != nil { return fmt.Errorf("writing logs for block %d: %v", number, err) } } @@ -617,14 +617,14 @@ func WriteReceipts(tx ethdb.Putter, number uint64, receipts types.Receipts) erro return fmt.Errorf("encode block receipts for block %d: %v", number, err) } - if err = tx.Put(dbutils.Receipts, dbutils.ReceiptsKey(number), buf.Bytes()); err != nil { + if err = tx.Put(kv.Receipts, dbutils.ReceiptsKey(number), buf.Bytes()); err != nil { return fmt.Errorf("writing receipts for block %d: %v", number, err) } return nil } // AppendReceipts stores all the transaction receipts belonging to a block. 
-func AppendReceipts(tx ethdb.RwTx, blockNumber uint64, receipts types.Receipts) error { +func AppendReceipts(tx kv.RwTx, blockNumber uint64, receipts types.Receipts) error { buf := bytes.NewBuffer(make([]byte, 0, 1024)) for txId, r := range receipts { @@ -638,7 +638,7 @@ func AppendReceipts(tx ethdb.RwTx, blockNumber uint64, receipts types.Receipts) return fmt.Errorf("encode block receipts for block %d: %v", blockNumber, err) } - if err = tx.Append(dbutils.Log, dbutils.LogKey(blockNumber, uint32(txId)), buf.Bytes()); err != nil { + if err = tx.Append(kv.Log, dbutils.LogKey(blockNumber, uint32(txId)), buf.Bytes()); err != nil { return fmt.Errorf("writing receipts for block %d: %v", blockNumber, err) } } @@ -649,22 +649,22 @@ func AppendReceipts(tx ethdb.RwTx, blockNumber uint64, receipts types.Receipts) return fmt.Errorf("encode block receipts for block %d: %v", blockNumber, err) } - if err = tx.Append(dbutils.Receipts, dbutils.ReceiptsKey(blockNumber), buf.Bytes()); err != nil { + if err = tx.Append(kv.Receipts, dbutils.ReceiptsKey(blockNumber), buf.Bytes()); err != nil { return fmt.Errorf("writing receipts for block %d: %v", blockNumber, err) } return nil } // DeleteReceipts removes all receipt data associated with a block hash. 
-func DeleteReceipts(db ethdb.RwTx, number uint64) error { - if err := db.Delete(dbutils.Receipts, dbutils.ReceiptsKey(number), nil); err != nil { +func DeleteReceipts(db kv.RwTx, number uint64) error { + if err := db.Delete(kv.Receipts, dbutils.ReceiptsKey(number), nil); err != nil { return fmt.Errorf("receipts delete failed: %d, %w", number, err) } prefix := make([]byte, 8) binary.BigEndian.PutUint64(prefix, number) - if err := db.ForPrefix(dbutils.Log, prefix, func(k, v []byte) error { - return db.Delete(dbutils.Log, k, nil) + if err := db.ForPrefix(kv.Log, prefix, func(k, v []byte) error { + return db.Delete(kv.Log, k, nil) }); err != nil { return err } @@ -672,25 +672,25 @@ func DeleteReceipts(db ethdb.RwTx, number uint64) error { } // DeleteNewerReceipts removes all receipt for given block number or newer -func DeleteNewerReceipts(db ethdb.RwTx, number uint64) error { - if err := db.ForEach(dbutils.Receipts, dbutils.ReceiptsKey(number), func(k, v []byte) error { - return db.Delete(dbutils.Receipts, k, nil) +func DeleteNewerReceipts(db kv.RwTx, number uint64) error { + if err := db.ForEach(kv.Receipts, dbutils.ReceiptsKey(number), func(k, v []byte) error { + return db.Delete(kv.Receipts, k, nil) }); err != nil { return err } from := make([]byte, 8) binary.BigEndian.PutUint64(from, number) - if err := db.ForEach(dbutils.Log, from, func(k, v []byte) error { - return db.Delete(dbutils.Log, k, nil) + if err := db.ForEach(kv.Log, from, func(k, v []byte) error { + return db.Delete(kv.Log, k, nil) }); err != nil { return err } return nil } -func ReceiptsAvailableFrom(tx ethdb.Tx) (uint64, error) { - c, err := tx.Cursor(dbutils.Receipts) +func ReceiptsAvailableFrom(tx kv.Tx) (uint64, error) { + c, err := tx.Cursor(kv.Receipts) if err != nil { return math.MaxUint64, err } @@ -711,7 +711,7 @@ func ReceiptsAvailableFrom(tx ethdb.Tx) (uint64, error) { // // Note, due to concurrent download of header and block body the header and thus // canonical hash can be stored in the 
database but the body data not (yet). -func ReadBlock(tx ethdb.KVGetter, hash common.Hash, number uint64) *types.Block { +func ReadBlock(tx kv.Getter, hash common.Hash, number uint64) *types.Block { header := ReadHeader(tx, hash, number) if header == nil { return nil @@ -726,12 +726,12 @@ func ReadBlock(tx ethdb.KVGetter, hash common.Hash, number uint64) *types.Block // HasBlock - is more efficient than ReadBlock because doesn't read transactions. // It's is not equivalent of HasHeader because headers and bodies written by different stages -func HasBlock(db ethdb.KVGetter, hash common.Hash, number uint64) bool { +func HasBlock(db kv.Getter, hash common.Hash, number uint64) bool { body := ReadStorageBodyRLP(db, hash, number) return len(body) > 0 } -func ReadBlockWithSenders(db ethdb.Tx, hash common.Hash, number uint64) (*types.Block, []common.Address, error) { +func ReadBlockWithSenders(db kv.Tx, hash common.Hash, number uint64) (*types.Block, []common.Address, error) { block := ReadBlock(db, hash, number) if block == nil { return nil, nil, nil @@ -748,7 +748,7 @@ func ReadBlockWithSenders(db ethdb.Tx, hash common.Hash, number uint64) (*types. } // WriteBlock serializes a block into the database, header and body separately. -func WriteBlock(db ethdb.RwTx, block *types.Block) error { +func WriteBlock(db kv.RwTx, block *types.Block) error { if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { return err } @@ -757,7 +757,7 @@ func WriteBlock(db ethdb.RwTx, block *types.Block) error { } // DeleteBlock removes all block data associated with a hash. 
-func DeleteBlock(db ethdb.RwTx, hash common.Hash, number uint64) error { +func DeleteBlock(db kv.RwTx, hash common.Hash, number uint64) error { if err := DeleteReceipts(db, number); err != nil { return err } @@ -769,7 +769,7 @@ func DeleteBlock(db ethdb.RwTx, hash common.Hash, number uint64) error { return nil } -func ReadBlockByNumber(db ethdb.Tx, number uint64) (*types.Block, error) { +func ReadBlockByNumber(db kv.Tx, number uint64) (*types.Block, error) { hash, err := ReadCanonicalHash(db, number) if err != nil { return nil, fmt.Errorf("failed ReadCanonicalHash: %w", err) @@ -781,7 +781,7 @@ func ReadBlockByNumber(db ethdb.Tx, number uint64) (*types.Block, error) { return ReadBlock(db, hash, number), nil } -func ReadBlockByNumberWithSenders(db ethdb.Tx, number uint64) (*types.Block, []common.Address, error) { +func ReadBlockByNumberWithSenders(db kv.Tx, number uint64) (*types.Block, []common.Address, error) { hash, err := ReadCanonicalHash(db, number) if err != nil { return nil, nil, fmt.Errorf("failed ReadCanonicalHash: %w", err) @@ -793,7 +793,7 @@ func ReadBlockByNumberWithSenders(db ethdb.Tx, number uint64) (*types.Block, []c return ReadBlockWithSenders(db, hash, number) } -func ReadBlockByHash(db ethdb.Tx, hash common.Hash) (*types.Block, error) { +func ReadBlockByHash(db kv.Tx, hash common.Hash) (*types.Block, error) { number := ReadHeaderNumber(db, hash) if number == nil { return nil, nil @@ -801,7 +801,7 @@ func ReadBlockByHash(db ethdb.Tx, hash common.Hash) (*types.Block, error) { return ReadBlock(db, hash, *number), nil } -func ReadBlockByHashWithSenders(db ethdb.Tx, hash common.Hash) (*types.Block, []common.Address, error) { +func ReadBlockByHashWithSenders(db kv.Tx, hash common.Hash) (*types.Block, []common.Address, error) { number := ReadHeaderNumber(db, hash) if number == nil { return nil, nil, nil @@ -809,7 +809,7 @@ func ReadBlockByHashWithSenders(db ethdb.Tx, hash common.Hash) (*types.Block, [] return ReadBlockWithSenders(db, hash, *number) } 
-func ReadHeaderByNumber(db ethdb.KVGetter, number uint64) *types.Header { +func ReadHeaderByNumber(db kv.Getter, number uint64) *types.Header { hash, err := ReadCanonicalHash(db, number) if err != nil { log.Error("ReadCanonicalHash failed", "err", err) @@ -822,7 +822,7 @@ func ReadHeaderByNumber(db ethdb.KVGetter, number uint64) *types.Header { return ReadHeader(db, hash, number) } -func ReadHeaderByHash(db ethdb.KVGetter, hash common.Hash) (*types.Header, error) { +func ReadHeaderByHash(db kv.Getter, hash common.Hash) (*types.Header, error) { number := ReadHeaderNumber(db, hash) if number == nil { return nil, nil @@ -830,7 +830,7 @@ func ReadHeaderByHash(db ethdb.KVGetter, hash common.Hash) (*types.Header, error return ReadHeader(db, hash, *number), nil } -func ReadAncestor(db ethdb.KVGetter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { +func ReadAncestor(db kv.Getter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { if ancestor > number { return common.Hash{}, 0 } @@ -875,24 +875,24 @@ func ReadAncestor(db ethdb.KVGetter, hash common.Hash, number, ancestor uint64, return hash, number } -func DeleteNewerEpochs(tx ethdb.RwTx, number uint64) error { - if err := tx.ForEach(dbutils.PendingEpoch, dbutils.EncodeBlockNumber(number), func(k, v []byte) error { - return tx.Delete(dbutils.Epoch, k, nil) +func DeleteNewerEpochs(tx kv.RwTx, number uint64) error { + if err := tx.ForEach(kv.PendingEpoch, dbutils.EncodeBlockNumber(number), func(k, v []byte) error { + return tx.Delete(kv.Epoch, k, nil) }); err != nil { return err } - return tx.ForEach(dbutils.Epoch, dbutils.EncodeBlockNumber(number), func(k, v []byte) error { - return tx.Delete(dbutils.Epoch, k, nil) + return tx.ForEach(kv.Epoch, dbutils.EncodeBlockNumber(number), func(k, v []byte) error { + return tx.Delete(kv.Epoch, k, nil) }) } -func ReadEpoch(tx ethdb.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, 
err error) { +func ReadEpoch(tx kv.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) { k := make([]byte, dbutils.NumberLength+common.HashLength) binary.BigEndian.PutUint64(k, blockNum) copy(k[dbutils.NumberLength:], blockHash[:]) - return tx.GetOne(dbutils.Epoch, k) + return tx.GetOne(kv.Epoch, k) } -func FindEpochBeforeOrEqualNumber(tx ethdb.Tx, n uint64) (blockNum uint64, blockHash common.Hash, transitionProof []byte, err error) { - c, err := tx.Cursor(dbutils.Epoch) +func FindEpochBeforeOrEqualNumber(tx kv.Tx, n uint64) (blockNum uint64, blockHash common.Hash, transitionProof []byte, err error) { + c, err := tx.Cursor(kv.Epoch) if err != nil { return 0, common.Hash{}, nil, err } @@ -918,23 +918,23 @@ func FindEpochBeforeOrEqualNumber(tx ethdb.Tx, n uint64) (blockNum uint64, block return binary.BigEndian.Uint64(k), common.BytesToHash(k[dbutils.NumberLength:]), v, nil } -func WriteEpoch(tx ethdb.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) { +func WriteEpoch(tx kv.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) { k := make([]byte, dbutils.NumberLength+common.HashLength) binary.BigEndian.PutUint64(k, blockNum) copy(k[dbutils.NumberLength:], blockHash[:]) - return tx.Put(dbutils.Epoch, k, transitionProof) + return tx.Put(kv.Epoch, k, transitionProof) } -func ReadPendingEpoch(tx ethdb.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) { +func ReadPendingEpoch(tx kv.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) { k := make([]byte, 8+32) binary.BigEndian.PutUint64(k, blockNum) copy(k[8:], blockHash[:]) - return tx.GetOne(dbutils.PendingEpoch, k) + return tx.GetOne(kv.PendingEpoch, k) } -func WritePendingEpoch(tx ethdb.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) { +func WritePendingEpoch(tx kv.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err 
error) { k := make([]byte, 8+32) binary.BigEndian.PutUint64(k, blockNum) copy(k[8:], blockHash[:]) - return tx.Put(dbutils.PendingEpoch, k, transitionProof) + return tx.Put(kv.PendingEpoch, k, transitionProof) } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 93b3099b87e..fa8a584a1ec 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -27,7 +27,7 @@ import ( "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/stretchr/testify/require" @@ -36,7 +36,7 @@ import ( // Tests block header storage and retrieval operations. func TestHeaderStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) // Create a test header to move around the database and make sure it's really new header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")} @@ -69,7 +69,7 @@ func TestHeaderStorage(t *testing.T) { // Tests block body storage and retrieval operations. func TestBodyStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) require := require.New(t) var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -124,7 +124,7 @@ func TestBodyStorage(t *testing.T) { // Tests block storage and retrieval operations. func TestBlockStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) // Create a test block to move around the database and make sure it's really new block := types.NewBlockWithHeader(&types.Header{ @@ -179,7 +179,7 @@ func TestBlockStorage(t *testing.T) { // Tests that partial block contents don't get reassembled into full blocks. 
func TestPartialBlockStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) block := types.NewBlockWithHeader(&types.Header{ Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, @@ -217,7 +217,7 @@ func TestPartialBlockStorage(t *testing.T) { // Tests block total difficulty storage and retrieval operations. func TestTdStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) // Create a test TD to move around the database and make sure it's really new hash, td := common.Hash{}, big.NewInt(314) @@ -258,7 +258,7 @@ func TestTdStorage(t *testing.T) { // Tests that canonical numbers can be mapped to hashes and retrieved. func TestCanonicalMappingStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) // Create a test canonical number and assinged hash to move around hash, number := common.Hash{0: 0xff}, uint64(314) @@ -299,7 +299,7 @@ func TestCanonicalMappingStorage(t *testing.T) { // Tests that head headers and head blocks can be assigned, individually. func TestHeadStorage(t *testing.T) { - _, db := kv.NewTestTx(t) + _, db := memdb.NewTestTx(t) blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")}) blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")}) @@ -326,7 +326,7 @@ func TestHeadStorage(t *testing.T) { // Tests that receipts associated with a single block can be stored and retrieved. 
func TestBlockReceiptStorage(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) require := require.New(t) // Create a live block since we need metadata to reconstruct the receipt diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go index 65cf9ccab0c..9147f4feb71 100644 --- a/core/rawdb/accessors_indexes.go +++ b/core/rawdb/accessors_indexes.go @@ -20,9 +20,8 @@ import ( "math/big" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) @@ -36,8 +35,8 @@ type TxLookupEntry struct { // ReadTxLookupEntry retrieves the positional metadata associated with a transaction // hash to allow retrieving the transaction or receipt by hash. -func ReadTxLookupEntry(db ethdb.Tx, txnHash common.Hash) (*uint64, error) { - data, err := db.GetOne(dbutils.TxLookupPrefix, txnHash.Bytes()) +func ReadTxLookupEntry(db kv.Tx, txnHash common.Hash) (*uint64, error) { + data, err := db.GetOne(kv.TxLookup, txnHash.Bytes()) if err != nil { return nil, err } @@ -50,23 +49,23 @@ func ReadTxLookupEntry(db ethdb.Tx, txnHash common.Hash) (*uint64, error) { // WriteTxLookupEntries stores a positional metadata for every transaction from // a block, enabling hash based transaction and receipt lookups. -func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) { +func WriteTxLookupEntries(db kv.Putter, block *types.Block) { for _, tx := range block.Transactions() { data := block.Number().Bytes() - if err := db.Put(dbutils.TxLookupPrefix, tx.Hash().Bytes(), data); err != nil { + if err := db.Put(kv.TxLookup, tx.Hash().Bytes(), data); err != nil { log.Crit("Failed to store transaction lookup entry", "err", err) } } } // DeleteTxLookupEntry removes all transaction data associated with a hash. 
-func DeleteTxLookupEntry(db ethdb.Deleter, hash common.Hash) error { - return db.Delete(dbutils.TxLookupPrefix, hash.Bytes(), nil) +func DeleteTxLookupEntry(db kv.Deleter, hash common.Hash) error { + return db.Delete(kv.TxLookup, hash.Bytes(), nil) } // ReadTransaction retrieves a specific transaction from the database, along with // its added positional metadata. -func ReadTransaction(db ethdb.Tx, hash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { +func ReadTransaction(db kv.Tx, hash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { blockNumber, err := ReadTxLookupEntry(db, hash) if err != nil { return nil, common.Hash{}, 0, 0, err @@ -100,7 +99,7 @@ func ReadTransaction(db ethdb.Tx, hash common.Hash) (types.Transaction, common.H return nil, common.Hash{}, 0, 0, nil } -func ReadReceipt(db ethdb.Tx, txHash common.Hash) (*types.Receipt, common.Hash, uint64, uint64, error) { +func ReadReceipt(db kv.Tx, txHash common.Hash) (*types.Receipt, common.Hash, uint64, uint64, error) { // Retrieve the context of the receipt based on the transaction hash blockNumber, err := ReadTxLookupEntry(db, txHash) if err != nil { diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 06a258328c9..3e77f17b84b 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -22,21 +22,21 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" ) // Tests that positional lookup metadata can be stored and retrieved. 
func TestLookupStorage(t *testing.T) { tests := []struct { name string - writeTxLookupEntries func(ethdb.Putter, *types.Block) + writeTxLookupEntries func(kv.Putter, *types.Block) }{ { "DatabaseV6", - func(db ethdb.Putter, block *types.Block) { + func(db kv.Putter, block *types.Block) { WriteTxLookupEntries(db, block) }, }, @@ -45,7 +45,7 @@ func TestLookupStorage(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), uint256.NewInt(111), 1111, uint256.NewInt(11111), []byte{0x11, 0x11, 0x11}) tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), uint256.NewInt(222), 2222, uint256.NewInt(22222), []byte{0x22, 0x22, 0x22}) diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 4ded1ceb23e..1fb5029f7e4 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -20,16 +20,15 @@ import ( "encoding/json" "fmt" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/params" ) // ReadChainConfig retrieves the consensus settings based on the given genesis hash. -func ReadChainConfig(db ethdb.KVGetter, hash common.Hash) (*params.ChainConfig, error) { - data, err := db.GetOne(dbutils.ConfigPrefix, hash[:]) +func ReadChainConfig(db kv.Getter, hash common.Hash) (*params.ChainConfig, error) { + data, err := db.GetOne(kv.ConfigTable, hash[:]) if err != nil { return nil, err } @@ -44,7 +43,7 @@ func ReadChainConfig(db ethdb.KVGetter, hash common.Hash) (*params.ChainConfig, } // WriteChainConfig writes the chain config settings to the database. 
-func WriteChainConfig(db ethdb.Putter, hash common.Hash, cfg *params.ChainConfig) error { +func WriteChainConfig(db kv.Putter, hash common.Hash, cfg *params.ChainConfig) error { if cfg == nil { return nil } @@ -52,13 +51,13 @@ func WriteChainConfig(db ethdb.Putter, hash common.Hash, cfg *params.ChainConfig if err != nil { return fmt.Errorf("failed to JSON encode chain config: %w", err) } - if err := db.Put(dbutils.ConfigPrefix, hash[:], data); err != nil { + if err := db.Put(kv.ConfigTable, hash[:], data); err != nil { return fmt.Errorf("failed to store chain config: %w", err) } return nil } // DeleteChainConfig retrieves the consensus settings based on the given genesis hash. -func DeleteChainConfig(db ethdb.Deleter, hash common.Hash) error { - return db.Delete(dbutils.ConfigPrefix, hash[:], nil) +func DeleteChainConfig(db kv.Deleter, hash common.Hash) error { + return db.Delete(kv.ConfigTable, hash[:], nil) } diff --git a/core/rlp_test.go b/core/rlp_test.go index 1d8c741f765..95e5d4899ca 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -21,7 +21,7 @@ import ( "math/big" "testing" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common" @@ -34,7 +34,7 @@ import ( ) func getBlock(transactions int, uncles int, dataSize int) *types.Block { - db := kv.NewMemKV() + db := memdb.New() defer db.Close() var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") diff --git a/core/state/change_set_writer.go b/core/state/change_set_writer.go index 521791f7fff..47edd87ff58 100644 --- a/core/state/change_set_writer.go +++ b/core/state/change_set_writer.go @@ -4,18 +4,18 @@ import ( "fmt" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" 
"github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" ) // ChangeSetWriter is a mock StateWriter that accumulates changes in-memory into ChangeSets. type ChangeSetWriter struct { - db ethdb.RwTx + db kv.RwTx accountChanges map[common.Address][]byte storageChanged map[common.Address]bool storageChanges map[string][]byte @@ -29,7 +29,7 @@ func NewChangeSetWriter() *ChangeSetWriter { storageChanges: make(map[string][]byte), } } -func NewChangeSetWriterPlain(db ethdb.RwTx, blockNumber uint64) *ChangeSetWriter { +func NewChangeSetWriterPlain(db kv.RwTx, blockNumber uint64) *ChangeSetWriter { return &ChangeSetWriter{ db: db, accountChanges: make(map[common.Address][]byte), @@ -121,8 +121,8 @@ func (w *ChangeSetWriter) WriteChangeSets() error { if err != nil { return err } - if err = changeset.Mapper[dbutils.AccountChangeSetBucket].Encode(w.blockNumber, accountChanges, func(k, v []byte) error { - if err = w.db.AppendDup(dbutils.AccountChangeSetBucket, k, v); err != nil { + if err = changeset.Mapper[kv.AccountChangeSet].Encode(w.blockNumber, accountChanges, func(k, v []byte) error { + if err = w.db.AppendDup(kv.AccountChangeSet, k, v); err != nil { return err } return nil @@ -137,8 +137,8 @@ func (w *ChangeSetWriter) WriteChangeSets() error { if storageChanges.Len() == 0 { return nil } - if err = changeset.Mapper[dbutils.StorageChangeSetBucket].Encode(w.blockNumber, storageChanges, func(k, v []byte) error { - if err = w.db.AppendDup(dbutils.StorageChangeSetBucket, k, v); err != nil { + if err = changeset.Mapper[kv.StorageChangeSet].Encode(w.blockNumber, storageChanges, func(k, v []byte) error { + if err = w.db.AppendDup(kv.StorageChangeSet, k, v); err != nil { return err } return nil @@ -153,7 +153,7 @@ func (w *ChangeSetWriter) WriteHistory() error { if err != nil { return err } - err = writeIndex(w.blockNumber, accountChanges, dbutils.AccountsHistoryBucket, w.db) + err = writeIndex(w.blockNumber, accountChanges, kv.AccountsHistory, 
w.db) if err != nil { return err } @@ -162,7 +162,7 @@ func (w *ChangeSetWriter) WriteHistory() error { if err != nil { return err } - err = writeIndex(w.blockNumber, storageChanges, dbutils.StorageHistoryBucket, w.db) + err = writeIndex(w.blockNumber, storageChanges, kv.StorageHistory, w.db) if err != nil { return err } diff --git a/core/state/database_test.go b/core/state/database_test.go index 4492e568069..fd88404daaf 100644 --- a/core/state/database_test.go +++ b/core/state/database_test.go @@ -24,8 +24,8 @@ import ( "testing" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/assert" @@ -41,7 +41,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/params" ) @@ -129,7 +128,7 @@ func TestCreate2Revive(t *testing.T) { t.Fatalf("generate blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") @@ -146,7 +145,7 @@ func TestCreate2Revive(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) @@ -173,7 +172,7 @@ func TestCreate2Revive(t *testing.T) { var key2 common.Hash var check2 uint256.Int - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) 
error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 2", create2address.String()) @@ -192,7 +191,7 @@ func TestCreate2Revive(t *testing.T) { if err = m.InsertChain(chain.Slice(2, 3)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if st.Exist(create2address) { t.Error("expected create2address to be self-destructed at the block 3", create2address.String()) @@ -215,7 +214,7 @@ func TestCreate2Revive(t *testing.T) { if it.Event.D != create2address { t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 2", create2address.String()) @@ -355,7 +354,7 @@ func TestCreate2Polymorth(t *testing.T) { t.Fatalf("generate blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { @@ -373,7 +372,7 @@ func TestCreate2Polymorth(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { @@ -398,7 +397,7 @@ func TestCreate2Polymorth(t *testing.T) { if it.Event.D != create2address { t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := 
state.New(state.NewPlainStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 2", create2address.String()) @@ -417,7 +416,7 @@ func TestCreate2Polymorth(t *testing.T) { if err = m.InsertChain(chain.Slice(2, 3)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if st.Exist(create2address) { t.Error("expected create2address to be self-destructed at the block 3", create2address.String()) @@ -440,7 +439,7 @@ func TestCreate2Polymorth(t *testing.T) { if it.Event.D != create2address { t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 4", create2address.String()) @@ -470,7 +469,7 @@ func TestCreate2Polymorth(t *testing.T) { if it.Event.D != create2address { t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 5", create2address.String()) @@ -573,7 +572,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { t.Fatalf("generate long blocks") } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { @@ -592,7 +591,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { var key0 common.Hash var correctValueX uint256.Int - err = m.DB.View(context.Background(), func(tx 
ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) @@ -609,7 +608,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if st.Exist(contractAddress) { t.Error("expected contractAddress to not exist at the block 3", contractAddress.String()) @@ -622,7 +621,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { if err = m.InsertChain(longerChain.Slice(1, 4)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 4", contractAddress.String()) @@ -716,7 +715,7 @@ func TestReorgOverStateChange(t *testing.T) { t.Fatalf("generate longer blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") @@ -735,7 +734,7 @@ func TestReorgOverStateChange(t *testing.T) { var key0 common.Hash var correctValueX uint256.Int - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) @@ -756,7 +755,7 @@ func TestReorgOverStateChange(t *testing.T) { if err = m.InsertChain(longerChain.Slice(1, 3)); err != nil { t.Fatal(err) } - err = 
m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 4", contractAddress.String()) @@ -847,7 +846,7 @@ func TestCreateOnExistingStorage(t *testing.T) { t.Fatalf("generate blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") @@ -866,7 +865,7 @@ func TestCreateOnExistingStorage(t *testing.T) { var key0 common.Hash var check0 uint256.Int - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) @@ -895,7 +894,7 @@ func TestReproduceCrash(t *testing.T) { storageKey2 := common.HexToHash("0x0e4c0e7175f9d22279a4f63ff74f7fa28b7a954a6454debaa62ce43dd9132542") value2 := uint256.NewInt(0x58c00a51) - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) tsw := state.NewPlainStateWriter(tx, nil, 0) intraBlockState := state.New(state.NewPlainState(tx, 0)) // Start the 1st transaction @@ -984,7 +983,7 @@ func TestEip2200Gas(t *testing.T) { } var balanceBefore *uint256.Int - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") @@ -1002,7 +1001,7 @@ func TestEip2200Gas(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) 
if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) @@ -1073,7 +1072,7 @@ func TestWrongIncarnation(t *testing.T) { t.Fatalf("generate blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") @@ -1091,7 +1090,7 @@ func TestWrongIncarnation(t *testing.T) { } var acc accounts.Account - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { ok, err := rawdb.ReadAccount(tx, contractAddress, &acc) if err != nil { t.Fatal(err) @@ -1116,7 +1115,7 @@ func TestWrongIncarnation(t *testing.T) { if err = m.InsertChain(chain.Slice(1, 2)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { ok, err := rawdb.ReadAccount(tx, contractAddress, &acc) if err != nil { t.Fatal(err) @@ -1222,7 +1221,7 @@ func TestWrongIncarnation2(t *testing.T) { t.Fatalf("generate longer blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") @@ -1242,7 +1241,7 @@ func TestWrongIncarnation2(t *testing.T) { } var acc accounts.Account - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) @@ -1266,7 +1265,7 @@ func TestWrongIncarnation2(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = 
m.DB.View(context.Background(), func(tx kv.Tx) error { ok, err := rawdb.ReadAccount(tx, contractAddress, &acc) if err != nil { t.Fatal(err) @@ -1286,7 +1285,7 @@ func TestWrongIncarnation2(t *testing.T) { func TestChangeAccountCodeBetweenBlocks(t *testing.T) { contract := common.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624") - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) r, tsw := state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, nil, 0) intraBlockState := state.New(r) // Start the 1st transaction @@ -1324,7 +1323,7 @@ func TestCacheCodeSizeSeparately(t *testing.T) { contract := common.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624") //root := common.HexToHash("0xb939e5bcf5809adfb87ab07f0795b05b95a1d64a90f0eddd0c3123ac5b433854") - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) r, w := state.NewPlainState(tx, 0), state.NewPlainStateWriter(tx, nil, 0) intraBlockState := state.New(r) // Start the 1st transaction @@ -1357,7 +1356,7 @@ func TestCacheCodeSizeInTrie(t *testing.T) { contract := common.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624") root := common.HexToHash("0xb939e5bcf5809adfb87ab07f0795b05b95a1d64a90f0eddd0c3123ac5b433854") - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) r, w := state.NewPlainState(tx, 0), state.NewPlainStateWriter(tx, nil, 0) intraBlockState := state.New(r) // Start the 1st transaction @@ -1383,7 +1382,7 @@ func TestCacheCodeSizeInTrie(t *testing.T) { assert.NoError(t, err, "you can receive the code size ") assert.Equal(t, len(code), codeSize, "you can receive the code size") - assert.NoError(t, tx.Delete(dbutils.CodeBucket, codeHash[:], nil), nil) + assert.NoError(t, tx.Delete(kv.CodeBucket, codeHash[:], nil), nil) codeSize2, err := r.ReadAccountCodeSize(contract, 1, codeHash) assert.NoError(t, err, "you can still receive code size even with empty DB") @@ -1542,7 +1541,7 @@ func TestRecreateAndRewind(t *testing.T) { var key0 common.Hash var check0 uint256.Int - err = 
m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(phoenixAddress) { t.Errorf("expected phoenix %x to exist after first insert", phoenixAddress) @@ -1560,7 +1559,7 @@ func TestRecreateAndRewind(t *testing.T) { if err = m.InsertChain(chain.Slice(2, chain.Length)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(phoenixAddress) { @@ -1579,7 +1578,7 @@ func TestRecreateAndRewind(t *testing.T) { if err = m.InsertChain(longerChain); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(phoenixAddress) { t.Errorf("expected phoenix %x to exist after second insert", phoenixAddress) diff --git a/core/state/db_state_reader.go b/core/state/db_state_reader.go index 7ffb96c8a57..64b6dbd27b4 100644 --- a/core/state/db_state_reader.go +++ b/core/state/db_state_reader.go @@ -8,19 +8,19 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) // Implements StateReader by wrapping database only, without trie type DbStateReader struct { - db ethdb.KVGetter + db kv.Getter accountCache *fastcache.Cache storageCache *fastcache.Cache codeCache *fastcache.Cache codeSizeCache *fastcache.Cache } -func NewDbStateReader(db ethdb.KVGetter) *DbStateReader { +func NewDbStateReader(db kv.Getter) *DbStateReader { return &DbStateReader{ db: db, } @@ -51,7 +51,7 @@ func (dbr *DbStateReader) ReadAccountData(address common.Address) (*accounts.Acc if !ok { var err error if 
addrHash, err1 := common.HashData(address[:]); err1 == nil { - enc, err = dbr.db.GetOne(dbutils.HashedAccountsBucket, addrHash[:]) + enc, err = dbr.db.GetOne(kv.HashedAccounts, addrHash[:]) } else { return nil, err1 } @@ -87,7 +87,7 @@ func (dbr *DbStateReader) ReadAccountStorage(address common.Address, incarnation return enc, nil } } - enc, err2 := dbr.db.GetOne(dbutils.HashedStorageBucket, compositeKey) + enc, err2 := dbr.db.GetOne(kv.HashedStorage, compositeKey) if err2 != nil { return nil, err2 } @@ -106,7 +106,7 @@ func (dbr *DbStateReader) ReadAccountCode(address common.Address, incarnation ui return code, nil } } - code, err := dbr.db.GetOne(dbutils.CodeBucket, codeHash[:]) + code, err := dbr.db.GetOne(kv.CodeBucket, codeHash[:]) if err != nil { return nil, err } @@ -131,7 +131,7 @@ func (dbr *DbStateReader) ReadAccountCodeSize(address common.Address, incarnatio } } var code []byte - code, err = dbr.db.GetOne(dbutils.CodeBucket, codeHash[:]) + code, err = dbr.db.GetOne(kv.CodeBucket, codeHash[:]) if err != nil { return 0, err } @@ -144,7 +144,7 @@ func (dbr *DbStateReader) ReadAccountCodeSize(address common.Address, incarnatio } func (dbr *DbStateReader) ReadAccountIncarnation(address common.Address) (uint64, error) { - b, err := dbr.db.GetOne(dbutils.IncarnationMapBucket, address[:]) + b, err := dbr.db.GetOne(kv.IncarnationMap, address[:]) if err != nil { return 0, err } diff --git a/core/state/db_state_writer.go b/core/state/db_state_writer.go index 3d76cf0eec5..ce22101e95a 100644 --- a/core/state/db_state_writer.go +++ b/core/state/db_state_writer.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/trie" ) @@ -69,7 +70,7 @@ func (dsw *DbStateWriter) UpdateAccountData(address common.Address, original, ac } value := make([]byte, account.EncodingLengthForStorage()) 
account.EncodeForStorage(value) - if err := dsw.db.Put(dbutils.HashedAccountsBucket, addrHash[:], value); err != nil { + if err := dsw.db.Put(kv.HashedAccounts, addrHash[:], value); err != nil { return err } return nil @@ -83,13 +84,13 @@ func (dsw *DbStateWriter) DeleteAccount(address common.Address, original *accoun if err != nil { return err } - if err := dsw.db.Delete(dbutils.HashedAccountsBucket, addrHash[:], nil); err != nil { + if err := dsw.db.Delete(kv.HashedAccounts, addrHash[:], nil); err != nil { return err } if original.Incarnation > 0 { var b [8]byte binary.BigEndian.PutUint64(b[:], original.Incarnation) - if err := dsw.db.Put(dbutils.IncarnationMapBucket, address[:], b[:]); err != nil { + if err := dsw.db.Put(kv.IncarnationMap, address[:], b[:]); err != nil { return err } } @@ -101,7 +102,7 @@ func (dsw *DbStateWriter) UpdateAccountCode(address common.Address, incarnation return err } //save contract code mapping - if err := dsw.db.Put(dbutils.CodeBucket, codeHash[:], code); err != nil { + if err := dsw.db.Put(kv.CodeBucket, codeHash[:], code); err != nil { return err } addrHash, err := common.HashData(address.Bytes()) @@ -109,7 +110,7 @@ func (dsw *DbStateWriter) UpdateAccountCode(address common.Address, incarnation return err } //save contract to codeHash mapping - if err := dsw.db.Put(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash[:], incarnation), codeHash[:]); err != nil { + if err := dsw.db.Put(kv.ContractCode, dbutils.GenerateStoragePrefix(addrHash[:], incarnation), codeHash[:]); err != nil { return err } return nil @@ -135,9 +136,9 @@ func (dsw *DbStateWriter) WriteAccountStorage(address common.Address, incarnatio v := value.Bytes() if len(v) == 0 { - return dsw.db.Delete(dbutils.HashedStorageBucket, compositeKey, nil) + return dsw.db.Delete(kv.HashedStorage, compositeKey, nil) } - return dsw.db.Put(dbutils.HashedStorageBucket, compositeKey, v) + return dsw.db.Put(kv.HashedStorage, compositeKey, v) } func (dsw 
*DbStateWriter) CreateContract(address common.Address) error { @@ -158,7 +159,7 @@ func (dsw *DbStateWriter) WriteHistory() error { if err != nil { return err } - err = writeIndex(dsw.blockNr, accountChanges, dbutils.AccountsHistoryBucket, dsw.db.(ethdb.HasTx).Tx().(ethdb.RwTx)) + err = writeIndex(dsw.blockNr, accountChanges, kv.AccountsHistory, dsw.db.(ethdb.HasTx).Tx().(kv.RwTx)) if err != nil { return err } @@ -167,7 +168,7 @@ func (dsw *DbStateWriter) WriteHistory() error { if err != nil { return err } - err = writeIndex(dsw.blockNr, storageChanges, dbutils.StorageHistoryBucket, dsw.db.(ethdb.HasTx).Tx().(ethdb.RwTx)) + err = writeIndex(dsw.blockNr, storageChanges, kv.StorageHistory, dsw.db.(ethdb.HasTx).Tx().(kv.RwTx)) if err != nil { return err } @@ -175,7 +176,7 @@ func (dsw *DbStateWriter) WriteHistory() error { return nil } -func writeIndex(blocknum uint64, changes *changeset.ChangeSet, bucket string, changeDb ethdb.RwTx) error { +func writeIndex(blocknum uint64, changes *changeset.ChangeSet, bucket string, changeDb kv.RwTx) error { buf := bytes.NewBuffer(nil) for _, change := range changes.Changes { k := dbutils.CompositeKeyWithoutIncarnation(change.Key) diff --git a/core/state/dump.go b/core/state/dump.go index 8bd07bba782..38ee6d18be9 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -26,13 +26,13 @@ import ( "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/trie" ) type Dumper struct { blockNumber uint64 - db ethdb.Tx + db kv.Tx hashedState bool } @@ -121,7 +121,7 @@ func (d iterativeDump) OnRoot(root common.Hash) { }{root}) } -func NewDumper(db ethdb.Tx, blockNumber uint64) *Dumper { +func NewDumper(db kv.Tx, blockNumber uint64) *Dumper { return &Dumper{ db: db, blockNumber: blockNumber, @@ -179,7 +179,7 @@ func (d *Dumper) 
DumpToCollector(c DumpCollector, excludeCode, excludeStorage bo incarnation := incarnationList[i] storagePrefix := dbutils.PlainGenerateStoragePrefix(addr[:], incarnation) if incarnation > 0 { - codeHash, err := d.db.GetOne(dbutils.PlainContractCodeBucket, storagePrefix) + codeHash, err := d.db.GetOne(kv.PlainContractCode, storagePrefix) if err != nil { return nil, fmt.Errorf("getting code hash for %x: %v", addr, err) } @@ -191,7 +191,7 @@ func (d *Dumper) DumpToCollector(c DumpCollector, excludeCode, excludeStorage bo if !excludeCode && codeHash != nil && !bytes.Equal(codeHash, emptyCodeHash[:]) { var code []byte - if code, err = d.db.GetOne(dbutils.CodeBucket, codeHash); err != nil { + if code, err = d.db.GetOne(kv.CodeBucket, codeHash); err != nil { return nil, err } account.Code = code diff --git a/core/state/history.go b/core/state/history.go index 331348b8fe8..dbbaa847012 100644 --- a/core/state/history.go +++ b/core/state/history.go @@ -13,9 +13,10 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) -func GetAsOf(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]byte, error) { +func GetAsOf(tx kv.Tx, storage bool, key []byte, timestamp uint64) ([]byte, error) { v, err := FindByHistory(tx, storage, key, timestamp) if err == nil { return v, nil @@ -23,15 +24,15 @@ func GetAsOf(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]byte, e if !errors.Is(err, ethdb.ErrKeyNotFound) { return nil, err } - return tx.GetOne(dbutils.PlainStateBucket, key) + return tx.GetOne(kv.PlainStateBucket, key) } -func FindByHistory(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]byte, error) { +func FindByHistory(tx kv.Tx, storage bool, key []byte, timestamp uint64) ([]byte, error) { var csBucket string if storage { - csBucket = dbutils.StorageChangeSetBucket + csBucket = kv.StorageChangeSet } else { - csBucket = 
dbutils.AccountChangeSetBucket + csBucket = kv.AccountChangeSet } ch, err := tx.Cursor(changeset.Mapper[csBucket].IndexBucket) @@ -95,7 +96,7 @@ func FindByHistory(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]b if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { var codeHash []byte var err error - codeHash, err = tx.GetOne(dbutils.PlainContractCodeBucket, dbutils.PlainGenerateStoragePrefix(key, acc.Incarnation)) + codeHash, err = tx.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(key, acc.Incarnation)) if err != nil { return nil, err } @@ -112,7 +113,7 @@ func FindByHistory(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]b } // startKey is the concatenation of address and incarnation (BigEndian 8 byte) -func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, startLocation common.Hash, timestamp uint64, walker func(k1, k2, v []byte) (bool, error)) error { +func WalkAsOfStorage(tx kv.Tx, address common.Address, incarnation uint64, startLocation common.Hash, timestamp uint64, walker func(k1, k2, v []byte) (bool, error)) error { var startkey = make([]byte, common.AddressLength+common.IncarnationLength+common.HashLength) copy(startkey, address.Bytes()) binary.BigEndian.PutUint64(startkey[common.AddressLength:], incarnation) @@ -123,7 +124,7 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st copy(startkeyNoInc[common.AddressLength:], startLocation.Bytes()) //for storage - mCursor, err := tx.Cursor(dbutils.PlainStateBucket) + mCursor, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return err } @@ -138,7 +139,7 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st ) //for historic data - shCursor, err := tx.Cursor(dbutils.StorageHistoryBucket) + shCursor, err := tx.Cursor(kv.StorageHistory) if err != nil { return err } @@ -151,7 +152,7 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st common.AddressLength, /* 
part2start */ common.AddressLength+common.HashLength, /* part3start */ ) - csCursor, err := tx.CursorDupSort(dbutils.StorageChangeSetBucket) + csCursor, err := tx.CursorDupSort(kv.StorageChangeSet) if err != nil { return err } @@ -239,13 +240,13 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st return nil } -func WalkAsOfAccounts(tx ethdb.Tx, startAddress common.Address, timestamp uint64, walker func(k []byte, v []byte) (bool, error)) error { - mainCursor, err := tx.Cursor(dbutils.PlainStateBucket) +func WalkAsOfAccounts(tx kv.Tx, startAddress common.Address, timestamp uint64, walker func(k []byte, v []byte) (bool, error)) error { + mainCursor, err := tx.Cursor(kv.PlainStateBucket) if err != nil { return err } defer mainCursor.Close() - ahCursor, err := tx.Cursor(dbutils.AccountsHistoryBucket) + ahCursor, err := tx.Cursor(kv.AccountsHistory) if err != nil { return err } @@ -258,7 +259,7 @@ func WalkAsOfAccounts(tx ethdb.Tx, startAddress common.Address, timestamp uint64 common.AddressLength, /* part2start */ common.AddressLength+8, /* part3start */ ) - csCursor, err := tx.CursorDupSort(dbutils.AccountChangeSetBucket) + csCursor, err := tx.CursorDupSort(kv.AccountChangeSet) if err != nil { return err } diff --git a/core/state/history_test.go b/core/state/history_test.go index cb3c077e8e4..a775b3ea640 100644 --- a/core/state/history_test.go +++ b/core/state/history_test.go @@ -18,13 +18,14 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestMutationDeleteTimestamp(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) acc := make([]*accounts.Account, 10) addr := make([]common.Address, 10) @@ -44,7 +45,7 @@ func TestMutationDeleteTimestamp(t *testing.T) { } i 
:= 0 - err := changeset.Walk(tx, dbutils.AccountChangeSetBucket, nil, 0, func(blockN uint64, k, v []byte) (bool, error) { + err := changeset.Walk(tx, kv.AccountChangeSet, nil, 0, func(blockN uint64, k, v []byte) (bool, error) { i++ return true, nil }) @@ -55,7 +56,7 @@ func TestMutationDeleteTimestamp(t *testing.T) { t.FailNow() } - index, err := bitmapdb.Get64(tx, dbutils.AccountsHistoryBucket, addr[0].Bytes(), 0, math.MaxUint32) + index, err := bitmapdb.Get64(tx, kv.AccountsHistory, addr[0].Bytes(), 0, math.MaxUint32) if err != nil { t.Fatal(err) } @@ -66,7 +67,7 @@ func TestMutationDeleteTimestamp(t *testing.T) { } count := 0 - err = changeset.Walk(tx, dbutils.StorageChangeSetBucket, dbutils.EncodeBlockNumber(1), 8*8, func(blockN uint64, k, v []byte) (bool, error) { + err = changeset.Walk(tx, kv.StorageChangeSet, dbutils.EncodeBlockNumber(1), 8*8, func(blockN uint64, k, v []byte) (bool, error) { count++ return true, nil }) @@ -77,13 +78,13 @@ func TestMutationDeleteTimestamp(t *testing.T) { t.Fatal("changeset must be deleted") } - found, err := tx.GetOne(dbutils.AccountsHistoryBucket, addr[0].Bytes()) + found, err := tx.GetOne(kv.AccountsHistory, addr[0].Bytes()) require.NoError(t, err) require.Nil(t, found, "account must be deleted") } func TestMutationCommit(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) numOfAccounts := 5 numOfStateKeys := 5 @@ -105,7 +106,7 @@ func TestMutationCommit(t *testing.T) { t.Fatal("Accounts not equals") } - index, err := bitmapdb.Get64(tx, dbutils.AccountsHistoryBucket, addr.Bytes(), 0, math.MaxUint32) + index, err := bitmapdb.Get64(tx, kv.AccountsHistory, addr.Bytes(), 0, math.MaxUint32) if err != nil { t.Fatal(err) } @@ -116,7 +117,7 @@ func TestMutationCommit(t *testing.T) { } resAccStorage := make(map[common.Hash]uint256.Int) - err = tx.ForPrefix(dbutils.PlainStateBucket, dbutils.PlainGenerateStoragePrefix(addr[:], acc.Incarnation), func(k, v []byte) error { + err = tx.ForPrefix(kv.PlainStateBucket, 
dbutils.PlainGenerateStoragePrefix(addr[:], acc.Incarnation), func(k, v []byte) error { resAccStorage[common.BytesToHash(k[common.AddressLength+8:])] = *uint256.NewInt(0).SetBytes(v) return nil }) @@ -144,7 +145,7 @@ func TestMutationCommit(t *testing.T) { } changeSetInDB := changeset.NewAccountChangeSet() - err := changeset.Walk(tx, dbutils.AccountChangeSetBucket, dbutils.EncodeBlockNumber(2), 8*8, func(_ uint64, k, v []byte) (bool, error) { + err := changeset.Walk(tx, kv.AccountChangeSet, dbutils.EncodeBlockNumber(2), 8*8, func(_ uint64, k, v []byte) (bool, error) { if err := changeSetInDB.Add(k, v); err != nil { return false, err } @@ -176,7 +177,7 @@ func TestMutationCommit(t *testing.T) { } cs := changeset.NewStorageChangeSet() - err = changeset.Walk(tx, dbutils.StorageChangeSetBucket, dbutils.EncodeBlockNumber(2), 8*8, func(_ uint64, k, v []byte) (bool, error) { + err = changeset.Walk(tx, kv.StorageChangeSet, dbutils.EncodeBlockNumber(2), 8*8, func(_ uint64, k, v []byte) (bool, error) { if err2 := cs.Add(k, v); err2 != nil { return false, err2 } @@ -285,7 +286,7 @@ func randomAccount(t *testing.T) (*accounts.Account, common.Address) { */ func TestWalkAsOfStatePlain(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) emptyVal := uint256.NewInt(0) block3Val := uint256.NewInt(0).SetBytes([]byte("block 3")) @@ -445,7 +446,7 @@ func TestWalkAsOfStatePlain(t *testing.T) { } func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) emptyVal := uint256.NewInt(0) block3Val := uint256.NewInt(0).SetBytes([]byte("block 3")) @@ -651,7 +652,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) { } func TestWalkAsOfAccountPlain(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) emptyValAcc := accounts.NewAccount() emptyVal := make([]byte, emptyValAcc.EncodingLengthForStorage()) @@ -799,7 +800,7 @@ func TestWalkAsOfAccountPlain(t *testing.T) { } func 
TestWalkAsOfAccountPlain_WithChunks(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) emptyValAcc := accounts.NewAccount() emptyVal := make([]byte, emptyValAcc.EncodingLengthForStorage()) @@ -950,7 +951,7 @@ func TestWalkAsOfAccountPlain_WithChunks(t *testing.T) { } func TestWalkAsOfStoragePlain_WithChunks(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) numOfAccounts := uint8(4) addrs := make([]common.Address, numOfAccounts) diff --git a/core/state/intra_block_state_test.go b/core/state/intra_block_state_test.go index da2143322fe..35d6f1027ad 100644 --- a/core/state/intra_block_state_test.go +++ b/core/state/intra_block_state_test.go @@ -30,7 +30,7 @@ import ( "testing/quick" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "gopkg.in/check.v1" @@ -217,7 +217,7 @@ func (test *snapshotTest) String() string { func (test *snapshotTest) run() bool { // Run all actions and create snapshots. 
- db := kv.NewMemKV() + db := memdb.New() defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -348,7 +348,7 @@ func TestAccessList(t *testing.T) { addr := common.HexToAddress slot := common.HexToHash - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) state := New(NewPlainState(tx, 0)) state.accessList = newAccessList() diff --git a/core/state/plain_readonly.go b/core/state/plain_readonly.go index e23147b0b87..2eaef6edfbb 100644 --- a/core/state/plain_readonly.go +++ b/core/state/plain_readonly.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/petar/GoLLRB/llrb" ) @@ -40,12 +40,12 @@ func (a *storageItem) Less(b llrb.Item) bool { } type PlainState struct { - tx ethdb.Tx + tx kv.Tx blockNr uint64 storage map[common.Address]*llrb.LLRB } -func NewPlainState(tx ethdb.Tx, blockNr uint64) *PlainState { +func NewPlainState(tx kv.Tx, blockNr uint64) *PlainState { return &PlainState{ tx: tx, blockNr: blockNr, @@ -150,7 +150,7 @@ func (s *PlainState) ReadAccountData(address common.Address) (*accounts.Account, } //restore codehash if a.Incarnation > 0 && a.IsEmptyCodeHash() { - if codeHash, err1 := s.tx.GetOne(dbutils.PlainContractCodeBucket, dbutils.PlainGenerateStoragePrefix(address[:], a.Incarnation)); err1 == nil { + if codeHash, err1 := s.tx.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], a.Incarnation)); err1 == nil { if len(codeHash) > 0 { a.CodeHash = common.BytesToHash(codeHash) } @@ -177,7 +177,7 @@ func (s *PlainState) ReadAccountCode(address common.Address, incarnation uint64, if bytes.Equal(codeHash[:], emptyCodeHash) { return nil, nil } - code, err := s.tx.GetOne(dbutils.CodeBucket, codeHash[:]) + code, err := s.tx.GetOne(kv.CodeBucket, codeHash[:]) if 
len(code) == 0 { return nil, nil } diff --git a/core/state/plain_state_reader.go b/core/state/plain_state_reader.go index 8a5fe388eea..cc716ce1b47 100644 --- a/core/state/plain_state_reader.go +++ b/core/state/plain_state_reader.go @@ -7,7 +7,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) var _ StateReader = (*PlainStateReader)(nil) @@ -16,17 +16,17 @@ var _ StateReader = (*PlainStateReader)(nil) // Data in the plain state is stored using un-hashed account/storage items // as opposed to the "normal" state that uses hashes of merkle paths to store items. type PlainStateReader struct { - db ethdb.KVGetter + db kv.Getter } -func NewPlainStateReader(db ethdb.KVGetter) *PlainStateReader { +func NewPlainStateReader(db kv.Getter) *PlainStateReader { return &PlainStateReader{ db: db, } } func (r *PlainStateReader) ReadAccountData(address common.Address) (*accounts.Account, error) { - enc, err := r.db.GetOne(dbutils.PlainStateBucket, address.Bytes()) + enc, err := r.db.GetOne(kv.PlainStateBucket, address.Bytes()) if err != nil { return nil, err } @@ -42,7 +42,7 @@ func (r *PlainStateReader) ReadAccountData(address common.Address) (*accounts.Ac func (r *PlainStateReader) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { compositeKey := dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes()) - enc, err := r.db.GetOne(dbutils.PlainStateBucket, compositeKey) + enc, err := r.db.GetOne(kv.PlainStateBucket, compositeKey) if err != nil { return nil, err } @@ -56,7 +56,7 @@ func (r *PlainStateReader) ReadAccountCode(address common.Address, incarnation u if bytes.Equal(codeHash.Bytes(), emptyCodeHash) { return nil, nil } - code, err := r.db.GetOne(dbutils.CodeBucket, codeHash.Bytes()) + code, err := 
r.db.GetOne(kv.CodeBucket, codeHash.Bytes()) if len(code) == 0 { return nil, nil } @@ -69,7 +69,7 @@ func (r *PlainStateReader) ReadAccountCodeSize(address common.Address, incarnati } func (r *PlainStateReader) ReadAccountIncarnation(address common.Address) (uint64, error) { - b, err := r.db.GetOne(dbutils.IncarnationMapBucket, address.Bytes()) + b, err := r.db.GetOne(kv.IncarnationMap, address.Bytes()) if err != nil { return 0, err } diff --git a/core/state/plain_state_writer.go b/core/state/plain_state_writer.go index f01f655374a..67a6074468e 100644 --- a/core/state/plain_state_writer.go +++ b/core/state/plain_state_writer.go @@ -7,15 +7,15 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/shards" ) var _ WriterWithChangeSets = (*PlainStateWriter)(nil) type putDel interface { - ethdb.Putter - ethdb.Deleter + kv.Putter + kv.Deleter } type PlainStateWriter struct { db putDel @@ -23,7 +23,7 @@ type PlainStateWriter struct { accumulator *shards.Accumulator } -func NewPlainStateWriter(db putDel, changeSetsDB ethdb.RwTx, blockNumber uint64) *PlainStateWriter { +func NewPlainStateWriter(db putDel, changeSetsDB kv.RwTx, blockNumber uint64) *PlainStateWriter { return &PlainStateWriter{ db: db, csw: NewChangeSetWriterPlain(changeSetsDB, blockNumber), @@ -52,7 +52,7 @@ func (w *PlainStateWriter) UpdateAccountData(address common.Address, original, a if w.accumulator != nil { w.accumulator.ChangeAccount(address, value) } - return w.db.Put(dbutils.PlainStateBucket, address[:], value) + return w.db.Put(kv.PlainStateBucket, address[:], value) } func (w *PlainStateWriter) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { @@ -64,10 +64,10 @@ func (w *PlainStateWriter) UpdateAccountCode(address common.Address, 
incarnation if w.accumulator != nil { w.accumulator.ChangeCode(address, incarnation, code) } - if err := w.db.Put(dbutils.CodeBucket, codeHash[:], code); err != nil { + if err := w.db.Put(kv.CodeBucket, codeHash[:], code); err != nil { return err } - return w.db.Put(dbutils.PlainContractCodeBucket, dbutils.PlainGenerateStoragePrefix(address[:], incarnation), codeHash[:]) + return w.db.Put(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation), codeHash[:]) } func (w *PlainStateWriter) DeleteAccount(address common.Address, original *accounts.Account) error { @@ -79,13 +79,13 @@ func (w *PlainStateWriter) DeleteAccount(address common.Address, original *accou if w.accumulator != nil { w.accumulator.DeleteAccount(address) } - if err := w.db.Delete(dbutils.PlainStateBucket, address[:], nil); err != nil { + if err := w.db.Delete(kv.PlainStateBucket, address[:], nil); err != nil { return err } if original.Incarnation > 0 { var b [8]byte binary.BigEndian.PutUint64(b[:], original.Incarnation) - if err := w.db.Put(dbutils.IncarnationMapBucket, address[:], b[:]); err != nil { + if err := w.db.Put(kv.IncarnationMap, address[:], b[:]); err != nil { return err } } @@ -108,9 +108,9 @@ func (w *PlainStateWriter) WriteAccountStorage(address common.Address, incarnati w.accumulator.ChangeStorage(address, incarnation, *key, v) } if len(v) == 0 { - return w.db.Delete(dbutils.PlainStateBucket, compositeKey, nil) + return w.db.Delete(kv.PlainStateBucket, compositeKey, nil) } - return w.db.Put(dbutils.PlainStateBucket, compositeKey, v) + return w.db.Put(kv.PlainStateBucket, compositeKey, v) } func (w *PlainStateWriter) CreateContract(address common.Address) error { diff --git a/core/state/state_test.go b/core/state/state_test.go index a3efc44bbff..a41190c1ed9 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -23,20 +23,20 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/ethdb/kv" + 
"github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" checker "gopkg.in/check.v1" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" ) var toAddr = common.BytesToAddress type StateSuite struct { - kv ethdb.RwKV - tx ethdb.RwTx + kv kv.RwDB + tx kv.RwTx state *IntraBlockState r StateReader w StateWriter @@ -102,7 +102,7 @@ func (s *StateSuite) TestDump(c *checker.C) { } func (s *StateSuite) SetUpTest(c *checker.C) { - s.kv = kv.NewMemKV() + s.kv = memdb.New() tx, err := s.kv.BeginRw(context.Background()) //nolint if err != nil { panic(err) @@ -176,7 +176,7 @@ func (s *StateSuite) TestSnapshotEmpty(c *checker.C) { // use testing instead of checker because checker does not support // printing/logging in tests (-check.vv does not work) func TestSnapshot2(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) w := NewPlainState(tx, 0) state := New(NewPlainState(tx, 0)) @@ -291,7 +291,7 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) { } func TestDump(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) w := NewPlainStateWriter(tx, tx, 0) state := New(NewPlainStateReader(tx)) diff --git a/core/tx_pool.go b/core/tx_pool.go index 3c981a2769e..a8c231fbf41 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -31,6 +31,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/event" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" @@ -257,7 +258,7 @@ type txpoolResetRequest struct { // NewTxPool creates a new transaction pool to gather, sort and filter inbound // transactions from the network. 
-func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chaindb ethdb.RwKV) *TxPool { +func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chaindb kv.RwDB) *TxPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() @@ -278,7 +279,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chaindb eth reorgShutdownCh: make(chan struct{}, 1), gasPrice: new(uint256.Int).SetUint64(config.PriceLimit), stopCh: make(chan struct{}), - chaindb: kv.NewObjectDatabase(chaindb), + chaindb: olddb.NewObjectDatabase(chaindb), } pool.locals = newAccountSet(pool.signer) for _, addr := range pool.config.Locals { diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index baa0458d12f..358bfb9be1d 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -33,8 +33,8 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "github.com/stretchr/testify/require" ) @@ -101,7 +101,7 @@ func setupTxPool(t testing.TB) (*TxPool, *ecdsa.PrivateKey) { } func setupTxPoolWithConfig(t testing.TB, config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) key, _ := crypto.GenerateKey() pool := NewTxPool(TestTxPoolConfig, config, db) @@ -181,12 +181,12 @@ func deriveSender(tx types.Transaction) (common.Address, error) { // state reset and tests whether the pending state is in sync with the // block head event that initiated the resetState(). 
func TestStateChangeDuringTransactionPoolReset(t *testing.T) { - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) var ( key, _ = crypto.GenerateKey() address = crypto.PubkeyToAddress(key.PublicKey) ) - err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + err := db.Update(context.Background(), func(tx kv.RwTx) error { stateWriter := state.NewPlainStateWriter(tx, nil, 1) ibs := state.New(state.NewPlainStateReader(tx)) @@ -355,7 +355,7 @@ func TestTransactionChainFork(t *testing.T) { addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { - err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + err := db.Update(context.Background(), func(tx kv.RwTx) error { stateWriter := state.NewPlainStateWriter(tx, nil, 1) ibs := state.New(state.NewPlainStateReader(tx)) ibs.AddBalance(addr, uint256.NewInt(100000000000000)) @@ -583,7 +583,7 @@ func TestTransactionDropping(t *testing.T) { // postponed back into the future queue to prevent broadcasting them. func TestTransactionPostponing(t *testing.T) { // Create the pool to test the postponing with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) if err := pool.Start(1000000000, 0); err != nil { @@ -792,7 +792,7 @@ func TestTransactionQueueGlobalLimitingNoLocals(t *testing.T) { func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { // Create the pool to test the limit enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.NoLocals = nolocals @@ -886,7 +886,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { evictionInterval = time.Millisecond * 100 // Create the pool to test the non-expiration enforcement - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.Lifetime = time.Second @@ -1004,7 +1004,7 @@ func TestTransactionPendingLimiting(t *testing.T) { // attacks. 
func TestTransactionPendingGlobalLimiting(t *testing.T) { // Create the pool to test the limit enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 @@ -1105,7 +1105,7 @@ func TestTransactionAllowedTxSize(t *testing.T) { // Tests that if transactions start being capped, transactions are also removed from 'all' func TestTransactionCapClearsFromAll(t *testing.T) { // Create the pool to test the limit enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.AccountSlots = 2 @@ -1141,7 +1141,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) { // the transactions are still kept. func TestTransactionPendingMinimumAllowance(t *testing.T) { // Create the pool to test the limit enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.GlobalSlots = 1 @@ -1192,7 +1192,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { func TestTransactionPoolRepricing(t *testing.T) { t.Skip("deadlock") // Create the pool to test the pricing enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) if err := pool.Start(1000000000, 0); err != nil { @@ -1315,7 +1315,7 @@ func TestTransactionPoolRepricing(t *testing.T) { // remove local transactions. func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { // Create the pool to test the pricing enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) if err := pool.Start(1000000000, 0); err != nil { @@ -1379,7 +1379,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { // Note, local transactions are never allowed to be dropped. 
func TestTransactionPoolUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.GlobalSlots = 2 @@ -1487,7 +1487,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { // back and forth between queued/pending. func TestTransactionPoolStableUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.GlobalSlots = 128 @@ -1553,7 +1553,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { // Tests that the pool rejects duplicate transactions. func TestTransactionDeduplication(t *testing.T) { - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) if err := pool.Start(1000000000, 0); err != nil { @@ -1621,7 +1621,7 @@ func TestTransactionDeduplication(t *testing.T) { // price bump required. func TestTransactionReplacement(t *testing.T) { // Create the pool to test the pricing enforcement with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) if err := pool.Start(1000000000, 0); err != nil { @@ -1718,7 +1718,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { os.Remove(journal) // Create the original pool to inject transaction into the journal - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config := TestTxPoolConfig config.NoLocals = nolocals @@ -1737,7 +1737,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { local, _ := crypto.GenerateKey() remote, _ := crypto.GenerateKey() - err = db.Update(context.Background(), func(tx ethdb.RwTx) error { + err = db.Update(context.Background(), func(tx kv.RwTx) error { stateWriter := state.NewPlainStateWriter(tx, nil, 1) ibs := state.New(state.NewPlainStateReader(tx)) ibs.AddBalance(crypto.PubkeyToAddress(local.PublicKey), 
uint256.NewInt(1000000000)) @@ -1773,7 +1773,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive pool.Stop() - err = db.Update(context.Background(), func(tx ethdb.RwTx) error { + err = db.Update(context.Background(), func(tx kv.RwTx) error { stateWriter := state.NewPlainStateWriter(tx, nil, 1) ibs := state.New(state.NewPlainStateReader(tx)) ibs.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) @@ -1802,7 +1802,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { t.Fatalf("pool internal state corrupted: %v", err) } // Bump the nonce temporarily and ensure the newly invalidated transaction is removed - err = db.Update(context.Background(), func(tx ethdb.RwTx) error { + err = db.Update(context.Background(), func(tx kv.RwTx) error { stateWriter := state.NewPlainStateWriter(tx, nil, 1) ibs := state.New(state.NewPlainStateReader(tx)) ibs.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) @@ -1815,7 +1815,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { pool.Stop() - err = db.Update(context.Background(), func(tx ethdb.RwTx) error { + err = db.Update(context.Background(), func(tx kv.RwTx) error { stateWriter := state.NewPlainStateWriter(tx, nil, 1) ibs := state.New(state.NewPlainStateReader(tx)) ibs.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) @@ -1850,7 +1850,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { // pending status of individual transactions. 
func TestTransactionStatusCheck(t *testing.T) { // Create the pool to test the status retrievals with - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) if err := pool.Start(1000000000, 0); err != nil { diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index 022741d36bb..c175421f19f 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -26,7 +26,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" ) @@ -87,7 +87,7 @@ func TestEIP2200(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) { address := common.BytesToAddress([]byte("contract")) - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) s := state.New(state.NewPlainStateReader(tx)) s.CreateAccount(address, true) diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 382c6fe080b..c147ce65257 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -22,7 +22,8 @@ import ( "time" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/state" @@ -116,7 +117,7 @@ func Execute(code, input []byte, cfg *Config, blockNr uint64) ([]byte, *state.In setDefaults(cfg) if cfg.State == nil { - db := kv.NewObjectDatabase(kv.NewMemKV()) + db := olddb.NewObjectDatabase(memdb.New()) defer db.Close() cfg.r = state.NewDbStateReader(db) cfg.w = state.NewDbStateWriter(db, 0) @@ -154,7 +155,7 @@ func Create(input []byte, cfg *Config, blockNr uint64) ([]byte, common.Address, setDefaults(cfg) if cfg.State == nil { - db := kv.NewObjectDatabase(kv.NewMemKV()) + db := 
olddb.NewObjectDatabase(memdb.New()) defer db.Close() cfg.r = state.NewDbStateReader(db) cfg.w = state.NewDbStateWriter(db, 0) diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 7cd801275cb..5a125cf55c5 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -31,7 +31,7 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" ) @@ -103,7 +103,7 @@ func TestExecute(t *testing.T) { } func TestCall(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) state := state.New(state.NewDbStateReader(tx)) address := common.HexToAddress("0x0a") state.SetCode(address, []byte{ @@ -159,7 +159,7 @@ func BenchmarkCall(b *testing.B) { } } func benchmarkEVM_Create(bench *testing.B, code string) { - _, tx := kv.NewTestTx(bench) + _, tx := memdb.NewTestTx(bench) var ( statedb = state.New(state.NewPlainState(tx, 0)) sender = common.BytesToAddress([]byte("sender")) @@ -329,7 +329,7 @@ func TestBlockhash(t *testing.T) { func benchmarkNonModifyingCode(gas uint64, code []byte, name string, b *testing.B) { //nolint:unparam cfg := new(Config) setDefaults(cfg) - _, tx := kv.NewTestTx(b) + _, tx := memdb.NewTestTx(b) cfg.State = state.New(state.NewPlainState(tx, 0)) cfg.GasLimit = gas var ( diff --git a/eth/backend.go b/eth/backend.go index 8ef5185c394..1431e9647a8 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -36,7 +36,6 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" "github.com/ledgerwatch/erigon/cmd/sentry/download" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/consensus" @@ -53,9 +52,10 @@ import ( 
"github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + remotedbserver2 "github.com/ledgerwatch/erigon/ethdb/remotedbserver" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/p2p" @@ -78,12 +78,13 @@ type Config = ethconfig.Config // Ethereum implements the Ethereum full node service. type Ethereum struct { config *ethconfig.Config + logger log.Logger // Handlers txPool *core.TxPool // DB interfaces - chainKV ethdb.RwKV + chainKV kv.RwDB privateAPI *grpc.Server engine consensus.Engine @@ -120,7 +121,7 @@ type Ethereum struct { // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) -func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { +func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) { if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 { log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice) config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) @@ -132,7 +133,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } // Assemble the Ethereum object - chainKv, err := node.OpenDatabase(stack.Config(), ethdb.Chain) + chainKv, err := node.OpenDatabase(stack.Config(), logger, kv.ChainDB) if err != nil { return nil, err } @@ -141,8 +142,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { config.Snapshot.Dir = stack.Config().ResolvePath("snapshots") if config.Snapshot.Enabled { var peerID string - if err = 
chainKv.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, []byte(dbutils.BittorrentPeerID)) + if err = chainKv.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, []byte(kv.BittorrentPeerID)) if err != nil { return err } @@ -157,7 +158,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } if len(peerID) == 0 { log.Info("Generate new bittorent peerID", "id", common.Bytes2Hex(torrentClient.PeerID())) - if err = chainKv.Update(context.Background(), func(tx ethdb.RwTx) error { + if err = chainKv.Update(context.Background(), func(tx kv.RwTx) error { return torrentClient.SavePeerID(tx) }); err != nil { log.Error("Bittorrent peerID haven't saved", "err", err) @@ -182,6 +183,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend := &Ethereum{ config: config, + logger: logger, chainKV: chainKv, networkID: config.NetworkID, etherbase: config.Miner.Etherbase, @@ -192,7 +194,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { waitForMiningStop: make(chan struct{}), sentries: []remote.SentryClient{}, notifications: &stagedsync.Notifications{ - Events: remotedbserver.NewEvents(), + Events: privateapi.NewEvents(), Accumulator: &shards.Accumulator{}, }, } @@ -209,11 +211,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { consensusConfig = &config.Ethash } - backend.engine = ethconfig.CreateConsensusEngine(chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify) + backend.engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify) log.Info("Initialising Ethereum protocol", "network", config.NetworkID) - if err := chainKv.Update(context.Background(), func(tx ethdb.RwTx) error { + if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error { if err := prune.SetIfNotExist(tx, config.Prune); 
err != nil { return err } @@ -284,10 +286,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { ethashApi = casted.APIs(nil)[1].Service.(*ethash.API) } - kvRPC := remotedbserver.NewKvServer(backend.chainKV) - ethBackendRPC := remotedbserver.NewEthBackendServer(backend, backend.notifications.Events) - txPoolRPC := remotedbserver.NewTxPoolServer(context.Background(), backend.txPool) - miningRPC := remotedbserver.NewMiningServer(context.Background(), backend, ethashApi) + kvRPC := remotedbserver2.NewKvServer(backend.chainKV) + ethBackendRPC := privateapi.NewEthBackendServer(backend, backend.notifications.Events) + txPoolRPC := privateapi.NewTxPoolServer(context.Background(), backend.txPool) + miningRPC := privateapi.NewMiningServer(context.Background(), backend, ethashApi) if stack.Config().PrivateApiAddr != "" { @@ -323,7 +325,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } - backend.privateAPI, err = remotedbserver.StartGrpc( + backend.privateAPI, err = privateapi.StartGrpc( kvRPC, ethBackendRPC, txPoolRPC, @@ -335,7 +337,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { return nil, err } } else { - backend.privateAPI, err = remotedbserver.StartGrpc( + backend.privateAPI, err = privateapi.StartGrpc( kvRPC, ethBackendRPC, txPoolRPC, @@ -361,7 +363,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } else { var readNodeInfo = func() *eth.NodeInfo { var res *eth.NodeInfo - _ = backend.chainKV.View(context.Background(), func(tx ethdb.Tx) error { + _ = backend.chainKV.View(context.Background(), func(tx kv.Tx) error { res = eth.ReadNodeInfo(tx, backend.chainConfig, backend.genesisHash, backend.networkID) return nil }) @@ -428,6 +430,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.stagedSync, err = stages2.NewStagedSync2( backend.downloadCtx, + backend.logger, backend.chainKV, *config, 
backend.downloadServer, @@ -442,7 +445,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } if config.BadBlock != 0 { var badHash common.Hash - if err = chainKv.View(context.Background(), func(tx ethdb.Tx) error { + if err = chainKv.View(context.Background(), func(tx kv.Tx) error { var hErr error badHash, hErr = rawdb.ReadCanonicalHash(tx, config.BadBlock) return hErr @@ -549,7 +552,7 @@ func (s *Ethereum) shouldPreserve(block *types.Block) bool { //nolint // StartMining starts the miner with the given number of CPU threads. If mining // is already running, this method adjust the number of threads allowed to use // and updates the minimum price required by the transaction pool. -func (s *Ethereum) StartMining(ctx context.Context, kv ethdb.RwKV, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}) error { +func (s *Ethereum) StartMining(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}) error { if !cfg.Enabled { return nil } @@ -633,7 +636,7 @@ func (s *Ethereum) StartMining(ctx context.Context, kv ethdb.RwKV, mining *stage func (s *Ethereum) IsMining() bool { return s.config.Miner.Enabled } func (s *Ethereum) TxPool() *core.TxPool { return s.txPool } -func (s *Ethereum) ChainKV() ethdb.RwKV { return s.chainKV } +func (s *Ethereum) ChainKV() kv.RwDB { return s.chainKV } func (s *Ethereum) NetVersion() (uint64, error) { return s.networkID, nil } func (s *Ethereum) NetPeerCount() (uint64, error) { var sentryPc uint64 = 0 @@ -674,7 +677,7 @@ func (s *Ethereum) Start() error { }(i) } - go Loop(s.downloadCtx, s.chainKV, s.stagedSync, s.downloadServer, s.notifications, s.waitForStageLoopStop, s.config.SyncLoopThrottle) + go Loop(s.downloadCtx, s.logger, s.chainKV, s.stagedSync, s.downloadServer, s.notifications, s.waitForStageLoopStop, s.config.SyncLoopThrottle) return nil } @@ -717,7 +720,8 @@ func (s *Ethereum) Stop() error { 
//Deprecated - use stages.StageLoop func Loop( ctx context.Context, - db ethdb.RwKV, sync *stagedsync.Sync, + logger log.Logger, + db kv.RwDB, sync *stagedsync.Sync, controlServer *download.ControlServerImpl, notifications *stagedsync.Notifications, waitForDone chan struct{}, @@ -726,6 +730,7 @@ func Loop( defer debug.LogPanic() stages2.StageLoop( ctx, + logger, db, sync, controlServer.Hd, diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 7aa3eb95fca..4738f3e4be1 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -177,7 +177,7 @@ type Config struct { SyncLoopThrottle time.Duration } -func CreateConsensusEngine(chainConfig *params.ChainConfig, config interface{}, notify []string, noverify bool) consensus.Engine { +func CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, config interface{}, notify []string, noverify bool) consensus.Engine { var eng consensus.Engine switch consensusCfg := config.(type) { @@ -205,12 +205,12 @@ func CreateConsensusEngine(chainConfig *params.ChainConfig, config interface{}, } case *params.SnapshotConfig: if chainConfig.Clique != nil { - eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, consensusCfg.InMemory)) + eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory)) } case *params.AuRaConfig: if chainConfig.Aura != nil { var err error - eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, consensusCfg.InMemory), chainConfig.Aura.Etherbase, consensusconfig.Sokol) + eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), chainConfig.Aura.Etherbase, consensusconfig.Sokol) if err != nil { panic(err) } diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index eb9d53f3b66..b5e531d715b 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -25,7 +25,7 @@ 
import ( "time" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/ethash" @@ -36,7 +36,7 @@ import ( ) var ( - testdb = kv.NewMemKV() + testdb = memdb.New() testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testAddress = crypto.PubkeyToAddress(testKey.PublicKey) genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000)) @@ -48,7 +48,7 @@ var ( // contains a transaction and every 5th an uncle to allow testing correct block // reassembly. func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { - db := kv.NewMemKV() + db := memdb.New() defer db.Close() core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000)) chain, err := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), db, n, func(i int, block *core.BlockGen) { diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 5c96d5a06c6..6179f282b94 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -29,14 +29,14 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/gasprice" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/stages" ) type testBackend struct { - db ethdb.RwKV + db kv.RwDB cfg *params.ChainConfig } diff --git a/eth/integrity/trie.go b/eth/integrity/trie.go index 7d9d7cd0306..721cfc4008a 100644 --- a/eth/integrity/trie.go +++ b/eth/integrity/trie.go @@ -9,9 +9,9 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" 
"github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/trie" ) @@ -23,7 +23,7 @@ func AssertSubset(prefix []byte, a, b uint16) { } } -func Trie(tx ethdb.Tx, slowChecks bool, ctx context.Context) { +func Trie(tx kv.Tx, slowChecks bool, ctx context.Context) { quit := ctx.Done() logEvery := time.NewTicker(10 * time.Second) defer logEvery.Stop() @@ -32,15 +32,15 @@ func Trie(tx ethdb.Tx, slowChecks bool, ctx context.Context) { buf2 := make([]byte, 256) { - c, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + c, err := tx.Cursor(kv.TrieOfAccounts) if err != nil { panic(err) } - trieAcc2, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + trieAcc2, err := tx.Cursor(kv.TrieOfAccounts) if err != nil { panic(err) } - accC, err := tx.Cursor(dbutils.HashedAccountsBucket) + accC, err := tx.Cursor(kv.HashedAccounts) if err != nil { panic(err) } @@ -141,15 +141,15 @@ func Trie(tx ethdb.Tx, slowChecks bool, ctx context.Context) { } } { - c, err := tx.Cursor(dbutils.TrieOfStorageBucket) + c, err := tx.Cursor(kv.TrieOfStorage) if err != nil { panic(err) } - trieStorage, err := tx.Cursor(dbutils.TrieOfStorageBucket) + trieStorage, err := tx.Cursor(kv.TrieOfStorage) if err != nil { panic(err) } - storageC, err := tx.Cursor(dbutils.HashedStorageBucket) + storageC, err := tx.Cursor(kv.HashedStorage) if err != nil { panic(err) } diff --git a/eth/protocols/eth/discovery.go b/eth/protocols/eth/discovery.go index 0fd0fee0197..d88f2c5199b 100644 --- a/eth/protocols/eth/discovery.go +++ b/eth/protocols/eth/discovery.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ 
-41,7 +41,7 @@ func (e enrEntry) ENRKey() string { // StartENRUpdater starts the `eth` ENR updater loop, which listens for chain // head events and updates the requested node record whenever a fork is passed. -func StartENRUpdater(chainConfig *params.ChainConfig, genesisHash common.Hash, events *remotedbserver.Events, ln *enode.LocalNode) { +func StartENRUpdater(chainConfig *params.ChainConfig, genesisHash common.Hash, events *privateapi.Events, ln *enode.LocalNode) { events.AddHeaderSubscription(func(h *types.Header) error { ln.Set(CurrentENREntry(chainConfig, genesisHash, h.Number.Uint64())) return nil }) diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 9ec7304c002..1f63ca57704 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/p2p/enr" @@ -61,7 +61,7 @@ type Handler func(peer *Peer) error // Backend defines the data retrieval methods to serve remote requests and the // callback methods to invoke on remote deliveries. type Backend interface { - DB() ethdb.RwKV + DB() kv.RwDB // TxPool retrieves the transaction pool object to serve data.
TxPool() TxPool @@ -128,12 +128,12 @@ type NodeInfo struct { Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block - Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules + Config *params.ChainConfig `json:"config"` // ChainDB configuration for the fork rules Head common.Hash `json:"head"` // Hex hash of the host's best owned block } // ReadNodeInfo retrieves some `eth` protocol metadata about the running host node. -func ReadNodeInfo(getter ethdb.KVGetter, config *params.ChainConfig, genesisHash common.Hash, network uint64) *NodeInfo { +func ReadNodeInfo(getter kv.Getter, config *params.ChainConfig, genesisHash common.Hash, network uint64) *NodeInfo { head := rawdb.ReadCurrentHeader(getter) td, _ := rawdb.ReadTd(getter, head.Hash(), head.Number.Uint64()) return &NodeInfo{ diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 043a6aa42c5..2874bc0484f 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -31,7 +31,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" @@ -52,7 +52,7 @@ var ( // purpose is to allow testing the request/reply workflows and wire serialization // in the `eth` protocol without actually doing any data processing. 
type testBackend struct { - db ethdb.RwKV + db kv.RwDB txpool *core.TxPool headBlock *types.Block genesis *types.Block @@ -98,7 +98,7 @@ func newTestBackendWithGenerator(t *testing.T, blocks int, generator func(int, * return b } -func (b *testBackend) DB() ethdb.RwKV { return b.db } +func (b *testBackend) DB() kv.RwDB { return b.db } func (b *testBackend) TxPool() eth.TxPool { return b.txpool } func (b *testBackend) RunPeer(peer *eth.Peer, handler eth.Handler) error { // Normally the backend would do peer mainentance and handshakes. All that @@ -113,7 +113,7 @@ func (b *testBackend) AcceptTxs() bool { func (b *testBackend) Handle(*eth.Peer, eth.Packet) error { panic("data processing tests should be done in the handler package") } -func (b *testBackend) GetBlockHashesFromHash(tx ethdb.Tx, hash common.Hash, max uint64) []common.Hash { +func (b *testBackend) GetBlockHashesFromHash(tx kv.Tx, hash common.Hash, max uint64) []common.Hash { // Get the origin header from which to fetch header, _ := rawdb.ReadHeaderByHash(tx, hash) if header == nil { @@ -441,7 +441,7 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { receipts []rlp.RawValue ) - err := m.DB.View(m.Ctx, func(tx ethdb.Tx) error { + err := m.DB.View(m.Ctx, func(tx kv.Tx) error { for i := uint64(0); i <= rawdb.ReadCurrentHeader(tx).Number.Uint64(); i++ { block := rawdb.ReadHeaderByNumber(tx, i) diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 36fb04216ed..6121dd480c5 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -23,7 +23,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" ) @@ -68,7 +68,7 @@ func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { return 
peer.ReplyBlockHeaders(query.RequestId, response) } -func AnswerGetBlockHeadersQuery(db ethdb.KVGetter, query *GetBlockHeadersPacket) ([]*types.Header, error) { +func AnswerGetBlockHeadersQuery(db kv.Getter, query *GetBlockHeadersPacket) ([]*types.Header, error) { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -191,7 +191,7 @@ func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { return peer.ReplyBlockBodiesRLP(query.RequestId, response) } -func AnswerGetBlockBodiesQuery(db ethdb.Tx, query GetBlockBodiesPacket) []rlp.RawValue { //nolint:unparam +func AnswerGetBlockBodiesQuery(db kv.Tx, query GetBlockBodiesPacket) []rlp.RawValue { //nolint:unparam // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -278,7 +278,7 @@ func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { return peer.ReplyReceiptsRLP(query.RequestId, response) } -func AnswerGetReceiptsQuery(db ethdb.Tx, query GetReceiptsPacket) ([]rlp.RawValue, error) { //nolint:unparam +func AnswerGetReceiptsQuery(db kv.Tx, query GetReceiptsPacket) ([]rlp.RawValue, error) { //nolint:unparam // Gather state data until the fetch or network limits is reached var ( bytes int diff --git a/eth/stagedsync/all_stages.go b/eth/stagedsync/all_stages.go index 7ff6c63cfb0..fbc9f1cc512 100644 --- a/eth/stagedsync/all_stages.go +++ b/eth/stagedsync/all_stages.go @@ -2,12 +2,12 @@ package stagedsync import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) // UpdateMetrics - need update metrics manually because current "metrics" package doesn't support labels // need to fix it in future -func UpdateMetrics(tx ethdb.Tx) error { +func UpdateMetrics(tx kv.Tx) error { var progress uint64 var err error progress, err = stages.GetStageProgress(tx, stages.Headers) diff --git a/eth/stagedsync/chain_reader.go 
b/eth/stagedsync/chain_reader.go index 78f2b1c6474..1260c1e86e4 100644 --- a/eth/stagedsync/chain_reader.go +++ b/eth/stagedsync/chain_reader.go @@ -4,7 +4,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) @@ -12,7 +12,7 @@ import ( // Implements consensus.ChainReader type ChainReader struct { Cfg params.ChainConfig - Db ethdb.KVGetter + Db kv.Getter } // Config retrieves the blockchain's chain configuration. diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 627dcc448f6..493f0f71a24 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -4,7 +4,7 @@ import ( "context" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" ) @@ -33,26 +33,26 @@ func DefaultStages(ctx context.Context, { ID: stages.Headers, Description: "Download headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return HeadersForward(s, u, ctx, tx, headers, firstCycle, test) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return HeadersUnwind(u, s, tx, headers) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return HeadersPrune(p, tx, headers, ctx) }, }, { ID: stages.BlockHashes, Description: "Write block hashes", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s 
*StageState, u Unwinder, tx kv.RwTx) error { return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneBlockHashStage(p, tx, blockHashCfg, ctx) }, }, @@ -61,26 +61,26 @@ func DefaultStages(ctx context.Context, Description: "Create headers snapshot", Disabled: true, DisabledDescription: "Enable by --snapshot.layout", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnHeadersSnapshotGenerationStage(s, tx, snapshotHeaders, firstCycle, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindHeadersSnapshotGenerationStage(u, tx, snapshotHeaders, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneHeadersSnapshotGenerationStage(p, tx, snapshotHeaders, ctx) }, }, { ID: stages.Bodies, Description: "Download block bodies", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return BodiesForward(s, u, ctx, tx, bodies, test) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindBodiesStage(u, tx, bodies, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p 
*PruneState, tx kv.RwTx) error { return PruneBodiesStage(p, tx, bodies, ctx) }, }, @@ -89,39 +89,39 @@ func DefaultStages(ctx context.Context, Description: "Create bodies snapshot", Disabled: true, DisabledDescription: "Enable by --snapshot.layout", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnBodiesSnapshotGenerationStage(s, tx, snapshotBodies, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindBodiesSnapshotGenerationStage(u, tx, snapshotBodies, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneBodiesSnapshotGenerationStage(p, tx, snapshotBodies, ctx) }, }, { ID: stages.Senders, Description: "Recover senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindSendersStage(u, tx, senders, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneSendersStage(p, tx, senders, ctx) }, }, { ID: stages.Execution, Description: "Execute blocks w/o hash checks", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle) }, - Unwind: func(firstCycle bool, u *UnwindState, s 
*StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneExecutionStage(p, tx, exec, ctx, firstCycle) }, }, @@ -130,13 +130,13 @@ func DefaultStages(ctx context.Context, Description: "Transpile marked EVM contracts to TEVM", Disabled: !sm.Experiments.TEVM, DisabledDescription: "Enable by adding `tevm` to --experiments", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnTranspileStage(s, tx, 0, trans, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindTranspileStage(u, s, tx, trans, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneTranspileStage(p, tx, trans, firstCycle, ctx) }, }, @@ -145,40 +145,40 @@ func DefaultStages(ctx context.Context, Description: "Create state snapshot", Disabled: true, DisabledDescription: "Enable by --snapshot.layout", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnStateSnapshotGenerationStage(s, tx, snapshotState, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindStateSnapshotGenerationStage(u, tx, snapshotState, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx 
kv.RwTx) error { return PruneStateSnapshotGenerationStage(p, tx, snapshotState, ctx) }, }, { ID: stages.HashState, Description: "Hash the key in the state", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnHashStateStage(s, tx, hashState, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindHashStateStage(u, s, tx, hashState, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneHashStateStage(p, tx, hashState, ctx) }, }, { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx) return err }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneIntermediateHashesStage(p, tx, trieCfg, ctx) }, }, @@ -186,91 +186,91 @@ func DefaultStages(ctx context.Context, ID: stages.CallTraces, Description: "Generate call traces index", DisabledDescription: "Work In Progress", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnCallTraces(s, tx, callTraces, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s 
*StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindCallTraces(u, s, tx, callTraces, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneCallTraces(p, tx, callTraces, ctx) }, }, { ID: stages.AccountHistoryIndex, Description: "Generate account history index", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnAccountHistoryIndex(s, tx, history, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindAccountHistoryIndex(u, s, tx, history, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneAccountHistoryIndex(p, tx, history, ctx) }, }, { ID: stages.StorageHistoryIndex, Description: "Generate storage history index", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnStorageHistoryIndex(s, tx, history, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindStorageHistoryIndex(u, s, tx, history, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneStorageHistoryIndex(p, tx, history, ctx) }, }, { ID: stages.LogIndex, Description: "Generate receipt logs index", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s 
*StageState, u Unwinder, tx kv.RwTx) error { return SpawnLogIndex(s, tx, logIndex, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindLogIndex(u, s, tx, logIndex, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneLogIndex(p, tx, logIndex, ctx) }, }, { ID: stages.TxLookup, Description: "Generate tx lookup index", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnTxLookup(s, tx, txLookup, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindTxLookup(u, s, tx, txLookup, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneTxLookup(p, tx, txLookup, ctx) }, }, { ID: stages.TxPool, Description: "Update transaction pool", - Forward: func(firstCycle bool, s *StageState, _ Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, _ Unwinder, tx kv.RwTx) error { return SpawnTxPool(s, tx, txPool, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindTxPool(u, s, tx, txPool, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneTxPool(p, tx, txPool, ctx) }, }, { ID: stages.Finish, Description: "Final: update current block for the RPC API", - Forward: func(firstCycle bool, s *StageState, _ Unwinder, tx ethdb.RwTx) error { + 
Forward: func(firstCycle bool, s *StageState, _ Unwinder, tx kv.RwTx) error { return FinishForward(s, tx, finish) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindFinish(u, tx, finish, ctx) }, - Prune: func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error { + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { return PruneFinish(p, tx, finish, ctx) }, }, diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 92bd50ccfeb..0194139d371 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -3,22 +3,22 @@ package stagedsync import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) // ExecFunc is the execution function for the stage to move forward. // * state - is the current state of the stage and contains stage data. // * unwinder - if the stage needs to cause unwinding, `unwinder` methods can be used. -type ExecFunc func(firstCycle bool, s *StageState, unwinder Unwinder, tx ethdb.RwTx) error +type ExecFunc func(firstCycle bool, s *StageState, unwinder Unwinder, tx kv.RwTx) error // UnwindFunc is the unwinding logic of the stage. // * unwindState - contains information about the unwind itself. // * stageState - represents the state of this stage at the beginning of unwind. -type UnwindFunc func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error +type UnwindFunc func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error // PruneFunc is the execution function for the stage to prune old data. // * state - is the current state of the stage and contains stage data. 
-type PruneFunc func(firstCycle bool, p *PruneState, tx ethdb.RwTx) error +type PruneFunc func(firstCycle bool, p *PruneState, tx kv.RwTx) error // Stage is a single sync stage in staged sync. type Stage struct { @@ -47,12 +47,12 @@ type StageState struct { func (s *StageState) LogPrefix() string { return s.state.LogPrefix() } // Update updates the stage state (current block number) in the database. Can be called multiple times during stage execution. -func (s *StageState) Update(db ethdb.Putter, newBlockNum uint64) error { +func (s *StageState) Update(db kv.Putter, newBlockNum uint64) error { return stages.SaveStageProgress(db, s.ID, newBlockNum) } // ExecutionAt gets the current state of the "Execution" stage, which block is currently executed. -func (s *StageState) ExecutionAt(db ethdb.KVGetter) (uint64, error) { +func (s *StageState) ExecutionAt(db kv.Getter) (uint64, error) { execution, err := stages.GetStageProgress(db, stages.Execution) return execution, err } @@ -77,7 +77,7 @@ type UnwindState struct { func (u *UnwindState) LogPrefix() string { return u.state.LogPrefix() } // Done updates the DB state of the stage. 
-func (u *UnwindState) Done(db ethdb.Putter) error { +func (u *UnwindState) Done(db kv.Putter) error { return stages.SaveStageProgress(db, u.ID, u.UnwindPoint) } @@ -89,6 +89,6 @@ type PruneState struct { } func (s *PruneState) LogPrefix() string { return s.state.LogPrefix() } -func (s *PruneState) Done(db ethdb.Putter) error { +func (s *PruneState) Done(db kv.Putter) error { return stages.SaveStagePruneProgress(db, s.ID, s.ForwardProgress) } diff --git a/eth/stagedsync/stage_blockhashes.go b/eth/stagedsync/stage_blockhashes.go index 8ffe59d031a..6d2f579e307 100644 --- a/eth/stagedsync/stage_blockhashes.go +++ b/eth/stagedsync/stage_blockhashes.go @@ -10,7 +10,7 @@ import ( "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) func extractHeaders(k []byte, v []byte, next etl.ExtractNextFunc) error { @@ -22,18 +22,18 @@ func extractHeaders(k []byte, v []byte, next etl.ExtractNextFunc) error { } type BlockHashesCfg struct { - db ethdb.RwKV + db kv.RwDB tmpDir string } -func StageBlockHashesCfg(db ethdb.RwKV, tmpDir string) BlockHashesCfg { +func StageBlockHashesCfg(db kv.RwDB, tmpDir string) BlockHashesCfg { return BlockHashesCfg{ db: db, tmpDir: tmpDir, } } -func SpawnBlockHashStage(s *StageState, tx ethdb.RwTx, cfg BlockHashesCfg, ctx context.Context) (err error) { +func SpawnBlockHashStage(s *StageState, tx kv.RwTx, cfg BlockHashesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -61,8 +61,8 @@ func SpawnBlockHashStage(s *StageState, tx ethdb.RwTx, cfg BlockHashesCfg, ctx c if err := etl.Transform( logPrefix, tx, - dbutils.HeadersBucket, - dbutils.HeaderNumberBucket, + kv.Headers, + kv.HeaderNumber, cfg.tmpDir, extractHeaders, etl.IdentityLoadFunc, @@ -85,7 +85,7 @@ func SpawnBlockHashStage(s *StageState, tx ethdb.RwTx, cfg 
BlockHashesCfg, ctx c return nil } -func UnwindBlockHashStage(u *UnwindState, tx ethdb.RwTx, cfg BlockHashesCfg, ctx context.Context) (err error) { +func UnwindBlockHashStage(u *UnwindState, tx kv.RwTx, cfg BlockHashesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -106,7 +106,7 @@ func UnwindBlockHashStage(u *UnwindState, tx ethdb.RwTx, cfg BlockHashesCfg, ctx return nil } -func PruneBlockHashStage(p *PruneState, tx ethdb.RwTx, cfg BlockHashesCfg, ctx context.Context) (err error) { +func PruneBlockHashStage(p *PruneState, tx kv.RwTx, cfg BlockHashesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index f4236a7bdb7..e5eb0a9d527 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -10,7 +10,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" "github.com/ledgerwatch/erigon/params" @@ -22,7 +22,7 @@ import ( var stageBodiesGauge = metrics.NewRegisteredGauge("stage/bodies", nil) type BodiesCfg struct { - db ethdb.RwKV + db kv.RwDB bd *bodydownload.BodyDownload bodyReqSend func(context.Context, *bodydownload.BodyRequest) []byte penalise func(context.Context, []headerdownload.PenaltyItem) @@ -33,7 +33,7 @@ type BodiesCfg struct { } func StageBodiesCfg( - db ethdb.RwKV, + db kv.RwDB, bd *bodydownload.BodyDownload, bodyReqSend func(context.Context, *bodydownload.BodyRequest) []byte, penalise func(context.Context, []headerdownload.PenaltyItem), @@ -50,7 +50,7 @@ func BodiesForward( s *StageState, u Unwinder, ctx context.Context, - tx ethdb.RwTx, + tx kv.RwTx, cfg BodiesCfg, test 
bool, // Set to true in tests, allows the stage to fail rather than wait indefinitely ) error { @@ -224,7 +224,7 @@ func logProgressBodies(logPrefix string, committed uint64, prevDeliveredCount, d "sys", common.StorageSize(m.Sys)) } -func UnwindBodiesStage(u *UnwindState, tx ethdb.RwTx, cfg BodiesCfg, ctx context.Context) (err error) { +func UnwindBodiesStage(u *UnwindState, tx kv.RwTx, cfg BodiesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -245,7 +245,7 @@ func UnwindBodiesStage(u *UnwindState, tx ethdb.RwTx, cfg BodiesCfg, ctx context return nil } -func PruneBodiesStage(s *PruneState, tx ethdb.RwTx, cfg BodiesCfg, ctx context.Context) (err error) { +func PruneBodiesStage(s *PruneState, tx kv.RwTx, cfg BodiesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_bodies_snapshot.go b/eth/stagedsync/stage_bodies_snapshot.go index 0a31a84e9a9..965f676625c 100644 --- a/eth/stagedsync/stage_bodies_snapshot.go +++ b/eth/stagedsync/stage_bodies_snapshot.go @@ -4,19 +4,19 @@ import ( "context" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) type SnapshotBodiesCfg struct { - db ethdb.RwKV + db kv.RwDB snapshotDir string tmpDir string client *snapshotsync.Client snapshotMigrator *snapshotsync.SnapshotMigrator } -func StageSnapshotBodiesCfg(db ethdb.RwKV, snapshot ethconfig.Snapshot, client *snapshotsync.Client, snapshotMigrator *snapshotsync.SnapshotMigrator, tmpDir string) SnapshotBodiesCfg { +func StageSnapshotBodiesCfg(db kv.RwDB, snapshot ethconfig.Snapshot, client *snapshotsync.Client, snapshotMigrator *snapshotsync.SnapshotMigrator, tmpDir string) SnapshotBodiesCfg { return SnapshotBodiesCfg{ db: db, snapshotDir: snapshot.Dir, @@ -26,11 +26,11 @@ func 
StageSnapshotBodiesCfg(db ethdb.RwKV, snapshot ethconfig.Snapshot, client * } } -func SpawnBodiesSnapshotGenerationStage(s *StageState, tx ethdb.RwTx, cfg SnapshotBodiesCfg, ctx context.Context) error { +func SpawnBodiesSnapshotGenerationStage(s *StageState, tx kv.RwTx, cfg SnapshotBodiesCfg, ctx context.Context) error { return nil } -func UnwindBodiesSnapshotGenerationStage(s *UnwindState, tx ethdb.RwTx, cfg SnapshotBodiesCfg, ctx context.Context) (err error) { +func UnwindBodiesSnapshotGenerationStage(s *UnwindState, tx kv.RwTx, cfg SnapshotBodiesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -51,7 +51,7 @@ func UnwindBodiesSnapshotGenerationStage(s *UnwindState, tx ethdb.RwTx, cfg Snap return nil } -func PruneBodiesSnapshotGenerationStage(s *PruneState, tx ethdb.RwTx, cfg SnapshotBodiesCfg, ctx context.Context) (err error) { +func PruneBodiesSnapshotGenerationStage(s *PruneState, tx kv.RwTx, cfg SnapshotBodiesCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_call_traces.go b/eth/stagedsync/stage_call_traces.go index 08cdcde6d4f..cae0edd30eb 100644 --- a/eth/stagedsync/stage_call_traces.go +++ b/eth/stagedsync/stage_call_traces.go @@ -19,22 +19,22 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/stack" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) type CallTracesCfg struct { - db ethdb.RwKV + db kv.RwDB prune prune.Mode ToBlock uint64 // not setting this params means no limit tmpdir string } func StageCallTracesCfg( - db ethdb.RwKV, + db kv.RwDB, prune prune.Mode, toBlock uint64, tmpdir string, @@ 
-47,7 +47,7 @@ func StageCallTracesCfg( } } -func SpawnCallTraces(s *StageState, tx ethdb.RwTx, cfg CallTracesCfg, ctx context.Context) error { +func SpawnCallTraces(s *StageState, tx kv.RwTx, cfg CallTracesCfg, ctx context.Context) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -87,7 +87,7 @@ func SpawnCallTraces(s *StageState, tx ethdb.RwTx, cfg CallTracesCfg, ctx contex return nil } -func promoteCallTraces(logPrefix string, tx ethdb.RwTx, startBlock, endBlock uint64, bufLimit datasize.ByteSize, flushEvery time.Duration, quit <-chan struct{}, cfg CallTracesCfg) error { +func promoteCallTraces(logPrefix string, tx kv.RwTx, startBlock, endBlock uint64, bufLimit datasize.ByteSize, flushEvery time.Duration, quit <-chan struct{}, cfg CallTracesCfg) error { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() @@ -100,7 +100,7 @@ func promoteCallTraces(logPrefix string, tx ethdb.RwTx, startBlock, endBlock uin checkFlushEvery := time.NewTicker(flushEvery) defer checkFlushEvery.Stop() - traceCursor, err := tx.RwCursorDupSort(dbutils.CallTraceSet) + traceCursor, err := tx.RwCursorDupSort(kv.CallTraceSet) if err != nil { return fmt.Errorf("failed to create cursor: %w", err) } @@ -212,7 +212,7 @@ func promoteCallTraces(logPrefix string, tx ethdb.RwTx, startBlock, endBlock uin return nil } -func finaliseCallTraces(collectorFrom, collectorTo *etl.Collector, logPrefix string, tx ethdb.RwTx, quit <-chan struct{}) error { +func finaliseCallTraces(collectorFrom, collectorTo *etl.Collector, logPrefix string, tx kv.RwTx, quit <-chan struct{}) error { var currentBitmap = roaring64.New() var buf = bytes.NewBuffer(nil) lastChunkKey := make([]byte, 128) @@ -252,16 +252,16 @@ func finaliseCallTraces(collectorFrom, collectorTo *etl.Collector, logPrefix str currentBitmap.Clear() return nil } - if err := collectorFrom.Load(logPrefix, tx, dbutils.CallFromIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := collectorFrom.Load(logPrefix, 
tx, kv.CallFromIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { return err } - if err := collectorTo.Load(logPrefix, tx, dbutils.CallToIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := collectorTo.Load(logPrefix, tx, kv.CallToIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { return err } return nil } -func UnwindCallTraces(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg CallTracesCfg, ctx context.Context) (err error) { +func UnwindCallTraces(u *UnwindState, s *StageState, tx kv.RwTx, cfg CallTracesCfg, ctx context.Context) (err error) { if s.BlockNumber <= u.UnwindPoint { return nil } @@ -292,14 +292,14 @@ func UnwindCallTraces(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg CallTrac return nil } -func DoUnwindCallTraces(logPrefix string, db ethdb.RwTx, from, to uint64, ctx context.Context, cfg CallTracesCfg) error { +func DoUnwindCallTraces(logPrefix string, db kv.RwTx, from, to uint64, ctx context.Context, cfg CallTracesCfg) error { froms := map[string]struct{}{} tos := map[string]struct{}{} logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - traceCursor, err := db.RwCursorDupSort(dbutils.CallTraceSet) + traceCursor, err := db.RwCursorDupSort(kv.CallTraceSet) if err != nil { return fmt.Errorf("create cursor for call traces: %w", err) } @@ -342,10 +342,10 @@ func DoUnwindCallTraces(logPrefix string, db ethdb.RwTx, from, to uint64, ctx co } } - if err := truncateBitmaps64(db, dbutils.CallFromIndex, froms, to); err != nil { + if err := truncateBitmaps64(db, kv.CallFromIndex, froms, to); err != nil { return err } - if err := truncateBitmaps64(db, dbutils.CallToIndex, tos, to); err != nil { + if err := truncateBitmaps64(db, kv.CallToIndex, tos, to); err != nil { return err } return nil @@ -407,7 +407,7 @@ func (ct *CallTracer) CaptureAccountWrite(account common.Address) error { return nil } -func PruneCallTraces(s *PruneState, tx ethdb.RwTx, cfg CallTracesCfg, ctx context.Context) (err error) 
{ +func PruneCallTraces(s *PruneState, tx kv.RwTx, cfg CallTracesCfg, ctx context.Context) (err error) { logPrefix := s.LogPrefix() useExternalTx := tx != nil @@ -436,7 +436,7 @@ func PruneCallTraces(s *PruneState, tx ethdb.RwTx, cfg CallTracesCfg, ctx contex return nil } -func pruneCallTraces(tx ethdb.RwTx, logPrefix, tmpDir string, pruneTo uint64, ctx context.Context) error { +func pruneCallTraces(tx kv.RwTx, logPrefix, tmpDir string, pruneTo uint64, ctx context.Context) error { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() @@ -444,7 +444,7 @@ func pruneCallTraces(tx ethdb.RwTx, logPrefix, tmpDir string, pruneTo uint64, ct tos := map[string]struct{}{} { - traceCursor, err := tx.CursorDupSort(dbutils.CallTraceSet) + traceCursor, err := tx.CursorDupSort(kv.CallTraceSet) if err != nil { return fmt.Errorf("create cursor for call traces: %w", err) } @@ -488,7 +488,7 @@ func pruneCallTraces(tx ethdb.RwTx, logPrefix, tmpDir string, pruneTo uint64, ct sorted = append(sorted, k) } sort.Strings(sorted) - c, err := tx.RwCursor(dbutils.CallFromIndex) + c, err := tx.RwCursor(kv.CallFromIndex) if err != nil { return err } @@ -506,7 +506,7 @@ func pruneCallTraces(tx ethdb.RwTx, logPrefix, tmpDir string, pruneTo uint64, ct } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.CallFromIndex, "block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", kv.CallFromIndex, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: @@ -523,7 +523,7 @@ func pruneCallTraces(tx ethdb.RwTx, logPrefix, tmpDir string, pruneTo uint64, ct sorted = append(sorted, k) } sort.Strings(sorted) - c, err := tx.RwCursor(dbutils.CallToIndex) + c, err := tx.RwCursor(kv.CallToIndex) if err != nil { return err } @@ -541,7 +541,7 @@ func pruneCallTraces(tx ethdb.RwTx, logPrefix, tmpDir string, pruneTo uint64, ct } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.CallToIndex, 
"block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", kv.CallToIndex, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 5bd2d7b254d..c41d46d8c73 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -10,6 +10,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/common" @@ -44,7 +45,7 @@ type HasChangeSetWriter interface { type ChangeSetHook func(blockNum uint64, wr *state.ChangeSetWriter) type ExecuteBlockCfg struct { - db ethdb.RwKV + db kv.RwDB batchSize datasize.ByteSize prune prune.Mode changeSetHook ChangeSetHook @@ -57,7 +58,7 @@ type ExecuteBlockCfg struct { } func StageExecuteBlocksCfg( - kv ethdb.RwKV, + kv kv.RwDB, prune prune.Mode, batchSize datasize.ByteSize, changeSetHook ChangeSetHook, @@ -82,7 +83,7 @@ func StageExecuteBlocksCfg( } } -func readBlock(blockNum uint64, tx ethdb.Tx) (*types.Block, error) { +func readBlock(blockNum uint64, tx kv.Tx) (*types.Block, error) { blockHash, err := rawdb.ReadCanonicalHash(tx, blockNum) if err != nil { return nil, err @@ -93,7 +94,7 @@ func readBlock(blockNum uint64, tx ethdb.Tx) (*types.Block, error) { func executeBlock( block *types.Block, - tx ethdb.RwTx, + tx kv.RwTx, batch ethdb.Database, cfg ExecuteBlockCfg, vmConfig vm.Config, // emit copy, because will modify it @@ -169,11 +170,11 @@ func executeBlock( } } if j == 0 { - if err = tx.Append(dbutils.CallTraceSet, blockNumEnc[:], v[:]); err != nil { + if err = tx.Append(kv.CallTraceSet, blockNumEnc[:], v[:]); err != nil { return err } } else { - if err = tx.AppendDup(dbutils.CallTraceSet, blockNumEnc[:], v[:]); err != nil { + if err = tx.AppendDup(kv.CallTraceSet, blockNumEnc[:], v[:]); err != nil { return err } } @@ -186,7 +187,7 @@ func executeBlock( 
func newStateReaderWriter( batch ethdb.Database, - tx ethdb.RwTx, + tx kv.RwTx, blockNum uint64, blockHash common.Hash, writeChangesets bool, @@ -214,7 +215,7 @@ func newStateReaderWriter( return stateReader, stateWriter } -func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx ethdb.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { +func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -248,7 +249,7 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx ethdb.RwTx, toBlock u } var batch ethdb.DbWithPendingMutations - batch = kv.NewBatch(tx, quit) + batch = olddb.NewBatch(tx, quit) defer batch.Rollback() logEvery := time.NewTicker(logInterval) @@ -313,7 +314,7 @@ Loop: // TODO: This creates stacked up deferrals defer tx.Rollback() } - batch = kv.NewBatch(tx, quit) + batch = olddb.NewBatch(tx, quit) // TODO: This creates stacked up deferrals defer batch.Rollback() } @@ -347,7 +348,7 @@ Loop: return stoppedErr } -func pruneChangeSets(tx ethdb.RwTx, logPrefix string, table string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { +func pruneChangeSets(tx kv.RwTx, logPrefix string, table string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { c, err := tx.RwCursorDupSort(table) if err != nil { return fmt.Errorf("failed to create cursor for pruning %w", err) @@ -399,7 +400,7 @@ func logProgress(logPrefix string, prevBlock uint64, prevTime time.Time, current return currentBlock, currentTx, currentTime } -func UnwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { +func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { quit := 
ctx.Done() if u.UnwindPoint >= s.BlockNumber { return nil @@ -430,9 +431,9 @@ func UnwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, ctx cont return nil } -func unwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, quit <-chan struct{}, cfg ExecuteBlockCfg, initialCycle bool) error { +func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, quit <-chan struct{}, cfg ExecuteBlockCfg, initialCycle bool) error { logPrefix := s.LogPrefix() - stateBucket := dbutils.PlainStateBucket + stateBucket := kv.PlainStateBucket storageKeyLength := common.AddressLength + common.IncarnationLength + common.HashLength var accumulator *shards.Accumulator @@ -472,7 +473,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, quit <-c if original != nil { // clean up all the code incarnations original incarnation and the new one for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- { - err = tx.Delete(dbutils.PlainContractCodeBucket, dbutils.PlainGenerateStoragePrefix(address[:], incarnation), nil) + err = tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation), nil) if err != nil { return fmt.Errorf("writeAccountPlain for %x: %w", address, err) } @@ -536,7 +537,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, quit <-c // Truncate CallTraceSet keyStart := dbutils.EncodeBlockNumber(u.UnwindPoint + 1) - c, err := tx.RwCursorDupSort(dbutils.CallTraceSet) + c, err := tx.RwCursorDupSort(kv.CallTraceSet) if err != nil { return err } @@ -554,11 +555,11 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, quit <-c return nil } -func recoverCodeHashPlain(acc *accounts.Account, db ethdb.Tx, key []byte) { +func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) { var address common.Address copy(address[:], key) if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { - if codeHash, err2 := 
db.GetOne(dbutils.PlainContractCodeBucket, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { + if codeHash, err2 := db.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { copy(acc.CodeHash[:], codeHash) } } @@ -571,7 +572,7 @@ func min(a, b uint64) uint64 { return b } -func PruneExecutionStage(s *PruneState, tx ethdb.RwTx, cfg ExecuteBlockCfg, ctx context.Context, initialCycle bool) (err error) { +func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx context.Context, initialCycle bool) (err error) { logPrefix := s.LogPrefix() useExternalTx := tx != nil if !useExternalTx { @@ -586,10 +587,10 @@ func PruneExecutionStage(s *PruneState, tx ethdb.RwTx, cfg ExecuteBlockCfg, ctx defer logEvery.Stop() if cfg.prune.History.Enabled() { - if err = pruneChangeSets(tx, logPrefix, dbutils.AccountChangeSetBucket, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = pruneChangeSets(tx, logPrefix, kv.AccountChangeSet, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } - if err = pruneChangeSets(tx, logPrefix, dbutils.StorageChangeSetBucket, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = pruneChangeSets(tx, logPrefix, kv.StorageChangeSet, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } } @@ -616,8 +617,8 @@ func PruneExecutionStage(s *PruneState, tx ethdb.RwTx, cfg ExecuteBlockCfg, ctx return nil } -func pruneReceipts(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { - c, err := tx.RwCursor(dbutils.Receipts) +func pruneReceipts(tx kv.RwTx, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { + c, err := tx.RwCursor(kv.Receipts) if err != nil { return fmt.Errorf("failed to create cursor for pruning %w", err) } @@ -634,7 +635,7 @@ func 
pruneReceipts(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEvery *ti } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.Receipts, "block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", kv.Receipts, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: @@ -644,7 +645,7 @@ func pruneReceipts(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEvery *ti } } - c, err = tx.RwCursor(dbutils.Log) + c, err = tx.RwCursor(kv.Log) if err != nil { return fmt.Errorf("failed to create cursor for pruning %w", err) } @@ -660,7 +661,7 @@ func pruneReceipts(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEvery *ti } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.Log, "block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", kv.Log, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: @@ -672,8 +673,8 @@ func pruneReceipts(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEvery *ti return nil } -func pruneCallTracesSet(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { - c, err := tx.RwCursorDupSort(dbutils.CallTraceSet) +func pruneCallTracesSet(tx kv.RwTx, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { + c, err := tx.RwCursorDupSort(kv.CallTraceSet) if err != nil { return fmt.Errorf("failed to create cursor for pruning %w", err) } @@ -689,7 +690,7 @@ func pruneCallTracesSet(tx ethdb.RwTx, logPrefix string, pruneTo uint64, logEver } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.CallTraceSet, "block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", kv.CallTraceSet, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: diff --git a/eth/stagedsync/stage_execute_test.go b/eth/stagedsync/stage_execute_test.go index eb1efdcd8c4..ec4545bd453 
100644 --- a/eth/stagedsync/stage_execute_test.go +++ b/eth/stagedsync/stage_execute_test.go @@ -5,17 +5,17 @@ import ( "testing" "github.com/ledgerwatch/erigon/common/changeset" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/stretchr/testify/assert" ) func TestUnwindExecutionStagePlainStatic(t *testing.T) { ctx, assert := context.Background(), assert.New(t) - _, tx1 := kv.NewTestTx(t) - _, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + _, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, plainWriterGen(tx1), staticCodeStaticIncarnations) generateBlocks(t, 1, 100, plainWriterGen(tx2), staticCodeStaticIncarnations) @@ -28,13 +28,13 @@ func TestUnwindExecutionStagePlainStatic(t *testing.T) { err = UnwindExecutionStage(u, s, tx2, ctx, ExecuteBlockCfg{}, false) assert.NoError(err) - compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.ContractTEVMCodeBucket) + compareCurrentState(t, tx1, tx2, kv.PlainStateBucket, kv.PlainContractCode, kv.ContractTEVMCode) } func TestUnwindExecutionStagePlainWithIncarnationChanges(t *testing.T) { ctx, assert := context.Background(), assert.New(t) - _, tx1 := kv.NewTestTx(t) - _, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + _, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, plainWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 100, plainWriterGen(tx2), changeCodeWithIncarnations) @@ -47,14 +47,14 @@ func TestUnwindExecutionStagePlainWithIncarnationChanges(t *testing.T) { err = UnwindExecutionStage(u, s, tx2, ctx, ExecuteBlockCfg{}, false) assert.NoError(err) - compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket) + compareCurrentState(t, tx1, tx2, kv.PlainStateBucket, kv.PlainContractCode) } func 
TestUnwindExecutionStagePlainWithCodeChanges(t *testing.T) { t.Skip("not supported yet, to be restored") ctx := context.Background() - _, tx1 := kv.NewTestTx(t) - _, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + _, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, plainWriterGen(tx1), changeCodeIndepenentlyOfIncarnations) generateBlocks(t, 1, 100, plainWriterGen(tx2), changeCodeIndepenentlyOfIncarnations) @@ -70,12 +70,12 @@ func TestUnwindExecutionStagePlainWithCodeChanges(t *testing.T) { t.Errorf("error while unwinding state: %v", err) } - compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket) + compareCurrentState(t, tx1, tx2, kv.PlainStateBucket, kv.PlainContractCode) } func TestPruneExecution(t *testing.T) { ctx, assert := context.Background(), assert.New(t) - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 50, plainWriterGen(tx), changeCodeIndepenentlyOfIncarnations) err := stages.SaveStageProgress(tx, stages.Execution, 50) diff --git a/eth/stagedsync/stage_finish.go b/eth/stagedsync/stage_finish.go index 19bd47744a9..5b5399c42b3 100644 --- a/eth/stagedsync/stage_finish.go +++ b/eth/stagedsync/stage_finish.go @@ -6,29 +6,30 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - - "github.com/ledgerwatch/erigon/ethdb" ) type FinishCfg struct { - db ethdb.RwKV + db kv.RwDB tmpDir string btClient *snapshotsync.Client snBuilder *snapshotsync.SnapshotMigrator + log log.Logger } -func StageFinishCfg(db ethdb.RwKV, tmpDir string, btClient *snapshotsync.Client, snBuilder *snapshotsync.SnapshotMigrator) FinishCfg { +func StageFinishCfg(db kv.RwDB, tmpDir string, btClient *snapshotsync.Client, snBuilder *snapshotsync.SnapshotMigrator, logger log.Logger) FinishCfg { return FinishCfg{ db: db, + log: logger, 
tmpDir: tmpDir, btClient: btClient, snBuilder: snBuilder, } } -func FinishForward(s *StageState, tx ethdb.RwTx, cfg FinishCfg) error { +func FinishForward(s *StageState, tx kv.RwTx, cfg FinishCfg) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -50,7 +51,7 @@ func FinishForward(s *StageState, tx ethdb.RwTx, cfg FinishCfg) error { if cfg.snBuilder != nil && useExternalTx { snBlock := snapshotsync.CalculateEpoch(executionAt, snapshotsync.EpochSize) - err = cfg.snBuilder.AsyncStages(snBlock, cfg.db, tx, cfg.btClient, true) + err = cfg.snBuilder.AsyncStages(snBlock, cfg.log, cfg.db, tx, cfg.btClient, true) if err != nil { return err } @@ -74,7 +75,7 @@ func FinishForward(s *StageState, tx ethdb.RwTx, cfg FinishCfg) error { return nil } -func UnwindFinish(u *UnwindState, tx ethdb.RwTx, cfg FinishCfg, ctx context.Context) (err error) { +func UnwindFinish(u *UnwindState, tx kv.RwTx, cfg FinishCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -95,7 +96,7 @@ func UnwindFinish(u *UnwindState, tx ethdb.RwTx, cfg FinishCfg, ctx context.Cont return nil } -func PruneFinish(u *PruneState, tx ethdb.RwTx, cfg FinishCfg, ctx context.Context) (err error) { +func PruneFinish(u *PruneState, tx kv.RwTx, cfg FinishCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -113,7 +114,7 @@ func PruneFinish(u *PruneState, tx ethdb.RwTx, cfg FinishCfg, ctx context.Contex return nil } -func NotifyNewHeaders(ctx context.Context, finishStageBeforeSync uint64, unwindTo *uint64, notifier ChainEventNotifier, db ethdb.RwKV) error { +func NotifyNewHeaders(ctx context.Context, finishStageBeforeSync uint64, unwindTo *uint64, notifier ChainEventNotifier, db kv.RwDB) error { tx, err := db.BeginRo(ctx) if err != nil { return err diff --git a/eth/stagedsync/stage_hashstate.go b/eth/stagedsync/stage_hashstate.go index ec2fb3c1ca1..b1761e64a2a 100644 --- 
a/eth/stagedsync/stage_hashstate.go +++ b/eth/stagedsync/stage_hashstate.go @@ -12,23 +12,23 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) type HashStateCfg struct { - db ethdb.RwKV + db kv.RwDB tmpDir string } -func StageHashStateCfg(db ethdb.RwKV, tmpDir string) HashStateCfg { +func StageHashStateCfg(db kv.RwDB, tmpDir string) HashStateCfg { return HashStateCfg{ db: db, tmpDir: tmpDir, } } -func SpawnHashStateStage(s *StageState, tx ethdb.RwTx, cfg HashStateCfg, ctx context.Context) error { +func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -79,7 +79,7 @@ func SpawnHashStateStage(s *StageState, tx ethdb.RwTx, cfg HashStateCfg, ctx con return nil } -func UnwindHashStateStage(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HashStateCfg, ctx context.Context) (err error) { +func UnwindHashStateStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -104,7 +104,7 @@ func UnwindHashStateStage(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg Hash return nil } -func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { +func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { // Currently it does not require unwinding because it does not create any Intemediate Hash records // and recomputes the state root from scratch prom := NewPromoter(tx, quit) @@ -121,12 +121,12 @@ func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, 
t return nil } -func PromoteHashedStateCleanly(logPrefix string, db ethdb.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { +func PromoteHashedStateCleanly(logPrefix string, db kv.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { err := etl.Transform( logPrefix, db, - dbutils.PlainStateBucket, - dbutils.HashedAccountsBucket, + kv.PlainStateBucket, + kv.HashedAccounts, cfg.tmpDir, keyTransformExtractAcc(transformPlainStateKey), etl.IdentityLoadFunc, @@ -141,8 +141,8 @@ func PromoteHashedStateCleanly(logPrefix string, db ethdb.RwTx, cfg HashStateCfg err = etl.Transform( logPrefix, db, - dbutils.PlainStateBucket, - dbutils.HashedStorageBucket, + kv.PlainStateBucket, + kv.HashedStorage, cfg.tmpDir, keyTransformExtractStorage(transformPlainStateKey), etl.IdentityLoadFunc, @@ -157,8 +157,8 @@ func PromoteHashedStateCleanly(logPrefix string, db ethdb.RwTx, cfg HashStateCfg return etl.Transform( logPrefix, db, - dbutils.PlainContractCodeBucket, - dbutils.ContractCodeBucket, + kv.PlainContractCode, + kv.ContractCode, cfg.tmpDir, keyTransformExtractFunc(transformContractCodeKey), etl.IdentityLoadFunc, @@ -259,7 +259,7 @@ func (l *OldestAppearedLoad) LoadFunc(k, v []byte, table etl.CurrentTableReader, return l.innerLoadFunc(k, v, table, next) } -func NewPromoter(db ethdb.RwTx, quitCh <-chan struct{}) *Promoter { +func NewPromoter(db kv.RwTx, quitCh <-chan struct{}) *Promoter { return &Promoter{ db: db, ChangeSetBufSize: 256 * 1024 * 1024, @@ -269,18 +269,18 @@ func NewPromoter(db ethdb.RwTx, quitCh <-chan struct{}) *Promoter { } type Promoter struct { - db ethdb.RwTx + db kv.RwTx ChangeSetBufSize uint64 TempDir string quitCh <-chan struct{} } -func getExtractFunc(db ethdb.Tx, changeSetBucket string) etl.ExtractFunc { +func getExtractFunc(db kv.Tx, changeSetBucket string) etl.ExtractFunc { decode := changeset.Mapper[changeSetBucket].Decode return func(dbKey, dbValue []byte, next etl.ExtractNextFunc) error { _, k, _ := decode(dbKey, dbValue) // ignoring value un 
purpose, we want the latest one and it is in PlainStateBucket - value, err := db.GetOne(dbutils.PlainStateBucket, k) + value, err := db.GetOne(kv.PlainStateBucket, k) if err != nil { return err } @@ -293,11 +293,11 @@ func getExtractFunc(db ethdb.Tx, changeSetBucket string) etl.ExtractFunc { } } -func getExtractCode(db ethdb.Tx, changeSetBucket string) etl.ExtractFunc { +func getExtractCode(db kv.Tx, changeSetBucket string) etl.ExtractFunc { decode := changeset.Mapper[changeSetBucket].Decode return func(dbKey, dbValue []byte, next etl.ExtractNextFunc) error { _, k, _ := decode(dbKey, dbValue) - value, err := db.GetOne(dbutils.PlainStateBucket, k) + value, err := db.GetOne(kv.PlainStateBucket, k) if err != nil { return err } @@ -313,7 +313,7 @@ func getExtractCode(db ethdb.Tx, changeSetBucket string) etl.ExtractFunc { } plainKey := dbutils.PlainGenerateStoragePrefix(k, a.Incarnation) var codeHash []byte - codeHash, err = db.GetOne(dbutils.PlainContractCodeBucket, plainKey) + codeHash, err = db.GetOne(kv.PlainContractCode, plainKey) if err != nil { return fmt.Errorf("getFromPlainCodesAndLoad for %x, inc %d: %w", plainKey, a.Incarnation, err) } @@ -340,7 +340,7 @@ func getUnwindExtractStorage(changeSetBucket string) etl.ExtractFunc { } } -func getUnwindExtractAccounts(db ethdb.Tx, changeSetBucket string) etl.ExtractFunc { +func getUnwindExtractAccounts(db kv.Tx, changeSetBucket string) etl.ExtractFunc { decode := changeset.Mapper[changeSetBucket].Decode return func(dbKey, dbValue []byte, next etl.ExtractNextFunc) error { _, k, v := decode(dbKey, dbValue) @@ -359,7 +359,7 @@ func getUnwindExtractAccounts(db ethdb.Tx, changeSetBucket string) etl.ExtractFu return next(dbKey, newK, v) } - if codeHash, err := db.GetOne(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(newK, acc.Incarnation)); err == nil { + if codeHash, err := db.GetOne(kv.ContractCode, dbutils.GenerateStoragePrefix(newK, acc.Incarnation)); err == nil { copy(acc.CodeHash[:], codeHash) } else { 
return fmt.Errorf("adjusting codeHash for ks %x, inc %d: %w", newK, acc.Incarnation, err) @@ -371,7 +371,7 @@ func getUnwindExtractAccounts(db ethdb.Tx, changeSetBucket string) etl.ExtractFu } } -func getCodeUnwindExtractFunc(db ethdb.Tx, changeSetBucket string) etl.ExtractFunc { +func getCodeUnwindExtractFunc(db kv.Tx, changeSetBucket string) etl.ExtractFunc { decode := changeset.Mapper[changeSetBucket].Decode return func(dbKey, dbValue []byte, next etl.ExtractNextFunc) error { _, k, v := decode(dbKey, dbValue) @@ -391,7 +391,7 @@ func getCodeUnwindExtractFunc(db ethdb.Tx, changeSetBucket string) etl.ExtractFu return nil } plainKey := dbutils.PlainGenerateStoragePrefix(k, a.Incarnation) - codeHash, err = db.GetOne(dbutils.PlainContractCodeBucket, plainKey) + codeHash, err = db.GetOne(kv.PlainContractCode, plainKey) if err != nil { return fmt.Errorf("getCodeUnwindExtractFunc: %w, key=%x", err, plainKey) } @@ -406,9 +406,9 @@ func getCodeUnwindExtractFunc(db ethdb.Tx, changeSetBucket string) etl.ExtractFu func (p *Promoter) Promote(logPrefix string, s *StageState, from, to uint64, storage bool, codes bool) error { var changeSetBucket string if storage { - changeSetBucket = dbutils.StorageChangeSetBucket + changeSetBucket = kv.StorageChangeSet } else { - changeSetBucket = dbutils.AccountChangeSetBucket + changeSetBucket = kv.AccountChangeSet } if to > from+16 { log.Info(fmt.Sprintf("[%s] Incremental promotion started", logPrefix), "from", from, "to", to, "codes", codes, "csbucket", changeSetBucket) @@ -422,13 +422,13 @@ func (p *Promoter) Promote(logPrefix string, s *StageState, from, to uint64, sto var loadBucket string var extract etl.ExtractFunc if codes { - loadBucket = dbutils.ContractCodeBucket + loadBucket = kv.ContractCode extract = getExtractCode(p.db, changeSetBucket) } else { if storage { - loadBucket = dbutils.HashedStorageBucket + loadBucket = kv.HashedStorage } else { - loadBucket = dbutils.HashedAccountsBucket + loadBucket = kv.HashedAccounts } extract 
= getExtractFunc(p.db, changeSetBucket) } @@ -456,9 +456,9 @@ func (p *Promoter) Promote(logPrefix string, s *StageState, from, to uint64, sto func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, storage bool, codes bool) error { var changeSetBucket string if storage { - changeSetBucket = dbutils.StorageChangeSetBucket + changeSetBucket = kv.StorageChangeSet } else { - changeSetBucket = dbutils.AccountChangeSetBucket + changeSetBucket = kv.AccountChangeSet } from := s.BlockNumber to := u.UnwindPoint @@ -471,16 +471,16 @@ func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, stora var loadBucket string var extractFunc etl.ExtractFunc if codes { - loadBucket = dbutils.ContractCodeBucket + loadBucket = kv.ContractCode extractFunc = getCodeUnwindExtractFunc(p.db, changeSetBucket) l.innerLoadFunc = etl.IdentityLoadFunc } else { l.innerLoadFunc = etl.IdentityLoadFunc if storage { - loadBucket = dbutils.HashedStorageBucket + loadBucket = kv.HashedStorage extractFunc = getUnwindExtractStorage(changeSetBucket) } else { - loadBucket = dbutils.HashedAccountsBucket + loadBucket = kv.HashedAccounts extractFunc = getUnwindExtractAccounts(p.db, changeSetBucket) } } @@ -507,7 +507,7 @@ func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, stora ) } -func promoteHashedStateIncrementally(logPrefix string, s *StageState, from, to uint64, db ethdb.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { +func promoteHashedStateIncrementally(logPrefix string, s *StageState, from, to uint64, db kv.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { prom := NewPromoter(db, quit) prom.TempDir = cfg.tmpDir if err := prom.Promote(logPrefix, s, from, to, false /* storage */, true /* codes */); err != nil { @@ -522,7 +522,7 @@ func promoteHashedStateIncrementally(logPrefix string, s *StageState, from, to u return nil } -func PruneHashStateStage(s *PruneState, tx ethdb.RwTx, cfg HashStateCfg, ctx context.Context) (err error) { 
+func PruneHashStateStage(s *PruneState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_hashstate_test.go b/eth/stagedsync/stage_hashstate_test.go index 6ce532c2cb7..f4198f9985a 100644 --- a/eth/stagedsync/stage_hashstate_test.go +++ b/eth/stagedsync/stage_hashstate_test.go @@ -6,15 +6,15 @@ import ( "testing" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/require" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" ) func TestPromoteHashedStateClearState(t *testing.T) { - _, tx1 := kv.NewTestTx(t) - db2, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) @@ -24,12 +24,12 @@ func TestPromoteHashedStateClearState(t *testing.T) { t.Errorf("error while promoting state: %v", err) } - compareCurrentState(t, tx1, tx2, dbutils.HashedAccountsBucket, dbutils.HashedStorageBucket, dbutils.ContractCodeBucket) + compareCurrentState(t, tx1, tx2, kv.HashedAccounts, kv.HashedStorage, kv.ContractCode) } func TestPromoteHashedStateIncremental(t *testing.T) { - _, tx1 := kv.NewTestTx(t) - db2, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) @@ -48,12 +48,12 @@ func TestPromoteHashedStateIncremental(t *testing.T) { t.Errorf("error while promoting state: %v", err) } - compareCurrentState(t, tx1, tx2, dbutils.HashedAccountsBucket, dbutils.HashedStorageBucket) + compareCurrentState(t, tx1, tx2, kv.HashedAccounts, kv.HashedStorage) } func TestPromoteHashedStateIncrementalMixed(t *testing.T) { 
- _, tx1 := kv.NewTestTx(t) - db2, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 100, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, hashedWriterGen(tx2), changeCodeWithIncarnations) @@ -63,12 +63,12 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) { if err != nil { t.Errorf("error while promoting state: %v", err) } - compareCurrentState(t, tx1, tx2, dbutils.HashedAccountsBucket, dbutils.HashedStorageBucket) + compareCurrentState(t, tx1, tx2, kv.HashedAccounts, kv.HashedStorage) } func TestUnwindHashed(t *testing.T) { - _, tx1 := kv.NewTestTx(t) - db2, tx2 := kv.NewTestTx(t) + _, tx1 := memdb.NewTestTx(t) + db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) @@ -84,7 +84,7 @@ func TestUnwindHashed(t *testing.T) { t.Errorf("error while unwind state: %v", err) } - compareCurrentState(t, tx1, tx2, dbutils.HashedAccountsBucket, dbutils.HashedStorageBucket) + compareCurrentState(t, tx1, tx2, kv.HashedAccounts, kv.HashedStorage) } func TestPromoteIncrementallyShutdown(t *testing.T) { @@ -106,7 +106,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) { if tc.cancelFuncExec { cancel() } - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) if err := promoteHashedStateIncrementally("logPrefix", &StageState{BlockNumber: 1}, 1, 10, tx, StageHashStateCfg(db, t.TempDir()), ctx.Done()); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown promoteHashedStateIncrementally, got: %v, expected: %v", err, tc.errExp) @@ -138,7 +138,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { cancel() } - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) @@ 
-172,7 +172,7 @@ func TestUnwindHashStateShutdown(t *testing.T) { cancel() } - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) cfg := StageHashStateCfg(db, t.TempDir()) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index fd0e643d946..b9fe1083e5e 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -14,7 +14,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" "github.com/ledgerwatch/erigon/params" @@ -25,7 +25,7 @@ import ( var stageHeadersGauge = metrics.NewRegisteredGauge("stage/headers", nil) type HeadersCfg struct { - db ethdb.RwKV + db kv.RwDB hd *headerdownload.HeaderDownload chainConfig params.ChainConfig headerReqSend func(context.Context, *headerdownload.HeaderRequest) []byte @@ -35,7 +35,7 @@ type HeadersCfg struct { } func StageHeadersCfg( - db ethdb.RwKV, + db kv.RwDB, headerDownload *headerdownload.HeaderDownload, chainConfig params.ChainConfig, headerReqSend func(context.Context, *headerdownload.HeaderRequest) []byte, @@ -59,7 +59,7 @@ func HeadersForward( s *StageState, u Unwinder, ctx context.Context, - tx ethdb.RwTx, + tx kv.RwTx, cfg HeadersCfg, initialCycle bool, test bool, // Set to true in tests, allows the stage to fail rather than wait indefinitely @@ -204,7 +204,7 @@ func HeadersForward( return nil } -func fixCanonicalChain(logPrefix string, logEvery *time.Ticker, height uint64, hash common.Hash, tx ethdb.StatelessRwTx) error { +func fixCanonicalChain(logPrefix string, logEvery *time.Ticker, height uint64, hash common.Hash, tx kv.StatelessRwTx) error { if height == 0 { return nil } @@ -236,7 +236,7 @@ func fixCanonicalChain(logPrefix string, 
logEvery *time.Ticker, height uint64, h return nil } -func HeadersUnwind(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HeadersCfg) (err error) { +func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(context.Background()) @@ -255,7 +255,7 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HeadersCfg) if badBlock { cfg.hd.ReportBadHeader(u.BadBlock) // Mark all descendants of bad block as bad too - headerCursor, cErr := tx.Cursor(dbutils.HeadersBucket) + headerCursor, cErr := tx.Cursor(kv.Headers) if cErr != nil { return cErr } @@ -281,7 +281,7 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HeadersCfg) } if badBlock { // Find header with biggest TD - tdCursor, cErr := tx.Cursor(dbutils.HeaderTDBucket) + tdCursor, cErr := tx.Cursor(kv.HeaderTD) if cErr != nil { return cErr } @@ -355,7 +355,7 @@ func logProgressHeaders(logPrefix string, prev, now uint64) uint64 { type chainReader struct { config *params.ChainConfig - tx ethdb.RwTx + tx kv.RwTx } func (cr chainReader) Config() *params.ChainConfig { return cr.config } @@ -372,7 +372,7 @@ func (cr chainReader) GetHeaderByHash(hash common.Hash) *types.Header { } type epochReader struct { - tx ethdb.RwTx + tx kv.RwTx } func (cr epochReader) GetEpoch(hash common.Hash, number uint64) ([]byte, error) { @@ -391,7 +391,7 @@ func (cr epochReader) FindBeforeOrEqualNumber(number uint64) (blockNum uint64, b return rawdb.FindEpochBeforeOrEqualNumber(cr.tx, number) } -func HeadersPrune(p *PruneState, tx ethdb.RwTx, cfg HeadersCfg, ctx context.Context) (err error) { +func HeadersPrune(p *PruneState, tx kv.RwTx, cfg HeadersCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_headers_snapshot.go b/eth/stagedsync/stage_headers_snapshot.go index af8c8773d9b..dc9ea0396ef 100644 --- 
a/eth/stagedsync/stage_headers_snapshot.go +++ b/eth/stagedsync/stage_headers_snapshot.go @@ -8,28 +8,30 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) type SnapshotHeadersCfg struct { - db ethdb.RwKV + db kv.RwDB snapshotDir string client *snapshotsync.Client snapshotMigrator *snapshotsync.SnapshotMigrator + log log.Logger } -func StageSnapshotHeadersCfg(db ethdb.RwKV, snapshot ethconfig.Snapshot, client *snapshotsync.Client, snapshotMigrator *snapshotsync.SnapshotMigrator) SnapshotHeadersCfg { +func StageSnapshotHeadersCfg(db kv.RwDB, snapshot ethconfig.Snapshot, client *snapshotsync.Client, snapshotMigrator *snapshotsync.SnapshotMigrator, logger log.Logger) SnapshotHeadersCfg { return SnapshotHeadersCfg{ db: db, snapshotDir: snapshot.Dir, client: client, snapshotMigrator: snapshotMigrator, + log: logger, } } -func SpawnHeadersSnapshotGenerationStage(s *StageState, tx ethdb.RwTx, cfg SnapshotHeadersCfg, initial bool, ctx context.Context) error { +func SpawnHeadersSnapshotGenerationStage(s *StageState, tx kv.RwTx, cfg SnapshotHeadersCfg, initial bool, ctx context.Context) error { //generate snapshot only on initial mode if !initial { return nil @@ -64,7 +66,7 @@ func SpawnHeadersSnapshotGenerationStage(s *StageState, tx ethdb.RwTx, cfg Snaps return nil } - err = cfg.snapshotMigrator.AsyncStages(snapshotBlock, cfg.db, readTX, cfg.client, false) + err = cfg.snapshotMigrator.AsyncStages(snapshotBlock, cfg.log, cfg.db, readTX, cfg.client, false) if err != nil { return err } @@ -119,7 +121,7 @@ func SpawnHeadersSnapshotGenerationStage(s *StageState, tx ethdb.RwTx, cfg Snaps return nil } -func UnwindHeadersSnapshotGenerationStage(u *UnwindState, tx ethdb.RwTx, cfg SnapshotHeadersCfg, ctx context.Context) (err error) { +func 
UnwindHeadersSnapshotGenerationStage(u *UnwindState, tx kv.RwTx, cfg SnapshotHeadersCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -140,7 +142,7 @@ func UnwindHeadersSnapshotGenerationStage(u *UnwindState, tx ethdb.RwTx, cfg Sna return nil } -func PruneHeadersSnapshotGenerationStage(u *PruneState, tx ethdb.RwTx, cfg SnapshotHeadersCfg, ctx context.Context) (err error) { +func PruneHeadersSnapshotGenerationStage(u *PruneState, tx kv.RwTx, cfg SnapshotHeadersCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_indexes.go b/eth/stagedsync/stage_indexes.go index 9a9734c096e..38b43d65b99 100644 --- a/eth/stagedsync/stage_indexes.go +++ b/eth/stagedsync/stage_indexes.go @@ -18,19 +18,20 @@ import ( "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" ) type HistoryCfg struct { - db ethdb.RwKV + db kv.RwDB bufLimit datasize.ByteSize prune prune.Mode flushEvery time.Duration tmpdir string } -func StageHistoryCfg(db ethdb.RwKV, prune prune.Mode, tmpDir string) HistoryCfg { +func StageHistoryCfg(db kv.RwDB, prune prune.Mode, tmpDir string) HistoryCfg { return HistoryCfg{ db: db, prune: prune, @@ -40,7 +41,7 @@ func StageHistoryCfg(db ethdb.RwKV, prune prune.Mode, tmpDir string) HistoryCfg } } -func SpawnAccountHistoryIndex(s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx context.Context) error { +func SpawnAccountHistoryIndex(s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -72,7 +73,7 @@ func SpawnAccountHistoryIndex(s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx startBlock = pruneTo } - if err := promoteHistory(logPrefix, 
tx, dbutils.AccountChangeSetBucket, startBlock, stopChangeSetsLookupAt, cfg, quitCh); err != nil { + if err := promoteHistory(logPrefix, tx, kv.AccountChangeSet, startBlock, stopChangeSetsLookupAt, cfg, quitCh); err != nil { return err } @@ -88,7 +89,7 @@ func SpawnAccountHistoryIndex(s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx return nil } -func SpawnStorageHistoryIndex(s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx context.Context) error { +func SpawnStorageHistoryIndex(s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -115,7 +116,7 @@ func SpawnStorageHistoryIndex(s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx } stopChangeSetsLookupAt := executionAt + 1 - if err := promoteHistory(logPrefix, tx, dbutils.StorageChangeSetBucket, startChangeSetsLookupAt, stopChangeSetsLookupAt, cfg, quitCh); err != nil { + if err := promoteHistory(logPrefix, tx, kv.StorageChangeSet, startChangeSetsLookupAt, stopChangeSetsLookupAt, cfg, quitCh); err != nil { return err } @@ -130,7 +131,7 @@ func SpawnStorageHistoryIndex(s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx return nil } -func promoteHistory(logPrefix string, tx ethdb.RwTx, changesetBucket string, start, stop uint64, cfg HistoryCfg, quit <-chan struct{}) error { +func promoteHistory(logPrefix string, tx kv.RwTx, changesetBucket string, start, stop uint64, cfg HistoryCfg, quit <-chan struct{}) error { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -227,7 +228,7 @@ func promoteHistory(logPrefix string, tx ethdb.RwTx, changesetBucket string, sta return nil } -func UnwindAccountHistoryIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { +func UnwindAccountHistoryIndex(u *UnwindState, s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -239,7 +240,7 @@ 
func UnwindAccountHistoryIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg quitCh := ctx.Done() logPrefix := s.LogPrefix() - if err := unwindHistory(logPrefix, tx, dbutils.AccountChangeSetBucket, u.UnwindPoint, cfg, quitCh); err != nil { + if err := unwindHistory(logPrefix, tx, kv.AccountChangeSet, u.UnwindPoint, cfg, quitCh); err != nil { return err } @@ -255,7 +256,7 @@ func UnwindAccountHistoryIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg return nil } -func UnwindStorageHistoryIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { +func UnwindStorageHistoryIndex(u *UnwindState, s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { var err error @@ -268,7 +269,7 @@ func UnwindStorageHistoryIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg quitCh := ctx.Done() logPrefix := s.LogPrefix() - if err := unwindHistory(logPrefix, tx, dbutils.StorageChangeSetBucket, u.UnwindPoint, cfg, quitCh); err != nil { + if err := unwindHistory(logPrefix, tx, kv.StorageChangeSet, u.UnwindPoint, cfg, quitCh); err != nil { return err } @@ -284,7 +285,7 @@ func UnwindStorageHistoryIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg return nil } -func unwindHistory(logPrefix string, db ethdb.RwTx, csBucket string, to uint64, cfg HistoryCfg, quitCh <-chan struct{}) error { +func unwindHistory(logPrefix string, db kv.RwTx, csBucket string, to uint64, cfg HistoryCfg, quitCh <-chan struct{}) error { logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -338,7 +339,7 @@ func flushBitmaps64(c *etl.Collector, inMem map[string]*roaring64.Bitmap) error return nil } -func truncateBitmaps64(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { +func truncateBitmaps64(tx kv.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { keys := make([]string, 0, len(inMem)) for k := range inMem { keys = append(keys, 
k) @@ -353,7 +354,7 @@ func truncateBitmaps64(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, return nil } -func PruneAccountHistoryIndex(s *PruneState, tx ethdb.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { +func PruneAccountHistoryIndex(s *PruneState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { if !cfg.prune.History.Enabled() { return nil } @@ -369,7 +370,7 @@ func PruneAccountHistoryIndex(s *PruneState, tx ethdb.RwTx, cfg HistoryCfg, ctx } pruneTo := cfg.prune.History.PruneTo(s.ForwardProgress) - if err = pruneHistoryIndex(tx, dbutils.AccountChangeSetBucket, logPrefix, cfg.tmpdir, pruneTo, ctx); err != nil { + if err = pruneHistoryIndex(tx, kv.AccountChangeSet, logPrefix, cfg.tmpdir, pruneTo, ctx); err != nil { return err } if err = s.Done(tx); err != nil { @@ -384,7 +385,7 @@ func PruneAccountHistoryIndex(s *PruneState, tx ethdb.RwTx, cfg HistoryCfg, ctx return nil } -func PruneStorageHistoryIndex(s *PruneState, tx ethdb.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { +func PruneStorageHistoryIndex(s *PruneState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { if !cfg.prune.History.Enabled() { return nil } @@ -399,7 +400,7 @@ func PruneStorageHistoryIndex(s *PruneState, tx ethdb.RwTx, cfg HistoryCfg, ctx defer tx.Rollback() } pruneTo := cfg.prune.History.PruneTo(s.ForwardProgress) - if err = pruneHistoryIndex(tx, dbutils.StorageChangeSetBucket, logPrefix, cfg.tmpdir, pruneTo, ctx); err != nil { + if err = pruneHistoryIndex(tx, kv.StorageChangeSet, logPrefix, cfg.tmpdir, pruneTo, ctx); err != nil { return err } if err = s.Done(tx); err != nil { @@ -414,7 +415,7 @@ func PruneStorageHistoryIndex(s *PruneState, tx ethdb.RwTx, cfg HistoryCfg, ctx return nil } -func pruneHistoryIndex(tx ethdb.RwTx, csTable, logPrefix, tmpDir string, pruneTo uint64, ctx context.Context) error { +func pruneHistoryIndex(tx kv.RwTx, csTable, logPrefix, tmpDir string, pruneTo uint64, ctx context.Context) error { logEvery := 
time.NewTicker(logInterval) defer logEvery.Stop() @@ -439,7 +440,7 @@ func pruneHistoryIndex(tx ethdb.RwTx, csTable, logPrefix, tmpDir string, pruneTo } defer c.Close() prefixLen := common.AddressLength - if csTable == dbutils.StorageChangeSetBucket { + if csTable == kv.StorageChangeSet { prefixLen = common.HashLength } if err := collector.Load(logPrefix, tx, "", func(addr, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { diff --git a/eth/stagedsync/stage_indexes_test.go b/eth/stagedsync/stage_indexes_test.go index f0a46cdf549..03620fd545c 100644 --- a/eth/stagedsync/stage_indexes_test.go +++ b/eth/stagedsync/stage_indexes_test.go @@ -15,20 +15,20 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestIndexGenerator_GenerateIndex_SimpleCase(t *testing.T) { - kv := kv2.NewTestKV(t) - cfg := StageHistoryCfg(kv, prune.DefaultMode, t.TempDir()) + db := kv2.NewTestDB(t) + cfg := StageHistoryCfg(db, prune.DefaultMode, t.TempDir()) test := func(blocksNum int, csBucket string) func(t *testing.T) { return func(t *testing.T) { - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) require.NoError(t, err) defer tx.Rollback() @@ -52,15 +52,15 @@ func TestIndexGenerator_GenerateIndex_SimpleCase(t *testing.T) { } } - t.Run("account plain state", test(2100, dbutils.AccountChangeSetBucket)) - t.Run("storage plain state", test(2100, dbutils.StorageChangeSetBucket)) + t.Run("account plain state", test(2100, kv.AccountChangeSet)) + t.Run("storage plain state", test(2100, 
kv.StorageChangeSet)) } func TestIndexGenerator_Truncate(t *testing.T) { - buckets := []string{dbutils.AccountChangeSetBucket, dbutils.StorageChangeSetBucket} + buckets := []string{kv.AccountChangeSet, kv.StorageChangeSet} tmpDir, ctx := t.TempDir(), context.Background() - kv := kv2.NewTestKV(t) + kv := kv2.NewTestDB(t) cfg := StageHistoryCfg(kv, prune.DefaultMode, t.TempDir()) for i := range buckets { csbucket := buckets[i] @@ -170,9 +170,9 @@ func TestIndexGenerator_Truncate(t *testing.T) { } } -func expectNoHistoryBefore(t *testing.T, tx ethdb.Tx, csbucket string, prunedTo uint64) { +func expectNoHistoryBefore(t *testing.T, tx kv.Tx, csbucket string, prunedTo uint64) { prefixLen := common.AddressLength - if csbucket == dbutils.StorageChangeSetBucket { + if csbucket == kv.StorageChangeSet { prefixLen = common.HashLength } afterPrune := 0 @@ -186,18 +186,18 @@ func expectNoHistoryBefore(t *testing.T, tx ethdb.Tx, csbucket string, prunedTo assert.NoError(t, err) } -func generateTestData(t *testing.T, tx ethdb.RwTx, csBucket string, numOfBlocks int) ([][]byte, map[string][]uint64) { //nolint +func generateTestData(t *testing.T, tx kv.RwTx, csBucket string, numOfBlocks int) ([][]byte, map[string][]uint64) { //nolint csInfo, ok := changeset.Mapper[csBucket] if !ok { t.Fatal("incorrect cs bucket") } var isPlain bool - if dbutils.StorageChangeSetBucket == csBucket || dbutils.AccountChangeSetBucket == csBucket { + if kv.StorageChangeSet == csBucket || kv.AccountChangeSet == csBucket { isPlain = true } addrs, err := generateAddrs(3, isPlain) require.NoError(t, err) - if dbutils.StorageChangeSetBucket == csBucket { + if kv.StorageChangeSet == csBucket { keys, innerErr := generateAddrs(3, false) require.NoError(t, innerErr) @@ -243,7 +243,7 @@ func generateTestData(t *testing.T, tx ethdb.RwTx, csBucket string, numOfBlocks } } -func checkIndex(t *testing.T, db ethdb.Tx, bucket string, k []byte, expected []uint64) { +func checkIndex(t *testing.T, db kv.Tx, bucket string, k 
[]byte, expected []uint64) { t.Helper() k = dbutils.CompositeKeyWithoutIncarnation(k) m, err := bitmapdb.Get64(db, bucket, k, 0, math.MaxUint32) diff --git a/eth/stagedsync/stage_interhashes.go b/eth/stagedsync/stage_interhashes.go index 0ebb12bf49e..0016c9222dc 100644 --- a/eth/stagedsync/stage_interhashes.go +++ b/eth/stagedsync/stage_interhashes.go @@ -15,19 +15,19 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/trie" ) type TrieCfg struct { - db ethdb.RwKV + db kv.RwDB checkRoot bool tmpDir string saveNewHashesToDB bool // no reason to save changes when calculating root for mining } -func StageTrieCfg(db ethdb.RwKV, checkRoot, saveNewHashesToDB bool, tmpDir string) TrieCfg { +func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB bool, tmpDir string) TrieCfg { return TrieCfg{ db: db, checkRoot: checkRoot, @@ -36,7 +36,7 @@ func StageTrieCfg(db ethdb.RwKV, checkRoot, saveNewHashesToDB bool, tmpDir strin } } -func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx ethdb.RwTx, cfg TrieCfg, ctx context.Context) (common.Hash, error) { +func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (common.Hash, error) { quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -110,11 +110,11 @@ func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx ethdb.RwTx, cfg return root, err } -func RegenerateIntermediateHashes(logPrefix string, db ethdb.RwTx, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) (common.Hash, error) { +func RegenerateIntermediateHashes(logPrefix string, db kv.RwTx, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) (common.Hash, error) { log.Info(fmt.Sprintf("[%s] Regeneration 
trie hashes started", logPrefix)) defer log.Info(fmt.Sprintf("[%s] Regeneration ended", logPrefix)) - _ = db.(ethdb.BucketMigrator).ClearBucket(dbutils.TrieOfAccountsBucket) - _ = db.(ethdb.BucketMigrator).ClearBucket(dbutils.TrieOfStorageBucket) + _ = db.ClearBucket(kv.TrieOfAccounts) + _ = db.ClearBucket(kv.TrieOfStorage) accTrieCollector := etl.NewCollector(cfg.tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize)) defer accTrieCollector.Close(logPrefix) @@ -138,23 +138,23 @@ func RegenerateIntermediateHashes(logPrefix string, db ethdb.RwTx, cfg TrieCfg, } log.Info(fmt.Sprintf("[%s] Trie root", logPrefix), "hash", hash.Hex()) - if err := accTrieCollector.Load(logPrefix, db, dbutils.TrieOfAccountsBucket, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := accTrieCollector.Load(logPrefix, db, kv.TrieOfAccounts, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { return trie.EmptyRoot, err } - if err := stTrieCollector.Load(logPrefix, db, dbutils.TrieOfStorageBucket, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := stTrieCollector.Load(logPrefix, db, kv.TrieOfStorage, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { return trie.EmptyRoot, err } return hash, nil } type HashPromoter struct { - db ethdb.RwTx + db kv.RwTx ChangeSetBufSize uint64 TempDir string quitCh <-chan struct{} } -func NewHashPromoter(db ethdb.RwTx, quitCh <-chan struct{}) *HashPromoter { +func NewHashPromoter(db kv.RwTx, quitCh <-chan struct{}) *HashPromoter { return &HashPromoter{ db: db, ChangeSetBufSize: 256 * 1024 * 1024, @@ -166,9 +166,9 @@ func NewHashPromoter(db ethdb.RwTx, quitCh <-chan struct{}) *HashPromoter { func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, storage bool, load etl.LoadFunc) error { var changeSetBucket string if storage { - changeSetBucket = dbutils.StorageChangeSetBucket + changeSetBucket = kv.StorageChangeSet } else { - changeSetBucket = 
dbutils.AccountChangeSetBucket + changeSetBucket = kv.AccountChangeSet } log.Debug(fmt.Sprintf("[%s] Incremental state promotion of intermediate hashes", logPrefix), "from", from, "to", to, "csbucket", changeSetBucket) @@ -183,7 +183,7 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, return err } if !storage { - newValue, err := p.db.GetOne(dbutils.PlainStateBucket, k) + newValue, err := p.db.GetOne(kv.PlainStateBucket, k) if err != nil { return err } @@ -234,8 +234,8 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, if !storage { // delete Intermediate hashes of deleted accounts sort.Slice(deletedAccounts, func(i, j int) bool { return bytes.Compare(deletedAccounts[i], deletedAccounts[j]) < 0 }) for _, k := range deletedAccounts { - if err := p.db.ForPrefix(dbutils.TrieOfStorageBucket, k, func(k, v []byte) error { - if err := p.db.Delete(dbutils.TrieOfStorageBucket, k, v); err != nil { + if err := p.db.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { + if err := p.db.Delete(kv.TrieOfStorage, k, v); err != nil { return err } return nil @@ -253,9 +253,9 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s var changeSetBucket string if storage { - changeSetBucket = dbutils.StorageChangeSetBucket + changeSetBucket = kv.StorageChangeSet } else { - changeSetBucket = dbutils.AccountChangeSetBucket + changeSetBucket = kv.AccountChangeSet } log.Info(fmt.Sprintf("[%s] Unwinding of trie hashes", logPrefix), "from", s.BlockNumber, "to", to, "csbucket", changeSetBucket) @@ -270,7 +270,7 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s return err } // Plain state not unwind yet, it means - if key not-exists in PlainState but has value from ChangeSets - then need mark it as "created" in RetainList - value, err := p.db.GetOne(dbutils.PlainStateBucket, k) + value, err := p.db.GetOne(kv.PlainStateBucket, k) if err != nil { return err } @@ 
-320,8 +320,8 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s if !storage { // delete Intermediate hashes of deleted accounts sort.Slice(deletedAccounts, func(i, j int) bool { return bytes.Compare(deletedAccounts[i], deletedAccounts[j]) < 0 }) for _, k := range deletedAccounts { - if err := p.db.ForPrefix(dbutils.TrieOfStorageBucket, k, func(k, v []byte) error { - if err := p.db.Delete(dbutils.TrieOfStorageBucket, k, v); err != nil { + if err := p.db.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { + if err := p.db.Delete(kv.TrieOfStorage, k, v); err != nil { return err } return nil @@ -335,7 +335,7 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s return nil } -func incrementIntermediateHashes(logPrefix string, s *StageState, db ethdb.RwTx, to uint64, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) (common.Hash, error) { +func incrementIntermediateHashes(logPrefix string, s *StageState, db kv.RwTx, to uint64, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) (common.Hash, error) { p := NewHashPromoter(db, quit) p.TempDir = cfg.tmpDir rl := trie.NewRetainList(0) @@ -373,16 +373,16 @@ func incrementIntermediateHashes(logPrefix string, s *StageState, db ethdb.RwTx, log.Info(fmt.Sprintf("[%s] Trie root", logPrefix), " hash", hash.Hex()) - if err := accTrieCollector.Load(logPrefix, db, dbutils.TrieOfAccountsBucket, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := accTrieCollector.Load(logPrefix, db, kv.TrieOfAccounts, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { return trie.EmptyRoot, err } - if err := stTrieCollector.Load(logPrefix, db, dbutils.TrieOfStorageBucket, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := stTrieCollector.Load(logPrefix, db, kv.TrieOfStorage, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { return trie.EmptyRoot, err } return hash, nil } 
-func UnwindIntermediateHashesStage(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TrieCfg, ctx context.Context) (err error) { +func UnwindIntermediateHashesStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (err error) { quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -422,7 +422,7 @@ func UnwindIntermediateHashesStage(u *UnwindState, s *StageState, tx ethdb.RwTx, return nil } -func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *StageState, db ethdb.RwTx, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) error { +func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *StageState, db kv.RwTx, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) error { p := NewHashPromoter(db, quit) p.TempDir = cfg.tmpDir rl := trie.NewRetainList(0) @@ -457,23 +457,23 @@ func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *Stag return fmt.Errorf("wrong trie root: %x, expected (from header): %x", hash, expectedRootHash) } log.Info(fmt.Sprintf("[%s] Trie root", logPrefix), "hash", hash.Hex()) - if err := accTrieCollector.Load(logPrefix, db, dbutils.TrieOfAccountsBucket, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := accTrieCollector.Load(logPrefix, db, kv.TrieOfAccounts, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { return err } - if err := stTrieCollector.Load(logPrefix, db, dbutils.TrieOfStorageBucket, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := stTrieCollector.Load(logPrefix, db, kv.TrieOfStorage, etl.IdentityLoadFunc, etl.TransformArgs{Quit: quit}); err != nil { return err } return nil } -func ResetHashState(tx ethdb.RwTx) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.HashedAccountsBucket); err != nil { +func ResetHashState(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.HashedAccounts); err != nil { return err } - if err := 
tx.(ethdb.BucketMigrator).ClearBucket(dbutils.HashedStorageBucket); err != nil { + if err := tx.ClearBucket(kv.HashedStorage); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.ContractCodeBucket); err != nil { + if err := tx.ClearBucket(kv.ContractCode); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.HashState, 0); err != nil { @@ -486,11 +486,11 @@ func ResetHashState(tx ethdb.RwTx) error { return nil } -func ResetIH(tx ethdb.RwTx) error { - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.TrieOfAccountsBucket); err != nil { +func ResetIH(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.TrieOfAccounts); err != nil { return err } - if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.TrieOfStorageBucket); err != nil { + if err := tx.ClearBucket(kv.TrieOfStorage); err != nil { return err } if err := stages.SaveStageProgress(tx, stages.IntermediateHashes, 0); err != nil { @@ -548,7 +548,7 @@ func storageTrieCollector(collector *etl.Collector) trie.StorageHashCollector2 { } } -func PruneIntermediateHashesStage(s *PruneState, tx ethdb.RwTx, cfg TrieCfg, ctx context.Context) (err error) { +func PruneIntermediateHashesStage(s *PruneState, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_interhashes_test.go b/eth/stagedsync/stage_interhashes_test.go index 720f73822f3..d144f297499 100644 --- a/eth/stagedsync/stage_interhashes_test.go +++ b/eth/stagedsync/stage_interhashes_test.go @@ -7,15 +7,15 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/assert" 
) -func addTestAccount(tx ethdb.Putter, hash common.Hash, balance uint64, incarnation uint64) error { +func addTestAccount(tx kv.Putter, hash common.Hash, balance uint64, incarnation uint64) error { acc := accounts.NewAccount() acc.Balance.SetUint64(balance) acc.Incarnation = incarnation @@ -24,11 +24,11 @@ func addTestAccount(tx ethdb.Putter, hash common.Hash, balance uint64, incarnati } encoded := make([]byte, acc.EncodingLengthForStorage()) acc.EncodeForStorage(encoded) - return tx.Put(dbutils.HashedAccountsBucket, hash[:], encoded) + return tx.Put(kv.HashedAccounts, hash[:], encoded) } func TestAccountAndStorageTrie(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) hash1 := common.HexToHash("0xB000000000000000000000000000000000000000000000000000000000000000") assert.Nil(t, addTestAccount(tx, hash1, 3*params.Ether, 0)) @@ -50,10 +50,10 @@ func TestAccountAndStorageTrie(t *testing.T) { val3 := common.FromHex("0x127a89") val4 := common.FromHex("0x05") - assert.Nil(t, tx.Put(dbutils.HashedStorageBucket, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc1), val1)) - assert.Nil(t, tx.Put(dbutils.HashedStorageBucket, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc2), val2)) - assert.Nil(t, tx.Put(dbutils.HashedStorageBucket, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc3), val3)) - assert.Nil(t, tx.Put(dbutils.HashedStorageBucket, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc4), val4)) + assert.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc1), val1)) + assert.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc2), val2)) + assert.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc3), val3)) + assert.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hash3, incarnation, loc4), val4)) hash4 := 
common.HexToHash("0xB100000000000000000000000000000000000000000000000000000000000000") assert.Nil(t, addTestAccount(tx, hash4, 4*params.Ether, 0)) @@ -68,7 +68,7 @@ func TestAccountAndStorageTrie(t *testing.T) { assert.Nil(t, err) accountTrie := make(map[string][]byte) - err = tx.ForEach(dbutils.TrieOfAccountsBucket, nil, func(k, v []byte) error { + err = tx.ForEach(kv.TrieOfAccounts, nil, func(k, v []byte) error { accountTrie[string(k)] = v return nil }) @@ -91,7 +91,7 @@ func TestAccountAndStorageTrie(t *testing.T) { assert.Equal(t, 0, len(rootHash2)) storageTrie := make(map[string][]byte) - err = tx.ForEach(dbutils.TrieOfStorageBucket, nil, func(k, v []byte) error { + err = tx.ForEach(kv.TrieOfStorage, nil, func(k, v []byte) error { storageTrie[string(k)] = v return nil }) @@ -112,7 +112,7 @@ func TestAccountAndStorageTrie(t *testing.T) { } func TestAccountTrieAroundExtensionNode(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) acc := accounts.NewAccount() acc.Balance.SetUint64(1 * params.Ether) @@ -120,28 +120,28 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) { acc.EncodeForStorage(encoded) hash1 := common.HexToHash("0x30af561000000000000000000000000000000000000000000000000000000000") - assert.Nil(t, tx.Put(dbutils.HashedAccountsBucket, hash1[:], encoded)) + assert.Nil(t, tx.Put(kv.HashedAccounts, hash1[:], encoded)) hash2 := common.HexToHash("0x30af569000000000000000000000000000000000000000000000000000000000") - assert.Nil(t, tx.Put(dbutils.HashedAccountsBucket, hash2[:], encoded)) + assert.Nil(t, tx.Put(kv.HashedAccounts, hash2[:], encoded)) hash3 := common.HexToHash("0x30af650000000000000000000000000000000000000000000000000000000000") - assert.Nil(t, tx.Put(dbutils.HashedAccountsBucket, hash3[:], encoded)) + assert.Nil(t, tx.Put(kv.HashedAccounts, hash3[:], encoded)) hash4 := common.HexToHash("0x30af6f0000000000000000000000000000000000000000000000000000000000") - assert.Nil(t, tx.Put(dbutils.HashedAccountsBucket, hash4[:], 
encoded)) + assert.Nil(t, tx.Put(kv.HashedAccounts, hash4[:], encoded)) hash5 := common.HexToHash("0x30af8f0000000000000000000000000000000000000000000000000000000000") - assert.Nil(t, tx.Put(dbutils.HashedAccountsBucket, hash5[:], encoded)) + assert.Nil(t, tx.Put(kv.HashedAccounts, hash5[:], encoded)) hash6 := common.HexToHash("0x3100000000000000000000000000000000000000000000000000000000000000") - assert.Nil(t, tx.Put(dbutils.HashedAccountsBucket, hash6[:], encoded)) + assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded)) _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, t.TempDir()), common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) accountTrie := make(map[string][]byte) - err = tx.ForEach(dbutils.TrieOfAccountsBucket, nil, func(k, v []byte) error { + err = tx.ForEach(kv.TrieOfAccounts, nil, func(k, v []byte) error { accountTrie[string(k)] = v return nil }) diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go index 3a444d329e9..207dd828f2e 100644 --- a/eth/stagedsync/stage_log_index.go +++ b/eth/stagedsync/stage_log_index.go @@ -15,9 +15,9 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/ethdb/cbor" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" ) @@ -29,13 +29,13 @@ const ( type LogIndexCfg struct { tmpdir string - db ethdb.RwKV + db kv.RwDB prune prune.Mode bufLimit datasize.ByteSize flushEvery time.Duration } -func StageLogIndexCfg(db ethdb.RwKV, prune prune.Mode, tmpDir string) LogIndexCfg { +func StageLogIndexCfg(db kv.RwDB, prune prune.Mode, tmpDir string) LogIndexCfg { return LogIndexCfg{ db: db, prune: prune, @@ -45,7 +45,7 @@ func StageLogIndexCfg(db ethdb.RwKV, prune prune.Mode, 
tmpDir string) LogIndexCf } } -func SpawnLogIndex(s *StageState, tx ethdb.RwTx, cfg LogIndexCfg, ctx context.Context) error { +func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -90,14 +90,14 @@ func SpawnLogIndex(s *StageState, tx ethdb.RwTx, cfg LogIndexCfg, ctx context.Co return nil } -func promoteLogIndex(logPrefix string, tx ethdb.RwTx, start uint64, cfg LogIndexCfg, ctx context.Context) error { +func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg, ctx context.Context) error { quit := ctx.Done() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() topics := map[string]*roaring.Bitmap{} addresses := map[string]*roaring.Bitmap{} - logs, err := tx.Cursor(dbutils.Log) + logs, err := tx.Cursor(kv.Log) if err != nil { return err } @@ -212,18 +212,18 @@ func promoteLogIndex(logPrefix string, tx ethdb.RwTx, start uint64, cfg LogIndex }) } - if err := collectorTopics.Load(logPrefix, tx, dbutils.LogTopicIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := collectorTopics.Load(logPrefix, tx, kv.LogTopicIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { return err } - if err := collectorAddrs.Load(logPrefix, tx, dbutils.LogAddressIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { + if err := collectorAddrs.Load(logPrefix, tx, kv.LogAddressIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { return err } return nil } -func UnwindLogIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg LogIndexCfg, ctx context.Context) (err error) { +func UnwindLogIndex(u *UnwindState, s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context) (err error) { quitCh := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -250,12 +250,12 @@ func UnwindLogIndex(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg LogIndexCf return nil } -func unwindLogIndex(logPrefix string, db 
ethdb.RwTx, to uint64, cfg LogIndexCfg, quitCh <-chan struct{}) error { +func unwindLogIndex(logPrefix string, db kv.RwTx, to uint64, cfg LogIndexCfg, quitCh <-chan struct{}) error { topics := map[string]struct{}{} addrs := map[string]struct{}{} reader := bytes.NewReader(nil) - c, err := db.Cursor(dbutils.Log) + c, err := db.Cursor(kv.Log) if err != nil { return err } @@ -282,10 +282,10 @@ func unwindLogIndex(logPrefix string, db ethdb.RwTx, to uint64, cfg LogIndexCfg, } } - if err := truncateBitmaps(db, dbutils.LogTopicIndex, topics, to); err != nil { + if err := truncateBitmaps(db, kv.LogTopicIndex, topics, to); err != nil { return err } - if err := truncateBitmaps(db, dbutils.LogAddressIndex, addrs, to); err != nil { + if err := truncateBitmaps(db, kv.LogAddressIndex, addrs, to); err != nil { return err } return nil @@ -317,7 +317,7 @@ func flushBitmaps(c *etl.Collector, inMem map[string]*roaring.Bitmap) error { return nil } -func truncateBitmaps(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { +func truncateBitmaps(tx kv.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { keys := make([]string, 0, len(inMem)) for k := range inMem { keys = append(keys, k) @@ -332,7 +332,7 @@ func truncateBitmaps(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, to return nil } -func pruneOldLogChunks(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, pruneTo uint64, logPrefix string, ctx context.Context) error { +func pruneOldLogChunks(tx kv.RwTx, bucket string, inMem map[string]struct{}, pruneTo uint64, logPrefix string, ctx context.Context) error { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() keys := make([]string, 0, len(inMem)) @@ -357,7 +357,7 @@ func pruneOldLogChunks(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.AccountsHistoryBucket, "block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", 
logPrefix), "table", kv.AccountsHistory, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: @@ -370,7 +370,7 @@ func pruneOldLogChunks(tx ethdb.RwTx, bucket string, inMem map[string]struct{}, return nil } -func PruneLogIndex(s *PruneState, tx ethdb.RwTx, cfg LogIndexCfg, ctx context.Context) (err error) { +func PruneLogIndex(s *PruneState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context) (err error) { if !cfg.prune.Receipts.Enabled() { return nil } @@ -401,7 +401,7 @@ func PruneLogIndex(s *PruneState, tx ethdb.RwTx, cfg LogIndexCfg, ctx context.Co return nil } -func pruneLogIndex(logPrefix string, tx ethdb.RwTx, tmpDir string, pruneTo uint64, ctx context.Context) error { +func pruneLogIndex(logPrefix string, tx kv.RwTx, tmpDir string, pruneTo uint64, ctx context.Context) error { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() @@ -410,7 +410,7 @@ func pruneLogIndex(logPrefix string, tx ethdb.RwTx, tmpDir string, pruneTo uint6 reader := bytes.NewReader(nil) { - c, err := tx.Cursor(dbutils.Log) + c, err := tx.Cursor(kv.Log) if err != nil { return err } @@ -426,7 +426,7 @@ func pruneLogIndex(logPrefix string, tx ethdb.RwTx, tmpDir string, pruneTo uint6 } select { case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", dbutils.Log, "block", blockNum) + log.Info(fmt.Sprintf("[%s] Mode", logPrefix), "table", kv.Log, "block", blockNum) case <-ctx.Done(): return common.ErrStopped default: @@ -447,10 +447,10 @@ func pruneLogIndex(logPrefix string, tx ethdb.RwTx, tmpDir string, pruneTo uint6 } } - if err := pruneOldLogChunks(tx, dbutils.LogTopicIndex, topics, pruneTo, logPrefix, ctx); err != nil { + if err := pruneOldLogChunks(tx, kv.LogTopicIndex, topics, pruneTo, logPrefix, ctx); err != nil { return err } - if err := pruneOldLogChunks(tx, dbutils.LogAddressIndex, addrs, pruneTo, logPrefix, ctx); err != nil { + if err := pruneOldLogChunks(tx, kv.LogAddressIndex, addrs, pruneTo, logPrefix, ctx); err != nil { return err } 
return nil diff --git a/eth/stagedsync/stage_log_index_test.go b/eth/stagedsync/stage_log_index_test.go index c918b0479d0..23f3ae7a314 100644 --- a/eth/stagedsync/stage_log_index_test.go +++ b/eth/stagedsync/stage_log_index_test.go @@ -7,18 +7,17 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/stretchr/testify/require" ) -func genReceipts(t *testing.T, tx ethdb.RwTx, blocks uint64) (map[common.Address]uint64, map[common.Hash]uint64) { +func genReceipts(t *testing.T, tx kv.RwTx, blocks uint64) (map[common.Address]uint64, map[common.Hash]uint64) { addrs := []common.Address{{1}, {2}, {3}} topics := []common.Hash{{1}, {2}, {3}} @@ -92,7 +91,7 @@ func genReceipts(t *testing.T, tx ethdb.RwTx, blocks uint64) (map[common.Address func TestLogIndex(t *testing.T) { require, tmpDir, ctx := require.New(t), t.TempDir(), context.Background() - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) expectAddrs, expectTopics := genReceipts(t, tx, 10000) @@ -105,12 +104,12 @@ func TestLogIndex(t *testing.T) { // Check indices GetCardinality (in how many blocks they meet) for addr, expect := range expectAddrs { - m, err := bitmapdb.Get(tx, dbutils.LogAddressIndex, addr[:], 0, 10_000_000) + m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], 0, 10_000_000) require.NoError(err) require.Equal(expect, m.GetCardinality()) } for topic, expect := range expectTopics { - m, err := bitmapdb.Get(tx, dbutils.LogTopicIndex, topic[:], 0, 10_000_000) + m, err := bitmapdb.Get(tx, kv.LogTopicIndex, topic[:], 0, 10_000_000) require.NoError(err) require.Equal(expect, m.GetCardinality()) } @@ -121,7 +120,7 @@ func TestLogIndex(t 
*testing.T) { { total := 0 - err = tx.ForEach(dbutils.LogAddressIndex, nil, func(k, v []byte) error { + err = tx.ForEach(kv.LogAddressIndex, nil, func(k, v []byte) error { require.True(binary.BigEndian.Uint32(k[common.AddressLength:]) >= 500) total++ return nil @@ -131,7 +130,7 @@ func TestLogIndex(t *testing.T) { } { total := 0 - err = tx.ForEach(dbutils.LogTopicIndex, nil, func(k, v []byte) error { + err = tx.ForEach(kv.LogTopicIndex, nil, func(k, v []byte) error { require.True(binary.BigEndian.Uint32(k[common.HashLength:]) >= 500) total++ return nil @@ -145,12 +144,12 @@ func TestLogIndex(t *testing.T) { require.NoError(err) for addr := range expectAddrs { - m, err := bitmapdb.Get(tx, dbutils.LogAddressIndex, addr[:], 0, 10_000_000) + m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], 0, 10_000_000) require.NoError(err) require.True(m.Maximum() <= 700) } for topic := range expectTopics { - m, err := bitmapdb.Get(tx, dbutils.LogTopicIndex, topic[:], 0, 10_000_000) + m, err := bitmapdb.Get(tx, kv.LogTopicIndex, topic[:], 0, 10_000_000) require.NoError(err) require.True(m.Maximum() <= 700) } diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index 27a8719b836..274ed1b126e 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -15,7 +15,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) @@ -47,7 +47,7 @@ func NewMiningState(cfg *params.MiningConfig) MiningState { } type MiningCreateBlockCfg struct { - db ethdb.RwKV + db kv.RwDB miner MiningState chainConfig params.ChainConfig engine consensus.Engine @@ -56,7 +56,7 @@ type MiningCreateBlockCfg struct { } func StageMiningCreateBlockCfg( - db ethdb.RwKV, + db 
kv.RwDB, miner MiningState, chainConfig params.ChainConfig, engine consensus.Engine, @@ -76,7 +76,7 @@ func StageMiningCreateBlockCfg( // SpawnMiningCreateBlockStage //TODO: // - resubmitAdjustCh - variable is not implemented -func SpawnMiningCreateBlockStage(s *StageState, tx ethdb.RwTx, cfg MiningCreateBlockCfg, quit <-chan struct{}) error { +func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBlockCfg, quit <-chan struct{}) error { txPoolLocals := cfg.txPool.Locals() pendingTxs, err := cfg.txPool.Pending() if err != nil { @@ -289,7 +289,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx ethdb.RwTx, cfg MiningCreateB return nil } -func readNonCanonicalHeaders(tx ethdb.Tx, blockNum uint64, engine consensus.Engine, coinbase common.Address, txPoolLocals []common.Address) (localUncles, remoteUncles map[common.Hash]*types.Header, err error) { +func readNonCanonicalHeaders(tx kv.Tx, blockNum uint64, engine consensus.Engine, coinbase common.Address, txPoolLocals []common.Address) (localUncles, remoteUncles map[common.Hash]*types.Header, err error) { localUncles, remoteUncles = map[common.Hash]*types.Header{}, map[common.Hash]*types.Header{} nonCanonicalBlocks, err := rawdb.ReadHeadersByNumber(tx, blockNum) if err != nil { diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 521a80f7871..5da01c866f1 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -13,12 +13,13 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) type MiningExecCfg struct { - db ethdb.RwKV + db kv.RwDB miningState MiningState notifier ChainEventNotifier chainConfig params.ChainConfig @@ -28,7 +29,7 @@ type MiningExecCfg struct { } func StageMiningExecCfg( - db ethdb.RwKV, + db kv.RwDB, 
miningState MiningState, notifier ChainEventNotifier, chainConfig params.ChainConfig, @@ -50,7 +51,7 @@ func StageMiningExecCfg( // SpawnMiningExecStage //TODO: // - resubmitAdjustCh - variable is not implemented -func SpawnMiningExecStage(s *StageState, tx ethdb.RwTx, cfg MiningExecCfg, quit <-chan struct{}) error { +func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-chan struct{}) error { cfg.vmConfig.NoReceipts = false logPrefix := s.LogPrefix() current := cfg.miningState.MiningBlock diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go index fda18ec102d..dce953ff68f 100644 --- a/eth/stagedsync/stage_mining_finish.go +++ b/eth/stagedsync/stage_mining_finish.go @@ -5,13 +5,13 @@ import ( "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) type MiningFinishCfg struct { - db ethdb.RwKV + db kv.RwDB chainConfig params.ChainConfig engine consensus.Engine sealCancel <-chan struct{} @@ -19,7 +19,7 @@ type MiningFinishCfg struct { } func StageMiningFinishCfg( - db ethdb.RwKV, + db kv.RwDB, chainConfig params.ChainConfig, engine consensus.Engine, miningState MiningState, @@ -34,7 +34,7 @@ func StageMiningFinishCfg( } } -func SpawnMiningFinishStage(s *StageState, tx ethdb.RwTx, cfg MiningFinishCfg, quit <-chan struct{}) error { +func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit <-chan struct{}) error { logPrefix := s.LogPrefix() current := cfg.miningState.MiningBlock diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 3bef0ecb562..64a0a979c53 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -16,14 +16,14 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/secp256k1" ) type SendersCfg struct { - db ethdb.RwKV + db kv.RwDB batchSize int blockSize int bufferSize int @@ -34,7 +34,7 @@ type SendersCfg struct { chainConfig *params.ChainConfig } -func StageSendersCfg(db ethdb.RwKV, chainCfg *params.ChainConfig, tmpdir string) SendersCfg { +func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string) SendersCfg { const sendersBatchSize = 10000 const sendersBlockSize = 4096 @@ -50,7 +50,7 @@ func StageSendersCfg(db ethdb.RwKV, chainCfg *params.ChainConfig, tmpdir string) } } -func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx ethdb.RwTx, toBlock uint64, ctx context.Context) error { +func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context) error { quitCh := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -85,7 +85,7 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx ethd canonical := make([]common.Hash, to-s.BlockNumber) currentHeaderIdx := uint64(0) - canonicalC, err := tx.Cursor(dbutils.HeaderCanonicalBucket) + canonicalC, err := tx.Cursor(kv.HeaderCanonical) if err != nil { return err } @@ -181,7 +181,7 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx ethd return nil } - bodiesC, err := tx.Cursor(dbutils.BlockBodyPrefix) + bodiesC, err := tx.Cursor(kv.BlockBody) if err != nil { return err } @@ -239,7 +239,7 @@ Loop: } } else { if err := collectorSenders.Load(logPrefix, tx, - dbutils.Senders, + kv.Senders, etl.IdentityLoadFunc, etl.TransformArgs{ Quit: quitCh, @@ -323,7 +323,7 @@ func recoverSenders(ctx context.Context, logPrefix string, cryptoContext *secp25 } } -func UnwindSendersStage(s *UnwindState, tx ethdb.RwTx, 
cfg SendersCfg, ctx context.Context) (err error) { +func UnwindSendersStage(s *UnwindState, tx kv.RwTx, cfg SendersCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -344,7 +344,7 @@ func UnwindSendersStage(s *UnwindState, tx ethdb.RwTx, cfg SendersCfg, ctx conte return nil } -func PruneSendersStage(s *PruneState, tx ethdb.RwTx, cfg SendersCfg, ctx context.Context) (err error) { +func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go index efc7521779c..6dd20eca64b 100644 --- a/eth/stagedsync/stage_senders_test.go +++ b/eth/stagedsync/stage_senders_test.go @@ -10,7 +10,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,7 +18,7 @@ import ( func TestSenders(t *testing.T) { ctx := context.Background() - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) require := require.New(t) var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") diff --git a/eth/stagedsync/stage_state_snapshot.go b/eth/stagedsync/stage_state_snapshot.go index 31fcccdbd21..b4f3ea8c2f0 100644 --- a/eth/stagedsync/stage_state_snapshot.go +++ b/eth/stagedsync/stage_state_snapshot.go @@ -4,19 +4,19 @@ import ( "context" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) type SnapshotStateCfg struct { - db ethdb.RwKV + db kv.RwDB snapshotDir 
string tmpDir string client *snapshotsync.Client snapshotMigrator *snapshotsync.SnapshotMigrator } -func StageSnapshotStateCfg(db ethdb.RwKV, snapshot ethconfig.Snapshot, tmpDir string, client *snapshotsync.Client, snapshotMigrator *snapshotsync.SnapshotMigrator) SnapshotStateCfg { +func StageSnapshotStateCfg(db kv.RwDB, snapshot ethconfig.Snapshot, tmpDir string, client *snapshotsync.Client, snapshotMigrator *snapshotsync.SnapshotMigrator) SnapshotStateCfg { return SnapshotStateCfg{ db: db, snapshotDir: snapshot.Dir, @@ -26,7 +26,7 @@ func StageSnapshotStateCfg(db ethdb.RwKV, snapshot ethconfig.Snapshot, tmpDir st } } -func SpawnStateSnapshotGenerationStage(s *StageState, tx ethdb.RwTx, cfg SnapshotStateCfg, ctx context.Context) (err error) { +func SpawnStateSnapshotGenerationStage(s *StageState, tx kv.RwTx, cfg SnapshotStateCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -47,7 +47,7 @@ func SpawnStateSnapshotGenerationStage(s *StageState, tx ethdb.RwTx, cfg Snapsho return nil } -func UnwindStateSnapshotGenerationStage(s *UnwindState, tx ethdb.RwTx, cfg SnapshotStateCfg, ctx context.Context) (err error) { +func UnwindStateSnapshotGenerationStage(s *UnwindState, tx kv.RwTx, cfg SnapshotStateCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -68,7 +68,7 @@ func UnwindStateSnapshotGenerationStage(s *UnwindState, tx ethdb.RwTx, cfg Snaps return nil } -func PruneStateSnapshotGenerationStage(s *PruneState, tx ethdb.RwTx, cfg SnapshotStateCfg, ctx context.Context) (err error) { +func PruneStateSnapshotGenerationStage(s *PruneState, tx kv.RwTx, cfg SnapshotStateCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_tevm.go b/eth/stagedsync/stage_tevm.go index 31ec25b3743..b34a34c6c29 100644 --- a/eth/stagedsync/stage_tevm.go +++ 
b/eth/stagedsync/stage_tevm.go @@ -15,6 +15,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" "github.com/ledgerwatch/erigon/params" @@ -23,13 +24,13 @@ import ( var stageTranspileGauge = metrics.NewRegisteredGauge("stage/tevm", nil) type TranspileCfg struct { - db ethdb.RwKV + db kv.RwDB batchSize datasize.ByteSize chainConfig *params.ChainConfig } func StageTranspileCfg( - kv ethdb.RwKV, + kv kv.RwDB, batchSize datasize.ByteSize, chainConfig *params.ChainConfig, ) TranspileCfg { @@ -40,12 +41,12 @@ func StageTranspileCfg( } } -func SpawnTranspileStage(s *StageState, tx ethdb.RwTx, toBlock uint64, cfg TranspileCfg, ctx context.Context) error { +func SpawnTranspileStage(s *StageState, tx kv.RwTx, toBlock uint64, cfg TranspileCfg, ctx context.Context) error { var prevStageProgress uint64 var errStart error if tx == nil { - errStart = cfg.db.View(ctx, func(tx ethdb.Tx) error { + errStart = cfg.db.View(ctx, func(tx kv.Tx) error { prevStageProgress, errStart = stages.GetStageProgress(tx, stages.Execution) return errStart }) @@ -94,7 +95,7 @@ func SpawnTranspileStage(s *StageState, tx ethdb.RwTx, toBlock uint64, cfg Trans return nil } -func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg TranspileCfg, tx ethdb.RwTx, observedAddresses map[common.Address]struct{}, observedCodeHashes map[common.Hash]struct{}, quitCh <-chan struct{}) (uint64, error) { +func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg TranspileCfg, tx kv.RwTx, observedAddresses map[common.Address]struct{}, observedCodeHashes map[common.Hash]struct{}, quitCh <-chan struct{}) (uint64, error) { useExternalTx := tx != nil var err error if !useExternalTx { @@ -105,11 +106,11 @@ func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg 
Transpi defer tx.Rollback() } - batch := kv.NewBatch(tx, quitCh) + batch := olddb.NewBatch(tx, quitCh) defer batch.Rollback() // read contracts pending for translation - c, err := tx.CursorDupSort(dbutils.CallTraceSet) + c, err := tx.CursorDupSort(kv.CallTraceSet) if err != nil { return 0, err } @@ -197,7 +198,7 @@ func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg Transpi observedCodeHashes[codeHash] = struct{}{} // check if we already have TEVM code - ok, err = batch.Has(dbutils.ContractTEVMCodeBucket, codeHashBytes) + ok, err = batch.Has(kv.ContractTEVMCode, codeHashBytes) if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { return 0, fmt.Errorf("can't read code TEVM bucket by contract hash %q: %w", codeHash, err) } @@ -207,7 +208,7 @@ func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg Transpi } // load the contract code - evmContract, err = batch.GetOne(dbutils.CodeBucket, codeHashBytes) + evmContract, err = batch.GetOne(kv.Code, codeHashBytes) if err != nil { if errors.Is(err, ethdb.ErrKeyNotFound) { continue @@ -229,7 +230,7 @@ func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg Transpi } // store TEVM contract code - err = batch.Put(dbutils.ContractTEVMCodeBucket, codeHashBytes, transpiledCode) + err = batch.Put(kv.ContractTEVMCode, codeHashBytes, transpiledCode) if err != nil { return 0, fmt.Errorf("cannot store TEVM code %q: %w", codeHash, err) } @@ -268,7 +269,7 @@ func logTEVMProgress(logPrefix string, prevContract uint64, prevTime time.Time, return currentContract, currentTime } -func UnwindTranspileStage(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TranspileCfg, ctx context.Context) (err error) { +func UnwindTranspileStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg TranspileCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -279,7 +280,7 @@ func UnwindTranspileStage(u *UnwindState, s *StageState, tx 
ethdb.RwTx, cfg Tran } keyStart := dbutils.EncodeBlockNumber(u.UnwindPoint + 1) - c, err := tx.CursorDupSort(dbutils.CallTraceSet) + c, err := tx.CursorDupSort(kv.CallTraceSet) if err != nil { return err } @@ -326,7 +327,7 @@ func UnwindTranspileStage(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg Tran codeHashBytes = codeHash.Bytes() // check if we already have TEVM code - ok, err = tx.Has(dbutils.ContractTEVMCodeBucket, codeHashBytes) + ok, err = tx.Has(kv.ContractTEVMCode, codeHashBytes) if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { return fmt.Errorf("can't read code TEVM bucket by contract hash %q: %w", codeHash, err) } @@ -335,7 +336,7 @@ func UnwindTranspileStage(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg Tran continue } - err = tx.Delete(dbutils.ContractTEVMCodeBucket, codeHashBytes, nil) + err = tx.Delete(kv.ContractTEVMCode, codeHashBytes, nil) if err != nil { return fmt.Errorf("can't delete TEVM code by hash %q: %w", codeHash, err) } @@ -357,7 +358,7 @@ func transpileCode(code []byte) ([]byte, error) { return append(make([]byte, 0, len(code)), code...), nil } -func PruneTranspileStage(p *PruneState, tx ethdb.RwTx, cfg TranspileCfg, initialCycle bool, ctx context.Context) (err error) { +func PruneTranspileStage(p *PruneState, tx kv.RwTx, cfg TranspileCfg, initialCycle bool, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git a/eth/stagedsync/stage_txlookup.go b/eth/stagedsync/stage_txlookup.go index 38e2167c4f9..31788c63787 100644 --- a/eth/stagedsync/stage_txlookup.go +++ b/eth/stagedsync/stage_txlookup.go @@ -12,19 +12,19 @@ import ( "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/rlp" ) type TxLookupCfg struct { - db ethdb.RwKV 
+ db kv.RwDB prune prune.Mode tmpdir string } func StageTxLookupCfg( - db ethdb.RwKV, + db kv.RwDB, prune prune.Mode, tmpdir string, ) TxLookupCfg { @@ -35,7 +35,7 @@ func StageTxLookupCfg( } } -func SpawnTxLookup(s *StageState, tx ethdb.RwTx, cfg TxLookupCfg, ctx context.Context) (err error) { +func SpawnTxLookup(s *StageState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Context) (err error) { quitCh := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -75,9 +75,9 @@ func SpawnTxLookup(s *StageState, tx ethdb.RwTx, cfg TxLookupCfg, ctx context.Co return nil } -func TxLookupTransform(logPrefix string, tx ethdb.RwTx, startKey, endKey []byte, quitCh <-chan struct{}, cfg TxLookupCfg) error { +func TxLookupTransform(logPrefix string, tx kv.RwTx, startKey, endKey []byte, quitCh <-chan struct{}, cfg TxLookupCfg) error { bigNum := new(big.Int) - return etl.Transform(logPrefix, tx, dbutils.HeaderCanonicalBucket, dbutils.TxLookupPrefix, cfg.tmpdir, func(k []byte, v []byte, next etl.ExtractNextFunc) error { + return etl.Transform(logPrefix, tx, kv.HeaderCanonical, kv.TxLookup, cfg.tmpdir, func(k []byte, v []byte, next etl.ExtractNextFunc) error { blocknum := binary.BigEndian.Uint64(k) blockHash := common.BytesToHash(v) body := rawdb.ReadBody(tx, blockHash, blocknum) @@ -101,7 +101,7 @@ func TxLookupTransform(logPrefix string, tx ethdb.RwTx, startKey, endKey []byte, }) } -func UnwindTxLookup(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TxLookupCfg, ctx context.Context) (err error) { +func UnwindTxLookup(u *UnwindState, s *StageState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Context) (err error) { quitCh := ctx.Done() if s.BlockNumber <= u.UnwindPoint { return nil @@ -129,10 +129,10 @@ func UnwindTxLookup(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TxLookupCf return nil } -func unwindTxLookup(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TxLookupCfg, quitCh <-chan struct{}) error { +func unwindTxLookup(u *UnwindState, s *StageState, tx kv.RwTx, cfg 
TxLookupCfg, quitCh <-chan struct{}) error { reader := bytes.NewReader(nil) logPrefix := s.LogPrefix() - return etl.Transform(logPrefix, tx, dbutils.BlockBodyPrefix, dbutils.TxLookupPrefix, cfg.tmpdir, func(k, v []byte, next etl.ExtractNextFunc) error { + return etl.Transform(logPrefix, tx, kv.BlockBody, kv.TxLookup, cfg.tmpdir, func(k, v []byte, next etl.ExtractNextFunc) error { body := new(types.BodyForStorage) reader.Reset(v) if err := rlp.Decode(reader, body); err != nil { @@ -159,7 +159,7 @@ func unwindTxLookup(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TxLookupCf }) } -func PruneTxLookup(s *PruneState, tx ethdb.RwTx, cfg TxLookupCfg, ctx context.Context) (err error) { +func PruneTxLookup(s *PruneState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Context) (err error) { if !cfg.prune.TxIndex.Enabled() { return nil } @@ -193,9 +193,9 @@ func PruneTxLookup(s *PruneState, tx ethdb.RwTx, cfg TxLookupCfg, ctx context.Co return nil } -func pruneTxLookup(tx ethdb.RwTx, logPrefix, tmpDir string, s *PruneState, pruneTo uint64, ctx context.Context) error { +func pruneTxLookup(tx kv.RwTx, logPrefix, tmpDir string, s *PruneState, pruneTo uint64, ctx context.Context) error { reader := bytes.NewReader(nil) - return etl.Transform(logPrefix, tx, dbutils.BlockBodyPrefix, dbutils.TxLookupPrefix, tmpDir, func(k, v []byte, next etl.ExtractNextFunc) error { + return etl.Transform(logPrefix, tx, kv.BlockBody, kv.TxLookup, tmpDir, func(k, v []byte, next etl.ExtractNextFunc) error { body := new(types.BodyForStorage) reader.Reset(v) if err := rlp.Decode(reader, body); err != nil { diff --git a/eth/stagedsync/stage_txpool.go b/eth/stagedsync/stage_txpool.go index 2118ff876f7..b9219fd89ec 100644 --- a/eth/stagedsync/stage_txpool.go +++ b/eth/stagedsync/stage_txpool.go @@ -10,17 +10,17 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + 
"github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) type TxPoolCfg struct { - db ethdb.RwKV + db kv.RwDB pool *core.TxPool startFunc func() } -func StageTxPoolCfg(db ethdb.RwKV, pool *core.TxPool, startFunc func()) TxPoolCfg { +func StageTxPoolCfg(db kv.RwDB, pool *core.TxPool, startFunc func()) TxPoolCfg { return TxPoolCfg{ db: db, pool: pool, @@ -28,7 +28,7 @@ func StageTxPoolCfg(db ethdb.RwKV, pool *core.TxPool, startFunc func()) TxPoolCf } } -func SpawnTxPool(s *StageState, tx ethdb.RwTx, cfg TxPoolCfg, ctx context.Context) error { +func SpawnTxPool(s *StageState, tx kv.RwTx, cfg TxPoolCfg, ctx context.Context) error { quitCh := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -83,7 +83,7 @@ func SpawnTxPool(s *StageState, tx ethdb.RwTx, cfg TxPoolCfg, ctx context.Contex return nil } -func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx ethdb.RwTx, quitCh <-chan struct{}) error { +func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx kv.RwTx, quitCh <-chan struct{}) error { headHash, err := rawdb.ReadCanonicalHash(tx, to) if err != nil { return err @@ -94,7 +94,7 @@ func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPoo canonical := make([]common.Hash, to-from) currentHeaderIdx := uint64(0) - canonicals, err := tx.Cursor(dbutils.HeaderCanonicalBucket) + canonicals, err := tx.Cursor(kv.HeaderCanonical) if err != nil { return err } @@ -116,7 +116,7 @@ func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPoo } log.Debug(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical)) - bodies, err := tx.Cursor(dbutils.BlockBodyPrefix) + bodies, err := tx.Cursor(kv.BlockBody) if err != nil { return err } @@ -148,7 +148,7 @@ func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPoo return nil } -func UnwindTxPool(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TxPoolCfg, ctx 
context.Context) (err error) { +func UnwindTxPool(u *UnwindState, s *StageState, tx kv.RwTx, cfg TxPoolCfg, ctx context.Context) (err error) { if u.UnwindPoint >= s.BlockNumber { return nil } @@ -181,7 +181,7 @@ func UnwindTxPool(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg TxPoolCfg, c return nil } -func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx ethdb.RwTx, quitCh <-chan struct{}) error { +func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx kv.RwTx, quitCh <-chan struct{}) error { headHash, err := rawdb.ReadCanonicalHash(tx, from) if err != nil { return err @@ -190,7 +190,7 @@ func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx pool.ResetHead(headHeader.GasLimit, from) canonical := make([]common.Hash, to-from) - canonicals, err := tx.Cursor(dbutils.HeaderCanonicalBucket) + canonicals, err := tx.Cursor(kv.HeaderCanonical) if err != nil { return err } @@ -212,7 +212,7 @@ func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx } log.Debug(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical)) senders := make([][]common.Address, to-from+1) - sendersC, err := tx.Cursor(dbutils.Senders) + sendersC, err := tx.Cursor(kv.Senders) if err != nil { return err } @@ -243,7 +243,7 @@ func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx } var txsToInject []types.Transaction - bodies, err := tx.Cursor(dbutils.BlockBodyPrefix) + bodies, err := tx.Cursor(kv.BlockBody) if err != nil { return err } @@ -278,7 +278,7 @@ func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx return nil } -func PruneTxPool(s *PruneState, tx ethdb.RwTx, cfg TxPoolCfg, ctx context.Context) (err error) { +func PruneTxPool(s *PruneState, tx kv.RwTx, cfg TxPoolCfg, ctx context.Context) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) diff --git 
a/eth/stagedsync/stagebuilder.go b/eth/stagedsync/stagebuilder.go index 25e57b4427a..87d39005d31 100644 --- a/eth/stagedsync/stagebuilder.go +++ b/eth/stagedsync/stagebuilder.go @@ -5,8 +5,8 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/turbo/shards" ) @@ -16,7 +16,7 @@ type ChainEventNotifier interface { } type Notifications struct { - Events *remotedbserver.Events + Events *privateapi.Events Accumulator *shards.Accumulator } @@ -32,34 +32,34 @@ func MiningStages( { ID: stages.MiningCreateBlock, Description: "Mining: construct new block from tx pool", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnMiningCreateBlockStage(s, tx, createBlockCfg, ctx.Done()) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { return nil }, - Prune: func(firstCycle bool, u *PruneState, tx ethdb.RwTx) error { return nil }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, + Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx) error { return nil }, }, { ID: stages.MiningExecution, Description: "Mining: construct new block from tx pool", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnMiningExecStage(s, tx, execCfg, ctx.Done()) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { return nil }, - Prune: func(firstCycle bool, u *PruneState, tx ethdb.RwTx) error { return nil }, + Unwind: func(firstCycle bool, u *UnwindState, s 
*StageState, tx kv.RwTx) error { return nil }, + Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx) error { return nil }, }, { ID: stages.HashState, Description: "Hash the key in the state", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnHashStateStage(s, tx, hashStateCfg, ctx) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { return nil }, - Prune: func(firstCycle bool, u *PruneState, tx ethdb.RwTx) error { return nil }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, + Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx) error { return nil }, }, { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { stateRoot, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx) if err != nil { return err @@ -67,17 +67,17 @@ func MiningStages( createBlockCfg.miner.MiningBlock.Header.Root = stateRoot return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { return nil }, - Prune: func(firstCycle bool, u *PruneState, tx ethdb.RwTx) error { return nil }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, + Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx) error { return nil }, }, { ID: stages.MiningFinish, Description: "Mining: create and propagate valid block", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnMiningFinishStage(s, tx, finish, ctx.Done()) }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) 
error { return nil }, - Prune: func(firstCycle bool, u *PruneState, tx ethdb.RwTx) error { return nil }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, + Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx) error { return nil }, }, } } diff --git a/eth/stagedsync/stages/stages.go b/eth/stagedsync/stages/stages.go index 158a53170e2..bff850cc998 100644 --- a/eth/stagedsync/stages/stages.go +++ b/eth/stagedsync/stages/stages.go @@ -20,8 +20,7 @@ import ( "encoding/binary" "fmt" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) // SyncStage represents the stages of syncronisation in the SyncMode.StagedSync mode @@ -43,7 +42,7 @@ var ( LogIndex SyncStage = "LogIndex" // Generating logs index (from receipts) CallTraces SyncStage = "CallTraces" // Generating call traces index TxLookup SyncStage = "TxLookup" // Generating transactions lookup index - TxPool SyncStage = "TxPool" // Starts Backend + TxPool SyncStage = "TxPoolDB" // Starts Backend Finish SyncStage = "Finish" // Nominal stage after all other stages MiningCreateBlock SyncStage = "MiningCreateBlock" @@ -74,29 +73,29 @@ var AllStages = []SyncStage{ } // GetStageProgress retrieves saved progress of given sync stage from the database -func GetStageProgress(db ethdb.KVGetter, stage SyncStage) (uint64, error) { - v, err := db.GetOne(dbutils.SyncStageProgress, []byte(stage)) +func GetStageProgress(db kv.Getter, stage SyncStage) (uint64, error) { + v, err := db.GetOne(kv.SyncStageProgress, []byte(stage)) if err != nil { return 0, err } return unmarshalData(v) } -func SaveStageProgress(db ethdb.Putter, stage SyncStage, progress uint64) error { - return db.Put(dbutils.SyncStageProgress, []byte(stage), marshalData(progress)) +func SaveStageProgress(db kv.Putter, stage SyncStage, progress uint64) error { + return db.Put(kv.SyncStageProgress, []byte(stage), marshalData(progress)) } // 
GetStagePruneProgress retrieves saved progress of given sync stage from the database -func GetStagePruneProgress(db ethdb.KVGetter, stage SyncStage) (uint64, error) { - v, err := db.GetOne(dbutils.SyncStageProgress, []byte("prune_"+stage)) +func GetStagePruneProgress(db kv.Getter, stage SyncStage) (uint64, error) { + v, err := db.GetOne(kv.SyncStageProgress, []byte("prune_"+stage)) if err != nil { return 0, err } return unmarshalData(v) } -func SaveStagePruneProgress(db ethdb.Putter, stage SyncStage, progress uint64) error { - return db.Put(dbutils.SyncStageProgress, []byte("prune_"+stage), marshalData(progress)) +func SaveStagePruneProgress(db kv.Putter, stage SyncStage, progress uint64) error { + return db.Put(kv.SyncStageProgress, []byte("prune_"+stage), marshalData(progress)) } func marshalData(blockNumber uint64) []byte { diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 3dc02fd914e..4ade899d5c3 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -7,10 +7,9 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) @@ -39,7 +38,7 @@ func (s *Sync) NewUnwindState(id stages.SyncStage, unwindPoint, currentProgress return &UnwindState{id, unwindPoint, currentProgress, common.Hash{}, s} } -func (s *Sync) PruneStageState(id stages.SyncStage, forwardProgress uint64, tx ethdb.Tx, db ethdb.RwKV) (*PruneState, error) { +func (s *Sync) PruneStageState(id stages.SyncStage, forwardProgress uint64, tx kv.Tx, db kv.RwDB) (*PruneState, error) { var pruneProgress uint64 var err error useExternalTx := tx != nil @@ -49,7 +48,7 @@ func (s *Sync) PruneStageState(id stages.SyncStage, forwardProgress uint64, tx e return nil, err } } else { - if err = db.View(context.Background(), func(tx 
ethdb.Tx) error { + if err = db.View(context.Background(), func(tx kv.Tx) error { pruneProgress, err = stages.GetStagePruneProgress(tx, id) if err != nil { return err @@ -159,7 +158,7 @@ func New(stagesList []*Stage, unwindOrder UnwindOrder, pruneOrder PruneOrder) *S } } -func (s *Sync) StageState(stage stages.SyncStage, tx ethdb.Tx, db ethdb.RoKV) (*StageState, error) { +func (s *Sync) StageState(stage stages.SyncStage, tx kv.Tx, db kv.RoDB) (*StageState, error) { var blockNum uint64 var err error useExternalTx := tx != nil @@ -169,7 +168,7 @@ func (s *Sync) StageState(stage stages.SyncStage, tx ethdb.Tx, db ethdb.RoKV) (* return nil, err } } else { - if err = db.View(context.Background(), func(tx ethdb.Tx) error { + if err = db.View(context.Background(), func(tx kv.Tx) error { blockNum, err = stages.GetStageProgress(tx, stage) if err != nil { return err @@ -183,7 +182,7 @@ func (s *Sync) StageState(stage stages.SyncStage, tx ethdb.Tx, db ethdb.RoKV) (* return &StageState{s, stage, blockNum}, nil } -func (s *Sync) Run(db ethdb.RwKV, tx ethdb.RwTx, firstCycle bool) error { +func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { s.prevUnwindPoint = nil s.timings = s.timings[:0] for !s.IsDone() { @@ -247,7 +246,7 @@ func (s *Sync) Run(db ethdb.RwKV, tx ethdb.RwTx, firstCycle bool) error { return nil } -func printLogs(tx ethdb.RwTx, timings []Timing) error { +func printLogs(tx kv.RwTx, timings []Timing) error { var logCtx []interface{} count := 0 for i := range timings { @@ -277,11 +276,11 @@ func printLogs(tx ethdb.RwTx, timings []Timing) error { if len(logCtx) > 0 { // also don't print this logs if everything is fast buckets := []string{ "freelist", - dbutils.PlainStateBucket, - dbutils.AccountChangeSetBucket, - dbutils.StorageChangeSetBucket, - dbutils.EthTx, - dbutils.Log, + kv.PlainStateBucket, + kv.AccountChangeSet, + kv.StorageChangeSet, + kv.EthTx, + kv.Log, } bucketSizes := make([]interface{}, 0, 2*len(buckets)) for _, bucket := range buckets { 
@@ -297,7 +296,7 @@ func printLogs(tx ethdb.RwTx, timings []Timing) error { return nil } -func (s *Sync) runStage(stage *Stage, db ethdb.RwKV, tx ethdb.RwTx, firstCycle bool) (err error) { +func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool) (err error) { start := time.Now() stageState, err := s.StageState(stage.ID, tx, db) if err != nil { @@ -316,7 +315,7 @@ func (s *Sync) runStage(stage *Stage, db ethdb.RwKV, tx ethdb.RwTx, firstCycle b return nil } -func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db ethdb.RwKV, tx ethdb.RwTx) error { +func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx) error { t := time.Now() log.Debug("Unwind...", "stage", stage.ID) stageState, err := s.StageState(stage.ID, tx, db) @@ -349,7 +348,7 @@ func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db ethdb.RwKV, tx ethd return nil } -func (s *Sync) pruneStage(firstCycle bool, stage *Stage, db ethdb.RwKV, tx ethdb.RwTx) error { +func (s *Sync) pruneStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx) error { t := time.Now() log.Debug("Prune...", "stage", stage.ID) diff --git a/eth/stagedsync/sync_test.go b/eth/stagedsync/sync_test.go index e2406c7e87a..b2f1864b945 100644 --- a/eth/stagedsync/sync_test.go +++ b/eth/stagedsync/sync_test.go @@ -7,8 +7,8 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/assert" ) @@ -18,7 +18,7 @@ func TestStagesSuccess(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) return nil }, @@ -26,7 +26,7 @@ func TestStagesSuccess(t *testing.T) { { ID: 
stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) return nil }, @@ -34,14 +34,14 @@ func TestStagesSuccess(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) return nil }, }, } state := New(s, nil, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.NoError(t, err) @@ -57,7 +57,7 @@ func TestDisabledStages(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) return nil }, @@ -65,7 +65,7 @@ func TestDisabledStages(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) return nil }, @@ -74,14 +74,14 @@ func TestDisabledStages(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) return nil }, }, } state := New(s, nil, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.NoError(t, err) @@ -98,7 +98,7 @@ func TestErroredStage(t *testing.T) { { ID: stages.Headers, 
Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) return nil }, @@ -106,7 +106,7 @@ func TestErroredStage(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) return expectedErr }, @@ -114,14 +114,14 @@ func TestErroredStage(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) return nil }, }, } state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err) @@ -138,14 +138,14 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Headers)) return u.Done(tx) }, @@ -153,14 +153,14 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, 
u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 1000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Bodies)) return u.Done(tx) }, @@ -168,7 +168,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { if s.BlockNumber == 0 { if err := s.Update(tx, 1700); err != nil { return err @@ -182,7 +182,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Senders)) return u.Done(tx) }, @@ -190,21 +190,21 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.IntermediateHashes, Disabled: true, - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.IntermediateHashes) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.IntermediateHashes)) return u.Done(tx) }, }, } state := New(s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, 
true) assert.NoError(t, err) @@ -236,14 +236,14 @@ func TestUnwind(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Headers)) return u.Done(tx) }, @@ -251,14 +251,14 @@ func TestUnwind(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Bodies)) return u.Done(tx) }, @@ -266,7 +266,7 @@ func TestUnwind(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) if !unwound { unwound = true @@ -275,7 +275,7 @@ func TestUnwind(t *testing.T) { } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Senders)) return u.Done(tx) }, @@ -283,21 +283,21 @@ func TestUnwind(t *testing.T) { { ID: 
stages.IntermediateHashes, Disabled: true, - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.IntermediateHashes) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.IntermediateHashes)) return u.Done(tx) }, }, } state := New(s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.NoError(t, err) @@ -344,14 +344,14 @@ func TestUnwindEmptyUnwinder(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Headers)) return u.Done(tx) }, @@ -359,7 +359,7 @@ func TestUnwindEmptyUnwinder(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -370,7 +370,7 @@ func TestUnwindEmptyUnwinder(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: 
func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) if !unwound { unwound = true @@ -379,14 +379,14 @@ func TestUnwindEmptyUnwinder(t *testing.T) { } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Senders)) return u.Done(tx) }, }, } state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.NoError(t, err) @@ -418,7 +418,7 @@ func TestSyncDoTwice(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) return s.Update(tx, s.BlockNumber+100) }, @@ -426,7 +426,7 @@ func TestSyncDoTwice(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) return s.Update(tx, s.BlockNumber+200) }, @@ -434,7 +434,7 @@ func TestSyncDoTwice(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) return s.Update(tx, s.BlockNumber+300) }, @@ -442,7 +442,7 @@ func TestSyncDoTwice(t *testing.T) { } state := New(s, nil, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.NoError(t, err) @@ -476,7 +476,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { { 
ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) return nil }, @@ -484,7 +484,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) return expectedErr }, @@ -492,7 +492,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) return nil }, @@ -500,7 +500,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { } state := New(s, nil, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err) @@ -528,14 +528,14 @@ func TestSyncInterruptLongUnwind(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Headers)) return u.Done(tx) }, @@ -543,14 +543,14 @@ func TestSyncInterruptLongUnwind(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block 
bodiess", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 2000) } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Bodies)) return u.Done(tx) }, @@ -558,7 +558,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, s *StageState, u Unwinder, tx ethdb.RwTx) error { + Forward: func(firstCycle bool, s *StageState, u Unwinder, tx kv.RwTx) error { flow = append(flow, stages.Senders) if !unwound { unwound = true @@ -567,7 +567,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { } return nil }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx ethdb.RwTx) error { + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { flow = append(flow, unwindOf(stages.Senders)) if !interrupted { interrupted = true @@ -579,7 +579,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { }, } state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil) - db, tx := kv.NewTestTx(t) + db, tx := memdb.NewTestTx(t) err := state.Run(db, tx, true) assert.Error(t, errInterrupted, err) diff --git a/eth/stagedsync/testutil.go b/eth/stagedsync/testutil.go index 45fd151ef71..53a04028d81 100644 --- a/eth/stagedsync/testutil.go +++ b/eth/stagedsync/testutil.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/stretchr/testify/assert" ) @@ -21,8 +21,8 @@ const ( func compareCurrentState( t 
*testing.T, - db1 ethdb.Tx, - db2 ethdb.Tx, + db1 kv.Tx, + db2 kv.Tx, buckets ...string, ) { for _, bucket := range buckets { @@ -30,7 +30,7 @@ func compareCurrentState( } } -func compareBucket(t *testing.T, db1, db2 ethdb.Tx, bucketName string) { +func compareBucket(t *testing.T, db1, db2 kv.Tx, bucketName string) { var err error bucket1 := make(map[string][]byte) @@ -52,13 +52,13 @@ func compareBucket(t *testing.T, db1, db2 ethdb.Tx, bucketName string) { type stateWriterGen func(uint64) state.WriterWithChangeSets -func hashedWriterGen(tx ethdb.RwTx) stateWriterGen { +func hashedWriterGen(tx kv.RwTx) stateWriterGen { return func(blockNum uint64) state.WriterWithChangeSets { return state.NewDbStateWriter(tx, blockNum) } } -func plainWriterGen(tx ethdb.RwTx) stateWriterGen { +func plainWriterGen(tx kv.RwTx) stateWriterGen { return func(blockNum uint64) state.WriterWithChangeSets { return state.NewPlainStateWriter(tx, tx, blockNum) } diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 022f95e483c..40aeaf4678a 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -35,7 +35,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/tests" @@ -176,7 +176,7 @@ func TestPrestateTracerCreate2(t *testing.T) { Balance: big.NewInt(500000000000000), } - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) statedb, _ := tests.MakePreState(params.Rules{}, tx, alloc, context.BlockNumber) // Create the tracer, the EVM environment and run it @@ -254,7 +254,7 @@ func TestCallTracer(t *testing.T) { CheckTEVM: func(common.Hash) (bool, error) { return false, nil }, } - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) statedb, err := 
tests.MakePreState(params.Rules{}, tx, test.Genesis.Alloc, uint64(test.Context.Number)) require.NoError(t, err) diff --git a/ethdb/bitmapdb/dbutils.go b/ethdb/bitmapdb/dbutils.go index 2506b3b8a7d..e2b1b267e6a 100644 --- a/ethdb/bitmapdb/dbutils.go +++ b/ethdb/bitmapdb/dbutils.go @@ -11,6 +11,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) const ChunkLimit = uint64(1950 * datasize.B) // threshold beyond which MDBX overflow pages appear: 4096 / 2 - (keySize + 8) @@ -76,7 +77,7 @@ func WalkChunkWithKeys(k []byte, m *roaring.Bitmap, sizeLimit uint64, f func(chu // TruncateRange - gets existing bitmap in db and call RemoveRange operator on it. // starts from hot shard, stops when shard not overlap with [from-to) // !Important: [from, to) -func TruncateRange(db ethdb.RwTx, bucket string, key []byte, to uint32) error { +func TruncateRange(db kv.RwTx, bucket string, key []byte, to uint32) error { chunkKey := make([]byte, len(key)+4) copy(chunkKey, key) binary.BigEndian.PutUint32(chunkKey[len(chunkKey)-4:], to) @@ -118,7 +119,7 @@ func TruncateRange(db ethdb.RwTx, bucket string, key []byte, to uint32) error { // Get - reading as much chunks as needed to satisfy [from, to] condition // join all chunks to 1 bitmap by Or operator -func Get(db ethdb.Tx, bucket string, key []byte, from, to uint32) (*roaring.Bitmap, error) { +func Get(db kv.Tx, bucket string, key []byte, from, to uint32) (*roaring.Bitmap, error) { var chunks []*roaring.Bitmap fromKey := make([]byte, len(key)+4) @@ -223,7 +224,7 @@ func WalkChunkWithKeys64(k []byte, m *roaring64.Bitmap, sizeLimit uint64, f func // TruncateRange - gets existing bitmap in db and call RemoveRange operator on it. 
// starts from hot shard, stops when shard not overlap with [from-to) // !Important: [from, to) -func TruncateRange64(db ethdb.RwTx, bucket string, key []byte, to uint64) error { +func TruncateRange64(db kv.RwTx, bucket string, key []byte, to uint64) error { chunkKey := make([]byte, len(key)+8) copy(chunkKey, key) binary.BigEndian.PutUint64(chunkKey[len(chunkKey)-8:], to) @@ -270,7 +271,7 @@ func TruncateRange64(db ethdb.RwTx, bucket string, key []byte, to uint64) error // Get - reading as much chunks as needed to satisfy [from, to] condition // join all chunks to 1 bitmap by Or operator -func Get64(db ethdb.Tx, bucket string, key []byte, from, to uint64) (*roaring64.Bitmap, error) { +func Get64(db kv.Tx, bucket string, key []byte, from, to uint64) (*roaring64.Bitmap, error) { var chunks []*roaring64.Bitmap fromKey := make([]byte, len(key)+8) diff --git a/ethdb/db_interface.go b/ethdb/db_interface.go index 369e9106d4a..6683307e10d 100644 --- a/ethdb/db_interface.go +++ b/ethdb/db_interface.go @@ -19,6 +19,8 @@ package ethdb import ( "context" "errors" + + "github.com/ledgerwatch/erigon/ethdb/kv" ) // DESCRIBED: For info on database buckets see docs/programmers_guide/db_walkthrough.MD @@ -33,9 +35,9 @@ const ( RO TxFlags = 0x02 ) -// Getter wraps the database read operations. -type Getter interface { - KVGetter +// DBGetter wraps the database read operations. +type DBGetter interface { + kv.Getter // Get returns the value for a given key if it's present. Get(bucket string, key []byte) ([]byte, error) @@ -43,22 +45,17 @@ type Getter interface { // Database wraps all database operations. All methods are safe for concurrent use. type Database interface { - Getter - Putter - Deleter - Closer - - // MultiPut inserts or updates multiple entries. - // Entries are passed as an array: - // bucket0, key0, val0, bucket1, key1, val1, ... 
- MultiPut(tuples ...[]byte) (uint64, error) + DBGetter + kv.Putter + kv.Deleter + kv.Closer Begin(ctx context.Context, flags TxFlags) (DbWithPendingMutations, error) // starts db transaction Last(bucket string) ([]byte, []byte, error) IncrementSequence(bucket string, amount uint64) (uint64, error) ReadSequence(bucket string) (uint64, error) - RwKV() RwKV + RwKV() kv.RwDB } // MinDatabase is a minimalistic version of the Database interface. @@ -85,33 +82,17 @@ type DbWithPendingMutations interface { // Commit() error - // CommitAndBegin - commits and starts new transaction inside same db object. - // useful for periodical commits implementation. - // - // Common pattern: - // - // tx := db.Begin() - // defer tx.Rollback() - // for { - // ... some calculations on `tx` - // tx.CommitAndBegin() - // // defer here - is not useful, because 'tx' object is reused and first `defer` will work perfectly - // } - // tx.Commit() - // - CommitAndBegin(ctx context.Context) error - RollbackAndBegin(ctx context.Context) error Rollback() BatchSize() int } type HasRwKV interface { - RwKV() RwKV - SetRwKV(kv RwKV) + RwKV() kv.RwDB + SetRwKV(kv kv.RwDB) } type HasTx interface { - Tx() Tx + Tx() kv.Tx } type BucketsMigrator interface { diff --git a/common/dbutils/bucket.go b/ethdb/kv/bucket.go similarity index 72% rename from common/dbutils/bucket.go rename to ethdb/kv/bucket.go index a19f340ecac..d503b990632 100644 --- a/common/dbutils/bucket.go +++ b/ethdb/kv/bucket.go @@ -1,4 +1,4 @@ -package dbutils +package kv import ( "sort" @@ -10,7 +10,7 @@ import ( // DBSchemaVersion var DBSchemaVersion = types.VersionReply{Major: 3, Minor: 0, Patch: 0} -// Buckets +// ErigonTables // Dictionary: // "Plain State" - state where keys arent' hashed. "CurrentState" - same, but keys are hashed. "PlainState" used for blocks execution. "CurrentState" used mostly for Merkle root calculation. 
@@ -26,7 +26,7 @@ PlainStateBucket logical layout: value - storage value(common.hash) Physical layout: - PlainStateBucket and HashedStorageBucket utilises DupSort feature of MDBX (store multiple values inside 1 key). + PlainStateBucket and HashedStorage utilises DupSort feature of MDBX (store multiple values inside 1 key). ------------------------------------------------------------- key | value ------------------------------------------------------------- @@ -42,20 +42,20 @@ Physical layout: */ const PlainStateBucket = "PlainState" -//PlainContractCodeBucket - +//PlainContractCode - //key - address+incarnation //value - code hash -const PlainContractCodeBucket = "PlainCodeHash" +const PlainContractCode = "PlainCodeHash" /* -AccountChangeSetBucket and StorageChangeSetBucket - of block N store values of state before block N changed them. +AccountChangeSet and StorageChangeSet - of block N store values of state before block N changed them. Because values "after" change stored in PlainState. Logical format: key - blockNum_u64 + key_in_plain_state value - value_in_plain_state_before_blockNum_changes Example: If block N changed account A from value X to Y. Then: - AccountChangeSetBucket has record: bigEndian(N) + A -> X + AccountChangeSet has record: bigEndian(N) + A -> X PlainStateBucket has record: A -> Y See also: docs/programmers_guide/db_walkthrough.MD#table-history-of-accounts @@ -63,31 +63,31 @@ See also: docs/programmers_guide/db_walkthrough.MD#table-history-of-accounts As you can see if block N changes much accounts - then all records have repetitive prefix `bigEndian(N)`. MDBX can store such prefixes only once - by DupSort feature (see `docs/programmers_guide/dupsort.md`). 
Both buckets are DupSort-ed and have physical format: -AccountChangeSetBucket: +AccountChangeSet: key - blockNum_u64 value - address + account(encoded) -StorageChangeSetBucket: +StorageChangeSet: key - blockNum_u64 + address + incarnation_u64 value - plain_storage_key + value */ -const AccountChangeSetBucket = "AccountChangeSet" -const StorageChangeSetBucket = "StorageChangeSet" +const AccountChangeSet = "AccountChangeSet" +const StorageChangeSet = "StorageChangeSet" const ( - //HashedAccountsBucket + //HashedAccounts // key - address hash // value - account encoded for storage // Contains Storage: //key - address hash + incarnation + storage key hash //value - storage value(common.hash) - HashedAccountsBucket = "HashedAccount" - HashedStorageBucket = "HashedStorage" + HashedAccounts = "HashedAccount" + HashedStorage = "HashedStorage" ) /* -AccountsHistoryBucket and StorageHistoryBucket - indices designed to serve next 2 type of requests: +AccountsHistory and StorageHistory - indices designed to serve next 2 type of requests: 1. what is smallest block number >= X where account A changed 2. 
get last shard of A - to append there new block numbers @@ -111,17 +111,17 @@ It allows: see also: docs/programmers_guide/db_walkthrough.MD#table-change-sets -AccountsHistoryBucket: +AccountsHistory: key - address + shard_id_u64 value - roaring bitmap - list of block where it changed -StorageHistoryBucket +StorageHistory key - address + storage_key + shard_id_u64 value - roaring bitmap - list of block where it changed */ -var AccountsHistoryBucket = "AccountHistory" -var StorageHistoryBucket = "StorageHistory" +const AccountsHistory = "AccountHistory" +const StorageHistory = "StorageHistory" -var ( +const ( //key - contract code hash //value - contract code @@ -129,20 +129,20 @@ var ( //key - addressHash+incarnation //value - code hash - ContractCodeBucket = "HashedCodeHash" + ContractCode = "HashedCodeHash" - // IncarnationMapBucket for deleted accounts + // IncarnationMap for deleted accounts //key - address //value - incarnation of account when it was last deleted - IncarnationMapBucket = "IncarnationMap" + IncarnationMap = "IncarnationMap" //TEVMCodeBucket - //key - contract code hash //value - contract TEVM code - ContractTEVMCodeBucket = "TEVMCode" + ContractTEVMCode = "TEVMCode" ) -/*TrieOfAccountsBucket and TrieOfStorageBucket +/*TrieOfAccounts and TrieOfStorage hasState,groups - mark prefixes existing in hashed_account table hasTree - mark prefixes existing in trie_account table (not related with branchNodes) hasHash - mark prefixes which hashes are saved in current trie_account record (actually only hashes of branchNodes can be saved) @@ -182,27 +182,27 @@ Invariants: - TrieAccount records with length=1 can satisfy (hasBranch==0&&hasHash==0) condition - Other records in TrieAccount and TrieStorage must (hasTree!=0 || hasHash!=0) */ -const TrieOfAccountsBucket = "TrieAccount" -const TrieOfStorageBucket = "TrieStorage" +const TrieOfAccounts = "TrieAccount" +const TrieOfStorage = "TrieStorage" const ( - // DatabaseInfoBucket is used to store information 
about data layout. - DatabaseInfoBucket = "DbInfo" - SnapshotInfoBucket = "SnapshotInfo" - BittorrentInfoBucket = "BittorrentInfo" + // DatabaseInfo is used to store information about data layout. + DatabaseInfo = "DbInfo" + SnapshotInfo = "SnapshotInfo" + BittorrentInfo = "BittorrentInfo" // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). - HeaderPrefixOld = "h" // block_num_u64 + hash -> header - HeaderNumberBucket = "HeaderNumber" // headerNumberPrefix + hash -> num (uint64 big endian) + HeaderPrefixOld = "h" // block_num_u64 + hash -> header + HeaderNumber = "HeaderNumber" // headerNumberPrefix + hash -> num (uint64 big endian) - HeaderCanonicalBucket = "CanonicalHeader" // block_num_u64 -> header hash - HeadersBucket = "Header" // block_num_u64 + hash -> header (RLP) - HeaderTDBucket = "HeadersTotalDifficulty" // block_num_u64 + hash -> td (RLP) + HeaderCanonical = "CanonicalHeader" // block_num_u64 -> header hash + Headers = "Header" // block_num_u64 + hash -> header (RLP) + HeaderTD = "HeadersTotalDifficulty" // block_num_u64 + hash -> td (RLP) - BlockBodyPrefix = "BlockBody" // block_num_u64 + hash -> block body - EthTx = "BlockTransaction" // tbl_sequence_u64 -> rlp(tx) - Receipts = "Receipt" // block_num_u64 -> canonical block receipts (non-canonical are not stored) - Log = "TransactionLog" // block_num_u64 + txId -> logs of transaction + BlockBody = "BlockBody" // block_num_u64 + hash -> block body + EthTx = "BlockTransaction" // tbl_sequence_u64 -> rlp(tx) + Receipts = "Receipt" // block_num_u64 -> canonical block receipts (non-canonical are not stored) + Log = "TransactionLog" // block_num_u64 + txId -> logs of transaction // Stores bitmap indices - in which block numbers saw logs of given 'address' or 'topic' // [addr or topic] + [2 bytes inverted shard number] -> bitmap(blockN) @@ -224,21 +224,17 @@ const ( CallFromIndex = "CallFromIndex" CallToIndex = "CallToIndex" - TxLookupPrefix = 
"BlockTransactionLookup" // hash -> transaction/receipt lookup metadata - BloomBitsPrefix = "BloomBits" // bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits - - ConfigPrefix = "Config" // config prefix for the db + TxLookup = "BlockTransactionLookup" // hash -> transaction/receipt lookup metadata - // Chain index prefixes (use `i` + single byte to avoid mixing data types). - BloomBitsIndexPrefix = "BloomBitsIndex" // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress + ConfigTable = "Config" // config prefix for the db // Progress of sync stages: stageName -> stageData SyncStageProgress = "SyncStage" - CliqueBucket = "Clique" - CliqueSeparateBucket = "CliqueSeparate" - CliqueSnapshotBucket = "CliqueSnapshot" - CliqueLastSnapshotBucket = "CliqueLastSnapshot" + CliqueBucket = "Clique" + CliqueSeparate = "CliqueSeparate" + CliqueSnapshot = "CliqueSnapshot" + CliqueLastSnapshot = "CliqueLastSnapshot" // this bucket stored in separated database InodesBucket = "Inode" @@ -280,57 +276,58 @@ var ( CurrentBodiesSnapshotBlock = []byte("CurrentBodiesSnapshotBlock") ) -// Buckets - list of all buckets. App will panic if some bucket is not in this list. +// ErigonTables - list of all buckets. App will panic if some bucket is not in this list. // This list will be sorted in `init` method. 
-// BucketsConfigs - can be used to find index in sorted version of Buckets list by name -var Buckets = []string{ - AccountsHistoryBucket, - StorageHistoryBucket, +// BucketsConfigs - can be used to find index in sorted version of ErigonTables list by name +var ErigonTables = []string{ + AccountsHistory, + StorageHistory, CodeBucket, - ContractCodeBucket, - HeaderNumberBucket, - BlockBodyPrefix, + ContractCode, + HeaderNumber, + BlockBody, Receipts, - TxLookupPrefix, - BloomBitsPrefix, - ConfigPrefix, - BloomBitsIndexPrefix, - DatabaseInfoBucket, - IncarnationMapBucket, - ContractTEVMCodeBucket, - CliqueSeparateBucket, - CliqueLastSnapshotBucket, - CliqueSnapshotBucket, + TxLookup, + ConfigTable, + DatabaseInfo, + IncarnationMap, + ContractTEVMCode, + CliqueSeparate, + CliqueLastSnapshot, + CliqueSnapshot, SyncStageProgress, PlainStateBucket, - PlainContractCodeBucket, - AccountChangeSetBucket, - StorageChangeSetBucket, + PlainContractCode, + AccountChangeSet, + StorageChangeSet, Senders, HeadBlockKey, HeadHeaderKey, Migrations, LogTopicIndex, LogAddressIndex, - SnapshotInfoBucket, + SnapshotInfo, CallTraceSet, CallFromIndex, CallToIndex, Log, Sequence, EthTx, - TrieOfAccountsBucket, - TrieOfStorageBucket, - HashedAccountsBucket, - HashedStorageBucket, - BittorrentInfoBucket, - HeaderCanonicalBucket, - HeadersBucket, - HeaderTDBucket, + TrieOfAccounts, + TrieOfStorage, + HashedAccounts, + HashedStorage, + BittorrentInfo, + HeaderCanonical, + Headers, + HeaderTD, Epoch, PendingEpoch, } +var TxPoolTables = []string{} +var SentryTables = []string{} + // DeprecatedBuckets - list of buckets which can be programmatically deleted - for example after migration var DeprecatedBuckets = []string{ HeaderPrefixOld, @@ -339,23 +336,23 @@ var DeprecatedBuckets = []string{ type CmpFunc func(k1, k2, v1, v2 []byte) int -type BucketsCfg map[string]BucketConfigItem +type TableCfg map[string]TableConfigItem type Bucket string type DBI uint -type BucketFlags uint +type TableFlags uint 
const ( - Default BucketFlags = 0x00 - ReverseKey BucketFlags = 0x02 - DupSort BucketFlags = 0x04 - IntegerKey BucketFlags = 0x08 - IntegerDup BucketFlags = 0x20 - ReverseDup BucketFlags = 0x40 + Default TableFlags = 0x00 + ReverseKey TableFlags = 0x02 + DupSort TableFlags = 0x04 + IntegerKey TableFlags = 0x08 + IntegerDup TableFlags = 0x20 + ReverseDup TableFlags = 0x40 ) -type BucketConfigItem struct { - Flags BucketFlags +type TableConfigItem struct { + Flags TableFlags // AutoDupSortKeysConversion - enables some keys transformation - to change db layout without changing app code. // Use it wisely - it helps to do experiments with DB format faster, but better reduce amount of Magic in app. // If good DB format found, push app code to accept this format and then disable this property. @@ -371,17 +368,17 @@ type BucketConfigItem struct { DupToLen int } -var BucketsConfigs = BucketsCfg{ - HashedStorageBucket: { +var BucketsConfigs = TableCfg{ + HashedStorage: { Flags: DupSort, AutoDupSortKeysConversion: true, DupFromLen: 72, DupToLen: 40, }, - AccountChangeSetBucket: { + AccountChangeSet: { Flags: DupSort, }, - StorageChangeSetBucket: { + StorageChangeSet: { Flags: DupSort, }, PlainStateBucket: { @@ -396,28 +393,11 @@ var BucketsConfigs = BucketsCfg{ } func sortBuckets() { - sort.SliceStable(Buckets, func(i, j int) bool { - return strings.Compare(Buckets[i], Buckets[j]) < 0 + sort.SliceStable(ErigonTables, func(i, j int) bool { + return strings.Compare(ErigonTables[i], ErigonTables[j]) < 0 }) } -func DefaultBuckets() BucketsCfg { - return BucketsConfigs -} - -func UpdateBucketsList(newBucketCfg BucketsCfg) { - newBuckets := make([]string, 0) - for k, v := range newBucketCfg { - if !v.IsDeprecated { - newBuckets = append(newBuckets, k) - } - } - Buckets = newBuckets - BucketsConfigs = newBucketCfg - - reinit() -} - func init() { reinit() } @@ -425,17 +405,17 @@ func init() { func reinit() { sortBuckets() - for _, name := range Buckets { + for _, name := range 
ErigonTables { _, ok := BucketsConfigs[name] if !ok { - BucketsConfigs[name] = BucketConfigItem{} + BucketsConfigs[name] = TableConfigItem{} } } for _, name := range DeprecatedBuckets { _, ok := BucketsConfigs[name] if !ok { - BucketsConfigs[name] = BucketConfigItem{} + BucketsConfigs[name] = TableConfigItem{} } tmp := BucketsConfigs[name] tmp.IsDeprecated = true diff --git a/ethdb/kv_interface.go b/ethdb/kv/kv_interface.go similarity index 96% rename from ethdb/kv_interface.go rename to ethdb/kv/kv_interface.go index 329066bafe9..08102b7ba62 100644 --- a/ethdb/kv_interface.go +++ b/ethdb/kv/kv_interface.go @@ -1,10 +1,9 @@ -package ethdb +package kv import ( "context" "errors" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/metrics" ) @@ -12,7 +11,7 @@ const ReadersLimit = 32000 // MDBX_READERS_LIMIT=32767 var ( ErrAttemptToDeleteNonDeprecatedBucket = errors.New("only buckets from dbutils.DeprecatedBuckets can be deleted") - ErrUnknownBucket = errors.New("unknown bucket. add it to dbutils.Buckets") + ErrUnknownBucket = errors.New("unknown bucket. 
add it to dbutils.ErigonTables") DbSize = metrics.GetOrRegisterGauge("db/size", metrics.DefaultRegistry) //nolint TxLimit = metrics.GetOrRegisterGauge("tx/limit", metrics.DefaultRegistry) //nolint @@ -66,18 +65,18 @@ type DBVerbosityLvl int8 type Label uint8 const ( - Chain Label = 0 - TxPool Label = 1 - Sentry Label = 2 + ChainDB Label = 0 + TxPoolDB Label = 1 + SentryDB Label = 2 ) func (l Label) String() string { switch l { - case Chain: + case ChainDB: return "chaindata" - case TxPool: + case TxPoolDB: return "txpool" - case Sentry: + case SentryDB: return "sentry" default: return "unknown" @@ -89,10 +88,10 @@ type Has interface { Has(bucket string, key []byte) (bool, error) } type GetPut interface { - KVGetter + Getter Putter } -type KVGetter interface { +type Getter interface { Has GetOne(bucket string, key []byte) (val []byte, err error) @@ -123,8 +122,8 @@ type Closer interface { Close() } -// RoKV - Read-only version of KV. -type RoKV interface { +// RoDB - Read-only version of KV. +type RoDB interface { Closer View(ctx context.Context, f func(tx Tx) error) error @@ -143,10 +142,10 @@ type RoKV interface { // transaction and its cursors may not issue any other operations than // Commit and Rollback while it has active child transactions. BeginRo(ctx context.Context) (Tx, error) - AllBuckets() dbutils.BucketsCfg + AllBuckets() TableCfg } -// RwKV low-level database interface - main target is - to provide common abstraction over top of MDBX and RemoteKV. +// RwDB low-level database interface - main target is - to provide common abstraction over top of MDBX and RemoteKV. 
// // Common pattern for short-living transactions: // @@ -170,8 +169,8 @@ type RoKV interface { // return err // } // -type RwKV interface { - RoKV +type RwDB interface { + RoDB Update(ctx context.Context, f func(tx RwTx) error) error @@ -179,7 +178,7 @@ type RwKV interface { } type StatelessReadTx interface { - KVGetter + Getter Commit() error // Commit all the operations of a transaction into the database. Rollback() // Rollback - abandon all the operations of the transaction instead of saving them. @@ -215,7 +214,7 @@ type Tx interface { // Otherwise - object of interface Cursor created // // Cursor, also provides a grain of magic - it can use a declarative configuration - and automatically break - // long keys into DupSort key/values. See docs for `bucket.go:BucketConfigItem` + // long keys into DupSort key/values. See docs for `bucket.go:TableConfigItem` Cursor(bucket string) (Cursor, error) CursorDupSort(bucket string) (CursorDupSort, error) // CursorDupSort - can be used if bucket has mdbx.DupSort flag diff --git a/ethdb/kv_util.go b/ethdb/kv_util.go index 81171cc7511..8acacca9658 100644 --- a/ethdb/kv_util.go +++ b/ethdb/kv_util.go @@ -4,14 +4,12 @@ import ( "bytes" "errors" "fmt" - "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/log" + "github.com/ledgerwatch/erigon/ethdb/kv" ) -func Walk(c Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error { +func Walk(c kv.Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error { fixedbytes, mask := Bytesmask(fixedbits) k, v, err := c.Seek(startkey) if err != nil { @@ -33,72 +31,8 @@ func Walk(c Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) (bo return nil } -func MultiPut(tx RwTx, tuples ...[]byte) error { - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - - count := 0 - total := float64(len(tuples)) / 3 - for bucketStart := 0; 
bucketStart < len(tuples); { - bucketEnd := bucketStart - for ; bucketEnd < len(tuples) && bytes.Equal(tuples[bucketEnd], tuples[bucketStart]); bucketEnd += 3 { - } - bucketName := string(tuples[bucketStart]) - c, err := tx.RwCursor(bucketName) - if err != nil { - return err - } - - // move cursor to a first element in batch - // if it's nil, it means all keys in batch gonna be inserted after end of bucket (batch is sorted and has no duplicates here) - // can apply optimisations for this case - firstKey, _, err := c.Seek(tuples[bucketStart+1]) - if err != nil { - return err - } - isEndOfBucket := firstKey == nil - - l := (bucketEnd - bucketStart) / 3 - for i := 0; i < l; i++ { - k := tuples[bucketStart+3*i+1] - v := tuples[bucketStart+3*i+2] - if isEndOfBucket { - if v == nil { - // nothing to delete after end of bucket - } else { - if err := c.Append(k, v); err != nil { - return err - } - } - } else { - if v == nil { - if err := c.Delete(k, nil); err != nil { - return err - } - } else { - if err := c.Put(k, v); err != nil { - return err - } - } - } - - count++ - - select { - default: - case <-logEvery.C: - progress := fmt.Sprintf("%.1fM/%.1fM", float64(count)/1_000_000, total/1_000_000) - log.Info("Write to db", "progress", progress, "current table", bucketName) - } - } - - bucketStart = bucketEnd - } - return nil -} - // todo: return TEVM code and use it -func GetCheckTEVM(db KVGetter) func(contractHash common.Hash) (bool, error) { +func GetCheckTEVM(db kv.Getter) func(contractHash common.Hash) (bool, error) { checked := map[common.Hash]struct{}{} var ok bool @@ -111,7 +45,7 @@ func GetCheckTEVM(db KVGetter) func(contractHash common.Hash) (bool, error) { return true, nil } - ok, err := db.Has(dbutils.ContractTEVMCodeBucket, contractHash.Bytes()) + ok, err := db.Has(kv.ContractTEVMCode, contractHash.Bytes()) if err != nil && !errors.Is(err, ErrKeyNotFound) { return false, fmt.Errorf("can't check TEVM bucket by contract %q hash: %w", contractHash.String(), err) 
diff --git a/ethdb/kv/kv_abstract_test.go b/ethdb/mdbx/kv_abstract_test.go similarity index 80% rename from ethdb/kv/kv_abstract_test.go rename to ethdb/mdbx/kv_abstract_test.go index 5a314875d2f..c88e1ec3bae 100644 --- a/ethdb/kv/kv_abstract_test.go +++ b/ethdb/mdbx/kv_abstract_test.go @@ -1,4 +1,4 @@ -package kv_test +package mdbx_test import ( "context" @@ -9,10 +9,10 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/remotedb" + "github.com/ledgerwatch/erigon/ethdb/remotedbserver" "github.com/ledgerwatch/erigon/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,7 +21,7 @@ import ( ) func TestSequence(t *testing.T) { - writeDBs, _ := setupDatabases(t, func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { + writeDBs, _ := setupDatabases(t, log.New(), func(defaultBuckets kv.TableCfg) kv.TableCfg { return defaultBuckets }) ctx := context.Background() @@ -32,29 +32,29 @@ func TestSequence(t *testing.T) { require.NoError(t, err) defer tx.Rollback() - i, err := tx.ReadSequence(dbutils.Buckets[0]) + i, err := tx.ReadSequence(kv.ErigonTables[0]) require.NoError(t, err) require.Equal(t, uint64(0), i) - i, err = tx.IncrementSequence(dbutils.Buckets[0], 1) + i, err = tx.IncrementSequence(kv.ErigonTables[0], 1) require.NoError(t, err) require.Equal(t, uint64(0), i) - i, err = tx.IncrementSequence(dbutils.Buckets[0], 6) + i, err = tx.IncrementSequence(kv.ErigonTables[0], 6) require.NoError(t, err) require.Equal(t, uint64(1), i) - i, err = tx.IncrementSequence(dbutils.Buckets[0], 1) + i, err = tx.IncrementSequence(kv.ErigonTables[0], 1) require.NoError(t, err) require.Equal(t, uint64(7), i) - i, 
err = tx.ReadSequence(dbutils.Buckets[1]) + i, err = tx.ReadSequence(kv.ErigonTables[1]) require.NoError(t, err) require.Equal(t, uint64(0), i) - i, err = tx.IncrementSequence(dbutils.Buckets[1], 1) + i, err = tx.IncrementSequence(kv.ErigonTables[1], 1) require.NoError(t, err) require.Equal(t, uint64(0), i) - i, err = tx.IncrementSequence(dbutils.Buckets[1], 6) + i, err = tx.IncrementSequence(kv.ErigonTables[1], 6) require.NoError(t, err) require.Equal(t, uint64(1), i) - i, err = tx.IncrementSequence(dbutils.Buckets[1], 1) + i, err = tx.IncrementSequence(kv.ErigonTables[1], 1) require.NoError(t, err) require.Equal(t, uint64(7), i) tx.Rollback() @@ -62,18 +62,18 @@ func TestSequence(t *testing.T) { } func TestManagedTx(t *testing.T) { - defaultConfig := dbutils.BucketsConfigs + defaultConfig := kv.BucketsConfigs defer func() { - dbutils.BucketsConfigs = defaultConfig + kv.BucketsConfigs = defaultConfig }() bucketID := 0 - bucket1 := dbutils.Buckets[bucketID] - bucket2 := dbutils.Buckets[bucketID+1] - writeDBs, readDBs := setupDatabases(t, func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return map[string]dbutils.BucketConfigItem{ + bucket1 := kv.ErigonTables[bucketID] + bucket2 := kv.ErigonTables[bucketID+1] + writeDBs, readDBs := setupDatabases(t, log.New(), func(defaultBuckets kv.TableCfg) kv.TableCfg { + return map[string]kv.TableConfigItem{ bucket1: { - Flags: dbutils.DupSort, + Flags: kv.DupSort, AutoDupSortKeysConversion: true, DupToLen: 4, DupFromLen: 6, @@ -118,7 +118,7 @@ func TestManagedTx(t *testing.T) { db := db msg := fmt.Sprintf("%T", db) switch db.(type) { - case *kv.RemoteKV: + case *remotedb.RemoteKV: default: continue } @@ -137,10 +137,11 @@ func TestManagedTx(t *testing.T) { } func TestRemoteKvVersion(t *testing.T) { - f := func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { + logger := log.New() + f := func(defaultBuckets kv.TableCfg) kv.TableCfg { return defaultBuckets } - writeDb := 
kv.NewMDBX().InMem().WithBucketsConfig(f).MustOpen() + writeDb := mdbx.NewMDBX(logger).InMem().WithBucketsConfig(f).MustOpen() defer writeDb.Close() conn := bufconn.Listen(1024 * 1024) grpcServer := grpc.NewServer() @@ -154,7 +155,7 @@ func TestRemoteKvVersion(t *testing.T) { // Different Major versions v1 := v v1.Major++ - a, err := kv.NewRemote(v1).InMem(conn).Open("", "", "") + a, err := remotedb.NewRemote(v1, logger).InMem(conn).Open("", "", "") if err != nil { t.Fatalf("%v", err) } @@ -162,7 +163,7 @@ func TestRemoteKvVersion(t *testing.T) { // Different Minor versions v2 := v v2.Minor++ - _, err = kv.NewRemote(v2).InMem(conn).Open("", "", "") + _, err = remotedb.NewRemote(v2, logger).InMem(conn).Open("", "", "") if err != nil { t.Fatalf("%v", err) } @@ -170,17 +171,17 @@ func TestRemoteKvVersion(t *testing.T) { // Different Patch versions v3 := v v3.Patch++ - _, err = kv.NewRemote(v3).InMem(conn).Open("", "", "") + _, err = remotedb.NewRemote(v3, logger).InMem(conn).Open("", "", "") if err != nil { t.Fatalf("%v", err) } require.False(t, a.EnsureVersionCompatibility()) } -func setupDatabases(t *testing.T, f kv.BucketConfigsFunc) (writeDBs []ethdb.RwKV, readDBs []ethdb.RwKV) { - writeDBs = []ethdb.RwKV{ - kv.NewMDBX().InMem().WithBucketsConfig(f).MustOpen(), - kv.NewMDBX().InMem().WithBucketsConfig(f).MustOpen(), // for remote db +func setupDatabases(t *testing.T, logger log.Logger, f mdbx.BucketConfigsFunc) (writeDBs []kv.RwDB, readDBs []kv.RwDB) { + writeDBs = []kv.RwDB{ + mdbx.NewMDBX(logger).InMem().WithBucketsConfig(f).MustOpen(), + mdbx.NewMDBX(logger).InMem().WithBucketsConfig(f).MustOpen(), // for remote db } conn := bufconn.Listen(1024 * 1024) @@ -193,8 +194,8 @@ func setupDatabases(t *testing.T, f kv.BucketConfigsFunc) (writeDBs []ethdb.RwKV } }() v := gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion) - rdb := kv.NewRemote(v).InMem(conn).MustOpen() - readDBs = []ethdb.RwKV{ + rdb := remotedb.NewRemote(v, logger).InMem(conn).MustOpen() 
+ readDBs = []kv.RwDB{ writeDBs[0], writeDBs[1], rdb, @@ -219,12 +220,12 @@ func setupDatabases(t *testing.T, f kv.BucketConfigsFunc) (writeDBs []ethdb.RwKV return writeDBs, readDBs } -func testCtxCancel(t *testing.T, db ethdb.RwKV, bucket1 string) { +func testCtxCancel(t *testing.T, db kv.RwDB, bucket1 string) { assert := assert.New(t) cancelableCtx, cancel := context.WithTimeout(context.Background(), time.Microsecond) defer cancel() - if err := db.View(cancelableCtx, func(tx ethdb.Tx) error { + if err := db.View(cancelableCtx, func(tx kv.Tx) error { c, err := tx.Cursor(bucket1) if err != nil { return err @@ -241,10 +242,10 @@ func testCtxCancel(t *testing.T, db ethdb.RwKV, bucket1 string) { } } -func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { +func testMultiCursor(t *testing.T, db kv.RwDB, bucket1, bucket2 string) { assert, ctx := assert.New(t), context.Background() - if err := db.View(ctx, func(tx ethdb.Tx) error { + if err := db.View(ctx, func(tx kv.Tx) error { c1, err := tx.Cursor(bucket1) if err != nil { return err @@ -347,11 +348,11 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // msg := fmt.Sprintf("%T", db) // t.Run("FillBuckets "+msg, func(t *testing.T) { // if err := db.Update(ctx, func(tx ethdb.Tx) error { -// c := tx.Cursor(dbutils.Buckets[0]) +// c := tx.Cursor(dbutils.ErigonTables[0]) // for i := uint8(0); i < 10; i++ { // require.NoError(t, c.Put([]byte{i}, []byte{i})) // } -// c2 := tx.Cursor(dbutils.Buckets[1]) +// c2 := tx.Cursor(dbutils.ErigonTables[1]) // for i := uint8(0); i < 12; i++ { // require.NoError(t, c2.Put([]byte{i}, []byte{i})) // } @@ -379,7 +380,7 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // counter2, counter := 0, 0 // var key, value []byte // err := db.View(ctx, func(tx ethdb.Tx) error { -// c := tx.Cursor(dbutils.Buckets[0]) +// c := tx.Cursor(dbutils.ErigonTables[0]) // for k, _, err := c.First(); k != nil; k, _, err = c.Next() 
{ // if err != nil { // return err @@ -387,7 +388,7 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // counter++ // } // -// c2 := tx.Cursor(dbutils.Buckets[1]) +// c2 := tx.Cursor(dbutils.ErigonTables[1]) // for k, _, err := c2.First(); k != nil; k, _, err = c2.Next() { // if err != nil { // return err @@ -395,7 +396,7 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // counter2++ // } // -// c3 := tx.Cursor(dbutils.Buckets[0]) +// c3 := tx.Cursor(dbutils.ErigonTables[0]) // k, v, err := c3.Seek([]byte{5}) // if err != nil { // return err @@ -425,7 +426,7 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // msg := fmt.Sprintf("%T", db) // t.Run("GetAfterPut "+msg, func(t *testing.T) { // if err := db.Update(ctx, func(tx ethdb.Tx) error { -// c := tx.Cursor(dbutils.Buckets[0]) +// c := tx.Cursor(dbutils.ErigonTables[0]) // for i := uint8(0); i < 10; i++ { // don't read in same loop to check that writes don't affect each other (for example by sharing bucket.prefix buffer) // require.NoError(t, c.Put([]byte{i}, []byte{i})) // } @@ -436,7 +437,7 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // require.Equal(t, []byte{i}, v) // } // -// c2 := tx.Cursor(dbutils.Buckets[1]) +// c2 := tx.Cursor(dbutils.ErigonTables[1]) // for i := uint8(0); i < 12; i++ { // require.NoError(t, c2.Put([]byte{i}, []byte{i})) // } @@ -464,12 +465,12 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // // t.Run("cursor put and delete"+msg, func(t *testing.T) { // if err := db.Update(ctx, func(tx ethdb.Tx) error { -// c3 := tx.Cursor(dbutils.Buckets[2]) +// c3 := tx.Cursor(dbutils.ErigonTables[2]) // for i := uint8(0); i < 10; i++ { // don't read in same loop to check that writes don't affect each other (for example by sharing bucket.prefix buffer) // require.NoError(t, c3.Put([]byte{i}, []byte{i})) // } // for i := uint8(0); i < 10; i++ { 
-// v, err := tx.GetOne(dbutils.Buckets[2], []byte{i}) +// v, err := tx.GetOne(dbutils.ErigonTables[2], []byte{i}) // require.NoError(t, err) // require.Equal(t, []byte{i}, v) // } @@ -481,9 +482,9 @@ func testMultiCursor(t *testing.T, db ethdb.RwKV, bucket1, bucket2 string) { // } // // if err := db.Update(ctx, func(tx ethdb.Tx) error { -// c3 := tx.Cursor(dbutils.Buckets[2]) +// c3 := tx.Cursor(dbutils.ErigonTables[2]) // require.NoError(t, c3.Delete([]byte{5}, nil)) -// v, err := tx.GetOne(dbutils.Buckets[2], []byte{5}) +// v, err := tx.GetOne(dbutils.ErigonTables[2], []byte{5}) // require.NoError(t, err) // require.Nil(t, v) // return nil diff --git a/ethdb/kv/kv_mdbx.go b/ethdb/mdbx/kv_mdbx.go similarity index 86% rename from ethdb/kv/kv_mdbx.go rename to ethdb/mdbx/kv_mdbx.go index 6a3a1d4edbb..e47c44320f4 100644 --- a/ethdb/kv/kv_mdbx.go +++ b/ethdb/mdbx/kv_mdbx.go @@ -1,4 +1,4 @@ -package kv +package mdbx import ( "bytes" @@ -14,9 +14,8 @@ import ( "time" "github.com/c2h5oh/datasize" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/metrics" "github.com/torquem-ch/mdbx-go/mdbx" @@ -26,11 +25,11 @@ const expectMdbxVersionMajor = 0 const expectMdbxVersionMinor = 10 const pageSize = 4 * 1024 -const NonExistingDBI dbutils.DBI = 999_999_999 +const NonExistingDBI kv.DBI = 999_999_999 -type BucketConfigsFunc func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg +type BucketConfigsFunc func(defaultBuckets kv.TableCfg) kv.TableCfg -func DefaultBucketConfigs(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { +func DefaultBucketConfigs(defaultBuckets kv.TableCfg) kv.TableCfg { return defaultBuckets } @@ -38,10 +37,11 @@ type MdbxOpts struct { bucketsCfg BucketConfigsFunc path string inMem bool - label ethdb.Label // marker to distinct db instances - one process may 
open many databases. for example to collect metrics of only 1 database - verbosity ethdb.DBVerbosityLvl + label kv.Label // marker to distinct db instances - one process may open many databases. for example to collect metrics of only 1 database + verbosity kv.DBVerbosityLvl mapSize datasize.ByteSize flags uint + log log.Logger } func testKVPath() string { @@ -52,14 +52,15 @@ func testKVPath() string { return dir } -func NewMDBX() MdbxOpts { +func NewMDBX(log log.Logger) MdbxOpts { return MdbxOpts{ bucketsCfg: DefaultBucketConfigs, flags: mdbx.NoReadahead | mdbx.Coalesce | mdbx.Durable, + log: log, } } -func (opts MdbxOpts) Label(label ethdb.Label) MdbxOpts { +func (opts MdbxOpts) Label(label kv.Label) MdbxOpts { opts.label = label return opts } @@ -93,7 +94,7 @@ func (opts MdbxOpts) Readonly() MdbxOpts { return opts } -func (opts MdbxOpts) DBVerbosity(v ethdb.DBVerbosityLvl) MdbxOpts { +func (opts MdbxOpts) DBVerbosity(v kv.DBVerbosityLvl) MdbxOpts { opts.verbosity = v return opts } @@ -108,11 +109,11 @@ func (opts MdbxOpts) WithBucketsConfig(f BucketConfigsFunc) MdbxOpts { return opts } -func (opts MdbxOpts) Open() (ethdb.RwKV, error) { +func (opts MdbxOpts) Open() (kv.RwDB, error) { if expectMdbxVersionMajor != mdbx.Major || expectMdbxVersionMinor != mdbx.Minor { return nil, fmt.Errorf("unexpected mdbx version: %d.%d, expected %d %d. 
Please run 'make mdbx'", mdbx.Major, mdbx.Minor, expectMdbxVersionMajor, expectMdbxVersionMinor) } - logger := log.New("mdbx", opts.label.String(), "exclusive", opts.flags&mdbx.Exclusive != 0) + var err error if opts.inMem { opts.path = testKVPath() @@ -131,7 +132,7 @@ func (opts MdbxOpts) Open() (ethdb.RwKV, error) { if err = env.SetOption(mdbx.OptMaxDB, 100); err != nil { return nil, err } - if err = env.SetOption(mdbx.OptMaxReaders, ethdb.ReadersLimit); err != nil { + if err = env.SetOption(mdbx.OptMaxReaders, kv.ReadersLimit); err != nil { return nil, err } @@ -201,12 +202,12 @@ func (opts MdbxOpts) Open() (ethdb.RwKV, error) { db := &MdbxKV{ opts: opts, env: env, - log: logger, + log: opts.log, wg: &sync.WaitGroup{}, - buckets: dbutils.BucketsCfg{}, + buckets: kv.TableCfg{}, txSize: dirtyPagesLimit * pageSize, } - customBuckets := opts.bucketsCfg(dbutils.BucketsConfigs) + customBuckets := opts.bucketsCfg(kv.BucketsConfigs) for name, cfg := range customBuckets { // copy map to avoid changing global variable db.buckets[name] = cfg } @@ -222,7 +223,7 @@ func (opts MdbxOpts) Open() (ethdb.RwKV, error) { if db.buckets[name].IsDeprecated { continue } - if err = tx.(ethdb.BucketMigrator).CreateBucket(name); err != nil { + if err = tx.(kv.BucketMigrator).CreateBucket(name); err != nil { return nil, err } } @@ -231,12 +232,12 @@ func (opts MdbxOpts) Open() (ethdb.RwKV, error) { return nil, err } } else { - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + if err := db.Update(context.Background(), func(tx kv.RwTx) error { for _, name := range buckets { if db.buckets[name].IsDeprecated { continue } - if err := tx.(ethdb.BucketMigrator).CreateBucket(name); err != nil { + if err := tx.(kv.BucketMigrator).CreateBucket(name); err != nil { return err } } @@ -264,7 +265,7 @@ func (opts MdbxOpts) Open() (ethdb.RwKV, error) { return fmt.Errorf("bucket: %s, %w", name, createErr) } } - cnfCopy.DBI = dbutils.DBI(dbi) + cnfCopy.DBI = kv.DBI(dbi) 
db.buckets[name] = cnfCopy } return nil @@ -282,7 +283,7 @@ func (opts MdbxOpts) Open() (ethdb.RwKV, error) { return db, nil } -func (opts MdbxOpts) MustOpen() ethdb.RwKV { +func (opts MdbxOpts) MustOpen() kv.RwDB { db, err := opts.Open() if err != nil { panic(fmt.Errorf("fail to open mdbx: %w", err)) @@ -294,7 +295,7 @@ type MdbxKV struct { env *mdbx.Env log log.Logger wg *sync.WaitGroup - buckets dbutils.BucketsCfg + buckets kv.TableCfg opts MdbxOpts txSize uint64 } @@ -319,7 +320,7 @@ func (db *MdbxKV) Close() { } } -func (db *MdbxKV) BeginRo(_ context.Context) (txn ethdb.Tx, err error) { +func (db *MdbxKV) BeginRo(_ context.Context) (txn kv.Tx, err error) { if db.env == nil { return nil, fmt.Errorf("db closed") } @@ -341,7 +342,7 @@ func (db *MdbxKV) BeginRo(_ context.Context) (txn ethdb.Tx, err error) { }, nil } -func (db *MdbxKV) BeginRw(_ context.Context) (txn ethdb.RwTx, err error) { +func (db *MdbxKV) BeginRw(_ context.Context) (txn kv.RwTx, err error) { if db.env == nil { return nil, fmt.Errorf("db closed") } @@ -368,7 +369,7 @@ type MdbxTx struct { tx *mdbx.Txn db *MdbxKV cursors map[uint64]*mdbx.Cursor - statelessCursors map[string]ethdb.Cursor + statelessCursors map[string]kv.Cursor readOnly bool cursorID uint64 } @@ -377,7 +378,7 @@ type MdbxCursor struct { tx *MdbxTx c *mdbx.Cursor bucketName string - bucketCfg dbutils.BucketConfigItem + bucketCfg kv.TableConfigItem dbi mdbx.DBI id uint64 } @@ -386,15 +387,15 @@ func (db *MdbxKV) Env() *mdbx.Env { return db.env } -func (db *MdbxKV) AllDBI() map[string]dbutils.DBI { - res := map[string]dbutils.DBI{} +func (db *MdbxKV) AllDBI() map[string]kv.DBI { + res := map[string]kv.DBI{} for name, cfg := range db.buckets { res[name] = cfg.DBI } return res } -func (db *MdbxKV) AllBuckets() dbutils.BucketsCfg { +func (db *MdbxKV) AllBuckets() kv.TableCfg { return db.buckets } @@ -456,7 +457,7 @@ func (tx *MdbxTx) ForAmount(bucket string, fromPrefix []byte, amount uint32, wal } func (tx *MdbxTx) CollectMetrics() { - 
if tx.db.opts.label != ethdb.Chain { + if tx.db.opts.label != kv.ChainDB { return } @@ -475,75 +476,75 @@ func (tx *MdbxTx) CollectMetrics() { if !metrics.Enabled { return } - ethdb.DbSize.Update(int64(info.Geo.Current)) - ethdb.DbPgopsNewly.Update(int64(info.PageOps.Newly)) - ethdb.DbPgopsCow.Update(int64(info.PageOps.Cow)) - ethdb.DbPgopsClone.Update(int64(info.PageOps.Clone)) - ethdb.DbPgopsSplit.Update(int64(info.PageOps.Split)) - ethdb.DbPgopsMerge.Update(int64(info.PageOps.Merge)) - ethdb.DbPgopsSpill.Update(int64(info.PageOps.Spill)) - ethdb.DbPgopsUnspill.Update(int64(info.PageOps.Unspill)) - ethdb.DbPgopsWops.Update(int64(info.PageOps.Wops)) + kv.DbSize.Update(int64(info.Geo.Current)) + kv.DbPgopsNewly.Update(int64(info.PageOps.Newly)) + kv.DbPgopsCow.Update(int64(info.PageOps.Cow)) + kv.DbPgopsClone.Update(int64(info.PageOps.Clone)) + kv.DbPgopsSplit.Update(int64(info.PageOps.Split)) + kv.DbPgopsMerge.Update(int64(info.PageOps.Merge)) + kv.DbPgopsSpill.Update(int64(info.PageOps.Spill)) + kv.DbPgopsUnspill.Update(int64(info.PageOps.Unspill)) + kv.DbPgopsWops.Update(int64(info.PageOps.Wops)) txInfo, err := tx.tx.Info(true) if err != nil { return } - ethdb.TxDirty.Update(int64(txInfo.SpaceDirty)) - ethdb.TxLimit.Update(int64(tx.db.txSize)) - ethdb.TxSpill.Update(int64(txInfo.Spill)) - ethdb.TxUnspill.Update(int64(txInfo.Unspill)) + kv.TxDirty.Update(int64(txInfo.SpaceDirty)) + kv.TxLimit.Update(int64(tx.db.txSize)) + kv.TxSpill.Update(int64(txInfo.Spill)) + kv.TxUnspill.Update(int64(txInfo.Unspill)) gc, err := tx.BucketStat("gc") if err != nil { return } - ethdb.GcLeafMetric.Update(int64(gc.LeafPages)) - ethdb.GcOverflowMetric.Update(int64(gc.OverflowPages)) - ethdb.GcPagesMetric.Update(int64((gc.LeafPages + gc.OverflowPages) * pageSize / 8)) + kv.GcLeafMetric.Update(int64(gc.LeafPages)) + kv.GcOverflowMetric.Update(int64(gc.OverflowPages)) + kv.GcPagesMetric.Update(int64((gc.LeafPages + gc.OverflowPages) * pageSize / 8)) { - st, err := 
tx.BucketStat(dbutils.PlainStateBucket) + st, err := tx.BucketStat(kv.PlainStateBucket) if err != nil { return } - ethdb.TableStateLeaf.Update(int64(st.LeafPages)) - ethdb.TableStateBranch.Update(int64(st.BranchPages)) - ethdb.TableStateEntries.Update(int64(st.Entries)) - ethdb.TableStateSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) + kv.TableStateLeaf.Update(int64(st.LeafPages)) + kv.TableStateBranch.Update(int64(st.BranchPages)) + kv.TableStateEntries.Update(int64(st.Entries)) + kv.TableStateSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) } { - st, err := tx.BucketStat(dbutils.StorageChangeSetBucket) + st, err := tx.BucketStat(kv.StorageChangeSet) if err != nil { return } - ethdb.TableScsLeaf.Update(int64(st.LeafPages)) - ethdb.TableScsBranch.Update(int64(st.BranchPages)) - ethdb.TableScsEntries.Update(int64(st.Entries)) - ethdb.TableScsSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) + kv.TableScsLeaf.Update(int64(st.LeafPages)) + kv.TableScsBranch.Update(int64(st.BranchPages)) + kv.TableScsEntries.Update(int64(st.Entries)) + kv.TableScsSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) } { - st, err := tx.BucketStat(dbutils.EthTx) + st, err := tx.BucketStat(kv.EthTx) if err != nil { return } - ethdb.TableTxLeaf.Update(int64(st.LeafPages)) - ethdb.TableTxBranch.Update(int64(st.BranchPages)) - ethdb.TableTxOverflow.Update(int64(st.OverflowPages)) - ethdb.TableTxEntries.Update(int64(st.Entries)) - ethdb.TableTxSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) + kv.TableTxLeaf.Update(int64(st.LeafPages)) + kv.TableTxBranch.Update(int64(st.BranchPages)) + kv.TableTxOverflow.Update(int64(st.OverflowPages)) + kv.TableTxEntries.Update(int64(st.Entries)) + kv.TableTxSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) } { - st, err := tx.BucketStat(dbutils.Log) + st, err := tx.BucketStat(kv.Log) if err != nil { 
return } - ethdb.TableLogLeaf.Update(int64(st.LeafPages)) - ethdb.TableLogBranch.Update(int64(st.BranchPages)) - ethdb.TableLogOverflow.Update(int64(st.OverflowPages)) - ethdb.TableLogEntries.Update(int64(st.Entries)) - ethdb.TableLogSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) + kv.TableLogLeaf.Update(int64(st.LeafPages)) + kv.TableLogBranch.Update(int64(st.BranchPages)) + kv.TableLogOverflow.Update(int64(st.OverflowPages)) + kv.TableLogEntries.Update(int64(st.Entries)) + kv.TableLogSize.Update(int64(st.LeafPages+st.BranchPages+st.OverflowPages) * pageSize) } } @@ -552,7 +553,7 @@ func (tx *MdbxTx) ListBuckets() ([]string, error) { return tx.tx.ListDBI() } -func (db *MdbxKV) View(ctx context.Context, f func(tx ethdb.Tx) error) (err error) { +func (db *MdbxKV) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { if db.env == nil { return fmt.Errorf("db closed") } @@ -569,7 +570,7 @@ func (db *MdbxKV) View(ctx context.Context, f func(tx ethdb.Tx) error) (err erro return f(tx) } -func (db *MdbxKV) Update(ctx context.Context, f func(tx ethdb.RwTx) error) (err error) { +func (db *MdbxKV) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) { if db.env == nil { return fmt.Errorf("db closed") } @@ -599,13 +600,13 @@ func (tx *MdbxTx) CreateBucket(name string) error { return fmt.Errorf("create bucket: %s, %w", name, err) } if err == nil { - cnfCopy.DBI = dbutils.DBI(dbi) + cnfCopy.DBI = kv.DBI(dbi) var flags uint flags, err = tx.tx.Flags(dbi) if err != nil { return err } - cnfCopy.Flags = dbutils.BucketFlags(flags) + cnfCopy.Flags = kv.TableFlags(flags) tx.db.buckets[name] = cnfCopy return nil @@ -619,9 +620,9 @@ func (tx *MdbxTx) CreateBucket(name string) error { nativeFlags |= mdbx.Create } - if flags&dbutils.DupSort != 0 { + if flags&kv.DupSort != 0 { nativeFlags |= mdbx.DupSort - flags ^= dbutils.DupSort + flags ^= kv.DupSort } if flags != 0 { return fmt.Errorf("some not supported flag provided for bucket") @@ 
-632,7 +633,7 @@ func (tx *MdbxTx) CreateBucket(name string) error { if err != nil { return fmt.Errorf("create bucket: %s, %w", name, err) } - cnfCopy.DBI = dbutils.DBI(dbi) + cnfCopy.DBI = kv.DBI(dbi) tx.db.buckets[name] = cnfCopy return nil @@ -650,7 +651,7 @@ func (tx *MdbxTx) dropEvenIfBucketIsNotDeprecated(name string) error { } return fmt.Errorf("bucket: %s, %w", name, err) } - dbi = dbutils.DBI(nativeDBI) + dbi = kv.DBI(nativeDBI) } if err := tx.tx.Drop(mdbx.DBI(dbi), true); err != nil { @@ -672,7 +673,7 @@ func (tx *MdbxTx) ClearBucket(bucket string) error { func (tx *MdbxTx) DropBucket(bucket string) error { if cfg, ok := tx.db.buckets[bucket]; !(ok && cfg.IsDeprecated) { - return fmt.Errorf("%w, bucket: %s", ethdb.ErrAttemptToDeleteNonDeprecatedBucket, bucket) + return fmt.Errorf("%w, bucket: %s", kv.ErrAttemptToDeleteNonDeprecatedBucket, bucket) } return tx.dropEvenIfBucketIsNotDeprecated(bucket) @@ -716,14 +717,14 @@ func (tx *MdbxTx) Commit() error { return err } - if tx.db.opts.label == ethdb.Chain { - ethdb.DbCommitPreparation.Update(latency.Preparation) - ethdb.DbCommitGc.Update(latency.GC) - ethdb.DbCommitAudit.Update(latency.Audit) - ethdb.DbCommitWrite.Update(latency.Write) - ethdb.DbCommitSync.Update(latency.Sync) - ethdb.DbCommitEnding.Update(latency.Ending) - ethdb.DbCommitBigBatchTimer.Update(latency.Whole) + if tx.db.opts.label == kv.ChainDB { + kv.DbCommitPreparation.Update(latency.Preparation) + kv.DbCommitGc.Update(latency.GC) + kv.DbCommitAudit.Update(latency.Audit) + kv.DbCommitWrite.Update(latency.Write) + kv.DbCommitSync.Update(latency.Sync) + kv.DbCommitEnding.Update(latency.Ending) + kv.DbCommitBigBatchTimer.Update(latency.Whole) } if latency.Whole > slowTx { @@ -803,9 +804,9 @@ func (tx *MdbxTx) closeCursors() { tx.statelessCursors = nil } -func (tx *MdbxTx) statelessCursor(bucket string) (ethdb.RwCursor, error) { +func (tx *MdbxTx) statelessCursor(bucket string) (kv.RwCursor, error) { if tx.statelessCursors == nil { - 
tx.statelessCursors = make(map[string]ethdb.Cursor) + tx.statelessCursors = make(map[string]kv.Cursor) } c, ok := tx.statelessCursors[bucket] if !ok { @@ -816,7 +817,7 @@ func (tx *MdbxTx) statelessCursor(bucket string) (ethdb.RwCursor, error) { } tx.statelessCursors[bucket] = c } - return c.(ethdb.RwCursor), nil + return c.(kv.RwCursor), nil } func (tx *MdbxTx) Put(bucket string, k, v []byte) error { @@ -872,7 +873,7 @@ func (tx *MdbxTx) AppendDup(bucket string, k, v []byte) error { } func (tx *MdbxTx) IncrementSequence(bucket string, amount uint64) (uint64, error) { - c, err := tx.statelessCursor(dbutils.Sequence) + c, err := tx.statelessCursor(kv.Sequence) if err != nil { return 0, err } @@ -896,7 +897,7 @@ func (tx *MdbxTx) IncrementSequence(bucket string, amount uint64) (uint64, error } func (tx *MdbxTx) ReadSequence(bucket string) (uint64, error) { - c, err := tx.statelessCursor(dbutils.Sequence) + c, err := tx.statelessCursor(kv.Sequence) if err != nil { return 0, err } @@ -935,24 +936,24 @@ func (tx *MdbxTx) BucketStat(name string) (*mdbx.Stat, error) { return st, nil } -func (tx *MdbxTx) RwCursor(bucket string) (ethdb.RwCursor, error) { +func (tx *MdbxTx) RwCursor(bucket string) (kv.RwCursor, error) { b := tx.db.buckets[bucket] if b.AutoDupSortKeysConversion { return tx.stdCursor(bucket) } - if b.Flags&dbutils.DupSort != 0 { + if b.Flags&kv.DupSort != 0 { return tx.RwCursorDupSort(bucket) } return tx.stdCursor(bucket) } -func (tx *MdbxTx) Cursor(bucket string) (ethdb.Cursor, error) { +func (tx *MdbxTx) Cursor(bucket string) (kv.Cursor, error) { return tx.RwCursor(bucket) } -func (tx *MdbxTx) stdCursor(bucket string) (ethdb.RwCursor, error) { +func (tx *MdbxTx) stdCursor(bucket string) (kv.RwCursor, error) { b := tx.db.buckets[bucket] c := &MdbxCursor{bucketName: bucket, tx: tx, bucketCfg: b, dbi: mdbx.DBI(tx.db.buckets[bucket].DBI), id: tx.cursorID} tx.cursorID++ @@ -971,7 +972,7 @@ func (tx *MdbxTx) stdCursor(bucket string) (ethdb.RwCursor, error) { 
return c, nil } -func (tx *MdbxTx) RwCursorDupSort(bucket string) (ethdb.RwCursorDupSort, error) { +func (tx *MdbxTx) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { basicCursor, err := tx.stdCursor(bucket) if err != nil { return nil, err @@ -979,7 +980,7 @@ func (tx *MdbxTx) RwCursorDupSort(bucket string) (ethdb.RwCursorDupSort, error) return &MdbxDupSortCursor{MdbxCursor: basicCursor.(*MdbxCursor)}, nil } -func (tx *MdbxTx) CursorDupSort(bucket string) (ethdb.CursorDupSort, error) { +func (tx *MdbxTx) CursorDupSort(bucket string) (kv.CursorDupSort, error) { return tx.RwCursorDupSort(bucket) } @@ -1547,7 +1548,7 @@ func (c *MdbxDupSortCursor) CountDuplicates() (uint64, error) { return res, nil } -func bucketSlice(b dbutils.BucketsCfg) []string { +func bucketSlice(b kv.TableCfg) []string { buckets := make([]string, 0, len(b)) for name := range b { buckets = append(buckets, name) diff --git a/ethdb/kv/kv_migrator_test.go b/ethdb/mdbx/kv_migrator_test.go similarity index 54% rename from ethdb/kv/kv_migrator_test.go rename to ethdb/mdbx/kv_migrator_test.go index 4a32901c1b9..e0b86a5b532 100644 --- a/ethdb/kv/kv_migrator_test.go +++ b/ethdb/mdbx/kv_migrator_test.go @@ -1,49 +1,51 @@ -package kv +package mdbx_test import ( "context" "errors" "testing" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/memdb" + "github.com/ledgerwatch/erigon/log" "github.com/stretchr/testify/require" ) func TestBucketCRUD(t *testing.T) { require := require.New(t) - kv := NewMemKV() - defer kv.Close() + db := memdb.New() + defer db.Close() ctx := context.Background() - tx, err := kv.BeginRw(ctx) + tx, err := db.BeginRw(ctx) require.NoError(err) defer tx.Rollback() - normalBucket := dbutils.Buckets[15] - deprecatedBucket := dbutils.DeprecatedBuckets[0] - migrator, ok := tx.(ethdb.BucketMigrator) + normalBucket := 
kv.ErigonTables[15] + deprecatedBucket := kv.DeprecatedBuckets[0] + migrator, ok := tx.(kv.BucketMigrator) if !ok { return } // check thad buckets have unique DBI's - uniquness := map[dbutils.DBI]bool{} - castedKv, ok := kv.(*MdbxKV) + uniquness := map[kv.DBI]bool{} + castedKv, ok := db.(*mdbx.MdbxKV) if !ok { t.Skip() } - for _, bucketCfg := range castedKv.buckets { - if bucketCfg.DBI == NonExistingDBI { + for _, dbi := range castedKv.AllDBI() { + if dbi == mdbx.NonExistingDBI { continue } - _, ok := uniquness[bucketCfg.DBI] + _, ok := uniquness[dbi] require.False(ok) - uniquness[bucketCfg.DBI] = true + uniquness[dbi] = true } require.True(migrator.ExistsBucket(normalBucket)) - require.True(errors.Is(migrator.DropBucket(normalBucket), ethdb.ErrAttemptToDeleteNonDeprecatedBucket)) + require.True(errors.Is(migrator.DropBucket(normalBucket), kv.ErrAttemptToDeleteNonDeprecatedBucket)) require.False(migrator.ExistsBucket(deprecatedBucket)) require.NoError(migrator.CreateBucket(deprecatedBucket)) @@ -68,29 +70,30 @@ func TestBucketCRUD(t *testing.T) { require.True(len(buckets) > 10) // check thad buckets have unique DBI's - uniquness = map[dbutils.DBI]bool{} - for _, bucketCfg := range castedKv.buckets { - if bucketCfg.DBI == NonExistingDBI { + uniquness = map[kv.DBI]bool{} + for _, dbi := range castedKv.AllDBI() { + if dbi == mdbx.NonExistingDBI { continue } - _, ok := uniquness[bucketCfg.DBI] + _, ok := uniquness[dbi] require.False(ok) - uniquness[bucketCfg.DBI] = true + uniquness[dbi] = true } } func TestReadOnlyMode(t *testing.T) { path := t.TempDir() - db1 := NewMDBX().Path(path).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, + logger := log.New() + db1 := mdbx.NewMDBX(logger).Path(path).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.TableConfigItem{}, } }).MustOpen() db1.Close() - db2 := 
NewMDBX().Readonly().Path(path).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, + db2 := mdbx.NewMDBX(logger).Readonly().Path(path).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.TableConfigItem{}, } }).MustOpen() defer db2.Close() @@ -99,7 +102,7 @@ func TestReadOnlyMode(t *testing.T) { require.NoError(t, err) defer tx.Rollback() - c, err := tx.Cursor(dbutils.HeadersBucket) + c, err := tx.Cursor(kv.Headers) require.NoError(t, err) _, _, err = c.Seek([]byte("some prefix")) require.NoError(t, err) diff --git a/ethdb/mdbx/util.go b/ethdb/mdbx/util.go new file mode 100644 index 00000000000..0c1c744251f --- /dev/null +++ b/ethdb/mdbx/util.go @@ -0,0 +1,31 @@ +package mdbx + +import ( + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/log" + mdbxbind "github.com/torquem-ch/mdbx-go/mdbx" +) + +func MustOpen(path string) kv.RwDB { + db, err := Open(path, log.New(), false) + if err != nil { + panic(err) + } + return db +} + +// Open - main method to open database. +func Open(path string, logger log.Logger, readOnly bool) (kv.RwDB, error) { + var db kv.RwDB + var err error + opts := NewMDBX(logger).Path(path) + if readOnly { + opts = opts.Flags(func(flags uint) uint { return flags | mdbxbind.Readonly }) + } + db, err = opts.Open() + + if err != nil { + return nil, err + } + return db, nil +} diff --git a/ethdb/kv/memory_database.go b/ethdb/memdb/memory_database.go similarity index 66% rename from ethdb/kv/memory_database.go rename to ethdb/memdb/memory_database.go index 6418f4be4ac..cf8082f2522 100644 --- a/ethdb/kv/memory_database.go +++ b/ethdb/memdb/memory_database.go @@ -14,29 +14,32 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package kv +package memdb import ( "context" "testing" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/log" ) -func NewMemKV() ethdb.RwKV { - return NewMDBX().InMem().MustOpen() +func New() kv.RwDB { + logger := log.New() //TODO: move higher + return mdbx.NewMDBX(logger).InMem().MustOpen() } -func NewTestKV(t testing.TB) ethdb.RwKV { - kv := NewMemKV() - t.Cleanup(kv.Close) - return kv +func NewTestDB(t testing.TB) kv.RwDB { + db := New() + t.Cleanup(db.Close) + return db } -func NewTestTx(t testing.TB) (ethdb.RwKV, ethdb.RwTx) { - kv := NewMemKV() - t.Cleanup(kv.Close) - tx, err := kv.BeginRw(context.Background()) //nolint +func NewTestTx(t testing.TB) (kv.RwDB, kv.RwTx) { + db := New() + t.Cleanup(db.Close) + tx, err := db.BeginRw(context.Background()) //nolint if err != nil { t.Fatal(err) } @@ -46,5 +49,5 @@ func NewTestTx(t testing.TB) (ethdb.RwKV, ethdb.RwTx) { tt.Cleanup(tx.Rollback) } } - return kv, tx + return db, tx } diff --git a/ethdb/kv/database_test.go b/ethdb/olddb/database_test.go similarity index 90% rename from ethdb/kv/database_test.go rename to ethdb/olddb/database_test.go index 486b26b467e..d23e3bb1960 100644 --- a/ethdb/kv/database_test.go +++ b/ethdb/olddb/database_test.go @@ -16,7 +16,7 @@ // +build !js -package kv +package olddb import ( "bytes" @@ -28,17 +28,18 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -var testBucket = dbutils.HashedAccountsBucket +var testBucket = kv.HashedAccounts var testValues = []string{"a", "1251", "\x00123\x00"} func TestPutGet(t *testing.T) { - _, tx := NewTestTx(t) + _, tx := memdb.NewTestTx(t) //for _, k := range testValues { // err := 
db.Put(testBucket, []byte(k), []byte{}) @@ -98,7 +99,7 @@ func TestPutGet(t *testing.T) { } func TestNoPanicAfterDbClosed(t *testing.T) { - db := NewTestKV(t) + db := memdb.NewTestDB(t) tx, err := db.BeginRo(context.Background()) require.NoError(t, err) defer tx.Rollback() @@ -114,11 +115,11 @@ func TestNoPanicAfterDbClosed(t *testing.T) { }) }() time.Sleep(time.Millisecond) // wait to check that db.Close doesn't panic, but wait when read tx finished - err = writeTx.Put(dbutils.Buckets[0], []byte{1}, []byte{1}) + err = writeTx.Put(kv.ErigonTables[0], []byte{1}, []byte{1}) require.NoError(t, err) err = writeTx.Commit() require.NoError(t, err) - _, err = tx.GetOne(dbutils.Buckets[0], []byte{1}) + _, err = tx.GetOne(kv.ErigonTables[0], []byte{1}) require.NoError(t, err) tx.Rollback() @@ -135,7 +136,7 @@ func TestNoPanicAfterDbClosed(t *testing.T) { } func TestParallelPutGet(t *testing.T) { - db := NewTestKV(t) + db := memdb.NewTestDB(t) const n = 8 var pending sync.WaitGroup @@ -144,7 +145,7 @@ func TestParallelPutGet(t *testing.T) { for i := 0; i < n; i++ { go func(key string) { defer pending.Done() - _ = db.Update(context.Background(), func(tx ethdb.RwTx) error { + _ = db.Update(context.Background(), func(tx kv.RwTx) error { err := tx.Put(testBucket, []byte(key), []byte("v"+key)) if err != nil { panic("put failed: " + err.Error()) @@ -159,7 +160,7 @@ func TestParallelPutGet(t *testing.T) { for i := 0; i < n; i++ { go func(key string) { defer pending.Done() - _ = db.View(context.Background(), func(tx ethdb.Tx) error { + _ = db.View(context.Background(), func(tx kv.Tx) error { data, err := tx.GetOne(testBucket, []byte(key)) if err != nil { panic("get failed: " + err.Error()) @@ -177,7 +178,7 @@ func TestParallelPutGet(t *testing.T) { for i := 0; i < n; i++ { go func(key string) { defer pending.Done() - _ = db.Update(context.Background(), func(tx ethdb.RwTx) error { + _ = db.Update(context.Background(), func(tx kv.RwTx) error { err := tx.Delete(testBucket, []byte(key), 
nil) if err != nil { panic("delete failed: " + err.Error()) @@ -192,7 +193,7 @@ func TestParallelPutGet(t *testing.T) { for i := 0; i < n; i++ { go func(key string) { defer pending.Done() - _ = db.Update(context.Background(), func(tx ethdb.RwTx) error { + _ = db.Update(context.Background(), func(tx kv.RwTx) error { v, err := tx.GetOne(testBucket, []byte(key)) if err != nil { panic(err) @@ -222,7 +223,7 @@ var fixedBits = 3 var keysInRange = [][]byte{common.FromHex("a8"), common.FromHex("bb"), common.FromHex("bd")} func TestWalk(t *testing.T) { - _, tx := NewTestTx(t) + _, tx := memdb.NewTestTx(t) for k, v := range hexEntries { err := tx.Put(testBucket, common.FromHex(k), common.FromHex(v)) diff --git a/ethdb/kv/mutation.go b/ethdb/olddb/mutation.go similarity index 84% rename from ethdb/kv/mutation.go rename to ethdb/olddb/mutation.go index 84df10ed7f9..c6e9ea7cdc9 100644 --- a/ethdb/kv/mutation.go +++ b/ethdb/olddb/mutation.go @@ -1,4 +1,4 @@ -package kv +package olddb import ( "bytes" @@ -12,14 +12,14 @@ import ( "github.com/google/btree" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) type mutation struct { puts *btree.BTree - db ethdb.RwTx + db kv.RwTx quit <-chan struct{} clean func() searchItem MutationItem @@ -41,7 +41,7 @@ type MutationItem struct { // defer batch.Rollback() // ... 
some calculations on `batch` // batch.Commit() -func NewBatch(tx ethdb.RwTx, quit <-chan struct{}) *mutation { +func NewBatch(tx kv.RwTx, quit <-chan struct{}) *mutation { clean := func() {} if quit == nil { ch := make(chan struct{}) @@ -65,7 +65,7 @@ func (mi *MutationItem) Less(than btree.Item) bool { return bytes.Compare(mi.key, i.key) < 0 } -func (m *mutation) RwKV() ethdb.RwKV { +func (m *mutation) RwKV() kv.RwDB { if casted, ok := m.db.(ethdb.HasRwKV); ok { return casted.RwKV() } @@ -85,9 +85,9 @@ func (m *mutation) getMem(table string, key []byte) ([]byte, bool) { } func (m *mutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { - v, ok := m.getMem(dbutils.Sequence, []byte(bucket)) + v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { - v, err = m.db.GetOne(dbutils.Sequence, []byte(bucket)) + v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) if err != nil { return 0, err } @@ -100,16 +100,16 @@ func (m *mutation) IncrementSequence(bucket string, amount uint64) (res uint64, newVBytes := make([]byte, 8) binary.BigEndian.PutUint64(newVBytes, currentV+amount) - if err = m.Put(dbutils.Sequence, []byte(bucket), newVBytes); err != nil { + if err = m.Put(kv.Sequence, []byte(bucket), newVBytes); err != nil { return 0, err } return currentV, nil } func (m *mutation) ReadSequence(bucket string) (res uint64, err error) { - v, ok := m.getMem(dbutils.Sequence, []byte(bucket)) + v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { - v, err = m.db.GetOne(dbutils.Sequence, []byte(bucket)) + v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) if err != nil { return 0, err } @@ -205,22 +205,6 @@ func (m *mutation) AppendDup(table string, key []byte, value []byte) error { return m.Put(table, key, value) } -func (m *mutation) MultiPut(tuples ...[]byte) (uint64, error) { - m.mu.Lock() - defer m.mu.Unlock() - l := len(tuples) - for i := 0; i < l; i += 3 { - newMi := &MutationItem{table: string(tuples[i]), key: 
tuples[i+1], value: tuples[i+2]} - i := m.puts.ReplaceOrInsert(newMi) - m.size += int(unsafe.Sizeof(newMi)) + len(newMi.key) + len(newMi.value) - if i != nil { - oldMi := i.(*MutationItem) - m.size -= (int(unsafe.Sizeof(oldMi)) + len(oldMi.key) + len(oldMi.value)) - } - } - return 0, nil -} - func (m *mutation) BatchSize() int { m.mu.RLock() defer m.mu.RUnlock() @@ -250,19 +234,9 @@ func (m *mutation) Delete(table string, k, v []byte) error { return m.Put(table, k, nil) } -func (m *mutation) CommitAndBegin(ctx context.Context) error { - err := m.Commit() - return err -} - -func (m *mutation) RollbackAndBegin(ctx context.Context) error { - m.Rollback() - return nil -} - -func (m *mutation) doCommit(tx ethdb.RwTx) error { +func (m *mutation) doCommit(tx kv.RwTx) error { var prevTable string - var c ethdb.RwCursor + var c kv.RwCursor var innerErr error var isEndOfBucket bool logEvery := time.NewTicker(30 * time.Second) @@ -365,6 +339,6 @@ func (m *mutation) panicOnEmptyDB() { } } -func (m *mutation) SetRwKV(kv ethdb.RwKV) { +func (m *mutation) SetRwKV(kv kv.RwDB) { m.db.(ethdb.HasRwKV).SetRwKV(kv) } diff --git a/ethdb/kv/object_db.go b/ethdb/olddb/object_db.go similarity index 63% rename from ethdb/kv/object_db.go rename to ethdb/olddb/object_db.go index aa5a7ac8e10..4ed05fb0055 100644 --- a/ethdb/kv/object_db.go +++ b/ethdb/olddb/object_db.go @@ -15,58 +15,34 @@ // along with the go-ethereum library. If not, see . // Package ethdb defines the interfaces for an Ethereum data store. -package kv +package olddb import ( - "bytes" "context" "fmt" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" - "github.com/torquem-ch/mdbx-go/mdbx" ) // ObjectDatabase - is an object-style interface of DB accessing type ObjectDatabase struct { - kv ethdb.RwKV + kv kv.RwDB } // NewObjectDatabase returns a AbstractDB wrapper. 
-func NewObjectDatabase(kv ethdb.RwKV) *ObjectDatabase { +//Deprecated +func NewObjectDatabase(kv kv.RwDB) *ObjectDatabase { return &ObjectDatabase{ kv: kv, } } -func MustOpen(path string) ethdb.RwKV { - db, err := Open(path, false) - if err != nil { - panic(err) - } - return db -} - -// Open - main method to open database. -func Open(path string, readOnly bool) (ethdb.RwKV, error) { - var db ethdb.RwKV - var err error - opts := NewMDBX().Path(path) - if readOnly { - opts = opts.Flags(func(flags uint) uint { return flags | mdbx.Readonly }) - } - db, err = opts.Open() - - if err != nil { - return nil, err - } - return db, nil -} - // Put inserts or updates a single entry. func (db *ObjectDatabase) Put(bucket string, key []byte, value []byte) error { - err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { + err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { return tx.Put(bucket, key, value) }) return err @@ -74,7 +50,7 @@ func (db *ObjectDatabase) Put(bucket string, key []byte, value []byte) error { // Append appends a single entry to the end of the bucket. func (db *ObjectDatabase) Append(bucket string, key []byte, value []byte) error { - err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { + err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { c, err := tx.RwCursor(bucket) if err != nil { return err @@ -86,7 +62,7 @@ func (db *ObjectDatabase) Append(bucket string, key []byte, value []byte) error // AppendDup appends a single entry to the end of the bucket. 
func (db *ObjectDatabase) AppendDup(bucket string, key []byte, value []byte) error { - err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { + err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { c, err := tx.RwCursorDupSort(bucket) if err != nil { return err @@ -96,20 +72,9 @@ func (db *ObjectDatabase) AppendDup(bucket string, key []byte, value []byte) err return err } -// MultiPut - requirements: input must be sorted and without duplicates -func (db *ObjectDatabase) MultiPut(tuples ...[]byte) (uint64, error) { - err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return ethdb.MultiPut(tx, tuples...) - }) - if err != nil { - return 0, err - } - return 0, nil -} - func (db *ObjectDatabase) Has(bucket string, key []byte) (bool, error) { var has bool - err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { + err := db.kv.View(context.Background(), func(tx kv.Tx) error { v, err := tx.GetOne(bucket, key) if err != nil { return err @@ -121,14 +86,14 @@ func (db *ObjectDatabase) Has(bucket string, key []byte) (bool, error) { } func (db *ObjectDatabase) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { - err = db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { + err = db.kv.Update(context.Background(), func(tx kv.RwTx) error { res, err = tx.IncrementSequence(bucket, amount) return err }) return res, err } func (db *ObjectDatabase) ReadSequence(bucket string) (res uint64, err error) { - err = db.kv.View(context.Background(), func(tx ethdb.Tx) error { + err = db.kv.View(context.Background(), func(tx kv.Tx) error { res, err = tx.ReadSequence(bucket) return err }) @@ -138,7 +103,7 @@ func (db *ObjectDatabase) ReadSequence(bucket string) (res uint64, err error) { // Get returns the value for a given key if it's present. 
func (db *ObjectDatabase) GetOne(bucket string, key []byte) ([]byte, error) { var dat []byte - err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { + err := db.kv.View(context.Background(), func(tx kv.Tx) error { v, err := tx.GetOne(bucket, key) if err != nil { return err @@ -159,7 +124,7 @@ func (db *ObjectDatabase) Get(bucket string, key []byte) ([]byte, error) { func (db *ObjectDatabase) Last(bucket string) ([]byte, []byte, error) { var key, value []byte - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { + if err := db.kv.View(context.Background(), func(tx kv.Tx) error { c, err := tx.Cursor(bucket) if err != nil { return err @@ -179,7 +144,7 @@ func (db *ObjectDatabase) Last(bucket string) ([]byte, []byte, error) { } func (db *ObjectDatabase) Walk(bucket string, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error { - err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { + err := db.kv.View(context.Background(), func(tx kv.Tx) error { c, err := tx.Cursor(bucket) if err != nil { return err @@ -190,18 +155,18 @@ func (db *ObjectDatabase) Walk(bucket string, startkey []byte, fixedbits int, wa } func (db *ObjectDatabase) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { - return db.kv.View(context.Background(), func(tx ethdb.Tx) error { + return db.kv.View(context.Background(), func(tx kv.Tx) error { return tx.ForEach(bucket, fromPrefix, walker) }) } func (db *ObjectDatabase) ForAmount(bucket string, fromPrefix []byte, amount uint32, walker func(k, v []byte) error) error { - return db.kv.View(context.Background(), func(tx ethdb.Tx) error { + return db.kv.View(context.Background(), func(tx kv.Tx) error { return tx.ForAmount(bucket, fromPrefix, amount, walker) }) } func (db *ObjectDatabase) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { - return db.kv.View(context.Background(), func(tx ethdb.Tx) error { + return 
db.kv.View(context.Background(), func(tx kv.Tx) error { return tx.ForPrefix(bucket, prefix, walker) }) } @@ -209,7 +174,7 @@ func (db *ObjectDatabase) ForPrefix(bucket string, prefix []byte, walker func(k, // Delete deletes the key from the queue and database func (db *ObjectDatabase) Delete(bucket string, k, v []byte) error { // Execute the actual operation - err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { + err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { return tx.Delete(bucket, k, v) }) return err @@ -217,8 +182,8 @@ func (db *ObjectDatabase) Delete(bucket string, k, v []byte) error { func (db *ObjectDatabase) BucketExists(name string) (bool, error) { exists := false - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) (err error) { - migrator, ok := tx.(ethdb.BucketMigrator) + if err := db.kv.View(context.Background(), func(tx kv.Tx) (err error) { + migrator, ok := tx.(kv.BucketMigrator) if !ok { return fmt.Errorf("%T doesn't implement ethdb.TxMigrator interface", db.kv) } @@ -236,8 +201,8 @@ func (db *ObjectDatabase) BucketExists(name string) (bool, error) { func (db *ObjectDatabase) ClearBuckets(buckets ...string) error { for i := range buckets { name := buckets[i] - if err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - migrator, ok := tx.(ethdb.BucketMigrator) + if err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { + migrator, ok := tx.(kv.BucketMigrator) if !ok { return fmt.Errorf("%T doesn't implement ethdb.TxMigrator interface", db.kv) } @@ -257,8 +222,8 @@ func (db *ObjectDatabase) DropBuckets(buckets ...string) error { for i := range buckets { name := buckets[i] log.Info("Dropping bucket", "name", name) - if err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - migrator, ok := tx.(ethdb.BucketMigrator) + if err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { + migrator, ok := tx.(kv.BucketMigrator) if !ok { return fmt.Errorf("%T 
doesn't implement ethdb.TxMigrator interface", db.kv) } @@ -277,11 +242,11 @@ func (db *ObjectDatabase) Close() { db.kv.Close() } -func (db *ObjectDatabase) RwKV() ethdb.RwKV { +func (db *ObjectDatabase) RwKV() kv.RwDB { return db.kv } -func (db *ObjectDatabase) SetRwKV(kv ethdb.RwKV) { +func (db *ObjectDatabase) SetRwKV(kv kv.RwDB) { db.kv = kv } @@ -292,39 +257,3 @@ func (db *ObjectDatabase) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb } return batch, nil } - -// Type which expecting sequence of triplets: dbi, key, value, .... -// It sorts entries by dbi name, then inside dbi clusters sort by keys -type MultiPutTuples [][]byte - -func (t MultiPutTuples) Len() int { return len(t) / 3 } - -func (t MultiPutTuples) Less(i, j int) bool { - i3, j3 := i*3, j*3 - cmp := bytes.Compare(t[i3], t[j3]) - if cmp == -1 { - return true - } - if cmp == 0 { - return bytes.Compare(t[i3+1], t[j3+1]) == -1 - } - return false -} - -func (t MultiPutTuples) Swap(i, j int) { - i3, j3 := i*3, j*3 - t[i3], t[j3] = t[j3], t[i3] - t[i3+1], t[j3+1] = t[j3+1], t[i3+1] - t[i3+2], t[j3+2] = t[j3+2], t[i3+2] -} - -func InspectDatabase(db ethdb.Database) error { - // FIXME: implement in Erigon - // see https://github.com/ethereum/go-ethereum/blob/f5d89cdb72c1e82e9deb54754bef8dd20bf12591/core/rawdb/database.go#L224 - return ethdb.ErrNotSupported -} - -func NewDatabaseWithFreezer(db *ObjectDatabase, dir, suffix string) (*ObjectDatabase, error) { - // FIXME: implement freezer in Erigon - return db, nil -} diff --git a/ethdb/kv/tx_db.go b/ethdb/olddb/tx_db.go similarity index 81% rename from ethdb/kv/tx_db.go rename to ethdb/olddb/tx_db.go index 73b98426cc6..ad187b3d07b 100644 --- a/ethdb/kv/tx_db.go +++ b/ethdb/olddb/tx_db.go @@ -1,10 +1,11 @@ -package kv +package olddb import ( "context" "fmt" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) @@ -13,17 +14,19 @@ import ( // TxDb not usable after .Commit()/.Rollback() 
call, but usable after .CommitAndBegin() call // you can put unlimited amount of data into this class // Walk and MultiWalk methods - work outside of Tx object yet, will implement it later +//Deprecated +//nolint type TxDb struct { db ethdb.Database - tx ethdb.Tx - cursors map[string]ethdb.Cursor + tx kv.Tx + cursors map[string]kv.Cursor txFlags ethdb.TxFlags len uint64 } //nolint -func WrapIntoTxDB(tx ethdb.RwTx) *TxDb { - return &TxDb{tx: tx, cursors: map[string]ethdb.Cursor{}} +func WrapIntoTxDB(tx kv.RwTx) *TxDb { + return &TxDb{tx: tx, cursors: map[string]kv.Cursor{}} } func (m *TxDb) Close() { @@ -42,7 +45,7 @@ func (m *TxDb) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPend return batch, nil } -func (m *TxDb) cursor(bucket string) (ethdb.Cursor, error) { +func (m *TxDb) cursor(bucket string) (kv.Cursor, error) { c, ok := m.cursors[bucket] if !ok { var err error @@ -56,7 +59,7 @@ func (m *TxDb) cursor(bucket string) (ethdb.Cursor, error) { } func (m *TxDb) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { - return m.tx.(ethdb.RwTx).IncrementSequence(bucket, amount) + return m.tx.(kv.RwTx).IncrementSequence(bucket, amount) } func (m *TxDb) ReadSequence(bucket string) (res uint64, err error) { @@ -69,7 +72,7 @@ func (m *TxDb) Put(bucket string, key []byte, value []byte) error { if err != nil { return err } - return c.(ethdb.RwCursor).Put(key, value) + return c.(kv.RwCursor).Put(key, value) } func (m *TxDb) Append(bucket string, key []byte, value []byte) error { @@ -78,7 +81,7 @@ func (m *TxDb) Append(bucket string, key []byte, value []byte) error { if err != nil { return err } - return c.(ethdb.RwCursor).Append(key, value) + return c.(kv.RwCursor).Append(key, value) } func (m *TxDb) AppendDup(bucket string, key []byte, value []byte) error { @@ -87,7 +90,7 @@ func (m *TxDb) AppendDup(bucket string, key []byte, value []byte) error { if err != nil { return err } - return c.(ethdb.RwCursorDupSort).AppendDup(key, value) + 
return c.(kv.RwCursorDupSort).AppendDup(key, value) } func (m *TxDb) Delete(bucket string, k, v []byte) error { @@ -96,28 +99,28 @@ func (m *TxDb) Delete(bucket string, k, v []byte) error { if err != nil { return err } - return c.(ethdb.RwCursor).Delete(k, v) + return c.(kv.RwCursor).Delete(k, v) } func (m *TxDb) begin(ctx context.Context, flags ethdb.TxFlags) error { - kv := m.db.(ethdb.HasRwKV).RwKV() + db := m.db.(ethdb.HasRwKV).RwKV() - var tx ethdb.Tx + var tx kv.Tx var err error if flagsðdb.RO != 0 { - tx, err = kv.BeginRo(ctx) + tx, err = db.BeginRo(ctx) } else { - tx, err = kv.BeginRw(ctx) + tx, err = db.BeginRw(ctx) } if err != nil { return err } m.tx = tx - m.cursors = make(map[string]ethdb.Cursor, 16) + m.cursors = make(map[string]kv.Cursor, 16) return nil } -func (m *TxDb) RwKV() ethdb.RwKV { +func (m *TxDb) RwKV() kv.RwDB { panic("not allowed to get KV interface because you will loose transaction, please use .Tx() method") } @@ -152,10 +155,6 @@ func (m *TxDb) Has(bucket string, key []byte) (bool, error) { return v != nil, nil } -func (m *TxDb) MultiPut(tuples ...[]byte) (uint64, error) { - return 0, ethdb.MultiPut(m.tx.(ethdb.RwTx), tuples...) 
-} - func (m *TxDb) BatchSize() int { return int(m.len) } @@ -193,20 +192,6 @@ func (m *TxDb) ForAmount(bucket string, prefix []byte, amount uint32, walker fun return m.tx.ForAmount(bucket, prefix, amount, walker) } -func (m *TxDb) CommitAndBegin(ctx context.Context) error { - err := m.Commit() - if err != nil { - return err - } - - return m.begin(ctx, m.txFlags) -} - -func (m *TxDb) RollbackAndBegin(ctx context.Context) error { - m.Rollback() - return m.begin(ctx, m.txFlags) -} - func (m *TxDb) Commit() error { if m.tx == nil { return fmt.Errorf("second call .Commit() on same transaction") @@ -230,12 +215,12 @@ func (m *TxDb) Rollback() { m.len = 0 } -func (m *TxDb) Tx() ethdb.Tx { +func (m *TxDb) Tx() kv.Tx { return m.tx } func (m *TxDb) BucketExists(name string) (bool, error) { - migrator, ok := m.tx.(ethdb.BucketMigrator) + migrator, ok := m.tx.(kv.BucketMigrator) if !ok { return false, fmt.Errorf("%T doesn't implement ethdb.TxMigrator interface", m.tx) } @@ -246,7 +231,7 @@ func (m *TxDb) ClearBuckets(buckets ...string) error { for i := range buckets { name := buckets[i] - migrator, ok := m.tx.(ethdb.BucketMigrator) + migrator, ok := m.tx.(kv.BucketMigrator) if !ok { return fmt.Errorf("%T doesn't implement ethdb.TxMigrator interface", m.tx) } @@ -262,7 +247,7 @@ func (m *TxDb) DropBuckets(buckets ...string) error { for i := range buckets { name := buckets[i] log.Info("Dropping bucket", "name", name) - migrator, ok := m.tx.(ethdb.BucketMigrator) + migrator, ok := m.tx.(kv.BucketMigrator) if !ok { return fmt.Errorf("%T doesn't implement ethdb.TxMigrator interface", m.tx) } diff --git a/ethdb/privateapi/all.go b/ethdb/privateapi/all.go new file mode 100644 index 00000000000..35a2a90fce5 --- /dev/null +++ b/ethdb/privateapi/all.go @@ -0,0 +1,78 @@ +package privateapi + +import ( + "fmt" + "net" + "time" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + grpc_prometheus 
"github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon/ethdb/remotedbserver" + "github.com/ledgerwatch/erigon/log" + "github.com/ledgerwatch/erigon/metrics" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" +) + +func StartGrpc(kv *remotedbserver.KvServer, ethBackendSrv *EthBackendServer, txPoolServer *TxPoolServer, miningServer *MiningServer, addr string, rateLimit uint32, creds *credentials.TransportCredentials) (*grpc.Server, error) { + log.Info("Starting private RPC server", "on", addr) + lis, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("could not create listener: %w, addr=%s", err, addr) + } + + var ( + streamInterceptors []grpc.StreamServerInterceptor + unaryInterceptors []grpc.UnaryServerInterceptor + ) + streamInterceptors = append(streamInterceptors, grpc_recovery.StreamServerInterceptor()) + unaryInterceptors = append(unaryInterceptors, grpc_recovery.UnaryServerInterceptor()) + + if metrics.Enabled { + streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor) + unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) + } + + var grpcServer *grpc.Server + //cpus := uint32(runtime.GOMAXPROCS(-1)) + opts := []grpc.ServerOption{ + //grpc.NumStreamWorkers(cpus), // reduce amount of goroutines + grpc.WriteBufferSize(1024), // reduce buffers to save mem + grpc.ReadBufferSize(1024), + grpc.MaxConcurrentStreams(rateLimit), // to force clients reduce concurrency level + // Don't drop the connection, settings accordign to this comment on GitHub + // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779 + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 10 * time.Second, + PermitWithoutStream: true, + }), + 
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)), + } + if creds == nil { + // no specific opts + } else { + opts = append(opts, grpc.Creds(*creds)) + } + grpcServer = grpc.NewServer(opts...) + remote.RegisterETHBACKENDServer(grpcServer, ethBackendSrv) + txpool.RegisterTxpoolServer(grpcServer, txPoolServer) + txpool.RegisterMiningServer(grpcServer, miningServer) + remote.RegisterKVServer(grpcServer, kv) + + if metrics.Enabled { + grpc_prometheus.Register(grpcServer) + } + + go func() { + if err := grpcServer.Serve(lis); err != nil { + log.Error("private RPC server fail", "err", err) + } + }() + + return grpcServer, nil +} diff --git a/ethdb/remote/remotedbserver/ethbackend.go b/ethdb/privateapi/ethbackend.go similarity index 99% rename from ethdb/remote/remotedbserver/ethbackend.go rename to ethdb/privateapi/ethbackend.go index ae1326f2532..47575a1fe21 100644 --- a/ethdb/remote/remotedbserver/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -1,4 +1,4 @@ -package remotedbserver +package privateapi import ( "bytes" diff --git a/ethdb/remote/remotedbserver/events.go b/ethdb/privateapi/events.go similarity index 98% rename from ethdb/remote/remotedbserver/events.go rename to ethdb/privateapi/events.go index 46d46a5c6e3..8d6cdaec7f7 100644 --- a/ethdb/remote/remotedbserver/events.go +++ b/ethdb/privateapi/events.go @@ -1,4 +1,4 @@ -package remotedbserver +package privateapi import ( "sync" diff --git a/ethdb/remote/remotedbserver/mining.go b/ethdb/privateapi/mining.go similarity index 99% rename from ethdb/remote/remotedbserver/mining.go rename to ethdb/privateapi/mining.go index c35f169a171..7cc236a0273 100644 --- a/ethdb/remote/remotedbserver/mining.go +++ b/ethdb/privateapi/mining.go @@ -1,4 +1,4 @@ -package remotedbserver +package privateapi import ( "bytes" diff --git a/ethdb/remote/remotedbserver/txpool.go b/ethdb/privateapi/txpool.go similarity 
index 99% rename from ethdb/remote/remotedbserver/txpool.go rename to ethdb/privateapi/txpool.go index 7cf2980d89b..4c6209f53e9 100644 --- a/ethdb/remote/remotedbserver/txpool.go +++ b/ethdb/privateapi/txpool.go @@ -1,4 +1,4 @@ -package remotedbserver +package privateapi import ( "bytes" diff --git a/ethdb/prune/storage_mode.go b/ethdb/prune/storage_mode.go index 7bd2cb1bcec..0bd3e11f45d 100644 --- a/ethdb/prune/storage_mode.go +++ b/ethdb/prune/storage_mode.go @@ -5,8 +5,7 @@ import ( "fmt" "math" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" ) @@ -71,11 +70,11 @@ func FromCli(flags string, exactHistory, exactReceipts, exactTxIndex, exactCallT return mode, nil } -func Get(db ethdb.KVGetter) (Mode, error) { +func Get(db kv.Getter) (Mode, error) { prune := DefaultMode prune.Initialised = true - v, err := db.GetOne(dbutils.DatabaseInfoBucket, dbutils.PruneDistanceHistory) + v, err := db.GetOne(kv.DatabaseInfo, kv.PruneDistanceHistory) if err != nil { return prune, err } @@ -84,7 +83,7 @@ func Get(db ethdb.KVGetter) (Mode, error) { } else { prune.History = math.MaxUint64 } - v, err = db.GetOne(dbutils.DatabaseInfoBucket, dbutils.PruneDistanceReceipts) + v, err = db.GetOne(kv.DatabaseInfo, kv.PruneDistanceReceipts) if err != nil { return prune, err } @@ -93,7 +92,7 @@ func Get(db ethdb.KVGetter) (Mode, error) { } else { prune.Receipts = math.MaxUint64 } - v, err = db.GetOne(dbutils.DatabaseInfoBucket, dbutils.PruneDistanceTxIndex) + v, err = db.GetOne(kv.DatabaseInfo, kv.PruneDistanceTxIndex) if err != nil { return prune, err } @@ -103,7 +102,7 @@ func Get(db ethdb.KVGetter) (Mode, error) { prune.TxIndex = math.MaxUint64 } - v, err = db.GetOne(dbutils.DatabaseInfoBucket, dbutils.PruneDistanceCallTraces) + v, err = db.GetOne(kv.DatabaseInfo, kv.PruneDistanceCallTraces) if err != nil { return prune, err } @@ -113,7 +112,7 @@ func Get(db 
ethdb.KVGetter) (Mode, error) { prune.CallTraces = math.MaxUint64 } - v, err = db.GetOne(dbutils.DatabaseInfoBucket, dbutils.StorageModeTEVM) + v, err = db.GetOne(kv.DatabaseInfo, kv.StorageModeTEVM) if err != nil { return prune, err } @@ -182,32 +181,32 @@ func (m Mode) String() string { return modeString } -func Override(db ethdb.RwTx, sm Mode) error { +func Override(db kv.RwTx, sm Mode) error { var ( err error ) - err = setDistance(db, dbutils.PruneDistanceHistory, sm.History) + err = setDistance(db, kv.PruneDistanceHistory, sm.History) if err != nil { return err } - err = setDistance(db, dbutils.PruneDistanceReceipts, sm.Receipts) + err = setDistance(db, kv.PruneDistanceReceipts, sm.Receipts) if err != nil { return err } - err = setDistance(db, dbutils.PruneDistanceTxIndex, sm.TxIndex) + err = setDistance(db, kv.PruneDistanceTxIndex, sm.TxIndex) if err != nil { return err } - err = setDistance(db, dbutils.PruneDistanceCallTraces, sm.CallTraces) + err = setDistance(db, kv.PruneDistanceCallTraces, sm.CallTraces) if err != nil { return err } - err = setMode(db, dbutils.StorageModeTEVM, sm.Experiments.TEVM) + err = setMode(db, kv.StorageModeTEVM, sm.Experiments.TEVM) if err != nil { return err } @@ -215,7 +214,7 @@ func Override(db ethdb.RwTx, sm Mode) error { return nil } -func SetIfNotExist(db ethdb.GetPut, pm Mode) error { +func SetIfNotExist(db kv.GetPut, pm Mode) error { var ( err error ) @@ -223,27 +222,27 @@ func SetIfNotExist(db ethdb.GetPut, pm Mode) error { pm = DefaultMode } - err = setDistanceOnEmpty(db, dbutils.PruneDistanceHistory, pm.History) + err = setDistanceOnEmpty(db, kv.PruneDistanceHistory, pm.History) if err != nil { return err } - err = setDistanceOnEmpty(db, dbutils.PruneDistanceReceipts, pm.Receipts) + err = setDistanceOnEmpty(db, kv.PruneDistanceReceipts, pm.Receipts) if err != nil { return err } - err = setDistanceOnEmpty(db, dbutils.PruneDistanceTxIndex, pm.TxIndex) + err = setDistanceOnEmpty(db, kv.PruneDistanceTxIndex, pm.TxIndex) if 
err != nil { return err } - err = setDistanceOnEmpty(db, dbutils.PruneDistanceCallTraces, pm.CallTraces) + err = setDistanceOnEmpty(db, kv.PruneDistanceCallTraces, pm.CallTraces) if err != nil { return err } - err = setModeOnEmpty(db, dbutils.StorageModeTEVM, pm.Experiments.TEVM) + err = setModeOnEmpty(db, kv.StorageModeTEVM, pm.Experiments.TEVM) if err != nil { return err } @@ -251,24 +250,24 @@ func SetIfNotExist(db ethdb.GetPut, pm Mode) error { return nil } -func setDistance(db ethdb.Putter, key []byte, distance Distance) error { +func setDistance(db kv.Putter, key []byte, distance Distance) error { v := make([]byte, 8) binary.BigEndian.PutUint64(v, uint64(distance)) - if err := db.Put(dbutils.DatabaseInfoBucket, key, v); err != nil { + if err := db.Put(kv.DatabaseInfo, key, v); err != nil { return err } return nil } -func setDistanceOnEmpty(db ethdb.GetPut, key []byte, distance Distance) error { - mode, err := db.GetOne(dbutils.DatabaseInfoBucket, key) +func setDistanceOnEmpty(db kv.GetPut, key []byte, distance Distance) error { + mode, err := db.GetOne(kv.DatabaseInfo, key) if err != nil { return err } if len(mode) == 0 || binary.BigEndian.Uint64(mode) == math.MaxUint64 { v := make([]byte, 8) binary.BigEndian.PutUint64(v, uint64(distance)) - if err = db.Put(dbutils.DatabaseInfoBucket, key, v); err != nil { + if err = db.Put(kv.DatabaseInfo, key, v); err != nil { return err } } @@ -276,19 +275,19 @@ func setDistanceOnEmpty(db ethdb.GetPut, key []byte, distance Distance) error { return nil } -func setMode(db ethdb.RwTx, key []byte, currentValue bool) error { +func setMode(db kv.RwTx, key []byte, currentValue bool) error { val := []byte{2} if currentValue { val = []byte{1} } - if err := db.Put(dbutils.DatabaseInfoBucket, key, val); err != nil { + if err := db.Put(kv.DatabaseInfo, key, val); err != nil { return err } return nil } -func setModeOnEmpty(db ethdb.GetPut, key []byte, currentValue bool) error { - mode, err := db.GetOne(dbutils.DatabaseInfoBucket, key) 
+func setModeOnEmpty(db kv.GetPut, key []byte, currentValue bool) error { + mode, err := db.GetOne(kv.DatabaseInfo, key) if err != nil { return err } @@ -297,7 +296,7 @@ func setModeOnEmpty(db ethdb.GetPut, key []byte, currentValue bool) error { if currentValue { val = []byte{1} } - if err = db.Put(dbutils.DatabaseInfoBucket, key, val); err != nil { + if err = db.Put(kv.DatabaseInfo, key, val); err != nil { return err } } diff --git a/ethdb/prune/storage_mode_test.go b/ethdb/prune/storage_mode_test.go index d121ab5642b..2bf0c143ffd 100644 --- a/ethdb/prune/storage_mode_test.go +++ b/ethdb/prune/storage_mode_test.go @@ -4,12 +4,12 @@ import ( "testing" "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/assert" ) func TestSetStorageModeIfNotExist(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) prune, err := Get(tx) assert.NoError(t, err) assert.Equal(t, Mode{true, math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64, Experiments{TEVM: false}}, prune) diff --git a/ethdb/kv/kv_remote.go b/ethdb/remotedb/kv_remote.go similarity index 93% rename from ethdb/kv/kv_remote.go rename to ethdb/remotedb/kv_remote.go index f9f93eb502e..18f7ee4db53 100644 --- a/ethdb/kv/kv_remote.go +++ b/ethdb/remotedb/kv_remote.go @@ -1,4 +1,4 @@ -package kv +package remotedb import ( "bytes" @@ -15,8 +15,8 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/log" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -28,17 +28,18 @@ import ( // generate the messages and services type remoteOpts struct { - bucketsCfg BucketConfigsFunc + bucketsCfg 
mdbx.BucketConfigsFunc inMemConn *bufconn.Listener // for tests DialAddress string version gointerfaces.Version + log log.Logger } type RemoteKV struct { conn *grpc.ClientConn remoteKV remote.KVClient log log.Logger - buckets dbutils.BucketsCfg + buckets kv.TableCfg opts remoteOpts } @@ -48,7 +49,7 @@ type remoteTx struct { streamCancelFn context.CancelFunc db *RemoteKV cursors []*remoteCursor - statelessCursors map[string]ethdb.Cursor + statelessCursors map[string]kv.Cursor streamingRequested bool } @@ -57,7 +58,7 @@ type remoteCursor struct { stream remote.KV_TxClient tx *remoteTx bucketName string - bucketCfg dbutils.BucketConfigItem + bucketCfg kv.TableConfigItem id uint32 } @@ -74,7 +75,7 @@ func (opts remoteOpts) Path(path string) remoteOpts { return opts } -func (opts remoteOpts) WithBucketsConfig(f BucketConfigsFunc) remoteOpts { +func (opts remoteOpts) WithBucketsConfig(f mdbx.BucketConfigsFunc) remoteOpts { opts.bucketsCfg = f return opts } @@ -152,9 +153,9 @@ func (opts remoteOpts) Open(certFile, keyFile, caCert string) (*RemoteKV, error) conn: conn, remoteKV: kvClient, log: log.New("remote_db", opts.DialAddress), - buckets: dbutils.BucketsCfg{}, + buckets: kv.TableCfg{}, } - customBuckets := opts.bucketsCfg(dbutils.BucketsConfigs) + customBuckets := opts.bucketsCfg(kv.BucketsConfigs) for name, cfg := range customBuckets { // copy map to avoid changing global variable db.buckets[name] = cfg } @@ -162,7 +163,7 @@ func (opts remoteOpts) Open(certFile, keyFile, caCert string) (*RemoteKV, error) return db, nil } -func (opts remoteOpts) MustOpen() ethdb.RwKV { +func (opts remoteOpts) MustOpen() kv.RwDB { db, err := opts.Open("", "", "") if err != nil { panic(err) @@ -173,11 +174,11 @@ func (opts remoteOpts) MustOpen() ethdb.RwKV { // NewRemote defines new remove KV connection (without actually opening it) // version parameters represent the version the KV client is expecting, // compatibility check will be performed when the KV connection opens -func 
NewRemote(v gointerfaces.Version) remoteOpts { - return remoteOpts{bucketsCfg: DefaultBucketConfigs, version: v} +func NewRemote(v gointerfaces.Version, logger log.Logger) remoteOpts { + return remoteOpts{bucketsCfg: mdbx.DefaultBucketConfigs, version: v, log: logger} } -func (db *RemoteKV) AllBuckets() dbutils.BucketsCfg { +func (db *RemoteKV) AllBuckets() kv.TableCfg { return db.buckets } @@ -212,7 +213,7 @@ func (db *RemoteKV) Close() { } } -func (db *RemoteKV) BeginRo(ctx context.Context) (ethdb.Tx, error) { +func (db *RemoteKV) BeginRo(ctx context.Context) (kv.Tx, error) { streamCtx, streamCancelFn := context.WithCancel(ctx) // We create child context for the stream so we can cancel it to prevent leak stream, err := db.remoteKV.Tx(streamCtx) if err != nil { @@ -222,11 +223,11 @@ func (db *RemoteKV) BeginRo(ctx context.Context) (ethdb.Tx, error) { return &remoteTx{ctx: ctx, db: db, stream: stream, streamCancelFn: streamCancelFn}, nil } -func (db *RemoteKV) BeginRw(ctx context.Context) (ethdb.RwTx, error) { +func (db *RemoteKV) BeginRw(ctx context.Context) (kv.RwTx, error) { return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") } -func (db *RemoteKV) View(ctx context.Context, f func(tx ethdb.Tx) error) (err error) { +func (db *RemoteKV) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { tx, err := db.BeginRo(ctx) if err != nil { return err @@ -236,7 +237,7 @@ func (db *RemoteKV) View(ctx context.Context, f func(tx ethdb.Tx) error) (err er return f(tx) } -func (db *RemoteKV) Update(ctx context.Context, f func(tx ethdb.RwTx) error) (err error) { +func (db *RemoteKV) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) { return fmt.Errorf("remote db provider doesn't support .Update method") } @@ -261,9 +262,9 @@ func (tx *remoteTx) Rollback() { tx.closeGrpcStream() } -func (tx *remoteTx) statelessCursor(bucket string) (ethdb.Cursor, error) { +func (tx *remoteTx) statelessCursor(bucket string) (kv.Cursor, error) { if 
tx.statelessCursors == nil { - tx.statelessCursors = make(map[string]ethdb.Cursor) + tx.statelessCursors = make(map[string]kv.Cursor) } c, ok := tx.statelessCursors[bucket] if !ok { @@ -368,7 +369,7 @@ func (c *remoteCursor) Prev() ([]byte, []byte, error) { return c.prev() } -func (tx *remoteTx) Cursor(bucket string) (ethdb.Cursor, error) { +func (tx *remoteTx) Cursor(bucket string) (kv.Cursor, error) { b := tx.db.buckets[bucket] c := &remoteCursor{tx: tx, ctx: tx.ctx, bucketName: bucket, bucketCfg: b, stream: tx.stream} tx.cursors = append(tx.cursors, c) @@ -605,7 +606,7 @@ func (c *remoteCursor) Close() { } } -func (tx *remoteTx) CursorDupSort(bucket string) (ethdb.CursorDupSort, error) { +func (tx *remoteTx) CursorDupSort(bucket string) (kv.CursorDupSort, error) { c, err := tx.Cursor(bucket) if err != nil { return nil, err diff --git a/ethdb/remote/remotedbserver/server.go b/ethdb/remotedbserver/server.go similarity index 61% rename from ethdb/remote/remotedbserver/server.go rename to ethdb/remotedbserver/server.go index 07f499b1d01..c9aa1724654 100644 --- a/ethdb/remote/remotedbserver/server.go +++ b/ethdb/remotedbserver/server.go @@ -4,23 +4,12 @@ import ( "context" "fmt" "io" - "net" "time" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/log" - "github.com/ledgerwatch/erigon/metrics" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" + "github.com/ledgerwatch/erigon/ethdb/kv" "google.golang.org/protobuf/types/known/emptypb" ) @@ 
-35,75 +24,16 @@ var KvServiceAPIVersion = &types.VersionReply{Major: 3, Minor: 0, Patch: 0} type KvServer struct { remote.UnimplementedKVServer // must be embedded to have forward compatible implementations. - kv ethdb.RwKV + kv kv.RwDB } -func StartGrpc(kv *KvServer, ethBackendSrv *EthBackendServer, txPoolServer *TxPoolServer, miningServer *MiningServer, addr string, rateLimit uint32, creds *credentials.TransportCredentials) (*grpc.Server, error) { - log.Info("Starting private RPC server", "on", addr) - lis, err := net.Listen("tcp", addr) - if err != nil { - return nil, fmt.Errorf("could not create listener: %w, addr=%s", err, addr) - } - - var ( - streamInterceptors []grpc.StreamServerInterceptor - unaryInterceptors []grpc.UnaryServerInterceptor - ) - streamInterceptors = append(streamInterceptors, grpc_recovery.StreamServerInterceptor()) - unaryInterceptors = append(unaryInterceptors, grpc_recovery.UnaryServerInterceptor()) - - if metrics.Enabled { - streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor) - unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) - } - - var grpcServer *grpc.Server - //cpus := uint32(runtime.GOMAXPROCS(-1)) - opts := []grpc.ServerOption{ - //grpc.NumStreamWorkers(cpus), // reduce amount of goroutines - grpc.WriteBufferSize(1024), // reduce buffers to save mem - grpc.ReadBufferSize(1024), - grpc.MaxConcurrentStreams(rateLimit), // to force clients reduce concurrency level - // Don't drop the connection, settings accordign to this comment on GitHub - // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779 - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 10 * time.Second, - PermitWithoutStream: true, - }), - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)), - } - if creds == nil { - // no specific opts - } else { - opts 
= append(opts, grpc.Creds(*creds)) - } - grpcServer = grpc.NewServer(opts...) - remote.RegisterETHBACKENDServer(grpcServer, ethBackendSrv) - txpool.RegisterTxpoolServer(grpcServer, txPoolServer) - txpool.RegisterMiningServer(grpcServer, miningServer) - remote.RegisterKVServer(grpcServer, kv) - - if metrics.Enabled { - grpc_prometheus.Register(grpcServer) - } - - go func() { - if err := grpcServer.Serve(lis); err != nil { - log.Error("private RPC server fail", "err", err) - } - }() - - return grpcServer, nil -} - -func NewKvServer(kv ethdb.RwKV) *KvServer { +func NewKvServer(kv kv.RwDB) *KvServer { return &KvServer{kv: kv} } // Version returns the service-side interface version number func (s *KvServer) Version(context.Context, *emptypb.Empty) (*types.VersionReply, error) { - dbSchemaVersion := &dbutils.DBSchemaVersion + dbSchemaVersion := &kv.DBSchemaVersion if KvServiceAPIVersion.Major > dbSchemaVersion.Major { return KvServiceAPIVersion, nil } @@ -132,7 +62,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { var CursorID uint32 type CursorInfo struct { bucket string - c ethdb.Cursor + c kv.Cursor k, v []byte //fields to save current position of cursor - used when Tx reopen } cursors := map[uint32]*CursorInfo{} @@ -176,7 +106,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { return err } switch casted := c.c.(type) { - case ethdb.CursorDupSort: + case kv.CursorDupSort: v, err := casted.SeekBothRange(c.k, c.v) if err != nil { return fmt.Errorf("server-side error: %w", err) @@ -187,7 +117,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { return fmt.Errorf("server-side error: %w", err) } } - case ethdb.Cursor: + case kv.Cursor: if _, _, err := c.c.Seek(c.k); err != nil { return fmt.Errorf("server-side error: %w", err) } @@ -195,7 +125,7 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { } } - var c ethdb.Cursor + var c kv.Cursor if in.BucketName == "" { cInfo, ok := cursors[in.Cursor] if !ok { @@ -240,30 +170,30 @@ func (s 
*KvServer) Tx(stream remote.KV_TxServer) error { } } -func handleOp(c ethdb.Cursor, stream remote.KV_TxServer, in *remote.Cursor) error { +func handleOp(c kv.Cursor, stream remote.KV_TxServer, in *remote.Cursor) error { var k, v []byte var err error switch in.Op { case remote.Op_FIRST: k, v, err = c.First() case remote.Op_FIRST_DUP: - v, err = c.(ethdb.CursorDupSort).FirstDup() + v, err = c.(kv.CursorDupSort).FirstDup() case remote.Op_SEEK: k, v, err = c.Seek(in.K) case remote.Op_SEEK_BOTH: - v, err = c.(ethdb.CursorDupSort).SeekBothRange(in.K, in.V) + v, err = c.(kv.CursorDupSort).SeekBothRange(in.K, in.V) case remote.Op_CURRENT: k, v, err = c.Current() case remote.Op_LAST: k, v, err = c.Last() case remote.Op_LAST_DUP: - v, err = c.(ethdb.CursorDupSort).LastDup() + v, err = c.(kv.CursorDupSort).LastDup() case remote.Op_NEXT: k, v, err = c.Next() case remote.Op_NEXT_DUP: - k, v, err = c.(ethdb.CursorDupSort).NextDup() + k, v, err = c.(kv.CursorDupSort).NextDup() case remote.Op_NEXT_NO_DUP: - k, v, err = c.(ethdb.CursorDupSort).NextNoDup() + k, v, err = c.(kv.CursorDupSort).NextNoDup() case remote.Op_PREV: k, v, err = c.Prev() //case remote.Op_PREV_DUP: @@ -279,7 +209,7 @@ func handleOp(c ethdb.Cursor, stream remote.KV_TxServer, in *remote.Cursor) erro case remote.Op_SEEK_EXACT: k, v, err = c.SeekExact(in.K) case remote.Op_SEEK_BOTH_EXACT: - k, v, err = c.(ethdb.CursorDupSort).SeekBothExact(in.K, in.V) + k, v, err = c.(kv.CursorDupSort).SeekBothExact(in.K, in.V) default: return fmt.Errorf("unknown operation: %s", in.Op) } diff --git a/ethdb/kv/kv_snapshot.go b/ethdb/snapshotdb/kv_snapshot.go similarity index 82% rename from ethdb/kv/kv_snapshot.go rename to ethdb/snapshotdb/kv_snapshot.go index 627a5603dcc..0a5b617b4d4 100644 --- a/ethdb/kv/kv_snapshot.go +++ b/ethdb/snapshotdb/kv_snapshot.go @@ -1,4 +1,4 @@ -package kv +package snapshotdb import ( "bytes" @@ -8,29 +8,29 @@ import ( "sync" "github.com/ledgerwatch/erigon/common" - 
"github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" ) var ( - _ ethdb.RwKV = &SnapshotKV{} - _ ethdb.RoKV = &SnapshotKV{} - _ ethdb.Tx = &snTX{} - _ ethdb.BucketMigrator = &snTX{} - _ ethdb.RwCursor = &snCursor{} - _ ethdb.Cursor = &snCursor{} + _ kv.RwDB = &SnapshotKV{} + _ kv.RoDB = &SnapshotKV{} + _ kv.Tx = &snTX{} + _ kv.BucketMigrator = &snTX{} + _ kv.RwCursor = &snCursor{} + _ kv.Cursor = &snCursor{} ) type SnapshotUpdater interface { - UpdateSnapshots(tp string, snapshotKV ethdb.RoKV, done chan struct{}) - HeadersSnapshot() ethdb.RoKV - BodiesSnapshot() ethdb.RoKV - StateSnapshot() ethdb.RoKV + UpdateSnapshots(tp string, snapshotKV kv.RoDB, done chan struct{}) + HeadersSnapshot() kv.RoDB + BodiesSnapshot() kv.RoDB + StateSnapshot() kv.RoDB } type WriteDB interface { - WriteDB() ethdb.RwKV + WriteDB() kv.RwDB } func NewSnapshotKV() snapshotOpts { @@ -39,26 +39,26 @@ func NewSnapshotKV() snapshotOpts { } type snapshotOpts struct { - db ethdb.RwKV - headersSnapshot ethdb.RoKV - bodiesSnapshot ethdb.RoKV - stateSnapshot ethdb.RoKV + db kv.RwDB + headersSnapshot kv.RoDB + bodiesSnapshot kv.RoDB + stateSnapshot kv.RoDB } -func (opts snapshotOpts) HeadersSnapshot(kv ethdb.RoKV) snapshotOpts { +func (opts snapshotOpts) HeadersSnapshot(kv kv.RoDB) snapshotOpts { opts.headersSnapshot = kv return opts } -func (opts snapshotOpts) BodiesSnapshot(kv ethdb.RoKV) snapshotOpts { +func (opts snapshotOpts) BodiesSnapshot(kv kv.RoDB) snapshotOpts { opts.bodiesSnapshot = kv return opts } -func (opts snapshotOpts) StateSnapshot(kv ethdb.RoKV) snapshotOpts { +func (opts snapshotOpts) StateSnapshot(kv kv.RoDB) snapshotOpts { opts.stateSnapshot = kv return opts } -func (opts snapshotOpts) DB(db ethdb.RwKV) snapshotOpts { +func (opts snapshotOpts) DB(db kv.RwDB) snapshotOpts { opts.db = db return opts } @@ -73,17 +73,17 @@ func (opts snapshotOpts) Open() *SnapshotKV { } 
type SnapshotKV struct { - db ethdb.RwKV - headersSnapshot ethdb.RoKV - bodiesSnapshot ethdb.RoKV - stateSnapshot ethdb.RoKV + db kv.RwDB + headersSnapshot kv.RoDB + bodiesSnapshot kv.RoDB + stateSnapshot kv.RoDB mtx sync.RWMutex - tmpDB ethdb.RwKV + tmpDB kv.RwDB tmpDBBuckets map[string]struct{} } -func (s *SnapshotKV) View(ctx context.Context, f func(tx ethdb.Tx) error) error { +func (s *SnapshotKV) View(ctx context.Context, f func(tx kv.Tx) error) error { snTX, err := s.BeginRo(ctx) if err != nil { return err @@ -92,7 +92,7 @@ func (s *SnapshotKV) View(ctx context.Context, f func(tx ethdb.Tx) error) error return f(snTX) } -func (s *SnapshotKV) Update(ctx context.Context, f func(tx ethdb.RwTx) error) error { +func (s *SnapshotKV) Update(ctx context.Context, f func(tx kv.RwTx) error) error { tx, err := s.BeginRw(ctx) if err != nil { return err @@ -121,8 +121,8 @@ func (s *SnapshotKV) Close() { } } -func (s *SnapshotKV) UpdateSnapshots(tp string, snapshotKV ethdb.RoKV, done chan struct{}) { - var toClose ethdb.RoKV +func (s *SnapshotKV) UpdateSnapshots(tp string, snapshotKV kv.RoDB, done chan struct{}) { + var toClose kv.RoDB s.mtx.Lock() defer s.mtx.Unlock() switch { @@ -148,15 +148,15 @@ func (s *SnapshotKV) UpdateSnapshots(tp string, snapshotKV ethdb.RoKV, done chan }() } -func (s *SnapshotKV) WriteDB() ethdb.RwKV { +func (s *SnapshotKV) WriteDB() kv.RwDB { return s.db } -func (s *SnapshotKV) TempDB() ethdb.RwKV { +func (s *SnapshotKV) TempDB() kv.RwDB { return s.tmpDB } -func (s *SnapshotKV) SetTempDB(kv ethdb.RwKV, buckets []string) { +func (s *SnapshotKV) SetTempDB(kv kv.RwDB, buckets []string) { bucketsMap := make(map[string]struct{}, len(buckets)) for _, bucket := range buckets { bucketsMap[bucket] = struct{}{} @@ -166,18 +166,18 @@ func (s *SnapshotKV) SetTempDB(kv ethdb.RwKV, buckets []string) { } //todo -func (s *SnapshotKV) HeadersSnapshot() ethdb.RoKV { +func (s *SnapshotKV) HeadersSnapshot() kv.RoDB { return s.headersSnapshot } -func (s *SnapshotKV) 
BodiesSnapshot() ethdb.RoKV { +func (s *SnapshotKV) BodiesSnapshot() kv.RoDB { return s.bodiesSnapshot } -func (s *SnapshotKV) StateSnapshot() ethdb.RoKV { +func (s *SnapshotKV) StateSnapshot() kv.RoDB { return s.stateSnapshot } -func (s *SnapshotKV) snapsthotsTx(ctx context.Context) (ethdb.Tx, ethdb.Tx, ethdb.Tx, error) { - var headersTX, bodiesTX, stateTX ethdb.Tx +func (s *SnapshotKV) snapsthotsTx(ctx context.Context) (kv.Tx, kv.Tx, kv.Tx, error) { + var headersTX, bodiesTX, stateTX kv.Tx var err error defer func() { if err != nil { @@ -212,12 +212,12 @@ func (s *SnapshotKV) snapsthotsTx(ctx context.Context) (ethdb.Tx, ethdb.Tx, ethd } return headersTX, bodiesTX, stateTX, nil } -func (s *SnapshotKV) BeginRo(ctx context.Context) (ethdb.Tx, error) { +func (s *SnapshotKV) BeginRo(ctx context.Context) (kv.Tx, error) { dbTx, err := s.db.BeginRo(ctx) if err != nil { return nil, err } - var tmpTX ethdb.Tx + var tmpTX kv.Tx if s.tmpDB != nil { tmpTX, err = s.tmpDB.BeginRo(context.Background()) if err != nil { @@ -238,13 +238,13 @@ func (s *SnapshotKV) BeginRo(ctx context.Context) (ethdb.Tx, error) { }, nil } -func (s *SnapshotKV) BeginRw(ctx context.Context) (ethdb.RwTx, error) { +func (s *SnapshotKV) BeginRw(ctx context.Context) (kv.RwTx, error) { dbTx, err := s.db.BeginRw(ctx) //nolint if err != nil { return nil, err } - var tmpTX ethdb.Tx + var tmpTX kv.Tx if s.tmpDB != nil { tmpTX, err = s.tmpDB.BeginRw(context.Background()) if err != nil { @@ -267,33 +267,33 @@ func (s *SnapshotKV) BeginRw(ctx context.Context) (ethdb.RwTx, error) { }, nil } -func (s *SnapshotKV) AllBuckets() dbutils.BucketsCfg { +func (s *SnapshotKV) AllBuckets() kv.TableCfg { return s.db.AllBuckets() } var ErrUnavailableSnapshot = errors.New("unavailable snapshot") type snTX struct { - dbTX ethdb.Tx - headersTX ethdb.Tx - bodiesTX ethdb.Tx - stateTX ethdb.Tx + dbTX kv.Tx + headersTX kv.Tx + bodiesTX kv.Tx + stateTX kv.Tx //just an experiment with temp db for state snapshot migration. 
- tmpTX ethdb.Tx + tmpTX kv.Tx buckets map[string]struct{} } type DBTX interface { - DBTX() ethdb.RwTx + DBTX() kv.RwTx } -func (s *snTX) DBTX() ethdb.RwTx { - return s.dbTX.(ethdb.RwTx) +func (s *snTX) DBTX() kv.RwTx { + return s.dbTX.(kv.RwTx) } -func (s *snTX) RwCursor(bucket string) (ethdb.RwCursor, error) { +func (s *snTX) RwCursor(bucket string) (kv.RwCursor, error) { if !IsSnapshotBucket(bucket) { - return s.dbTX.(ethdb.RwTx).RwCursor(bucket) + return s.dbTX.(kv.RwTx).RwCursor(bucket) } tx, err := s.getSnapshotTX(bucket) if err != nil && !errors.Is(err, ErrUnavailableSnapshot) { @@ -301,7 +301,7 @@ func (s *snTX) RwCursor(bucket string) (ethdb.RwCursor, error) { } //process only db buckets if errors.Is(err, ErrUnavailableSnapshot) { - return s.dbTX.(ethdb.RwTx).RwCursor(bucket) + return s.dbTX.(kv.RwTx).RwCursor(bucket) } snCursor2, err := tx.Cursor(bucket) @@ -314,7 +314,7 @@ func (s *snTX) RwCursor(bucket string) (ethdb.RwCursor, error) { if err != nil { return nil, err } - tmpDBCursor, err := s.tmpTX.(ethdb.RwTx).RwCursor(bucket) + tmpDBCursor, err := s.tmpTX.(kv.RwTx).RwCursor(bucket) if err != nil { return nil, err } @@ -327,7 +327,7 @@ func (s *snTX) RwCursor(bucket string) (ethdb.RwCursor, error) { snCursor: snCursor2, }, nil } - dbCursor, err := s.dbTX.(ethdb.RwTx).RwCursor(bucket) + dbCursor, err := s.dbTX.(kv.RwTx).RwCursor(bucket) if err != nil { return nil, err } @@ -340,26 +340,26 @@ func (s *snTX) RwCursor(bucket string) (ethdb.RwCursor, error) { } func (s *snTX) DropBucket(bucket string) error { - return s.dbTX.(ethdb.BucketMigrator).DropBucket(bucket) + return s.dbTX.(kv.BucketMigrator).DropBucket(bucket) } func (s *snTX) CreateBucket(bucket string) error { - return s.dbTX.(ethdb.BucketMigrator).CreateBucket(bucket) + return s.dbTX.(kv.BucketMigrator).CreateBucket(bucket) } func (s *snTX) ExistsBucket(bucket string) (bool, error) { - return s.dbTX.(ethdb.BucketMigrator).ExistsBucket(bucket) + return 
s.dbTX.(kv.BucketMigrator).ExistsBucket(bucket) } func (s *snTX) ClearBucket(bucket string) error { - return s.dbTX.(ethdb.BucketMigrator).ClearBucket(bucket) + return s.dbTX.(kv.BucketMigrator).ClearBucket(bucket) } func (s *snTX) ListBuckets() ([]string, error) { - return s.dbTX.(ethdb.BucketMigrator).ListBuckets() + return s.dbTX.(kv.BucketMigrator).ListBuckets() } -func (s *snTX) Cursor(bucket string) (ethdb.Cursor, error) { +func (s *snTX) Cursor(bucket string) (kv.Cursor, error) { if !IsSnapshotBucket(bucket) { return s.dbTX.Cursor(bucket) } @@ -400,7 +400,7 @@ func (s *snTX) Cursor(bucket string) (ethdb.Cursor, error) { }, nil } -func (s *snTX) CursorDupSort(bucket string) (ethdb.CursorDupSort, error) { +func (s *snTX) CursorDupSort(bucket string) (kv.CursorDupSort, error) { tx, err := s.getSnapshotTX(bucket) if err != nil && !errors.Is(err, ErrUnavailableSnapshot) { panic(err.Error()) @@ -427,12 +427,12 @@ func (s *snTX) CursorDupSort(bucket string) (ethdb.CursorDupSort, error) { }, nil } -func (s *snTX) RwCursorDupSort(bucket string) (ethdb.RwCursorDupSort, error) { +func (s *snTX) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { c, err := s.CursorDupSort(bucket) if err != nil { return nil, err } - return c.(ethdb.RwCursorDupSort), nil + return c.(kv.RwCursorDupSort), nil } func (s *snTX) GetOne(bucket string, key []byte) (val []byte, err error) { v, err := s.dbTX.GetOne(bucket, key) @@ -462,21 +462,21 @@ func (s *snTX) GetOne(bucket string, key []byte) (val []byte, err error) { func (s *snTX) Put(bucket string, k, v []byte) error { if s.tmpTX != nil && IsStateSnapshotSnapshotBucket(bucket) { - return s.tmpTX.(ethdb.RwTx).Put(bucket, k, v) + return s.tmpTX.(kv.RwTx).Put(bucket, k, v) } - return s.dbTX.(ethdb.RwTx).Put(bucket, k, v) + return s.dbTX.(kv.RwTx).Put(bucket, k, v) } func (s *snTX) Append(bucket string, k, v []byte) error { if s.tmpTX != nil && IsStateSnapshotSnapshotBucket(bucket) { - return s.tmpTX.(ethdb.RwTx).Put(bucket, k, v) + 
return s.tmpTX.(kv.RwTx).Put(bucket, k, v) } - return s.dbTX.(ethdb.RwTx).Append(bucket, k, v) + return s.dbTX.(kv.RwTx).Append(bucket, k, v) } func (s *snTX) AppendDup(bucket string, k, v []byte) error { if s.tmpTX != nil && IsStateSnapshotSnapshotBucket(bucket) { - return s.tmpTX.(ethdb.RwTx).Put(bucket, k, v) + return s.tmpTX.(kv.RwTx).Put(bucket, k, v) } - return s.dbTX.(ethdb.RwTx).AppendDup(bucket, k, v) + return s.dbTX.(kv.RwTx).AppendDup(bucket, k, v) } func (s *snTX) Delete(bucket string, k, v []byte) error { //note we can't use Delete here, because we can't change snapshots @@ -484,26 +484,26 @@ func (s *snTX) Delete(bucket string, k, v []byte) error { //so we are just marking that this value is deleted. //this value will be removed on snapshot merging if s.tmpTX != nil && IsStateSnapshotSnapshotBucket(bucket) { - return s.tmpTX.(ethdb.RwTx).Put(bucket, k, DeletedValue) + return s.tmpTX.(kv.RwTx).Put(bucket, k, DeletedValue) } - return s.dbTX.(ethdb.RwTx).Put(bucket, k, DeletedValue) + return s.dbTX.(kv.RwTx).Put(bucket, k, DeletedValue) } func (s *snTX) CollectMetrics() { - if rw, ok := s.dbTX.(ethdb.RwTx); ok { + if rw, ok := s.dbTX.(kv.RwTx); ok { rw.CollectMetrics() } } -func (s *snTX) getSnapshotTX(bucket string) (ethdb.Tx, error) { - var tx ethdb.Tx +func (s *snTX) getSnapshotTX(bucket string) (kv.Tx, error) { + var tx kv.Tx switch bucket { - case dbutils.HeadersBucket: + case kv.Headers: tx = s.headersTX - case dbutils.BlockBodyPrefix, dbutils.EthTx: + case kv.BlockBody, kv.EthTx: tx = s.bodiesTX - case dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.CodeBucket: + case kv.PlainStateBucket, kv.PlainContractCode, kv.CodeBucket: tx = s.stateTX } if tx == nil { @@ -634,7 +634,7 @@ func (s *snTX) BucketSize(bucket string) (uint64, error) { } func (s *snTX) IncrementSequence(bucket string, amount uint64) (uint64, error) { - return s.dbTX.(ethdb.RwTx).IncrementSequence(bucket, amount) + return s.dbTX.(kv.RwTx).IncrementSequence(bucket, 
amount) } func (s *snTX) ReadSequence(bucket string) (uint64, error) { @@ -656,8 +656,8 @@ func (s *snTX) DropBuckets(buckets ...string) error { var DeletedValue = []byte{0} type snCursor struct { - dbCursor ethdb.Cursor - snCursor ethdb.Cursor + dbCursor kv.Cursor + snCursor kv.Cursor currentKey []byte } @@ -908,15 +908,15 @@ func (s *snCursor) Current() ([]byte, []byte, error) { } func (s *snCursor) Put(k, v []byte) error { - return s.dbCursor.(ethdb.RwCursor).Put(k, v) + return s.dbCursor.(kv.RwCursor).Put(k, v) } func (s *snCursor) Append(k []byte, v []byte) error { - return s.dbCursor.(ethdb.RwCursor).Append(k, v) + return s.dbCursor.(kv.RwCursor).Append(k, v) } func (s *snCursor) Delete(k, v []byte) error { - return s.dbCursor.(ethdb.RwCursor).Put(k, DeletedValue) + return s.dbCursor.(kv.RwCursor).Put(k, DeletedValue) } func (s *snCursor) DeleteCurrent() error { @@ -933,8 +933,8 @@ func (s *snCursor) Close() { } type snCursorDup struct { - dbCursorDup ethdb.CursorDupSort - sndbCursorDup ethdb.CursorDupSort + dbCursorDup kv.CursorDupSort + sndbCursorDup kv.CursorDupSort snCursor } @@ -1022,11 +1022,11 @@ func IsSnapshotBucket(bucket string) bool { return IsStateSnapshotSnapshotBucket(bucket) || IsHeaderSnapshotSnapshotBucket(bucket) || IsBodiesSnapshotSnapshotBucket(bucket) } func IsHeaderSnapshotSnapshotBucket(bucket string) bool { - return bucket == dbutils.HeadersBucket + return bucket == kv.Headers } func IsBodiesSnapshotSnapshotBucket(bucket string) bool { - return bucket == dbutils.BlockBodyPrefix || bucket == dbutils.EthTx + return bucket == kv.BlockBody || bucket == kv.EthTx } func IsStateSnapshotSnapshotBucket(bucket string) bool { - return bucket == dbutils.PlainStateBucket || bucket == dbutils.PlainContractCodeBucket || bucket == dbutils.CodeBucket + return bucket == kv.PlainStateBucket || bucket == kv.PlainContractCode || bucket == kv.CodeBucket } diff --git a/ethdb/kv/kv_snapshot_property_test.go b/ethdb/snapshotdb/kv_snapshot_property_test.go 
similarity index 92% rename from ethdb/kv/kv_snapshot_property_test.go rename to ethdb/snapshotdb/kv_snapshot_property_test.go index d5138dbf5d2..cfe80d44664 100644 --- a/ethdb/kv/kv_snapshot_property_test.go +++ b/ethdb/snapshotdb/kv_snapshot_property_test.go @@ -1,12 +1,12 @@ -package kv +package snapshotdb import ( "context" "testing" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/require" "pgregory.net/rapid" ) @@ -18,20 +18,20 @@ func TestGetAndPut(t *testing.T) { type getPutkvMachine struct { bucket string - snKV ethdb.RwKV - modelKV ethdb.RwKV + snKV kv.RwDB + modelKV kv.RwDB snapshotKeys [][20]byte newKeys [][20]byte allKeys [][20]byte - snTX ethdb.RwTx - modelTX ethdb.RwTx + snTX kv.RwTx + modelTX kv.RwTx } func (m *getPutkvMachine) Init(t *rapid.T) { - m.bucket = dbutils.PlainStateBucket - m.snKV = NewMemKV() - m.modelKV = NewMemKV() + m.bucket = kv.PlainStateBucket + m.snKV = memdb.New() + m.modelKV = memdb.New() m.snapshotKeys = rapid.SliceOf(rapid.ArrayOf(20, rapid.Byte())).Filter(func(_v [][20]byte) bool { return len(_v) > 0 }).Draw(t, "generate keys").([][20]byte) @@ -62,7 +62,7 @@ func (m *getPutkvMachine) Init(t *rapid.T) { require.NoError(t, err) err = txModel.Commit() require.NoError(t, err) - m.snKV = NewSnapshotKV().StateSnapshot(m.snKV).DB(NewMemKV()).Open() + m.snKV = NewSnapshotKV().StateSnapshot(m.snKV).DB(memdb.New()).Open() } func (m *getPutkvMachine) Cleanup() { @@ -166,8 +166,8 @@ func (m *getPutkvMachine) Commit(t *rapid.T) { type getKVMachine struct { bucket string - snKV ethdb.RwKV - modelKV ethdb.RwKV + snKV kv.RwDB + modelKV kv.RwDB overWriteKeys [][20]byte snKeys [][20]byte newKeys [][20]byte @@ -175,9 +175,9 @@ type getKVMachine struct { } func (m *getKVMachine) Init(t *rapid.T) { - m.bucket = dbutils.PlainStateBucket - m.snKV = 
NewMemKV() - m.modelKV = NewMemKV() + m.bucket = kv.PlainStateBucket + m.snKV = memdb.New() + m.modelKV = memdb.New() m.snKeys = rapid.SliceOf(rapid.ArrayOf(20, rapid.Byte())).Filter(func(_v [][20]byte) bool { return len(_v) > 0 }).Draw(t, "generate keys").([][20]byte) @@ -205,7 +205,7 @@ func (m *getKVMachine) Init(t *rapid.T) { //save snapshot and wrap new write db err = txSn.Commit() require.NoError(t, err) - m.snKV = NewSnapshotKV().StateSnapshot(m.snKV).DB(NewMemKV()).Open() + m.snKV = NewSnapshotKV().StateSnapshot(m.snKV).DB(memdb.New()).Open() txSn, err = m.snKV.BeginRw(context.Background()) require.NoError(t, err) defer txSn.Rollback() @@ -242,12 +242,12 @@ func (m *getKVMachine) Get(t *rapid.T) { v1, v2 []byte err1, err2 error ) - err := m.snKV.View(context.Background(), func(tx ethdb.Tx) error { + err := m.snKV.View(context.Background(), func(tx kv.Tx) error { v1, err1 = tx.GetOne(m.bucket, key[:]) return nil }) require.NoError(t, err) - err = m.modelKV.View(context.Background(), func(tx ethdb.Tx) error { + err = m.modelKV.View(context.Background(), func(tx kv.Tx) error { v2, err2 = tx.GetOne(m.bucket, key[:]) return nil }) @@ -268,14 +268,14 @@ func TestCursorWithTX(t *testing.T) { type cursorKVMachine struct { bucket string - snKV ethdb.RwKV - modelKV ethdb.RwKV + snKV kv.RwDB + modelKV kv.RwDB - snTX ethdb.RwTx - modelTX ethdb.RwTx + snTX kv.RwTx + modelTX kv.RwTx - snCursor ethdb.RwCursor - modelCursor ethdb.RwCursor + snCursor kv.RwCursor + modelCursor kv.RwCursor snapshotKeys [][20]byte newKeys [][20]byte @@ -283,9 +283,9 @@ type cursorKVMachine struct { } func (m *cursorKVMachine) Init(t *rapid.T) { - m.bucket = dbutils.PlainStateBucket - m.snKV = NewMemKV() - m.modelKV = NewMemKV() + m.bucket = kv.PlainStateBucket + m.snKV = memdb.New() + m.modelKV = memdb.New() m.snapshotKeys = rapid.SliceOf(rapid.ArrayOf(20, rapid.Byte())).Filter(func(_v [][20]byte) bool { return len(_v) > 0 }).Draw(t, "generate keys").([][20]byte) @@ -320,7 +320,7 @@ func (m 
*cursorKVMachine) Init(t *rapid.T) { require.NoError(t, err) err = txModel.Commit() require.NoError(t, err) - m.snKV = NewSnapshotKV().StateSnapshot(m.snKV).DB(NewMemKV()).Open() + m.snKV = NewSnapshotKV().StateSnapshot(m.snKV).DB(memdb.New()).Open() } func (m *cursorKVMachine) Check(t *rapid.T) { diff --git a/ethdb/kv/kv_snapshot_test.go b/ethdb/snapshotdb/kv_snapshot_test.go similarity index 77% rename from ethdb/kv/kv_snapshot_test.go rename to ethdb/snapshotdb/kv_snapshot_test.go index 0445571ac80..c44e1fa6039 100644 --- a/ethdb/kv/kv_snapshot_test.go +++ b/ethdb/snapshotdb/kv_snapshot_test.go @@ -1,4 +1,4 @@ -package kv +package snapshotdb import ( "bytes" @@ -10,18 +10,23 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + kv2 "github.com/ledgerwatch/erigon/ethdb/memdb" + "github.com/ledgerwatch/erigon/log" "github.com/stretchr/testify/require" ) func TestSnapshot2Get(t *testing.T) { - sn1 := NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, + logger := log.New() + sn1 := mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.TableConfigItem{}, } }).InMem().MustOpen() defer sn1.Close() - err := sn1.Update(context.Background(), func(tx ethdb.RwTx) error { - bucket, err := tx.RwCursor(dbutils.HeadersBucket) + err := sn1.Update(context.Background(), func(tx kv.RwTx) error { + bucket, err := tx.RwCursor(kv.Headers) if err != nil { return err } @@ -40,14 +45,14 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(err) } - sn2 := NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{}, + sn2 := 
mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.BlockBody: kv.TableConfigItem{}, } }).InMem().MustOpen() defer sn2.Close() - err = sn2.Update(context.Background(), func(tx ethdb.RwTx) error { - bucket, err := tx.RwCursor(dbutils.BlockBodyPrefix) + err = sn2.Update(context.Background(), func(tx kv.RwTx) error { + bucket, err := tx.RwCursor(kv.BlockBody) require.NoError(t, err) innerErr := bucket.Put(dbutils.BlockBodyKey(1, common.Hash{1}), []byte{1}) if innerErr != nil { @@ -64,9 +69,9 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - err = mainDB.Update(context.Background(), func(tx ethdb.RwTx) error { - bucket, err := tx.RwCursor(dbutils.HeadersBucket) + mainDB := kv2.NewTestDB(t) + err = mainDB.Update(context.Background(), func(tx kv.RwTx) error { + bucket, err := tx.RwCursor(kv.Headers) if err != nil { return err } @@ -79,7 +84,7 @@ func TestSnapshot2Get(t *testing.T) { return innerErr } - bucket, err = tx.RwCursor(dbutils.BlockBodyPrefix) + bucket, err = tx.RwCursor(kv.BlockBody) if err != nil { return err } @@ -99,16 +104,16 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(err) } - kv := NewSnapshotKV().DB(mainDB).HeadersSnapshot(sn1). + db := NewSnapshotKV().DB(mainDB).HeadersSnapshot(sn1). 
BodiesSnapshot(sn2).Open() - tx, err := kv.BeginRo(context.Background()) + tx, err := db.BeginRo(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - v, err := tx.GetOne(dbutils.HeadersBucket, dbutils.HeaderKey(1, common.Hash{1})) + v, err := tx.GetOne(kv.Headers, dbutils.HeaderKey(1, common.Hash{1})) if err != nil { t.Fatal(err) } @@ -116,7 +121,7 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(v) } - v, err = tx.GetOne(dbutils.HeadersBucket, dbutils.HeaderKey(2, common.Hash{2})) + v, err = tx.GetOne(kv.Headers, dbutils.HeaderKey(2, common.Hash{2})) if err != nil { t.Fatal(err) } @@ -124,7 +129,7 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(v) } - v, err = tx.GetOne(dbutils.HeadersBucket, dbutils.HeaderKey(3, common.Hash{3})) + v, err = tx.GetOne(kv.Headers, dbutils.HeaderKey(3, common.Hash{3})) if err != nil { t.Fatal(err) } @@ -132,7 +137,7 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(v) } - v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(1, common.Hash{1})) + v, err = tx.GetOne(kv.BlockBody, dbutils.BlockBodyKey(1, common.Hash{1})) if err != nil { t.Fatal(err) } @@ -140,7 +145,7 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(v) } - v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(2, common.Hash{2})) + v, err = tx.GetOne(kv.BlockBody, dbutils.BlockBodyKey(2, common.Hash{2})) if err != nil { t.Fatal(err) } @@ -148,7 +153,7 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(v) } - v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(3, common.Hash{3})) + v, err = tx.GetOne(kv.BlockBody, dbutils.BlockBodyKey(3, common.Hash{3})) if err != nil { t.Fatal(err) } @@ -156,7 +161,7 @@ func TestSnapshot2Get(t *testing.T) { t.Fatal(v) } - headerCursor, err := tx.Cursor(dbutils.HeadersBucket) + headerCursor, err := tx.Cursor(kv.Headers) require.NoError(t, err) k, v, err := headerCursor.Last() require.NoError(t, err) @@ -192,16 +197,17 @@ func TestSnapshot2Get(t *testing.T) { } func 
TestSnapshot2WritableTxAndGet(t *testing.T) { - sn1 := NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, + logger := log.New() + sn1 := mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.TableConfigItem{}, } }).InMem().MustOpen() defer sn1.Close() { - err := sn1.Update(context.Background(), func(tx ethdb.RwTx) error { - bucket, err := tx.RwCursor(dbutils.HeadersBucket) + err := sn1.Update(context.Background(), func(tx kv.RwTx) error { + bucket, err := tx.RwCursor(kv.Headers) require.NoError(t, err) innerErr := bucket.Put(dbutils.HeaderKey(1, common.Hash{1}), []byte{1}) if innerErr != nil { @@ -219,15 +225,15 @@ func TestSnapshot2WritableTxAndGet(t *testing.T) { } } - sn2 := NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{}, + sn2 := mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.BlockBody: kv.TableConfigItem{}, } }).InMem().MustOpen() defer sn2.Close() { - err := sn2.Update(context.Background(), func(tx ethdb.RwTx) error { - bucket, err := tx.RwCursor(dbutils.BlockBodyPrefix) + err := sn2.Update(context.Background(), func(tx kv.RwTx) error { + bucket, err := tx.RwCursor(kv.BlockBody) require.NoError(t, err) innerErr := bucket.Put(dbutils.BlockBodyKey(1, common.Hash{1}), []byte{1}) if innerErr != nil { @@ -243,37 +249,37 @@ func TestSnapshot2WritableTxAndGet(t *testing.T) { require.NoError(t, err) } - mainDB := NewTestKV(t) + mainDB := kv2.NewTestDB(t) - kv := NewSnapshotKV().DB(mainDB).HeadersSnapshot(sn1).BodiesSnapshot(sn2).Open() + db := NewSnapshotKV().DB(mainDB).HeadersSnapshot(sn1).BodiesSnapshot(sn2).Open() { - tx, err := kv.BeginRw(context.Background()) + tx, err := 
db.BeginRw(context.Background()) require.NoError(t, err) defer tx.Rollback() - v, err := tx.GetOne(dbutils.HeadersBucket, dbutils.HeaderKey(1, common.Hash{1})) + v, err := tx.GetOne(kv.Headers, dbutils.HeaderKey(1, common.Hash{1})) require.NoError(t, err) if !bytes.Equal(v, []byte{1}) { t.Fatal(v) } - v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(1, common.Hash{1})) + v, err = tx.GetOne(kv.BlockBody, dbutils.BlockBodyKey(1, common.Hash{1})) require.NoError(t, err) if !bytes.Equal(v, []byte{1}) { t.Fatal(v) } - err = tx.Put(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(4, common.Hash{4}), []byte{4}) + err = tx.Put(kv.BlockBody, dbutils.BlockBodyKey(4, common.Hash{4}), []byte{4}) require.NoError(t, err) - err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(4, common.Hash{4}), []byte{4}) + err = tx.Put(kv.Headers, dbutils.HeaderKey(4, common.Hash{4}), []byte{4}) require.NoError(t, err) err = tx.Commit() require.NoError(t, err) } - tx, err := kv.BeginRo(context.Background()) + tx, err := db.BeginRo(context.Background()) require.NoError(t, err) defer tx.Rollback() - c, err := tx.Cursor(dbutils.HeadersBucket) + c, err := tx.Cursor(kv.Headers) require.NoError(t, err) k, v, err := c.First() require.NoError(t, err) @@ -304,7 +310,7 @@ func TestSnapshot2WritableTxAndGet(t *testing.T) { t.Fatal(k, v, err) } - c, err = tx.Cursor(dbutils.BlockBodyPrefix) + c, err = tx.Cursor(kv.BlockBody) require.NoError(t, err) k, v, err = c.First() require.NoError(t, err) @@ -350,19 +356,19 @@ func TestSnapshot2WritableTxWalkReplaceAndCreateNewKey(t *testing.T) { if err != nil { t.Fatal(err) } - mainDB := NewTestKV(t) + mainDB := kv2.NewTestDB(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). 
Open() - defer kv.Close() + defer db.Close() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.RwCursor(dbutils.PlainStateBucket) + c, err := tx.RwCursor(kv.PlainStateBucket) require.NoError(t, err) replaceKey := dbutils.PlainGenerateCompositeStorageKey([]byte{2}, 1, []byte{4}) replaceValue := []byte{2, 4, 4} @@ -422,19 +428,19 @@ func TestSnapshot2WritableTxWalkAndDeleteKey(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) - deleteCursor, err := tx.RwCursor(dbutils.PlainStateBucket) + deleteCursor, err := tx.RwCursor(kv.PlainStateBucket) require.NoError(t, err) //get first correct k&v @@ -498,19 +504,19 @@ func TestSnapshot2WritableTxNextAndPrevAndDeleteKey(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). 
Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) - deleteCursor, err := tx.RwCursor(dbutils.PlainStateBucket) + deleteCursor, err := tx.RwCursor(kv.PlainStateBucket) require.NoError(t, err) //get first correct k&v @@ -606,16 +612,16 @@ func TestSnapshot2WritableTxWalkLastElementIsSnapshot(t *testing.T) { t.Fatal(err) } - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) //get first correct k&v k, v, err := c.First() @@ -691,16 +697,16 @@ func TestSnapshot2WritableTxWalkForwardAndBackward(t *testing.T) { t.Fatal(err) } - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) //get first correct k&v k, v, err := c.First() @@ -787,17 +793,17 @@ func TestSnapshot2WalkByEmptyDB(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). 
Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) i := 0 @@ -825,20 +831,20 @@ func TestSnapshot2WritablePrevAndDeleteKey(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) require.NoError(t, err) defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) //get first correct k&v k, v, err := c.First() if err != nil { - printBucket(kv, dbutils.PlainStateBucket) + printBucket(db, kv.PlainStateBucket) t.Fatal(err) } checkKV(t, k, v, data[0].K, data[0].V) @@ -888,16 +894,16 @@ func TestSnapshot2WritableTxNextAndPrevWithDeleteAndPutKeys(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). Open() - tx, err := kv.BeginRw(context.Background()) + tx, err := db.BeginRw(context.Background()) require.NoError(t, err) defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) require.NoError(t, err) - deleteCursor, err := tx.RwCursor(dbutils.PlainStateBucket) + deleteCursor, err := tx.RwCursor(kv.PlainStateBucket) require.NoError(t, err) //get first correct k&v @@ -988,16 +994,16 @@ func TestSnapshotUpdateSnapshot(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). 
Open() - tx, err := kv.BeginRo(context.Background()) + tx, err := db.BeginRo(context.Background()) if err != nil { t.Fatal(err) } defer tx.Rollback() - c, err := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(kv.PlainStateBucket) if err != nil { t.Fatal(err) } @@ -1009,15 +1015,15 @@ func TestSnapshotUpdateSnapshot(t *testing.T) { checkKVErr(t, k, v, err, []byte{1}, []byte{1}) done := make(chan struct{}) - kv.UpdateSnapshots("state", snapshotDB2, done) + db.UpdateSnapshots("state", snapshotDB2, done) - tx2, err := kv.BeginRo(context.Background()) + tx2, err := db.BeginRo(context.Background()) if err != nil { t.Fatal(err) } defer tx2.Rollback() - c2, err := tx2.Cursor(dbutils.PlainStateBucket) + c2, err := tx2.Cursor(kv.PlainStateBucket) if err != nil { t.Fatal(err) } @@ -1091,11 +1097,11 @@ func TestPlainStateProxy(t *testing.T) { t.Fatal(err) } - mainDB := NewTestKV(t) - kv := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). + mainDB := kv2.NewTestDB(t) + db := NewSnapshotKV().DB(mainDB).StateSnapshot(snapshotDB). 
Open() - err = kv.Update(context.Background(), func(tx ethdb.RwTx) error { - c, err := tx.RwCursor(dbutils.PlainStateBucket) + err = db.Update(context.Background(), func(tx kv.RwTx) error { + c, err := tx.RwCursor(kv.PlainStateBucket) if err != nil { return err } @@ -1111,19 +1117,19 @@ func TestPlainStateProxy(t *testing.T) { t.Fatal(err) } - tmpDB := NewTestKV(t) - kv.SetTempDB(tmpDB, []string{dbutils.PlainStateBucket}) + tmpDB := kv2.NewTestDB(t) + db.SetTempDB(tmpDB, []string{kv.PlainStateBucket}) nonStateKey := []byte{11} nonStateValue := []byte{99} - err = kv.Update(context.Background(), func(tx ethdb.RwTx) error { - err = tx.Put(dbutils.BlockBodyPrefix, nonStateKey, nonStateValue) + err = db.Update(context.Background(), func(tx kv.RwTx) error { + err = tx.Put(kv.BlockBody, nonStateKey, nonStateValue) if err != nil { return err } - c, err := tx.RwCursor(dbutils.PlainStateBucket) + c, err := tx.RwCursor(kv.PlainStateBucket) if err != nil { return err } @@ -1141,8 +1147,8 @@ func TestPlainStateProxy(t *testing.T) { } fullStateResult := []KvData{} - err = kv.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BlockBodyPrefix, nonStateKey) + err = db.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BlockBody, nonStateKey) if err != nil { t.Error(err) } @@ -1150,7 +1156,7 @@ func TestPlainStateProxy(t *testing.T) { t.Error(v, nonStateValue) } - return tx.ForEach(dbutils.PlainStateBucket, []byte{}, func(k, v []byte) error { + return tx.ForEach(kv.PlainStateBucket, []byte{}, func(k, v []byte) error { fullStateResult = append(fullStateResult, KvData{ K: k, V: v, @@ -1165,8 +1171,8 @@ func TestPlainStateProxy(t *testing.T) { require.Equal(t, fullStateExpected, fullStateResult) tmpDBResult := []KvData{} - err = kv.tmpDB.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BlockBodyPrefix, nonStateKey) + err = db.tmpDB.View(context.Background(), func(tx kv.Tx) error { + v, err := 
tx.GetOne(kv.BlockBody, nonStateKey) if err != nil { t.Error(err) } @@ -1174,7 +1180,7 @@ func TestPlainStateProxy(t *testing.T) { t.Error(v) } - return tx.ForEach(dbutils.PlainStateBucket, []byte{}, func(k, v []byte) error { + return tx.ForEach(kv.PlainStateBucket, []byte{}, func(k, v []byte) error { tmpDBResult = append(tmpDBResult, KvData{ K: k, V: v, @@ -1188,8 +1194,8 @@ func TestPlainStateProxy(t *testing.T) { require.Equal(t, tmpDBData, tmpDBData) writeDBResult := []KvData{} - err = kv.WriteDB().View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BlockBodyPrefix, nonStateKey) + err = db.WriteDB().View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BlockBody, nonStateKey) if err != nil { t.Error(err) } @@ -1197,7 +1203,7 @@ func TestPlainStateProxy(t *testing.T) { t.Error(v, nonStateValue) } - return tx.ForEach(dbutils.PlainStateBucket, []byte{}, func(k, v []byte) error { + return tx.ForEach(kv.PlainStateBucket, []byte{}, func(k, v []byte) error { writeDBResult = append(writeDBResult, KvData{ K: k, V: v, @@ -1214,12 +1220,12 @@ func TestPlainStateProxy(t *testing.T) { } -func printBucket(kv ethdb.RoKV, bucket string) { +func printBucket(db kv.RoDB, bucket string) { fmt.Println("+Print bucket", bucket) defer func() { fmt.Println("-Print bucket", bucket) }() - err := kv.View(context.Background(), func(tx ethdb.Tx) error { + err := db.View(context.Background(), func(tx kv.Tx) error { c, err := tx.Cursor(bucket) if err != nil { return err @@ -1272,15 +1278,15 @@ type KvData struct { V []byte } -func GenStateData(data []KvData) (ethdb.RwKV, error) { - snapshot := NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.PlainStateBucket: dbutils.BucketConfigItem{}, +func GenStateData(data []KvData) (kv.RwDB, error) { + snapshot := mdbx.NewMDBX(log.New()).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + 
kv.PlainStateBucket: kv.TableConfigItem{}, } }).InMem().MustOpen() - err := snapshot.Update(context.Background(), func(tx ethdb.RwTx) error { - c, err := tx.RwCursor(dbutils.PlainStateBucket) + err := snapshot.Update(context.Background(), func(tx kv.RwTx) error { + c, err := tx.RwCursor(kv.PlainStateBucket) if err != nil { return err } diff --git a/ethdb/walk.go b/ethdb/walk.go index fa3c2b019c4..0117f0673d7 100644 --- a/ethdb/walk.go +++ b/ethdb/walk.go @@ -18,6 +18,8 @@ package ethdb import ( "bytes" + + "github.com/ledgerwatch/erigon/ethdb/kv" ) // splitCursor implements cursor with two keys @@ -28,8 +30,8 @@ import ( // functions `Seek` and `Next` deliver both // parts as well as the corresponding value type splitCursor struct { - c Cursor // Unlerlying cursor - startkey []byte // Starting key (also contains bits that need to be preserved) + c kv.Cursor // Unlerlying cursor + startkey []byte // Starting key (also contains bits that need to be preserved) matchBytes int mask uint8 part1end int // Position in the key where the first part ends @@ -37,7 +39,7 @@ type splitCursor struct { part3start int // Position in the key where the third part starts } -func NewSplitCursor(c Cursor, startkey []byte, matchBits int, part1end, part2start, part3start int) *splitCursor { +func NewSplitCursor(c kv.Cursor, startkey []byte, matchBits int, part1end, part2start, part3start int) *splitCursor { var sc splitCursor sc.c = c sc.startkey = startkey diff --git a/log/handler.go b/log/handler.go index 5e187b76623..792939c2879 100644 --- a/log/handler.go +++ b/log/handler.go @@ -2,14 +2,15 @@ package log import ( "fmt" - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" "io" "net" "os" "reflect" "sync" + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" + "github.com/go-stack/stack" ) diff --git a/migrations/call_trace_index.go b/migrations/call_trace_index.go index 2faf07ec2a4..a736fb7b5fe 100644 --- a/migrations/call_trace_index.go +++ 
b/migrations/call_trace_index.go @@ -4,17 +4,16 @@ import ( "context" "encoding/binary" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" ) var rebuilCallTraceIndex = Migration{ Name: "rebuild_call_trace_index", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -22,7 +21,7 @@ var rebuilCallTraceIndex = Migration{ defer tx.Rollback() // Find the lowest key in the TraceCallSet table - c, err := tx.CursorDupSort(dbutils.CallTraceSet) + c, err := tx.CursorDupSort(kv.CallTraceSet) if err != nil { return err } diff --git a/migrations/db_schema_version.go b/migrations/db_schema_version.go index a5d07aa2b63..ea555633965 100644 --- a/migrations/db_schema_version.go +++ b/migrations/db_schema_version.go @@ -3,12 +3,12 @@ package migrations import ( "context" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) var dbSchemaVersion = Migration{ Name: "db_schema_version", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err diff --git a/migrations/fix_sequences.go b/migrations/fix_sequences.go index 2bb679af856..c1274951bb9 100644 --- a/migrations/fix_sequences.go +++ b/migrations/fix_sequences.go @@ -3,17 +3,16 @@ package migrations import ( "context" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" + 
"github.com/ledgerwatch/erigon/ethdb/kv" ) var oldSequences = map[string]string{ - dbutils.EthTx: "eth_tx", + kv.EthTx: "eth_tx", } var fixSequences = Migration{ Name: "fix_sequences", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -21,13 +20,13 @@ var fixSequences = Migration{ defer tx.Rollback() for bkt, oldbkt := range oldSequences { - seq, getErr := tx.GetOne(dbutils.Sequence, []byte(oldbkt)) + seq, getErr := tx.GetOne(kv.Sequence, []byte(oldbkt)) if getErr != nil { return getErr } if seq != nil { - putErr := tx.Put(dbutils.Sequence, []byte(bkt), seq) + putErr := tx.Put(kv.Sequence, []byte(bkt), seq) if putErr != nil { return putErr } diff --git a/migrations/header_prefix.go b/migrations/header_prefix.go index ae9e47fe525..73bfe7ac1e4 100644 --- a/migrations/header_prefix.go +++ b/migrations/header_prefix.go @@ -8,19 +8,19 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/etl" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) var headerPrefixToSeparateBuckets = Migration{ Name: "header_prefix_to_separate_buckets", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - exists, err := tx.ExistsBucket(dbutils.HeaderPrefixOld) + exists, err := tx.ExistsBucket(kv.HeaderPrefixOld) if err != nil { return err } @@ -31,10 +31,10 @@ var headerPrefixToSeparateBuckets = Migration{ return tx.Commit() } - if err = tx.ClearBucket(dbutils.HeaderCanonicalBucket); err != nil { + if err = tx.ClearBucket(kv.HeaderCanonical); err 
!= nil { return err } - if err = tx.ClearBucket(dbutils.HeaderTDBucket); err != nil { + if err = tx.ClearBucket(kv.HeaderTD); err != nil { return err } logPrefix := "split_header_prefix_bucket" @@ -104,7 +104,7 @@ var headerPrefixToSeparateBuckets = Migration{ headersCollector.Close(logPrefix) }() - err = tx.ForEach(dbutils.HeaderPrefixOld, []byte{}, func(k, v []byte) error { + err = tx.ForEach(kv.HeaderPrefixOld, []byte{}, func(k, v []byte) error { var innerErr error switch { case IsHeaderKey(k): @@ -121,19 +121,19 @@ var headerPrefixToSeparateBuckets = Migration{ } return nil }) - if err = tx.DropBucket(dbutils.HeaderPrefixOld); err != nil { + if err = tx.DropBucket(kv.HeaderPrefixOld); err != nil { return err } LoadStep: // Now transaction would have been re-opened, and we should be re-using the space - if err = canonicalCollector.Load(logPrefix, tx, dbutils.HeaderCanonicalBucket, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + if err = canonicalCollector.Load(logPrefix, tx, kv.HeaderCanonical, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { return fmt.Errorf("loading the transformed data back into the storage table: %w", err) } - if err = tdCollector.Load(logPrefix, tx, dbutils.HeaderTDBucket, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + if err = tdCollector.Load(logPrefix, tx, kv.HeaderTD, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { return fmt.Errorf("loading the transformed data back into the acc table: %w", err) } - if err = headersCollector.Load(logPrefix, tx, dbutils.HeadersBucket, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + if err = headersCollector.Load(logPrefix, tx, kv.Headers, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { return fmt.Errorf("loading the transformed data back into the acc table: %w", err) } if err := BeforeCommit(tx, nil, true); err != nil { diff --git a/migrations/header_prefix_test.go b/migrations/header_prefix_test.go index 70020175e10..e1b768dc4a6 100644 --- 
a/migrations/header_prefix_test.go +++ b/migrations/header_prefix_test.go @@ -9,43 +9,43 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestHeaderPrefix(t *testing.T) { require := require.New(t) - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) - err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - err := tx.CreateBucket(dbutils.HeaderPrefixOld) + err := db.Update(context.Background(), func(tx kv.RwTx) error { + err := tx.CreateBucket(kv.HeaderPrefixOld) if err != nil { return err } for i := uint64(0); i < 10; i++ { //header - err = tx.Put(dbutils.HeaderPrefixOld, dbutils.HeaderKey(i, common.Hash{uint8(i)}), []byte("header "+strconv.Itoa(int(i)))) + err = tx.Put(kv.HeaderPrefixOld, dbutils.HeaderKey(i, common.Hash{uint8(i)}), []byte("header "+strconv.Itoa(int(i)))) require.NoError(err) //canonical - err = tx.Put(dbutils.HeaderPrefixOld, HeaderHashKey(i), common.Hash{uint8(i)}.Bytes()) + err = tx.Put(kv.HeaderPrefixOld, HeaderHashKey(i), common.Hash{uint8(i)}.Bytes()) require.NoError(err) - err = tx.Put(dbutils.HeaderPrefixOld, append(dbutils.HeaderKey(i, common.Hash{uint8(i)}), HeaderTDSuffix...), []byte{uint8(i)}) + err = tx.Put(kv.HeaderPrefixOld, append(dbutils.HeaderKey(i, common.Hash{uint8(i)}), HeaderTDSuffix...), []byte{uint8(i)}) require.NoError(err) } return nil }) require.NoError(err) - migrator := NewMigrator(ethdb.Chain) + migrator := NewMigrator(kv.ChainDB) migrator.Migrations = []Migration{headerPrefixToSeparateBuckets} err = migrator.Apply(db, t.TempDir()) require.NoError(err) num := 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeaderCanonicalBucket, []byte{}, func(k, v []byte) error { + err = db.View(context.Background(), func(tx 
kv.Tx) error { + return tx.ForEach(kv.HeaderCanonical, []byte{}, func(k, v []byte) error { require.Len(k, 8) bytes.Equal(v, common.Hash{uint8(binary.BigEndian.Uint64(k))}.Bytes()) num++ @@ -56,8 +56,8 @@ func TestHeaderPrefix(t *testing.T) { require.Equal(num, 10) num = 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeaderTDBucket, []byte{}, func(k, v []byte) error { + err = db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.HeaderTD, []byte{}, func(k, v []byte) error { require.Len(k, 40) bytes.Equal(v, []byte{uint8(binary.BigEndian.Uint64(k))}) num++ @@ -68,8 +68,8 @@ func TestHeaderPrefix(t *testing.T) { require.Equal(num, 10) num = 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeadersBucket, []byte{}, func(k, v []byte) error { + err = db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.Headers, []byte{}, func(k, v []byte) error { require.Len(k, 40) bytes.Equal(v, []byte("header "+strconv.Itoa(int(binary.BigEndian.Uint64(k))))) num++ diff --git a/migrations/migrations.go b/migrations/migrations.go index 3ab17d410f0..ed57df2ef33 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -8,9 +8,8 @@ import ( "path" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ugorji/go/codec" ) @@ -53,8 +52,8 @@ import ( // }, // - if you need migrate multiple buckets - create separate migration for each bucket // - write test where apply migration twice -var migrations = map[ethdb.Label][]Migration{ - ethdb.Chain: { +var migrations = map[kv.Label][]Migration{ + kv.ChainDB: { headerPrefixToSeparateBuckets, removeCliqueBucket, dbSchemaVersion, @@ -62,14 +61,14 @@ var migrations = 
map[ethdb.Label][]Migration{ fixSequences, storageMode, }, - ethdb.TxPool: {}, - ethdb.Sentry: {}, + kv.TxPoolDB: {}, + kv.SentryDB: {}, } -type Callback func(tx ethdb.RwTx, progress []byte, isDone bool) error +type Callback func(tx kv.RwTx, progress []byte, isDone bool) error type Migration struct { Name string - Up func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) error + Up func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) error } var ( @@ -78,7 +77,7 @@ var ( ErrMigrationETLFilesDeleted = fmt.Errorf("db migration progress was interrupted after extraction step and ETL files was deleted, please contact development team for help or re-sync from scratch") ) -func NewMigrator(label ethdb.Label) *Migrator { +func NewMigrator(label kv.Label) *Migrator { return &Migrator{ Migrations: migrations[label], } @@ -88,9 +87,9 @@ type Migrator struct { Migrations []Migration } -func AppliedMigrations(tx ethdb.Tx, withPayload bool) (map[string][]byte, error) { +func AppliedMigrations(tx kv.Tx, withPayload bool) (map[string][]byte, error) { applied := map[string][]byte{} - err := tx.ForEach(dbutils.Migrations, nil, func(k []byte, v []byte) error { + err := tx.ForEach(kv.Migrations, nil, func(k []byte, v []byte) error { if bytes.HasPrefix(k, []byte("_progress_")) { return nil } @@ -104,9 +103,9 @@ func AppliedMigrations(tx ethdb.Tx, withPayload bool) (map[string][]byte, error) return applied, err } -func (m *Migrator) HasPendingMigrations(db ethdb.RwKV) (bool, error) { +func (m *Migrator) HasPendingMigrations(db kv.RwDB) (bool, error) { var has bool - if err := db.View(context.Background(), func(tx ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { pending, err := m.PendingMigrations(tx) if err != nil { return err @@ -119,7 +118,7 @@ func (m *Migrator) HasPendingMigrations(db ethdb.RwKV) (bool, error) { return has, nil } -func (m *Migrator) PendingMigrations(tx ethdb.Tx) ([]Migration, error) { +func (m 
*Migrator) PendingMigrations(tx kv.Tx) ([]Migration, error) { applied, err := AppliedMigrations(tx, false) if err != nil { return nil, err @@ -145,13 +144,13 @@ func (m *Migrator) PendingMigrations(tx ethdb.Tx) ([]Migration, error) { return pending, nil } -func (m *Migrator) Apply(db ethdb.RwKV, datadir string) error { +func (m *Migrator) Apply(db kv.RwDB, datadir string) error { if len(m.Migrations) == 0 { return nil } var applied map[string][]byte - if err := db.View(context.Background(), func(tx ethdb.Tx) error { + if err := db.View(context.Background(), func(tx kv.Tx) error { var err error applied, err = AppliedMigrations(tx, false) return err @@ -179,17 +178,17 @@ func (m *Migrator) Apply(db ethdb.RwKV, datadir string) error { log.Info("Apply migration", "name", v.Name) var progress []byte - if err := db.View(context.Background(), func(tx ethdb.Tx) (err error) { - progress, err = tx.GetOne(dbutils.Migrations, []byte("_progress_"+v.Name)) + if err := db.View(context.Background(), func(tx kv.Tx) (err error) { + progress, err = tx.GetOne(kv.Migrations, []byte("_progress_"+v.Name)) return err }); err != nil { return err } - if err := v.Up(db, path.Join(datadir, "migrations", v.Name), progress, func(tx ethdb.RwTx, key []byte, isDone bool) error { + if err := v.Up(db, path.Join(datadir, "migrations", v.Name), progress, func(tx kv.RwTx, key []byte, isDone bool) error { if !isDone { if key != nil { - if err := tx.Put(dbutils.Migrations, []byte("_progress_"+v.Name), key); err != nil { + if err := tx.Put(kv.Migrations, []byte("_progress_"+v.Name), key); err != nil { return err } } @@ -201,12 +200,12 @@ func (m *Migrator) Apply(db ethdb.RwKV, datadir string) error { if err != nil { return err } - err = tx.Put(dbutils.Migrations, []byte(v.Name), stagesProgress) + err = tx.Put(kv.Migrations, []byte(v.Name), stagesProgress) if err != nil { return err } - err = tx.Delete(dbutils.Migrations, []byte("_progress_"+v.Name), nil) + err = tx.Delete(kv.Migrations, 
[]byte("_progress_"+v.Name), nil) if err != nil { return err } @@ -223,29 +222,29 @@ func (m *Migrator) Apply(db ethdb.RwKV, datadir string) error { } // Write DB schema version var version [12]byte - binary.BigEndian.PutUint32(version[:], dbutils.DBSchemaVersion.Major) - binary.BigEndian.PutUint32(version[4:], dbutils.DBSchemaVersion.Minor) - binary.BigEndian.PutUint32(version[8:], dbutils.DBSchemaVersion.Patch) - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - if err := tx.Put(dbutils.DatabaseInfoBucket, dbutils.DBSchemaVersionKey, version[:]); err != nil { + binary.BigEndian.PutUint32(version[:], kv.DBSchemaVersion.Major) + binary.BigEndian.PutUint32(version[4:], kv.DBSchemaVersion.Minor) + binary.BigEndian.PutUint32(version[8:], kv.DBSchemaVersion.Patch) + if err := db.Update(context.Background(), func(tx kv.RwTx) error { + if err := tx.Put(kv.DatabaseInfo, kv.DBSchemaVersionKey, version[:]); err != nil { return fmt.Errorf("writing DB schema version: %w", err) } return nil }); err != nil { return err } - log.Info("Updated DB schema to", "version", fmt.Sprintf("%d.%d.%d", dbutils.DBSchemaVersion.Major, dbutils.DBSchemaVersion.Minor, dbutils.DBSchemaVersion.Patch)) + log.Info("Updated DB schema to", "version", fmt.Sprintf("%d.%d.%d", kv.DBSchemaVersion.Major, kv.DBSchemaVersion.Minor, kv.DBSchemaVersion.Patch)) return nil } -func MarshalMigrationPayload(db ethdb.KVGetter) ([]byte, error) { +func MarshalMigrationPayload(db kv.Getter) ([]byte, error) { s := map[string][]byte{} buf := bytes.NewBuffer(nil) encoder := codec.NewEncoder(buf, &codec.CborHandle{}) for _, stage := range stages.AllStages { - v, err := db.GetOne(dbutils.SyncStageProgress, []byte(stage)) + v, err := db.GetOne(kv.SyncStageProgress, []byte(stage)) if err != nil { return nil, err } diff --git a/migrations/migrations_test.go b/migrations/migrations_test.go index 20a2f945509..e4ff5f1db80 100644 --- a/migrations/migrations_test.go +++ b/migrations/migrations_test.go @@ -7,18 
+7,17 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/stretchr/testify/require" ) func TestApplyWithInit(t *testing.T) { - require, db := require.New(t), kv.NewTestKV(t) + require, db := require.New(t), memdb.NewTestDB(t) m := []Migration{ { "one", - func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -33,7 +32,7 @@ func TestApplyWithInit(t *testing.T) { }, { "two", - func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -48,12 +47,12 @@ func TestApplyWithInit(t *testing.T) { }, } - migrator := NewMigrator(ethdb.Chain) + migrator := NewMigrator(kv.ChainDB) migrator.Migrations = m err := migrator.Apply(db, "") require.NoError(err) var applied map[string][]byte - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied, err = AppliedMigrations(tx, false) require.NoError(err) @@ -68,7 +67,7 @@ func TestApplyWithInit(t *testing.T) { // apply again err = migrator.Apply(db, "") require.NoError(err) - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied2, err := AppliedMigrations(tx, false) require.NoError(err) require.Equal(applied, applied2) @@ -78,18 +77,18 @@ func TestApplyWithInit(t *testing.T) { } func TestApplyWithoutInit(t *testing.T) { - require, db := require.New(t), kv.NewTestKV(t) + require, db := require.New(t), 
memdb.NewTestDB(t) m := []Migration{ { "one", - func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { t.Fatal("shouldn't been executed") return nil }, }, { "two", - func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -103,18 +102,18 @@ func TestApplyWithoutInit(t *testing.T) { }, }, } - err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.Migrations, []byte(m[0].Name), []byte{1}) + err := db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.Migrations, []byte(m[0].Name), []byte{1}) }) require.NoError(err) - migrator := NewMigrator(ethdb.Chain) + migrator := NewMigrator(kv.ChainDB) migrator.Migrations = m err = migrator.Apply(db, "") require.NoError(err) var applied map[string][]byte - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied, err = AppliedMigrations(tx, false) require.NoError(err) @@ -131,7 +130,7 @@ func TestApplyWithoutInit(t *testing.T) { err = migrator.Apply(db, "") require.NoError(err) - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied2, err := AppliedMigrations(tx, false) require.NoError(err) require.Equal(applied, applied2) @@ -142,11 +141,11 @@ func TestApplyWithoutInit(t *testing.T) { } func TestWhenNonFirstMigrationAlreadyApplied(t *testing.T) { - require, db := require.New(t), kv.NewTestKV(t) + require, db := require.New(t), memdb.NewTestDB(t) m := []Migration{ { "one", - func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, tmpdir string, progress []byte, 
BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -161,24 +160,24 @@ func TestWhenNonFirstMigrationAlreadyApplied(t *testing.T) { }, { "two", - func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { t.Fatal("shouldn't been executed") return nil }, }, } - err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.Migrations, []byte(m[1].Name), []byte{1}) // apply non-first migration + err := db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.Migrations, []byte(m[1].Name), []byte{1}) // apply non-first migration }) require.NoError(err) - migrator := NewMigrator(ethdb.Chain) + migrator := NewMigrator(kv.ChainDB) migrator.Migrations = m err = migrator.Apply(db, "") require.NoError(err) var applied map[string][]byte - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied, err = AppliedMigrations(tx, false) require.NoError(err) @@ -194,7 +193,7 @@ func TestWhenNonFirstMigrationAlreadyApplied(t *testing.T) { // apply again err = migrator.Apply(db, "") require.NoError(err) - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied2, err := AppliedMigrations(tx, false) require.NoError(err) require.Equal(applied, applied2) @@ -205,7 +204,7 @@ func TestWhenNonFirstMigrationAlreadyApplied(t *testing.T) { func TestMarshalStages(t *testing.T) { require := require.New(t) - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) err := stages.SaveStageProgress(tx, stages.Execution, 42) require.NoError(err) @@ -223,11 +222,11 @@ func TestMarshalStages(t *testing.T) { } func TestValidation(t *testing.T) { - require, db := require.New(t), kv.NewTestKV(t) + require, db := require.New(t), 
memdb.NewTestDB(t) m := []Migration{ { Name: "repeated_name", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -242,7 +241,7 @@ func TestValidation(t *testing.T) { }, { Name: "repeated_name", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -256,13 +255,13 @@ func TestValidation(t *testing.T) { }, }, } - migrator := NewMigrator(ethdb.Chain) + migrator := NewMigrator(kv.ChainDB) migrator.Migrations = m err := migrator.Apply(db, "") require.True(errors.Is(err, ErrMigrationNonUniqueName)) var applied map[string][]byte - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied, err = AppliedMigrations(tx, false) require.NoError(err) require.Equal(0, len(applied)) @@ -272,23 +271,23 @@ func TestValidation(t *testing.T) { } func TestCommitCallRequired(t *testing.T) { - require, db := require.New(t), kv.NewTestKV(t) + require, db := require.New(t), memdb.NewTestDB(t) m := []Migration{ { Name: "one", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { //don't call BeforeCommit return nil }, }, } - migrator := NewMigrator(ethdb.Chain) + migrator := NewMigrator(kv.ChainDB) migrator.Migrations = m err := migrator.Apply(db, "") require.True(errors.Is(err, ErrMigrationCommitNotCalled)) var applied map[string][]byte - err = db.View(context.Background(), func(tx ethdb.Tx) error { + err = db.View(context.Background(), func(tx kv.Tx) error { applied, err = 
AppliedMigrations(tx, false) require.NoError(err) require.Equal(0, len(applied)) diff --git a/migrations/prune.go b/migrations/prune.go index 920db5a66fe..2ea97d68e2b 100644 --- a/migrations/prune.go +++ b/migrations/prune.go @@ -3,16 +3,15 @@ package migrations import ( "context" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" ) var storageMode = Migration{ Name: "storage_mode", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -36,7 +35,7 @@ var storageMode = Migration{ return math.MaxUint64 // means, prune disabled } { - v, err := tx.GetOne(dbutils.DatabaseInfoBucket, StorageModeHistory) + v, err := tx.GetOne(kv.DatabaseInfo, StorageModeHistory) if err != nil { return err } @@ -44,28 +43,28 @@ var storageMode = Migration{ } { - v, err := tx.GetOne(dbutils.DatabaseInfoBucket, StorageModeReceipts) + v, err := tx.GetOne(kv.DatabaseInfo, StorageModeReceipts) if err != nil { return err } pm.Receipts = castToPruneDistance(v) } { - v, err := tx.GetOne(dbutils.DatabaseInfoBucket, StorageModeTxIndex) + v, err := tx.GetOne(kv.DatabaseInfo, StorageModeTxIndex) if err != nil { return err } pm.TxIndex = castToPruneDistance(v) } { - v, err := tx.GetOne(dbutils.DatabaseInfoBucket, StorageModeCallTraces) + v, err := tx.GetOne(kv.DatabaseInfo, StorageModeCallTraces) if err != nil { return err } pm.CallTraces = castToPruneDistance(v) } { - v, err := tx.GetOne(dbutils.DatabaseInfoBucket, dbutils.StorageModeTEVM) + v, err := tx.GetOne(kv.DatabaseInfo, kv.StorageModeTEVM) if err != nil { return err } diff --git a/migrations/receipt_cbor.go b/migrations/receipt_cbor.go 
index 32e3628be71..0f84cfaf476 100644 --- a/migrations/receipt_cbor.go +++ b/migrations/receipt_cbor.go @@ -11,12 +11,11 @@ import ( "time" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/cbor" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" pkg1_common "github.com/ledgerwatch/erigon/common" @@ -36,7 +35,7 @@ type OldReceipts []*OldReceipt var ReceiptCbor = Migration{ Name: "receipt_cbor", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -70,7 +69,7 @@ var ReceiptCbor = Migration{ } for blockNum := uint64(1); blockNum <= to; blockNum++ { binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(dbutils.Receipts, key[:]); err != nil { + if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { return err } if v == nil { @@ -109,7 +108,7 @@ var ReceiptCbor = Migration{ if err = cbor.Marshal(&buf, receipts); err != nil { return err } - if err = tx.Put(dbutils.Receipts, common.CopyBytes(key[:]), common.CopyBytes(buf.Bytes())); err != nil { + if err = tx.Put(kv.Receipts, common.CopyBytes(key[:]), common.CopyBytes(buf.Bytes())); err != nil { return err } } diff --git a/migrations/receipt_repair.go b/migrations/receipt_repair.go index 9b379367f33..ac3a0c08e5e 100644 --- a/migrations/receipt_repair.go +++ b/migrations/receipt_repair.go @@ -9,7 +9,6 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/consensus/ethash" 
"github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" @@ -17,14 +16,14 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/cbor" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) -func availableReceiptFrom(tx ethdb.Tx) (uint64, error) { - c, err := tx.Cursor(dbutils.Receipts) +func availableReceiptFrom(tx kv.Tx) (uint64, error) { + c, err := tx.Cursor(kv.Receipts) if err != nil { return 0, err } @@ -41,7 +40,7 @@ func availableReceiptFrom(tx ethdb.Tx) (uint64, error) { var ReceiptRepair = Migration{ Name: "receipt_repair", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -90,7 +89,7 @@ var ReceiptRepair = Migration{ break } binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(dbutils.Receipts, key[:]); err != nil { + if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { return err } var receipts types.Receipts @@ -134,7 +133,7 @@ var ReceiptRepair = Migration{ if err != nil { return fmt.Errorf("encode block receipts for block %d: %v", blockNum, err) } - if err = tx.Put(dbutils.Receipts, key[:], buf.Bytes()); err != nil { + if err = tx.Put(kv.Receipts, key[:], buf.Bytes()); err != nil { return fmt.Errorf("writing receipts for block %d: %v", blockNum, err) } fixedCount++ diff --git a/migrations/remove_clique.go b/migrations/remove_clique.go index 9241b833abf..63c09a2a029 100644 --- a/migrations/remove_clique.go +++ b/migrations/remove_clique.go @@ -3,20 +3,19 @@ package migrations import ( "context" - "github.com/ledgerwatch/erigon/common/dbutils" - 
"github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) var removeCliqueBucket = Migration{ Name: "remove_clique_bucket", - Up: func(db ethdb.RwKV, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err } defer tx.Rollback() - if exists, err := tx.ExistsBucket(dbutils.CliqueBucket); err != nil { + if exists, err := tx.ExistsBucket(kv.CliqueBucket); err != nil { return err } else if !exists { if err := BeforeCommit(tx, nil, true); err != nil { @@ -25,7 +24,7 @@ var removeCliqueBucket = Migration{ return tx.Commit() } - if err := tx.DropBucket(dbutils.CliqueBucket); err != nil { + if err := tx.DropBucket(kv.CliqueBucket); err != nil { return err } diff --git a/node/config.go b/node/config.go index c9db74533b0..60019769e0d 100644 --- a/node/config.go +++ b/node/config.go @@ -25,7 +25,7 @@ import ( "strings" "sync" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/paths" @@ -142,7 +142,7 @@ type Config struct { // Logger is a custom logger to use with the p2p.Server. 
Logger log.Logger `toml:",omitempty"` - DatabaseVerbosity ethdb.DBVerbosityLvl + DatabaseVerbosity kv.DBVerbosityLvl // Address to listen to when launchig listener for remote database access // empty string means not to start the listener diff --git a/node/node.go b/node/node.go index f471e2ca733..c884e5878d8 100644 --- a/node/node.go +++ b/node/node.go @@ -27,8 +27,9 @@ import ( "strings" "sync" - "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/p2p" @@ -55,7 +56,7 @@ type Node struct { rpcAllowList rpc.AllowList // list of RPC methods explicitly allowed for this RPC node - databases []ethdb.Closer + databases []kv.Closer } const ( @@ -95,7 +96,7 @@ func New(conf *Config) (*Node, error) { inprocHandler: rpc.NewServer(50), log: conf.Logger, stop: make(chan struct{}), - databases: make([]ethdb.Closer, 0), + databases: make([]kv.Closer, 0), } // Register built-in APIs. node.rpcAPIs = append(node.rpcAPIs, node.apis()...) @@ -487,107 +488,31 @@ func (n *Node) WSEndpoint() string { return "ws://" + n.ws.listenAddr() + n.ws.wsConfig.prefix } -// OpenDatabase opens an existing database with the given name (or creates one if no -// previous can be found) from within the node's instance directory. If the node is -// ephemeral, a memory database is returned. 
-func (n *Node) OpenDatabase(label ethdb.Label, datadir string) (ethdb.RwKV, error) { - n.lock.Lock() - defer n.lock.Unlock() - - if n.state == closedState { - return nil, ErrNodeStopped - } - - var name string - switch label { - case ethdb.Chain: - name = "chaindata" - case ethdb.TxPool: - name = "txpool" - default: - name = "test" - } - var db ethdb.RwKV - if n.config.DataDir == "" { - db = kv2.NewMemKV() - n.databases = append(n.databases, db) - return db, nil - } - dbPath := n.config.ResolvePath(name) - - var openFunc func(exclusive bool) (ethdb.RwKV, error) - log.Info("Opening Database", "label", name) - openFunc = func(exclusive bool) (ethdb.RwKV, error) { - opts := kv2.NewMDBX().Path(dbPath).Label(label).DBVerbosity(n.config.DatabaseVerbosity) - if exclusive { - opts = opts.Exclusive() - } - kv, err1 := opts.Open() - if err1 != nil { - return nil, err1 - } - return kv, nil - } - var err error - db, err = openFunc(false) - if err != nil { - return nil, err - } - migrator := migrations.NewMigrator(label) - has, err := migrator.HasPendingMigrations(db) - if err != nil { - return nil, err - } - if has { - log.Info("Re-Opening DB in exclusive mode to apply migrations") - db.Close() - db, err = openFunc(true) - if err != nil { - return nil, err - } - if err = migrator.Apply(db, datadir); err != nil { - return nil, err - } - db.Close() - db, err = openFunc(false) - if err != nil { - return nil, err - } - } - - n.databases = append(n.databases, db) - return db, nil -} - -func OpenDatabase(config *Config, label ethdb.Label) (ethdb.RwKV, error) { +func OpenDatabase(config *Config, logger log.Logger, label kv.Label) (kv.RwDB, error) { var name string switch label { - case ethdb.Chain: + case kv.ChainDB: name = "chaindata" - case ethdb.TxPool: + case kv.TxPoolDB: name = "txpool" default: name = "test" } - var db ethdb.RwKV + var db kv.RwDB if config.DataDir == "" { - db = kv2.NewMemKV() + db = memdb.New() return db, nil } dbPath := config.ResolvePath(name) - var 
openFunc func(exclusive bool) (ethdb.RwKV, error) + var openFunc func(exclusive bool) (kv.RwDB, error) log.Info("Opening Database", "label", name) - openFunc = func(exclusive bool) (ethdb.RwKV, error) { - opts := kv2.NewMDBX().Path(dbPath).Label(label).DBVerbosity(config.DatabaseVerbosity) + openFunc = func(exclusive bool) (kv.RwDB, error) { + opts := mdbx.NewMDBX(logger).Path(dbPath).Label(label).DBVerbosity(config.DatabaseVerbosity) if exclusive { opts = opts.Exclusive() } - kv, err1 := opts.Open() - if err1 != nil { - return nil, err1 - } - return kv, nil + return opts.Open() } var err error db, err = openFunc(false) diff --git a/node/node_test.go b/node/node_test.go index caf4b4efcf4..a9653b4d6fc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -28,9 +28,9 @@ import ( "strings" "testing" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/rpc" @@ -41,10 +41,11 @@ var ( testNodeKey, _ = crypto.GenerateKey() ) -func testNodeConfig() *Config { +func testNodeConfig(t *testing.T) *Config { return &Config{ - Name: "test node", - P2P: p2p.Config{PrivateKey: testNodeKey}, + Name: "test node", + P2P: p2p.Config{PrivateKey: testNodeKey}, + DataDir: t.TempDir(), } } @@ -54,7 +55,7 @@ func TestNodeCloseMultipleTimes(t *testing.T) { t.Skip("fix me on win please") } - stack, err := New(testNodeConfig()) + stack, err := New(testNodeConfig(t)) if err != nil { t.Fatalf("failed to create protocol stack: %v", err) } @@ -73,7 +74,7 @@ func TestNodeStartMultipleTimes(t *testing.T) { t.Skip("fix me on win please") } - stack, err := New(testNodeConfig()) + stack, err := New(testNodeConfig(t)) if err != nil { t.Fatalf("failed to create protocol stack: %v", err) } @@ -121,7 +122,7 @@ func TestNodeUsedDataDir(t *testing.T) { // Tests whether a Lifecycle can 
be registered. func TestLifecycleRegistry_Successful(t *testing.T) { - stack, err := New(testNodeConfig()) + stack, err := New(testNodeConfig(t)) if err != nil { t.Fatalf("failed to create protocol stack: %v", err) } @@ -138,7 +139,7 @@ func TestLifecycleRegistry_Successful(t *testing.T) { // Tests whether a service's protocols can be registered properly on the node's p2p server. func TestRegisterProtocols(t *testing.T) { t.Skip("adjust to p2p sentry") - stack, err := New(testNodeConfig()) + stack, err := New(testNodeConfig(t)) if err != nil { t.Fatalf("failed to create protocol stack: %v", err) } @@ -168,25 +169,25 @@ func TestNodeCloseClosesDB(t *testing.T) { t.Skip("fix me on win please") } - stack, _ := New(testNodeConfig()) + stack, _ := New(testNodeConfig(t)) defer stack.Close() - db, err := stack.OpenDatabase(ethdb.Chain, t.TempDir()) + db, err := OpenDatabase(stack.Config(), log.New(), kv.ChainDB) if err != nil { t.Fatal("can't open DB:", err) } - if err = db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.HashedAccountsBucket, []byte("testK"), []byte{}) + if err = db.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.HashedAccounts, []byte("testK"), []byte{}) }); err != nil { t.Fatal("can't Put on open DB:", err) } stack.Close() - if err = db.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.HashedAccountsBucket, []byte("testK"), []byte{}) - }); err == nil { - t.Fatal("Put succeeded after node is closed") - } + //if err = db.Update(context.Background(), func(tx kv.RwTx) error { + // return tx.Put(kv.HashedAccounts, []byte("testK"), []byte{}) + //}); err == nil { + // t.Fatal("Put succeeded after node is closed") + //} } // This test checks that OpenDatabase can be used from within a Lifecycle Start method. 
@@ -195,14 +196,14 @@ func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) { t.Skip("fix me on win please") } - stack, _ := New(testNodeConfig()) + stack, _ := New(testNodeConfig(t)) defer stack.Close() - var db ethdb.RwKV + var db kv.RwDB var err error stack.RegisterLifecycle(&InstrumentedService{ startHook: func() { - db, err = stack.OpenDatabase(ethdb.Chain, t.TempDir()) + db, err = OpenDatabase(stack.Config(), log.New(), kv.ChainDB) if err != nil { t.Fatal("can't open DB:", err) } @@ -222,12 +223,12 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) { t.Skip("fix me on win please") } - stack, _ := New(testNodeConfig()) + stack, _ := New(testNodeConfig(t)) defer stack.Close() stack.RegisterLifecycle(&InstrumentedService{ stopHook: func() { - db, err := stack.OpenDatabase(ethdb.Chain, t.TempDir()) + db, err := OpenDatabase(stack.Config(), log.New(), kv.ChainDB) if err != nil { t.Fatal("can't open DB:", err) } @@ -241,7 +242,7 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) { // Tests that registered Lifecycles get started and stopped correctly. func TestLifecycleLifeCycle(t *testing.T) { - stack, _ := New(testNodeConfig()) + stack, _ := New(testNodeConfig(t)) defer stack.Close() started := make(map[string]bool) @@ -296,7 +297,7 @@ func TestLifecycleStartupError(t *testing.T) { t.Skip("fix me on win please") } - stack, err := New(testNodeConfig()) + stack, err := New(testNodeConfig(t)) if err != nil { t.Fatalf("failed to create protocol stack: %v", err) } @@ -346,7 +347,7 @@ func TestLifecycleStartupError(t *testing.T) { // Tests that even if a registered Lifecycle fails to shut down cleanly, it does // not influence the rest of the shutdown invocations. 
func TestLifecycleTerminationGuarantee(t *testing.T) { - stack, err := New(testNodeConfig()) + stack, err := New(testNodeConfig(t)) if err != nil { t.Fatalf("failed to create protocol stack: %v", err) } diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 3cf545a9c24..0ce53642a16 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -30,10 +30,9 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" ) @@ -74,7 +73,7 @@ var zeroIP = make(net.IP, 16) // DB is the node database, storing previously seen nodes and any collected metadata about // them for QoS purposes. type DB struct { - kv ethdb.RwKV // Interface to the database itself + kv kv.RwDB // Interface to the database itself runner sync.Once // Ensures we can start at most one expirer quit chan struct{} // Channel to signal the expiring thread to stop } @@ -82,23 +81,24 @@ type DB struct { // OpenDB opens a node database for storing and retrieving infos about known peers in the // network. If no path is given an in-memory, temporary database is constructed. func OpenDB(path string) (*DB, error) { + logger := log.New() //TODO: move higher if path == "" { - return newMemoryDB() + return newMemoryDB(logger) } - return newPersistentDB(path) + return newPersistentDB(logger, path) } -var bucketsConfig = func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.InodesBucket: {}, +var bucketsConfig = func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.InodesBucket: {}, } } // newMemoryNodeDB creates a new in-memory node database without a persistent backend. 
-func newMemoryDB() (*DB, error) { +func newMemoryDB(logger log.Logger) (*DB, error) { db := &DB{quit: make(chan struct{})} var err error - db.kv, err = kv.NewMDBX().InMem().Label(ethdb.Sentry).WithBucketsConfig(bucketsConfig).Open() + db.kv, err = mdbx.NewMDBX(logger).InMem().Label(kv.SentryDB).WithBucketsConfig(bucketsConfig).Open() if err != nil { return nil, err } @@ -107,10 +107,10 @@ func newMemoryDB() (*DB, error) { // newPersistentNodeDB creates/opens a persistent node database, // also flushing its contents in case of a version mismatch. -func newPersistentDB(path string) (*DB, error) { - var db ethdb.RwKV +func newPersistentDB(logger log.Logger, path string) (*DB, error) { + var db kv.RwDB var err error - db, err = kv.NewMDBX().Path(path).Label(ethdb.Sentry).MapSize(64 * datasize.MB).WithBucketsConfig(bucketsConfig).Open() + db, err = mdbx.NewMDBX(logger).Path(path).Label(kv.SentryDB).MapSize(64 * datasize.MB).WithBucketsConfig(bucketsConfig).Open() if err != nil { return nil, err } @@ -120,8 +120,8 @@ func newPersistentDB(path string) (*DB, error) { currentVer = currentVer[:binary.PutVarint(currentVer, int64(dbVersion))] var blob []byte - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - c, err := tx.RwCursor(dbutils.InodesBucket) + if err := db.Update(context.Background(), func(tx kv.RwTx) error { + c, err := tx.RwCursor(kv.InodesBucket) if err != nil { return err } @@ -144,7 +144,7 @@ func newPersistentDB(path string) (*DB, error) { if err := os.Remove(path); err != nil { return nil, err } - return newPersistentDB(path) + return newPersistentDB(logger, path) } return &DB{kv: db, quit: make(chan struct{})}, nil } @@ -216,8 +216,8 @@ func localItemKey(id ID, field string) []byte { // fetchInt64 retrieves an integer associated with a particular key. 
func (db *DB) fetchInt64(key []byte) int64 { var val int64 - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - blob, errGet := tx.GetOne(dbutils.InodesBucket, key) + if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + blob, errGet := tx.GetOne(kv.InodesBucket, key) if errGet != nil { return errGet } @@ -238,16 +238,16 @@ func (db *DB) fetchInt64(key []byte) int64 { func (db *DB) storeInt64(key []byte, n int64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutVarint(blob, n)] - return db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.InodesBucket, common.CopyBytes(key), blob) + return db.kv.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.InodesBucket, common.CopyBytes(key), blob) }) } // fetchUint64 retrieves an integer associated with a particular key. func (db *DB) fetchUint64(key []byte) uint64 { var val uint64 - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - blob, errGet := tx.GetOne(dbutils.InodesBucket, key) + if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + blob, errGet := tx.GetOne(kv.InodesBucket, key) if errGet != nil { return errGet } @@ -265,16 +265,16 @@ func (db *DB) fetchUint64(key []byte) uint64 { func (db *DB) storeUint64(key []byte, n uint64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutUvarint(blob, n)] - return db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.InodesBucket, common.CopyBytes(key), blob) + return db.kv.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.InodesBucket, common.CopyBytes(key), blob) }) } // Node retrieves a node with a given id from the database. 
func (db *DB) Node(id ID) *Node { var blob []byte - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - v, errGet := tx.GetOne(dbutils.InodesBucket, nodeKey(id)) + if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + v, errGet := tx.GetOne(kv.InodesBucket, nodeKey(id)) if errGet != nil { return errGet } @@ -311,8 +311,8 @@ func (db *DB) UpdateNode(node *Node) error { if err != nil { return err } - if err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.Put(dbutils.InodesBucket, nodeKey(node.ID()), blob) + if err := db.kv.Update(context.Background(), func(tx kv.RwTx) error { + return tx.Put(kv.InodesBucket, nodeKey(node.ID()), blob) }); err != nil { return err } @@ -338,9 +338,9 @@ func (db *DB) DeleteNode(id ID) { deleteRange(db.kv, nodeKey(id)) } -func deleteRange(db ethdb.RwKV, prefix []byte) { - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - c, err := tx.RwCursor(dbutils.InodesBucket) +func deleteRange(db kv.RwDB, prefix []byte) { + if err := db.Update(context.Background(), func(tx kv.RwTx) error { + c, err := tx.RwCursor(kv.InodesBucket) if err != nil { return err } @@ -394,8 +394,8 @@ func (db *DB) expireNodes() { youngestPong int64 ) var toDelete [][]byte - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.InodesBucket) + if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.InodesBucket) if err != nil { return err } @@ -527,8 +527,8 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { id ID ) - if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.InodesBucket) + if err := db.kv.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.InodesBucket) if err != nil { return err } @@ -557,7 +557,7 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { db.ensureExpirer() pongKey := nodeItemKey(n.ID(), 
n.IP(), dbNodePong) var lastPongReceived int64 - blob, errGet := tx.GetOne(dbutils.InodesBucket, pongKey) + blob, errGet := tx.GetOne(kv.InodesBucket, pongKey) if errGet != nil { return errGet } diff --git a/tests/block_test_util.go b/tests/block_test_util.go index f03e748df1b..13d50534bde 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -35,7 +35,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/stages" @@ -192,7 +192,7 @@ func (t *BlockTest) insertBlocks(m *stages.MockSentry) ([]btBlock, error) { return nil, fmt.Errorf("block #%v insertion into chain failed: %v", cb.Number(), err1) } } else if b.BlockHeader == nil { - if err := m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { canonical, cErr := rawdb.ReadCanonicalHash(tx, cb.NumberU64()) if cErr != nil { return cErr @@ -292,7 +292,7 @@ func (t *BlockTest) validatePostState(statedb *state.IntraBlockState) error { return nil } -func (t *BlockTest) validateImportedHeaders(tx ethdb.Tx, validBlocks []btBlock) error { +func (t *BlockTest) validateImportedHeaders(tx kv.Tx, validBlocks []btBlock) error { // to get constant lookup when verifying block headers by hash (some tests have many blocks) bmap := make(map[common.Hash]btBlock, len(t.json.Blocks)) for _, b := range validBlocks { diff --git a/tests/state_test.go b/tests/state_test.go index 9a3cf157683..72c11d24a42 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -26,7 +26,7 @@ import ( "testing" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" ) func TestState(t *testing.T) { @@ -72,7 +72,7 @@ 
func TestState(t *testing.T) { legacyStateTestDir, } { st.walk(t, dir, func(t *testing.T, name string, test *StateTest) { - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) for _, subtest := range test.Subtests() { subtest := subtest key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 4ba120da75d..08a9529ee54 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -25,7 +25,6 @@ import ( "strings" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" @@ -33,7 +32,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/trie" @@ -158,7 +157,7 @@ func (t *StateTest) Subtests() []StateSubtest { } // Run executes a specific subtest and verifies the post-state and logs -func (t *StateTest) Run(rules params.Rules, tx ethdb.RwTx, subtest StateSubtest, vmconfig vm.Config) (*state.IntraBlockState, error) { +func (t *StateTest) Run(rules params.Rules, tx kv.RwTx, subtest StateSubtest, vmconfig vm.Config) (*state.IntraBlockState, error) { state, root, err := t.RunNoVerify(rules, tx, subtest, vmconfig) if err != nil { return state, err @@ -176,7 +175,7 @@ func (t *StateTest) Run(rules params.Rules, tx ethdb.RwTx, subtest StateSubtest, } // RunNoVerify runs a specific subtest and returns the statedb and post-state root -func (t *StateTest) RunNoVerify(rules params.Rules, tx ethdb.RwTx, subtest StateSubtest, vmconfig vm.Config) (*state.IntraBlockState, common.Hash, error) { +func (t *StateTest) RunNoVerify(rules params.Rules, tx kv.RwTx, subtest StateSubtest, vmconfig 
vm.Config) (*state.IntraBlockState, common.Hash, error) { config, eips, err := GetChainConfig(subtest.Fork) if err != nil { return nil, common.Hash{}, UnsupportedForkError{subtest.Fork} @@ -245,7 +244,7 @@ func (t *StateTest) RunNoVerify(rules params.Rules, tx ethdb.RwTx, subtest State return nil, common.Hash{}, err } // Generate hashed state - c, err := tx.RwCursor(dbutils.PlainStateBucket) + c, err := tx.RwCursor(kv.PlainStateBucket) if err != nil { return nil, common.Hash{}, err } @@ -273,11 +272,11 @@ func (t *StateTest) RunNoVerify(rules params.Rules, tx ethdb.RwTx, subtest State h.Sha.Write(k[common.AddressLength+common.IncarnationLength:]) //nolint:errcheck h.Sha.Read(newK[common.HashLength+common.IncarnationLength:]) - if err = tx.Put(dbutils.HashedStorageBucket, newK, common.CopyBytes(v)); err != nil { + if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil { return nil, common.Hash{}, fmt.Errorf("insert hashed key: %w", err) } } else { - if err = tx.Put(dbutils.HashedAccountsBucket, newK, common.CopyBytes(v)); err != nil { + if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil { return nil, common.Hash{}, fmt.Errorf("insert hashed key: %w", err) } } @@ -296,7 +295,7 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 { return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] } -func MakePreState(rules params.Rules, tx ethdb.RwTx, accounts core.GenesisAlloc, blockNr uint64) (*state.IntraBlockState, error) { +func MakePreState(rules params.Rules, tx kv.RwTx, accounts core.GenesisAlloc, blockNr uint64) (*state.IntraBlockState, error) { r := state.NewPlainStateReader(tx) statedb := state.New(r) for addr, a := range accounts { diff --git a/tests/statedb_chain_test.go b/tests/statedb_chain_test.go index 251ea2c1fa8..287aa1fcddb 100644 --- a/tests/statedb_chain_test.go +++ b/tests/statedb_chain_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/holiman/uint256" - 
"github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/accounts/abi/bind" "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" @@ -60,7 +60,7 @@ func TestSelfDestructReceive(t *testing.T) { ) m := stages.MockWithGenesis(t, gspec, key) - db := kv.NewObjectDatabase(m.DB) + db := olddb.NewObjectDatabase(m.DB) defer db.Close() contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) diff --git a/tests/statedb_insert_chain_transaction_test.go b/tests/statedb_insert_chain_transaction_test.go index 6184c5ef24e..c3fe89a888b 100644 --- a/tests/statedb_insert_chain_transaction_test.go +++ b/tests/statedb_insert_chain_transaction_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" @@ -18,7 +19,6 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/tests/contracts" ) @@ -370,7 +370,7 @@ func TestAccountDeployIncorrectRoot(t *testing.T) { if err = m.InsertChain(chain.Slice(0, 1)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -393,7 +393,7 @@ func TestAccountDeployIncorrectRoot(t *testing.T) { t.Fatal("should fail") } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -411,7 +411,7 @@ func TestAccountDeployIncorrectRoot(t *testing.T) { 
t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -457,7 +457,7 @@ func TestAccountCreateIncorrectRoot(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -475,7 +475,7 @@ func TestAccountCreateIncorrectRoot(t *testing.T) { if err = m.InsertChain(chain.Slice(1, 2)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -541,7 +541,7 @@ func TestAccountUpdateIncorrectRoot(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -560,7 +560,7 @@ func TestAccountUpdateIncorrectRoot(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -630,7 +630,7 @@ func TestAccountDeleteIncorrectRoot(t *testing.T) { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") @@ -648,7 +648,7 @@ func TestAccountDeleteIncorrectRoot(t *testing.T) { t.Fatal(err) } - 
err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { st := state.New(state.NewPlainStateReader(tx)) if !st.Exist(from) { t.Error("expected account to exist") diff --git a/tests/vm_test.go b/tests/vm_test.go index e63e9eb3b30..1be5173f75e 100644 --- a/tests/vm_test.go +++ b/tests/vm_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" ) func TestVM(t *testing.T) { @@ -30,7 +30,7 @@ func TestVM(t *testing.T) { vmt.slow("^vmPerformance") vmt.fails("^vmSystemOperationsTest.json/createNameRegistrator$", "fails without parallel execution") - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) { withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go index a26a8a6b2ba..b0552e4f8ef 100644 --- a/tests/vm_test_util.go +++ b/tests/vm_test_util.go @@ -23,6 +23,7 @@ import ( "math/big" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/erigon/common" @@ -31,7 +32,6 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/params" ) @@ -80,7 +80,7 @@ type vmExecMarshaling struct { GasPrice *math.HexOrDecimal256 } -func (t *VMTest) Run(tx ethdb.RwTx, vmconfig vm.Config, blockNr uint64) error { +func (t *VMTest) Run(tx kv.RwTx, vmconfig vm.Config, blockNr uint64) error { state, err := MakePreState(params.MainnetChainConfig.Rules(blockNr), tx, t.json.Pre, blockNr) if err != nil { return fmt.Errorf("error in MakePreState: %v", err) diff --git a/turbo/adapter/block_getter.go b/turbo/adapter/block_getter.go index 
c3bbcf002e9..95d1f3b185d 100644 --- a/turbo/adapter/block_getter.go +++ b/turbo/adapter/block_getter.go @@ -4,15 +4,15 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) -func NewBlockGetter(tx ethdb.Tx) *blockGetter { +func NewBlockGetter(tx kv.Tx) *blockGetter { return &blockGetter{tx} } type blockGetter struct { - tx ethdb.Tx + tx kv.Tx } func (g *blockGetter) GetBlockByHash(hash common.Hash) (*types.Block, error) { diff --git a/turbo/adapter/chain_context.go b/turbo/adapter/chain_context.go index 3be5cc78b16..6eec051c433 100644 --- a/turbo/adapter/chain_context.go +++ b/turbo/adapter/chain_context.go @@ -8,17 +8,17 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" ) type chainContext struct { - tx ethdb.Tx + tx kv.Tx } -func NewChainContext(tx ethdb.Tx) *chainContext { +func NewChainContext(tx kv.Tx) *chainContext { return &chainContext{ tx: tx, } diff --git a/turbo/adapter/reader.go b/turbo/adapter/reader.go index 40c14d97632..2bd839b8d2c 100644 --- a/turbo/adapter/reader.go +++ b/turbo/adapter/reader.go @@ -8,15 +8,15 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" ) type StateReader struct { blockNr uint64 - tx ethdb.Tx + tx kv.Tx } -func NewStateReader(tx ethdb.Tx, blockNr uint64) *StateReader { +func NewStateReader(tx kv.Tx, blockNr uint64) *StateReader { return &StateReader{ tx: tx, blockNr: blockNr, @@ -45,7 +45,7 @@ func (r 
*StateReader) ReadAccountCode(address common.Address, incarnation uint64 return nil, nil } var val []byte - v, err := r.tx.GetOne(dbutils.CodeBucket, codeHash[:]) + v, err := r.tx.GetOne(kv.CodeBucket, codeHash[:]) if err != nil { return nil, err } diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 3b3f129a93c..f3d14543b81 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/etl" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/node" @@ -271,7 +271,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { func ApplyFlagsForNodeConfig(ctx *cli.Context, cfg *node.Config) { setPrivateApi(ctx, cfg) - cfg.DatabaseVerbosity = ethdb.DBVerbosityLvl(ctx.GlobalInt(DatabaseVerbosityFlag.Name)) + cfg.DatabaseVerbosity = kv.DBVerbosityLvl(ctx.GlobalInt(DatabaseVerbosityFlag.Name)) } // setPrivateApi populates configuration fields related to the remote @@ -279,7 +279,7 @@ func ApplyFlagsForNodeConfig(ctx *cli.Context, cfg *node.Config) { func setPrivateApi(ctx *cli.Context, cfg *node.Config) { cfg.PrivateApiAddr = ctx.GlobalString(PrivateApiAddr.Name) cfg.PrivateApiRateLimit = uint32(ctx.GlobalUint64(PrivateApiRateLimit.Name)) - maxRateLimit := uint32(ethdb.ReadersLimit - 128) // leave some readers for P2P + maxRateLimit := uint32(kv.ReadersLimit - 128) // leave some readers for P2P if cfg.PrivateApiRateLimit > maxRateLimit { log.Warn("private.api.ratelimit is too big", "force", maxRateLimit) cfg.PrivateApiRateLimit = maxRateLimit diff --git a/turbo/node/buckets.go b/turbo/node/buckets.go deleted file mode 100644 index 48b42d8568a..00000000000 --- a/turbo/node/buckets.go +++ /dev/null @@ -1,25 +0,0 @@ -package node - -import ( - "fmt" - - 
"github.com/ledgerwatch/erigon/common/dbutils" -) - -//nolint -func prepareBuckets(customBuckets dbutils.BucketsCfg) { - if len(customBuckets) == 0 { - return - } - - currentBuckets := dbutils.DefaultBuckets() - - for k, v := range customBuckets { - if _, ok := currentBuckets[k]; ok { - panic(fmt.Errorf("overriding existing buckets is not supported (bucket key=%s)", k)) - } - currentBuckets[k] = v - } - - dbutils.UpdateBucketsList(currentBuckets) -} diff --git a/turbo/node/node.go b/turbo/node/node.go index 73e27bdc01b..427206d93fd 100644 --- a/turbo/node/node.go +++ b/turbo/node/node.go @@ -3,9 +3,9 @@ package node import ( "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/eth" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/params" @@ -41,11 +41,11 @@ func (eri *ErigonNode) run() { // Params contains optional parameters for creating a node. // * GitCommit is a commit from which then node was built. -// * CustomBuckets is a `map[string]dbutils.BucketConfigItem`, that contains bucket name and its properties. +// * CustomBuckets is a `map[string]dbutils.TableConfigItem`, that contains bucket name and its properties. // // NB: You have to declare your custom buckets here to be able to use them in the app. 
type Params struct { - CustomBuckets dbutils.BucketsCfg + CustomBuckets kv.TableCfg } func NewNodConfigUrfave(ctx *cli.Context) *node.Config { @@ -91,16 +91,17 @@ func NewEthConfigUrfave(ctx *cli.Context, nodeConfig *node.Config) *ethconfig.Co func New( nodeConfig *node.Config, ethConfig *ethconfig.Config, + logger log.Logger, ) *ErigonNode { //prepareBuckets(optionalParams.CustomBuckets) node := makeConfigNode(nodeConfig) - ethereum := RegisterEthService(node, ethConfig) + ethereum := RegisterEthService(node, ethConfig, logger) return &ErigonNode{stack: node, backend: ethereum} } // RegisterEthService adds an Ethereum client to the stack. -func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) *eth.Ethereum { - backend, err := eth.New(stack, cfg) +func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, logger log.Logger) *eth.Ethereum { + backend, err := eth.New(stack, cfg, logger) if err != nil { panic(err) } diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index d0866f36678..9a2fd5cb7d8 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -8,7 +8,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter" ) @@ -22,7 +22,7 @@ func (e nonCanonocalHashError) Error() string { return fmt.Sprintf("hash %x is not currently canonical", e.hash) } -func GetBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx ethdb.Tx, filters *filters.Filters) (uint64, common.Hash, error) { +func GetBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *filters.Filters) (uint64, common.Hash, error) { var blockNumber uint64 var err error hash, ok := blockNrOrHash.Hash() @@ -70,7 +70,7 @@ func GetBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx ethdb.Tx, 
filters *f return blockNumber, hash, nil } -func GetAccount(tx ethdb.Tx, blockNumber uint64, address common.Address) (*accounts.Account, error) { +func GetAccount(tx kv.Tx, blockNumber uint64, address common.Address) (*accounts.Account, error) { reader := adapter.NewStateReader(tx, blockNumber) return reader.ReadAccountData(address) } diff --git a/turbo/snapshotsync/bodies_snapshot.go b/turbo/snapshotsync/bodies_snapshot.go index f502a3ac301..46af3783ee5 100644 --- a/turbo/snapshotsync/bodies_snapshot.go +++ b/turbo/snapshotsync/bodies_snapshot.go @@ -14,25 +14,27 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" ) -func GenerateBodiesSnapshot(ctx context.Context, readTX ethdb.Tx, writeTX ethdb.RwTx, toBlock uint64) error { - readBodyCursor, err := readTX.Cursor(dbutils.BlockBodyPrefix) +func GenerateBodiesSnapshot(ctx context.Context, readTX kv.Tx, writeTX kv.RwTx, toBlock uint64) error { + readBodyCursor, err := readTX.Cursor(kv.BlockBody) if err != nil { return err } - writeBodyCursor, err := writeTX.RwCursor(dbutils.BlockBodyPrefix) + writeBodyCursor, err := writeTX.RwCursor(kv.BlockBody) if err != nil { return err } - writeEthTXCursor, err := writeTX.RwCursor(dbutils.EthTx) + writeEthTXCursor, err := writeTX.RwCursor(kv.EthTx) if err != nil { return err } - readEthTXCursor, err := readTX.Cursor(dbutils.EthTx) + readEthTXCursor, err := readTX.Cursor(kv.EthTx) if err != nil { return err } @@ -43,7 +45,7 @@ func GenerateBodiesSnapshot(ctx context.Context, readTX ethdb.Tx, writeTX ethdb. 
return false, nil } - canonocalHash, err := readTX.GetOne(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(binary.BigEndian.Uint64(k))) + canonocalHash, err := readTX.GetOne(kv.HeaderCanonical, dbutils.EncodeBlockNumber(binary.BigEndian.Uint64(k))) if err != nil { return false, err } @@ -96,19 +98,19 @@ func GenerateBodiesSnapshot(ctx context.Context, readTX ethdb.Tx, writeTX ethdb. return nil } -func CreateBodySnapshot(readTx ethdb.Tx, lastBlock uint64, snapshotPath string) error { - kv, err := kv.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.BlockBodyPrefix: dbutils.BucketsConfigs[dbutils.BlockBodyPrefix], - dbutils.EthTx: dbutils.BucketsConfigs[dbutils.EthTx], +func CreateBodySnapshot(readTx kv.Tx, logger log.Logger, lastBlock uint64, snapshotPath string) error { + db, err := mdbx.NewMDBX(logger).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.BlockBody: kv.BucketsConfigs[kv.BlockBody], + kv.EthTx: kv.BucketsConfigs[kv.EthTx], } }).Path(snapshotPath).Open() if err != nil { return err } - defer kv.Close() - writeTX, err := kv.BeginRw(context.Background()) + defer db.Close() + writeTX, err := db.BeginRw(context.Background()) if err != nil { return err } @@ -120,21 +122,21 @@ func CreateBodySnapshot(readTx ethdb.Tx, lastBlock uint64, snapshotPath string) return writeTX.Commit() } -func OpenBodiesSnapshot(dbPath string) (ethdb.RoKV, error) { - return kv.NewMDBX().Path(dbPath).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.BlockBodyPrefix: dbutils.BucketsConfigs[dbutils.BlockBodyPrefix], - dbutils.EthTx: dbutils.BucketsConfigs[dbutils.EthTx], +func OpenBodiesSnapshot(logger log.Logger, dbPath string) (kv.RoDB, error) { + return mdbx.NewMDBX(logger).Path(dbPath).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.BlockBody: 
kv.BucketsConfigs[kv.BlockBody], + kv.EthTx: kv.BucketsConfigs[kv.EthTx], } }).Readonly().Open() } -func RemoveBlocksData(db ethdb.RoKV, tx ethdb.RwTx, newSnapshot uint64) (err error) { +func RemoveBlocksData(db kv.RoDB, tx kv.RwTx, newSnapshot uint64) (err error) { log.Info("Remove blocks data", "to", newSnapshot) - if _, ok := db.(kv.SnapshotUpdater); !ok { + if _, ok := db.(snapshotdb.SnapshotUpdater); !ok { return errors.New("db don't implement snapshotUpdater interface") } - bodiesSnapshot := db.(kv.SnapshotUpdater).BodiesSnapshot() + bodiesSnapshot := db.(snapshotdb.SnapshotUpdater).BodiesSnapshot() if bodiesSnapshot == nil { log.Info("bodiesSnapshot is empty") return nil @@ -144,7 +146,7 @@ func RemoveBlocksData(db ethdb.RoKV, tx ethdb.RwTx, newSnapshot uint64) (err err return err } defer blockBodySnapshotReadTX.Rollback() - ethtxSnapshotReadTX, err := blockBodySnapshotReadTX.Cursor(dbutils.EthTx) + ethtxSnapshotReadTX, err := blockBodySnapshotReadTX.Cursor(kv.EthTx) if err != nil { return err } @@ -154,12 +156,12 @@ func RemoveBlocksData(db ethdb.RoKV, tx ethdb.RwTx, newSnapshot uint64) (err err } rewriteId := binary.BigEndian.Uint64(lastEthTXSnapshotKey) + 1 - writeTX := tx.(kv.DBTX).DBTX() - blockBodyWriteCursor, err := writeTX.RwCursor(dbutils.BlockBodyPrefix) + writeTX := tx.(snapshotdb.DBTX).DBTX() + blockBodyWriteCursor, err := writeTX.RwCursor(kv.BlockBody) if err != nil { return fmt.Errorf("get bodies cursor %w", err) } - ethTXWriteCursor, err := writeTX.RwCursor(dbutils.EthTx) + ethTXWriteCursor, err := writeTX.RwCursor(kv.EthTx) if err != nil { return fmt.Errorf("get ethtx cursor %w", err) } @@ -173,7 +175,7 @@ func RemoveBlocksData(db ethdb.RoKV, tx ethdb.RwTx, newSnapshot uint64) (err err if binary.BigEndian.Uint64(k) > newSnapshot { return false, nil } - has, err := blockBodySnapshotReadTX.Has(dbutils.BlockBodyPrefix, k) + has, err := blockBodySnapshotReadTX.Has(kv.BlockBody, k) if err != nil { return false, err } @@ -245,12 +247,12 @@ func 
RemoveBlocksData(db ethdb.RoKV, tx ethdb.RwTx, newSnapshot uint64) (err err if err != nil { return err } - err = bodiesCollector.Load("bodies", writeTX, dbutils.BlockBodyPrefix, etl.IdentityLoadFunc, etl.TransformArgs{}) + err = bodiesCollector.Load("bodies", writeTX, kv.BlockBody, etl.IdentityLoadFunc, etl.TransformArgs{}) if err != nil { return err } - err = ethTXCollector.Load("ethtx", writeTX, dbutils.EthTx, etl.IdentityLoadFunc, etl.TransformArgs{}) + err = ethTXCollector.Load("ethtx", writeTX, kv.EthTx, etl.IdentityLoadFunc, etl.TransformArgs{}) if err != nil { return err } diff --git a/turbo/snapshotsync/downloader.go b/turbo/snapshotsync/downloader.go index a817b2d9269..19b3ed0a131 100644 --- a/turbo/snapshotsync/downloader.go +++ b/turbo/snapshotsync/downloader.go @@ -10,12 +10,12 @@ import ( "time" "github.com/anacrolix/torrent/bencode" + "github.com/ledgerwatch/erigon/ethdb/kv" lg "github.com/anacrolix/log" "github.com/anacrolix/torrent" "github.com/anacrolix/torrent/metainfo" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/log" @@ -67,16 +67,16 @@ func (cli *Client) Torrents() []metainfo.Hash { } return hashes } -func (cli *Client) Load(tx ethdb.Tx) error { +func (cli *Client) Load(tx kv.Tx) error { log.Info("Load added torrents") - return tx.ForEach(dbutils.SnapshotInfoBucket, []byte{}, func(k, infoHashBytes []byte) error { + return tx.ForEach(kv.SnapshotInfo, []byte{}, func(k, infoHashBytes []byte) error { if !bytes.HasPrefix(k[8:], []byte(SnapshotInfoHashPrefix)) { return nil } networkID, snapshotName := ParseInfoHashKey(k) infoHash := metainfo.Hash{} copy(infoHash[:], infoHashBytes) - infoBytes, err := tx.GetOne(dbutils.SnapshotInfoBucket, MakeInfoBytesKey(snapshotName, networkID)) + infoBytes, err := tx.GetOne(kv.SnapshotInfo, MakeInfoBytesKey(snapshotName, networkID)) if err != nil { return err } 
@@ -91,8 +91,8 @@ func (cli *Client) Load(tx ethdb.Tx) error { }) } -func (cli *Client) SavePeerID(db ethdb.Putter) error { - return db.Put(dbutils.BittorrentInfoBucket, []byte(dbutils.BittorrentPeerID), cli.PeerID()) +func (cli *Client) SavePeerID(db kv.Putter) error { + return db.Put(kv.BittorrentInfo, []byte(kv.BittorrentPeerID), cli.PeerID()) } func (cli *Client) Close() { @@ -117,7 +117,7 @@ func (cli *Client) AddTorrentSpec(snapshotName string, snapshotHash metainfo.Has return t, err } -func (cli *Client) AddTorrent(ctx context.Context, db ethdb.RwTx, snapshotType SnapshotType, networkID uint64) error { //nolint: interfacer +func (cli *Client) AddTorrent(ctx context.Context, db kv.RwTx, snapshotType SnapshotType, networkID uint64) error { //nolint: interfacer infoHashBytes, infoBytes, err := getTorrentSpec(db, snapshotType.String(), networkID) if err != nil { return err @@ -178,7 +178,7 @@ func (cli *Client) GetInfoBytes(ctx context.Context, snapshotHash metainfo.Hash) } } -func (cli *Client) AddSnapshotsTorrents(ctx context.Context, db ethdb.RwTx, networkId uint64, mode SnapshotMode) error { +func (cli *Client) AddSnapshotsTorrents(ctx context.Context, db kv.RwTx, networkId uint64, mode SnapshotMode) error { ctx, cancel := context.WithTimeout(ctx, time.Minute*10) defer cancel() eg := errgroup.Group{} @@ -256,11 +256,11 @@ func (cli *Client) Download() { } } -func (cli *Client) GetSnapshots(tx ethdb.Tx, networkID uint64) (map[SnapshotType]*SnapshotsInfo, error) { +func (cli *Client) GetSnapshots(tx kv.Tx, networkID uint64) (map[SnapshotType]*SnapshotsInfo, error) { mp := make(map[SnapshotType]*SnapshotsInfo) networkIDBytes := make([]byte, 8) binary.BigEndian.PutUint64(networkIDBytes, networkID) - err := tx.ForPrefix(dbutils.SnapshotInfoBucket, append(networkIDBytes, []byte(SnapshotInfoHashPrefix)...), func(k, v []byte) error { + err := tx.ForPrefix(kv.SnapshotInfo, append(networkIDBytes, []byte(SnapshotInfoHashPrefix)...), func(k, v []byte) error { var hash 
metainfo.Hash if len(v) != metainfo.HashSize { return nil @@ -331,30 +331,30 @@ func (cli *Client) StopSeeding(hash metainfo.Hash) error { return nil } -func getTorrentSpec(db ethdb.Tx, snapshotName string, networkID uint64) ([]byte, []byte, error) { +func getTorrentSpec(db kv.Tx, snapshotName string, networkID uint64) ([]byte, []byte, error) { var infohash, infobytes []byte var err error b := make([]byte, 8) binary.BigEndian.PutUint64(b, networkID) - infohash, err = db.GetOne(dbutils.SnapshotInfoBucket, MakeInfoHashKey(snapshotName, networkID)) + infohash, err = db.GetOne(kv.SnapshotInfo, MakeInfoHashKey(snapshotName, networkID)) if err != nil { return nil, nil, err } - infobytes, err = db.GetOne(dbutils.SnapshotInfoBucket, MakeInfoBytesKey(snapshotName, networkID)) + infobytes, err = db.GetOne(kv.SnapshotInfo, MakeInfoBytesKey(snapshotName, networkID)) if err != nil { return nil, nil, err } return infohash, infobytes, nil } -func saveTorrentSpec(db ethdb.Putter, snapshotName string, networkID uint64, hash torrent.InfoHash, infobytes []byte) error { +func saveTorrentSpec(db kv.Putter, snapshotName string, networkID uint64, hash torrent.InfoHash, infobytes []byte) error { b := make([]byte, 8) binary.BigEndian.PutUint64(b, networkID) - err := db.Put(dbutils.SnapshotInfoBucket, MakeInfoHashKey(snapshotName, networkID), hash.Bytes()) + err := db.Put(kv.SnapshotInfo, MakeInfoHashKey(snapshotName, networkID), hash.Bytes()) if err != nil { return err } - return db.Put(dbutils.SnapshotInfoBucket, MakeInfoBytesKey(snapshotName, networkID), infobytes) + return db.Put(kv.SnapshotInfo, MakeInfoBytesKey(snapshotName, networkID), infobytes) } func MakeInfoHashKey(snapshotName string, networkID uint64) []byte { @@ -378,11 +378,11 @@ func GetInfo() { } -func SnapshotSeeding(chainDB ethdb.RwKV, cli *Client, name string, snapshotsDir string) error { +func SnapshotSeeding(chainDB kv.RwDB, cli *Client, name string, snapshotsDir string) error { var snapshotBlock uint64 var 
hasSnapshotBlock bool - if err := chainDB.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + if err := chainDB.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { return err } diff --git a/turbo/snapshotsync/headers_snapshot.go b/turbo/snapshotsync/headers_snapshot.go index 6bd0dd8815a..f2685906362 100644 --- a/turbo/snapshotsync/headers_snapshot.go +++ b/turbo/snapshotsync/headers_snapshot.go @@ -12,19 +12,21 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" "github.com/ledgerwatch/erigon/log" ) -func CreateHeadersSnapshot(ctx context.Context, readTX ethdb.Tx, toBlock uint64, snapshotPath string) error { +func CreateHeadersSnapshot(ctx context.Context, readTX kv.Tx, toBlock uint64, snapshotPath string) error { // remove created snapshot if it's not saved in main db(to avoid append error) err := os.RemoveAll(snapshotPath) if err != nil { return err } - snKV, err := kv.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketsConfigs[dbutils.HeadersBucket], + snKV, err := mdbx.NewMDBX(log.New()).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.BucketsConfigs[kv.Headers], } }).Path(snapshotPath).Open() if err != nil { @@ -49,8 +51,8 @@ func CreateHeadersSnapshot(ctx context.Context, readTX ethdb.Tx, toBlock uint64, return nil } -func GenerateHeadersSnapshot(ctx context.Context, db ethdb.Tx, sntx ethdb.RwTx, toBlock uint64) error { - headerCursor, err := sntx.RwCursor(dbutils.HeadersBucket) +func 
GenerateHeadersSnapshot(ctx context.Context, db kv.Tx, sntx kv.RwTx, toBlock uint64) error { + headerCursor, err := sntx.RwCursor(kv.Headers) if err != nil { return err } @@ -85,31 +87,31 @@ func GenerateHeadersSnapshot(ctx context.Context, db ethdb.Tx, sntx ethdb.RwTx, return nil } -func OpenHeadersSnapshot(dbPath string) (ethdb.RoKV, error) { - return kv.NewMDBX().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { - return dbutils.BucketsCfg{ - dbutils.HeadersBucket: dbutils.BucketsConfigs[dbutils.HeadersBucket], +func OpenHeadersSnapshot(dbPath string) (kv.RoDB, error) { + return mdbx.NewMDBX(log.New()).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + kv.Headers: kv.BucketsConfigs[kv.Headers], } }).Readonly().Path(dbPath).Open() } -func RemoveHeadersData(db ethdb.RoKV, tx ethdb.RwTx, currentSnapshot, newSnapshot uint64) (err error) { +func RemoveHeadersData(db kv.RoDB, tx kv.RwTx, currentSnapshot, newSnapshot uint64) (err error) { log.Info("Remove data", "from", currentSnapshot, "to", newSnapshot) - if _, ok := db.(kv.SnapshotUpdater); !ok { + if _, ok := db.(snapshotdb.SnapshotUpdater); !ok { return errors.New("db don't implement snapshotUpdater interface") } - headerSnapshot := db.(kv.SnapshotUpdater).HeadersSnapshot() + headerSnapshot := db.(snapshotdb.SnapshotUpdater).HeadersSnapshot() if headerSnapshot == nil { return errors.New("empty headers snapshot") } - writeTX := tx.(kv.DBTX).DBTX() - c, err := writeTX.RwCursor(dbutils.HeadersBucket) + writeTX := tx.(snapshotdb.DBTX).DBTX() + c, err := writeTX.RwCursor(kv.Headers) if err != nil { return fmt.Errorf("get headers cursor %w", err) } - return headerSnapshot.View(context.Background(), func(tx ethdb.Tx) error { - c2, err := tx.Cursor(dbutils.HeadersBucket) + return headerSnapshot.View(context.Background(), func(tx kv.Tx) error { + c2, err := tx.Cursor(kv.Headers) if err != nil { return err } diff --git a/turbo/snapshotsync/postprocessing.go 
b/turbo/snapshotsync/postprocessing.go index 9e1d0840e0f..90addd4da8e 100644 --- a/turbo/snapshotsync/postprocessing.go +++ b/turbo/snapshotsync/postprocessing.go @@ -15,6 +15,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" ) @@ -40,16 +41,16 @@ var ( Snapshot11kkTD = []byte{138, 3, 199, 118, 5, 203, 95, 162, 81, 64, 161} ) -func PostProcessing(db ethdb.RwKV, downloadedSnapshots map[SnapshotType]*SnapshotsInfo) error { +func PostProcessing(db kv.RwDB, downloadedSnapshots map[SnapshotType]*SnapshotsInfo) error { if _, ok := downloadedSnapshots[SnapshotType_headers]; ok { - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + if err := db.Update(context.Background(), func(tx kv.RwTx) error { return GenerateHeaderIndexes(context.Background(), tx) }); err != nil { return err } } if _, ok := downloadedSnapshots[SnapshotType_state]; ok { - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + if err := db.Update(context.Background(), func(tx kv.RwTx) error { return PostProcessState(tx, downloadedSnapshots[SnapshotType_state]) }); err != nil { return err @@ -57,7 +58,7 @@ func PostProcessing(db ethdb.RwKV, downloadedSnapshots map[SnapshotType]*Snapsho } if _, ok := downloadedSnapshots[SnapshotType_bodies]; ok { - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + if err := db.Update(context.Background(), func(tx kv.RwTx) error { return PostProcessBodies(tx) }); err != nil { return err @@ -67,7 +68,7 @@ func PostProcessing(db ethdb.RwKV, downloadedSnapshots map[SnapshotType]*Snapsho return nil } -func PostProcessBodies(tx ethdb.RwTx) error { +func PostProcessBodies(tx kv.RwTx) error { v, err := stages.GetStageProgress(tx, stages.Bodies) if err != nil { return err @@ -76,12 +77,12 @@ func 
PostProcessBodies(tx ethdb.RwTx) error { if v > 0 { return nil } - err = tx.ClearBucket(dbutils.TxLookupPrefix) + err = tx.ClearBucket(kv.TxLookup) if err != nil { return err } - ethTxC, err := tx.Cursor(dbutils.EthTx) + ethTxC, err := tx.Cursor(kv.EthTx) if err != nil { return err } @@ -94,12 +95,12 @@ func PostProcessBodies(tx ethdb.RwTx) error { } secKey := make([]byte, 8) binary.BigEndian.PutUint64(secKey, binary.BigEndian.Uint64(k)+1) - err = tx.Put(dbutils.Sequence, []byte(dbutils.EthTx), secKey) + err = tx.Put(kv.Sequence, []byte(kv.EthTx), secKey) if err != nil { return err } - bodyC, err := tx.Cursor(dbutils.BlockBodyPrefix) + bodyC, err := tx.Cursor(kv.BlockBody) if err != nil { return err } @@ -120,7 +121,7 @@ func PostProcessBodies(tx ethdb.RwTx) error { return tx.Commit() } -func PostProcessState(db ethdb.RwTx, info *SnapshotsInfo) error { +func PostProcessState(db kv.RwTx, info *SnapshotsInfo) error { v, err := stages.GetStageProgress(db, stages.Execution) if err != nil { return err @@ -130,10 +131,10 @@ func PostProcessState(db ethdb.RwTx, info *SnapshotsInfo) error { return nil } // clear genesis state - if err = db.ClearBucket(dbutils.PlainStateBucket); err != nil { + if err = db.ClearBucket(kv.PlainStateBucket); err != nil { return err } - if err = db.ClearBucket(dbutils.EthTx); err != nil { + if err = db.ClearBucket(kv.EthTx); err != nil { return err } err = stages.SaveStageProgress(db, stages.Execution, info.SnapshotBlock) @@ -166,12 +167,12 @@ func PostProcessNoBlocksSync(db ethdb.Database, blockNum uint64, blockHash commo defer tx.Rollback() //add header - err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(SnapshotBlock, blockHash), blockHeaderBytes) + err = tx.Put(kv.Headers, dbutils.HeaderKey(SnapshotBlock, blockHash), blockHeaderBytes) if err != nil { return err } //add canonical - err = tx.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(SnapshotBlock), blockHash.Bytes()) + err = tx.Put(kv.HeaderCanonical, 
dbutils.EncodeBlockNumber(SnapshotBlock), blockHash.Bytes()) if err != nil { return err } @@ -185,7 +186,7 @@ func PostProcessNoBlocksSync(db ethdb.Database, blockNum uint64, blockHash commo return err } - err = tx.Put(dbutils.HeaderNumberBucket, blockHash.Bytes(), dbutils.EncodeBlockNumber(SnapshotBlock)) + err = tx.Put(kv.HeaderNumber, blockHash.Bytes(), dbutils.EncodeBlockNumber(SnapshotBlock)) if err != nil { return err } @@ -193,17 +194,17 @@ func PostProcessNoBlocksSync(db ethdb.Database, blockNum uint64, blockHash commo if err != nil { return err } - err = tx.Put(dbutils.HeaderTDBucket, dbutils.HeaderKey(SnapshotBlock, blockHash), b) + err = tx.Put(kv.HeaderTD, dbutils.HeaderKey(SnapshotBlock, blockHash), b) if err != nil { return err } - err = tx.Put(dbutils.HeadHeaderKey, []byte(dbutils.HeadHeaderKey), blockHash.Bytes()) + err = tx.Put(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey), blockHash.Bytes()) if err != nil { return err } - err = tx.Put(dbutils.HeadBlockKey, []byte(dbutils.HeadBlockKey), blockHash.Bytes()) + err = tx.Put(kv.HeadBlockKey, []byte(kv.HeadBlockKey), blockHash.Bytes()) if err != nil { return err } @@ -231,8 +232,8 @@ func PostProcessNoBlocksSync(db ethdb.Database, blockNum uint64, blockHash commo return tx.Commit() } -func generateHeaderHashToNumberIndex(ctx context.Context, tx ethdb.RwTx) error { - c, err := tx.Cursor(dbutils.HeadersBucket) +func generateHeaderHashToNumberIndex(ctx context.Context, tx kv.RwTx) error { + c, err := tx.Cursor(kv.Headers) if err != nil { return err } @@ -249,7 +250,7 @@ func generateHeaderHashToNumberIndex(ctx context.Context, tx ethdb.RwTx) error { headNumber := big.NewInt(0).SetBytes(headNumberBytes).Uint64() headHash := common.BytesToHash(headHashBytes) - return etl.Transform("Torrent post-processing 1", tx, dbutils.HeadersBucket, dbutils.HeaderNumberBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error { + return etl.Transform("Torrent post-processing 1", tx, kv.Headers, 
kv.HeaderNumber, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error { return next(k, common.CopyBytes(k[8:]), common.CopyBytes(k[:8])) }, etl.IdentityLoadFunc, etl.TransformArgs{ Quit: ctx.Done(), @@ -257,7 +258,7 @@ func generateHeaderHashToNumberIndex(ctx context.Context, tx ethdb.RwTx) error { }) } -func generateHeaderTDAndCanonicalIndexes(ctx context.Context, tx ethdb.RwTx) error { +func generateHeaderTDAndCanonicalIndexes(ctx context.Context, tx kv.RwTx) error { var hash common.Hash var number uint64 var err error @@ -266,7 +267,7 @@ func generateHeaderTDAndCanonicalIndexes(ctx context.Context, tx ethdb.RwTx) err td := h.Difficulty log.Info("Generate TD index & canonical") - err = etl.Transform("Torrent post-processing 2", tx, dbutils.HeadersBucket, dbutils.HeaderTDBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error { + err = etl.Transform("Torrent post-processing 2", tx, kv.Headers, kv.HeaderTD, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error { header := &types.Header{} innerErr := rlp.DecodeBytes(v, header) if innerErr != nil { @@ -288,7 +289,7 @@ func generateHeaderTDAndCanonicalIndexes(ctx context.Context, tx ethdb.RwTx) err return err } log.Info("Generate TD index & canonical") - err = etl.Transform("Torrent post-processing 2", tx, dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error { + err = etl.Transform("Torrent post-processing 2", tx, kv.Headers, kv.HeaderCanonical, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error { return next(k, common.CopyBytes(k[:8]), common.CopyBytes(k[8:])) }, etl.IdentityLoadFunc, etl.TransformArgs{ Quit: ctx.Done(), @@ -311,7 +312,7 @@ func generateHeaderTDAndCanonicalIndexes(ctx context.Context, tx ethdb.RwTx) err return nil } -func GenerateHeaderIndexes(ctx context.Context, tx ethdb.RwTx) error { +func GenerateHeaderIndexes(ctx context.Context, tx kv.RwTx) error 
{ v, err1 := stages.GetStageProgress(tx, HeadersPostProcessingStage) if err1 != nil { return err1 diff --git a/turbo/snapshotsync/postprocessing_test.go b/turbo/snapshotsync/postprocessing_test.go index f0b89c99380..37cc2a2f512 100644 --- a/turbo/snapshotsync/postprocessing_test.go +++ b/turbo/snapshotsync/postprocessing_test.go @@ -9,8 +9,10 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + mdbx2 "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" + "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" "github.com/stretchr/testify/require" "github.com/torquem-ch/mdbx-go/mdbx" @@ -18,16 +20,16 @@ import ( func TestHeadersGenerateIndex(t *testing.T) { snPath := t.TempDir() - snKV := kv.NewMDBX().Path(snPath).MustOpen() + snKV := mdbx2.NewMDBX(log.New()).Path(snPath).MustOpen() defer os.RemoveAll(snPath) headers := generateHeaders(10) - err := snKV.Update(context.Background(), func(tx ethdb.RwTx) error { + err := snKV.Update(context.Background(), func(tx kv.RwTx) error { for _, header := range headers { headerBytes, innerErr := rlp.EncodeToBytes(header) if innerErr != nil { panic(innerErr) } - innerErr = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(header.Number.Uint64(), header.Hash()), headerBytes) + innerErr = tx.Put(kv.Headers, dbutils.HeaderKey(header.Number.Uint64(), header.Hash()), headerBytes) if innerErr != nil { panic(innerErr) } @@ -39,20 +41,20 @@ func TestHeadersGenerateIndex(t *testing.T) { } snKV.Close() - db := kv.NewMDBX().InMem().WithBucketsConfig(kv.DefaultBucketConfigs).MustOpen() + db := mdbx2.NewMDBX(log.New()).InMem().WithBucketsConfig(mdbx2.DefaultBucketConfigs).MustOpen() defer db.Close() //we need genesis - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { + if err := 
db.Update(context.Background(), func(tx kv.RwTx) error { return rawdb.WriteCanonicalHash(tx, headers[0].Hash(), headers[0].Number.Uint64()) }); err != nil { t.Fatal(err) } - snKV = kv.NewMDBX().Path(snPath).Flags(func(flags uint) uint { return flags | mdbx.Readonly }).WithBucketsConfig(kv.DefaultBucketConfigs).MustOpen() + snKV = mdbx2.NewMDBX(log.New()).Path(snPath).Flags(func(flags uint) uint { return flags | mdbx.Readonly }).WithBucketsConfig(mdbx2.DefaultBucketConfigs).MustOpen() defer snKV.Close() - snKV = kv.NewSnapshotKV().HeadersSnapshot(snKV).DB(db).Open() + snKV = snapshotdb.NewSnapshotKV().HeadersSnapshot(snKV).DB(db).Open() snTx, err := snKV.BeginRw(context.Background()) require.NoError(t, err) defer snTx.Rollback() diff --git a/turbo/snapshotsync/server.go b/turbo/snapshotsync/server.go index 446d477c98e..9de47fc9a1f 100644 --- a/turbo/snapshotsync/server.go +++ b/turbo/snapshotsync/server.go @@ -7,9 +7,8 @@ import ( "github.com/anacrolix/torrent" "github.com/golang/protobuf/ptypes/empty" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" ) var ( @@ -21,12 +20,12 @@ var ( ) func NewServer(dir string, seeding bool) (*SNDownloaderServer, error) { - db := kv.MustOpen(dir + "/db") + db := mdbx.MustOpen(dir + "/db") sn := &SNDownloaderServer{ db: db, } - if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - peerID, err := tx.GetOne(dbutils.BittorrentInfoBucket, []byte(dbutils.BittorrentPeerID)) + if err := db.Update(context.Background(), func(tx kv.RwTx) error { + peerID, err := tx.GetOne(kv.BittorrentInfo, []byte(kv.BittorrentPeerID)) if err != nil { return fmt.Errorf("get peer id: %w", err) } @@ -51,11 +50,11 @@ func NewServer(dir string, seeding bool) (*SNDownloaderServer, error) { type SNDownloaderServer struct { DownloaderServer t *Client - db ethdb.RwKV + db kv.RwDB } func (s *SNDownloaderServer) Download(ctx 
context.Context, request *DownloadSnapshotRequest) (*empty.Empty, error) { - if err := s.db.Update(ctx, func(tx ethdb.RwTx) error { + if err := s.db.Update(ctx, func(tx kv.RwTx) error { return s.t.AddSnapshotsTorrents(ctx, tx, request.NetworkId, FromSnapshotTypes(request.Type)) }); err != nil { return nil, err @@ -63,7 +62,7 @@ func (s *SNDownloaderServer) Download(ctx context.Context, request *DownloadSnap return &empty.Empty{}, nil } func (s *SNDownloaderServer) Load() error { - return s.db.View(context.Background(), func(tx ethdb.Tx) error { + return s.db.View(context.Background(), func(tx kv.Tx) error { return s.t.Load(tx) }) } diff --git a/turbo/snapshotsync/snapshot_builder.go b/turbo/snapshotsync/snapshot_builder.go index 90e51a36edd..e1ab7ec3b0e 100644 --- a/turbo/snapshotsync/snapshot_builder.go +++ b/turbo/snapshotsync/snapshot_builder.go @@ -18,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" ) @@ -45,7 +46,7 @@ type SnapshotMigrator struct { replaced uint64 } -func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, dbi ethdb.RwKV, rwTX ethdb.Tx, bittorrent *Client, async bool) error { +func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, logger log.Logger, dbi kv.RwDB, rwTX kv.Tx, bittorrent *Client, async bool) error { if atomic.LoadUint64(&sm.started) > 0 { return nil } @@ -54,10 +55,10 @@ func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, dbi ethdb.RwKV, r var snapshotHashKey []byte if sm.HeadersCurrentSnapshot < migrateToBlock && atomic.LoadUint64(&sm.HeadersNewSnapshot) < migrateToBlock { snapshotName = "headers" - snapshotHashKey = dbutils.CurrentHeadersSnapshotHash + snapshotHashKey = kv.CurrentHeadersSnapshotHash } else if sm.BodiesCurrentSnapshot < migrateToBlock && 
atomic.LoadUint64(&sm.BodiesNewSnapshot) < migrateToBlock { snapshotName = "bodies" - snapshotHashKey = dbutils.CurrentBodiesSnapshotHash + snapshotHashKey = kv.CurrentBodiesSnapshotHash } else { return nil } @@ -72,16 +73,16 @@ func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, dbi ethdb.RwKV, r } atomic.StoreUint64(&sm.replaced, 0) - var initialStages []func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error + var initialStages []func(db kv.RoDB, tx kv.Tx, toBlock uint64) error switch sm.snapshotType { case "headers": - initialStages = []func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error{ - func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { + initialStages = []func(db kv.RoDB, tx kv.Tx, toBlock uint64) error{ + func(db kv.RoDB, tx kv.Tx, toBlock uint64) error { return CreateHeadersSnapshot(context.Background(), tx, toBlock, snapshotPath) }, - func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { + func(db kv.RoDB, tx kv.Tx, toBlock uint64) error { //replace snapshot - if _, ok := db.(kv.SnapshotUpdater); !ok { + if _, ok := db.(snapshotdb.SnapshotUpdater); !ok { return errors.New("db don't implement snapshotUpdater interface") } snapshotKV, err := OpenHeadersSnapshot(snapshotPath) @@ -89,38 +90,38 @@ func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, dbi ethdb.RwKV, r return err } - db.(kv.SnapshotUpdater).UpdateSnapshots("headers", snapshotKV, sm.replaceChan) + db.(snapshotdb.SnapshotUpdater).UpdateSnapshots("headers", snapshotKV, sm.replaceChan) return nil }, } case "bodies": - initialStages = []func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error{ - func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { - return CreateBodySnapshot(tx, toBlock, snapshotPath) + initialStages = []func(db kv.RoDB, tx kv.Tx, toBlock uint64) error{ + func(db kv.RoDB, tx kv.Tx, toBlock uint64) error { + return CreateBodySnapshot(tx, logger, toBlock, snapshotPath) }, - func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { + func(db kv.RoDB, 
tx kv.Tx, toBlock uint64) error { //replace snapshot - if _, ok := db.(kv.SnapshotUpdater); !ok { + if _, ok := db.(snapshotdb.SnapshotUpdater); !ok { return errors.New("db don't implement snapshotUpdater interface") } - snapshotKV, err := OpenBodiesSnapshot(snapshotPath) + snapshotKV, err := OpenBodiesSnapshot(logger, snapshotPath) if err != nil { return err } - db.(kv.SnapshotUpdater).UpdateSnapshots("bodies", snapshotKV, sm.replaceChan) + db.(snapshotdb.SnapshotUpdater).UpdateSnapshots("bodies", snapshotKV, sm.replaceChan) return nil }, } } - btStages := func(shapshotHashKey []byte) []func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { - return []func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error{ - func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { + btStages := func(shapshotHashKey []byte) []func(db kv.RoDB, tx kv.Tx, toBlock uint64) error { + return []func(db kv.RoDB, tx kv.Tx, toBlock uint64) error{ + func(db kv.RoDB, tx kv.Tx, toBlock uint64) error { //todo headers infohash var infohash []byte var err error - infohash, err = tx.GetOne(dbutils.BittorrentInfoBucket, shapshotHashKey) + infohash, err = tx.GetOne(kv.BittorrentInfo, shapshotHashKey) if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { log.Error("Get infohash", "err", err, "block", toBlock) return err @@ -141,7 +142,7 @@ func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, dbi ethdb.RwKV, r } return nil }, - func(db ethdb.RoKV, tx ethdb.Tx, toBlock uint64) error { + func(db kv.RoDB, tx kv.Tx, toBlock uint64) error { log.Info("Start seeding snapshot", "type", snapshotName) seedingInfoHash, err := bittorrent.SeedSnapshot(snapshotName, snapshotPath) if err != nil { @@ -165,7 +166,7 @@ func (sm *SnapshotMigrator) AsyncStages(migrateToBlock uint64, dbi ethdb.RwKV, r stages := append(initialStages, btStages(snapshotHashKey)...) 
- startStages := func(tx ethdb.Tx) (innerErr error) { + startStages := func(tx kv.Tx) (innerErr error) { defer func() { if innerErr != nil { atomic.StoreUint64(&sm.started, 0) @@ -222,52 +223,52 @@ func (sm *SnapshotMigrator) Replaced() bool { return atomic.LoadUint64(&sm.replaced) == 1 } -func (sm *SnapshotMigrator) SyncStages(migrateToBlock uint64, dbi ethdb.RwKV, rwTX ethdb.RwTx) error { +func (sm *SnapshotMigrator) SyncStages(migrateToBlock uint64, dbi kv.RwDB, rwTX kv.RwTx) error { log.Info("SyncStages", "started", atomic.LoadUint64(&sm.started)) if atomic.LoadUint64(&sm.started) == 2 && sm.Replaced() { - var syncStages []func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error + var syncStages []func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error switch sm.snapshotType { case "bodies": - syncStages = []func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error{ - func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error { + syncStages = []func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error{ + func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error { log.Info("Prune db", "new", atomic.LoadUint64(&sm.BodiesNewSnapshot)) return RemoveBlocksData(db, tx, atomic.LoadUint64(&sm.BodiesNewSnapshot)) }, - func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error { + func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error { log.Info("Save bodies snapshot", "new", common.Bytes2Hex(sm.HeadersNewSnapshotInfohash), "new", atomic.LoadUint64(&sm.HeadersNewSnapshot)) - c, err := tx.RwCursor(dbutils.BittorrentInfoBucket) + c, err := tx.RwCursor(kv.BittorrentInfo) if err != nil { return err } if len(sm.BodiesNewSnapshotInfohash) == 20 { - err = c.Put(dbutils.CurrentBodiesSnapshotHash, sm.BodiesNewSnapshotInfohash) + err = c.Put(kv.CurrentBodiesSnapshotHash, sm.BodiesNewSnapshotInfohash) if err != nil { return err } } - return c.Put(dbutils.CurrentBodiesSnapshotBlock, dbutils.EncodeBlockNumber(atomic.LoadUint64(&sm.BodiesNewSnapshot))) + return c.Put(kv.CurrentBodiesSnapshotBlock, 
dbutils.EncodeBlockNumber(atomic.LoadUint64(&sm.BodiesNewSnapshot))) }, } case "headers": - syncStages = []func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error{ - func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error { + syncStages = []func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error{ + func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error { log.Info("Prune headers db", "current", sm.HeadersCurrentSnapshot, "new", atomic.LoadUint64(&sm.HeadersNewSnapshot)) return RemoveHeadersData(db, tx, sm.HeadersCurrentSnapshot, atomic.LoadUint64(&sm.HeadersNewSnapshot)) }, - func(db ethdb.RoKV, tx ethdb.RwTx, toBlock uint64) error { + func(db kv.RoDB, tx kv.RwTx, toBlock uint64) error { log.Info("Save headers snapshot", "new", common.Bytes2Hex(sm.HeadersNewSnapshotInfohash), "new", atomic.LoadUint64(&sm.HeadersNewSnapshot)) - c, err := tx.RwCursor(dbutils.BittorrentInfoBucket) + c, err := tx.RwCursor(kv.BittorrentInfo) if err != nil { return err } if len(sm.HeadersNewSnapshotInfohash) == 20 { - err = c.Put(dbutils.CurrentHeadersSnapshotHash, sm.HeadersNewSnapshotInfohash) + err = c.Put(kv.CurrentHeadersSnapshotHash, sm.HeadersNewSnapshotInfohash) if err != nil { return err } } - return c.Put(dbutils.CurrentHeadersSnapshotBlock, dbutils.EncodeBlockNumber(atomic.LoadUint64(&sm.HeadersNewSnapshot))) + return c.Put(kv.CurrentHeadersSnapshotBlock, dbutils.EncodeBlockNumber(atomic.LoadUint64(&sm.HeadersNewSnapshot))) }, } } @@ -284,12 +285,12 @@ func (sm *SnapshotMigrator) SyncStages(migrateToBlock uint64, dbi ethdb.RwKV, rw return nil } -func (sm *SnapshotMigrator) Final(tx ethdb.Tx) error { +func (sm *SnapshotMigrator) Final(tx kv.Tx) error { if atomic.LoadUint64(&sm.started) < 3 { return nil } - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if errors.Is(err, ethdb.ErrKeyNotFound) { return nil } @@ -360,13 +361,13 @@ func SnapshotName(baseDir, name string, 
blockNum uint64) string { return filepath.Join(baseDir, name) + strconv.FormatUint(blockNum, 10) } -func GetSnapshotInfo(db ethdb.RwKV) (uint64, []byte, error) { +func GetSnapshotInfo(db kv.RwDB) (uint64, []byte, error) { tx, err := db.BeginRo(context.Background()) if err != nil { return 0, nil, err } defer tx.Rollback() - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil { return 0, nil, err } @@ -378,7 +379,7 @@ func GetSnapshotInfo(db ethdb.RwKV) (uint64, []byte, error) { snapshotBlock = binary.BigEndian.Uint64(v) } - infohash, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash) + infohash, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotHash) if err != nil { return 0, nil, err } diff --git a/turbo/snapshotsync/snapshot_builder_test.go b/turbo/snapshotsync/snapshot_builder_test.go index 83d7f8524eb..c81e5b7487f 100644 --- a/turbo/snapshotsync/snapshot_builder_test.go +++ b/turbo/snapshotsync/snapshot_builder_test.go @@ -23,6 +23,9 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" + "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/rlp" "github.com/stretchr/testify/require" ) @@ -42,6 +45,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { t.Skip("fix me on win please") // after remove ChainReader from consensus engine - this test can be changed to create less databases, then can enable on win. 
now timeout after 20min } //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + logger := log.New() var err error dir := t.TempDir() @@ -67,7 +71,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { defer btCli.Close() btCli.trackers = [][]string{} - db := kv.NewSnapshotKV().DB(kv.MustOpen(filepath.Join(dir, "chaindata"))).Open() + db := snapshotdb.NewSnapshotKV().DB(mdbx.MustOpen(filepath.Join(dir, "chaindata"))).Open() quit := make(chan struct{}) defer func() { close(quit) @@ -111,7 +115,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { } - err = sb.AsyncStages(currentSnapshotBlock, db, tx, btCli, true) + err = sb.AsyncStages(currentSnapshotBlock, logger, db, tx, btCli, true) if err != nil { t.Error(err) } @@ -173,7 +177,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { rotx, err := db.WriteDB().BeginRo(context.Background()) require.NoError(t, err) defer rotx.Rollback() - roc, err := rotx.Cursor(dbutils.HeadersBucket) + roc, err := rotx.Cursor(kv.Headers) require.NoError(t, err) var headerNumber uint64 headerNumber = 11 @@ -195,7 +199,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { snRoTx, err := snokv.BeginRo(context.Background()) require.NoError(t, err) headerNumber = 0 - err = snRoTx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = snRoTx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -214,8 +218,8 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { } headerNumber = 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -236,8 +240,8 
@@ func TestSnapshotMigratorStageAsync(t *testing.T) { t.Fatal("incorrect len", trnts) } - err = db.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash) + err = db.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotHash) if err != nil { t.Fatal(err) } @@ -245,7 +249,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes())) } - v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + v, err = tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil { t.Fatal(err) } @@ -265,7 +269,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { } //just start snapshot transaction // it can't be empty slice but shouldn't be in main db - _, err = roTX.GetOne(dbutils.HeadersBucket, []byte{112, 3}) + _, err = roTX.GetOne(kv.Headers, []byte{112, 3}) if err != nil { t.Fatal(err) } @@ -296,7 +300,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { rotx, err = db.WriteDB().BeginRo(context.Background()) require.NoError(t, err) defer rotx.Rollback() - roc, err = rotx.Cursor(dbutils.HeadersBucket) + roc, err = rotx.Cursor(kv.Headers) require.NoError(t, err) err = ethdb.Walk(roc, []byte{}, 0, func(k, v []byte) (bool, error) { @@ -308,8 +312,8 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { } headerNumber = 0 - err = db.HeadersSnapshot().View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = db.HeadersSnapshot().View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -326,8 +330,8 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { t.Fatal(headerNumber) } 
headerNumber = 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -346,8 +350,8 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { if len(trnts) != 1 { t.Fatal("incorrect len", trnts) } - err = db.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash) + err = db.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotHash) if err != nil { t.Fatal(err) } @@ -355,7 +359,7 @@ func TestSnapshotMigratorStageAsync(t *testing.T) { t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes())) } - v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + v, err = tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil { t.Fatal(err) } @@ -380,6 +384,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { t.Skip("fix me on win please") // after remove ChainReader from consensus engine - this test can be changed to create less databases, then can enable on win. 
now timeout after 20min } //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + logger := log.New() var err error dir := t.TempDir() @@ -405,7 +410,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { btCli.trackers = [][]string{} defer btCli.Close() - db := kv.NewSnapshotKV().DB(kv.MustOpen(filepath.Join(dir, "chaindata"))).Open() + db := snapshotdb.NewSnapshotKV().DB(mdbx.MustOpen(filepath.Join(dir, "chaindata"))).Open() defer db.Close() sb := &SnapshotMigrator{ @@ -456,7 +461,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { } defer rotx.Rollback() - err = sb.AsyncStages(currentSnapshotBlock, db, rotx, btCli, false) + err = sb.AsyncStages(currentSnapshotBlock, logger, db, rotx, btCli, false) if err != nil { t.Fatal(err) } @@ -470,7 +475,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { tm := time.After(time.Second * 10) for atomic.LoadUint64(&sb.started) > 0 && atomic.LoadUint64(&sb.HeadersCurrentSnapshot) != 10 { - err = db.View(context.Background(), func(tx ethdb.Tx) error { return sb.Final(tx) }) + err = db.View(context.Background(), func(tx kv.Tx) error { return sb.Final(tx) }) if err != nil { t.Fatal(err) } @@ -487,7 +492,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { rotx, err := db.WriteDB().BeginRo(context.Background()) require.NoError(t, err) defer rotx.Rollback() - roc, err := rotx.Cursor(dbutils.HeadersBucket) + roc, err := rotx.Cursor(kv.Headers) require.NoError(t, err) var headerNumber uint64 headerNumber = 11 @@ -508,7 +513,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { snokv := db.HeadersSnapshot() snRoTx, err := snokv.BeginRo(context.Background()) require.NoError(t, err) - headersCursor, err := snRoTx.Cursor(dbutils.HeadersBucket) + headersCursor, err := snRoTx.Cursor(kv.Headers) require.NoError(t, err) headerNumber = 0 err = ethdb.Walk(headersCursor, []byte{}, 0, func(k, v []byte) (bool, error) { @@ -528,8 +533,8 @@ func 
TestSnapshotMigratorStageSyncMode(t *testing.T) { } headerNumber = 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -550,8 +555,8 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { t.Fatal("incorrect len", trnts) } - err = db.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash) + err = db.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotHash) if err != nil { t.Fatal(err) } @@ -559,7 +564,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes())) } - v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + v, err = tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil { t.Fatal(err) } @@ -603,7 +608,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { defer rotx.Rollback() //just start snapshot transaction // it can't be empty slice but shouldn't be in main db - _, err = roTX.GetOne(dbutils.HeadersBucket, []byte{1}) + _, err = roTX.GetOne(kv.Headers, []byte{1}) if err != nil { wg.Done() t.Error(err) @@ -649,7 +654,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { rotx, err = db.WriteDB().BeginRo(context.Background()) require.NoError(t, err) - err = rotx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = rotx.ForEach(kv.Headers, nil, func(k, v []byte) error { t.Fatal("main db must be empty here", k) return nil }) @@ -660,7 +665,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { headerNumber = 0 snRoTx, err = 
db.HeadersSnapshot().BeginRo(context.Background()) require.NoError(t, err) - err = snRoTx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = snRoTx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -676,8 +681,8 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { t.Fatal(headerNumber) } headerNumber = 0 - err = db.View(context.Background(), func(tx ethdb.Tx) error { - return tx.ForEach(dbutils.HeadersBucket, nil, func(k, v []byte) error { + err = db.View(context.Background(), func(tx kv.Tx) error { + return tx.ForEach(kv.Headers, nil, func(k, v []byte) error { if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) { t.Fatal(k) } @@ -696,8 +701,8 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { if len(trnts) != 1 { t.Fatal("incorrect len", trnts) } - err = db.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash) + err = db.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotHash) if err != nil { t.Fatal(err) } @@ -705,7 +710,7 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes())) } - v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + v, err = tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil { t.Fatal(err) } @@ -725,17 +730,17 @@ func TestSnapshotMigratorStageSyncMode(t *testing.T) { } } -func GenerateHeaderData(tx ethdb.RwTx, from, to int) error { +func GenerateHeaderData(tx kv.RwTx, from, to int) error { var err error if to > math.MaxInt8 { return errors.New("greater than uint8") } for i := from; i <= to; i++ { - err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(uint64(i), common.Hash{uint8(i)}), 
[]byte{uint8(i), uint8(i), uint8(i)}) + err = tx.Put(kv.Headers, dbutils.HeaderKey(uint64(i), common.Hash{uint8(i)}), []byte{uint8(i), uint8(i), uint8(i)}) if err != nil { return err } - err = tx.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(uint64(i)), common.Hash{uint8(i)}.Bytes()) + err = tx.Put(kv.HeaderCanonical, dbutils.EncodeBlockNumber(uint64(i)), common.Hash{uint8(i)}.Bytes()) if err != nil { return err } @@ -743,7 +748,7 @@ func GenerateHeaderData(tx ethdb.RwTx, from, to int) error { return nil } -func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { +func GenerateBodyData(tx kv.RwTx, from, to uint64) error { var err error if to > math.MaxInt8 { return errors.New("greater than uint8") @@ -751,7 +756,7 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { for i := from; i <= to; i++ { for blockNum := 1; blockNum < 4; blockNum++ { bodyForStorage := new(types.BodyForStorage) - baseTxId, err := tx.IncrementSequence(dbutils.EthTx, 3) + baseTxId, err := tx.IncrementSequence(kv.EthTx, 3) if err != nil { return err } @@ -761,7 +766,7 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { if err != nil { return err } - err = tx.Put(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(i, common.Hash{uint8(i), uint8(blockNum)}), body) + err = tx.Put(kv.BlockBody, dbutils.BlockBodyKey(i, common.Hash{uint8(i), uint8(blockNum)}), body) if err != nil { return err } @@ -773,7 +778,7 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { return err } - err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(i, common.Hash{uint8(i), uint8(blockNum)}), headersBytes) + err = tx.Put(kv.Headers, dbutils.HeaderKey(i, common.Hash{uint8(i), uint8(blockNum)}), headersBytes) if err != nil { return err } @@ -786,7 +791,7 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { return err } - err = tx.Put(dbutils.EthTx, dbutils.EncodeBlockNumber(baseTxId), txBytes) + err = tx.Put(kv.EthTx, dbutils.EncodeBlockNumber(baseTxId), 
txBytes) if err != nil { return err } @@ -795,7 +800,7 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { return err } - err = tx.Put(dbutils.EthTx, dbutils.EncodeBlockNumber(baseTxId+1), txBytes) + err = tx.Put(kv.EthTx, dbutils.EncodeBlockNumber(baseTxId+1), txBytes) if err != nil { return err } @@ -805,13 +810,13 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { return err } - err = tx.Put(dbutils.EthTx, dbutils.EncodeBlockNumber(baseTxId+2), txBytes) + err = tx.Put(kv.EthTx, dbutils.EncodeBlockNumber(baseTxId+2), txBytes) if err != nil { return err } } - err = tx.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(i), common.Hash{uint8(i), uint8(i%3) + 1}.Bytes()) + err = tx.Put(kv.HeaderCanonical, dbutils.EncodeBlockNumber(i), common.Hash{uint8(i), uint8(i%3) + 1}.Bytes()) if err != nil { return err } @@ -820,9 +825,9 @@ func GenerateBodyData(tx ethdb.RwTx, from, to uint64) error { } // check snapshot data based on GenerateBodyData -func verifyBodiesSnapshot(t *testing.T, bodySnapshotTX ethdb.Tx, snapshotTo uint64) { +func verifyBodiesSnapshot(t *testing.T, bodySnapshotTX kv.Tx, snapshotTo uint64) { t.Helper() - bodyCursor, err := bodySnapshotTX.Cursor(dbutils.BlockBodyPrefix) + bodyCursor, err := bodySnapshotTX.Cursor(kv.BlockBody) if err != nil { t.Fatal(err) } @@ -867,9 +872,9 @@ func verifyBodiesSnapshot(t *testing.T, bodySnapshotTX ethdb.Tx, snapshotTo uint } // check headers snapshot data based on GenerateBodyData -func verifyHeadersSnapshot(t *testing.T, headersSnapshotTX ethdb.Tx, snapshotTo uint64) { +func verifyHeadersSnapshot(t *testing.T, headersSnapshotTX kv.Tx, snapshotTo uint64) { t.Helper() - headersCursor, err := headersSnapshotTX.Cursor(dbutils.HeadersBucket) + headersCursor, err := headersSnapshotTX.Cursor(kv.Headers) if err != nil { t.Fatal(err) } @@ -895,9 +900,9 @@ func verifyHeadersSnapshot(t *testing.T, headersSnapshotTX ethdb.Tx, snapshotTo } } -func verifyFullBodiesData(t *testing.T, 
bodySnapshotTX ethdb.Tx, dataTo uint64) { +func verifyFullBodiesData(t *testing.T, bodySnapshotTX kv.Tx, dataTo uint64) { t.Helper() - bodyCursor, err := bodySnapshotTX.Cursor(dbutils.BlockBodyPrefix) + bodyCursor, err := bodySnapshotTX.Cursor(kv.BlockBody) if err != nil { t.Fatal(err) } @@ -954,9 +959,9 @@ func verifyFullBodiesData(t *testing.T, bodySnapshotTX ethdb.Tx, dataTo uint64) } } -func verifyPrunedBlocksData(t *testing.T, tx ethdb.Tx, dataFrom, dataTo, snapshotTxTo uint64) { +func verifyPrunedBlocksData(t *testing.T, tx kv.Tx, dataFrom, dataTo, snapshotTxTo uint64) { t.Helper() - bodyCursor, err := tx.Cursor(dbutils.BlockBodyPrefix) + bodyCursor, err := tx.Cursor(kv.BlockBody) if err != nil { t.Fatal(err) } @@ -1019,6 +1024,7 @@ func TestPruneBlocks(t *testing.T) { t.Skip("fix me on win please") // after remove ChainReader from consensus engine - this test can be changed to create less databases, then can enable on win. now timeout after 20min } //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + logger := log.New() var err error dir := t.TempDir() @@ -1043,7 +1049,7 @@ func TestPruneBlocks(t *testing.T) { btCli.trackers = [][]string{} defer btCli.Close() - db := kv.NewSnapshotKV().DB(kv.MustOpen(filepath.Join(dir, "chaindata"))).Open() + db := snapshotdb.NewSnapshotKV().DB(mdbx.MustOpen(filepath.Join(dir, "chaindata"))).Open() defer db.Close() tx, err := db.BeginRw(context.Background()) if err != nil { @@ -1070,12 +1076,12 @@ func TestPruneBlocks(t *testing.T) { defer readTX.Rollback() bodySnapshotPath := filepath.Join(snapshotsDir, SnapshotName(snapshotsDir, "bodies", snapshotTo)) - err = CreateBodySnapshot(readTX, snapshotTo, bodySnapshotPath) + err = CreateBodySnapshot(readTX, logger, snapshotTo, bodySnapshotPath) if err != nil { t.Fatal(err) } readTX.Rollback() - kvSnapshot, err := OpenBodiesSnapshot(bodySnapshotPath) + kvSnapshot, err := OpenBodiesSnapshot(logger, bodySnapshotPath) if 
err != nil { t.Fatal(err) } @@ -1087,7 +1093,7 @@ func TestPruneBlocks(t *testing.T) { defer bodySnapshotTX.Rollback() verifyBodiesSnapshot(t, bodySnapshotTX, snapshotTo) - ethTXCursor, err := bodySnapshotTX.Cursor(dbutils.EthTx) + ethTXCursor, err := bodySnapshotTX.Cursor(kv.EthTx) if err != nil { t.Fatal(err) } @@ -1176,13 +1182,13 @@ func TestPruneBlocks(t *testing.T) { } defer readTX.Rollback() - err = CreateBodySnapshot(readTX, snapshotTo, bodySnapshotPath) + err = CreateBodySnapshot(readTX, logger, snapshotTo, bodySnapshotPath) if err != nil { t.Fatal(err) } readTX.Rollback() - kvSnapshot, err = OpenBodiesSnapshot(bodySnapshotPath) + kvSnapshot, err = OpenBodiesSnapshot(logger, bodySnapshotPath) if err != nil { t.Fatal(err) } @@ -1194,7 +1200,7 @@ func TestPruneBlocks(t *testing.T) { defer bodySnapshotTX.Rollback() verifyBodiesSnapshot(t, bodySnapshotTX, snapshotTo) - ethTXCursor, err = bodySnapshotTX.Cursor(dbutils.EthTx) + ethTXCursor, err = bodySnapshotTX.Cursor(kv.EthTx) if err != nil { t.Fatal(err) } @@ -1255,8 +1261,8 @@ func TestPruneBlocks(t *testing.T) { verifyPrunedBlocksData(t, writeDBKVRoTX, snapshotTo, dataTo, binary.BigEndian.Uint64(lastTxID)) } -func PrintBodyBuckets(t *testing.T, tx ethdb.Tx) { //nolint: deadcode - bodyCursor, err := tx.Cursor(dbutils.BlockBodyPrefix) +func PrintBodyBuckets(t *testing.T, tx kv.Tx) { //nolint: deadcode + bodyCursor, err := tx.Cursor(kv.BlockBody) if err != nil { t.Fatal(err) } @@ -1287,6 +1293,7 @@ func TestBodySnapshotSyncMigration(t *testing.T) { t.Skip("fix me on win please") // after remove ChainReader from consensus engine - this test can be changed to create less databases, then can enable on win. 
now timeout after 20min } //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + logger := log.New() var err error dir := t.TempDir() @@ -1316,7 +1323,7 @@ func TestBodySnapshotSyncMigration(t *testing.T) { replaceChan: make(chan struct{}), } - db := kv.NewSnapshotKV().DB(kv.MustOpen(filepath.Join(dir, "chaindata"))).Open() + db := snapshotdb.NewSnapshotKV().DB(mdbx.MustOpen(filepath.Join(dir, "chaindata"))).Open() defer db.Close() tx, err := db.BeginRw(context.Background()) @@ -1361,7 +1368,7 @@ func TestBodySnapshotSyncMigration(t *testing.T) { } defer rotx.Rollback() - err = sb.AsyncStages(currentSnapshotBlock, db, rotx, btCli, false) + err = sb.AsyncStages(currentSnapshotBlock, logger, db, rotx, btCli, false) if err != nil { t.Fatal(err) } @@ -1433,7 +1440,7 @@ func TestBodySnapshotSyncMigration(t *testing.T) { t.Fatal(err) } verifyBodiesSnapshot(t, btx, 10) - ethTX, err := btx.Cursor(dbutils.EthTx) + ethTX, err := btx.Cursor(kv.EthTx) if err != nil { t.Fatal(err) } @@ -1453,7 +1460,7 @@ func TestBodySnapshotSyncMigration(t *testing.T) { var blockNum uint64 var numOfDuplicateBlocks uint64 dataFrom := uint64(10) - err = roWriteDBTX.ForEach(dbutils.HeadersBucket, []byte{}, func(k, v []byte) error { + err = roWriteDBTX.ForEach(kv.Headers, []byte{}, func(k, v []byte) error { numOfDuplicateBlocks++ if binary.BigEndian.Uint64(k[:8]) != blockNum { diff --git a/turbo/snapshotsync/wrapdb.go b/turbo/snapshotsync/wrapdb.go index 57a23f8b039..4dc3e1268bd 100644 --- a/turbo/snapshotsync/wrapdb.go +++ b/turbo/snapshotsync/wrapdb.go @@ -6,46 +6,47 @@ import ( "errors" "time" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/ethdb" - kv2 "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/kv" + kv2 "github.com/ledgerwatch/erigon/ethdb/mdbx" + "github.com/ledgerwatch/erigon/ethdb/snapshotdb" "github.com/ledgerwatch/erigon/log" ) var ( - BucketConfigs = 
map[SnapshotType]dbutils.BucketsCfg{ + BucketConfigs = map[SnapshotType]kv.TableCfg{ SnapshotType_bodies: { - dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{}, - dbutils.EthTx: dbutils.BucketConfigItem{}, + kv.BlockBody: kv.TableConfigItem{}, + kv.EthTx: kv.TableConfigItem{}, }, SnapshotType_headers: { - dbutils.HeadersBucket: dbutils.BucketConfigItem{}, + kv.Headers: kv.TableConfigItem{}, }, SnapshotType_state: { - dbutils.PlainStateBucket: dbutils.BucketConfigItem{ - Flags: dbutils.DupSort, + kv.PlainStateBucket: kv.TableConfigItem{ + Flags: kv.DupSort, AutoDupSortKeysConversion: true, DupFromLen: 60, DupToLen: 28, }, - dbutils.PlainContractCodeBucket: dbutils.BucketConfigItem{}, - dbutils.CodeBucket: dbutils.BucketConfigItem{}, + kv.PlainContractCode: kv.TableConfigItem{}, + kv.CodeBucket: kv.TableConfigItem{}, }, } ) //nolint -func WrapBySnapshotsFromDir(kv ethdb.RwKV, snapshotDir string, mode SnapshotMode) (ethdb.RwKV, error) { +func WrapBySnapshotsFromDir(kv kv.RwDB, snapshotDir string, mode SnapshotMode) (kv.RwDB, error) { //todo remove it return nil, errors.New("deprecated") //nolint } -func WrapBySnapshotsFromDownloader(kv ethdb.RwKV, snapshots map[SnapshotType]*SnapshotsInfo) (ethdb.RwKV, error) { - snKV := kv2.NewSnapshotKV().DB(kv) +func WrapBySnapshotsFromDownloader(db kv.RwDB, snapshots map[SnapshotType]*SnapshotsInfo) (kv.RwDB, error) { + snKV := snapshotdb.NewSnapshotKV().DB(db) for k, v := range snapshots { log.Info("Wrap db by", "snapshot", k.String(), "dir", v.Dbpath) cfg := BucketConfigs[k] - snapshotKV, err := kv2.NewMDBX().Readonly().Path(v.Dbpath).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg { + snapshotKV, err := kv2.NewMDBX(log.New()).Readonly().Path(v.Dbpath).WithBucketsConfig(func(defaultBuckets kv.TableCfg) kv.TableCfg { return cfg }).Open() @@ -67,11 +68,11 @@ func WrapBySnapshotsFromDownloader(kv ethdb.RwKV, snapshots map[SnapshotType]*Sn return snKV.Open(), nil } -func WrapSnapshots(chainDb 
ethdb.RwKV, snapshotsDir string) (ethdb.RwKV, error) { +func WrapSnapshots(chainDb kv.RwDB, snapshotsDir string) (kv.RwDB, error) { var snapshotBlock uint64 var hasSnapshotBlock bool - if err := chainDb.View(context.Background(), func(tx ethdb.Tx) error { - v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock) + if err := chainDb.View(context.Background(), func(tx kv.Tx) error { + v, err := tx.GetOne(kv.BittorrentInfo, kv.CurrentHeadersSnapshotBlock) if err != nil { return err } @@ -84,7 +85,7 @@ func WrapSnapshots(chainDb ethdb.RwKV, snapshotsDir string) (ethdb.RwKV, error) return chainDb, err } - snKVOpts := kv2.NewSnapshotKV().DB(chainDb) + snKVOpts := snapshotdb.NewSnapshotKV().DB(chainDb) if hasSnapshotBlock { snKV, innerErr := OpenHeadersSnapshot(SnapshotName(snapshotsDir, "headers", snapshotBlock)) if innerErr != nil { @@ -176,7 +177,7 @@ func DownloadSnapshots(torrentClient *Client, ExternalSnapshotDownloaderAddr str } } else { - if err := chainDb.RwKV().Update(context.Background(), func(tx ethdb.RwTx) error { + if err := chainDb.RwKV().Update(context.Background(), func(tx kv.RwTx) error { err := torrentClient.Load(tx) if err != nil { return err @@ -188,7 +189,7 @@ func DownloadSnapshots(torrentClient *Client, ExternalSnapshotDownloaderAddr str torrentClient.Download() var innerErr error var downloadedSnapshots map[SnapshotType]*SnapshotsInfo - if err := chainDb.RwKV().View(context.Background(), func(tx ethdb.Tx) (err error) { + if err := chainDb.RwKV().View(context.Background(), func(tx kv.Tx) (err error) { downloadedSnapshots, err = torrentClient.GetSnapshots(tx, networkID) if err != nil { return err diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go index 89bbccbedf4..c1e580c2a39 100644 --- a/turbo/stages/blockchain_test.go +++ b/turbo/stages/blockchain_test.go @@ -27,9 +27,9 @@ import ( "testing" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/common/dbutils" 
"github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -44,7 +44,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages" @@ -90,7 +89,7 @@ func testFork(t *testing.T, m *stages.MockSentry, i, n int, comparator func(td1, var err error // Assert the chains have the same header/block at #i var hash1, hash2 common.Hash - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { if hash1, err = rawdb.ReadCanonicalHash(tx, uint64(i)); err != nil { t.Fatalf("Failed to read canonical hash: %v", err) } @@ -101,7 +100,7 @@ func testFork(t *testing.T, m *stages.MockSentry, i, n int, comparator func(td1, }) require.NoError(t, err) - canonicalMock.DB.View(context.Background(), func(tx ethdb.Tx) error { + canonicalMock.DB.View(context.Background(), func(tx kv.Tx) error { if hash2, err = rawdb.ReadCanonicalHash(tx, uint64(i)); err != nil { t.Fatalf("Failed to read canonical hash 2: %v", err) } @@ -120,7 +119,7 @@ func testFork(t *testing.T, m *stages.MockSentry, i, n int, comparator func(td1, var tdPre, tdPost *big.Int var currentBlockB *types.Block - err = canonicalMock.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = canonicalMock.DB.View(context.Background(), func(tx kv.Tx) error { currentBlockB, err = rawdb.ReadBlockByHash(tx, rawdb.ReadHeadBlockHash(tx)) if err != nil { t.Fatalf("Failed to read current bock: %v", err) @@ -131,7 +130,7 @@ func testFork(t *testing.T, m *stages.MockSentry, i, n int, comparator func(td1, blockChainB = makeBlockChain(currentBlockB, 
n, canonicalMock, forkSeed) - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { currentBlockHash := rawdb.ReadHeadBlockHash(tx) currentBlock, err1 := rawdb.ReadBlockByHash(tx, currentBlockHash) if err1 != nil { @@ -149,7 +148,7 @@ func testFork(t *testing.T, m *stages.MockSentry, i, n int, comparator func(td1, t.Fatalf("failed to insert forking chain: %v", err) } currentBlockHash := blockChainB.TopBlock.Hash() - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { currentBlock, err1 := rawdb.ReadBlockByHash(tx, currentBlockHash) if err1 != nil { t.Fatalf("Failed to read last header: %v", err1) @@ -566,7 +565,7 @@ func TestEIP155Transition(t *testing.T) { } ) m := stages.MockWithGenesis(t, gspec, key) - db := kv.NewObjectDatabase(m.DB) + db := olddb.NewObjectDatabase(m.DB) defer db.Close() chain, chainErr := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 4, func(i int, block *core.BlockGen) { @@ -617,7 +616,7 @@ func TestEIP155Transition(t *testing.T) { if chainErr = m.InsertChain(chain); chainErr != nil { t.Fatal(chainErr) } - if err := m.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { block, _ := rawdb.ReadBlockByNumber(tx, 1) if block.Transactions()[0].Protected() { t.Error("Expected block[0].txs[0] to not be replay protected") @@ -741,7 +740,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error { receiptsAvailable, err := rawdb.ReceiptsAvailableFrom(tx) require.NoError(err) found := uint64(0) - err = tx.ForEach(dbutils.Receipts, nil, func(k, v []byte) error { + err = tx.ForEach(kv.Receipts, nil, func(k, v []byte) error { found++ return nil }) @@ -756,7 +755,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error { if pm.History.Enabled() { afterPrune := uint64(0) - err := tx.ForEach(dbutils.AccountsHistoryBucket, nil, 
func(k, _ []byte) error { + err := tx.ForEach(kv.AccountsHistory, nil, func(k, _ []byte) error { n := binary.BigEndian.Uint64(k[common.AddressLength:]) require.Greater(n, pm.History.PruneTo(head)) afterPrune++ @@ -765,7 +764,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error { require.Greater(afterPrune, uint64(0)) assert.NoError(t, err) } else { - found, err := bitmapdb.Get64(tx, dbutils.AccountsHistoryBucket, address[:], 0, 1024) + found, err := bitmapdb.Get64(tx, kv.AccountsHistory, address[:], 0, 1024) require.NoError(err) require.Equal(uint64(0), found.Minimum()) } @@ -792,16 +791,16 @@ func doModesTest(t *testing.T, pm prune.Mode) error { } /* for bucketName, shouldBeEmpty := range map[string]bool{ - //dbutils.AccountsHistoryBucket: pm.History.Enabled(), + //dbutils.AccountsHistory: pm.History.Enabled(), dbutils.Receipts: pm.Receipts.Enabled(), - //dbutils.TxLookupPrefix: pm.TxIndex.Enabled(), + //dbutils.TxLookup: pm.TxIndex.Enabled(), } { numberOfEntries := 0 err := tx.ForEach(bucketName, nil, func(k, v []byte) error { // we ignore empty account history //nolint:scopelint - if bucketName == dbutils.AccountsHistoryBucket && len(v) == 0 { + if bucketName == dbutils.AccountsHistory && len(v) == 0 { return nil } @@ -905,7 +904,7 @@ func TestEIP161AccountRemoval(t *testing.T) { if err = m.InsertChain(chain.Slice(0, 1)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { if st := state.New(state.NewPlainStateReader(tx)); !st.Exist(theAddr) { t.Error("expected account to exist") } @@ -917,7 +916,7 @@ func TestEIP161AccountRemoval(t *testing.T) { if err = m.InsertChain(chain.Slice(1, 2)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { if st := state.New(state.NewPlainStateReader(tx)); st.Exist(theAddr) { t.Error("account should not exist") } @@ 
-929,7 +928,7 @@ func TestEIP161AccountRemoval(t *testing.T) { if err = m.InsertChain(chain.Slice(2, 3)); err != nil { t.Fatal(err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { if st := state.New(state.NewPlainStateReader(tx)); st.Exist(theAddr) { t.Error("account should not exist") } @@ -954,7 +953,7 @@ func TestDoubleAccountRemoval(t *testing.T) { } ) m := stages.MockWithGenesis(t, gspec, bankKey) - db := kv.NewObjectDatabase(m.DB) + db := olddb.NewObjectDatabase(m.DB) defer db.Close() var theAddr common.Address @@ -989,7 +988,7 @@ func TestDoubleAccountRemoval(t *testing.T) { err = m.InsertChain(chain) assert.NoError(t, err) - err = m.DB.View(m.Ctx, func(tx ethdb.Tx) error { + err = m.DB.View(m.Ctx, func(tx kv.Tx) error { st := state.New(state.NewDbStateReader(tx)) assert.NoError(t, err) assert.False(t, st.Exist(theAddr), "Contract should've been removed") @@ -1053,7 +1052,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { t.Fatalf("block %d: failed to insert into chain: %v", i, err) } - if err := m2.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m2.DB.View(context.Background(), func(tx kv.Tx) error { b, h := rawdb.ReadCurrentBlock(tx), rawdb.ReadCurrentHeader(tx) if b.Hash() != h.Hash() { t.Errorf("block %d: current block/header mismatch: block #%d [%x…], header #%d [%x…]", i, b.Number(), b.Hash().Bytes()[:4], h.Number, h.Hash().Bytes()[:4]) @@ -1175,7 +1174,7 @@ func TestLowDiffLongChain(t *testing.T) { t.Fatalf("failed to insert into chain: %v", err) } - if err := m2.DB.View(context.Background(), func(tx ethdb.Tx) error { + if err := m2.DB.View(context.Background(), func(tx kv.Tx) error { head := rawdb.ReadCurrentBlock(tx) if got := fork.TopBlock.Hash(); got != head.Hash() { t.Fatalf("head wrong, expected %x got %x", head.Hash(), got) @@ -1368,7 +1367,7 @@ func TestDeleteRecreateSlots(t *testing.T) { if err := m.InsertChain(chain); err 
!= nil { t.Fatalf("failed to insert into chain: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { statedb := state.New(state.NewPlainState(tx, 1)) // If all is correct, then slot 1 and 2 are zero @@ -1453,7 +1452,7 @@ func TestDeleteRecreateAccount(t *testing.T) { if err := m.InsertChain(chain); err != nil { t.Fatalf("failed to insert into chain: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { statedb := state.New(state.NewPlainState(tx, 1)) // If all is correct, then both slots are zero @@ -1560,7 +1559,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { }, } m := stages.MockWithGenesis(t, gspec, key) - db := kv.NewObjectDatabase(m.DB) + db := olddb.NewObjectDatabase(m.DB) defer db.Close() var nonce uint64 @@ -1635,7 +1634,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { if err := m.InsertChain(chain.Slice(i, i+1)); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", i, err) } - err = m.DB.View(m.Ctx, func(tx ethdb.Tx) error { + err = m.DB.View(m.Ctx, func(tx kv.Tx) error { statedb := state.New(state.NewDbStateReader(tx)) // If all is correct, then slot 1 and 2 are zero @@ -1766,7 +1765,7 @@ func TestInitThenFailCreateContract(t *testing.T) { t.Fatalf("generate blocks: %v", err) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { // Import the canonical chain statedb := state.New(state.NewPlainState(tx, 1)) @@ -1974,7 +1973,7 @@ func TestEIP1559Transition(t *testing.T) { t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) } - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { statedb := state.New(state.NewPlainState(tx, 0)) // 3: Ensure that 
miner received only the tx's tip. @@ -2015,7 +2014,7 @@ func TestEIP1559Transition(t *testing.T) { } block = chain.Blocks[0] - err = m.DB.View(context.Background(), func(tx ethdb.Tx) error { + err = m.DB.View(context.Background(), func(tx kv.Tx) error { statedb := state.New(state.NewPlainState(tx, 0)) effectiveTip := block.Transactions()[0].GetPrice().Uint64() - block.BaseFee().Uint64() @@ -2040,7 +2039,7 @@ func TestEIP1559Transition(t *testing.T) { require.NoError(t, err) } -func current(kv ethdb.RwKV) *types.Block { +func current(kv kv.RwDB) *types.Block { tx, err := kv.BeginRo(context.Background()) if err != nil { panic(err) diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index 3e7806b66ea..7a5bec2a32a 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -13,7 +13,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/adapter" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" @@ -29,7 +29,7 @@ const BlockBufferSize = 128 type VerifyUnclesFunc func(peerID string, header *types.Header, uncles []*types.Header) error // UpdateFromDb reads the state of the database and refreshes the state of the body download -func (bd *BodyDownload) UpdateFromDb(db ethdb.RwTx) (headHeight uint64, headHash common.Hash, headTd256 *uint256.Int, err error) { +func (bd *BodyDownload) UpdateFromDb(db kv.RwTx) (headHeight uint64, headHash common.Hash, headTd256 *uint256.Int, err error) { var headerProgress, bodyProgress uint64 headerProgress, err = stages.GetStageProgress(db, stages.Headers) if err != nil { @@ -76,7 +76,7 @@ func (bd *BodyDownload) UpdateFromDb(db ethdb.RwTx) (headHeight uint64, headHash } // RequestMoreBodies - 
returns nil if nothing to request -func (bd *BodyDownload) RequestMoreBodies(db ethdb.Tx, blockNum uint64, currentTime uint64, blockPropagator adapter.BlockPropagator) (*BodyRequest, uint64, error) { +func (bd *BodyDownload) RequestMoreBodies(db kv.Tx, blockNum uint64, currentTime uint64, blockPropagator adapter.BlockPropagator) (*BodyRequest, uint64, error) { if blockNum < bd.requestedLow { blockNum = bd.requestedLow } diff --git a/turbo/stages/bodydownload/body_test.go b/turbo/stages/bodydownload/body_test.go index 1679e5f31fe..68c15f71cf3 100644 --- a/turbo/stages/bodydownload/body_test.go +++ b/turbo/stages/bodydownload/body_test.go @@ -4,11 +4,11 @@ import ( "testing" "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" ) func TestCreateBodyDownload(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) bd := NewBodyDownload(100, ethash.NewFaker()) if _, _, _, err := bd.UpdateFromDb(tx); err != nil { t.Fatalf("update from db: %v", err) diff --git a/turbo/stages/chain_makers_test.go b/turbo/stages/chain_makers_test.go index 965f2bb75d3..9505544f70d 100644 --- a/turbo/stages/chain_makers_test.go +++ b/turbo/stages/chain_makers_test.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/ledgerwatch/erigon/core" @@ -55,7 +55,7 @@ func TestGenerateChain(t *testing.T) { Alloc: core.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, } m := stages.MockWithGenesis(t, gspec, key1) - db := kv.NewObjectDatabase(m.DB) + db := olddb.NewObjectDatabase(m.DB) // This call generates a chain of 5 blocks. 
The function runs for // each block and adds different features to gen based on the diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index 8b10569203f..7389e72dcf4 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -28,8 +28,8 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/stages" @@ -133,14 +133,14 @@ func TestSetupGenesis(t *testing.T) { oldcustomg.Config = ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(2)} tests := []struct { wantErr error - fn func(ethdb.RwKV) (*params.ChainConfig, *types.Block, error) + fn func(kv.RwDB) (*params.ChainConfig, *types.Block, error) wantConfig *params.ChainConfig name string wantHash common.Hash }{ { name: "genesis without ChainConfig", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { return core.CommitGenesisBlock(db, new(core.Genesis)) }, wantErr: core.ErrGenesisNoConfig, @@ -148,7 +148,7 @@ func TestSetupGenesis(t *testing.T) { }, { name: "no block in DB, genesis == nil", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { return core.CommitGenesisBlock(db, nil) }, wantHash: params.MainnetGenesisHash, @@ -156,7 +156,7 @@ func TestSetupGenesis(t *testing.T) { }, { name: "mainnet block in DB, genesis == nil", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { return core.CommitGenesisBlock(db, nil) }, wantHash: params.MainnetGenesisHash, @@ -164,7 +164,7 @@ func 
TestSetupGenesis(t *testing.T) { }, { name: "custom block in DB, genesis == nil", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { customg.MustCommit(db) return core.CommitGenesisBlock(db, nil) }, @@ -173,7 +173,7 @@ func TestSetupGenesis(t *testing.T) { }, { name: "custom block in DB, genesis == ropsten", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { customg.MustCommit(db) return core.CommitGenesisBlock(db, core.DefaultRopstenGenesisBlock()) }, @@ -183,7 +183,7 @@ func TestSetupGenesis(t *testing.T) { }, { name: "compatible config in DB", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { oldcustomg.MustCommit(db) return core.CommitGenesisBlock(db, &customg) }, @@ -192,7 +192,7 @@ func TestSetupGenesis(t *testing.T) { }, { name: "incompatible config in DB", - fn: func(db ethdb.RwKV) (*params.ChainConfig, *types.Block, error) { + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { // Commit the 'old' genesis block with Homestead transition at #2. // Advance to block #4, past the homestead transition block of customg. key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -222,7 +222,7 @@ func TestSetupGenesis(t *testing.T) { for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { - db := kv.NewTestKV(t) + db := memdb.NewTestDB(t) config, genesis, err := test.fn(db) // Check the return values. 
if !reflect.DeepEqual(err, test.wantErr) { @@ -243,7 +243,7 @@ func TestSetupGenesis(t *testing.T) { if genesis.Hash() != test.wantHash { t.Errorf("%s: returned hash %s, want %s", test.name, genesis.Hash().Hex(), test.wantHash.Hex()) } else if err == nil { - if dbErr := db.View(context.Background(), func(tx ethdb.Tx) error { + if dbErr := db.View(context.Background(), func(tx kv.Tx) error { // Check database content. stored := rawdb.ReadBlock(tx, test.wantHash, 0) if stored.Hash() != test.wantHash { diff --git a/turbo/stages/headerdownload/header_algo_test.go b/turbo/stages/headerdownload/header_algo_test.go index 34f1d8ed3cc..3a0b09ad2bb 100644 --- a/turbo/stages/headerdownload/header_algo_test.go +++ b/turbo/stages/headerdownload/header_algo_test.go @@ -7,11 +7,11 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" ) func TestInserter1(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) // Set up parent difficulty if err := rawdb.WriteTd(tx, common.Hash{}, 4, big.NewInt(0)); err != nil { t.Fatalf("write parent diff: %v", err) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 3b551c49a9c..5883ce6cb9a 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -450,7 +450,7 @@ func (hd *HeaderDownload) SetPreverifiedHashes(preverifiedHashes map[common.Hash hd.preverifiedHeight = preverifiedHeight } -func (hd 
*HeaderDownload) RecoverFromDb(db ethdb.RoKV) error { +func (hd *HeaderDownload) RecoverFromDb(db kv.RoDB) error { hd.lock.Lock() defer hd.lock.Unlock() // Drain persistedLinksQueue and remove links @@ -458,8 +458,8 @@ func (hd *HeaderDownload) RecoverFromDb(db ethdb.RoKV) error { link := heap.Pop(hd.persistedLinkQueue).(*Link) delete(hd.links, link.hash) } - err := db.View(context.Background(), func(tx ethdb.Tx) error { - c, err := tx.Cursor(dbutils.HeadersBucket) + err := db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.Headers) if err != nil { return err } @@ -489,7 +489,7 @@ func (hd *HeaderDownload) RecoverFromDb(db ethdb.RoKV) error { // ReadProgressFromDb updates highestInDb field according to the information // in the database. It is useful in the situations when transaction was // aborted and highestInDb became out-of-sync -func (hd *HeaderDownload) ReadProgressFromDb(tx ethdb.RwTx) (err error) { +func (hd *HeaderDownload) ReadProgressFromDb(tx kv.RwTx) (err error) { hd.lock.Lock() defer hd.lock.Unlock() hd.highestInDb, err = stages.GetStageProgress(tx, stages.Headers) @@ -692,14 +692,14 @@ func (hd *HeaderDownload) addHeaderAsLink(header *types.Header, persisted bool) return link } -func (hi *HeaderInserter) FeedHeaderFunc(db ethdb.StatelessRwTx) func(header *types.Header, blockHeight uint64) error { +func (hi *HeaderInserter) FeedHeaderFunc(db kv.StatelessRwTx) func(header *types.Header, blockHeight uint64) error { return func(header *types.Header, blockHeight uint64) error { return hi.FeedHeader(db, header, blockHeight) } } -func (hi *HeaderInserter) FeedHeader(db ethdb.StatelessRwTx, header *types.Header, blockHeight uint64) error { +func (hi *HeaderInserter) FeedHeader(db kv.StatelessRwTx, header *types.Header, blockHeight uint64) error { hash := header.Hash() if hash == hi.prevHash { // Skip duplicates @@ -794,7 +794,7 @@ func (hi *HeaderInserter) FeedHeader(db ethdb.StatelessRwTx, header *types.Heade if err = 
rawdb.WriteTd(db, hash, blockHeight, td); err != nil { return fmt.Errorf("[%s] failed to WriteTd: %w", hi.logPrefix, err) } - if err = db.Put(dbutils.HeadersBucket, dbutils.HeaderKey(blockHeight, hash), data); err != nil { + if err = db.Put(kv.Headers, dbutils.HeaderKey(blockHeight, hash), data); err != nil { return fmt.Errorf("[%s] failed to store header: %w", hi.logPrefix, err) } hi.prevHash = hash diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index fad4347eb5d..5069f5b0981 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -28,10 +28,10 @@ import ( "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" - "github.com/ledgerwatch/erigon/ethdb/remote/remotedbserver" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -47,9 +47,10 @@ import ( type MockSentry struct { proto_sentry.UnimplementedSentryServer Ctx context.Context + Log log.Logger t *testing.T cancel context.CancelFunc - DB ethdb.RwKV + DB kv.RwDB tmpdir string Engine consensus.Engine ChainConfig *params.ChainConfig @@ -151,12 +152,13 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey } mock := &MockSentry{ t: t, + Log: log.New(), tmpdir: tmpdir, Engine: engine, ChainConfig: gspec.Config, Key: key, Notifications: &stagedsync.Notifications{ - Events: remotedbserver.NewEvents(), + Events: privateapi.NewEvents(), Accumulator: &shards.Accumulator{}, }, UpdateHead: func(Ctx context.Context, head uint64, hash common.Hash, td *uint256.Int) { @@ -164,9 +166,9 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey PeerId: 
gointerfaces.ConvertBytesToH512([]byte("12345")), } if t != nil { - mock.DB = kv.NewTestKV(t) + mock.DB = memdb.NewTestDB(t) } else { - mock.DB = kv.NewMemKV() + mock.DB = memdb.New() } mock.Ctx, mock.cancel = context.WithCancel(context.Background()) mock.Address = crypto.PubkeyToAddress(mock.Key.PublicKey) @@ -241,7 +243,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey cfg.BatchSize, ), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir), - stagedsync.StageSnapshotHeadersCfg(mock.DB, ethconfig.Snapshot{Enabled: false}, nil, nil), + stagedsync.StageSnapshotHeadersCfg(mock.DB, ethconfig.Snapshot{Enabled: false}, nil, nil, mock.Log), stagedsync.StageBodiesCfg( mock.DB, mock.downloader.Bd, @@ -295,7 +297,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.StreamWg.Wait() mock.TxPoolP2PServer.TxFetcher.Start() }), - stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, nil, nil), + stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, nil, nil, mock.Log), true, /* test */ ), stagedsync.DefaultUnwindOrder, @@ -412,11 +414,11 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { ms.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := false highestSeenHeader := chain.TopBlock.NumberU64() - if err := StageLoopStep(ms.Ctx, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil); err != nil { + if err := StageLoopStep(ms.Ctx, ms.Log, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil); err != nil { return err } // Check if the latest header was imported or rolled back - if err = ms.DB.View(ms.Ctx, func(tx ethdb.Tx) error { + if err = ms.DB.View(ms.Ctx, func(tx kv.Tx) error { if rawdb.ReadHeader(tx, chain.TopBlock.Hash(), chain.TopBlock.NumberU64()) == nil { return fmt.Errorf("did not import block %d %x", chain.TopBlock.NumberU64(), chain.TopBlock.Hash()) } diff --git 
a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index 9784ddc53ee..e090109ccf7 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -58,7 +58,7 @@ func TestHeaderStep(t *testing.T) { initialCycle := true highestSeenHeader := uint64(chain.TopBlock.NumberU64()) - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -99,7 +99,7 @@ func TestMineBlockWith1Tx(t *testing.T) { initialCycle := true highestSeenHeader := uint64(chain.TopBlock.NumberU64()) - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -170,7 +170,7 @@ func TestReorg(t *testing.T) { initialCycle := true highestSeenHeader := uint64(chain.TopBlock.NumberU64()) - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } @@ -224,7 +224,7 @@ func TestReorg(t *testing.T) { highestSeenHeader = uint64(short.TopBlock.NumberU64()) initialCycle = false - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } @@ -268,7 +268,7 @@ func TestReorg(t *testing.T) { // This is unwind step highestSeenHeader = 
uint64(long1.TopBlock.NumberU64()) - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } @@ -306,7 +306,7 @@ func TestReorg(t *testing.T) { highestSeenHeader = uint64(short2.TopBlock.NumberU64()) initialCycle = false - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -405,7 +405,7 @@ func TestAnchorReplace(t *testing.T) { highestSeenHeader := uint64(long.TopBlock.NumberU64()) initialCycle := true - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -512,7 +512,7 @@ func TestAnchorReplace2(t *testing.T) { highestSeenHeader := uint64(long.TopBlock.NumberU64()) initialCycle := true - if err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if err := stages.StageLoopStep(m.Ctx, m.Log, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 47b7412ebab..4b98d9a8d9f 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -17,7 +17,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - 
"github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -28,7 +28,8 @@ import ( // StageLoop runs the continuous loop of staged sync func StageLoop( ctx context.Context, - db ethdb.RwKV, + logger log.Logger, + db kv.RwDB, sync *stagedsync.Sync, hd *headerdownload.HeaderDownload, notifications *stagedsync.Notifications, @@ -50,7 +51,7 @@ func StageLoop( // Estimate the current top height seen from the peer height := hd.TopSeenHeight() - if err := StageLoopStep(ctx, db, sync, height, notifications, initialCycle, updateHead, nil); err != nil { + if err := StageLoopStep(ctx, logger, db, sync, height, notifications, initialCycle, updateHead, nil); err != nil { if errors.Is(err, common.ErrStopped) { return } @@ -80,17 +81,18 @@ func StageLoop( func StageLoopStep( ctx context.Context, - db ethdb.RwKV, + logger log.Logger, + db kv.RwDB, sync *stagedsync.Sync, highestSeenHeader uint64, notifications *stagedsync.Notifications, initialCycle bool, updateHead func(ctx context.Context, head uint64, hash common.Hash, td *uint256.Int), - snapshotMigratorFinal func(tx ethdb.Tx) error, + snapshotMigratorFinal func(tx kv.Tx) error, ) (err error) { defer func() { err = debug.ReportPanicAndRecover(err) }() // avoid crash because Erigon's core does many things - var origin, hashStateStageProgress, finishProgressBefore uint64 - if err := db.View(ctx, func(tx ethdb.Tx) error { + if err := db.View(ctx, func(tx kv.Tx) error { origin, err = stages.GetStageProgress(tx, stages.Headers) if err != nil { return err @@ -114,7 +116,7 @@ func StageLoopStep( canRunCycleInOneTransaction := !initialCycle && highestSeenHeader-origin < 1024 && highestSeenHeader-hashStateStageProgress < 1024 - var tx ethdb.RwTx // on this variable will run sync cycle. + var tx kv.RwTx // on this variable will run sync cycle. 
if canRunCycleInOneTransaction { tx, err = db.BeginRw(context.Background()) if err != nil { @@ -135,7 +137,7 @@ func StageLoopStep( } log.Info("Commit cycle", "in", time.Since(commitStart)) } - var rotx ethdb.Tx + var rotx kv.Tx if rotx, err = db.BeginRo(ctx); err != nil { return err } @@ -177,7 +179,7 @@ func StageLoopStep( return nil } -func MiningStep(ctx context.Context, kv ethdb.RwKV, mining *stagedsync.Sync) (err error) { +func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err error) { defer func() { err = debug.ReportPanicAndRecover(err) }() // avoid crash because Erigon's core does many things - tx, err := kv.BeginRw(ctx) @@ -194,7 +196,8 @@ func MiningStep(ctx context.Context, kv ethdb.RwKV, mining *stagedsync.Sync) (er func NewStagedSync2( ctx context.Context, - db ethdb.RwKV, + logger log.Logger, + db kv.RwDB, cfg ethconfig.Config, controlServer *download.ControlServerImpl, tmpdir string, @@ -219,7 +222,7 @@ func NewStagedSync2( cfg.BatchSize, ), stagedsync.StageBlockHashesCfg(db, tmpdir), - stagedsync.StageSnapshotHeadersCfg(db, cfg.Snapshot, client, snapshotMigrator), + stagedsync.StageSnapshotHeadersCfg(db, cfg.Snapshot, client, snapshotMigrator, logger), stagedsync.StageBodiesCfg( db, controlServer.Bd, @@ -267,7 +270,7 @@ func NewStagedSync2( } txPoolServer.TxFetcher.Start() }), - stagedsync.StageFinishCfg(db, tmpdir, client, snapshotMigrator), + stagedsync.StageFinishCfg(db, tmpdir, client, snapshotMigrator, logger), false, /* test */ ), stagedsync.DefaultUnwindOrder, diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go index 51f7c6b392b..3f97a693921 100644 --- a/turbo/transactions/call.go +++ b/turbo/transactions/call.go @@ -14,7 +14,7 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/internal/ethapi" 
"github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/params" @@ -24,7 +24,7 @@ import ( const callTimeout = 5 * time.Minute -func DoCall(ctx context.Context, args ethapi.CallArgs, tx ethdb.Tx, blockNrOrHash rpc.BlockNumberOrHash, overrides *map[common.Address]ethapi.Account, gasCap uint64, chainConfig *params.ChainConfig, filters *filters.Filters) (*core.ExecutionResult, error) { +func DoCall(ctx context.Context, args ethapi.CallArgs, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, overrides *map[common.Address]ethapi.Account, gasCap uint64, chainConfig *params.ChainConfig, filters *filters.Filters) (*core.ExecutionResult, error) { // todo: Pending state is only known by the miner /* if blockNrOrHash.BlockNumber != nil && *blockNrOrHash.BlockNumber == rpc.PendingBlockNumber { @@ -136,7 +136,7 @@ func DoCall(ctx context.Context, args ethapi.CallArgs, tx ethdb.Tx, blockNrOrHas return result, nil } -func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool, tx ethdb.Tx) (vm.BlockContext, vm.TxContext) { +func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool, tx kv.Tx) (vm.BlockContext, vm.TxContext) { var baseFee uint256.Int if header.Eip1559 { overflow := baseFee.SetFromBig(header.BaseFee) @@ -162,7 +162,7 @@ func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool } } -func getHashGetter(requireCanonical bool, tx ethdb.Tx) func(uint64) common.Hash { +func getHashGetter(requireCanonical bool, tx kv.Tx) func(uint64) common.Hash { return func(n uint64) common.Hash { hash, err := rawdb.ReadCanonicalHash(tx, n) if err != nil { diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go index 3ad164a49ff..19610805fa4 100644 --- a/turbo/transactions/tracing.go +++ b/turbo/transactions/tracing.go @@ -18,7 +18,7 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/stack" "github.com/ledgerwatch/erigon/eth/tracers" - 
"github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/params" ) @@ -31,7 +31,7 @@ type BlockGetter interface { } // computeTxEnv returns the execution environment of a certain transaction. -func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, checkTEVM func(common.Hash) (bool, error), engine consensus.Engine, dbtx ethdb.Tx, blockHash common.Hash, txIndex uint64) (core.Message, vm.BlockContext, vm.TxContext, *state.IntraBlockState, *state.PlainState, error) { +func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, checkTEVM func(common.Hash) (bool, error), engine consensus.Engine, dbtx kv.Tx, blockHash common.Hash, txIndex uint64) (core.Message, vm.BlockContext, vm.TxContext, *state.IntraBlockState, *state.PlainState, error) { // Create the parent state database reader := state.NewPlainState(dbtx, block.NumberU64()-1) statedb := state.New(reader) diff --git a/turbo/trie/structural_branch_test.go b/turbo/trie/structural_branch_test.go index 14b9b687949..da0d22043a7 100644 --- a/turbo/trie/structural_branch_test.go +++ b/turbo/trie/structural_branch_test.go @@ -22,16 +22,16 @@ import ( "testing" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/ethdb/kv" + "github.com/ledgerwatch/erigon/ethdb/memdb" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestIHCursor(t *testing.T) { - _, tx := kv.NewTestTx(t) + _, tx := memdb.NewTestTx(t) require := require.New(t) hash := common.HexToHash(fmt.Sprintf("%064d", 0)) @@ -40,7 +40,7 @@ func TestIHCursor(t *testing.T) { k := common.FromHex(ks) integrity.AssertSubset(k, hasTree, hasState) 
integrity.AssertSubset(k, hasHash, hasState) - _ = tx.Put(dbutils.TrieOfAccountsBucket, k, common.CopyBytes(trie.MarshalTrieNodeTyped(hasState, hasTree, hasHash, hashes, newV))) + _ = tx.Put(kv.TrieOfAccounts, k, common.CopyBytes(trie.MarshalTrieNodeTyped(hasState, hasTree, hasHash, hashes, newV))) } put("00", 0b0000000000000010, 0b0000000000000000, 0b0000000000000010, []common.Hash{hash}) @@ -59,7 +59,7 @@ func TestIHCursor(t *testing.T) { integrity.Trie(tx, false, context.Background()) - cursor, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + cursor, err := tx.Cursor(kv.TrieOfAccounts) require.NoError(err) rl := trie.NewRetainList(0) rl.AddHex(common.FromHex("01")) @@ -124,7 +124,7 @@ func TestIHCursor(t *testing.T) { k, _, _, _ = ih.Next() assert.Nil(t, k) - //cursorS := tx.Cursor(dbutils.TrieOfStorageBucket) + //cursorS := tx.Cursor(dbutils.TrieOfStorage) //ihStorage := AccTrie(canUse, cursorS) // //k, _, _ = ihStorage.SeekToAccount(common.FromHex(acc)) diff --git a/turbo/trie/trie_root.go b/turbo/trie/trie_root.go index fc8571f7ac9..bf65cb0fe98 100644 --- a/turbo/trie/trie_root.go +++ b/turbo/trie/trie_root.go @@ -11,7 +11,7 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/kv" "github.com/ledgerwatch/erigon/log" "github.com/ledgerwatch/erigon/turbo/rlphacks" ) @@ -23,12 +23,12 @@ on each level of trie calculates intermediate hash of underlying data. **Practically:** It can be implemented as "Preorder trie traversal" (Preorder - visit Root, visit Left, visit Right). But, let's make couple observations to make traversal over huge state efficient. -**Observation 1:** `TrieOfAccountsBucket` already stores state keys in sorted way. +**Observation 1:** `TrieOfAccounts` already stores state keys in sorted way. 
Iteration over this bucket will retrieve keys in same order as "Preorder trie traversal". **Observation 2:** each Eth block - changes not big part of state - it means most of Merkle trie intermediate hashes will not change. -It means we effectively can cache them. `TrieOfAccountsBucket` stores "Intermediate hashes of all Merkle trie levels". -It also sorted and Iteration over `TrieOfAccountsBucket` will retrieve keys in same order as "Preorder trie traversal". +It means we effectively can cache them. `TrieOfAccounts` stores "Intermediate hashes of all Merkle trie levels". +It also sorted and Iteration over `TrieOfAccounts` will retrieve keys in same order as "Preorder trie traversal". **Implementation:** by opening 1 Cursor on state and 1 more Cursor on intermediate hashes bucket - we will receive data in order of "Preorder trie traversal". Cursors will only do "sequential reads" and "jumps forward" - been hardware-friendly. @@ -193,20 +193,20 @@ func (l *FlatDBTrieLoader) SetStreamReceiver(receiver StreamReceiver) { // SkipAccounts: // use(AccTrie) // } -func (l *FlatDBTrieLoader) CalcTrieRoot(tx ethdb.Tx, prefix []byte, quit <-chan struct{}) (common.Hash, error) { +func (l *FlatDBTrieLoader) CalcTrieRoot(tx kv.Tx, prefix []byte, quit <-chan struct{}) (common.Hash, error) { - accC, err := tx.Cursor(dbutils.HashedAccountsBucket) + accC, err := tx.Cursor(kv.HashedAccounts) if err != nil { return EmptyRoot, err } defer accC.Close() accs := NewStateCursor(accC, quit) - trieAccC, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + trieAccC, err := tx.Cursor(kv.TrieOfAccounts) if err != nil { return EmptyRoot, err } defer trieAccC.Close() - trieStorageC, err := tx.CursorDupSort(dbutils.TrieOfStorageBucket) + trieStorageC, err := tx.CursorDupSort(kv.TrieOfStorage) if err != nil { return EmptyRoot, err } @@ -219,7 +219,7 @@ func (l *FlatDBTrieLoader) CalcTrieRoot(tx ethdb.Tx, prefix []byte, quit <-chan accTrie := AccTrie(canUse, l.hc, trieAccC, quit) storageTrie := 
StorageTrie(canUse, l.shc, trieStorageC, quit) - ss, err := tx.CursorDupSort(dbutils.HashedStorageBucket) + ss, err := tx.CursorDupSort(kv.HashedStorage) if err != nil { return EmptyRoot, err } @@ -674,13 +674,13 @@ type AccTrieCursor struct { SkipState bool is, lvl int k, v [64][]byte // store up to 64 levels of key/value pairs in nibbles format - hasState [64]uint16 // says that records in dbutil.HashedAccountsBucket exists by given prefix - hasTree [64]uint16 // says that records in dbutil.TrieOfAccountsBucket exists by given prefix + hasState [64]uint16 // says that records in dbutil.HashedAccounts exists by given prefix + hasTree [64]uint16 // says that records in dbutil.TrieOfAccounts exists by given prefix hasHash [64]uint16 // store ownership of hashes stored in .v childID, hashID [64]int8 // meta info: current child in .hasState[lvl] field, max child id, current hash in .v[lvl] deleted [64]bool // helper to avoid multiple deletes of same key - c ethdb.Cursor + c kv.Cursor hc HashCollector2 prev, cur, next []byte prefix []byte // global prefix - cursor will never return records without this prefix @@ -693,7 +693,7 @@ type AccTrieCursor struct { quit <-chan struct{} } -func AccTrie(canUse func([]byte) (bool, []byte), hc HashCollector2, c ethdb.Cursor, quit <-chan struct{}) *AccTrieCursor { +func AccTrie(canUse func([]byte) (bool, []byte), hc HashCollector2, c kv.Cursor, quit <-chan struct{}) *AccTrieCursor { return &AccTrieCursor{ c: c, canUse: canUse, @@ -986,7 +986,7 @@ type StorageTrieCursor struct { deleted [64]bool childID, hashID [64]int8 - c ethdb.Cursor + c kv.Cursor shc StorageHashCollector2 prev, cur []byte seek []byte @@ -1003,7 +1003,7 @@ type StorageTrieCursor struct { quit <-chan struct{} } -func StorageTrie(canUse func(prefix []byte) (bool, []byte), shc StorageHashCollector2, c ethdb.Cursor, quit <-chan struct{}) *StorageTrieCursor { +func StorageTrie(canUse func(prefix []byte) (bool, []byte), shc StorageHashCollector2, c kv.Cursor, quit 
<-chan struct{}) *StorageTrieCursor { ih := &StorageTrieCursor{c: c, canUse: canUse, firstNotCoveredPrefix: make([]byte, 0, 64), next: make([]byte, 0, 64), @@ -1369,12 +1369,12 @@ func firstNotCoveredPrefix(prev, prefix, buf []byte) []byte { } type StateCursor struct { - c ethdb.Cursor + c kv.Cursor quit <-chan struct{} kHex []byte } -func NewStateCursor(c ethdb.Cursor, quit <-chan struct{}) *StateCursor { +func NewStateCursor(c kv.Cursor, quit <-chan struct{}) *StateCursor { return &StateCursor{c: c, quit: quit} } @@ -1481,7 +1481,7 @@ func CastTrieNodeValue(hashes, rootHash []byte) []common.Hash { // CalcRoot is a combination of `ResolveStateTrie` and `UpdateStateTrie` // DESCRIBED: docs/programmers_guide/guide.md#organising-ethereum-state-into-a-merkle-tree -func CalcRoot(logPrefix string, tx ethdb.Tx) (common.Hash, error) { +func CalcRoot(logPrefix string, tx kv.Tx) (common.Hash, error) { loader := NewFlatDBTrieLoader(logPrefix) if err := loader.Reset(NewRetainList(0), nil, nil, false); err != nil { return EmptyRoot, err