From 545fe32f680d264131c35e0acaaa659e7d70fa97 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Sat, 22 May 2021 10:03:02 +0700
Subject: [PATCH] Mdbx - make it default db. Lazy buckets renaming. (#1982)

---
 cmd/cons/commands/root.go                 |   8 +-
 cmd/hack/hack.go                          |   2 -
 cmd/integration/commands/flags.go         |   4 +-
 cmd/integration/commands/root.go          |  10 +-
 cmd/rpcdaemon/cli/config.go               |   2 +-
 cmd/snapshots/generator/commands/root.go  |   2 +-
 cmd/state/commands/global_flags_vars.go   |   2 +-
 common/dbutils/bucket.go                  |  68 +++++++++--
 common/dbutils/rename.go                  |   1 +
 ethdb/kv_mdbx.go                          |  30 ++++-
 ethdb/mdbx/mdbx.go                        |   2 -
 ethdb/memory_database.go                  |   2 +-
 ethdb/object_db_nomdbx.go                 |   7 --
 migrations/dupsort_state.go               | 132 ----------------------
 migrations/dupsort_state_test.go          | 115 -------------------
 migrations/migrations.go                  |   9 --
 turbo/cli/flags.go                        |   2 +-
 turbo/trie/flatdb_sub_trie_loader.go      |   2 +-
 turbo/trie/flatdb_sub_trie_loader_test.go |  16 +--
 19 files changed, 110 insertions(+), 306 deletions(-)
 create mode 100644 common/dbutils/rename.go
 delete mode 100644 ethdb/object_db_nomdbx.go

diff --git a/cmd/cons/commands/root.go b/cmd/cons/commands/root.go
index 817919d1d08..2aac84f118f 100644
--- a/cmd/cons/commands/root.go
+++ b/cmd/cons/commands/root.go
@@ -52,7 +52,7 @@ func must(err error) {
 func withDatadir(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&datadir, "datadir", paths.DefaultDataDir(), "directory where databases and temporary files are kept")
 	must(cmd.MarkFlagDirname("datadir"))
-	cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx")
+	cmd.Flags().StringVar(&database, "database", "mdbx", "lmdb|mdbx")
 }
 
 func withApiAddr(cmd *cobra.Command) {
@@ -69,15 +69,15 @@ func openDatabase(path string) *ethdb.ObjectDatabase {
 }
 
 func openKV(path string, exclusive bool) ethdb.RwKV {
-	if database == "mdbx" {
-		opts := ethdb.NewMDBX().Path(path)
+	if database == "lmdb" {
+		opts := ethdb.NewLMDB().Path(path)
 		if exclusive {
 			opts = opts.Exclusive()
 		}
 		return opts.MustOpen()
 	}
 
-	opts := ethdb.NewLMDB().Path(path)
+	opts := ethdb.NewMDBX().Path(path)
 	if exclusive {
 		opts = opts.Exclusive()
 	}
diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index 89db40cd348..72b15fb3d6b 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -1,5 +1,3 @@
-//+build mdbx
-
 package main
 
 import (
diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go
index af39a44ff2e..255c7511ed0 100644
--- a/cmd/integration/commands/flags.go
+++ b/cmd/integration/commands/flags.go
@@ -91,7 +91,7 @@ func withDatadir2(cmd *cobra.Command) {
 	cmd.Flags().String(utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage)
 	must(cmd.MarkFlagDirname(utils.DataDirFlag.Name))
 	must(cmd.MarkFlagRequired(utils.DataDirFlag.Name))
-	cmd.Flags().StringVar(&database, "database", "lmdb", "lmdb|mdbx")
+	cmd.Flags().StringVar(&database, "database", "mdbx", "lmdb|mdbx")
 	cmd.Flags().IntVar(&databaseVerbosity, "database.verbosity", 2, "Enabling internal db logs. Very high verbosity levels may require recompile db. Default: 2, means warning.")
 }
 
@@ -107,7 +107,7 @@ func withDatadir(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir")
 	must(cmd.MarkFlagDirname("snapshot.dir"))
 
-	cmd.Flags().StringVar(&database, "database", "lmdb", "lmdb|mdbx")
+	cmd.Flags().StringVar(&database, "database", "mdbx", "lmdb|mdbx")
 	cmd.Flags().IntVar(&databaseVerbosity, "database.verbosity", 2, "Enabling internal db logs. Very high verbosity levels may require recompile db. Default: 2, means warning")
 }
 
diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go
index 73721405bca..568c629bce3 100644
--- a/cmd/integration/commands/root.go
+++ b/cmd/integration/commands/root.go
@@ -69,8 +69,8 @@ func openDatabase(path string, applyMigrations bool) *ethdb.ObjectDatabase {
 }
 
 func openKV(path string, exclusive bool) ethdb.RwKV {
-	if database == "mdbx" {
-		opts := ethdb.NewMDBX().Path(path)
+	if database == "lmdb" {
+		opts := ethdb.NewLMDB().Path(path)
 		if exclusive {
 			opts = opts.Exclusive()
 		}
@@ -82,10 +82,12 @@ func openKV(path string, exclusive bool) ethdb.RwKV {
 		if databaseVerbosity != -1 {
 			opts = opts.DBVerbosity(ethdb.DBVerbosityLvl(databaseVerbosity))
 		}
-		return opts.MustOpen()
+		kv := opts.MustOpen()
+		metrics.AddCallback(kv.CollectMetrics)
+		return kv
 	}
 
-	opts := ethdb.NewLMDB().Path(path)
+	opts := ethdb.NewMDBX().Path(path)
 	if exclusive {
 		opts = opts.Exclusive()
 	}
diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go
index 656b1e0e63b..bbf4db7589c 100644
--- a/cmd/rpcdaemon/cli/config.go
+++ b/cmd/rpcdaemon/cli/config.go
@@ -57,7 +57,7 @@ func RootCommand() (*cobra.Command, *Flags) {
 	cfg := &Flags{}
 	rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "private api network address, for example: 127.0.0.1:9090, empty string means not to start the listener. do not expose to public network. serves remote database interface")
 	rootCmd.PersistentFlags().StringVar(&cfg.Datadir, "datadir", "", "path to turbo-geth working directory")
-	rootCmd.PersistentFlags().StringVar(&cfg.Database, "database", "lmdb", "lmdb|mdbx engines")
+	rootCmd.PersistentFlags().StringVar(&cfg.Database, "database", "mdbx", "lmdb|mdbx engines")
 	rootCmd.PersistentFlags().StringVar(&cfg.Chaindata, "chaindata", "", "path to the database")
 	rootCmd.PersistentFlags().StringVar(&cfg.SnapshotDir, "snapshot.dir", "", "path to snapshot dir(only for chaindata mode)")
 	rootCmd.PersistentFlags().StringVar(&cfg.SnapshotMode, "snapshot.mode", "", `Configures the storage mode of the app(only for chaindata mode):
diff --git a/cmd/snapshots/generator/commands/root.go b/cmd/snapshots/generator/commands/root.go
index 0e5c78afbb8..552f6858e84 100644
--- a/cmd/snapshots/generator/commands/root.go
+++ b/cmd/snapshots/generator/commands/root.go
@@ -91,7 +91,7 @@ func withDatadir(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir")
 	must(cmd.MarkFlagDirname("snapshot.dir"))
 
-	cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx")
+	cmd.Flags().StringVar(&database, "database", "mdbx", "lmdb|mdbx")
 }
 
 func withSnapshotFile(cmd *cobra.Command) {
diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go
index 00207de481d..1623678973c 100644
--- a/cmd/state/commands/global_flags_vars.go
+++ b/cmd/state/commands/global_flags_vars.go
@@ -39,7 +39,7 @@ func withDatadir(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir")
 	must(cmd.MarkFlagDirname("snapshot.dir"))
 
-	cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx")
+	cmd.Flags().StringVar(&database, "database", "mdbx", "lmdb|mdbx")
 }
 
 func withStatsfile(cmd *cobra.Command) {
diff --git a/common/dbutils/bucket.go b/common/dbutils/bucket.go
index b198d3c59a2..0fb993c85d8 100644
--- a/common/dbutils/bucket.go
+++ b/common/dbutils/bucket.go
@@ -69,7 +69,6 @@ const (
 	HashedAccountsBucket   = "hashed_accounts"
 	HashedStorageBucket    = "hashed_storage"
 	CurrentStateBucketOld2 = "CST2"
-	CurrentStateBucketOld1 = "CST"
 
 	//key - address + shard_id_u64
 	//value - roaring bitmap - list of block where it changed
@@ -135,7 +134,6 @@ Invariants:
 */
 const TrieOfAccountsBucket = "trie_account"
 const TrieOfStorageBucket = "trie_storage"
-const IntermediateTrieHashBucketOld1 = "iTh"
 const IntermediateTrieHashBucketOld2 = "iTh2"
 
 const (
@@ -218,10 +216,64 @@ const (
 	// in case of bug-report developer can ask content of this bucket
 	Migrations = "migrations"
 
-	Sequence = "sequence" // tbl_name -> seq_u64
-
+	Sequence      = "sequence" // tbl_name -> seq_u64
+	HeadHeaderKey = "LastHeader"
 )
 
+var Rename = map[string]string{
+	PlainStateBucket:          "PlainState",
+	PlainContractCodeBucket:   "PlainCodeHash",
+	AccountChangeSetBucket:    "AccountChangeSet",
+	StorageChangeSetBucket:    "StorageChangeSet",
+	HashedAccountsBucket:      "HashedAccount",
+	HashedStorageBucket:       "HashedStorage",
+	AccountsHistoryBucket:     "AccountHistory",
+	StorageHistoryBucket:      "StorageHistory",
+	CodeBucket:                "Code",
+	ContractCodeBucket:        "HashedCodeHash",
+	IncarnationMapBucket:      "IncarnationMap",
+	TrieOfAccountsBucket:      "TrieAccount",
+	TrieOfStorageBucket:       "TrieStorage",
+	DatabaseInfoBucket:        "DbInfo",
+	SnapshotInfoBucket:        "SnapshotInfo",
+	BittorrentInfoBucket:      "BittorrentInfo",
+	HeadersSnapshotInfoBucket: "HeadersSnapshotInfo",
+	BodiesSnapshotInfoBucket:  "BodiesSnapshotInfo",
+	StateSnapshotInfoBucket:   "StateSnapshotInfo",
+	HeaderNumberBucket:        "HeaderNumber",
+	HeaderCanonicalBucket:     "CanonicalHeader",
+	HeadersBucket:             "Header",
+	HeaderTDBucket:            "HeadersTotalDifficulty",
+	BlockBodyPrefix:           "BlockBody",
+	EthTx:                     "BlockTransaction",
+	BlockReceiptsPrefix:       "Receipt",
+	Log:                       "TransactionLog",
+	LogTopicIndex:             "LogTopicIndex",
+	LogAddressIndex:           "LogAddressIndex",
+	CallTraceSet:              "CallTraceSet",
+	CallFromIndex:             "CallFromIndex",
+	CallToIndex:               "CallToIndex",
+	TxLookupPrefix:            "BlockTransactionLookup",
+	BloomBitsPrefix:           "BloomBits",
+	PreimagePrefix:            "Preimage",
+	ConfigPrefix:              "Config",
+	BloomBitsIndexPrefix:      "BloomBitsIndex",
+	SyncStageProgress:         "SyncStage",
+	SyncStageUnwind:           "SyncStageUnwind",
+	CliqueBucket:              "Clique",
+	CliqueSeparateBucket:      "CliqueSeparate",
+	CliqueSnapshotBucket:      "CliqueSnapshot",
+	CliqueLastSnapshotBucket:  "CliqueLastSnapshot",
+	InodesBucket:              "Inode",
+	Senders:                   "TxSender",
+	HeadBlockKey:              "LastBlock",
+	InvalidBlock:              "InvalidBlock",
+	UncleanShutdown:           "UncleanShutdown",
+	Migrations:                "Migration",
+	Sequence:                  "Sequence",
+	HeadHeaderKey:             "LastHeader",
+}
+
 // Keys
 var (
 	//StorageModePruning - does node prune.
@@ -237,8 +289,6 @@ var (
 
 	DBSchemaVersionKey = []byte("dbVersion")
 
-	HeadHeaderKey = "LastHeader"
-
 	SnapshotHeadersHeadNumber = "SnapshotLastHeaderNumber"
 	SnapshotHeadersHeadHash   = "SnapshotLastHeaderHash"
 	SnapshotBodyHeadNumber    = "SnapshotLastBodyNumber"
@@ -253,7 +303,6 @@ var (
 // This list will be sorted in `init` method.
 // BucketsConfigs - can be used to find index in sorted version of Buckets list by name
 var Buckets = []string{
-	CurrentStateBucketOld2,
 	AccountsHistoryBucket,
 	StorageHistoryBucket,
 	CodeBucket,
@@ -297,7 +346,6 @@ var Buckets = []string{
 	TrieOfStorageBucket,
 	HashedAccountsBucket,
 	HashedStorageBucket,
-	IntermediateTrieHashBucketOld2,
 	BittorrentInfoBucket,
 	HeaderCanonicalBucket,
 	HeadersBucket,
@@ -306,11 +354,11 @@
 
 // DeprecatedBuckets - list of buckets which can be programmatically deleted - for example after migration
 var DeprecatedBuckets = []string{
+	IntermediateTrieHashBucketOld2,
+	CurrentStateBucketOld2,
 	SyncStageProgressOld1,
 	SyncStageUnwindOld1,
-	CurrentStateBucketOld1,
 	PlainStateBucketOld1,
-	IntermediateTrieHashBucketOld1,
 	HeaderPrefixOld,
 	CliqueBucket,
 }
diff --git a/common/dbutils/rename.go b/common/dbutils/rename.go
new file mode 100644
index 00000000000..0ff9dac08a1
--- /dev/null
+++ b/common/dbutils/rename.go
@@ -0,0 +1 @@
+package dbutils
diff --git a/ethdb/kv_mdbx.go b/ethdb/kv_mdbx.go
index 5bc562025df..c4b0ac0ff1f 100644
--- a/ethdb/kv_mdbx.go
+++ b/ethdb/kv_mdbx.go
@@ -1,5 +1,3 @@
-//+build mdbx
-
 package ethdb
 
 import (
@@ -539,7 +537,6 @@ func (db *MdbxKV) Update(ctx context.Context, f func(tx RwTx) error) (err error)
 
 func (tx *MdbxTx) CreateBucket(name string) error {
 	cnfCopy := tx.db.buckets[name]
-
 	var dcmp mdbx.CmpFunc
 	switch cnfCopy.CustomDupComparator {
 	case dbutils.DupCmpSuffix32:
@@ -563,6 +560,26 @@ func (tx *MdbxTx) CreateBucket(name string) error {
 		return nil
 	}
 
+	// if bucket with this name not found - check renamed one
+	rename := dbutils.Rename[name]
+
+	dbi, err = tx.tx.OpenDBI(rename, mdbx.DBAccede, nil, dcmp)
+	if err != nil && !mdbx.IsNotFound(err) {
+		return fmt.Errorf("create bucket: %s, %w", name, err)
+	}
+	if err == nil {
+		cnfCopy.DBI = dbutils.DBI(dbi)
+		var flags uint
+		flags, err = tx.tx.Flags(dbi)
+		if err != nil {
+			return err
+		}
+		cnfCopy.Flags = dbutils.BucketFlags(flags)
+
+		tx.db.buckets[name] = cnfCopy
+		return nil
+	}
+
 	// if bucket doesn't exists - create it
 	var flags = tx.db.buckets[name].Flags
 
@@ -579,7 +596,11 @@ func (tx *MdbxTx) CreateBucket(name string) error {
 		return fmt.Errorf("some not supported flag provided for bucket")
 	}
 
-	dbi, err = tx.tx.OpenDBI(name, nativeFlags, nil, dcmp)
+	if rename != "" {
+		dbi, err = tx.tx.OpenDBI(rename, nativeFlags, nil, dcmp)
+	} else {
+		dbi, err = tx.tx.OpenDBI(name, nativeFlags, nil, dcmp)
+	}
 	if err != nil {
 		return fmt.Errorf("create bucket: %s, %w", name, err)
 	}
@@ -1350,7 +1371,6 @@ func (c *MdbxCursor) Append(k []byte, v []byte) error {
 		}
 		return nil
 	}
-
 	if err := c.append(k, v); err != nil {
 		return fmt.Errorf("bucket: %s, %w", c.bucketName, err)
 	}
diff --git a/ethdb/mdbx/mdbx.go b/ethdb/mdbx/mdbx.go
index bde2ef80d21..84335ec2bbc 100644
--- a/ethdb/mdbx/mdbx.go
+++ b/ethdb/mdbx/mdbx.go
@@ -1,5 +1,3 @@
-// +build mdbx
-
 /*
 Package lmdb provides bindings to the lmdb C API.
 The package bindings are fairly low level and are designed to provide a minimal interface that prevents
diff --git a/ethdb/memory_database.go b/ethdb/memory_database.go
index 716b0695675..9b5f6077eed 100644
--- a/ethdb/memory_database.go
+++ b/ethdb/memory_database.go
@@ -41,7 +41,7 @@ func NewMemKV() RwKV {
 	// mdbx is too slow for our tests currently, so we keep
 	// lmdb as our in-mem db
 	// with mdbx tests time out, especially ./tests package
-	return NewLMDB().InMem().MustOpen()
+	return NewMDBX().InMem().MustOpen()
 	}
 }
 
diff --git a/ethdb/object_db_nomdbx.go b/ethdb/object_db_nomdbx.go
deleted file mode 100644
index 62b6ad1a385..00000000000
--- a/ethdb/object_db_nomdbx.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//+build !mdbx
-
-package ethdb
-
-func NewMDBX() LmdbOpts {
-	panic("to use MDBX, compile with -tags 'mdbx'")
-}
diff --git a/migrations/dupsort_state.go b/migrations/dupsort_state.go
index efa6e05836c..a6e78aa35f1 100644
--- a/migrations/dupsort_state.go
+++ b/migrations/dupsort_state.go
@@ -14,138 +14,6 @@ import (
 	"github.com/ledgerwatch/erigon/log"
 )
 
-var dupSortHashState = Migration{
-	Name: "dupsort_hash_state",
-	Up: func(db ethdb.Database, tmpdir string, progress []byte, OnLoadCommit etl.LoadCommitHandler) error {
-		if exists, err := db.(ethdb.BucketsMigrator).BucketExists(dbutils.CurrentStateBucketOld1); err != nil {
-			return err
-		} else if !exists {
-			return OnLoadCommit(db, nil, true)
-		}
-
-		if err := db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.CurrentStateBucketOld2); err != nil {
-			return err
-		}
-		extractFunc := func(k []byte, v []byte, next etl.ExtractNextFunc) error {
-			return next(k, k, v)
-		}
-
-		if err := etl.Transform(
-			"dupsort_hash_state",
-			db.(ethdb.HasTx).Tx().(ethdb.RwTx),
-			dbutils.CurrentStateBucketOld1,
-			dbutils.CurrentStateBucketOld2,
-			tmpdir,
-			extractFunc,
-			etl.IdentityLoadFunc,
-			etl.TransformArgs{},
-		); err != nil {
-			return err
-		}
-		if err := OnLoadCommit(db, nil, true); err != nil {
-			return err
-		}
-
-		if err := db.(ethdb.BucketsMigrator).DropBuckets(dbutils.CurrentStateBucketOld1); err != nil {
-			return err
-		}
-		return nil
-	},
-}
-
-var dupSortPlainState = Migration{
-	Name: "dupsort_plain_state",
-	Up: func(db ethdb.Database, tmpdir string, progress []byte, OnLoadCommit etl.LoadCommitHandler) error {
-		if exists, err := db.(ethdb.BucketsMigrator).BucketExists(dbutils.PlainStateBucketOld1); err != nil {
-			return err
-		} else if !exists {
-			return OnLoadCommit(db, nil, true)
-		}
-
-		if err := db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.PlainStateBucket); err != nil {
-			return err
-		}
-		extractFunc := func(k []byte, v []byte, next etl.ExtractNextFunc) error {
-			return next(k, k, v)
-		}
-
-		if err := etl.Transform(
-			"dupsort_plain_state",
-			db.(ethdb.HasTx).Tx().(ethdb.RwTx),
-			dbutils.PlainStateBucketOld1,
-			dbutils.PlainStateBucket,
-			tmpdir,
-			extractFunc,
-			etl.IdentityLoadFunc,
-			etl.TransformArgs{},
-		); err != nil {
-			return err
-		}
-		if err := OnLoadCommit(db, nil, true); err != nil {
-			return err
-		}
-
-		if err := db.(ethdb.BucketsMigrator).DropBuckets(dbutils.PlainStateBucketOld1); err != nil {
-			return err
-		}
-		return nil
-	},
-}
-
-var dupSortIH = Migration{
-	Name: "dupsort_intermediate_trie_hashes",
-	Up: func(db ethdb.Database, tmpdir string, progress []byte, OnLoadCommit etl.LoadCommitHandler) error {
-		if err := db.(ethdb.BucketsMigrator).ClearBuckets(
-			dbutils.IntermediateTrieHashBucketOld2,
-			dbutils.IntermediateTrieHashBucketOld1,
-			dbutils.TrieOfStorageBucket,
-			dbutils.TrieOfAccountsBucket); err != nil {
-			return err
-		}
-		if err := stages.SaveStageProgress(db, stages.IntermediateHashes, 0); err != nil {
-			return err
-		}
-		if err := stages.SaveStageUnwind(db, stages.IntermediateHashes, 0); err != nil {
-			return err
-		}
-		return OnLoadCommit(db, nil, true)
-	},
-}
-
-var clearIndices = Migration{
-	Name: "clear_log_indices7",
-	Up: func(db ethdb.Database, tmpdir string, progress []byte, OnLoadCommit etl.LoadCommitHandler) error {
-		if err := db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.LogAddressIndex, dbutils.LogTopicIndex); err != nil {
-			return err
-		}
-
-		if err := stages.SaveStageProgress(db, stages.LogIndex, 0); err != nil {
-			return err
-		}
-		if err := stages.SaveStageUnwind(db, stages.LogIndex, 0); err != nil {
-			return err
-		}
-
-		return OnLoadCommit(db, nil, true)
-	},
-}
-
-var resetIHBucketToRecoverDB = Migration{
-	Name: "reset_in_bucket_to_recover_db",
-	Up: func(db ethdb.Database, tmpdir string, progress []byte, OnLoadCommit etl.LoadCommitHandler) error {
-		if err := db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.IntermediateTrieHashBucketOld2); err != nil {
-			return err
-		}
-		if err := stages.SaveStageProgress(db, stages.IntermediateHashes, 0); err != nil {
-			return err
-		}
-		if err := stages.SaveStageUnwind(db, stages.IntermediateHashes, 0); err != nil {
-			return err
-		}
-		return OnLoadCommit(db, nil, true)
-	},
-}
-
 var splitHashStateBucket = Migration{
 	Name: "split_hash_state_bucket",
 	Up: func(db ethdb.Database, tmpdir string, progress []byte, CommitProgress etl.LoadCommitHandler) (err error) {
diff --git a/migrations/dupsort_state_test.go b/migrations/dupsort_state_test.go
index c2bd5efc76a..a6ea3eef7b6 100644
--- a/migrations/dupsort_state_test.go
+++ b/migrations/dupsort_state_test.go
@@ -1,116 +1 @@
 package migrations
-
-import (
-	"context"
-	"fmt"
-	"testing"
-
-	"github.com/ledgerwatch/erigon/common"
-	"github.com/ledgerwatch/erigon/common/dbutils"
-	"github.com/ledgerwatch/erigon/ethdb"
-	"github.com/stretchr/testify/require"
-)
-
-func TestDupSortHashState(t *testing.T) {
-	require, db := require.New(t), ethdb.NewTestDB(t)
-
-	err := db.RwKV().Update(context.Background(), func(tx ethdb.RwTx) error {
-		return tx.(ethdb.BucketMigrator).CreateBucket(dbutils.HashedStorageBucket)
-	})
-	require.NoError(err)
-
-	accKey := string(common.FromHex(fmt.Sprintf("%064x", 0)))
-	inc := string(common.FromHex("0000000000000001"))
-	storageKey := accKey + inc + accKey
-
-	err = db.Put(dbutils.HashedStorageBucket, []byte(storageKey), []byte{2})
-	require.NoError(err)
-
-	migrator := NewMigrator()
-	migrator.Migrations = []Migration{dupSortHashState}
-	err = migrator.Apply(db, "")
-	require.NoError(err)
-
-	// test high-level data access didn't change
-	i := 0
-	err = db.Walk(dbutils.HashedStorageBucket, nil, 0, func(k, v []byte) (bool, error) {
-		i++
-		return true, nil
-	})
-	require.NoError(err)
-	require.Equal(1, i)
-
-	v, err := db.Get(dbutils.HashedStorageBucket, []byte(storageKey))
-	require.NoError(err)
-	require.Equal([]byte{2}, v)
-
-	tx, err := db.Begin(context.Background(), ethdb.RW)
-	require.NoError(err)
-	defer tx.Rollback()
-
-	c, err := tx.(ethdb.HasTx).Tx().CursorDupSort(dbutils.HashedStorageBucket)
-	require.NoError(err)
-
-	// test low-level data layout
-	keyLen := common.HashLength + common.IncarnationLength
-	v, err = c.SeekBothRange([]byte(storageKey)[:keyLen], []byte(storageKey)[keyLen:])
-	require.NoError(err)
-	require.Equal([]byte(storageKey)[keyLen:], v[:common.HashLength])
-	require.Equal([]byte{2}, v[common.HashLength:])
-}
-
-func TestDupSortPlainState(t *testing.T) {
-	require, db := require.New(t), ethdb.NewTestDB(t)
-
-	err := db.RwKV().Update(context.Background(), func(tx ethdb.RwTx) error {
-		return tx.(ethdb.BucketMigrator).CreateBucket(dbutils.PlainStateBucketOld1)
-	})
-	require.NoError(err)
-
-	accKey := string(common.FromHex(fmt.Sprintf("%040x", 0)))
-	inc := string(common.FromHex("0000000000000001"))
-	storageKey := accKey + inc + string(common.FromHex(fmt.Sprintf("%064x", 0)))
-
-	err = db.Put(dbutils.PlainStateBucketOld1, []byte(accKey), []byte{1})
-	require.NoError(err)
-	err = db.Put(dbutils.PlainStateBucketOld1, []byte(storageKey), []byte{2})
-	require.NoError(err)
-
-	migrator := NewMigrator()
-	migrator.Migrations = []Migration{dupSortPlainState}
-	err = migrator.Apply(db, "")
-	require.NoError(err)
-
-	// test high-level data access didn't change
-	i := 0
-	err = db.Walk(dbutils.PlainStateBucket, nil, 0, func(k, v []byte) (bool, error) {
-		i++
-		return true, nil
-	})
-	require.NoError(err)
-	require.Equal(2, i)
-
-	v, err := db.Get(dbutils.PlainStateBucket, []byte(accKey))
-	require.NoError(err)
-	require.Equal([]byte{1}, v)
-
-	v, err = db.Get(dbutils.PlainStateBucket, []byte(storageKey))
-	require.NoError(err)
-	require.Equal([]byte{2}, v)
-
-	tx, err := db.Begin(context.Background(), ethdb.RW)
-	require.NoError(err)
-	defer tx.Rollback()
-
-	c, err := tx.(ethdb.HasTx).Tx().CursorDupSort(dbutils.PlainStateBucket)
-	require.NoError(err)
-	_, v, err = c.SeekExact([]byte(accKey))
-	require.NoError(err)
-	require.Equal([]byte{1}, v)
-
-	keyLen := common.AddressLength + common.IncarnationLength
-	v, err = c.SeekBothRange([]byte(storageKey)[:keyLen], []byte(storageKey)[keyLen:])
-	require.NoError(err)
-	require.Equal([]byte(storageKey)[keyLen:], v[:common.HashLength])
-	require.Equal([]byte{2}, v[common.HashLength:])
-}
diff --git a/migrations/migrations.go b/migrations/migrations.go
index a20e597a045..213285f7d4f 100644
--- a/migrations/migrations.go
+++ b/migrations/migrations.go
@@ -55,15 +55,6 @@ import (
 // - if you need migrate multiple buckets - create separate migration for each bucket
 // - write test where apply migration twice
 var migrations = []Migration{
-	stagesToUseNamedKeys,
-	unwindStagesToUseNamedKeys,
-	stagedsyncToUseStageBlockhashes,
-	unwindStagedsyncToUseStageBlockhashes,
-	dupSortHashState,
-	dupSortPlainState,
-	dupSortIH,
-	clearIndices,
-	resetIHBucketToRecoverDB,
 	receiptsCborEncode,
 	receiptsOnePerTx,
 	accChangeSetDupSort,
diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go
index 97ee501d15b..8193d6abad9 100644
--- a/turbo/cli/flags.go
+++ b/turbo/cli/flags.go
@@ -20,7 +20,7 @@ var (
 	DatabaseFlag = cli.StringFlag{
 		Name:  "database",
 		Usage: "Which database software to use? Currently supported values: lmdb|mdbx",
-		Value: "lmdb",
+		Value: "mdbx",
 	}
 	DatabaseVerbosityFlag = cli.IntFlag{
 		Name:  "database.verbosity",
diff --git a/turbo/trie/flatdb_sub_trie_loader.go b/turbo/trie/flatdb_sub_trie_loader.go
index f212b6405ca..83ced4339b4 100644
--- a/turbo/trie/flatdb_sub_trie_loader.go
+++ b/turbo/trie/flatdb_sub_trie_loader.go
@@ -603,7 +603,7 @@ func (fstl *FlatDbSubTrieLoader) LoadSubTries() (SubTries, error) {
 		defer fstl.tx.Rollback()
 	}
 	tx := fstl.tx
-	c, err := tx.Cursor(dbutils.CurrentStateBucketOld2)
+	c, err := tx.Cursor(dbutils.HashedAccountsBucket)
 	if err != nil {
 		return SubTries{}, err
 	}
diff --git a/turbo/trie/flatdb_sub_trie_loader_test.go b/turbo/trie/flatdb_sub_trie_loader_test.go
index 37fed0bc598..c8f3047976c 100644
--- a/turbo/trie/flatdb_sub_trie_loader_test.go
+++ b/turbo/trie/flatdb_sub_trie_loader_test.go
@@ -20,7 +20,7 @@ func TestResolve1(t *testing.T) {
 	require, assert, db := require.New(t), assert.New(t), ethdb.NewTestDB(t)
 	putStorage := func(k string, v string) {
-		err := db.Put(dbutils.CurrentStateBucketOld2, common.Hex2Bytes(k), common.Hex2Bytes(v))
+		err := db.Put(dbutils.HashedAccountsBucket, common.Hex2Bytes(k), common.Hex2Bytes(v))
 		require.NoError(err)
 	}
 	putStorage("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "")
@@ -41,7 +41,7 @@ func TestResolve2(t *testing.T) {
 	require, assert, db := require.New(t), assert.New(t), ethdb.NewTestDB(t)
 	putStorage := func(k string, v string) {
-		err := db.Put(dbutils.CurrentStateBucketOld2, common.Hex2Bytes(k), common.Hex2Bytes(v))
+		err := db.Put(dbutils.HashedAccountsBucket, common.Hex2Bytes(k), common.Hex2Bytes(v))
 		require.NoError(err)
 	}
 	putStorage("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "")
@@ -65,7 +65,7 @@ func TestResolve2Keep(t *testing.T) {
 	require, assert, db := require.New(t), assert.New(t), ethdb.NewTestDB(t)
 	putStorage := func(k string, v string) {
-		err := db.Put(dbutils.CurrentStateBucketOld2, common.Hex2Bytes(k), common.Hex2Bytes(v))
+		err := db.Put(dbutils.HashedAccountsBucket, common.Hex2Bytes(k), common.Hex2Bytes(v))
 		require.NoError(err)
 	}
 	putStorage("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "")
@@ -89,7 +89,7 @@ func TestResolve3Keep(t *testing.T) {
 	require, assert, db := require.New(t), assert.New(t), ethdb.NewTestDB(t)
 	putStorage := func(k string, v string) {
-		err := db.Put(dbutils.CurrentStateBucketOld2, common.Hex2Bytes(k), common.Hex2Bytes(v))
+		err := db.Put(dbutils.HashedAccountsBucket, common.Hex2Bytes(k), common.Hex2Bytes(v))
 		require.NoError(err)
 	}
 	putStorage("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "")
@@ -114,7 +114,7 @@ func TestTrieSubTrieLoader(t *testing.T) {
 	require, _, db := require.New(t), assert.New(t), ethdb.NewTestDB(t)
 	putStorage := func(k string, v string) {
-		err := db.Put(dbutils.CurrentStateBucketOld2, common.Hex2Bytes(k), common.Hex2Bytes(v))
+		err := db.Put(dbutils.HashedAccountsBucket, common.Hex2Bytes(k), common.Hex2Bytes(v))
 		require.NoError(err)
 	}
 	putStorage("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "")
@@ -145,8 +145,8 @@ func TestTwoStorageItems(t *testing.T) {
 	val1 := common.Hex2Bytes("02")
 	val2 := common.Hex2Bytes("03")
 
-	require.NoError(db.Put(dbutils.CurrentStateBucketOld2, key1, val1))
-	require.NoError(db.Put(dbutils.CurrentStateBucketOld2, key2, val2))
+	require.NoError(db.Put(dbutils.HashedAccountsBucket, key1, val1))
+	require.NoError(db.Put(dbutils.HashedAccountsBucket, key2, val2))
 	var branch fullNode
 	branch.Children[0x7] = NewShortNode(keybytesToHex(key1[1:]), valueNode(val1))
 	branch.Children[0xf] = NewShortNode(keybytesToHex(key2[1:]), valueNode(val2))
@@ -352,7 +352,7 @@ func TestIsSequence(t *testing.T) {
 func writeAccount(db ethdb.Putter, addrHash common.Hash, acc accounts.Account) error {
 	value := make([]byte, acc.EncodingLengthForStorage())
 	acc.EncodeForStorage(value)
-	if err := db.Put(dbutils.CurrentStateBucketOld2, addrHash[:], value); err != nil {
+	if err := db.Put(dbutils.HashedAccountsBucket, addrHash[:], value); err != nil {
 		return err
 	}
 	return nil