From 4145f32dcb0f394e24b1a5da92aafe99c7432527 Mon Sep 17 00:00:00 2001 From: joeycli Date: Sun, 24 Sep 2023 12:21:00 +0800 Subject: [PATCH] feat: active pbss on bsc fix: lint error fix: ut error fix: code review comments --- cmd/geth/pruneblock_test.go | 7 +- cmd/geth/snapshot.go | 2 + core/blockchain.go | 10 +- core/blockchain_snapshot_test.go | 6 +- core/blockchain_test.go | 746 +++---------------------- core/eip3529tests/eip3529_test_util.go | 5 +- core/genesis_test.go | 20 - core/rawdb/ancient_utils.go | 3 + core/state/database.go | 10 + core/state/statedb.go | 44 +- core/state/statedb_test.go | 4 - core/state_prefetcher_test.go | 13 +- eth/downloader/fetchers_concurrent.go | 40 +- eth/handler.go | 28 +- eth/protocols/trust/handler_test.go | 3 +- eth/sync.go | 2 + ethclient/ethclient_test.go | 5 +- trie/database.go | 47 +- trie/trie.go | 5 +- 19 files changed, 258 insertions(+), 742 deletions(-) diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go index a20ba01fab..8361559212 100644 --- a/cmd/geth/pruneblock_test.go +++ b/cmd/geth/pruneblock_test.go @@ -41,6 +41,7 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -136,7 +137,11 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai t.Fatalf("failed to create database with ancient backend") } defer db.Close() - genesis := gspec.MustCommit(db) + + triedb := trie.NewDatabase(db, nil) + defer triedb.Close() + + genesis := gspec.MustCommit(db, triedb) // Initialize a fresh chain with only a genesis block blockchain, err := core.NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index ffae740a1f..0d9e583e79 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -478,6 +478,7 @@ func pruneAllState(ctx *cli.Context) error { } chaindb := utils.MakeChainDatabase(ctx, stack, false, false) + defer chaindb.Close() pruner, err := pruner.NewAllPruner(chaindb) if err != nil { log.Error("Failed to open snapshot tree", "err", err) @@ -495,6 +496,7 @@ func verifyState(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true, false) + defer chaindb.Close() headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") diff --git a/core/blockchain.go b/core/blockchain.go index 08d30872f8..8c49be8311 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -162,7 +162,11 @@ type CacheConfig struct { // triedbConfig derives the configures for trie database.
func (c *CacheConfig) triedbConfig() *trie.Config { - config := &trie.Config{Preimages: c.Preimages} + config := &trie.Config{ + Cache: c.TrieCleanLimit, + Preimages: c.Preimages, + NoTries: c.NoTries, + } if c.StateScheme == rawdb.HashScheme { config.HashDB = &hashdb.Config{ CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, @@ -392,6 +396,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis var diskRoot common.Hash if bc.cacheConfig.SnapshotLimit > 0 { diskRoot = rawdb.ReadSnapshotRoot(bc.db) + } else if bc.triedb.Scheme() == rawdb.PathScheme { + _, diskRoot = rawdb.ReadAccountTrieNode(bc.db, nil) } if diskRoot != (common.Hash{}) { log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot) @@ -875,7 +881,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha log.Crit("Failed to rollback state", "err", err) // Shouldn't happen } } - log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) + log.Info("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) break } log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index f0000eaded..14d0309988 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -483,7 +483,8 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + for _, scheme := range []string{rawdb.HashScheme} { test := &crashSnapshotTest{ snapshotTestBasic{ scheme: scheme, @@ -525,7 +526,8 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C2 // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + for _, scheme := range []string{rawdb.HashScheme} { test := &crashSnapshotTest{ snapshotTestBasic{ scheme: scheme, diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 56b67eedaf..56a5c7763e 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -55,22 +55,13 @@ var ( // chain. Depending on the full flag, if creates either a full block chain or a // header only chain. The database and genesis specification for block generation // are also returned in case more test blocks are needed later. 
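The core/blockchain.go hunks above carry the substance of this patch: triedbConfig now forwards the clean-cache size, the preimage flag and the NoTries flag into trie.Config, and NewBlockChain falls back to the persisted account trie node to find a repair root when snapshots are disabled under the path scheme. Below is a minimal sketch of that root-selection step, assuming the same rawdb and trie APIs the hunk relies on; the helper name and package are illustrative and not part of the patch.

    package sketch

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    // repairRoot mirrors the branch added in the hunk above: it picks the state
    // root to rewind to when the head state is missing. With snapshots enabled
    // the snapshot journal root marks the last complete state; under the path
    // scheme the root of the persisted account trie node identifies the state
    // sitting in the disk layer.
    func repairRoot(db ethdb.Database, triedb *trie.Database, snapshotLimit int) common.Hash {
        var diskRoot common.Hash
        if snapshotLimit > 0 {
            diskRoot = rawdb.ReadSnapshotRoot(db)
        } else if triedb.Scheme() == rawdb.PathScheme {
            _, diskRoot = rawdb.ReadAccountTrieNode(db, nil)
        }
        return diskRoot
    }

If the returned root is non-zero, the chain is rewound to the block whose state root matches it, exactly as the "Head state missing, repairing" path in the hunk does.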
-<<<<<<< HEAD func newCanonical(engine consensus.Engine, n int, full bool, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) { -======= -func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *Genesis, *BlockChain, error) { ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) var ( genesis = &Genesis{ BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges, } ) -<<<<<<< HEAD -======= - // Initialize a fresh chain with only a genesis block - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) // Initialize a fresh chain with only a genesis block var ops []BlockChainOption @@ -99,7 +90,6 @@ func newGwei(n int64) *big.Int { } // Test fork of length N starting from block i -<<<<<<< HEAD func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) { // Copy old chain up to #i into a new db db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, pipeline) @@ -147,11 +137,6 @@ func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n func testFork(t *testing.T, blockchain *BlockChain, i, n int, full, pipeline bool, comparator func(td1, td2 *big.Int)) { // Copy old chain up to #i into a new db genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, pipeline) -======= -func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) { - // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatal("could not make new canonical in testFork", err) } @@ -270,7 +255,6 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error } return nil } -<<<<<<< HEAD func TestBlockImportVerification(t *testing.T) { length := 5 @@ -288,15 +272,6 @@ func TestBlockImportVerification(t *testing.T) { func TestLastBlock(t *testing.T) { genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, false) -======= -func TestLastBlock(t *testing.T) { - testLastBlock(t, rawdb.HashScheme) - testLastBlock(t, rawdb.PathScheme) -} - -func testLastBlock(t *testing.T, scheme string) { - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -313,13 +288,9 @@ func testLastBlock(t *testing.T, scheme string) { // Test inserts the blocks/headers after the fork choice rule is changed. // The chain is reorged to whatever specified. 
-func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) { +func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) { // Copy old chain up to #i into a new db -<<<<<<< HEAD genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, false) -======= - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatal("could not make new canonical in testFork", err) } @@ -367,7 +338,6 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b // Tests that given a starting canonical chain of a given size, it can be extended // with various length chains. func TestExtendCanonicalHeaders(t *testing.T) { -<<<<<<< HEAD testExtendCanonical(t, false, false) } @@ -381,21 +351,6 @@ func testExtendCanonical(t *testing.T, full, pipeline bool) { // Make first chain starting from genesis _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline) -======= - testExtendCanonical(t, false, rawdb.HashScheme) - testExtendCanonical(t, false, rawdb.PathScheme) -} -func TestExtendCanonicalBlocks(t *testing.T) { - testExtendCanonical(t, true, rawdb.HashScheme) - testExtendCanonical(t, true, rawdb.PathScheme) -} - -func testExtendCanonical(t *testing.T, full bool, scheme string) { - length := 5 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -408,51 +363,33 @@ func testExtendCanonical(t *testing.T, full bool, scheme string) { } } // Start fork from current height -<<<<<<< HEAD testFork(t, processor, length, 1, full, pipeline, better) testFork(t, processor, length, 2, full, pipeline, better) testFork(t, processor, length, 5, full, pipeline, better) testFork(t, processor, length, 10, full, pipeline, better) -======= - testFork(t, processor, length, 1, full, better, scheme) - testFork(t, processor, length, 2, full, better, scheme) - testFork(t, processor, length, 5, full, better, scheme) - testFork(t, processor, length, 10, full, better, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) } // Tests that given a starting canonical chain of a given size, it can be extended // with various length chains. 
-func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { - testExtendCanonicalAfterMerge(t, false, rawdb.HashScheme) - testExtendCanonicalAfterMerge(t, false, rawdb.PathScheme) -} -func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { - testExtendCanonicalAfterMerge(t, true, rawdb.HashScheme) - testExtendCanonicalAfterMerge(t, true, rawdb.PathScheme) -} +func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) } +func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, true) } -func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) { +func testExtendCanonicalAfterMerge(t *testing.T, full bool) { length := 5 // Make first chain starting from genesis -<<<<<<< HEAD _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, false) -======= - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, length, 1, full, scheme) - testInsertAfterMerge(t, processor, length, 10, full, scheme) + testInsertAfterMerge(t, processor, length, 1, full) + testInsertAfterMerge(t, processor, length, 10, full) } // Tests that given a starting canonical chain of a given size, creating shorter // forks do not take canonical ownership. -<<<<<<< HEAD func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false, false) } func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true, false) @@ -464,22 +401,6 @@ func testShorterFork(t *testing.T, full, pipeline bool) { // Make first chain starting from genesis _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline) -======= -func TestShorterForkHeaders(t *testing.T) { - testShorterFork(t, false, rawdb.HashScheme) - testShorterFork(t, false, rawdb.PathScheme) -} -func TestShorterForkBlocks(t *testing.T) { - testShorterFork(t, true, rawdb.HashScheme) - testShorterFork(t, true, rawdb.PathScheme) -} - -func testShorterFork(t *testing.T, full bool, scheme string) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -492,60 +413,40 @@ func testShorterFork(t *testing.T, full bool, scheme string) { } } // Sum of numbers must be less than `length` for this to be a shorter fork -<<<<<<< HEAD testFork(t, processor, 0, 3, full, pipeline, worse) testFork(t, processor, 0, 7, full, pipeline, worse) testFork(t, processor, 1, 1, full, pipeline, worse) testFork(t, processor, 1, 7, full, pipeline, worse) testFork(t, processor, 5, 3, full, pipeline, worse) testFork(t, processor, 5, 4, full, pipeline, worse) -======= - testFork(t, processor, 0, 3, full, worse, scheme) - testFork(t, processor, 0, 7, full, worse, scheme) - testFork(t, processor, 1, 1, full, worse, scheme) - testFork(t, processor, 1, 7, full, worse, scheme) - testFork(t, processor, 5, 3, full, worse, scheme) - testFork(t, processor, 5, 4, full, worse, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) } // Tests that given a starting canonical chain of a given size, creating shorter // forks do not take canonical ownership. 
-func TestShorterForkHeadersAfterMerge(t *testing.T) { - testShorterForkAfterMerge(t, false, rawdb.HashScheme) - testShorterForkAfterMerge(t, false, rawdb.PathScheme) -} -func TestShorterForkBlocksAfterMerge(t *testing.T) { - testShorterForkAfterMerge(t, true, rawdb.HashScheme) - testShorterForkAfterMerge(t, true, rawdb.PathScheme) -} +func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) } +func TestShorterForkBlocksAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, true) } -func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) { +func testShorterForkAfterMerge(t *testing.T, full bool) { length := 10 // Make first chain starting from genesis -<<<<<<< HEAD _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, false) -======= - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 3, full, scheme) - testInsertAfterMerge(t, processor, 0, 7, full, scheme) - testInsertAfterMerge(t, processor, 1, 1, full, scheme) - testInsertAfterMerge(t, processor, 1, 7, full, scheme) - testInsertAfterMerge(t, processor, 5, 3, full, scheme) - testInsertAfterMerge(t, processor, 5, 4, full, scheme) + testInsertAfterMerge(t, processor, 0, 3, full) + testInsertAfterMerge(t, processor, 0, 7, full) + testInsertAfterMerge(t, processor, 1, 1, full) + testInsertAfterMerge(t, processor, 1, 7, full) + testInsertAfterMerge(t, processor, 5, 3, full) + testInsertAfterMerge(t, processor, 5, 4, full) } // Tests that given a starting canonical chain of a given size, creating longer // forks do take canonical ownership. func TestLongerForkHeaders(t *testing.T) { -<<<<<<< HEAD testLongerFork(t, false, false) } func TestLongerForkBlocks(t *testing.T) { @@ -558,70 +459,44 @@ func testLongerFork(t *testing.T, full, pipeline bool) { // Make first chain starting from genesis _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline) -======= - testLongerFork(t, false, rawdb.HashScheme) - testLongerFork(t, false, rawdb.PathScheme) -} -func TestLongerForkBlocks(t *testing.T) { - testLongerFork(t, true, rawdb.HashScheme) - testLongerFork(t, true, rawdb.PathScheme) -} - -func testLongerFork(t *testing.T, full bool, scheme string) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 11, full, scheme) - testInsertAfterMerge(t, processor, 0, 15, full, scheme) - testInsertAfterMerge(t, processor, 1, 10, full, scheme) - testInsertAfterMerge(t, processor, 1, 12, full, scheme) - testInsertAfterMerge(t, processor, 5, 6, full, scheme) - testInsertAfterMerge(t, processor, 5, 8, full, scheme) + testInsertAfterMerge(t, processor, 0, 11, full) + testInsertAfterMerge(t, processor, 0, 15, full) + testInsertAfterMerge(t, processor, 1, 10, full) + testInsertAfterMerge(t, processor, 1, 12, full) + testInsertAfterMerge(t, processor, 5, 6, full) + testInsertAfterMerge(t, processor, 5, 8, full) } // Tests that given a starting canonical chain of a given size, creating longer // forks do take canonical ownership. 
-func TestLongerForkHeadersAfterMerge(t *testing.T) { - testLongerForkAfterMerge(t, false, rawdb.HashScheme) - testLongerForkAfterMerge(t, false, rawdb.PathScheme) -} -func TestLongerForkBlocksAfterMerge(t *testing.T) { - testLongerForkAfterMerge(t, true, rawdb.HashScheme) - testLongerForkAfterMerge(t, true, rawdb.PathScheme) -} +func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) } +func TestLongerForkBlocksAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, true) } -func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) { +func testLongerForkAfterMerge(t *testing.T, full bool) { length := 10 // Make first chain starting from genesis -<<<<<<< HEAD _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, false) -======= - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 11, full, scheme) - testInsertAfterMerge(t, processor, 0, 15, full, scheme) - testInsertAfterMerge(t, processor, 1, 10, full, scheme) - testInsertAfterMerge(t, processor, 1, 12, full, scheme) - testInsertAfterMerge(t, processor, 5, 6, full, scheme) - testInsertAfterMerge(t, processor, 5, 8, full, scheme) + testInsertAfterMerge(t, processor, 0, 11, full) + testInsertAfterMerge(t, processor, 0, 15, full) + testInsertAfterMerge(t, processor, 1, 10, full) + testInsertAfterMerge(t, processor, 1, 12, full) + testInsertAfterMerge(t, processor, 5, 6, full) + testInsertAfterMerge(t, processor, 5, 8, full) } // Tests that given a starting canonical chain of a given size, creating equal // forks do take canonical ownership. 
-<<<<<<< HEAD func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false, false) } func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true, true) @@ -633,22 +508,6 @@ func testEqualFork(t *testing.T, full, pipeline bool) { // Make first chain starting from genesis _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline) -======= -func TestEqualForkHeaders(t *testing.T) { - testEqualFork(t, false, rawdb.HashScheme) - testEqualFork(t, false, rawdb.PathScheme) -} -func TestEqualForkBlocks(t *testing.T) { - testEqualFork(t, true, rawdb.HashScheme) - testEqualFork(t, true, rawdb.PathScheme) -} - -func testEqualFork(t *testing.T, full bool, scheme string) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -661,58 +520,38 @@ func testEqualFork(t *testing.T, full bool, scheme string) { } } // Sum of numbers must be equal to `length` for this to be an equal fork -<<<<<<< HEAD testFork(t, processor, 0, 10, full, pipeline, equal) testFork(t, processor, 1, 9, full, pipeline, equal) testFork(t, processor, 2, 8, full, pipeline, equal) testFork(t, processor, 5, 5, full, pipeline, equal) testFork(t, processor, 6, 4, full, pipeline, equal) testFork(t, processor, 9, 1, full, pipeline, equal) -======= - testFork(t, processor, 0, 10, full, equal, scheme) - testFork(t, processor, 1, 9, full, equal, scheme) - testFork(t, processor, 2, 8, full, equal, scheme) - testFork(t, processor, 5, 5, full, equal, scheme) - testFork(t, processor, 6, 4, full, equal, scheme) - testFork(t, processor, 9, 1, full, equal, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) } // Tests that given a starting canonical chain of a given size, creating equal // forks do take canonical ownership. 
-func TestEqualForkHeadersAfterMerge(t *testing.T) { - testEqualForkAfterMerge(t, false, rawdb.HashScheme) - testEqualForkAfterMerge(t, false, rawdb.PathScheme) -} -func TestEqualForkBlocksAfterMerge(t *testing.T) { - testEqualForkAfterMerge(t, true, rawdb.HashScheme) - testEqualForkAfterMerge(t, true, rawdb.PathScheme) -} +func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) } +func TestEqualForkBlocksAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, true) } -func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) { +func testEqualForkAfterMerge(t *testing.T, full bool) { length := 10 // Make first chain starting from genesis -<<<<<<< HEAD _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, false) -======= - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 10, full, scheme) - testInsertAfterMerge(t, processor, 1, 9, full, scheme) - testInsertAfterMerge(t, processor, 2, 8, full, scheme) - testInsertAfterMerge(t, processor, 5, 5, full, scheme) - testInsertAfterMerge(t, processor, 6, 4, full, scheme) - testInsertAfterMerge(t, processor, 9, 1, full, scheme) + testInsertAfterMerge(t, processor, 0, 10, full) + testInsertAfterMerge(t, processor, 1, 9, full) + testInsertAfterMerge(t, processor, 2, 8, full) + testInsertAfterMerge(t, processor, 5, 5, full) + testInsertAfterMerge(t, processor, 6, 4, full) + testInsertAfterMerge(t, processor, 9, 1, full) } // Tests that chains missing links do not get accepted by the processor. -<<<<<<< HEAD func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false, false) } func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true, false) @@ -722,20 +561,6 @@ func TestBrokenBlockChain(t *testing.T) { func testBrokenChain(t *testing.T, full, pipeline bool) { // Make chain starting from genesis genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, pipeline) -======= -func TestBrokenHeaderChain(t *testing.T) { - testBrokenChain(t, false, rawdb.HashScheme) - testBrokenChain(t, false, rawdb.PathScheme) -} -func TestBrokenBlockChain(t *testing.T) { - testBrokenChain(t, true, rawdb.HashScheme) - testBrokenChain(t, true, rawdb.PathScheme) -} - -func testBrokenChain(t *testing.T, full bool, scheme string) { - // Make chain starting from genesis - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -757,7 +582,6 @@ func testBrokenChain(t *testing.T, full bool, scheme string) { // Tests that reorganising a long difficult chain after a short easy one // overwrites the canonical numbers and links in the database. 
-<<<<<<< HEAD func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false, false) } func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true, false) @@ -766,24 +590,10 @@ func TestReorgLongBlocks(t *testing.T) { func testReorgLong(t *testing.T, full, pipeline bool) { testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, pipeline) -======= -func TestReorgLongHeaders(t *testing.T) { - testReorgLong(t, false, rawdb.HashScheme) - testReorgLong(t, false, rawdb.PathScheme) -} -func TestReorgLongBlocks(t *testing.T) { - testReorgLong(t, true, rawdb.HashScheme) - testReorgLong(t, true, rawdb.PathScheme) -} - -func testReorgLong(t *testing.T, full bool, scheme string) { - testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) } // Tests that reorganising a short difficult chain after a long easy one // overwrites the canonical numbers and links in the database. -<<<<<<< HEAD func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false, false) } func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true, false) @@ -791,18 +601,6 @@ func TestReorgShortBlocks(t *testing.T) { } func testReorgShort(t *testing.T, full, pipeline bool) { -======= -func TestReorgShortHeaders(t *testing.T) { - testReorgShort(t, false, rawdb.HashScheme) - testReorgShort(t, false, rawdb.PathScheme) -} -func TestReorgShortBlocks(t *testing.T) { - testReorgShort(t, true, rawdb.HashScheme) - testReorgShort(t, true, rawdb.PathScheme) -} - -func testReorgShort(t *testing.T, full bool, scheme string) { ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) // Create a long easy chain vs. a short heavy one. Due to difficulty adjustment // we need a fairly long chain of blocks with different difficulties for a short // one to become heavier than a long one. The 96 is an empirical value. @@ -814,21 +612,12 @@ func testReorgShort(t *testing.T, full bool, scheme string) { for i := 0; i < len(diff); i++ { diff[i] = -9 } -<<<<<<< HEAD testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, pipeline) } func testReorg(t *testing.T, first, second []int64, td int64, full, pipeline bool) { // Create a pristine chain and database genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline) -======= - testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme) -} - -func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) { - // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -896,7 +685,6 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme } // Tests that the insertion functions detect banned hashes. 
-<<<<<<< HEAD func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false, false) } func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true, true) @@ -906,20 +694,6 @@ func TestBadBlockHashes(t *testing.T) { func testBadHashes(t *testing.T, full, pipeline bool) { // Create a pristine chain and database genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline) -======= -func TestBadHeaderHashes(t *testing.T) { - testBadHashes(t, false, rawdb.HashScheme) - testBadHashes(t, false, rawdb.PathScheme) -} -func TestBadBlockHashes(t *testing.T) { - testBadHashes(t, true, rawdb.HashScheme) - testBadHashes(t, true, rawdb.PathScheme) -} - -func testBadHashes(t *testing.T, full bool, scheme string) { - // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -948,7 +722,6 @@ func testBadHashes(t *testing.T, full bool, scheme string) { // Tests that bad hashes are detected on boot, and the chain rolled back to a // good state prior to the bad hash. -<<<<<<< HEAD func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false, false) } func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true, false) @@ -958,20 +731,6 @@ func TestReorgBadBlockHashes(t *testing.T) { func testReorgBadHashes(t *testing.T, full, pipeline bool) { // Create a pristine chain and database genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline) -======= -func TestReorgBadHeaderHashes(t *testing.T) { - testReorgBadHashes(t, false, rawdb.HashScheme) - testReorgBadHashes(t, false, rawdb.PathScheme) -} -func TestReorgBadBlockHashes(t *testing.T) { - testReorgBadHashes(t, true, rawdb.HashScheme) - testReorgBadHashes(t, true, rawdb.PathScheme) -} - -func testReorgBadHashes(t *testing.T, full bool, scheme string) { - // Create a pristine chain and database - genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -1001,7 +760,7 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string) { blockchain.Stop() // Create a new BlockChain and check that it rolled back the state. - ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create new chain manager: %v", err) } @@ -1021,7 +780,6 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string) { } // Tests chain insertions in the face of one entity containing an invalid nonce. 
-<<<<<<< HEAD func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false, false) } func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true, false) @@ -1032,21 +790,6 @@ func testInsertNonceError(t *testing.T, full bool, pipeline bool) { doTest := func(i int) { // Create a pristine chain and database genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline) -======= -func TestHeadersInsertNonceError(t *testing.T) { - testInsertNonceError(t, false, rawdb.HashScheme) - testInsertNonceError(t, false, rawdb.PathScheme) -} -func TestBlocksInsertNonceError(t *testing.T) { - testInsertNonceError(t, true, rawdb.HashScheme) - testInsertNonceError(t, true, rawdb.PathScheme) -} - -func testInsertNonceError(t *testing.T, full bool, scheme string) { - doTest := func(i int) { - // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -1101,11 +844,6 @@ func testInsertNonceError(t *testing.T, full bool, scheme string) { // Tests that fast importing a block chain produces the same chain data as the // classical full block processing. func TestFastVsFullChains(t *testing.T) { - testFastVsFullChains(t, rawdb.HashScheme) - testFastVsFullChains(t, rawdb.PathScheme) -} - -func testFastVsFullChains(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1138,7 +876,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { }) // Import the chain as an archive node for the comparison baseline archiveDb := rawdb.NewMemoryDatabase() - archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer archive.Stop() if n, err := archive.InsertChain(blocks); err != nil { @@ -1146,7 +884,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { } // Fast import the chain as a non-archive node to test fastDb := rawdb.NewMemoryDatabase() - fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer fast.Stop() headers := make([]*types.Header, len(blocks)) @@ -1165,8 +903,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { t.Fatalf("failed to create temp freezer db: %v", err) } defer ancientDb.Close() - - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancient.Stop() if n, err := ancient.InsertHeaderChain(headers); err != nil { @@ -1234,11 +971,6 @@ func testFastVsFullChains(t *testing.T, scheme string) { // Tests that various import methods move the chain head pointers to the correct // positions. 
func TestLightVsFastVsFullChainHeads(t *testing.T) { - testLightVsFastVsFullChainHeads(t, rawdb.HashScheme) - testLightVsFastVsFullChainHeads(t, rawdb.PathScheme) -} - -func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1284,7 +1016,6 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { archiveCaching := *defaultCacheConfig archiveCaching.TrieDirtyDisabled = true - archiveCaching.StateScheme = scheme archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if n, err := archive.InsertChain(blocks); err != nil { @@ -1299,7 +1030,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Import the chain as a non-archive node and ensure all pointers are updated fastDb := makeDb() defer fastDb.Close() - fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer fast.Stop() headers := make([]*types.Header, len(blocks)) @@ -1319,7 +1050,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Import the chain as a ancient-first node and ensure all pointers are updated ancientDb := makeDb() defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancient.Stop() if n, err := ancient.InsertHeaderChain(headers); err != nil { @@ -1338,7 +1069,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Import the chain as a light node and ensure all pointers are updated lightDb := makeDb() defer lightDb.Close() - light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if n, err := light.InsertHeaderChain(headers); err != nil { t.Fatalf("failed to insert header %d: %v", n, err) } @@ -1351,11 +1082,6 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Tests that chain reorganisations handle transaction removals and reinsertions. func TestChainTxReorgs(t *testing.T) { - testChainTxReorgs(t, rawdb.HashScheme) - testChainTxReorgs(t, rawdb.PathScheme) -} - -func testChainTxReorgs(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -1411,7 +1137,7 @@ func testChainTxReorgs(t *testing.T, scheme string) { }) // Import the chain. This runs all block validation rules. 
db := rawdb.NewMemoryDatabase() - blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if i, err := blockchain.InsertChain(chain); err != nil { t.Fatalf("failed to insert original chain[%d]: %v", i, err) } @@ -1470,11 +1196,6 @@ func testChainTxReorgs(t *testing.T, scheme string) { } func TestLogReorgs(t *testing.T) { - testLogReorgs(t, rawdb.HashScheme) - testLogReorgs(t, rawdb.PathScheme) -} - -func testLogReorgs(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1485,7 +1206,7 @@ func testLogReorgs(t *testing.T, scheme string) { signer = types.LatestSigner(gspec.Config) ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() rmLogsCh := make(chan RemovedLogsEvent) @@ -1530,18 +1251,13 @@ var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd // This test checks that log events and RemovedLogsEvent are sent // when the chain reorganizes. func TestLogRebirth(t *testing.T) { - testLogRebirth(t, rawdb.HashScheme) - testLogRebirth(t, rawdb.PathScheme) -} - -func testLogRebirth(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} signer = types.LatestSigner(gspec.Config) engine = ethash.NewFaker() - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) ) defer blockchain.Stop() @@ -1612,17 +1328,12 @@ func testLogRebirth(t *testing.T, scheme string) { // This test is a variation of TestLogRebirth. It verifies that log events are emitted // when a side chain containing log events overtakes the canonical chain. 
func TestSideLogRebirth(t *testing.T) { - testSideLogRebirth(t, rawdb.HashScheme) - testSideLogRebirth(t, rawdb.PathScheme) -} - -func testSideLogRebirth(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} signer = types.LatestSigner(gspec.Config) - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) ) defer blockchain.Stop() @@ -1707,11 +1418,6 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re } func TestReorgSideEvent(t *testing.T) { - testReorgSideEvent(t, rawdb.HashScheme) - testReorgSideEvent(t, rawdb.PathScheme) -} - -func testReorgSideEvent(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1721,7 +1427,7 @@ func testReorgSideEvent(t *testing.T, scheme string) { } signer = types.LatestSigner(gspec.Config) ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {}) @@ -1792,16 +1498,7 @@ done: // Tests if the canonical block can be fetched from the database during chain insertion. 
func TestCanonicalBlockRetrieval(t *testing.T) { -<<<<<<< HEAD _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, false) -======= - testCanonicalBlockRetrieval(t, rawdb.HashScheme) - testCanonicalBlockRetrieval(t, rawdb.PathScheme) -} - -func testCanonicalBlockRetrieval(t *testing.T, scheme string) { - _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -1845,12 +1542,8 @@ func testCanonicalBlockRetrieval(t *testing.T, scheme string) { } pend.Wait() } -func TestEIP155Transition(t *testing.T) { - testEIP155Transition(t, rawdb.HashScheme) - testEIP155Transition(t, rawdb.PathScheme) -} -func testEIP155Transition(t *testing.T, scheme string) { +func TestEIP155Transition(t *testing.T) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1909,7 +1602,7 @@ func testEIP155Transition(t *testing.T, scheme string) { } }) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() if _, err := blockchain.InsertChain(blocks); err != nil { @@ -1959,12 +1652,8 @@ func testEIP155Transition(t *testing.T, scheme string) { t.Errorf("have %v, want %v", have, want) } } -func TestEIP161AccountRemoval(t *testing.T) { - testEIP161AccountRemoval(t, rawdb.HashScheme) - testEIP161AccountRemoval(t, rawdb.PathScheme) -} -func testEIP161AccountRemoval(t *testing.T, scheme string) { +func TestEIP161AccountRemoval(t *testing.T) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -2002,7 +1691,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) { block.AddTx(tx) }) // account must exist pre eip 161 - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil { @@ -2035,11 +1724,6 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) { // // https://github.com/ethereum/go-ethereum/pull/15941 func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { - testBlockchainHeaderchainReorgConsistency(t, rawdb.HashScheme) - testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme) -} - -func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -2060,7 +1744,7 @@ func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) { } // Import the canonical and fork chain side by side, verifying the current block // and current header consistency - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", 
err) } @@ -2119,15 +1803,9 @@ func TestTrieForkGC(t *testing.T) { } } // Dereference all the recent tries and ensure no past trie is left in -<<<<<<< HEAD for i := 0; i < TestTriesInMemory; i++ { chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) -======= - for i := 0; i < TriesInMemory; i++ { - chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) - chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) } if nodes, _ := chain.TrieDB().Size(); nodes > 0 { t.Fatalf("stale tries still alive after garbase collection") @@ -2137,11 +1815,6 @@ func TestTrieForkGC(t *testing.T) { // Tests that doing large reorgs works even if the state associated with the // forking point is not available any more. func TestLargeReorgTrieGC(t *testing.T) { - testLargeReorgTrieGC(t, rawdb.HashScheme) - testLargeReorgTrieGC(t, rawdb.PathScheme) -} - -func testLargeReorgTrieGC(t *testing.T, scheme string) { // Generate the original common chain segment and the two competing forks engine := ethash.NewFaker() genesis := &Genesis{ @@ -2153,10 +1826,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) // Import the shared chain and the original canonical one - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) - defer db.Close() - - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2169,7 +1839,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { t.Fatalf("failed to insert original chain: %v", err) } // Ensure that the state associated with the forking point is pruned away - if chain.HasState(shared[len(shared)-1].Root()) { + if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil { t.Fatalf("common-but-old ancestor still cache") } // Import the competitor chain without exceeding the canonical's TD and ensure @@ -2178,7 +1848,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { t.Fatalf("failed to insert competitor chain: %v", err) } for i, block := range competitor[:len(competitor)-2] { - if chain.HasState(block.Root()) { + if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil { t.Fatalf("competitor %d: low TD chain became processed", i) } } @@ -2187,36 +1857,14 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil { t.Fatalf("failed to finalize competitor chain: %v", err) } -<<<<<<< HEAD for i, block := range competitor[:len(competitor)-TestTriesInMemory] { if node, err := chain.stateCache.TrieDB().Node(block.Root()); node != nil { t.Fatalf("competitor %d: competing chain state missing, err: %v", i, err) -======= - // In path-based trie database implementation, it will keep 128 diff + 1 disk - // layers, totally 129 latest states available. In hash-based it's 128. 
- states := TriesInMemory - if scheme == rawdb.PathScheme { - states = states + 1 - } - for i, block := range competitor[:len(competitor)-states] { - if chain.HasState(block.Root()) { - t.Fatalf("competitor %d: unexpected competing chain state", i) - } - } - for i, block := range competitor[len(competitor)-states:] { - if !chain.HasState(block.Root()) { - t.Fatalf("competitor %d: competing chain state missing", i) ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) } } } func TestBlockchainRecovery(t *testing.T) { - testBlockchainRecovery(t, rawdb.HashScheme) - testBlockchainRecovery(t, rawdb.PathScheme) -} - -func testBlockchainRecovery(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -2233,7 +1881,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { t.Fatalf("failed to create temp freezer db: %v", err) } defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) headers := make([]*types.Header, len(blocks)) for i, block := range blocks { @@ -2253,7 +1901,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash()) // Reopen broken blockchain again - ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancient.Stop() if num := ancient.CurrentBlock().Number.Uint64(); num != 0 { t.Errorf("head block mismatch: have #%v, want #%v", num, 0) @@ -2268,13 +1916,8 @@ func testBlockchainRecovery(t *testing.T, scheme string) { // This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain. func TestInsertReceiptChainRollback(t *testing.T) { - testInsertReceiptChainRollback(t, rawdb.HashScheme) - testInsertReceiptChainRollback(t, rawdb.PathScheme) -} - -func testInsertReceiptChainRollback(t *testing.T, scheme string) { // Generate forked chain. The returned BlockChain object is used to process the side chain blocks. - tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains(scheme) + tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains() if err != nil { t.Fatal(err) } @@ -2305,7 +1948,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) { } defer ancientDb.Close() - ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancientChain, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancientChain.Stop() // Import the canonical header chain. 
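Many of the test hunks in this file drop the upstream variants that run each case under both rawdb.HashScheme and rawdb.PathScheme, including the retention reasoning quoted in the removed comment above: hash-based storage keeps the last TriesInMemory (128) states, while the path-based database keeps those 128 diff layers plus the disk layer, one extra usable state. A small sketch of that arithmetic follows, under the assumption that the constant matches the TriesInMemory value used in these tests; the helper is illustrative only.

    package sketch

    import "github.com/ethereum/go-ethereum/core/rawdb"

    // triesInMemory mirrors the TriesInMemory constant referenced in the tests above.
    const triesInMemory = 128

    // retainedStates returns how many recent states the dropped upstream test
    // variants expect to stay available after garbage collection for a scheme.
    func retainedStates(scheme string) int {
        states := triesInMemory
        if scheme == rawdb.PathScheme {
            states++ // 128 diff layers + 1 disk layer = 129 usable states
        }
        return states
    }

This difference is why the upstream side indexes pruning boundaries with a scheme-dependent "states" value, while the HEAD side kept in this patch uses the fixed TestTriesInMemory offset.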
@@ -2350,11 +1993,6 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) { // - https://github.com/ethereum/go-ethereum/issues/18977 // - https://github.com/ethereum/go-ethereum/pull/18988 func TestLowDiffLongChain(t *testing.T) { - testLowDiffLongChain(t, rawdb.HashScheme) - testLowDiffLongChain(t, rawdb.PathScheme) -} - -func testLowDiffLongChain(t *testing.T, scheme string) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -2369,14 +2007,11 @@ func testLowDiffLongChain(t *testing.T, scheme string) { }) // Import the canonical chain - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) - defer diskdb.Close() - - chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } - defer chain.Stop() + defer chain.stopWithoutSaving() if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) @@ -2556,20 +2191,11 @@ func TestPrunedImportSideWithMerging(t *testing.T) { testSideImport(t, 1, -10, 1) } -func TestInsertKnownHeaders(t *testing.T) { - testInsertKnownChainData(t, "headers", rawdb.HashScheme) - testInsertKnownChainData(t, "headers", rawdb.PathScheme) -} -func TestInsertKnownReceiptChain(t *testing.T) { - testInsertKnownChainData(t, "receipts", rawdb.HashScheme) - testInsertKnownChainData(t, "receipts", rawdb.PathScheme) -} -func TestInsertKnownBlocks(t *testing.T) { - testInsertKnownChainData(t, "blocks", rawdb.HashScheme) - testInsertKnownChainData(t, "blocks", rawdb.PathScheme) -} +func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers") } +func TestInsertKnownReceiptChain(t *testing.T) { testInsertKnownChainData(t, "receipts") } +func TestInsertKnownBlocks(t *testing.T) { testInsertKnownChainData(t, "blocks") } -func testInsertKnownChainData(t *testing.T, typ string, scheme string) { +func testInsertKnownChainData(t *testing.T, typ string) { engine := ethash.NewFaker() genesis := &Genesis{ Config: params.TestChainConfig, @@ -2592,7 +2218,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) { } defer chaindb.Close() - chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2865,7 +2491,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i } // getLongAndShortChains returns two chains: A is longer, B is heavier. 
-func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) { +func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -2877,7 +2503,7 @@ func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err) } @@ -2923,12 +2549,7 @@ func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types // 3. Then there should be no canon mapping for the block at height X // 4. The forked block should still be retrievable by hash func TestReorgToShorterRemovesCanonMapping(t *testing.T) { - testReorgToShorterRemovesCanonMapping(t, rawdb.HashScheme) - testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme) -} - -func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) { - chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme) + chain, canonblocks, sideblocks, _, err := getLongAndShortChains() if err != nil { t.Fatal(err) } @@ -2966,12 +2587,7 @@ func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) { // as TestReorgToShorterRemovesCanonMapping, but applied on headerchain // imports -- that is, for fast sync func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) { - testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.HashScheme) - testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme) -} - -func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme string) { - chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme) + chain, canonblocks, sideblocks, _, err := getLongAndShortChains() if err != nil { t.Fatal(err) } @@ -3120,11 +2736,6 @@ func TestTransactionIndices(t *testing.T) { } func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { - testSkipStaleTxIndicesInSnapSync(t, rawdb.HashScheme) - testSkipStaleTxIndicesInSnapSync(t, rawdb.PathScheme) -} - -func testSkipStaleTxIndicesInSnapSync(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -3183,7 +2794,7 @@ func testSkipStaleTxIndicesInSnapSync(t *testing.T, scheme string) { // Import all blocks into ancient db, only HEAD-32 indices are kept. l := uint64(32) - chain, err := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) + chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3317,11 +2928,6 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) { // 2. Downloader starts to sync again // 3. 
The blocks fetched are all known and canonical blocks func TestSideImportPrunedBlocks(t *testing.T) { - testSideImportPrunedBlocks(t, rawdb.HashScheme) - testSideImportPrunedBlocks(t, rawdb.PathScheme) -} - -func testSideImportPrunedBlocks(t *testing.T, scheme string) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -3331,7 +2937,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { // Generate and import the canonical chain _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3340,30 +2946,15 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } -<<<<<<< HEAD lastPrunedIndex := len(blocks) - TestTriesInMemory - 1 lastPrunedBlock := blocks[lastPrunedIndex-1] -======= - // In path-based trie database implementation, it will keep 128 diff + 1 disk - // layers, totally 129 latest states available. In hash-based it's 128. - states := TriesInMemory - if scheme == rawdb.PathScheme { - states = TriesInMemory + 1 - } - lastPrunedIndex := len(blocks) - states - 1 - lastPrunedBlock := blocks[lastPrunedIndex] ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) // Verify pruning of lastPrunedBlock if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64()) } -<<<<<<< HEAD firstNonPrunedBlock := blocks[len(blocks)-TestTriesInMemory] -======= - firstNonPrunedBlock := blocks[len(blocks)-states] ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) // Verify firstNonPrunedBlock is not pruned if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) { t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64()) @@ -3385,11 +2976,6 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { // each transaction, so this works ok. The rework accumulated writes in memory // first, but the journal wiped the entire state object on create-revert. 
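The conflict resolved above keeps the BSC-side bookkeeping and drops the upstream note that the two schemes retain a different number of recent states. As a reference point, here is a minimal sketch of that distinction; retainedStates is a hypothetical helper (not part of the change), while TriesInMemory and rawdb.PathScheme are the identifiers used elsewhere in this package.

// retainedStates mirrors the removed upstream comment: the path-based scheme
// keeps TriesInMemory diff layers plus one disk layer (129 recent states with
// the default of 128), while the hash-based scheme keeps TriesInMemory in total.
func retainedStates(scheme string) int {
	if scheme == rawdb.PathScheme {
		return TriesInMemory + 1
	}
	return TriesInMemory
}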
func TestDeleteCreateRevert(t *testing.T) { - testDeleteCreateRevert(t, rawdb.HashScheme) - testDeleteCreateRevert(t, rawdb.PathScheme) -} - -func testDeleteCreateRevert(t *testing.T, scheme string) { var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") @@ -3441,7 +3027,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3460,11 +3046,6 @@ func testDeleteCreateRevert(t *testing.T, scheme string) { // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, // and then the new slots exist func TestDeleteRecreateSlots(t *testing.T) { - testDeleteRecreateSlots(t, rawdb.HashScheme) - testDeleteRecreateSlots(t, rawdb.PathScheme) -} - -func testDeleteRecreateSlots(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3554,7 +3135,7 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ Tracer: logger.NewJSONLogger(nil, os.Stdout), }, nil, nil) if err != nil { @@ -3588,11 +3169,6 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) { // regular value-transfer // Expected outcome is that _all_ slots are cleared from A func TestDeleteRecreateAccount(t *testing.T) { - testDeleteRecreateAccount(t, rawdb.HashScheme) - testDeleteRecreateAccount(t, rawdb.PathScheme) -} - -func testDeleteRecreateAccount(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3636,7 +3212,7 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ Tracer: logger.NewJSONLogger(nil, os.Stdout), }, nil, nil) if err != nil { @@ -3666,11 +3242,6 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) { // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, // and then the new slots exist func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { - testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.HashScheme) - testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.PathScheme) -} - -func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3811,7 +3382,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) { current = exp }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ //Debug: true, //Tracer: vm.NewJSONLogger(nil, os.Stdout), }, nil, nil) @@ -3869,14 +3440,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) { // to the destructset in case 
something is created "onto" an existing item. // We need to either roll back the snapDestructs, or not place it into snapDestructs // in the first place. -// - func TestInitThenFailCreateContract(t *testing.T) { - testInitThenFailCreateContract(t, rawdb.HashScheme) - testInitThenFailCreateContract(t, rawdb.PathScheme) -} - -func testInitThenFailCreateContract(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3949,7 +3513,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) { }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ //Debug: true, //Tracer: vm.NewJSONLogger(nil, os.Stdout), }, nil, nil) @@ -3986,7 +3550,6 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) { // access list transaction, which specifies a single slot access, and then // checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated // correctly. -<<<<<<< HEAD // TestEIP2718TransitionWithTestChainConfig tests EIP-2718 with TestChainConfig. func TestEIP2718TransitionWithTestChainConfig(t *testing.T) { @@ -4000,14 +3563,6 @@ func TestEIP2718TransitionWithParliaConfig(t *testing.T) { // testEIP2718TransitionWithConfig tests EIP02718 with given ChainConfig. func testEIP2718TransitionWithConfig(t *testing.T, config *params.ChainConfig) { -======= -func TestEIP2718Transition(t *testing.T) { - testEIP2718Transition(t, rawdb.HashScheme) - testEIP2718Transition(t, rawdb.PathScheme) -} - -func testEIP2718Transition(t *testing.T, scheme string) { ->>>>>>> 503f1f7ad (all: activate pbss as experimental feature (#26274)) var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") engine = ethash.NewFaker() @@ -4055,7 +3610,7 @@ func testEIP2718Transition(t *testing.T, scheme string) { }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -4085,11 +3640,6 @@ func testEIP2718Transition(t *testing.T, scheme string) { // gasFeeCap - gasTipCap < baseFee. // 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap). 
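The expectations listed above reduce to how the effective miner tip is computed under EIP-1559. A minimal, self-contained sketch of that arithmetic follows; effectiveTip is our name for the helper and is not defined by this change.

// effectiveTip returns the per-gas amount credited to the coinbase: the smaller
// of gasTipCap and gasFeeCap-baseFee. For a legacy transaction gasPrice serves
// as both caps, so the tip degenerates to gasPrice-baseFee.
func effectiveTip(gasTipCap, gasFeeCap, baseFee *big.Int) *big.Int {
	tip := new(big.Int).Sub(gasFeeCap, baseFee)
	if tip.Cmp(gasTipCap) > 0 {
		tip.Set(gasTipCap)
	}
	return tip
}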
func TestEIP1559Transition(t *testing.T) { - testEIP1559Transition(t, rawdb.HashScheme) - testEIP1559Transition(t, rawdb.PathScheme) -} - -func testEIP1559Transition(t *testing.T, scheme string) { var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") engine = ethash.NewFaker() @@ -4100,9 +3650,8 @@ func testEIP1559Transition(t *testing.T, scheme string) { addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) - config = *params.AllEthashProtocolChanges gspec = &Genesis{ - Config: &config, + Config: params.AllEthashProtocolChanges, Alloc: GenesisAlloc{ addr1: {Balance: funds}, addr2: {Balance: funds}, @@ -4149,7 +3698,7 @@ func testEIP1559Transition(t *testing.T, scheme string) { b.AddTx(tx) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -4231,11 +3780,6 @@ func testEIP1559Transition(t *testing.T, scheme string) { // Tests the scenario the chain is requested to another point with the missing state. // It expects the state is recovered and all relevant chain markers are set correctly. func TestSetCanonical(t *testing.T) { - testSetCanonical(t, rawdb.HashScheme) - testSetCanonical(t, rawdb.PathScheme) -} - -func testSetCanonical(t *testing.T, scheme string) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) var ( @@ -4258,10 +3802,7 @@ func testSetCanonical(t *testing.T, scheme string) { } gen.AddTx(tx) }) - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) - defer diskdb.Close() - - chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -4318,11 +3859,6 @@ func testSetCanonical(t *testing.T, scheme string) { // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted // correctly in case reorg is called. 
func TestCanonicalHashMarker(t *testing.T) { - testCanonicalHashMarker(t, rawdb.HashScheme) - testCanonicalHashMarker(t, rawdb.PathScheme) -} - -func testCanonicalHashMarker(t *testing.T, scheme string) { var cases = []struct { forkA int forkB int @@ -4370,7 +3906,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { _, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {}) // Initialize test chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -4726,116 +4262,6 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { } } -func TestDeleteThenCreate(t *testing.T) { - var ( - engine = ethash.NewFaker() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - factoryAddr = crypto.CreateAddress(address, 0) - funds = big.NewInt(1000000000000000) - ) - /* - contract Factory { - function deploy(bytes memory code) public { - address addr; - assembly { - addr := create2(0, add(code, 0x20), mload(code), 0) - if iszero(extcodesize(addr)) { - revert(0, 0) - } - } - } - } - */ - factoryBIN := common.Hex2Bytes("608060405234801561001057600080fd5b50610241806100206000396000f3fe608060405234801561001057600080fd5b506004361061002a5760003560e01c80627743601461002f575b600080fd5b610049600480360381019061004491906100d8565b61004b565b005b6000808251602084016000f59050803b61006457600080fd5b5050565b600061007b61007684610146565b610121565b905082815260208101848484011115610097576100966101eb565b5b6100a2848285610177565b509392505050565b600082601f8301126100bf576100be6101e6565b5b81356100cf848260208601610068565b91505092915050565b6000602082840312156100ee576100ed6101f5565b5b600082013567ffffffffffffffff81111561010c5761010b6101f0565b5b610118848285016100aa565b91505092915050565b600061012b61013c565b90506101378282610186565b919050565b6000604051905090565b600067ffffffffffffffff821115610161576101606101b7565b5b61016a826101fa565b9050602081019050919050565b82818337600083830152505050565b61018f826101fa565b810181811067ffffffffffffffff821117156101ae576101ad6101b7565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f830116905091905056fea2646970667358221220ea8b35ed310d03b6b3deef166941140b4d9e90ea2c92f6b41eb441daf49a59c364736f6c63430008070033") - - /* - contract C { - uint256 value; - constructor() { - value = 100; - } - function destruct() public payable { - selfdestruct(payable(msg.sender)); - } - receive() payable external {} - } - */ - contractABI := common.Hex2Bytes("6080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c63430008070033") - contractAddr := crypto.CreateAddress2(factoryAddr, [32]byte{}, crypto.Keccak256(contractABI)) - - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _ := GenerateChainWithGenesis(gspec, 
engine, 2, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - - // Block 1 - if i == 0 { - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - Data: factoryBIN, - }) - nonce++ - b.AddTx(tx) - - data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &factoryAddr, - Data: data, - }) - b.AddTx(tx) - nonce++ - } else { - // Block 2 - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &contractAddr, - Data: common.Hex2Bytes("2b68b9c6"), // destruct - }) - nonce++ - b.AddTx(tx) - - data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &factoryAddr, // re-creation - Data: data, - }) - b.AddTx(tx) - nonce++ - } - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - for _, block := range blocks { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - } -} - // TestTransientStorageReset ensures the transient storage is wiped correctly // between transactions. 
func TestTransientStorageReset(t *testing.T) { diff --git a/core/eip3529tests/eip3529_test_util.go b/core/eip3529tests/eip3529_test_util.go index abae9fb895..c2d63f349d 100644 --- a/core/eip3529tests/eip3529_test_util.go +++ b/core/eip3529tests/eip3529_test_util.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) func newGwei(n int64) *big.Int { @@ -42,7 +43,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng }, }, } - genesis = gspec.MustCommit(db) + genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil)) ) blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *core.BlockGen) { @@ -61,7 +62,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng // Import the canonical chain diskdb := rawdb.NewMemoryDatabase() - gspec.MustCommit(diskdb) + gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil)) chain, err := core.NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { diff --git a/core/genesis_test.go b/core/genesis_test.go index deba7e265f..9902c5a306 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -33,15 +33,6 @@ import ( "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) -func TestInvalidCliqueConfig(t *testing.T) { - block := DefaultGoerliGenesisBlock() - block.ExtraData = []byte{} - db := rawdb.NewMemoryDatabase() - if _, err := block.Commit(db, trie.NewDatabase(db, nil)); err == nil { - t.Fatal("Expected error on invalid clique config") - } -} - func TestSetupGenesis(t *testing.T) { testSetupGenesis(t, rawdb.HashScheme) testSetupGenesis(t, rawdb.PathScheme) @@ -102,17 +93,6 @@ func testSetupGenesis(t *testing.T, scheme string) { wantHash: customghash, wantConfig: customg.Config, }, - { - name: "custom block in DB, genesis == goerli", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - tdb := trie.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) - return SetupGenesisBlock(db, tdb, DefaultGoerliGenesisBlock()) - }, - wantErr: &GenesisMismatchError{Stored: customghash, New: params.GoerliGenesisHash}, - wantHash: params.GoerliGenesisHash, - wantConfig: params.GoerliChainConfig, - }, { name: "compatible config in DB", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 7b4e6e7f12..61c816dace 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -88,6 +88,9 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { infos = append(infos, info) case stateFreezerName: + if ReadStateScheme(db) != PathScheme { + continue + } datadir, err := db.AncientDatadir() if err != nil { return nil, err diff --git a/core/state/database.go b/core/state/database.go index 51f0940cc5..cc5dc73c77 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -281,6 +281,12 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre } func (db *cachingDB) CacheAccount(root common.Hash, t Trie) { + // Only the hash-scheme trie database supports the account cache: a path-scheme trie is + // bound to the layer it was opened against and may touch dirty data on the next access. + // This is related to the implementation of pathdb's Reader interface.
+ if db.TrieDB().Scheme() == rawdb.PathScheme { + return + } if db.accountTrieCache == nil { return } @@ -289,6 +295,10 @@ func (db *cachingDB) CacheAccount(root common.Hash, t Trie) { } func (db *cachingDB) CacheStorage(addrHash common.Hash, root common.Hash, t Trie) { + // ditto `CacheAccount` + if db.TrieDB().Scheme() == rawdb.PathScheme { + return + } if db.storageTrieCache == nil { return } diff --git a/core/state/statedb.go b/core/state/statedb.go index 36bf442c25..a218922b95 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1608,6 +1608,22 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc if root != types.EmptyRootHash { s.db.CacheAccount(root, s.trie) } + + origin := s.originalRoot + if origin == (common.Hash{}) { + origin = types.EmptyRootHash + } + + if root != origin { + start := time.Now() + if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { + return err + } + s.originalRoot = root + if metrics.EnabledExpensive { + s.TrieDBCommits += time.Since(start) + } + } } for _, postFunc := range postCommitFuncs { @@ -1736,20 +1752,20 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc if root == (common.Hash{}) { root = types.EmptyRootHash } - origin := s.originalRoot - if origin == (common.Hash{}) { - origin = types.EmptyRootHash - } - if root != origin { - start := time.Now() - if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { - return common.Hash{}, nil, err - } - s.originalRoot = root - if metrics.EnabledExpensive { - s.TrieDBCommits += time.Since(start) - } - } + //origin := s.originalRoot + //if origin == (common.Hash{}) { + // origin = types.EmptyRootHash + //} + //if root != origin { + // start := time.Now() + // if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { + // return common.Hash{}, nil, err + // } + // s.originalRoot = root + // if metrics.EnabledExpensive { + // s.TrieDBCommits += time.Since(start) + // } + //} // Clear all internal flags at the end of commit operation. 
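The guards added to CacheAccount and CacheStorage above make trie reuse conditional on the state scheme. The same rule expressed as a predicate, purely as an illustrative sketch; canCacheTries is a hypothetical helper and not part of the change.

// canCacheTries reports whether committed tries may be cached for later reuse.
// A hash-scheme trie resolves nodes by hash alone, so reuse is safe; a
// path-scheme trie is bound to the pathdb layer it was opened against and
// could observe dirty data when accessed again, hence it is never cached.
func (db *cachingDB) canCacheTries() bool {
	return db.TrieDB().Scheme() != rawdb.PathScheme
}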
s.accounts = make(map[common.Hash][]byte) s.storages = make(map[common.Hash]map[common.Hash][]byte) diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 50d485dae3..c61f63198a 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -19,7 +19,6 @@ package state import ( "bytes" "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -718,9 +717,6 @@ func TestCommitCopy(t *testing.T) { if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("unexpected storage slot: have %x", val) } - if !errors.Is(copied.Error(), trie.ErrCommitted) { - t.Fatalf("unexpected state error, %v", copied.Error()) - } } // TestDeleteCreateRevert tests a weird state transition corner case that we hit diff --git a/core/state_prefetcher_test.go b/core/state_prefetcher_test.go index 30d90a4898..b7224a0b36 100644 --- a/core/state_prefetcher_test.go +++ b/core/state_prefetcher_test.go @@ -1,16 +1,15 @@ package core import ( - "math/big" - "testing" - "time" - "bytes" "context" "errors" "fmt" + "math/big" "runtime/pprof" "strings" + "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" @@ -20,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" "github.com/google/pprof/profile" ) @@ -37,7 +37,8 @@ func TestPrefetchLeaking(t *testing.T) { Alloc: GenesisAlloc{address: {Balance: funds}}, BaseFee: big.NewInt(params.InitialBaseFee), } - genesis = gspec.MustCommit(gendb) + triedb = trie.NewDatabase(gendb, nil) + genesis = gspec.MustCommit(gendb, triedb) signer = types.LatestSigner(gspec.Config) ) blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1, func(i int, block *BlockGen) { @@ -51,7 +52,7 @@ func TestPrefetchLeaking(t *testing.T) { } }) archiveDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(archiveDb) + gspec.MustCommit(archiveDb, triedb) archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer archive.Stop() diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go index 649aa27615..2e485d028d 100644 --- a/eth/downloader/fetchers_concurrent.go +++ b/eth/downloader/fetchers_concurrent.go @@ -92,6 +92,10 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { }() ordering := make(map[*eth.Request]int) timeouts := prque.New[int64, *eth.Request](func(data *eth.Request, index int) { + if index < 0 { + delete(ordering, data) + return + } ordering[data] = index }) @@ -245,14 +249,16 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { req.Close() if index, live := ordering[req]; live { - timeouts.Remove(index) - if index == 0 { - if !timeout.Stop() { - <-timeout.C - } - if timeouts.Size() > 0 { - _, exp := timeouts.Peek() - timeout.Reset(time.Until(time.Unix(0, -exp))) + if index >= 0 && index < timeouts.Size() { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } } } delete(ordering, req) @@ -333,14 +339,16 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { // reschedule the timeout timer. 
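The timeout-queue bookkeeping is now defensive about the recorded index, both where a failed request is retired (above) and where a delivered response is handled (below). A compressed sketch of the shared pattern, with the timer rescheduling left out; removeTimeout is a hypothetical helper, and the assumption that the index callback reports a negative index on removal is drawn from the guard added at the top of this hunk.

// removeTimeout drops a request's timeout entry only while its recorded index
// still points inside the queue, and forgets the request either way, so a
// stale slot can never be handed to Remove.
func removeTimeout(timeouts *prque.Prque[int64, *eth.Request], ordering map[*eth.Request]int, req *eth.Request) {
	if index, live := ordering[req]; live {
		if index >= 0 && index < timeouts.Size() {
			timeouts.Remove(index)
		}
		delete(ordering, req)
	}
}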
index, live := ordering[res.Req] if live { - timeouts.Remove(index) - if index == 0 { - if !timeout.Stop() { - <-timeout.C - } - if timeouts.Size() > 0 { - _, exp := timeouts.Peek() - timeout.Reset(time.Until(time.Unix(0, -exp))) + if index >= 0 && index < timeouts.Size() { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } } } delete(ordering, res.Req) diff --git a/eth/handler.go b/eth/handler.go index 1ff9823e2d..d081c76266 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -164,6 +164,7 @@ type handler struct { // channels for fetcher, syncer, txsyncLoop quitSync chan struct{} + stopCh chan struct{} chainSync *chainSyncer wg sync.WaitGroup @@ -198,6 +199,7 @@ func newHandler(config *handlerConfig) (*handler, error) { quitSync: make(chan struct{}), handlerDoneCh: make(chan struct{}), handlerStartCh: make(chan struct{}), + stopCh: make(chan struct{}), } if config.Sync == downloader.FullSync { // The database seems empty as the current block is the genesis. Yet the snap @@ -365,6 +367,8 @@ func (h *handler) protoTracker() { <-h.handlerDoneCh } return + case <-h.stopCh: + return } } } @@ -729,6 +733,8 @@ func (h *handler) startMaliciousVoteMonitor() { h.maliciousVoteMonitor.ConflictDetect(event.Vote, pendingBlockNumber) case <-h.voteMonitorSub.Err(): return + case <-h.stopCh: + return } } } @@ -743,7 +749,7 @@ func (h *handler) Stop() { h.voteMonitorSub.Unsubscribe() } } - + close(h.stopCh) // Quit chainSync and txsync64. // After this is done, no new peers will be accepted. close(h.quitSync) @@ -908,10 +914,18 @@ func (h *handler) BroadcastVote(vote *types.VoteEnvelope) { func (h *handler) minedBroadcastLoop() { defer h.wg.Done() - for obj := range h.minedBlockSub.Chan() { - if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { - h.BroadcastBlock(ev.Block, true) // First propagate block to peers - h.BroadcastBlock(ev.Block, false) // Only then announce to the rest + for { + select { + case obj := <-h.minedBlockSub.Chan(): + if obj == nil { + continue + } + if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { + h.BroadcastBlock(ev.Block, true) // First propagate block to peers + h.BroadcastBlock(ev.Block, false) // Only then announce to the rest + } + case <-h.stopCh: + return } } } @@ -925,6 +939,8 @@ func (h *handler) txBroadcastLoop() { h.BroadcastTransactions(event.Txs) case <-h.txsSub.Err(): return + case <-h.stopCh: + return } } } @@ -938,6 +954,8 @@ func (h *handler) txReannounceLoop() { h.ReannounceTransactions(event.Txs) case <-h.reannoTxsSub.Err(): return + case <-h.stopCh: + return } } } diff --git a/eth/protocols/trust/handler_test.go b/eth/protocols/trust/handler_test.go index 55252c0d63..144f4e602a 100644 --- a/eth/protocols/trust/handler_test.go +++ b/eth/protocols/trust/handler_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -53,7 +54,7 @@ func newTestBackendWithGenerator(blocks int) *testBackend { BaseFee: big.NewInt(0), } copy(genspec.ExtraData[32:], testAddr[:]) - genesis := genspec.MustCommit(db) + genesis := genspec.MustCommit(db, trie.NewDatabase(db, nil)) chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) generator := func(i int, block *core.BlockGen) { diff --git a/eth/sync.go b/eth/sync.go index 5fb8c2be2a..8e570ca5b4 100644 --- 
a/eth/sync.go +++ b/eth/sync.go @@ -132,6 +132,8 @@ func (cs *chainSyncer) loop() { <-cs.doneCh } return + case <-cs.handler.stopCh: + return } } } diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 3ee8aa04bc..45e0e9f413 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" ) // Verify that Client implements the ethereum interfaces. @@ -316,7 +317,7 @@ func generateTestChain() []*types.Block { signer := types.HomesteadSigner{} // Create a database pre-initialize with a genesis block db := rawdb.NewMemoryDatabase() - genesis.MustCommit(db) + genesis.MustCommit(db, trie.NewDatabase(db, nil)) chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil, core.EnablePersistDiff(860000)) generate := func(i int, block *core.BlockGen) { block.OffsetTime(5) @@ -352,7 +353,7 @@ func generateTestChain() []*types.Block { } } } - gblock := genesis.MustCommit(db) + gblock := genesis.MustCommit(db, trie.NewDatabase(db, nil)) engine := ethash.NewFaker() blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, testBlockNum, generate) blocks = append([]*types.Block{gblock}, blocks...) diff --git a/trie/database.go b/trie/database.go index 6661c7b2ec..7bad532dde 100644 --- a/trie/database.go +++ b/trie/database.go @@ -18,8 +18,10 @@ package trie import ( "errors" + "strings" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" @@ -31,7 +33,8 @@ import ( // Config defines all necessary options for database. type Config struct { NoTries bool - Preimages bool // Flag whether the preimage of node key is recorded + Preimages bool // Flag whether the preimage of node key is recorded + Cache int HashDB *hashdb.Config // Configs for hash-based scheme PathDB *pathdb.Config // Configs for experimental path-based scheme @@ -104,8 +107,22 @@ func prepare(diskdb ethdb.Database, config *Config) *Database { // the legacy hash-based scheme is used by default. func NewDatabase(diskdb ethdb.Database, config *Config) *Database { // Sanitize the config and use the default one if it's not specified. + dbScheme := rawdb.ReadStateScheme(diskdb) if config == nil { - config = HashDefaults + if dbScheme == rawdb.PathScheme { + config = &Config{ + PathDB: pathdb.Defaults, + } + } else { + config = HashDefaults + } + } + if config.PathDB == nil && config.HashDB == nil { + if dbScheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults + } else { + config.HashDB = hashdb.Defaults + } } var preimages *preimageStore if config.Preimages { @@ -116,12 +133,30 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database { diskdb: diskdb, preimages: preimages, } - if config.HashDB != nil && config.PathDB != nil { - log.Crit("Both 'hash' and 'path' mode are configured") - } - if config.PathDB != nil { + /* + * 1. First, initialize db according to the user config + * 2. Second, initialize the db according to the scheme already used by db + * 3. 
Last, fall back to the default, namely the hash scheme. */ + if config.HashDB != nil { + if rawdb.ReadStateScheme(diskdb) == rawdb.PathScheme { + log.Warn("Incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme) + } + db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) + } else if config.PathDB != nil { + if rawdb.ReadStateScheme(diskdb) == rawdb.HashScheme { + log.Warn("Incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme) + } + db.backend = pathdb.New(diskdb, config.PathDB) + } else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 { + if config.PathDB == nil { + config.PathDB = pathdb.Defaults + } db.backend = pathdb.New(diskdb, config.PathDB) } else { + if config.HashDB == nil { + config.HashDB = hashdb.Defaults + } db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) } return db diff --git a/trie/trie.go b/trie/trie.go index 47bdb39548..d19cb31063 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -610,7 +610,10 @@ func (t *Trie) Hash() common.Hash { func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { defer t.tracer.reset() defer func() { - t.committed = true + // StateDB caches this trie and reuses it for later reads and writes; leaving the + // committed flag set would block the cached trie from accessing its nodes, so + // reset it here instead. + t.committed = false }() // Trie is empty and can be classified into two types of situations: // (a) The trie was empty and no update happens => return nil