diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index f0e5d2619a33..72aace1c63a6 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -35,7 +35,6 @@ import (
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/trie"
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -261,9 +260,6 @@ func importChain(ctx *cli.Context) error {
 	}
 	fmt.Println(ioStats)
 
-	fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
-	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
-
 	// Print the memory statistics used by the importing
 	mem := new(runtime.MemStats)
 	runtime.ReadMemStats(mem)
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index a331abc9fd5e..f966905a92d4 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -102,7 +102,6 @@ var (
 		utils.CacheDatabaseFlag,
 		utils.CacheTrieFlag,
 		utils.CacheGCFlag,
-		utils.TrieCacheGenFlag,
 		utils.ListenPortFlag,
 		utils.MaxPeersFlag,
 		utils.MaxPendingPeersFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 0338e447e493..a4787fff2b95 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -139,7 +139,6 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.CacheDatabaseFlag,
 			utils.CacheTrieFlag,
 			utils.CacheGCFlag,
-			utils.TrieCacheGenFlag,
 		},
 	},
 	{
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index deb6df364ccf..b648f958b544 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -37,7 +37,6 @@ import (
 	"github.com/ethereum/go-ethereum/consensus/clique"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/dashboard"
@@ -350,11 +349,6 @@ var (
 		Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
 		Value: 25,
 	}
-	TrieCacheGenFlag = cli.IntFlag{
-		Name: "trie-cache-gens",
-		Usage: "Number of trie node generations to keep in memory",
-		Value: int(state.MaxTrieCacheGen),
-	}
 	// Miner settings
 	MiningEnabledFlag = cli.BoolFlag{
 		Name: "mine",
@@ -1432,10 +1426,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 			cfg.MinerGasPrice = big.NewInt(1)
 		}
 	}
-	// TODO(fjl): move trie cache generations into config
-	if gen := ctx.GlobalInt(TrieCacheGenFlag.Name); gen > 0 {
-		state.MaxTrieCacheGen = uint16(gen)
-	}
 }
 
 // SetDashboardConfig applies dashboard related command line flags to the config.
diff --git a/core/blockchain.go b/core/blockchain.go
index d59ee99cd904..c4481588f993 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -342,7 +342,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	if block == nil {
 		return fmt.Errorf("non existent block [%x…]", hash[:4])
 	}
-	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
+	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
 		return err
 	}
 	// If all checks out, manually set the head block
diff --git a/core/state/database.go b/core/state/database.go
index ce085747a6a4..8798b738065f 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -18,7 +18,6 @@ package state
 
 import (
 	"fmt"
-	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -26,14 +25,7 @@ import (
 	lru "github.com/hashicorp/golang-lru"
 )
 
-// Trie cache generation limit after which to evict trie nodes from memory.
-var MaxTrieCacheGen = uint16(120)
-
 const (
-	// Number of past tries to keep. This value is chosen such that
-	// reasonable chain reorg depths will hit an existing trie.
-	maxPastTries = 12
-
 	// Number of codehash->size associations to keep.
 	codeSizeCacheSize = 100000
 )
@@ -59,28 +51,61 @@ type Database interface {
 	TrieDB() *trie.Database
 }
 
-// Trie is a Ethereum Merkle Trie.
+// Trie is an Ethereum Merkle Patricia trie.
 type Trie interface {
+	// GetKey returns the sha3 preimage of a hashed key that was previously used
+	// to store a value.
+	//
+	// TODO(fjl): remove this when SecureTrie is removed
+	GetKey([]byte) []byte
+
+	// TryGet returns the value for key stored in the trie. The value bytes must
+	// not be modified by the caller. If a node was not found in the database, a
+	// trie.MissingNodeError is returned.
 	TryGet(key []byte) ([]byte, error)
+
+	// TryUpdate associates key with value in the trie. If value has length zero, any
+	// existing value is deleted from the trie. The value bytes must not be modified
+	// by the caller while they are stored in the trie. If a node was not found in the
+	// database, a trie.MissingNodeError is returned.
 	TryUpdate(key, value []byte) error
+
+	// TryDelete removes any existing value for key from the trie. If a node was not
+	// found in the database, a trie.MissingNodeError is returned.
 	TryDelete(key []byte) error
-	Commit(onleaf trie.LeafCallback) (common.Hash, error)
+
+	// Hash returns the root hash of the trie. It does not write to the database and
+	// can be used even if the trie doesn't have one.
 	Hash() common.Hash
+
+	// Commit writes all nodes to the trie's memory database, tracking the internal
+	// and external (for account tries) references.
+	Commit(onleaf trie.LeafCallback) (common.Hash, error)
+
+	// NodeIterator returns an iterator that returns nodes of the trie. Iteration
+	// starts at the key after the given start key.
 	NodeIterator(startKey []byte) trie.NodeIterator
-	GetKey([]byte) []byte // TODO(fjl): remove this when SecureTrie is removed
+
+	// Prove constructs a Merkle proof for key. The result contains all encoded nodes
+	// on the path to the value at key. The value itself is also included in the last
+	// node and can be retrieved by verifying the proof.
+	//
+	// If the trie does not contain a value for key, the returned proof contains all
+	// nodes of the longest existing prefix of the key (at least the root), ending
+	// with the node that proves the absence of the key.
 	Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error
 }
 
 // NewDatabase creates a backing store for state. The returned database is safe for
-// concurrent use and retains a few recent expanded trie nodes in memory. To keep
-// more historical state in memory, use the NewDatabaseWithCache constructor.
+// concurrent use, but does not retain any recent trie nodes in memory. To keep some
+// historical state in memory, use the NewDatabaseWithCache constructor.
 func NewDatabase(db ethdb.Database) Database {
 	return NewDatabaseWithCache(db, 0)
 }
 
-// NewDatabase creates a backing store for state. The returned database is safe for
-// concurrent use and retains both a few recent expanded trie nodes in memory, as
-// well as a lot of collapsed RLP trie nodes in a large memory cache.
+// NewDatabaseWithCache creates a backing store for state. The returned database
+// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
+// large memory cache.
 func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
 	csc, _ := lru.New(codeSizeCacheSize)
 	return &cachingDB{
@@ -91,50 +116,22 @@ func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
 
 type cachingDB struct {
 	db *trie.Database
-	mu sync.Mutex
-	pastTries []*trie.SecureTrie
 	codeSizeCache *lru.Cache
 }
 
-// OpenTrie opens the main account trie.
+// OpenTrie opens the main account trie at a specific root hash.
 func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
-	db.mu.Lock()
-	defer db.mu.Unlock()
-
-	for i := len(db.pastTries) - 1; i >= 0; i-- {
-		if db.pastTries[i].Hash() == root {
-			return cachedTrie{db.pastTries[i].Copy(), db}, nil
-		}
-	}
-	tr, err := trie.NewSecure(root, db.db, MaxTrieCacheGen)
-	if err != nil {
-		return nil, err
-	}
-	return cachedTrie{tr, db}, nil
-}
-
-func (db *cachingDB) pushTrie(t *trie.SecureTrie) {
-	db.mu.Lock()
-	defer db.mu.Unlock()
-
-	if len(db.pastTries) >= maxPastTries {
-		copy(db.pastTries, db.pastTries[1:])
-		db.pastTries[len(db.pastTries)-1] = t
-	} else {
-		db.pastTries = append(db.pastTries, t)
-	}
+	return trie.NewSecure(root, db.db)
 }
 
 // OpenStorageTrie opens the storage trie of an account.
 func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
-	return trie.NewSecure(root, db.db, 0)
+	return trie.NewSecure(root, db.db)
 }
 
 // CopyTrie returns an independent copy of the given trie.
 func (db *cachingDB) CopyTrie(t Trie) Trie {
 	switch t := t.(type) {
-	case cachedTrie:
-		return cachedTrie{t.SecureTrie.Copy(), db}
 	case *trie.SecureTrie:
 		return t.Copy()
 	default:
@@ -164,21 +161,3 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro
 func (db *cachingDB) TrieDB() *trie.Database {
 	return db.db
 }
-
-// cachedTrie inserts its trie into a cachingDB on commit.
-type cachedTrie struct {
-	*trie.SecureTrie
-	db *cachingDB
-}
-
-func (m cachedTrie) Commit(onleaf trie.LeafCallback) (common.Hash, error) {
-	root, err := m.SecureTrie.Commit(onleaf)
-	if err == nil {
-		m.db.pushTrie(m.SecureTrie)
-	}
-	return root, err
-}
-
-func (m cachedTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
-	return m.SecureTrie.Prove(key, fromLevel, proofDb)
-}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 3fc1d327146c..4c00092f4e1d 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -662,6 +662,5 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
 		}
 		return nil
 	})
-	log.Debug("Trie cache stats after commit", "misses", trie.CacheMisses(), "unloads", trie.CacheUnloads())
 	return root, err
 }
diff --git a/eth/api.go b/eth/api.go
index 816b9cd33548..03ac74247ead 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -446,11 +446,11 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
 	}
 	triedb := api.eth.BlockChain().StateCache().TrieDB()
 
-	oldTrie, err := trie.NewSecure(startBlock.Root(), triedb, 0)
+	oldTrie, err := trie.NewSecure(startBlock.Root(), triedb)
 	if err != nil {
 		return nil, err
 	}
-	newTrie, err := trie.NewSecure(endBlock.Root(), triedb, 0)
+	newTrie, err := trie.NewSecure(endBlock.Root(), triedb)
 	if err != nil {
 		return nil, err
 	}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 405d52a71053..bee6438bfdf9 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -187,7 +187,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
 func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
 	// For now only check that the state trie is correct
 	if block := dl.GetBlockByHash(hash); block != nil {
-		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
+		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
 		return err
 	}
 	return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/trie/database.go b/trie/database.go
index 73ba2e761bae..c853dfe516e9 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -154,11 +154,11 @@ func (n *cachedNode) rlp() []byte {
 
 // obj returns the decoded and expanded trie node, either directly from the cache,
 // or by regenerating it from the rlp encoded blob.
-func (n *cachedNode) obj(hash common.Hash, cachegen uint16) node {
+func (n *cachedNode) obj(hash common.Hash) node {
 	if node, ok := n.node.(rawNode); ok {
-		return mustDecodeNode(hash[:], node, cachegen)
+		return mustDecodeNode(hash[:], node)
 	}
-	return expandNode(hash[:], n.node, cachegen)
+	return expandNode(hash[:], n.node)
 }
 
 // childs returns all the tracked children of this node, both the implicit ones
@@ -223,16 +223,15 @@ func simplifyNode(n node) node {
 
 // expandNode traverses the node hierarchy of a collapsed storage node and converts
 // all fields and keys into expanded memory form.
-func expandNode(hash hashNode, n node, cachegen uint16) node {
+func expandNode(hash hashNode, n node) node {
 	switch n := n.(type) {
 	case *rawShortNode:
 		// Short nodes need key and child expansion
 		return &shortNode{
 			Key: compactToHex(n.Key),
-			Val: expandNode(nil, n.Val, cachegen),
+			Val: expandNode(nil, n.Val),
 			flags: nodeFlag{
 				hash: hash,
-				gen: cachegen,
 			},
 		}
 
@@ -241,12 +240,11 @@ func expandNode(hash hashNode, n node, cachegen uint16) node {
 		node := &fullNode{
 			flags: nodeFlag{
 				hash: hash,
-				gen: cachegen,
 			},
 		}
 		for i := 0; i < len(node.Children); i++ {
 			if n[i] != nil {
-				node.Children[i] = expandNode(nil, n[i], cachegen)
+				node.Children[i] = expandNode(nil, n[i])
 			}
 		}
 		return node
@@ -349,13 +347,13 @@ func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
 
 // node retrieves a cached trie node from memory, or returns nil if none can be
 // found in the memory cache.
-func (db *Database) node(hash common.Hash, cachegen uint16) node {
+func (db *Database) node(hash common.Hash) node {
 	// Retrieve the node from the clean cache if available
 	if db.cleans != nil {
 		if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
 			memcacheCleanHitMeter.Mark(1)
 			memcacheCleanReadMeter.Mark(int64(len(enc)))
-			return mustDecodeNode(hash[:], enc, cachegen)
+			return mustDecodeNode(hash[:], enc)
 		}
 	}
 	// Retrieve the node from the dirty cache if available
@@ -364,7 +362,7 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node {
 	db.lock.RUnlock()
 
 	if dirty != nil {
-		return dirty.obj(hash, cachegen)
+		return dirty.obj(hash)
 	}
 	// Content unavailable in memory, attempt to retrieve from disk
 	enc, err := db.diskdb.Get(hash[:])
@@ -376,7 +374,7 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node {
 		memcacheCleanMissMeter.Mark(1)
 		memcacheCleanWriteMeter.Mark(int64(len(enc)))
 	}
-	return mustDecodeNode(hash[:], enc, cachegen)
+	return mustDecodeNode(hash[:], enc)
 }
 
 // Node retrieves an encoded cached trie node from memory. If it cannot be found
diff --git a/trie/hasher.go b/trie/hasher.go
index 9d6756b6f4e8..54f6a9de2b6a 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -26,11 +26,9 @@ import (
 )
 
 type hasher struct {
-	tmp sliceBuffer
-	sha keccakState
-	cachegen uint16
-	cachelimit uint16
-	onleaf LeafCallback
+	tmp sliceBuffer
+	sha keccakState
+	onleaf LeafCallback
 }
 
 // keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
@@ -62,9 +60,9 @@ var hasherPool = sync.Pool{
 	},
 }
 
-func newHasher(cachegen, cachelimit uint16, onleaf LeafCallback) *hasher {
+func newHasher(onleaf LeafCallback) *hasher {
 	h := hasherPool.Get().(*hasher)
-	h.cachegen, h.cachelimit, h.onleaf = cachegen, cachelimit, onleaf
+	h.onleaf = onleaf
 	return h
 }
 
@@ -80,14 +78,13 @@ func (h *hasher) hash(n node, db *Database, force bool) (node, node, error) {
 		if db == nil {
 			return hash, n, nil
 		}
-		if n.canUnload(h.cachegen, h.cachelimit) {
-			// Unload the node from cache. All of its subnodes will have a lower or equal
-			// cache generation number.
-			cacheUnloadCounter.Inc(1)
-			return hash, hash, nil
-		}
 		if !dirty {
-			return hash, n, nil
+			switch n.(type) {
+			case *fullNode, *shortNode:
+				return hash, hash, nil
+			default:
+				return hash, n, nil
+			}
 		}
 	}
 	// Trie not processed yet or needs storage, walk the children
diff --git a/trie/iterator.go b/trie/iterator.go
index 77f168166522..da93b2fadb3b 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -180,7 +180,7 @@ func (it *nodeIterator) LeafBlob() []byte {
 func (it *nodeIterator) LeafProof() [][]byte {
 	if len(it.stack) > 0 {
 		if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
-			hasher := newHasher(0, 0, nil)
+			hasher := newHasher(nil)
 			defer returnHasherToPool(hasher)
 
 			proofs := make([][]byte, 0, len(it.stack))
diff --git a/trie/node.go b/trie/node.go
index 1fafb7a53825..f4055e779a1b 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -30,7 +30,6 @@ var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b
 type node interface {
 	fstring(string) string
 	cache() (hashNode, bool)
-	canUnload(cachegen, cachelimit uint16) bool
 }
 
 type (
@@ -71,20 +70,9 @@ func (n *shortNode) copy() *shortNode { copy := *n; return &copy }
 // nodeFlag contains caching-related metadata about a node.
 type nodeFlag struct {
 	hash hashNode // cached hash of the node (may be nil)
-	gen uint16 // cache generation counter
 	dirty bool // whether the node has changes that must be written to the database
 }
 
-// canUnload tells whether a node can be unloaded.
-func (n *nodeFlag) canUnload(cachegen, cachelimit uint16) bool {
-	return !n.dirty && cachegen-n.gen >= cachelimit
-}
-
-func (n *fullNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
-func (n *shortNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
-func (n hashNode) canUnload(uint16, uint16) bool { return false }
-func (n valueNode) canUnload(uint16, uint16) bool { return false }
-
 func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
 func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
 func (n hashNode) cache() (hashNode, bool) { return nil, true }
@@ -117,8 +105,8 @@ func (n valueNode) fstring(ind string) string {
 	return fmt.Sprintf("%x ", []byte(n))
 }
 
-func mustDecodeNode(hash, buf []byte, cachegen uint16) node {
-	n, err := decodeNode(hash, buf, cachegen)
+func mustDecodeNode(hash, buf []byte) node {
+	n, err := decodeNode(hash, buf)
 	if err != nil {
 		panic(fmt.Sprintf("node %x: %v", hash, err))
 	}
@@ -126,7 +114,7 @@ func mustDecodeNode(hash, buf []byte, cachegen uint16) node {
 }
 
 // decodeNode parses the RLP encoding of a trie node.
-func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {
+func decodeNode(hash, buf []byte) (node, error) {
 	if len(buf) == 0 {
 		return nil, io.ErrUnexpectedEOF
 	}
@@ -136,22 +124,22 @@ func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {
 	}
 	switch c, _ := rlp.CountValues(elems); c {
 	case 2:
-		n, err := decodeShort(hash, elems, cachegen)
+		n, err := decodeShort(hash, elems)
 		return n, wrapError(err, "short")
 	case 17:
-		n, err := decodeFull(hash, elems, cachegen)
+		n, err := decodeFull(hash, elems)
 		return n, wrapError(err, "full")
 	default:
 		return nil, fmt.Errorf("invalid number of list elements: %v", c)
 	}
 }
 
-func decodeShort(hash, elems []byte, cachegen uint16) (node, error) {
+func decodeShort(hash, elems []byte) (node, error) {
 	kbuf, rest, err := rlp.SplitString(elems)
 	if err != nil {
 		return nil, err
 	}
-	flag := nodeFlag{hash: hash, gen: cachegen}
+	flag := nodeFlag{hash: hash}
 	key := compactToHex(kbuf)
 	if hasTerm(key) {
 		// value node
@@ -161,17 +149,17 @@ func decodeShort(hash, elems []byte, cachegen uint16) (node, error) {
 		}
 		return &shortNode{key, append(valueNode{}, val...), flag}, nil
 	}
-	r, _, err := decodeRef(rest, cachegen)
+	r, _, err := decodeRef(rest)
 	if err != nil {
 		return nil, wrapError(err, "val")
 	}
 	return &shortNode{key, r, flag}, nil
 }
 
-func decodeFull(hash, elems []byte, cachegen uint16) (*fullNode, error) {
-	n := &fullNode{flags: nodeFlag{hash: hash, gen: cachegen}}
+func decodeFull(hash, elems []byte) (*fullNode, error) {
+	n := &fullNode{flags: nodeFlag{hash: hash}}
 	for i := 0; i < 16; i++ {
-		cld, rest, err := decodeRef(elems, cachegen)
+		cld, rest, err := decodeRef(elems)
 		if err != nil {
 			return n, wrapError(err, fmt.Sprintf("[%d]", i))
 		}
@@ -189,7 +177,7 @@ func decodeFull(hash, elems []byte, cachegen uint16) (*fullNode, error) {
 
 const hashLen = len(common.Hash{})
 
-func decodeRef(buf []byte, cachegen uint16) (node, []byte, error) {
+func decodeRef(buf []byte) (node, []byte, error) {
 	kind, val, rest, err := rlp.Split(buf)
 	if err != nil {
 		return nil, buf, err
@@ -202,7 +190,7 @@ func decodeRef(buf []byte, cachegen uint16) (node, []byte, error) {
 			err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
 			return nil, buf, err
 		}
-		n, err := decodeNode(nil, buf, cachegen)
+		n, err := decodeNode(nil, buf)
 		return n, rest, err
 	case kind == rlp.String && len(val) == 0:
 		// empty node
diff --git a/trie/node_test.go b/trie/node_test.go
deleted file mode 100644
index 7ad1ff9e7bd3..000000000000
--- a/trie/node_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import "testing"
-
-func TestCanUnload(t *testing.T) {
-	tests := []struct {
-		flag nodeFlag
-		cachegen, cachelimit uint16
-		want bool
-	}{
-		{
-			flag: nodeFlag{dirty: true, gen: 0},
-			want: false,
-		},
-		{
-			flag: nodeFlag{dirty: false, gen: 0},
-			cachegen: 0, cachelimit: 0,
-			want: true,
-		},
-		{
-			flag: nodeFlag{dirty: false, gen: 65534},
-			cachegen: 65535, cachelimit: 1,
-			want: true,
-		},
-		{
-			flag: nodeFlag{dirty: false, gen: 65534},
-			cachegen: 0, cachelimit: 1,
-			want: true,
-		},
-		{
-			flag: nodeFlag{dirty: false, gen: 1},
-			cachegen: 65535, cachelimit: 1,
-			want: true,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.flag.canUnload(test.cachegen, test.cachelimit); got != test.want {
-			t.Errorf("%+v\n got %t, want %t", test, got, test.want)
-		}
-	}
-}
diff --git a/trie/proof.go b/trie/proof.go
index 0f18dd26bdd1..26a41ed277fb 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -65,7 +65,7 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
 			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
 		}
 	}
-	hasher := newHasher(0, 0, nil)
+	hasher := newHasher(nil)
 	defer returnHasherToPool(hasher)
 
 	for i, n := range nodes {
@@ -112,7 +112,7 @@ func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.Reader) (value
 		if buf == nil {
 			return nil, i, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
 		}
-		n, err := decodeNode(wantHash[:], buf, 0)
+		n, err := decodeNode(wantHash[:], buf)
 		if err != nil {
 			return nil, i, fmt.Errorf("bad proof node %d: %v", i, err)
 		}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 6a50cfd5a65f..fbc591ed108a 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -51,7 +51,7 @@ type SecureTrie struct {
 // Loaded nodes are kept around until their 'cache generation' expires.
 // A new cache generation is created by each call to Commit.
 // cachelimit sets the number of past cache generations to keep.
-func NewSecure(root common.Hash, db *Database, cachelimit uint16) (*SecureTrie, error) {
+func NewSecure(root common.Hash, db *Database) (*SecureTrie, error) {
 	if db == nil {
 		panic("trie.NewSecure called without a database")
 	}
@@ -59,7 +59,6 @@ func NewSecure(root common.Hash, db *Database, cachelimit uint16) (*SecureTrie,
 	if err != nil {
 		return nil, err
 	}
-	trie.SetCacheLimit(cachelimit)
 	return &SecureTrie{trie: *trie}, nil
 }
 
@@ -161,12 +160,6 @@ func (t *SecureTrie) Hash() common.Hash {
 	return t.trie.Hash()
 }
 
-// Root returns the root hash of SecureTrie.
-// Deprecated: use Hash instead.
-func (t *SecureTrie) Root() []byte {
-	return t.trie.Root()
-}
-
 // Copy returns a copy of SecureTrie.
 func (t *SecureTrie) Copy() *SecureTrie {
 	cpy := *t
@@ -183,7 +176,7 @@ func (t *SecureTrie) NodeIterator(start []byte) NodeIterator {
 // The caller must not hold onto the return value because it will become
 // invalid on the next call to hashKey or secKey.
 func (t *SecureTrie) hashKey(key []byte) []byte {
-	h := newHasher(0, 0, nil)
+	h := newHasher(nil)
 	h.sha.Reset()
 	h.sha.Write(key)
 	buf := h.sha.Sum(t.hashKeyBuf[:0])
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index f0ca6c800287..fb6c38ee222b 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -28,7 +28,7 @@ import (
 )
 
 func newEmptySecure() *SecureTrie {
-	trie, _ := NewSecure(common.Hash{}, NewDatabase(memorydb.New()), 0)
+	trie, _ := NewSecure(common.Hash{}, NewDatabase(memorydb.New()))
 	return trie
 }
 
@@ -36,8 +36,7 @@ func newEmptySecure() *SecureTrie {
 func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
 	// Create an empty trie
 	triedb := NewDatabase(memorydb.New())
-
-	trie, _ := NewSecure(common.Hash{}, triedb, 0)
+	trie, _ := NewSecure(common.Hash{}, triedb)
 
 	// Fill it with some arbitrary data
 	content := make(map[string][]byte)
diff --git a/trie/sync.go b/trie/sync.go
index ef931f633b67..85f1b0f8507d 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -101,7 +101,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
 	}
 	key := root.Bytes()
 	blob, _ := s.database.Get(key)
-	if local, err := decodeNode(key, blob, 0); local != nil && err == nil {
+	if local, err := decodeNode(key, blob); local != nil && err == nil {
 		return
 	}
 	// Assemble the new sub-trie sync request
@@ -187,7 +187,7 @@ func (s *Sync) Process(results []SyncResult) (bool, int, error) {
 			continue
 		}
 		// Decode the node data content and update the request
-		node, err := decodeNode(item.Hash[:], item.Data, 0)
+		node, err := decodeNode(item.Hash[:], item.Data)
 		if err != nil {
 			return committed, i, err
 		}
diff --git a/trie/sync_test.go b/trie/sync_test.go
index d80070f3e8b4..0d8c29cfe783 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -133,7 +133,7 @@ func testIterativeSync(t *testing.T, batch int) {
 		queue = append(queue[:0], sched.Missing(batch)...)
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
 }
 
 // Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -167,7 +167,7 @@ func TestIterativeDelayedSync(t *testing.T) {
 		queue = append(queue[len(results):], sched.Missing(10000)...)
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
 }
 
 // Tests that given a root hash, a trie can sync iteratively on a single thread,
@@ -212,7 +212,7 @@ func testIterativeRandomSync(t *testing.T, batch int) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
 }
 
 // Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -259,7 +259,7 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
 }
 
 // Tests that a trie sync will not request nodes multiple times, even if they
@@ -299,7 +299,7 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 		queue = append(queue[:0], sched.Missing(0)...)
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
 }
 
 // Tests that at any point in time during a sync, only complete sub-tries are in
diff --git a/trie/trie.go b/trie/trie.go
index af424d4ac676..920e331fd62f 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -24,7 +24,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/metrics"
 )
 
 var (
@@ -35,25 +34,6 @@ var (
 	emptyState = crypto.Keccak256Hash(nil)
 )
 
-var (
-	cacheMissCounter = metrics.NewRegisteredCounter("trie/cachemiss", nil)
-	cacheUnloadCounter = metrics.NewRegisteredCounter("trie/cacheunload", nil)
-)
-
-// CacheMisses retrieves a global counter measuring the number of cache misses
-// the trie had since process startup. This isn't useful for anything apart from
-// trie debugging purposes.
-func CacheMisses() int64 {
-	return cacheMissCounter.Count()
-}
-
-// CacheUnloads retrieves a global counter measuring the number of cache unloads
-// the trie did since process startup. This isn't useful for anything apart from
-// trie debugging purposes.
-func CacheUnloads() int64 {
-	return cacheUnloadCounter.Count()
-}
-
 // LeafCallback is a callback type invoked when a trie operation reaches a leaf
 // node. It's used by state sync and commit to allow handling external references
 // between account and storage tries.
@@ -67,23 +47,11 @@ type LeafCallback func(leaf []byte, parent common.Hash) error
 type Trie struct {
 	db *Database
 	root node
-
-	// Cache generation values.
-	// cachegen increases by one with each commit operation.
-	// new nodes are tagged with the current generation and unloaded
-	// when their generation is older than than cachegen-cachelimit.
-	cachegen, cachelimit uint16
-}
-
-// SetCacheLimit sets the number of 'cache generations' to keep.
-// A cache generation is created by a call to Commit.
-func (t *Trie) SetCacheLimit(l uint16) {
-	t.cachelimit = l
 }
 
 // newFlag returns the cache flag value for a newly created node.
 func (t *Trie) newFlag() nodeFlag {
-	return nodeFlag{dirty: true, gen: t.cachegen}
+	return nodeFlag{dirty: true}
 }
 
 // New creates a trie with an existing root node from db.
@@ -152,14 +120,12 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
 		if err == nil && didResolve {
 			n = n.copy()
 			n.Val = newnode
-			n.flags.gen = t.cachegen
 		}
 		return value, n, didResolve, err
 	case *fullNode:
 		value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1)
 		if err == nil && didResolve {
 			n = n.copy()
-			n.flags.gen = t.cachegen
 			n.Children[key[pos]] = newnode
 		}
 		return value, n, didResolve, err
@@ -428,19 +394,13 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
 }
 
 func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
-	cacheMissCounter.Inc(1)
-
 	hash := common.BytesToHash(n)
-	if node := t.db.node(hash, t.cachegen); node != nil {
+	if node := t.db.node(hash); node != nil {
 		return node, nil
 	}
 	return nil, &MissingNodeError{NodeHash: hash, Path: prefix}
 }
 
-// Root returns the root hash of the trie.
-// Deprecated: use Hash instead.
-func (t *Trie) Root() []byte { return t.Hash().Bytes() }
-
 // Hash returns the root hash of the trie. It does not write to the
 // database and can be used even if the trie doesn't have one.
 func (t *Trie) Hash() common.Hash {
@@ -460,7 +420,6 @@ func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
 		return common.Hash{}, err
 	}
 	t.root = cached
-	t.cachegen++
 	return common.BytesToHash(hash.(hashNode)), nil
 }
 
@@ -468,7 +427,7 @@ func (t *Trie) hashRoot(db *Database, onleaf LeafCallback) (node, node, error) {
 	if t.root == nil {
 		return hashNode(emptyRoot.Bytes()), nil, nil
 	}
-	h := newHasher(t.cachegen, t.cachelimit, onleaf)
+	h := newHasher(onleaf)
 	defer returnHasherToPool(h)
 	return h.hash(t.root, db, true)
 }
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 1c874370c462..ea0b3cbdd722 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -19,7 +19,6 @@ package trie
 import (
 	"bytes"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"math/big"
@@ -328,38 +327,6 @@ func (db *countingDB) Get(key []byte) ([]byte, error) {
 	return db.KeyValueStore.Get(key)
 }
 
-// TestCacheUnload checks that decoded nodes are unloaded after a
-// certain number of commit operations.
-func TestCacheUnload(t *testing.T) {
-	// Create test trie with two branches.
-	trie := newEmpty()
-	key1 := "---------------------------------"
-	key2 := "---some other branch"
-	updateString(trie, key1, "this is the branch of key1.")
-	updateString(trie, key2, "this is the branch of key2.")
-
-	root, _ := trie.Commit(nil)
-	trie.db.Commit(root, true)
-
-	// Commit the trie repeatedly and access key1.
-	// The branch containing it is loaded from DB exactly two times:
-	// in the 0th and 6th iteration.
-	diskdb := &countingDB{KeyValueStore: trie.db.diskdb, gets: make(map[string]int)}
-	triedb := NewDatabase(diskdb)
-	trie, _ = New(root, triedb)
-	trie.SetCacheLimit(5)
-	for i := 0; i < 12; i++ {
-		getString(trie, key1)
-		trie.Commit(nil)
-	}
-	// Check that it got loaded two times.
-	for dbkey, count := range diskdb.gets {
-		if count != 2 {
-			t.Errorf("db key %x loaded %d times, want %d times", []byte(dbkey), count, 2)
-		}
-	}
-}
-
 // randTest performs random trie operations.
 // Instances of this test are created by Generate.
 type randTest []randTestStep
@@ -379,7 +346,6 @@ const (
 	opHash
 	opReset
 	opItercheckhash
-	opCheckCacheInvariant
 	opMax // boundary value, not an actual op
 )
 
@@ -458,8 +424,6 @@ func runRandTest(rt randTest) bool {
 			if tr.Hash() != checktr.Hash() {
 				rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
 			}
-		case opCheckCacheInvariant:
-			rt[i].err = checkCacheInvariant(tr.root, nil, tr.cachegen, false, 0)
 		}
 		// Abort the test on error.
 		if rt[i].err != nil {
@@ -469,40 +433,6 @@ func runRandTest(rt randTest) bool {
 	return true
 }
 
-func checkCacheInvariant(n, parent node, parentCachegen uint16, parentDirty bool, depth int) error {
-	var children []node
-	var flag nodeFlag
-	switch n := n.(type) {
-	case *shortNode:
-		flag = n.flags
-		children = []node{n.Val}
-	case *fullNode:
-		flag = n.flags
-		children = n.Children[:]
-	default:
-		return nil
-	}
-
-	errorf := func(format string, args ...interface{}) error {
-		msg := fmt.Sprintf(format, args...)
-		msg += fmt.Sprintf("\nat depth %d node %s", depth, spew.Sdump(n))
-		msg += fmt.Sprintf("parent: %s", spew.Sdump(parent))
-		return errors.New(msg)
-	}
-	if flag.gen > parentCachegen {
-		return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
-	}
-	if depth > 0 && !parentDirty && flag.dirty {
-		return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
-	}
-	for _, child := range children {
-		if err := checkCacheInvariant(child, n, flag.gen, flag.dirty, depth+1); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func TestRandom(t *testing.T) {
 	if err := quick.Check(runRandTest, nil); err != nil {
 		if cerr, ok := err.(*quick.CheckError); ok {
@@ -626,6 +556,6 @@ func TestDecodeNode(t *testing.T) {
 	for i := 0; i < 5000000; i++ {
 		rand.Read(hash)
 		rand.Read(elems)
-		decodeNode(hash, elems, 1)
+		decodeNode(hash, elems)
 	}
 }
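
Note for downstream callers (a minimal sketch, not part of the patch itself): after this change trie.NewSecure takes only a root hash and a *trie.Database, and the deprecated Root() accessors are gone in favour of Hash(). The snippet below assumes a throwaway memorydb-backed database, as used in the updated trie tests; the variable names are illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Back the trie database with an in-memory key-value store.
	triedb := trie.NewDatabase(memorydb.New())

	// Old call site: trie.NewSecure(common.Hash{}, triedb, 0); the trailing
	// cache-generation limit argument no longer exists.
	tr, err := trie.NewSecure(common.Hash{}, triedb)
	if err != nil {
		panic(err)
	}
	tr.Update([]byte("key"), []byte("value"))

	// Old call site: tr.Root(); the deprecated accessor is removed, use Hash.
	fmt.Printf("root: %x\n", tr.Hash())
}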