diff --git a/core/blockchain.go b/core/blockchain.go
index 79dbe7c818..919dc5be4c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1194,15 +1194,12 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
 	// Update the metrics touched during block processing
 	accountReadTimer.Inc(statedb.AccountReads.Milliseconds()) // Account reads are complete, we can mark them
 	storageReadTimer.Inc(statedb.StorageReads.Milliseconds()) // Storage reads are complete, we can mark them
-	accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete, we can mark them
-	storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete, we can mark them
 	snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete, we can mark them
 	snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete, we can mark them
 	triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
-	trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
-	trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
+	trieproc := statedb.SnapshotAccountReads + statedb.AccountReads
+	trieproc += statedb.SnapshotStorageReads + statedb.StorageReads
 	blockExecutionTimer.Inc((time.Since(substart) - trieproc - triehash).Milliseconds())
-	blockTrieOpsTimer.Inc((trieproc + triehash).Milliseconds())
 
 	// Validate the state using the default validator
 	substart = time.Now()
@@ -1212,9 +1209,15 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
 	}
 
 	// Update the metrics touched during block validation
-	accountHashTimer.Inc(statedb.AccountHashes.Milliseconds()) // Account hashes are complete, we can mark them
-	storageHashTimer.Inc(statedb.StorageHashes.Milliseconds()) // Storage hashes are complete, we can mark them
-	blockStateValidationTimer.Inc((time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)).Milliseconds())
+	accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete, we can mark them
+	storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete, we can mark them
+	accountHashTimer.Inc(statedb.AccountHashes.Milliseconds()) // Account hashes are complete, we can mark them
+	storageHashTimer.Inc(statedb.StorageHashes.Milliseconds()) // Storage hashes are complete, we can mark them
+	newTriehash := (statedb.AccountHashes + statedb.StorageHashes - triehash)
+	blockStateValidationTimer.Inc((time.Since(substart) - newTriehash).Milliseconds())
+	trieproc += statedb.AccountUpdates + statedb.StorageUpdates
+	triehash += newTriehash
+	blockTrieOpsTimer.Inc((trieproc + triehash).Milliseconds())
 
 	// If [writes] are disabled, skip [writeBlockWithState] so that we do not write the block
 	// or the state trie to disk.
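Note on the regrouping above: account/storage update time and the hashing done during validation are only known after the validator runs, so those timers now fire after validation, and blockTrieOpsTimer is incremented once with the accumulated totals so no component is counted twice across the two phases. The sketch below walks through that bookkeeping; the trieTimes struct and all durations are illustrative stand-ins, not the statedb API.

package main

import (
	"fmt"
	"time"
)

// trieTimes stands in for the duration fields the state database accumulates.
type trieTimes struct {
	SnapshotAccountReads, SnapshotStorageReads time.Duration
	AccountReads, StorageReads                 time.Duration
	AccountUpdates, StorageUpdates             time.Duration
	AccountHashes, StorageHashes               time.Duration
}

func main() {
	s := trieTimes{
		AccountReads: 12 * time.Millisecond, StorageReads: 30 * time.Millisecond,
		AccountHashes: 5 * time.Millisecond, StorageHashes: 9 * time.Millisecond,
	}

	// After execution: subtract only the reads and the hashing seen so far.
	triehash := s.AccountHashes + s.StorageHashes
	trieproc := s.SnapshotAccountReads + s.AccountReads + s.SnapshotStorageReads + s.StorageReads

	// Validation adds update time and more hashing on top of the earlier totals.
	s.AccountUpdates, s.StorageUpdates = 8*time.Millisecond, 14*time.Millisecond
	s.AccountHashes += 4 * time.Millisecond
	s.StorageHashes += 6 * time.Millisecond

	// Only the hashing accrued since the snapshot is attributed to validation,
	// then everything is folded into the trie-ops total exactly once.
	newTriehash := s.AccountHashes + s.StorageHashes - triehash
	trieproc += s.AccountUpdates + s.StorageUpdates
	triehash += newTriehash

	fmt.Println("validation hashing:", newTriehash) // 10ms
	fmt.Println("trie ops total:", trieproc+triehash) // 88ms
}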
diff --git a/trie/database.go b/trie/database.go
index 4f67d333ae..7925167471 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -394,14 +394,11 @@ func (db *Database) EncodedNode(h common.Hash) node
 func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) {
 	// Retrieve the node from the clean cache if available
 	if db.cleans != nil {
-		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
+		if enc, found := db.cleans.HasGet(nil, hash[:]); found {
 			memcacheCleanHitMeter.Mark(1)
 			memcacheCleanReadMeter.Mark(int64(len(enc)))
 			return enc, nil, nil
 		}
-	} else {
-		// TODO: remove later
-		log.Warn("trie database cleans is empty")
 	}
 	// Retrieve the node from the dirty cache if available
 	db.lock.RLock()
@@ -417,7 +414,7 @@ func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) {
 
 	// Content unavailable in memory, attempt to retrieve from disk
 	enc := rawdb.ReadTrieNode(db.diskdb, hash)
-	if len(enc) != 0 {
+	if len(enc) > 0 {
 		if db.cleans != nil {
 			db.cleans.Set(hash[:], enc)
 			memcacheCleanMissMeter.Mark(1)
@@ -564,7 +561,7 @@ type flushItem struct {
 // writeFlushItems writes all items in [toFlush] to disk in batches of
 // [ethdb.IdealBatchSize]. This function does not access any variables inside
 // of [Database] and does not need to be synchronized.
-func (db *Database) writeFlushItems(toFlush []flushItem) error {
+func (db *Database) writeFlushItems(toFlush []*flushItem) error {
 	batch := db.diskdb.NewBatch()
 	for _, item := range toFlush {
 		rlp := item.node.rlp()
@@ -621,12 +618,12 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	}
 
 	// Keep removing nodes from the flush-list until we're below allowance
-	toFlush := make([]flushItem, 0, 128)
+	toFlush := make([]*flushItem, 0, 128)
 	oldest := db.oldest
 	for pendingSize > limit && oldest != (common.Hash{}) {
 		// Fetch the oldest referenced node and push into the batch
 		node := db.dirties[oldest]
-		toFlush = append(toFlush, flushItem{oldest, node, nil})
+		toFlush = append(toFlush, &flushItem{oldest, node, nil})
 
 		// Iterate to the next flush item, or abort if the size cap was achieved. Size
 		// is the total size, including the useful cached data (hash -> blob), the
@@ -692,7 +689,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
 	db.lock.RLock()
 	lockStart := time.Now()
 	nodes, storage := len(db.dirties), db.dirtiesSize
-	toFlush, err := db.commit(node, make([]flushItem, 0, 128), callback)
+	toFlush, err := db.commit(node, make([]*flushItem, 0, 128), callback)
 	if err != nil {
 		log.Error("Failed to commit trie from trie database", "err", err)
 		return err
 	}
@@ -742,7 +739,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
 //
 // [callback] will be invoked as soon as it is determined a trie node will be
 // flushed to disk (before it is actually written).
-func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func(common.Hash)) ([]flushItem, error) {
+func (db *Database) commit(hash common.Hash, toFlush []*flushItem, callback func(common.Hash)) ([]*flushItem, error) {
 	// If the node does not exist, it's a previously committed node
 	node, ok := db.dirties[hash]
 	if !ok {
@@ -760,7 +757,7 @@ func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func(
 	// By processing the children of each node before the node itself, we ensure
 	// that children are committed before their parents (an invariant of this
 	// package).
-	toFlush = append(toFlush, flushItem{hash, node, nil})
+	toFlush = append(toFlush, &flushItem{hash, node, nil})
 	if callback != nil {
 		callback(hash)
 	}
diff --git a/utils/metered_cache.go b/utils/metered_cache.go
index 17c86bdaa2..5a4e124b88 100644
--- a/utils/metered_cache.go
+++ b/utils/metered_cache.go
@@ -57,6 +57,7 @@ func NewMeteredCache(size int, journal string, namespace string, updateFrequency
 	if journal == "" {
 		cache = fastcache.New(size)
 	} else {
+		log.Info("removed journal directory", "path", journal, "err", os.RemoveAll(journal))
 		dirSize, err := dirSize(journal)
 		log.Info("attempting to load cache from disk", "path", journal, "dirSize", common.StorageSize(dirSize), "err", err)
 		cache = fastcache.LoadFromFileOrNew(journal, size)
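Note on the clean-cache lookup change in trie/database.go: fastcache's Get only returns a byte slice, so a stored-but-empty value looks the same as a missing key, whereas HasGet additionally returns an existence flag, which is what node() now branches on. A small standalone example of the difference (the cache size and key below are arbitrary):

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	cache := fastcache.New(32 * 1024 * 1024) // 32 MiB cache
	cache.Set([]byte("node-hash"), nil)      // entry exists but its value is empty

	// Get cannot tell a stored empty value apart from a missing key.
	fmt.Println(len(cache.Get(nil, []byte("node-hash")))) // 0 either way

	// HasGet reports existence explicitly, so an empty entry still counts as a hit.
	if enc, found := cache.HasGet(nil, []byte("node-hash")); found {
		fmt.Println("hit, value length:", len(enc))
	}
}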