core, metrics, triedb: generalize the resettingTimer #32016

Open · wants to merge 1 commit into master

36 changes: 18 additions & 18 deletions core/blockchain.go
@@ -1943,7 +1943,7 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
 	vmCfg.Tracer = nil
 	bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
 
-	blockPrefetchExecuteTimer.Update(time.Since(start))
+	blockPrefetchExecuteTimer.UpdateSince(start)
 	if interrupt.Load() {
 		blockPrefetchInterruptMeter.Mark(1)
 	}
@@ -2029,22 +2029,22 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
 	proctime := time.Since(startTime) // processing + validation + cross validation
 
 	// Update the metrics touched during block processing and validation
-	accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
-	storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
+	accountReadTimer.Update(int64(statedb.AccountReads)) // Account reads are complete(in processing)
+	storageReadTimer.Update(int64(statedb.StorageReads)) // Storage reads are complete(in processing)
 	if statedb.AccountLoaded != 0 {
-		accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
+		accountReadSingleTimer.Update(int64(statedb.AccountReads) / int64(statedb.AccountLoaded))
 	}
 	if statedb.StorageLoaded != 0 {
-		storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
+		storageReadSingleTimer.Update(int64(statedb.StorageReads) / int64(statedb.StorageLoaded))
 	}
-	accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
-	storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
-	accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
-	triehash := statedb.AccountHashes // The time spent on tries hashing
-	trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
-	blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
-	blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
-	blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
+	accountUpdateTimer.Update(int64(statedb.AccountUpdates)) // Account updates are complete(in validation)
+	storageUpdateTimer.Update(int64(statedb.StorageUpdates)) // Storage updates are complete(in validation)
+	accountHashTimer.Update(int64(statedb.AccountHashes)) // Account hashes are complete(in validation)
+	triehash := statedb.AccountHashes // The time spent on tries hashing
+	trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
+	blockExecutionTimer.Update(int64(ptime - (statedb.AccountReads + statedb.StorageReads))) // The time spent on EVM processing
+	blockValidationTimer.Update(int64(vtime - (triehash + trieUpdate))) // The time spent on block validation
+	blockCrossValidationTimer.Update(int64(xvtime)) // The time spent on stateless cross validation
 
 	// Write the block to the chain and get the status.
 	var (
@@ -2061,12 +2061,12 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
 		return nil, err
 	}
 	// Update the metrics touched during block commit
-	accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
-	storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
-	snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
-	triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them
+	accountCommitTimer.Update(int64(statedb.AccountCommits)) // Account commits are complete, we can mark them
+	storageCommitTimer.Update(int64(statedb.StorageCommits)) // Storage commits are complete, we can mark them
+	snapshotCommitTimer.Update(int64(statedb.SnapshotCommits)) // Snapshot commits are complete, we can mark them
+	triedbCommitTimer.Update(int64(statedb.TrieDBCommits)) // Trie database commits are complete, we can mark them
 
-	blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
+	blockWriteTimer.Update(int64(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits))
 	blockInsertTimer.UpdateSince(startTime)
 
 	return &blockProcessingResult{
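
The blockchain.go changes follow one pattern: where the sample is "time elapsed since a start point", Update(time.Since(start)) becomes UpdateSince(start); everywhere else the time.Duration argument gains an explicit int64 conversion to fit the new Update signature. A minimal sketch of the two call forms (the metric name and function here are hypothetical, not part of the PR):

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// exampleTimer is an illustrative registration; passing a nil registry
// falls back to the package's default registry.
var exampleTimer = metrics.NewRegisteredResettingTimer("example/op", nil)

func doWork() {
	start := time.Now()
	// ... measured work ...

	// Preferred for elapsed time: the duration-to-int64 conversion stays
	// inside the metrics package.
	exampleTimer.UpdateSince(start)

	// Equivalent explicit form under the new signature (nanoseconds,
	// since that is time.Duration's underlying unit):
	exampleTimer.Update(int64(time.Since(start)))
}
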
2 changes: 1 addition & 1 deletion core/state/snapshot/difflayer.go
@@ -164,7 +164,7 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
 	defer dl.lock.Unlock()
 
 	defer func(start time.Time) {
-		snapshotBloomIndexTimer.Update(time.Since(start))
+		snapshotBloomIndexTimer.UpdateSince(start)
 	}(time.Now())
 
 	// Inject the new origin that triggered the rebloom
12 changes: 6 additions & 6 deletions metrics/internal/sampledata.go
@@ -53,12 +53,12 @@ func ExampleMetrics() metrics.Registry {
 	registry.Register("test/meter", metrics.NewInactiveMeter())
 	{
 		timer := metrics.NewRegisteredResettingTimer("test/resetting_timer", registry)
-		timer.Update(10 * time.Millisecond)
-		timer.Update(11 * time.Millisecond)
-		timer.Update(12 * time.Millisecond)
-		timer.Update(120 * time.Millisecond)
-		timer.Update(13 * time.Millisecond)
-		timer.Update(14 * time.Millisecond)
+		timer.Update(int64(10 * time.Millisecond))
+		timer.Update(int64(11 * time.Millisecond))
+		timer.Update(int64(12 * time.Millisecond))
+		timer.Update(int64(120 * time.Millisecond))
+		timer.Update(int64(13 * time.Millisecond))
+		timer.Update(int64(14 * time.Millisecond))
 	}
 	{
 		timer := metrics.NewRegisteredTimer("test/timer", registry)
17 changes: 5 additions & 12 deletions metrics/resetting_timer.go
@@ -53,27 +53,20 @@ func (t *ResettingTimer) Snapshot() *ResettingTimerSnapshot {
 	return snapshot
 }
 
-// Time records the duration of the execution of the given function.
-func (t *ResettingTimer) Time(f func()) {
-	ts := time.Now()
-	f()
-	t.Update(time.Since(ts))
-}
-
 // Update records the duration of an event.
-func (t *ResettingTimer) Update(d time.Duration) {
+func (t *ResettingTimer) Update(d int64) {
 	if !metricsEnabled {
 		return
 	}
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
-	t.values = append(t.values, int64(d))
-	t.sum += int64(d)
+	t.values = append(t.values, d)
+	t.sum += d
 }
 
 // UpdateSince records the duration of an event that started at a time and ends now.
 func (t *ResettingTimer) UpdateSince(ts time.Time) {
-	t.Update(time.Since(ts))
+	t.Update(int64(time.Since(ts)))
 }
 
 // ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.

Contributor commented on the new Update signature:

Why change the type here to int64? It seems more correct to use time.Duration, and the underlying type is the same.
@@ -104,7 +97,6 @@ func (t *ResettingTimerSnapshot) Mean() float64 {
 	if !t.calculated {
 		t.calc(nil)
 	}
-
 	return t.mean
 }
@@ -134,4 +126,5 @@ func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
 	}
 	t.min = t.values[0]
 	t.max = t.values[len(t.values)-1]
+	t.calculated = true
 }
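
The review question above captures the trade-off: time.Duration reads more naturally at timing call sites, but a plain int64 lets the same type accumulate arbitrary scalar samples — the updated tests below already feed it raw int64 values directly. A hedged sketch of a non-time use the new signature permits (the metric name and helper are illustrative, not from this PR):

package main

import "github.com/ethereum/go-ethereum/metrics"

// recordBatchSizes tracks a distribution of byte counts rather than durations.
func recordBatchSizes(batches [][]byte) {
	sizes := metrics.NewRegisteredResettingTimer("example/batch_size", nil)
	for _, b := range batches {
		sizes.Update(int64(len(b))) // a byte count, not a duration
	}
	snap := sizes.Snapshot()
	_ = snap.Mean() // min/max/mean/percentiles are computed over the raw samples
}

Note that the calc hunk above also sets t.calculated, so Mean, Min and Max no longer recompute the statistics on every call after the first.
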
5 changes: 2 additions & 3 deletions metrics/resetting_timer_test.go
@@ -2,7 +2,6 @@ package metrics
 
 import (
 	"testing"
-	"time"
 )
 
 func TestResettingTimer(t *testing.T) {
@@ -68,7 +67,7 @@ func TestResettingTimer(t *testing.T) {
 	}
 
 	for _, v := range tt.values {
-		timer.Update(time.Duration(v))
+		timer.Update(v)
 	}
 	snap := timer.Snapshot()
@@ -160,7 +159,7 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) {
 	}
 
 	for _, v := range tt.values {
-		timer.Update(time.Duration(v))
+		timer.Update(v)
 	}
 
 	snap := timer.Snapshot()
6 changes: 3 additions & 3 deletions triedb/hashdb/database.go
@@ -266,7 +266,7 @@ func (db *Database) Dereference(root common.Hash) {
 	db.gcsize += storage - db.dirtiesSize
 	db.gctime += time.Since(start)
 
-	memcacheGCTimeTimer.Update(time.Since(start))
+	memcacheGCTimeTimer.UpdateSince(start)
 	memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
 	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
@@ -384,7 +384,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	db.flushsize += storage - db.dirtiesSize
 	db.flushtime += time.Since(start)
 
-	memcacheFlushTimeTimer.Update(time.Since(start))
+	memcacheFlushTimeTimer.UpdateSince(start)
 	memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
 	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))
@@ -428,7 +428,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 	batch.Reset()
 
 	// Reset the storage counters and bumped metrics
-	memcacheCommitTimeTimer.Update(time.Since(start))
+	memcacheCommitTimeTimer.UpdateSince(start)
 	memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
 	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))