
all: fix issues with benchmarks #30667


Merged: 9 commits, Nov 4, 2024
26 changes: 25 additions & 1 deletion core/bench_test.go
@@ -81,9 +81,15 @@ var (
// value-transfer transaction with n bytes of extra data in each
// block.
func genValueTx(nbytes int) func(int, *BlockGen) {
// We can reuse the data for all transactions.
// During signing, the method tx.WithSignature(s, sig)
// performs:
// cpy := tx.inner.copy()
// cpy.setSignatureValues(signer.ChainID(), v, r, s)
// After this operation, the data can be reused by the caller.
data := make([]byte, nbytes)
return func(i int, gen *BlockGen) {
toaddr := common.Address{}
data := make([]byte, nbytes)
gas, _ := IntrinsicGas(data, nil, false, false, false, false)
signer := gen.Signer()
gasPrice := big.NewInt(0)
@@ -210,15 +216,27 @@ func BenchmarkChainRead_full_10k(b *testing.B) {
benchReadChain(b, true, 10000)
}
func BenchmarkChainRead_header_100k(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchReadChain(b, false, 100000)
}
func BenchmarkChainRead_full_100k(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchReadChain(b, true, 100000)
}
func BenchmarkChainRead_header_500k(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchReadChain(b, false, 500000)
}
func BenchmarkChainRead_full_500k(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchReadChain(b, true, 500000)
}
func BenchmarkChainWrite_header_10k(b *testing.B) {
@@ -234,9 +252,15 @@ func BenchmarkChainWrite_full_100k(b *testing.B) {
benchWriteChain(b, true, 100000)
}
func BenchmarkChainWrite_header_500k(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchWriteChain(b, false, 500000)
}
func BenchmarkChainWrite_full_500k(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchWriteChain(b, true, 500000)
}
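
The guards added above all follow the stock `testing.Short()` pattern, so the heavy 100k/500k cases drop out of `go test -short` runs while full benchmark runs are unchanged. A minimal, self-contained sketch of that pattern (illustrative only, not taken from this diff; the benchmark name and workload are placeholders):

```go
package bench

import "testing"

// BenchmarkHeavyCase runs the full workload under `go test -bench=Heavy .`,
// but exits early via b.Skip when the -short flag is set,
// e.g. `go test -short -bench=Heavy .`.
func BenchmarkHeavyCase(b *testing.B) {
	if testing.Short() {
		b.Skip("Skipping in short-mode")
	}
	for i := 0; i < b.N; i++ {
		// heavy workload goes here
	}
}
```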

6 changes: 5 additions & 1 deletion core/rawdb/accessors_chain_test.go
@@ -649,11 +649,15 @@ func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
// makeTestReceipts creates fake receipts for the ancient write benchmark.
func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
receipts := make([]*types.Receipt, nPerBlock)
var logs []*types.Log
for i := 0; i < 5; i++ {
logs = append(logs, new(types.Log))
}
for i := 0; i < len(receipts); i++ {
receipts[i] = &types.Receipt{
Status: types.ReceiptStatusSuccessful,
CumulativeGasUsed: 0x888888888,
Logs: make([]*types.Log, 5),
Logs: logs,
}
}
allReceipts := make([]types.Receipts, n)
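
Worth spelling out why the `Logs` change matters: `make([]*types.Log, 5)` yields five nil `*types.Log` pointers rather than five logs, so the fake receipts previously carried no usable log payload, whereas the new code shares one slice of five allocated logs across all receipts. A small standalone sketch of the distinction (illustrative only, assuming the go-ethereum `core/types` import):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	nilLogs := make([]*types.Log, 5) // length 5, but every element is a nil pointer
	logs := make([]*types.Log, 0, 5)
	for i := 0; i < 5; i++ {
		logs = append(logs, new(types.Log)) // five distinct, zero-valued logs
	}
	fmt.Println(nilLogs[0] == nil, logs[0] == nil) // true false
}
```
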
20 changes: 13 additions & 7 deletions core/txpool/blobpool/blobpool.go
@@ -318,6 +318,10 @@ type BlobPool struct {
discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)

// txValidationFn defaults to txpool.ValidateTransaction, but can be
// overridden for testing purposes.
txValidationFn txpool.ValidationFunction

lock sync.RWMutex // Mutex protecting the pool during reorg handling
}

@@ -329,12 +333,13 @@ func New(config Config, chain BlockChain) *BlobPool {

// Create the transaction pool with its initial settings
return &BlobPool{
config: config,
signer: types.LatestSigner(chain.Config()),
chain: chain,
lookup: newLookup(),
index: make(map[common.Address][]*blobTxMeta),
spent: make(map[common.Address]*uint256.Int),
config: config,
signer: types.LatestSigner(chain.Config()),
chain: chain,
lookup: newLookup(),
index: make(map[common.Address][]*blobTxMeta),
spent: make(map[common.Address]*uint256.Int),
txValidationFn: txpool.ValidateTransaction,
}
}

@@ -1090,7 +1095,8 @@ func (p *BlobPool) validateTx(tx *types.Transaction) error {
MaxSize: txMaxSize,
MinTip: p.gasTip.ToBig(),
}
if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {

if err := p.txValidationFn(tx, p.head, p.signer, baseOpts); err != nil {
return err
}
// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
29 changes: 28 additions & 1 deletion core/txpool/blobpool/blobpool_test.go
@@ -1449,11 +1449,29 @@ func TestAdd(t *testing.T) {
}
}

// fakeBilly is a billy.Database implementation which just drops data on the floor.
type fakeBilly struct {
billy.Database
count uint64
}

func (f *fakeBilly) Put(data []byte) (uint64, error) {
f.count++
return f.count, nil
}

var _ billy.Database = (*fakeBilly)(nil)

// Benchmarks the time it takes to assemble the lazy pending transaction list
// from the pool contents.
func BenchmarkPoolPending100Mb(b *testing.B) { benchmarkPoolPending(b, 100_000_000) }
func BenchmarkPoolPending1GB(b *testing.B) { benchmarkPoolPending(b, 1_000_000_000) }
func BenchmarkPoolPending10GB(b *testing.B) { benchmarkPoolPending(b, 10_000_000_000) }
func BenchmarkPoolPending10GB(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchmarkPoolPending(b, 10_000_000_000)
}

func benchmarkPoolPending(b *testing.B, datacap uint64) {
// Calculate the maximum number of transaction that would fit into the pool
@@ -1477,6 +1495,15 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
b.Fatalf("failed to create blob pool: %v", err)
}
// Make the pool not use disk (just drop everything). This test never reads
// back the data; it just iterates over the pool's in-memory items.
pool.store = &fakeBilly{pool.store, 0}
// Avoid validation - verifying all blob proofs takes significant time
// when the capacity is large. The purpose of this bench is to measure assembling
// the lazies, not the KZG verifications.
pool.txValidationFn = func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *txpool.ValidationOptions) error {
return nil // accept all
}
// Fill the pool up with one random transaction from each account with the
// same price and everything to maximize the worst case scenario
for i := 0; i < int(capacity); i++ {
22 changes: 19 additions & 3 deletions core/txpool/blobpool/evictheap_test.go
@@ -239,9 +239,25 @@ func BenchmarkPriceHeapOverflow10MB(b *testing.B) { benchmarkPriceHeapOverflow(
func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
func BenchmarkPriceHeapOverflow1GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
func BenchmarkPriceHeapOverflow10GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
func BenchmarkPriceHeapOverflow25GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
func BenchmarkPriceHeapOverflow50GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }

func BenchmarkPriceHeapOverflow25GB(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchmarkPriceHeapOverflow(b, 25*1024*1024*1024)
}
func BenchmarkPriceHeapOverflow50GB(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchmarkPriceHeapOverflow(b, 50*1024*1024*1024)
}
func BenchmarkPriceHeapOverflow100GB(b *testing.B) {
if testing.Short() {
b.Skip("Skipping in short-mode")
}
benchmarkPriceHeapOverflow(b, 100*1024*1024*1024)
}

func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
// Calculate how many unique transactions we can fit into the provided disk
5 changes: 5 additions & 0 deletions core/txpool/validation.go
@@ -47,6 +47,11 @@ type ValidationOptions struct {
MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool
}

// ValidationFunction is a method type which the pools use to perform the tx validations
// that do not require state access. Production code typically uses ValidateTransaction,
// whereas testing code might choose something else, e.g. to always fail or to avoid heavy CPU usage.
type ValidationFunction func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *ValidationOptions) error

// ValidateTransaction is a helper method to check whether a transaction is valid
// according to the consensus rules, but does not check state-dependent validation
// (balance, nonce, etc).
13 changes: 2 additions & 11 deletions eth/tracers/internal/tracetest/calltrace_test.go
@@ -35,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
)

@@ -202,7 +201,7 @@ func BenchmarkTracers(b *testing.B) {
func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
// Configure a blockchain with the given prestate
tx := new(types.Transaction)
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil {
b.Fatalf("failed to parse testcase input: %v", err)
}
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
@@ -211,15 +210,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
Origin: origin,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: uint64(test.Context.Time),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
context := test.Context.toBlockContext(test.Genesis)
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
4 changes: 3 additions & 1 deletion eth/tracers/tracers_test.go
@@ -99,11 +99,13 @@ func BenchmarkTransactionTrace(b *testing.B) {

for i := 0; i < b.N; i++ {
snap := state.StateDB.Snapshot()
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
_, err = st.TransitionDb()
res, err := st.TransitionDb()
if err != nil {
b.Fatal(err)
}
tracer.OnTxEnd(&types.Receipt{GasUsed: res.UsedGas}, nil)
state.StateDB.RevertToSnapshot(snap)
if have, want := len(tracer.StructLogs()), 244752; have != want {
b.Fatalf("trace wrong, want %d steps, have %d", want, have)
3 changes: 1 addition & 2 deletions log/logger_test.go
@@ -7,7 +7,6 @@ import (
"io"
"log/slog"
"math/big"
"os"
"strings"
"testing"
"time"
@@ -70,7 +69,7 @@ func TestJSONHandler(t *testing.T) {
}

func BenchmarkTraceLogging(b *testing.B) {
SetDefault(NewLogger(NewTerminalHandler(os.Stderr, true)))
SetDefault(NewLogger(NewTerminalHandler(io.Discard, true)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Trace("a message", "v", i)
32 changes: 1 addition & 31 deletions metrics/sample_test.go
@@ -3,7 +3,6 @@ package metrics
import (
"math"
"math/rand"
"runtime"
"testing"
"time"
)
@@ -27,6 +26,7 @@ func BenchmarkCompute1000(b *testing.B) {
SampleVariance(mean, s)
}
}

func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
var sum int64
@@ -40,28 +40,6 @@ func BenchmarkCompute1000000(b *testing.B) {
SampleVariance(mean, s)
}
}
func BenchmarkCopy1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkCopy1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}

func BenchmarkExpDecaySample257(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(257, 0.015))
@@ -237,17 +215,9 @@ func TestUniformSampleStatistics(t *testing.T) {
}

func benchmarkSample(b *testing.B, s Sample) {
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
pauseTotalNs := memStats.PauseTotalNs
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.Update(1)
}
b.StopTimer()
runtime.GC()
runtime.ReadMemStats(&memStats)
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
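
With the hand-rolled `runtime.MemStats` bookkeeping removed above, allocation cost can still be surfaced through the testing package's built-in accounting. A hedged sketch of one way to do that (not part of this PR; `benchmarkSampleAllocs` is an illustrative name, and it assumes it sits next to the existing helper in the metrics package):

```go
package metrics

import "testing"

// benchmarkSampleAllocs reports allocs/op and B/op for the sample update loop;
// the same numbers are also available for any benchmark via `go test -benchmem`.
func benchmarkSampleAllocs(b *testing.B, s Sample) {
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
}
```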

func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) {
7 changes: 3 additions & 4 deletions triedb/pathdb/difflayer_test.go
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
nblob = common.CopyBytes(blob)
}
}
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), nil)
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
}
var layer layer
layer = emptyLayer()
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
)
nodes[common.Hash{}][string(path)] = node
}
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), nil)
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
}
for i := 0; i < b.N; i++ {
b.StopTimer()
@@ -156,8 +156,7 @@ func BenchmarkJournal(b *testing.B) {
)
nodes[common.Hash{}][string(path)] = node
}
// TODO(rjl493456442) a non-nil state set is expected.
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), nil)
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), new(StateSetWithOrigin))
}
var layer layer
layer = emptyLayer()