cmd/geth: added counters to the geth inspect report #21495

Merged: 10 commits, Sep 17, 2020
core/rawdb/database.go: 206 changes (130 additions, 76 deletions)
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/olekukonko/tablewriter"
)

@@ -238,6 +239,50 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer
return frdb, nil
}

type counter uint64

func (c counter) String() string {
return fmt.Sprintf("%d", c)
}

func (c counter) Percentage(current uint64) string {
return fmt.Sprintf("%d", current*100/uint64(c))
}
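
For reference, Percentage reports how far current has progressed relative to the counter's total, using integer division. A hypothetical pair of values as an illustration (not part of the diff):

	// Illustrative only: a counter of 200 with current = 50 prints "25" (50*100/200).
	fmt.Println(counter(200).Percentage(50))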

// stat stores sizes and count for a parameter
type stat struct {
size common.StorageSize
count counter
}

// Add size to the stat and increase the counter by 1
func (s *stat) Add(size common.StorageSize) {
s.size += size
s.count++
}

func (s *stat) Size() string {
return s.size.String()
}

func (s *stat) Count() string {
return s.count.String()
}

// countReceiptsRLP counts how many receipts are stored in a RLP raw value
func countReceiptsRLP(data rlp.RawValue) counter {
it, err := rlp.NewListIterator(data)
if err != nil {
log.Warn("Receipt iteration error", "error", err)
return counter(0)
}
count := counter(0)
@rjl493456442 (Member) commented on Sep 15, 2020:

Can we use the List counter of RLP, instead of iterating the whole list? NVM

for it.Next() {
count++
}
return count
}
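
The reviewer's suggestion above, deriving the element count from the RLP structure instead of iterating it, could look roughly like the sketch below. It assumes the rlp package's SplitList and CountValues helpers and is not part of this PR:

	// countReceiptsRLPDirect is a hypothetical variant of countReceiptsRLP that
	// asks the RLP encoding for its element count rather than walking an iterator.
	func countReceiptsRLPDirect(data rlp.RawValue) counter {
		content, _, err := rlp.SplitList(data)
		if err != nil {
			log.Warn("Receipt counting error", "error", err)
			return counter(0)
		}
		n, err := rlp.CountValues(content)
		if err != nil {
			log.Warn("Receipt counting error", "error", err)
			return counter(0)
		}
		return counter(n)
	}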

// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database) error {
@@ -250,36 +295,38 @@ func InspectDatabase(db ethdb.Database) error {
logged = time.Now()

// Key-value store statistics
total common.StorageSize
headerSize common.StorageSize
bodySize common.StorageSize
receiptSize common.StorageSize
tdSize common.StorageSize
numHashPairing common.StorageSize
hashNumPairing common.StorageSize
trieSize common.StorageSize
codeSize common.StorageSize
txlookupSize common.StorageSize
accountSnapSize common.StorageSize
storageSnapSize common.StorageSize
preimageSize common.StorageSize
bloomBitsSize common.StorageSize
cliqueSnapsSize common.StorageSize
headers stat
bodies stat
receipts stat
tds stat
numHashPairings stat
hashNumPairings stat
tries stat
codes stat
txLookups stat
accountSnaps stat
storageSnaps stat
preimages stat
bloomBits stat
cliqueSnaps stat

// Ancient store statistics
ancientHeaders common.StorageSize
ancientBodies common.StorageSize
ancientReceipts common.StorageSize
ancientHashes common.StorageSize
ancientTds common.StorageSize
ancientHeadersSize common.StorageSize
ancientBodiesSize common.StorageSize
ancientReceiptsSize common.StorageSize
ancientTdsSize common.StorageSize
ancientHashesSize common.StorageSize

// Les statistic
chtTrieNodes common.StorageSize
bloomTrieNodes common.StorageSize
chtTrieNodes stat
bloomTrieNodes stat

// Meta- and unaccounted data
metadata common.StorageSize
unaccounted common.StorageSize
metadata stat
unaccounted stat

// Totals
total common.StorageSize
)
// Inspect key-value database first.
for it.Next() {
Expand All @@ -289,98 +336,105 @@ func InspectDatabase(db ethdb.Database) error {
)
total += size
switch {
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
tdSize += size
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
numHashPairing += size
case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
headerSize += size
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
hashNumPairing += size
headers.Add(size)
case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
bodySize += size
bodies.Add(size)
case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
receiptSize += size
receipts.size += size
receipts.count += countReceiptsRLP(it.Value())
Contributor commented:

Ah, I missed this one. So here you are counting individual receipts. But if we just removed "individual receipts counting" from ancientdb, it doesn't make much sense to count them individually in leveldb. Maybe just report "receipt lists" in leveldb as well.

Contributor (Author) replied:

Done. I'll come back proposing those counters someday though ... :D Just to be sure before starting to work on it: what do you think if in a next PR I add something like a "deepinspect" flag and do other inspections and counting, not only about receipts?
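
Following that exchange, the per-receipt counting in leveldb was dropped; the receipt case presumably ends up mirroring the other categories, along these lines (a sketch, not the exact merged hunk):

	case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
		receipts.Add(size)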

case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
tds.Add(size)
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
numHashPairings.Add(size)
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
hashNumPairings.Add(size)
case len(key) == common.HashLength:
tries.Add(size)
case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+common.HashLength:
codes.Add(size)
case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
txlookupSize += size
txLookups.Add(size)
case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
accountSnapSize += size
accountSnaps.Add(size)
case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
storageSnapSize += size
storageSnaps.Add(size)
case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
preimageSize += size
preimages.Add(size)
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
bloomBitsSize += size
bloomBits.Add(size)
case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
cliqueSnapsSize += size
cliqueSnaps.Add(size)
case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
chtTrieNodes += size
chtTrieNodes.Add(size)
case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
bloomTrieNodes += size
case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+common.HashLength:
codeSize += size
case len(key) == common.HashLength:
trieSize += size
bloomTrieNodes.Add(size)
default:
var accounted bool
for _, meta := range [][]byte{databaseVerisionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey} {
if bytes.Equal(key, meta) {
metadata += size
metadata.Add(size)
accounted = true
break
}
}
if !accounted {
unaccounted += size
unaccounted.Add(size)
}
}
count += 1
count++
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
}
// Inspect append-only file store then.
ancients := []*common.StorageSize{&ancientHeaders, &ancientBodies, &ancientReceipts, &ancientHashes, &ancientTds}
ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil {
*ancients[i] += common.StorageSize(size)
*ancientSizes[i] += common.StorageSize(size)
total += common.StorageSize(size)
}
}
// Get number of ancient rows inside the freezer
ancients := counter(0)
if count, err := db.Ancients(); err == nil {
ancients = counter(count)
}
// Display the database statistic.
stats := [][]string{
{"Key-Value store", "Headers", headerSize.String()},
{"Key-Value store", "Bodies", bodySize.String()},
{"Key-Value store", "Receipts", receiptSize.String()},
{"Key-Value store", "Difficulties", tdSize.String()},
{"Key-Value store", "Block number->hash", numHashPairing.String()},
{"Key-Value store", "Block hash->number", hashNumPairing.String()},
{"Key-Value store", "Transaction index", txlookupSize.String()},
{"Key-Value store", "Bloombit index", bloomBitsSize.String()},
{"Key-Value store", "Contract codes", codeSize.String()},
{"Key-Value store", "Trie nodes", trieSize.String()},
{"Key-Value store", "Trie preimages", preimageSize.String()},
{"Key-Value store", "Account snapshot", accountSnapSize.String()},
{"Key-Value store", "Storage snapshot", storageSnapSize.String()},
{"Key-Value store", "Clique snapshots", cliqueSnapsSize.String()},
{"Key-Value store", "Singleton metadata", metadata.String()},
{"Ancient store", "Headers", ancientHeaders.String()},
{"Ancient store", "Bodies", ancientBodies.String()},
{"Ancient store", "Receipts", ancientReceipts.String()},
{"Ancient store", "Difficulties", ancientTds.String()},
{"Ancient store", "Block number->hash", ancientHashes.String()},
{"Light client", "CHT trie nodes", chtTrieNodes.String()},
{"Light client", "Bloom trie nodes", bloomTrieNodes.String()},
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
{"Key-Value store", "Receipts", receipts.Size(), receipts.Count()},
{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size"})
table.SetFooter([]string{"", "Total", total.String()})
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})
table.AppendBulk(stats)
table.Render()

if unaccounted > 0 {
log.Error("Database contains unaccounted data", "size", unaccounted)
if unaccounted.size > 0 {
log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
}

return nil
}
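
For context, here is a minimal sketch of invoking the inspector programmatically against a chaindata directory, assuming the NewLevelDBDatabaseWithFreezer signature shown in this diff; the path, cache and handle values are illustrative only:

	// Open the key-value store together with its freezer and run the report.
	db, err := rawdb.NewLevelDBDatabaseWithFreezer("chaindata", 512, 512, "chaindata/ancient", "")
	if err != nil {
		log.Crit("Failed to open database", "err", err)
	}
	defer db.Close()
	if err := rawdb.InspectDatabase(db); err != nil {
		log.Crit("Database inspection failed", "err", err)
	}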