diff --git a/.gitmodules b/.gitmodules
index 90d1be0a3d10..241c169c4772 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,7 @@
path = tests/testdata
url = https://github.com/ethereum/tests
shallow = true
+[submodule "evm-benchmarks"]
+ path = tests/evm-benchmarks
+ url = https://github.com/ipsilon/evm-benchmarks
+ shallow = true
diff --git a/.golangci.yml b/.golangci.yml
index 395a91fe1bc8..4950b98c21ba 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,7 +1,7 @@
# This file configures github.com/golangci/golangci-lint.
run:
- timeout: 5m
+ timeout: 20m
tests: true
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
diff --git a/.travis.yml b/.travis.yml
index 197d56748fc2..e08e271f3f12 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ jobs:
- stage: lint
os: linux
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- lint
git:
@@ -31,7 +31,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- docker
services:
@@ -48,7 +48,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- docker
services:
@@ -65,7 +65,7 @@ jobs:
if: type = push
os: linux
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- ubuntu-ppa
- GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
os: linux
dist: bionic
sudo: required
- go: 1.17.x
+ go: 1.18.x
env:
- azure-linux
- GO111MODULE=on
@@ -148,7 +148,7 @@ jobs:
- sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
# Install Go to allow building with
- - curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz
+ - curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@@ -162,7 +162,7 @@ jobs:
- stage: build
if: type = push
os: osx
- go: 1.17.x
+ go: 1.18.x
env:
- azure-osx
- azure-ios
@@ -194,7 +194,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- GO111MODULE=on
script:
@@ -205,7 +205,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- GO111MODULE=on
script:
@@ -214,7 +214,7 @@ jobs:
- stage: build
os: linux
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -225,7 +225,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- azure-purge
- GO111MODULE=on
@@ -239,7 +239,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
- go: 1.17.x
+ go: 1.18.x
env:
- GO111MODULE=on
script:
diff --git a/Dockerfile b/Dockerfile
index 7badbc1320a4..ec46f60773e3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
diff --git a/Dockerfile.alltools b/Dockerfile.alltools
index 3ae5377e4f36..683f87a55e7e 100644
--- a/Dockerfile.alltools
+++ b/Dockerfile.alltools
@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
diff --git a/README.md b/README.md
index 0e8bdca4dbfe..cddc619a20cb 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,22 @@ Going through all the possible command line flags is out of scope here (please c
but we've enumerated a few common parameter combos to get you up to speed quickly
on how you can run your own `geth` instance.
+### Hardware Requirements
+
+Minimum:
+
+* CPU with 2+ cores
+* 4GB RAM
+* 500GB free storage space to sync the Mainnet
+* 8 MBit/sec download Internet service
+
+Recommended:
+
+* Fast CPU with 4+ cores
+* 16GB+ RAM
+* High Performance SSD with at least 500GB free space
+* 25+ MBit/sec download Internet service
+
### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum
@@ -165,7 +181,7 @@ saving your blockchain as well as map the default ports. There is also an `alpin
available for a slim version of the image.
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
-and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
+and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
accessible from the outside.
### Programmatically interfacing `geth` nodes
diff --git a/SECURITY.md b/SECURITY.md
index 88b3f8fe17e9..41b900d5e984 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -19,7 +19,7 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
**Please do not file a public ticket** mentioning the vulnerability.
-To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.
+To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index 261b4d1b868f..e6c117fe5f0d 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -81,13 +81,7 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(arguments) != 0 {
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
}
- // Nothing to unmarshal, return default variables
- nonIndexedArgs := arguments.NonIndexed()
- defaultVars := make([]interface{}, len(nonIndexedArgs))
- for index, arg := range nonIndexedArgs {
- defaultVars[index] = reflect.New(arg.Type.GetType())
- }
- return defaultVars, nil
+ return make([]interface{}, 0), nil
}
return arguments.UnpackValues(data)
}
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 27d40f1d663b..ac696f446be6 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -230,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
defer b.mu.Unlock()
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
+ if receipt == nil {
+ return nil, ethereum.NotFound
+ }
return receipt, nil
}
@@ -639,7 +642,6 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
}
// SendTransaction updates the pending block to include the given transaction.
-// It panics if the transaction is invalid.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -647,17 +649,17 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
// Get the last block
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
if err != nil {
- panic("could not fetch parent")
+ return fmt.Errorf("could not fetch parent")
}
// Check transaction validity
signer := types.MakeSigner(b.blockchain.Config(), block.Number())
sender, err := types.Sender(signer, tx)
if err != nil {
- panic(fmt.Errorf("invalid transaction: %v", err))
+ return fmt.Errorf("invalid transaction: %v", err)
}
nonce := b.pendingState.GetNonce(sender)
if tx.Nonce() != nonce {
- panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
+ return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
}
// Include tx in chain
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go
index 118abc59a7f1..b931fbb04d64 100644
--- a/accounts/abi/bind/util.go
+++ b/accounts/abi/bind/util.go
@@ -21,6 +21,7 @@ import (
"errors"
"time"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
@@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
logger := log.New("hash", tx.Hash())
for {
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
- if receipt != nil {
+ if err == nil {
return receipt, nil
}
- if err != nil {
- logger.Trace("Receipt retrieval failed", "err", err)
- } else {
+
+ if errors.Is(err, ethereum.NotFound) {
logger.Trace("Transaction not yet mined")
+ } else {
+ logger.Trace("Receipt retrieval failed", "err", err)
}
+
// Wait for the next round.
select {
case <-ctx.Done():
diff --git a/accounts/abi/selector_parser.go b/accounts/abi/selector_parser.go
new file mode 100644
index 000000000000..75609b28a6cc
--- /dev/null
+++ b/accounts/abi/selector_parser.go
@@ -0,0 +1,152 @@
+package abi
+
+import (
+ "fmt"
+)
+
+type SelectorMarshaling struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Inputs []ArgumentMarshaling `json:"inputs"`
+}
+
+func isDigit(c byte) bool {
+ return c >= '0' && c <= '9'
+}
+
+func isAlpha(c byte) bool {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+func isIdentifierSymbol(c byte) bool {
+ return c == '$' || c == '_'
+}
+
+func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
+ if len(unescapedSelector) == 0 {
+ return "", "", fmt.Errorf("empty token")
+ }
+ firstChar := unescapedSelector[0]
+ position := 1
+ if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
+ return "", "", fmt.Errorf("invalid token start: %c", firstChar)
+ }
+ for position < len(unescapedSelector) {
+ char := unescapedSelector[position]
+ if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
+ break
+ }
+ position++
+ }
+ return unescapedSelector[:position], unescapedSelector[position:], nil
+}
+
+func parseIdentifier(unescapedSelector string) (string, string, error) {
+ return parseToken(unescapedSelector, true)
+}
+
+func parseElementaryType(unescapedSelector string) (string, string, error) {
+ parsedType, rest, err := parseToken(unescapedSelector, false)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
+ }
+ // handle arrays
+ for len(rest) > 0 && rest[0] == '[' {
+ parsedType = parsedType + string(rest[0])
+ rest = rest[1:]
+ for len(rest) > 0 && isDigit(rest[0]) {
+ parsedType = parsedType + string(rest[0])
+ rest = rest[1:]
+ }
+ if len(rest) == 0 || rest[0] != ']' {
+ return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0])
+ }
+ parsedType = parsedType + string(rest[0])
+ rest = rest[1:]
+ }
+ return parsedType, rest, nil
+}
+
+func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
+ if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
+ return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0])
+ }
+ parsedType, rest, err := parseType(unescapedSelector[1:])
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to parse type: %v", err)
+ }
+ result := []interface{}{parsedType}
+ for len(rest) > 0 && rest[0] != ')' {
+ parsedType, rest, err = parseType(rest[1:])
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to parse type: %v", err)
+ }
+ result = append(result, parsedType)
+ }
+ if len(rest) == 0 || rest[0] != ')' {
+ return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
+ }
+ return result, rest[1:], nil
+}
+
+func parseType(unescapedSelector string) (interface{}, string, error) {
+ if len(unescapedSelector) == 0 {
+ return nil, "", fmt.Errorf("empty type")
+ }
+ if unescapedSelector[0] == '(' {
+ return parseCompositeType(unescapedSelector)
+ } else {
+ return parseElementaryType(unescapedSelector)
+ }
+}
+
+func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
+ arguments := make([]ArgumentMarshaling, 0)
+ for i, arg := range args {
+ // generate dummy name to avoid unmarshal issues
+ name := fmt.Sprintf("name%d", i)
+ if s, ok := arg.(string); ok {
+ arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
+ } else if components, ok := arg.([]interface{}); ok {
+ subArgs, err := assembleArgs(components)
+ if err != nil {
+ return nil, fmt.Errorf("failed to assemble components: %v", err)
+ }
+ arguments = append(arguments, ArgumentMarshaling{name, "tuple", "tuple", subArgs, false})
+ } else {
+ return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
+ }
+ }
+ return arguments, nil
+}
+
+// ParseSelector converts a method selector into a struct that can be JSON encoded
+// and consumed by other functions in this package.
+// Note, although uppercase letters are not part of the ABI spec, this function
+// still accepts it as the general format is valid.
+func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
+ name, rest, err := parseIdentifier(unescapedSelector)
+ if err != nil {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
+ }
+ args := []interface{}{}
+ if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
+ rest = rest[2:]
+ } else {
+ args, rest, err = parseCompositeType(rest)
+ if err != nil {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
+ }
+ }
+ if len(rest) > 0 {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
+ }
+
+ // Reassemble the fake ABI and constuct the JSON
+ fakeArgs, err := assembleArgs(args)
+ if err != nil {
+ return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
+ }
+
+ return SelectorMarshaling{name, "function", fakeArgs}, nil
+}
diff --git a/accounts/abi/selector_parser_test.go b/accounts/abi/selector_parser_test.go
new file mode 100644
index 000000000000..9720c9d5308e
--- /dev/null
+++ b/accounts/abi/selector_parser_test.go
@@ -0,0 +1,54 @@
+package abi
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "testing"
+)
+
+func TestParseSelector(t *testing.T) {
+ mkType := func(types ...interface{}) []ArgumentMarshaling {
+ var result []ArgumentMarshaling
+ for i, typeOrComponents := range types {
+ name := fmt.Sprintf("name%d", i)
+ if typeName, ok := typeOrComponents.(string); ok {
+ result = append(result, ArgumentMarshaling{name, typeName, typeName, nil, false})
+ } else if components, ok := typeOrComponents.([]ArgumentMarshaling); ok {
+ result = append(result, ArgumentMarshaling{name, "tuple", "tuple", components, false})
+ } else {
+ log.Fatalf("unexpected type %T", typeOrComponents)
+ }
+ }
+ return result
+ }
+ tests := []struct {
+ input string
+ name string
+ args []ArgumentMarshaling
+ }{
+ {"noargs()", "noargs", []ArgumentMarshaling{}},
+ {"simple(uint256,uint256,uint256)", "simple", mkType("uint256", "uint256", "uint256")},
+ {"other(uint256,address)", "other", mkType("uint256", "address")},
+ {"withArray(uint256[],address[2],uint8[4][][5])", "withArray", mkType("uint256[]", "address[2]", "uint8[4][][5]")},
+ {"singleNest(bytes32,uint8,(uint256,uint256),address)", "singleNest", mkType("bytes32", "uint8", mkType("uint256", "uint256"), "address")},
+ {"multiNest(address,(uint256[],uint256),((address,bytes32),uint256))", "multiNest",
+ mkType("address", mkType("uint256[]", "uint256"), mkType(mkType("address", "bytes32"), "uint256"))},
+ }
+ for i, tt := range tests {
+ selector, err := ParseSelector(tt.input)
+ if err != nil {
+ t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err)
+ }
+ if selector.Name != tt.name {
+ t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name)
+ }
+
+ if selector.Type != "function" {
+ t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function")
+ }
+ if !reflect.DeepEqual(selector.Inputs, tt.args) {
+ t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args)
+ }
+ }
+}
diff --git a/accounts/accounts.go b/accounts/accounts.go
index af870dad1573..179a33c59fd3 100644
--- a/accounts/accounts.go
+++ b/accounts/accounts.go
@@ -46,7 +46,7 @@ const (
// accounts (derived from the same seed).
type Wallet interface {
// URL retrieves the canonical path under which this wallet is reachable. It is
- // user by upper layers to define a sorting order over all wallets from multiple
+ // used by upper layers to define a sorting order over all wallets from multiple
// backends.
URL() URL
@@ -89,7 +89,7 @@ type Wallet interface {
// accounts.
//
// Note, self derivation will increment the last component of the specified path
- // opposed to decending into a child path to allow discovering accounts starting
+ // opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
@@ -105,7 +105,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
- // a password to decrypt the account, or a PIN code o verify the transaction),
+ // a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
@@ -124,13 +124,13 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
- // a password to decrypt the account, or a PIN code o verify the transaction),
+ // a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
//
- // This method should return the signature in 'canonical' format, with v 0 or 1
+ // This method should return the signature in 'canonical' format, with v 0 or 1.
SignText(account Account, text []byte) ([]byte, error)
// SignTextWithPassphrase is identical to Signtext, but also takes a password
diff --git a/accounts/errors.go b/accounts/errors.go
index 2fed35f9d074..727e5329befa 100644
--- a/accounts/errors.go
+++ b/accounts/errors.go
@@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
var ErrWalletAlreadyOpen = errors.New("wallet already open")
// ErrWalletClosed is returned if a wallet is attempted to be opened the
-// secodn time.
+// second time.
var ErrWalletClosed = errors.New("wallet closed")
// AuthNeededError is returned by backends for signing requests where the user
diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go
index b4d229bc0bf5..2a2b83bd1b15 100644
--- a/accounts/scwallet/wallet.go
+++ b/accounts/scwallet/wallet.go
@@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go
index b6f1814488c0..382f3ddaee21 100644
--- a/accounts/usbwallet/wallet.go
+++ b/accounts/usbwallet/wallet.go
@@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
diff --git a/appveyor.yml b/appveyor.yml
index 65b5f96841e2..d477e6db9f56 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -13,7 +13,7 @@ environment:
GETH_MINGW: 'C:\msys64\mingw32'
install:
- - git submodule update --init --depth 1
+ - git submodule update --init --depth 1 --recursive
- go version
for:
diff --git a/build/checksums.txt b/build/checksums.txt
index 5df27bbf6173..9d83c9ebba9f 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -1,37 +1,58 @@
# This file contains sha256 checksums of optional build dependencies.
-3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d go1.17.5.src.tar.gz
-2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264 go1.17.5.darwin-amd64.tar.gz
-111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0 go1.17.5.darwin-arm64.tar.gz
-443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c go1.17.5.freebsd-386.tar.gz
-17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3 go1.17.5.freebsd-amd64.tar.gz
-4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f go1.17.5.linux-386.tar.gz
-bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e go1.17.5.linux-amd64.tar.gz
-6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e go1.17.5.linux-arm64.tar.gz
-aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de go1.17.5.linux-armv6l.tar.gz
-3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70 go1.17.5.linux-ppc64le.tar.gz
-8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b go1.17.5.linux-s390x.tar.gz
-6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe go1.17.5.windows-386.zip
-671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9 go1.17.5.windows-amd64.zip
-45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030 go1.17.5.windows-arm64.zip
+38f423db4cc834883f2b52344282fa7a39fbb93650dc62a11fdf0be6409bdad6 go1.18.src.tar.gz
+70bb4a066997535e346c8bfa3e0dfe250d61100b17ccc5676274642447834969 go1.18.darwin-amd64.tar.gz
+9cab6123af9ffade905525d79fc9ee76651e716c85f1f215872b5f2976782480 go1.18.darwin-arm64.tar.gz
+e63492d4f38487331518eb4b50e670d853bb8d67e88596269af84bb9aca0b381 go1.18.freebsd-386.tar.gz
+01cd67bbc12e659ff236ecebde1806f76452f7ca145c172d5ecdbf4f4803daae go1.18.freebsd-amd64.tar.gz
+1c04cf4440b323a66328e0df95d409f955b9b475e58eae235fdd3d1f1cf02f4f go1.18.linux-386.tar.gz
+e85278e98f57cdb150fe8409e6e5df5343ecb13cebf03a5d5ff12bd55a80264f go1.18.linux-amd64.tar.gz
+7ac7b396a691e588c5fb57687759e6c4db84a2a3bbebb0765f4b38e5b1c5b00e go1.18.linux-arm64.tar.gz
+a80fa43d1f4575fb030adbfbaa94acd860c6847820764eecb06c63b7c103612b go1.18.linux-armv6l.tar.gz
+070351edac192483c074b38d08ec19251a83f8210765a532a84c3dcf8aec04d8 go1.18.linux-ppc64le.tar.gz
+ea265f5e62fcaf941d53f0cdb81222d9668e1672a0d39d992f16ff0e87c0ee6b go1.18.linux-s390x.tar.gz
+e23fd2a0509690fe7e63b2b1bcd4c39ed57b46ccde76f35dc0d16ca7fdbc5aaa go1.18.windows-386.zip
+65c5c0c709a7ca1b357091b10b795b439d8b50e579d3893edab4c7e9b384f435 go1.18.windows-amd64.zip
+1c454eb60c64d481965a165c623ff1ed6cf32d68c6b31f36069c8768d908f093 go1.18.windows-arm64.zip
-d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
-e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
-14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz
-337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz
-6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz
-878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz
-42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz
-6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz
-2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz
-08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz
-c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz
-3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz
-f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz
-1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz
-8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz
-5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz
-e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip
-7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip
-59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip
-65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip
+03c181fc1bb29ea3e73cbb23399c43b081063833a7cf7554b94e5a98308df53e golangci-lint-1.45.2-linux-riscv64.deb
+08a50bbbf451ede6d5354179eb3e14a5634e156dfa92cb9a2606f855a637e35b golangci-lint-1.45.2-linux-ppc64le.rpm
+0d12f6ec1296b5a70e392aa88cd2295cceef266165eb7028e675f455515dd1c9 golangci-lint-1.45.2-linux-armv7.deb
+10f2846e2e50e4ea8ae426ee62dcd2227b23adddd8e991aa3c065927ac948735 golangci-lint-1.45.2-linux-ppc64le.deb
+1463049b744871168095e3e8f687247d6040eeb895955b869889ea151e0603ab golangci-lint-1.45.2-linux-arm64.tar.gz
+15720f9c4c6f9324af695f081dc189adc7751b255759e78d7b2df1d7e9192533 golangci-lint-1.45.2-linux-amd64.deb
+166d922e4d3cfe3d47786c590154a9c8ea689dff0aa92b73d2f5fc74fc570c29 golangci-lint-1.45.2-linux-arm64.rpm
+1a3754c69f7cc19ab89cbdcc2550da4cf9abb3120383c6b3bd440c1ec22da2e6 golangci-lint-1.45.2-freebsd-386.tar.gz
+1dec0aa46d4f0d241863b573f70129bdf1de9c595cf51172a840a588a4cd9fc5 golangci-lint-1.45.2-windows-amd64.zip
+3198453806517c1ad988229f5e758ef850e671203f46d6905509df5bdf4dc24b golangci-lint-1.45.2-freebsd-armv7.tar.gz
+46a3cd1749d7b98adc2dc01510ddbe21abe42689c8a53fb0e81662713629f215 golangci-lint-1.45.2-linux-386.deb
+4e28bfb593d464b9e160f2acd5b71993836a183270bf8299b78ad31f7a168c0d golangci-lint-1.45.2-linux-arm64.deb
+5157a58c8f9ab85c33af2e46f0d7c57a3b1e8953b81d61130e292e09f545cfab golangci-lint-1.45.2-linux-mips64le.tar.gz
+518cd027644129fbf8ec4f02bd6f9ad7278aae826f92b63c80d4d0819ddde49a golangci-lint-1.45.2-linux-armv6.rpm
+595ad6c6dade4c064351bc309f411703e457f8ffbb7a1806b3d8ee713333427f golangci-lint-1.45.2-linux-amd64.tar.gz
+6994d6c80f0730751090986184a3481b4be2e6b6e84416238a2b857910045a4f golangci-lint-1.45.2-windows-arm64.zip
+6c81652fc340118811b487f713c441fc6f527800bf5fd11b8929d08124efa015 golangci-lint-1.45.2-linux-armv7.tar.gz
+726cb045559b7518bafdd3459de70a0647c087eb1b4634627a4b2e95b1258580 golangci-lint-1.45.2-freebsd-amd64.tar.gz
+77df3774cdfda49b956d4a0e676da9a9b883f496ee37293c530770fef6b1d24e golangci-lint-1.45.2-linux-mips64.deb
+7a9840f279a7d5d405bb434e101c2290964b3729630ac2add29280b962b7b9a5 golangci-lint-1.45.2-windows-armv6.zip
+7d4bf9a5d80ec467aaaf66e78dbdcab567bbc6ba8151334c714eee58766aae32 golangci-lint-1.45.2-windows-armv7.zip
+7e5f8821d39bb11d273b0841b34355f56bd5a45a2d5179f0d09e614e0efc0482 golangci-lint-1.45.2-linux-s390x.rpm
+828de1bde796b23d8656b17a8885fbd879ef612795d62d1e4618126b419728b5 golangci-lint-1.45.2-linux-mips64.rpm
+879a52107a797678a03c175cc7cf441411a14a01f66dc87f70bdfa304a4129a6 golangci-lint-1.45.2-windows-386.zip
+87b6c7e3a3769f7d9abeb3bb82119b3c91e3c975300f6834fdeef8b2e37c98ff golangci-lint-1.45.2-linux-amd64.rpm
+8b605c6d686c8af53ecc4ef39544541eeb1644d34cc10f9ffc5087808210c4ff golangci-lint-1.45.2-linux-s390x.deb
+9427dbf51d0ac6f73a0f992838bd40c817470cc5bf6c8e2e2bea6fac46d7af6e golangci-lint-1.45.2-linux-ppc64le.tar.gz
+995e509e895ca6a64ffc7395ac884d5961bdec98423cb896b17f345a9b4a19cf golangci-lint-1.45.2-darwin-amd64.tar.gz
+a3f36278f2ea5516341e9071a2df6e65df272be80230b5406a12b72c6d425bee golangci-lint-1.45.2-linux-armv7.rpm
+a5e12c50c23e87ac1deffc872f92ae85427b1198604969399805ae47cfe43f08 golangci-lint-1.45.2-linux-riscv64.tar.gz
+aa8fa1be0729dbc2fbc4e01e82027097613eee74bd686ebef20f860b01fff8b3 golangci-lint-1.45.2-freebsd-armv6.tar.gz
+c2b9669decc1b638cf2ee9060571af4e255f6dfcbb225c293e3a7ee4bb2c7217 golangci-lint-1.45.2-darwin-arm64.tar.gz
+dfa8bdaf0387aec1cd5c1aa8857f67b2bbdfc2e42efce540c8fb9bbe3e8af302 golangci-lint-1.45.2-linux-armv6.tar.gz
+eb8b8539dd017eee5c131ea9b875893ab2cebeeca41e8c6624907fb02224d643 golangci-lint-1.45.2-linux-386.rpm
+ed6c7e17a857f30d715c5302fa250d95936936b277024bffea201187a257d7a7 golangci-lint-1.45.2-linux-armv6.deb
+ef4d0154ace4001f01b288baeb118176242efb4fd163e178763e3213b77ef30b golangci-lint-1.45.2-linux-mips64le.deb
+ef7002a2229f5ff5ba201a715fcf877664ea88decbe58e69d163293913024955 golangci-lint-1.45.2-linux-s390x.tar.gz
+f13ecbd09228632e6bbe91a8324bd675c406eed22eb6d2c1e8192eed9ec4f914 golangci-lint-1.45.2-linux-386.tar.gz
+f4cd9cfb09252f51699407277512263cae8409b665dd764f55a34738d0e89edc golangci-lint-1.45.2-linux-riscv64.rpm
+fb1945dc59d37c9d14bf0a4aea11ea8651fa0e1d582ea80c4c44d0a536c08893 golangci-lint-1.45.2-linux-mips64.tar.gz
+fe542c22738010f453c735a3c410decfd3784d1bd394b395c298ee298fc4c606 golangci-lint-1.45.2-linux-mips64le.rpm
\ No newline at end of file
diff --git a/build/ci.go b/build/ci.go
index 8b302511a711..c3dccfc58868 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -130,13 +130,14 @@ var (
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
- // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
+ // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite
debDistroGoBoots = map[string]string{
- "trusty": "golang-1.11",
- "xenial": "golang-go",
- "bionic": "golang-go",
- "focal": "golang-go",
- "hirsute": "golang-go",
+ "trusty": "golang-1.11", // EOL: 04/2024
+ "xenial": "golang-go", // EOL: 04/2026
+ "bionic": "golang-go", // EOL: 04/2028
+ "focal": "golang-go", // EOL: 04/2030
+ "impish": "golang-go", // EOL: 07/2022
+ // "jammy": "golang-go", // EOL: 04/2027
}
debGoBootPaths = map[string]string{
@@ -147,7 +148,7 @@ var (
// This is the version of go that will be downloaded by
//
// go run ci.go install -dlgo
- dlgoVersion = "1.17.5"
+ dlgoVersion = "1.18"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -331,12 +332,21 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
- const version = "1.42.0"
+ const version = "1.45.2"
csdb := build.MustLoadChecksums("build/checksums.txt")
- base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
- url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
- archivePath := filepath.Join(cachedir, base+".tar.gz")
+ arch := runtime.GOARCH
+ ext := ".tar.gz"
+
+ if runtime.GOOS == "windows" {
+ ext = ".zip"
+ }
+ if arch == "arm" {
+ arch += "v" + os.Getenv("GOARM")
+ }
+ base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
+ url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
+ archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err)
}
@@ -1233,21 +1243,21 @@ func doPurge(cmdline []string) {
// Iterate over the blobs, collect and sort all unstable builds
for i := 0; i < len(blobs); i++ {
- if !strings.Contains(blobs[i].Name, "unstable") {
+ if !strings.Contains(*blobs[i].Name, "unstable") {
blobs = append(blobs[:i], blobs[i+1:]...)
i--
}
}
for i := 0; i < len(blobs); i++ {
for j := i + 1; j < len(blobs); j++ {
- if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
+ if blobs[i].Properties.LastModified.After(*blobs[j].Properties.LastModified) {
blobs[i], blobs[j] = blobs[j], blobs[i]
}
}
}
// Filter out all archives more recent that the given threshold
for i, blob := range blobs {
- if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
+ if time.Since(*blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
blobs = blobs[:i]
break
}
diff --git a/cmd/clef/main.go b/cmd/clef/main.go
index 3aaf898db2e6..f7c3adebc44a 100644
--- a/cmd/clef/main.go
+++ b/cmd/clef/main.go
@@ -661,7 +661,7 @@ func signer(c *cli.Context) error {
if err != nil {
utils.Fatalf("Could not register API: %w", err)
}
- handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
+ handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
// set port
port := c.Int(rpcPortFlag.Name)
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 7dcb412b53a8..d0d55a455d30 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -26,6 +26,7 @@ import (
"os"
"strings"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
@@ -67,6 +68,13 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int {
return sum
}
+func (c *Chain) RootAt(height int) common.Hash {
+ if height < c.Len() {
+ return c.blocks[height].Root()
+ }
+ return common.Hash{}
+}
+
// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go
index e695cd42d69e..dd9dfd861981 100644
--- a/cmd/devp2p/internal/ethtest/helpers.go
+++ b/cmd/devp2p/internal/ethtest/helpers.go
@@ -96,6 +96,19 @@ func (s *Suite) dial66() (*Conn, error) {
return conn, nil
}
+// dial66 attempts to dial the given node and perform a handshake,
+// returning the created Conn with additional snap/1 capabilities if
+// successful.
+func (s *Suite) dialSnap() (*Conn, error) {
+ conn, err := s.dial66()
+ if err != nil {
+ return nil, fmt.Errorf("dial failed: %v", err)
+ }
+ conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
+ conn.ourHighestSnapProtoVersion = 1
+ return conn, nil
+}
+
// peer performs both the protocol handshake and the status message
// exchange with the node in order to peer with it.
func (c *Conn) peer(chain *Chain, status *Status) error {
@@ -131,7 +144,11 @@ func (c *Conn) handshake() error {
}
c.negotiateEthProtocol(msg.Caps)
if c.negotiatedProtoVersion == 0 {
- return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+ return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+ }
+ // If we require snap, verify that it was negotiated
+ if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
+ return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
}
return nil
default:
@@ -143,15 +160,21 @@ func (c *Conn) handshake() error {
// advertised capability from peer.
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
var highestEthVersion uint
+ var highestSnapVersion uint
for _, capability := range caps {
- if capability.Name != "eth" {
- continue
- }
- if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
- highestEthVersion = capability.Version
+ switch capability.Name {
+ case "eth":
+ if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
+ highestEthVersion = capability.Version
+ }
+ case "snap":
+ if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
+ highestSnapVersion = capability.Version
+ }
}
}
c.negotiatedProtoVersion = highestEthVersion
+ c.negotiatedSnapProtoVersion = highestSnapVersion
}
// statusExchange performs a `Status` message exchange with the given node.
@@ -325,6 +348,15 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo
}
}
+func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
+ defer c.SetReadDeadline(time.Time{})
+ c.SetReadDeadline(time.Now().Add(5 * time.Second))
+ if err := c.Write(msg); err != nil {
+ return nil, fmt.Errorf("could not write to connection: %v", err)
+ }
+ return c.ReadSnap(id)
+}
+
// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
// write request
diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go
new file mode 100644
index 000000000000..95dd90fd3b4b
--- /dev/null
+++ b/cmd/devp2p/internal/ethtest/snap.go
@@ -0,0 +1,675 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package ethtest
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/rand"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/protocols/snap"
+ "github.com/ethereum/go-ethereum/internal/utesting"
+ "github.com/ethereum/go-ethereum/light"
+ "github.com/ethereum/go-ethereum/trie"
+ "golang.org/x/crypto/sha3"
+)
+
+func (s *Suite) TestSnapStatus(t *utesting.T) {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err := conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+}
+
+type accRangeTest struct {
+ nBytes uint64
+ root common.Hash
+ origin common.Hash
+ limit common.Hash
+
+ expAccounts int
+ expFirst common.Hash
+ expLast common.Hash
+}
+
+// TestSnapGetAccountRange various forms of GetAccountRange requests.
+func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
+ var (
+ root = s.chain.RootAt(999)
+ ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ zero = common.Hash{}
+ firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
+ firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b")
+ secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
+ storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790")
+ )
+ for i, tc := range []accRangeTest{
+ // Tests decreasing the number of bytes
+ {4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
+ {3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")},
+ {2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")},
+ {1, root, zero, ffHash, 1, firstKey, firstKey},
+
+ // Tests variations of the range
+ //
+ // [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds
+ {4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey},
+ // [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds)
+ {4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey},
+ {4000, root, zero, zero, 1, firstKey, firstKey},
+ {4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
+ {4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")},
+
+ // Test different root hashes
+ //
+ // A stateroot that does not exist
+ {4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero},
+ // The genesis stateroot (we expect it to not be served)
+ {4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
+ // A 127 block old stateroot, expected to be served
+ {4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
+ // A root which is not actually an account root, but a storage orot
+ {4000, storageRoot, zero, ffHash, 0, zero, zero},
+
+ // And some non-sensical requests
+ //
+ // range from [0xFF to 0x00], wrong order. Expect not to be serviced
+ {4000, root, ffHash, zero, 0, zero, zero},
+ // range from [firstkey, firstkey-1], wrong order. Expect to get first key.
+ {4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey},
+ // range from [firstkey, 0], wrong order. Expect to get first key.
+ {4000, root, firstKey, zero, 1, firstKey, firstKey},
+ // Max bytes: 0. Expect to deliver one account.
+ {0, root, zero, ffHash, 1, firstKey, firstKey},
+ } {
+ if err := s.snapGetAccountRange(t, &tc); err != nil {
+ t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
+ }
+ }
+}
+
+type stRangesTest struct {
+ root common.Hash
+ accounts []common.Hash
+ origin []byte
+ limit []byte
+ nBytes uint64
+
+ expSlots int
+}
+
+// TestSnapGetStorageRange various forms of GetStorageRanges requests.
+func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
+ var (
+ ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ zero = common.Hash{}
+ firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
+ )
+ for i, tc := range []stRangesTest{
+ {
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{secondKey, firstKey},
+ origin: zero[:],
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 0,
+ },
+
+ /*
+ Some tests against this account:
+ {
+ "balance": "0",
+ "nonce": 1,
+ "root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
+ "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+ "storage": {
+ "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
+ "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
+ "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
+ },
+ "key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
+ }
+ */
+ { // [:] -> [slot1, slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: zero[:],
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 3,
+ },
+ { // [slot1:] -> [slot1, slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 3,
+ },
+ { // [slot1+ :] -> [slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"),
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 2,
+ },
+ { // [slot1:slot2] -> [slot1, slot2]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
+ limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
+ nBytes: 500,
+ expSlots: 2,
+ },
+ { // [slot1+:slot2+] -> [slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"),
+ nBytes: 500,
+ expSlots: 2,
+ },
+ } {
+ if err := s.snapGetStorageRanges(t, &tc); err != nil {
+ t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
+ i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
+ }
+ }
+}
+
+type byteCodesTest struct {
+ nBytes uint64
+ hashes []common.Hash
+
+ expHashes int
+}
+
+var (
+ // emptyRoot is the known root hash of an empty trie.
+ emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ // emptyCode is the known hash of the empty EVM bytecode.
+ emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
+)
+
+// TestSnapGetByteCodes various forms of GetByteCodes requests.
+func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
+ // The halfchain import should yield these bytecodes
+ var hcBytecodes []common.Hash
+ for _, s := range []string{
+ "0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317",
+ "0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3",
+ "0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44",
+ "0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6",
+ "0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c",
+ "0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04",
+ "0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49",
+ "0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142",
+ "0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1",
+ "0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb",
+ "0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d",
+ "0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732",
+ "0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77",
+ "0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f",
+ "0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373",
+ "0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb",
+ "0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e",
+ "0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6",
+ "0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35",
+ "0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b",
+ "0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f",
+ "0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce",
+ "0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b",
+ "0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a",
+ "0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49",
+ } {
+ hcBytecodes = append(hcBytecodes, common.HexToHash(s))
+ }
+
+ for i, tc := range []byteCodesTest{
+ // A few stateroots
+ {
+ nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)},
+ expHashes: 0,
+ },
+ {
+ nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)},
+ expHashes: 0,
+ },
+ // Empties
+ {
+ nBytes: 10000, hashes: []common.Hash{emptyRoot},
+ expHashes: 0,
+ },
+ {
+ nBytes: 10000, hashes: []common.Hash{emptyCode},
+ expHashes: 1,
+ },
+ {
+ nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
+ expHashes: 3,
+ },
+ // The existing bytecodes
+ {
+ nBytes: 10000, hashes: hcBytecodes,
+ expHashes: len(hcBytecodes),
+ },
+ // The existing, with limited byte arg
+ {
+ nBytes: 1, hashes: hcBytecodes,
+ expHashes: 1,
+ },
+ {
+ nBytes: 0, hashes: hcBytecodes,
+ expHashes: 1,
+ },
+ {
+ nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]},
+ expHashes: 4,
+ },
+ } {
+ if err := s.snapGetByteCodes(t, &tc); err != nil {
+ t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
+ }
+ }
+}
+
+type trieNodesTest struct {
+ root common.Hash
+ paths []snap.TrieNodePathSet
+ nBytes uint64
+
+ expHashes []common.Hash
+ expReject bool
+}
+
+func decodeNibbles(nibbles []byte, bytes []byte) {
+ for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
+ bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
+ }
+}
+
+// hasTerm returns whether a hex key has the terminator flag.
+func hasTerm(s []byte) bool {
+ return len(s) > 0 && s[len(s)-1] == 16
+}
+
+func keybytesToHex(str []byte) []byte {
+ l := len(str)*2 + 1
+ var nibbles = make([]byte, l)
+ for i, b := range str {
+ nibbles[i*2] = b / 16
+ nibbles[i*2+1] = b % 16
+ }
+ nibbles[l-1] = 16
+ return nibbles
+}
+
+func hexToCompact(hex []byte) []byte {
+ terminator := byte(0)
+ if hasTerm(hex) {
+ terminator = 1
+ hex = hex[:len(hex)-1]
+ }
+ buf := make([]byte, len(hex)/2+1)
+ buf[0] = terminator << 5 // the flag byte
+ if len(hex)&1 == 1 {
+ buf[0] |= 1 << 4 // odd flag
+ buf[0] |= hex[0] // first nibble is contained in the first byte
+ hex = hex[1:]
+ }
+ decodeNibbles(hex, buf[1:])
+ return buf
+}
+
+// TestSnapTrieNodes various forms of GetTrieNodes requests.
+func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
+
+ key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ // helper function to iterate the key, and generate the compact-encoded
+ // trie paths along the way.
+ pathTo := func(length int) snap.TrieNodePathSet {
+ hex := keybytesToHex(key)[:length]
+ hex[len(hex)-1] = 0 // remove term flag
+ hKey := hexToCompact(hex)
+ return snap.TrieNodePathSet{hKey}
+ }
+ var accPaths []snap.TrieNodePathSet
+ for i := 1; i <= 65; i++ {
+ accPaths = append(accPaths, pathTo(i))
+ }
+ empty := emptyCode
+ for i, tc := range []trieNodesTest{
+ {
+ root: s.chain.RootAt(999),
+ paths: nil,
+ nBytes: 500,
+ expHashes: nil,
+ },
+ {
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off
+ snap.TrieNodePathSet{[]byte{0}},
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{},
+ expReject: true,
+ },
+ {
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{[]byte{0}},
+ snap.TrieNodePathSet{[]byte{1}, []byte{0}},
+ },
+ nBytes: 5000,
+ //0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf
+ expHashes: []common.Hash{s.chain.RootAt(999)},
+ },
+ { // nonsensically long path
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")},
+ },
+ {
+ root: s.chain.RootAt(0),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{[]byte{0}},
+ snap.TrieNodePathSet{[]byte{1}, []byte{0}},
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{},
+ },
+ {
+ // The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
+ root: s.chain.RootAt(999),
+ paths: accPaths,
+ nBytes: 5000,
+ expHashes: []common.Hash{
+ common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
+ common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty},
+ },
+ {
+ // Basically the same as above, with different ordering
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ accPaths[10], accPaths[1], accPaths[0],
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{
+ empty,
+ common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
+ common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
+ },
+ },
+ } {
+ if err := s.snapGetTrieNodes(t, &tc); err != nil {
+ t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
+ }
+ }
+}
+
+func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetAccountRange{
+ ID: uint64(rand.Int63()),
+ Root: tc.root,
+ Origin: tc.origin,
+ Limit: tc.limit,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ return fmt.Errorf("account range request failed: %v", err)
+ }
+ var res *snap.AccountRangePacket
+ if r, ok := resp.(*AccountRange); !ok {
+ return fmt.Errorf("account range response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.AccountRangePacket)(r)
+ }
+ if exp, got := tc.expAccounts, len(res.Accounts); exp != got {
+ return fmt.Errorf("expected %d accounts, got %d", exp, got)
+ }
+ // Check that the encoding order is correct
+ for i := 1; i < len(res.Accounts); i++ {
+ if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
+ return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
+ }
+ }
+ var (
+ hashes []common.Hash
+ accounts [][]byte
+ proof = res.Proof
+ )
+ hashes, accounts, err = res.Unpack()
+ if err != nil {
+ return err
+ }
+ if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
+ return nil
+ }
+ if len(hashes) > 0 {
+ if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
+ return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
+ }
+ if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
+ return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
+ }
+ }
+ // Reconstruct a partial trie from the response and verify it
+ keys := make([][]byte, len(hashes))
+ for i, key := range hashes {
+ keys[i] = common.CopyBytes(key[:])
+ }
+ nodes := make(light.NodeList, len(proof))
+ for i, node := range proof {
+ nodes[i] = node
+ }
+ proofdb := nodes.NodeSet()
+
+ var end []byte
+ if len(keys) > 0 {
+ end = keys[len(keys)-1]
+ }
+ _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
+ return err
+}
+
+func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetStorageRanges{
+ ID: uint64(rand.Int63()),
+ Root: tc.root,
+ Accounts: tc.accounts,
+ Origin: tc.origin,
+ Limit: tc.limit,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ return fmt.Errorf("account range request failed: %v", err)
+ }
+ var res *snap.StorageRangesPacket
+ if r, ok := resp.(*StorageRanges); !ok {
+ return fmt.Errorf("account range response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.StorageRangesPacket)(r)
+ }
+ gotSlots := 0
+ // Ensure the ranges are monotonically increasing
+ for i, slots := range res.Slots {
+ gotSlots += len(slots)
+ for j := 1; j < len(slots); j++ {
+ if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
+ return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
+ }
+ }
+ }
+ if exp, got := tc.expSlots, gotSlots; exp != got {
+ return fmt.Errorf("expected %d slots, got %d", exp, got)
+ }
+ return nil
+}
+
+func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetByteCodes{
+ ID: uint64(rand.Int63()),
+ Hashes: tc.hashes,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ return fmt.Errorf("getBytecodes request failed: %v", err)
+ }
+ var res *snap.ByteCodesPacket
+ if r, ok := resp.(*ByteCodes); !ok {
+ return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.ByteCodesPacket)(r)
+ }
+ if exp, got := tc.expHashes, len(res.Codes); exp != got {
+ for i, c := range res.Codes {
+ fmt.Printf("%d. %#x\n", i, c)
+ }
+ return fmt.Errorf("expected %d bytecodes, got %d", exp, got)
+ }
+ // Cross reference the requested bytecodes with the response to find gaps
+ // that the serving node is missing
+ var (
+ bytecodes = res.Codes
+ hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash = make([]byte, 32)
+ codes = make([][]byte, len(req.Hashes))
+ )
+
+ for i, j := 0, 0; i < len(bytecodes); i++ {
+ // Find the next hash that we've been served, leaving misses with nils
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+
+ for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) {
+ j++
+ }
+ if j < len(req.Hashes) {
+ codes[j] = bytecodes[i]
+ j++
+ continue
+ }
+ // We've either ran out of hashes, or got unrequested data
+ return errors.New("unexpected bytecode")
+ }
+
+ return nil
+}
+
+func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetTrieNodes{
+ ID: uint64(rand.Int63()),
+ Root: tc.root,
+ Paths: tc.paths,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ if tc.expReject {
+ return nil
+ }
+ return fmt.Errorf("trienodes request failed: %v", err)
+ }
+ var res *snap.TrieNodesPacket
+ if r, ok := resp.(*TrieNodes); !ok {
+ return fmt.Errorf("trienodes response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.TrieNodesPacket)(r)
+ }
+
+ // Check the correctness
+
+ // Cross reference the requested trienodes with the response to find gaps
+ // that the serving node is missing
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash := make([]byte, 32)
+ trienodes := res.Nodes
+ if got, want := len(trienodes), len(tc.expHashes); got != want {
+ return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
+ }
+ for i, trienode := range trienodes {
+ hasher.Reset()
+ hasher.Write(trienode)
+ hasher.Read(hash)
+ if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) {
+ fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want)
+ err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want)
+ }
+ }
+ return err
+}
diff --git a/cmd/devp2p/internal/ethtest/snapTypes.go b/cmd/devp2p/internal/ethtest/snapTypes.go
new file mode 100644
index 000000000000..bb8638c3d803
--- /dev/null
+++ b/cmd/devp2p/internal/ethtest/snapTypes.go
@@ -0,0 +1,36 @@
+package ethtest
+
+import "github.com/ethereum/go-ethereum/eth/protocols/snap"
+
+// GetAccountRange represents an account range query.
+type GetAccountRange snap.GetAccountRangePacket
+
+func (g GetAccountRange) Code() int { return 33 }
+
+type AccountRange snap.AccountRangePacket
+
+func (g AccountRange) Code() int { return 34 }
+
+type GetStorageRanges snap.GetStorageRangesPacket
+
+func (g GetStorageRanges) Code() int { return 35 }
+
+type StorageRanges snap.StorageRangesPacket
+
+func (g StorageRanges) Code() int { return 36 }
+
+type GetByteCodes snap.GetByteCodesPacket
+
+func (g GetByteCodes) Code() int { return 37 }
+
+type ByteCodes snap.ByteCodesPacket
+
+func (g ByteCodes) Code() int { return 38 }
+
+type GetTrieNodes snap.GetTrieNodesPacket
+
+func (g GetTrieNodes) Code() int { return 39 }
+
+type TrieNodes snap.TrieNodesPacket
+
+func (g TrieNodes) Code() int { return 40 }
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index 28ba4aa76740..dee59bc57987 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -125,6 +125,16 @@ func (s *Suite) Eth66Tests() []utesting.Test {
}
}
+func (s *Suite) SnapTests() []utesting.Test {
+ return []utesting.Test{
+ {Name: "TestSnapStatus", Fn: s.TestSnapStatus},
+ {Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange},
+ {Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes},
+ {Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes},
+ {Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges},
+ }
+}
+
var (
eth66 = true // indicates whether suite should negotiate eth66 connection
eth65 = false // indicates whether suite should negotiate eth65 connection or below.
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
index 6d14404e6624..9bc55bc0abc5 100644
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -55,6 +55,27 @@ func TestEthSuite(t *testing.T) {
}
}
+func TestSnapSuite(t *testing.T) {
+ geth, err := runGeth()
+ if err != nil {
+ t.Fatalf("could not run geth: %v", err)
+ }
+ defer geth.Close()
+
+ suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+ if err != nil {
+ t.Fatalf("could not create new test suite: %v", err)
+ }
+ for _, test := range suite.SnapTests() {
+ t.Run(test.Name, func(t *testing.T) {
+ result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+ if result[0].Failed {
+ t.Fatal()
+ }
+ })
+ }
+}
+
// runGeth creates and starts a geth node
func runGeth() (*node.Node, error) {
stack, err := node.New(&node.Config{
diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go
index e49ea284e94c..09bb218d5183 100644
--- a/cmd/devp2p/internal/ethtest/types.go
+++ b/cmd/devp2p/internal/ethtest/types.go
@@ -19,6 +19,7 @@ package ethtest
import (
"crypto/ecdsa"
"fmt"
+ "time"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/p2p"
@@ -126,10 +127,12 @@ func (pt PooledTransactions) Code() int { return 26 }
// Conn represents an individual connection with a peer
type Conn struct {
*rlpx.Conn
- ourKey *ecdsa.PrivateKey
- negotiatedProtoVersion uint
- ourHighestProtoVersion uint
- caps []p2p.Cap
+ ourKey *ecdsa.PrivateKey
+ negotiatedProtoVersion uint
+ negotiatedSnapProtoVersion uint
+ ourHighestProtoVersion uint
+ ourHighestSnapProtoVersion uint
+ caps []p2p.Cap
}
// Read reads an eth packet from the connection.
@@ -259,12 +262,7 @@ func (c *Conn) Read66() (uint64, Message) {
// Write writes a eth packet to the connection.
func (c *Conn) Write(msg Message) error {
- // check if message is eth protocol message
- var (
- payload []byte
- err error
- )
- payload, err = rlp.EncodeToBytes(msg)
+ payload, err := rlp.EncodeToBytes(msg)
if err != nil {
return err
}
@@ -281,3 +279,43 @@ func (c *Conn) Write66(req eth.Packet, code int) error {
_, err = c.Conn.Write(uint64(code), payload)
return err
}
+
+// ReadSnap reads a snap/1 response with the given id from the connection.
+func (c *Conn) ReadSnap(id uint64) (Message, error) {
+ respId := id + 1
+ start := time.Now()
+ for respId != id && time.Since(start) < timeout {
+ code, rawData, _, err := c.Conn.Read()
+ if err != nil {
+ return nil, fmt.Errorf("could not read from connection: %v", err)
+ }
+ var snpMsg interface{}
+ switch int(code) {
+ case (GetAccountRange{}).Code():
+ snpMsg = new(GetAccountRange)
+ case (AccountRange{}).Code():
+ snpMsg = new(AccountRange)
+ case (GetStorageRanges{}).Code():
+ snpMsg = new(GetStorageRanges)
+ case (StorageRanges{}).Code():
+ snpMsg = new(StorageRanges)
+ case (GetByteCodes{}).Code():
+ snpMsg = new(GetByteCodes)
+ case (ByteCodes{}).Code():
+ snpMsg = new(ByteCodes)
+ case (GetTrieNodes{}).Code():
+ snpMsg = new(GetTrieNodes)
+ case (TrieNodes{}).Code():
+ snpMsg = new(TrieNodes)
+ default:
+ //return nil, fmt.Errorf("invalid message code: %d", code)
+ continue
+ }
+ if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
+ return nil, fmt.Errorf("could not rlp decode message: %v", err)
+ }
+ return snpMsg.(Message), nil
+
+ }
+ return nil, fmt.Errorf("request timed out")
+}
diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go
index 24a16f0b3cb6..6557a239da77 100644
--- a/cmd/devp2p/rlpxcmd.go
+++ b/cmd/devp2p/rlpxcmd.go
@@ -36,6 +36,7 @@ var (
Subcommands: []cli.Command{
rlpxPingCommand,
rlpxEthTestCommand,
+ rlpxSnapTestCommand,
},
}
rlpxPingCommand = cli.Command{
@@ -53,6 +54,16 @@ var (
testTAPFlag,
},
}
+ rlpxSnapTestCommand = cli.Command{
+ Name: "snap-test",
+ Usage: "Runs tests against a node",
+ ArgsUsage: " ",
+ Action: rlpxSnapTest,
+ Flags: []cli.Flag{
+ testPatternFlag,
+ testTAPFlag,
+ },
+ }
)
func rlpxPing(ctx *cli.Context) error {
@@ -106,3 +117,15 @@ func rlpxEthTest(ctx *cli.Context) error {
}
return runTests(ctx, suite.AllEthTests())
}
+
+// rlpxSnapTest runs the snap protocol test suite.
+func rlpxSnapTest(ctx *cli.Context) error {
+ if ctx.NArg() < 3 {
+ exit("missing path to chain.rlp as command-line argument")
+ }
+ suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
+ if err != nil {
+ exit(err)
+ }
+ return runTests(ctx, suite.SnapTests())
+}
diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go
index f2986e8ee91b..70baae92f460 100644
--- a/cmd/ethkey/utils.go
+++ b/cmd/ethkey/utils.go
@@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
// signHash is a helper function that calculates a hash for the given message
// that can be safely used to calculate a signature from.
//
-// The hash is calulcated as
+// The hash is calculated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index dfdde4217396..874685f15ea0 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -67,6 +67,7 @@ type ommer struct {
type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
Difficulty *big.Int `json:"currentDifficulty"`
+ Random *big.Int `json:"currentRandom"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"`
@@ -81,6 +82,7 @@ type stEnv struct {
type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256
+ Random *math.HexOrDecimal256
ParentDifficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64
@@ -139,6 +141,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
if pre.Env.BaseFee != nil {
vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
}
+ // If random is defined, add it to the vmContext.
+ if pre.Env.Random != nil {
+ rnd := common.BigToHash(pre.Env.Random)
+ vmContext.Random = &rnd
+ }
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
// done in StateProcessor.Process(block, ...), right before transactions are applied.
if chainConfig.DAOForkSupport &&
diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go
index 1bb3c6a46b0c..a6d774cdabcf 100644
--- a/cmd/evm/internal/t8ntool/gen_stenv.go
+++ b/cmd/evm/internal/t8ntool/gen_stenv.go
@@ -18,6 +18,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
@@ -31,6 +32,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+ enc.Random = (*math.HexOrDecimal256)(s.Random)
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number)
@@ -48,6 +50,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
@@ -69,6 +72,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.Difficulty != nil {
s.Difficulty = (*big.Int)(dec.Difficulty)
}
+ if dec.Random != nil {
+ s.Random = (*big.Int)(dec.Random)
+ }
if dec.ParentDifficulty != nil {
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
}
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 11d71e4ce55d..097f9ce65c55 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -252,6 +252,10 @@ func Transition(ctx *cli.Context) error {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
}
+ // Sanity check, to not `panic` in state_transition
+ if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
+ return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules"))
+ }
if env := prestate.Env; env.Difficulty == nil {
// If difficulty was not provided by caller, we need to calculate it.
switch {
diff --git a/cmd/evm/testdata/15/exp3.json b/cmd/evm/testdata/15/exp3.json
index 6c46d267cf37..d7606a207361 100644
--- a/cmd/evm/testdata/15/exp3.json
+++ b/cmd/evm/testdata/15/exp3.json
@@ -21,19 +21,19 @@
"error": "transaction type not supported"
},
{
- "error": "rlp: expected List"
+ "error": "typed transaction too short"
},
{
- "error": "rlp: expected List"
+ "error": "typed transaction too short"
},
{
- "error": "rlp: expected List"
+ "error": "typed transaction too short"
},
{
- "error": "rlp: expected List"
+ "error": "typed transaction too short"
},
{
- "error": "rlp: expected List"
+ "error": "typed transaction too short"
},
{
"error": "rlp: expected input list for types.AccessListTx"
diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go
index 9455eeda3615..3a71b8571603 100644
--- a/cmd/geth/accountcmd_test.go
+++ b/cmd/geth/accountcmd_test.go
@@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
t.Error(err)
}
- geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile)
+ geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile)
defer geth.ExpectExit()
geth.Expect(expected)
}
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 7a642edd0e41..26eeccb8b60b 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
@@ -161,7 +162,23 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
}
- backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
+ backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
+ // Warn users to migrate if they have a legacy freezer format.
+ if eth != nil {
+ firstIdx := uint64(0)
+ // Hack to speed up check for mainnet because we know
+ // the first non-empty block.
+ ghash := rawdb.ReadCanonicalHash(eth.ChainDb(), 0)
+ if cfg.Eth.NetworkId == 1 && ghash == params.MainnetGenesisHash {
+ firstIdx = 46147
+ }
+ isLegacy, _, err := dbHasLegacyReceipts(eth.ChainDb(), firstIdx)
+ if err != nil {
+ log.Error("Failed to check db for legacy receipts", "err", err)
+ } else if isLegacy {
+ log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
+ }
+ }
// Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index c2c42276b535..33a7becfcd00 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -34,9 +34,12 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
@@ -69,6 +72,8 @@ Remove blockchain and state databases`,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
+ dbMetadataCmd,
+ dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
@@ -233,6 +238,38 @@ WARNING: This is a low-level operation which may cause database corruption!`,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
+ dbMetadataCmd = cli.Command{
+ Action: utils.MigrateFlags(showMetaData),
+ Name: "metadata",
+ Usage: "Shows metadata about the chain status.",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ utils.SyncModeFlag,
+ utils.MainnetFlag,
+ utils.RopstenFlag,
+ utils.SepoliaFlag,
+ utils.RinkebyFlag,
+ utils.GoerliFlag,
+ },
+ Description: "Shows metadata about the chain status.",
+ }
+ dbMigrateFreezerCmd = cli.Command{
+ Action: utils.MigrateFlags(freezerMigrate),
+ Name: "freezer-migrate",
+ Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ utils.SyncModeFlag,
+ utils.MainnetFlag,
+ utils.RopstenFlag,
+ utils.SepoliaFlag,
+ utils.RinkebyFlag,
+ utils.GoerliFlag,
+ },
+ Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
+WARNING: please back-up the receipt files in your ancients before running this command.`,
+ }
)
func removeDB(ctx *cli.Context) error {
@@ -539,7 +576,7 @@ func freezerInspect(ctx *cli.Context) error {
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
- if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
+ if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
@@ -685,3 +722,138 @@ func exportChaindata(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
+
+func showMetaData(ctx *cli.Context) error {
+ stack, _ := makeConfigNode(ctx)
+ defer stack.Close()
+ db := utils.MakeChainDatabase(ctx, stack, true)
+ ancients, err := db.Ancients()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
+ }
+ pp := func(val *uint64) string {
+ if val == nil {
+ return ""
+ }
+ return fmt.Sprintf("%d (0x%x)", *val, *val)
+ }
+ data := [][]string{
+ {"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
+ {"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
+ {"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
+ {"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
+ if b := rawdb.ReadHeadBlock(db); b != nil {
+ data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
+ data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
+ data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
+ }
+ if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
+ data = append(data, []string{"SkeletonSyncStatus", string(b)})
+ }
+ if h := rawdb.ReadHeadHeader(db); h != nil {
+ data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
+ data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
+ data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
+ }
+ data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
+ {"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
+ {"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
+ {"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
+ {"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
+ {"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
+ {"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
+ {"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
+ {"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
+ {"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
+ }...)
+ table := tablewriter.NewWriter(os.Stdout)
+ table.SetHeader([]string{"Field", "Value"})
+ table.AppendBulk(data)
+ table.Render()
+ return nil
+}
+
+func freezerMigrate(ctx *cli.Context) error {
+ stack, _ := makeConfigNode(ctx)
+ defer stack.Close()
+
+ db := utils.MakeChainDatabase(ctx, stack, false)
+ defer db.Close()
+
+ // Check first block for legacy receipt format
+ numAncients, err := db.Ancients()
+ if err != nil {
+ return err
+ }
+ if numAncients < 1 {
+ log.Info("No receipts in freezer to migrate")
+ return nil
+ }
+
+ isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
+ if err != nil {
+ return err
+ }
+ if !isFirstLegacy {
+ log.Info("No legacy receipts to migrate")
+ return nil
+ }
+
+ log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
+ start := time.Now()
+ if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
+ return err
+ }
+ if err := db.Close(); err != nil {
+ return err
+ }
+ log.Info("Migration finished", "duration", time.Since(start))
+
+ return nil
+}
+
+// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
+// non-empty receipt and checks its format. The index of this first non-empty element is
+// the second return parameter.
+func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
+ // Check first block for legacy receipt format
+ numAncients, err := db.Ancients()
+ if err != nil {
+ return false, 0, err
+ }
+ if numAncients < 1 {
+ return false, 0, nil
+ }
+ if firstIdx >= numAncients {
+ return false, firstIdx, nil
+ }
+ var (
+ legacy bool
+ blob []byte
+ emptyRLPList = []byte{192}
+ )
+ // Find first block with non-empty receipt, only if
+ // the index is not already provided.
+ if firstIdx == 0 {
+ for i := uint64(0); i < numAncients; i++ {
+ blob, err = db.Ancient("receipts", i)
+ if err != nil {
+ return false, 0, err
+ }
+ if len(blob) == 0 {
+ continue
+ }
+ if !bytes.Equal(blob, emptyRLPList) {
+ firstIdx = i
+ break
+ }
+ }
+ }
+ // Is first non-empty receipt legacy?
+ first, err := db.Ancient("receipts", firstIdx)
+ if err != nil {
+ return false, 0, err
+ }
+ legacy, err = types.IsLegacyStoredReceipts(first)
+ return legacy, firstIdx, err
+}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 6342cd9d0023..bf99483cd6f9 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -107,7 +107,8 @@ var (
utils.UltraLightFractionFlag,
utils.UltraLightOnlyAnnounceFlag,
utils.LightNoSyncServeFlag,
- utils.WhitelistFlag,
+ utils.EthPeerRequiredBlocksFlag,
+ utils.LegacyWhitelistFlag,
utils.BloomFilterSizeFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
@@ -118,6 +119,7 @@ var (
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
+ utils.FDLimitFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
@@ -146,6 +148,7 @@ var (
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
+ utils.KilnFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.EthStatsURLFlag,
@@ -157,7 +160,6 @@ var (
utils.GpoIgnoreGasPriceFlag,
utils.MinerNotifyFullFlag,
configFileFlag,
- utils.CatalystFlag,
}
rpcFlags = []cli.Flag{
@@ -165,6 +167,10 @@ var (
utils.HTTPListenAddrFlag,
utils.HTTPPortFlag,
utils.HTTPCORSDomainFlag,
+ utils.AuthListenFlag,
+ utils.AuthPortFlag,
+ utils.AuthVirtualHostsFlag,
+ utils.JWTSecretFlag,
utils.HTTPVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
@@ -208,7 +214,7 @@ func init() {
// Initialize the CLI app and start Geth
app.Action = geth
app.HideVersion = true // we have a command to print the version
- app.Copyright = "Copyright 2013-2021 The go-ethereum Authors"
+ app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
app.Commands = []cli.Command{
// See chaincmd.go:
initCommand,
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index bd2c2443a68f..d0539eeff74c 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -314,8 +314,7 @@ func traverseState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
- code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
- if len(code) == 0 {
+ if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
return errors.New("missing code")
}
@@ -386,11 +385,10 @@ func traverseRawState(ctx *cli.Context) error {
nodes += 1
node := accIter.Hash()
+ // Check the present for non-empty hash node(embedded node doesn't
+ // have their own hash).
if node != (common.Hash{}) {
- // Check the present for non-empty hash node(embedded node doesn't
- // have their own hash).
- blob := rawdb.ReadTrieNode(chaindb, node)
- if len(blob) == 0 {
+ if !rawdb.HasTrieNode(chaindb, node) {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
@@ -418,8 +416,7 @@ func traverseRawState(ctx *cli.Context) error {
// Check the present for non-empty hash node(embedded node doesn't
// have their own hash).
if node != (common.Hash{}) {
- blob := rawdb.ReadTrieNode(chaindb, node)
- if len(blob) == 0 {
+ if !rawdb.HasTrieNode(chaindb, node) {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
}
@@ -435,8 +432,7 @@ func traverseRawState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
- code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
- if len(code) == 0 {
+ if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
return errors.New("missing code")
}
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index c63c62fd3820..0916b14bec42 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -46,6 +46,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.RinkebyFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
+ utils.KilnFlag,
utils.SyncModeFlag,
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
@@ -53,7 +54,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.EthStatsURLFlag,
utils.IdentityFlag,
utils.LightKDFFlag,
- utils.WhitelistFlag,
+ utils.EthPeerRequiredBlocksFlag,
},
},
{
@@ -119,6 +120,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
+ utils.FDLimitFlag,
},
},
{
@@ -148,6 +150,10 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.WSApiFlag,
utils.WSPathPrefixFlag,
utils.WSAllowedOriginsFlag,
+ utils.JWTSecretFlag,
+ utils.AuthListenFlag,
+ utils.AuthPortFlag,
+ utils.AuthVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
@@ -221,6 +227,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Name: "ALIASED (deprecated)",
Flags: []cli.Flag{
utils.NoUSBFlag,
+ utils.LegacyWhitelistFlag,
},
},
{
@@ -229,7 +236,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.SnapshotFlag,
utils.BloomFilterSizeFlag,
cli.HelpFlag,
- utils.CatalystFlag,
},
},
}
diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go
index 0f056d1967d4..4be32d5e4f6d 100644
--- a/cmd/geth/version_check_test.go
+++ b/cmd/geth/version_check_test.go
@@ -25,6 +25,8 @@ import (
"strconv"
"strings"
"testing"
+
+ "github.com/jedisct1/go-minisign"
)
func TestVerification(t *testing.T) {
@@ -128,3 +130,39 @@ func TestMatching(t *testing.T) {
}
}
}
+
+func TestGethPubKeysParseable(t *testing.T) {
+ for _, pubkey := range gethPubKeys {
+ _, err := minisign.NewPublicKey(pubkey)
+ if err != nil {
+ t.Errorf("Should be parseable")
+ }
+ }
+}
+
+func TestKeyID(t *testing.T) {
+ type args struct {
+ id [8]byte
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"},
+ {"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"},
+ {"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := keyID(tt.args.id); got != tt.want {
+ t.Errorf("keyID() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func extractKeyId(pubkey string) [8]byte {
+ p, _ := minisign.NewPublicKey(pubkey)
+ return p.KeyId
+}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index ffff2c92cb84..ae1e77675355 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -45,7 +45,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/catalyst"
+ ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -56,6 +56,7 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/les"
+ lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp"
@@ -160,6 +161,10 @@ var (
Name: "sepolia",
Usage: "Sepolia network: pre-configured proof-of-work test network",
}
+ KilnFlag = cli.BoolFlag{
+ Name: "kiln",
+ Usage: "Kiln network: pre-configured proof-of-work to proof-of-stake test network",
+ }
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -236,9 +241,13 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
- WhitelistFlag = cli.StringFlag{
+ EthPeerRequiredBlocksFlag = cli.StringFlag{
+ Name: "eth.requiredblocks",
+ Usage: "Comma separated block number-to-hash mappings to require for peering (=)",
+ }
+ LegacyWhitelistFlag = cli.StringFlag{
Name: "whitelist",
- Usage: "Comma separated block number-to-hash mappings to enforce (=)",
+ Usage: "Comma separated block number-to-hash mappings to enforce (=) (deprecated in favor of --peer.requiredblocks)",
}
BloomFilterSizeFlag = cli.Uint64Flag{
Name: "bloomfilter.size",
@@ -432,6 +441,10 @@ var (
Name: "cache.preimages",
Usage: "Enable recording the SHA3/keccak preimages of trie keys",
}
+ FDLimitFlag = cli.IntFlag{
+ Name: "fdlimit",
+ Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
+ }
// Miner settings
MiningEnabledFlag = cli.BoolFlag{
Name: "mine",
@@ -517,6 +530,26 @@ var (
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
Value: ethconfig.Defaults.RPCTxFeeCap,
}
+ // Authenticated RPC HTTP settings
+ AuthListenFlag = cli.StringFlag{
+ Name: "authrpc.addr",
+ Usage: "Listening address for authenticated APIs",
+ Value: node.DefaultConfig.AuthAddr,
+ }
+ AuthPortFlag = cli.IntFlag{
+ Name: "authrpc.port",
+ Usage: "Listening port for authenticated APIs",
+ Value: node.DefaultConfig.AuthPort,
+ }
+ AuthVirtualHostsFlag = cli.StringFlag{
+ Name: "authrpc.vhosts",
+ Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+ Value: strings.Join(node.DefaultConfig.AuthVirtualHosts, ","),
+ }
+ JWTSecretFlag = cli.StringFlag{
+ Name: "authrpc.jwtsecret",
+ Usage: "Path to a JWT secret to use for authenticated RPC endpoints",
+ }
// Logging and debug settings
EthStatsURLFlag = cli.StringFlag{
Name: "ethstats",
@@ -789,11 +822,6 @@ var (
Usage: "InfluxDB organization name (v2 only)",
Value: metrics.DefaultConfig.InfluxDBOrganization,
}
-
- CatalystFlag = cli.BoolFlag{
- Name: "catalyst",
- Usage: "Catalyst mode (eth2 integration testing)",
- }
)
// MakeDataDir retrieves the currently requested data directory, terminating
@@ -815,6 +843,9 @@ func MakeDataDir(ctx *cli.Context) string {
if ctx.GlobalBool(SepoliaFlag.Name) {
return filepath.Join(path, "sepolia")
}
+ if ctx.GlobalBool(KilnFlag.Name) {
+ return filepath.Join(path, "kiln")
+ }
return path
}
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@@ -869,6 +900,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name):
urls = params.GoerliBootnodes
+ case ctx.GlobalBool(KilnFlag.Name):
+ urls = params.KilnBootnodes
case cfg.BootstrapNodes != nil:
return // already set, don't apply defaults.
}
@@ -955,6 +988,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
}
+ if ctx.GlobalIsSet(AuthListenFlag.Name) {
+ cfg.AuthAddr = ctx.GlobalString(AuthListenFlag.Name)
+ }
+
+ if ctx.GlobalIsSet(AuthPortFlag.Name) {
+ cfg.AuthPort = ctx.GlobalInt(AuthPortFlag.Name)
+ }
+
+ if ctx.GlobalIsSet(AuthVirtualHostsFlag.Name) {
+ cfg.AuthVirtualHosts = SplitAndTrim(ctx.GlobalString(AuthVirtualHostsFlag.Name))
+ }
+
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
}
@@ -1061,11 +1106,24 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
// MakeDatabaseHandles raises out the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
-func MakeDatabaseHandles() int {
+func MakeDatabaseHandles(max int) int {
limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
+ switch {
+ case max == 0:
+ // User didn't specify a meaningful value, use system limits
+ case max < 128:
+ // User specified something unhealthy, just use system defaults
+ log.Error("File descriptor limit invalid (<128)", "had", max, "updated", limit)
+ case max > limit:
+ // User requested more than the OS allows, notify that we can't allocate it
+ log.Warn("Requested file descriptors denied by OS", "req", max, "limit", limit)
+ default:
+ // User limit is meaningful and within allowed range, use that
+ limit = max
+ }
raised, err := fdlimit.Raise(uint64(limit))
if err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
@@ -1222,6 +1280,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setDataDir(ctx, cfg)
setSmartCard(ctx, cfg)
+ if ctx.GlobalIsSet(JWTSecretFlag.Name) {
+ cfg.JWTSecret = ctx.GlobalString(JWTSecretFlag.Name)
+ }
+
if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
}
@@ -1290,6 +1352,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
+ case ctx.GlobalBool(KilnFlag.Name) && cfg.DataDir == node.DefaultDataDir():
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "kiln")
}
}
@@ -1408,26 +1472,33 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
}
}
-func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
- whitelist := ctx.GlobalString(WhitelistFlag.Name)
- if whitelist == "" {
- return
+func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
+ peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
+
+ if peerRequiredBlocks == "" {
+ if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
+ log.Warn("The flag --rpc is deprecated and will be removed, please use --peer.requiredblocks")
+ peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
+ } else {
+ return
+ }
}
- cfg.Whitelist = make(map[uint64]common.Hash)
- for _, entry := range strings.Split(whitelist, ",") {
+
+ cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
+ for _, entry := range strings.Split(peerRequiredBlocks, ",") {
parts := strings.Split(entry, "=")
if len(parts) != 2 {
- Fatalf("Invalid whitelist entry: %s", entry)
+ Fatalf("Invalid peer required block entry: %s", entry)
}
number, err := strconv.ParseUint(parts[0], 0, 64)
if err != nil {
- Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+ Fatalf("Invalid peer required block number %s: %v", parts[0], err)
}
var hash common.Hash
if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
- Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+ Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
}
- cfg.Whitelist[number] = hash
+ cfg.PeerRequiredBlocks[number] = hash
}
}
@@ -1475,7 +1546,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
- CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
+ CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag, KilnFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@@ -1494,7 +1565,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
setMiner(ctx, &cfg.Miner)
- setWhitelist(ctx, cfg)
+ setPeerRequiredBlocks(ctx, cfg)
setLes(ctx, cfg)
// Cap the cache allowance and tune the garbage collector
@@ -1526,7 +1597,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
- cfg.DatabaseHandles = MakeDatabaseHandles()
+ cfg.DatabaseHandles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
if ctx.GlobalIsSet(AncientFlag.Name) {
cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
}
@@ -1637,6 +1708,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
cfg.Genesis = core.DefaultGoerliGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
+ case ctx.GlobalBool(KilnFlag.Name):
+ if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
+ cfg.NetworkId = 1337802
+ }
+ cfg.Genesis = core.DefaultKilnGenesisBlock()
+ SetDNSDiscoveryDefaults(cfg, params.KilnGenesisHash)
case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337
@@ -1673,9 +1750,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
if ctx.GlobalIsSet(DataDirFlag.Name) {
+ // If datadir doesn't exist we need to open db in write-mode
+ // so leveldb can create files.
+ readonly := true
+ if !common.FileExist(stack.ResolvePath("chaindata")) {
+ readonly = false
+ }
// Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec.
- chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
+ chaindb := MakeChainDatabase(ctx, stack, readonly)
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
cfg.Genesis = nil // fallback to db content
}
@@ -1710,15 +1793,15 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
+func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
- if isCatalyst {
- if err := catalyst.RegisterLight(stack, backend); err != nil {
+ if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
+ if err := lescatalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
@@ -1734,8 +1817,8 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool
Fatalf("Failed to create the LES server: %v", err)
}
}
- if isCatalyst {
- if err := catalyst.Register(stack, backend); err != nil {
+ if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
+ if err := ethcatalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
@@ -1838,7 +1921,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
var (
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
- handles = MakeDatabaseHandles()
+ handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
err error
chainDb ethdb.Database
@@ -1869,6 +1952,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(GoerliFlag.Name):
genesis = core.DefaultGoerliGenesisBlock()
+ case ctx.GlobalBool(KilnFlag.Name):
+ genesis = core.DefaultKilnGenesisBlock()
case ctx.GlobalBool(DeveloperFlag.Name):
Fatalf("Developer chains are ephemeral")
}
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index 9467fea67bef..1fd7deb872fb 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -43,7 +43,6 @@ var (
// error types into the consensus package.
var (
errTooManyUncles = errors.New("too many uncles")
- errInvalidMixDigest = errors.New("invalid mix digest")
errInvalidNonce = errors.New("invalid nonce")
errInvalidUncleHash = errors.New("invalid uncle hash")
)
@@ -182,10 +181,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if len(header.Extra) > 32 {
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
}
- // Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
- if header.MixDigest != (common.Hash{}) {
- return errInvalidMixDigest
- }
+ // Verify the seal parts. Ensure the nonce and uncle hash are the expected value.
if header.Nonce != beaconNonce {
return errInvalidNonce
}
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 4e33d99c8dde..c196ad062170 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -549,6 +549,11 @@ func NewShared() *Ethash {
// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
+ return ethash.StopRemoteSealer()
+}
+
+// StopRemoteSealer stops the remote sealer
+func (ethash *Ethash) StopRemoteSealer() error {
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
if ethash.remote == nil {
diff --git a/core/beacon/errors.go b/core/beacon/errors.go
new file mode 100644
index 000000000000..83d5eebd5fa0
--- /dev/null
+++ b/core/beacon/errors.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package beacon
+
+import "github.com/ethereum/go-ethereum/rpc"
+
+var (
+ // VALID is returned by the engine API in the following calls:
+ // - newPayloadV1: if the payload was already known or was just validated and executed
+ // - forkchoiceUpdateV1: if the chain accepted the reorg (might ignore if it's stale)
+ VALID = "VALID"
+
+ // INVALID is returned by the engine API in the following calls:
+ // - newPayloadV1: if the payload failed to execute on top of the local chain
+ // - forkchoiceUpdateV1: if the new head is unknown, pre-merge, or reorg to it fails
+ INVALID = "INVALID"
+
+ // SYNCING is returned by the engine API in the following calls:
+ // - newPayloadV1: if the payload was accepted on top of an active sync
+ // - forkchoiceUpdateV1: if the new head was seen before, but not part of the chain
+ SYNCING = "SYNCING"
+
+ // ACCEPTED is returned by the engine API in the following calls:
+ // - newPayloadV1: if the payload was accepted, but not processed (side chain)
+ ACCEPTED = "ACCEPTED"
+
+ INVALIDBLOCKHASH = "INVALID_BLOCK_HASH"
+ INVALIDTERMINALBLOCK = "INVALID_TERMINAL_BLOCK"
+
+ GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
+ UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
+ InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
+
+ STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
+ STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}
+)
diff --git a/eth/catalyst/gen_blockparams.go b/core/beacon/gen_blockparams.go
similarity index 85%
rename from eth/catalyst/gen_blockparams.go
rename to core/beacon/gen_blockparams.go
index ccf5c327ffa3..0e2ea4bb1338 100644
--- a/eth/catalyst/gen_blockparams.go
+++ b/core/beacon/gen_blockparams.go
@@ -1,6 +1,6 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-package catalyst
+package beacon
import (
"encoding/json"
@@ -16,7 +16,7 @@ var _ = (*payloadAttributesMarshaling)(nil)
func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
type PayloadAttributesV1 struct {
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- Random common.Hash `json:"random" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
}
var enc PayloadAttributesV1
@@ -30,7 +30,7 @@ func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
type PayloadAttributesV1 struct {
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- Random *common.Hash `json:"random" gencodec:"required"`
+ Random *common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
}
var dec PayloadAttributesV1
@@ -42,7 +42,7 @@ func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
}
p.Timestamp = uint64(*dec.Timestamp)
if dec.Random == nil {
- return errors.New("missing required field 'random' for PayloadAttributesV1")
+ return errors.New("missing required field 'prevRandao' for PayloadAttributesV1")
}
p.Random = *dec.Random
if dec.SuggestedFeeRecipient == nil {
diff --git a/eth/catalyst/gen_ed.go b/core/beacon/gen_ed.go
similarity index 93%
rename from eth/catalyst/gen_ed.go
rename to core/beacon/gen_ed.go
index 46eb45808bca..dcee3bf18c79 100644
--- a/eth/catalyst/gen_ed.go
+++ b/core/beacon/gen_ed.go
@@ -1,6 +1,6 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-package catalyst
+package beacon
import (
"encoding/json"
@@ -19,9 +19,9 @@ func (e ExecutableDataV1) MarshalJSON() ([]byte, error) {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
+ ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
- Random common.Hash `json:"random" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
@@ -60,9 +60,9 @@ func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
+ ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
- Random *common.Hash `json:"random" gencodec:"required"`
+ Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
@@ -97,7 +97,7 @@ func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
}
e.LogsBloom = *dec.LogsBloom
if dec.Random == nil {
- return errors.New("missing required field 'random' for ExecutableDataV1")
+ return errors.New("missing required field 'prevRandao' for ExecutableDataV1")
}
e.Random = *dec.Random
if dec.Number == nil {
diff --git a/core/beacon/types.go b/core/beacon/types.go
new file mode 100644
index 000000000000..18d5d2ab78b4
--- /dev/null
+++ b/core/beacon/types.go
@@ -0,0 +1,194 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package beacon
+
+import (
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
+
+// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
+type PayloadAttributesV1 struct {
+ Timestamp uint64 `json:"timestamp" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
+ SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
+}
+
+// JSON type overrides for PayloadAttributesV1.
+type payloadAttributesMarshaling struct {
+ Timestamp hexutil.Uint64
+}
+
+//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
+
+// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
+type ExecutableDataV1 struct {
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom []byte `json:"logsBloom" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
+ Number uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData []byte `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions [][]byte `json:"transactions" gencodec:"required"`
+}
+
+// JSON type overrides for executableData.
+type executableDataMarshaling struct {
+ Number hexutil.Uint64
+ GasLimit hexutil.Uint64
+ GasUsed hexutil.Uint64
+ Timestamp hexutil.Uint64
+ BaseFeePerGas *hexutil.Big
+ ExtraData hexutil.Bytes
+ LogsBloom hexutil.Bytes
+ Transactions []hexutil.Bytes
+}
+
+type PayloadStatusV1 struct {
+ Status string `json:"status"`
+ LatestValidHash *common.Hash `json:"latestValidHash"`
+ ValidationError *string `json:"validationError"`
+}
+
+type TransitionConfigurationV1 struct {
+ TerminalTotalDifficulty *hexutil.Big `json:"terminalTotalDifficulty"`
+ TerminalBlockHash common.Hash `json:"terminalBlockHash"`
+ TerminalBlockNumber hexutil.Uint64 `json:"terminalBlockNumber"`
+}
+
+// PayloadID is an identifier of the payload build process
+type PayloadID [8]byte
+
+func (b PayloadID) String() string {
+ return hexutil.Encode(b[:])
+}
+
+func (b PayloadID) MarshalText() ([]byte, error) {
+ return hexutil.Bytes(b[:]).MarshalText()
+}
+
+func (b *PayloadID) UnmarshalText(input []byte) error {
+ err := hexutil.UnmarshalFixedText("PayloadID", input, b[:])
+ if err != nil {
+ return fmt.Errorf("invalid payload id %q: %w", input, err)
+ }
+ return nil
+}
+
+type ForkChoiceResponse struct {
+ PayloadStatus PayloadStatusV1 `json:"payloadStatus"`
+ PayloadID *PayloadID `json:"payloadId"`
+}
+
+type ForkchoiceStateV1 struct {
+ HeadBlockHash common.Hash `json:"headBlockHash"`
+ SafeBlockHash common.Hash `json:"safeBlockHash"`
+ FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
+}
+
+func encodeTransactions(txs []*types.Transaction) [][]byte {
+ var enc = make([][]byte, len(txs))
+ for i, tx := range txs {
+ enc[i], _ = tx.MarshalBinary()
+ }
+ return enc
+}
+
+func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
+ var txs = make([]*types.Transaction, len(enc))
+ for i, encTx := range enc {
+ var tx types.Transaction
+ if err := tx.UnmarshalBinary(encTx); err != nil {
+ return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
+ }
+ txs[i] = &tx
+ }
+ return txs, nil
+}
+
+// ExecutableDataToBlock constructs a block from executable data.
+// It verifies that the following fields:
+// len(extraData) <= 32
+// uncleHash = emptyUncleHash
+// difficulty = 0
+// and that the blockhash of the constructed block matches the parameters.
+func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
+ txs, err := decodeTransactions(params.Transactions)
+ if err != nil {
+ return nil, err
+ }
+ if len(params.ExtraData) > 32 {
+ return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
+ }
+ header := &types.Header{
+ ParentHash: params.ParentHash,
+ UncleHash: types.EmptyUncleHash,
+ Coinbase: params.FeeRecipient,
+ Root: params.StateRoot,
+ TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
+ ReceiptHash: params.ReceiptsRoot,
+ Bloom: types.BytesToBloom(params.LogsBloom),
+ Difficulty: common.Big0,
+ Number: new(big.Int).SetUint64(params.Number),
+ GasLimit: params.GasLimit,
+ GasUsed: params.GasUsed,
+ Time: params.Timestamp,
+ BaseFee: params.BaseFeePerGas,
+ Extra: params.ExtraData,
+ MixDigest: params.Random,
+ }
+ block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
+ if block.Hash() != params.BlockHash {
+ return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
+ }
+ return block, nil
+}
+
+// BlockToExecutableData constructs the executableDataV1 structure by filling the
+// fields from the given block. It assumes the given block is post-merge block.
+func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
+ return &ExecutableDataV1{
+ BlockHash: block.Hash(),
+ ParentHash: block.ParentHash(),
+ FeeRecipient: block.Coinbase(),
+ StateRoot: block.Root(),
+ Number: block.NumberU64(),
+ GasLimit: block.GasLimit(),
+ GasUsed: block.GasUsed(),
+ BaseFeePerGas: block.BaseFee(),
+ Timestamp: block.Time(),
+ ReceiptsRoot: block.ReceiptHash(),
+ LogsBloom: block.Bloom().Bytes(),
+ Transactions: encodeTransactions(block.Transactions()),
+ Random: block.MixDigest(),
+ ExtraData: block.Extra(),
+ }
+}
diff --git a/core/blockchain.go b/core/blockchain.go
index a6c8e87a27fc..b61bed0dfc37 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -542,6 +542,19 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
}
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
+ if newHeadBlock.NumberU64() == 0 {
+ // Recommit the genesis state into disk in case the rewinding destination
+ // is genesis block and the relevant state is gone. In the future this
+ // rewinding destination can be the earliest block stored in the chain
+ // if the historical chain pruning is enabled. In that case the logic
+ // needs to be improved here.
+ if !bc.HasState(bc.genesisBlock.Root()) {
+ if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
+ log.Crit("Failed to commit genesis state", "err", err)
+ }
+ log.Debug("Recommitted genesis state to disk")
+ }
+ }
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break
}
@@ -554,7 +567,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high
- // to low, so it's safe the update in-memory markers directly.
+ // to low, so it's safe to update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
@@ -592,7 +605,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
- if err := bc.db.TruncateAncients(num); err != nil {
+ if err := bc.db.TruncateHead(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
@@ -979,38 +992,37 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// range. In this case, all tx indices of newly imported blocks should be
// generated.
var batch = bc.db.NewBatch()
- for _, block := range blockChain {
+ for i, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
}
stats.processed++
- }
- // Flush all tx-lookup index data.
- size += int64(batch.ValueSize())
- if err := batch.Write(); err != nil {
- // The tx index data could not be written.
- // Roll back the ancient store update.
- fastBlock := bc.CurrentFastBlock().NumberU64()
- if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
- log.Error("Can't truncate ancient store after failed insert", "err", err)
+ if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
+ size += int64(batch.ValueSize())
+ if err = batch.Write(); err != nil {
+ fastBlock := bc.CurrentFastBlock().NumberU64()
+ if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
+ log.Error("Can't truncate ancient store after failed insert", "err", err)
+ }
+ return 0, err
+ }
+ batch.Reset()
}
- return 0, err
}
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.Sync(); err != nil {
return 0, err
}
-
// Update the current fast block because all block data is now present in DB.
previousFastBlock := bc.CurrentFastBlock().NumberU64()
if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain.
- if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
+ if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, errSideChainReceipts
@@ -1647,12 +1659,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
blockInsertTimer.UpdateSince(start)
+ // Report the import stats before returning the various results
+ stats.processed++
+ stats.usedGas += usedGas
+
+ dirty, _ := bc.stateCache.TrieDB().Size()
+ stats.report(chain, it.index, dirty, setHead)
+
if !setHead {
- // We did not setHead, so we don't have any stats to update
- log.Info("Inserted block", "number", block.Number(), "hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", common.PrettyDuration(time.Since(start)))
- return it.index, nil
+ return it.index, nil // Direct block insertion of a single block
}
-
switch status {
case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
@@ -1679,11 +1695,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
- stats.processed++
- stats.usedGas += usedGas
-
- dirty, _ := bc.stateCache.TrieDB().Size()
- stats.report(chain, it.index, dirty)
}
// Any blocks remaining here? The only ones we care about are the future ones
@@ -2081,28 +2092,39 @@ func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
// block. It's possible that after the reorg the relevant state of head
// is missing. It can be fixed by inserting a new block which triggers
// the re-execution.
-func (bc *BlockChain) SetChainHead(newBlock *types.Block) error {
+func (bc *BlockChain) SetChainHead(head *types.Block) error {
if !bc.chainmu.TryLock() {
return errChainStopped
}
defer bc.chainmu.Unlock()
// Run the reorg if necessary and set the given block as new head.
- if newBlock.ParentHash() != bc.CurrentBlock().Hash() {
- if err := bc.reorg(bc.CurrentBlock(), newBlock); err != nil {
+ start := time.Now()
+ if head.ParentHash() != bc.CurrentBlock().Hash() {
+ if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
return err
}
}
- bc.writeHeadBlock(newBlock)
+ bc.writeHeadBlock(head)
// Emit events
- logs := bc.collectLogs(newBlock.Hash(), false)
- bc.chainFeed.Send(ChainEvent{Block: newBlock, Hash: newBlock.Hash(), Logs: logs})
+ logs := bc.collectLogs(head.Hash(), false)
+ bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: newBlock})
- log.Info("Set the chain head", "number", newBlock.Number(), "hash", newBlock.Hash())
+ bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
+
+ context := []interface{}{
+ "number", head.Number(),
+ "hash", head.Hash(),
+ "root", head.Root(),
+ "elapsed", time.Since(start),
+ }
+ if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
+ context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+ }
+ log.Info("Chain head was updated", context...)
return nil
}
@@ -2288,6 +2310,9 @@ Error: %v
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+ if len(chain) == 0 {
+ return 0, nil
+ }
start := time.Now()
if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
return i, err
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index 446487027911..479eccc83e47 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize) {
+func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
// Fetch the timings for the batch
var (
now = mclock.Now()
@@ -71,8 +71,11 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
- log.Info("Imported new chain segment", context...)
-
+ if setHead {
+ log.Info("Imported new chain segment", context...)
+ } else {
+ log.Info("Imported new potential chain segment", context...)
+ }
// Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1}
}
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index eb5025ed55e7..913367179658 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1779,6 +1779,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
SnapshotLimit: 0, // Disable snapshot by default
}
)
+ defer engine.Close()
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
@@ -1836,25 +1837,25 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close()
- chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
- defer chain.Stop()
+ defer newChain.Stop()
// Iterate over all the remaining blocks and ensure there are no gaps
- verifyNoGaps(t, chain, true, canonblocks)
- verifyNoGaps(t, chain, false, sideblocks)
- verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
- verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+ verifyNoGaps(t, newChain, true, canonblocks)
+ verifyNoGaps(t, newChain, false, sideblocks)
+ verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
+ verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
- if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+ if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
}
- if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+ if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
}
- if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+ if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
}
if frozen, err := db.(freezer).Ancients(); err != nil {
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 6e542fe2f47b..37a1a42d0c60 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -2987,10 +2987,10 @@ func TestDeleteRecreateSlots(t *testing.T) {
initCode := []byte{
byte(vm.PUSH1), 0x3, // value
byte(vm.PUSH1), 0x3, // location
- byte(vm.SSTORE), // Set slot[3] = 1
+ byte(vm.SSTORE), // Set slot[3] = 3
byte(vm.PUSH1), 0x4, // value
byte(vm.PUSH1), 0x4, // location
- byte(vm.SSTORE), // Set slot[4] = 1
+ byte(vm.SSTORE), // Set slot[4] = 4
// Slots are set, now return the code
byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
byte(vm.PUSH1), 0x0, // memory start on stack
diff --git a/core/evm.go b/core/evm.go
index 6c67fc43762c..536ac673e6a6 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -40,6 +40,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var (
beneficiary common.Address
baseFee *big.Int
+ random *common.Hash
)
// If we don't have an explicit author (i.e. not mining), extract from the header
@@ -51,6 +52,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee)
}
+ if header.Difficulty.Cmp(common.Big0) == 0 {
+ random = &header.MixDigest
+ }
return vm.BlockContext{
CanTransfer: CanTransfer,
Transfer: Transfer,
@@ -61,6 +65,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee,
GasLimit: header.GasLimit,
+ Random: random,
}
}
diff --git a/core/genesis.go b/core/genesis.go
index 557440d08aa1..4949197da5cd 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -80,6 +80,81 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil
}
+// flush adds allocated genesis accounts into a fresh new statedb and
+// commit the state changes into the given database handler.
+func (ga *GenesisAlloc) flush(db ethdb.Database) (common.Hash, error) {
+ statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ for addr, account := range *ga {
+ statedb.AddBalance(addr, account.Balance)
+ statedb.SetCode(addr, account.Code)
+ statedb.SetNonce(addr, account.Nonce)
+ for key, value := range account.Storage {
+ statedb.SetState(addr, key, value)
+ }
+ }
+ root, err := statedb.Commit(false)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ err = statedb.Database().TrieDB().Commit(root, true, nil)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ return root, nil
+}
+
+// write writes the json marshaled genesis state into database
+// with the given block hash as the unique identifier.
+func (ga *GenesisAlloc) write(db ethdb.KeyValueWriter, hash common.Hash) error {
+ blob, err := json.Marshal(ga)
+ if err != nil {
+ return err
+ }
+ rawdb.WriteGenesisState(db, hash, blob)
+ return nil
+}
+
+// CommitGenesisState loads the stored genesis state with the given block
+// hash and commits them into the given database handler.
+func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
+ var alloc GenesisAlloc
+ blob := rawdb.ReadGenesisState(db, hash)
+ if len(blob) != 0 {
+ if err := alloc.UnmarshalJSON(blob); err != nil {
+ return err
+ }
+ } else {
+ // Genesis allocation is missing and there are several possibilities:
+ // the node is legacy which doesn't persist the genesis allocation or
+ // the persisted allocation is just lost.
+ // - supported networks(mainnet, testnets), recover with defined allocations
+ // - private network, can't recover
+ var genesis *Genesis
+ switch hash {
+ case params.MainnetGenesisHash:
+ genesis = DefaultGenesisBlock()
+ case params.RopstenGenesisHash:
+ genesis = DefaultRopstenGenesisBlock()
+ case params.RinkebyGenesisHash:
+ genesis = DefaultRinkebyGenesisBlock()
+ case params.GoerliGenesisHash:
+ genesis = DefaultGoerliGenesisBlock()
+ case params.SepoliaGenesisHash:
+ genesis = DefaultSepoliaGenesisBlock()
+ }
+ if genesis != nil {
+ alloc = genesis.Alloc
+ } else {
+ return errors.New("not found")
+ }
+ }
+ _, err := alloc.flush(db)
+ return err
+}
+
// GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct {
Code []byte `json:"code,omitempty"`
@@ -219,11 +294,19 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
rawdb.WriteChainConfig(db, stored, newcfg)
return newcfg, stored, nil
}
- // Special case: don't change the existing config of a non-mainnet chain if no new
- // config is supplied. These chains would get AllProtocolChanges (and a compat error)
- // if we just continued here.
+ // Special case: if a private network is being used (no genesis and also no
+ // mainnet hash in the database), we must not apply the `configOrDefault`
+ // chain config as that would be AllProtocolChanges (applying any new fork
+ // on top of an existing private network genesis block). In that case, only
+ // apply the overrides.
if genesis == nil && stored != params.MainnetGenesisHash {
- return storedcfg, stored, nil
+ newcfg = storedcfg
+ if overrideArrowGlacier != nil {
+ newcfg.ArrowGlacierBlock = overrideArrowGlacier
+ }
+ if overrideTerminalTotalDifficulty != nil {
+ newcfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty
+ }
}
// Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero.
@@ -253,6 +336,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
return params.RinkebyChainConfig
case ghash == params.GoerliGenesisHash:
return params.GoerliChainConfig
+ case ghash == params.KilnGenesisHash:
+ return DefaultKilnGenesisBlock().Config
default:
return params.AllEthashProtocolChanges
}
@@ -264,19 +349,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if db == nil {
db = rawdb.NewMemoryDatabase()
}
- statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+ root, err := g.Alloc.flush(db)
if err != nil {
panic(err)
}
- for addr, account := range g.Alloc {
- statedb.AddBalance(addr, account.Balance)
- statedb.SetCode(addr, account.Code)
- statedb.SetNonce(addr, account.Nonce)
- for key, value := range account.Storage {
- statedb.SetState(addr, key, value)
- }
- }
- root := statedb.IntermediateRoot(false)
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
@@ -294,7 +370,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit
}
- if g.Difficulty == nil {
+ if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
head.Difficulty = params.GenesisDifficulty
}
if g.Config != nil && g.Config.IsLondon(common.Big0) {
@@ -304,9 +380,6 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
}
}
- statedb.Commit(false)
- statedb.Database().TrieDB().Commit(root, true, nil)
-
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
}
@@ -327,6 +400,9 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if config.Clique != nil && len(block.Extra()) == 0 {
return nil, errors.New("can't start clique chain without signers")
}
+ if err := g.Alloc.write(db, block.Hash()); err != nil {
+ return nil, err
+ }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -418,6 +494,15 @@ func DefaultSepoliaGenesisBlock() *Genesis {
}
}
+func DefaultKilnGenesisBlock() *Genesis {
+ g := new(Genesis)
+ reader := strings.NewReader(KilnAllocData)
+ if err := json.NewDecoder(reader).Decode(g); err != nil {
+ panic(err)
+ }
+ return g
+}
+
// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
diff --git a/core/genesis_alloc.go b/core/genesis_alloc.go
index 3d053904e7af..041c55424238 100644
--- a/core/genesis_alloc.go
+++ b/core/genesis_alloc.go
@@ -27,3 +27,868 @@ const rinkebyAllocData = "\xf9\x03\xb7\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03
const goerliAllocData = "\xf9\x04\x06\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 \x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xe0\x94L*\xe4\x82Y5\x05\xf0\x16<\xde\xfc\a>\x81\xc6<\xdaA\a\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xa8\xe8\xf1G2e\x8eKQ\xe8q\x191\x05:\x8ai\xba\xf2\xb1\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe1\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\u08bdBX\xd2v\x887\xba\xa2j(\xfeq\xdc\a\x9f\x84\u01cbJG\xe3\xc1$H\xf4\xad\x00\x00\x00"
const calaverasAllocData = "\xf9\x06\x14\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 \x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x94\x0e\x89\xe2\xae\xdb\x1c\xfc\u06d4$\xd4\x1a\x1f!\x8fA2s\x81r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x10A\xaf\xbc\xb3Y\u0568\xdcX\xc1[/\xf5\x13T\xff\x8a!}\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94#o\xf1\xe9t\x19\xae\x93\xad\x80\xca\xfb\xaa!\"\f]x\xfb}\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94`\xad\xc0\xf8\x9aA\xaf#|\xe75T\xed\xe1p\xd73\xec\x14\xe0\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8a\x8e\xaf\xb1\xcfb\xbf\xbe\xb1t\x17i\xda\xe1\xa9\xddG\x99a\x92\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8b\xa1\xf1\tU\x1b\xd42\x800\x12dZ\xc16\xdd\xd6M\xbar\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xb0*.\xda\x1b1\u007f\xbd\x16v\x01(\x83k\n\u015bV\x0e\x9d\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xba\xdc\r\xe9\xe0yK\x04\x9b^\xa6<>\x1ei\x8a4v\xc1r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xf00\v\ue24a\xe2r\xeb4~\x83i\xac\fv\xdfB\xc9?\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xfe;U~\x8f\xb6+\x89\xf4\x91kr\x1b\xe5\\\ub08d\xbds\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
const sepoliaAllocData = "\xf9\x01\xee\u0791i\x16\xa8{\x823?BE\x04f#\xb27\x94\xc6\\\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\x10\xf5\xd4XT\xe08\a\x14\x85\xac\x9e@#\b\u03c0\xd2\xd2\xfe\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\u0794y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\x88\r\u0db3\xa7d\x00\x00\xe0\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\x8b\u007f\tw\xbbO\x0f\xbepv\xfa\"\xbc$\xac\xa0CX?^\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xa2\xa6\xd949\x14O\xfeM'\xc9\xe0\x88\xdc\u0637\x83\x94bc\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xaa\xec\x869DA\xf9\x15\xbc\xe3\xe6\xab9\x99w\xe9\x90o;i\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\u1532\x1c3\xde\x1f\xab?\xa1T\x99\xc6+Y\xfe\f\xc3%\x00 \u044bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xbc\x11)Y6\xaay\u0554\x13\x9d\xe1\xb2\xe1&)AO;\u06ca\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xbe\xef2\xca[\x9a\x19\x8d'\xb4\xe0/LpC\x9f\xe6\x03V\u03ca\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94\xd7\xd7lX\xb3\xa5\x19\xe9\xfal\xc4\xd2-\xc0\x17%\x9b\u011f\x1e\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xd7\xed\xdbx\xed)[<\x96)$\x0e\x89$\xfb\x8d\x88t\xdd\u060a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xe2\xe2e\x90(\x147\x84\xd5W\xbc\xeco\xf3\xa0r\x10H\x88\n\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xf4|\xae\x1c\xf7\x9c\xa6u\x8b\xfcx}\xbd!\u6f7eq\x12\xb8\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00"
+const KilnAllocData = `{
+ "config": {
+ "chainId": 1337802,
+ "homesteadBlock": 0,
+ "eip150Block": 0,
+ "eip155Block": 0,
+ "eip158Block": 0,
+ "byzantiumBlock": 0,
+ "constantinopleBlock": 0,
+ "petersburgBlock": 0,
+ "istanbulBlock": 0,
+ "berlinBlock": 0,
+ "londonBlock": 0,
+ "mergeForkBlock": 1000,
+ "terminalTotalDifficulty": 20000000000000
+ },
+ "alloc": {
+ "0x0000000000000000000000000000000000000000": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000001": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000002": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000003": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000004": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000005": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000006": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000007": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000008": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000009": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000000a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000000b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000000c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000000d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000000e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000000f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000010": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000011": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000012": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000013": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000014": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000015": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000016": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000017": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000018": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000019": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000001a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000001b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000001c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000001d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000001e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000001f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000020": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000021": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000022": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000023": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000024": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000025": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000026": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000027": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000028": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000029": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000002a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000002b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000002c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000002d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000002e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000002f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000030": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000031": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000032": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000033": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000034": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000035": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000036": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000037": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000038": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000039": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000003a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000003b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000003c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000003d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000003e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000003f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000040": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000041": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000042": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000043": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000044": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000045": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000046": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000047": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000048": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000049": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000004a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000004b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000004c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000004d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000004e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000004f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000050": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000051": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000052": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000053": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000054": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000055": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000056": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000057": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000058": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000059": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000005a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000005b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000005c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000005d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000005e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000005f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000060": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000061": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000062": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000063": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000064": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000065": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000066": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000067": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000068": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000069": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000006a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000006b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000006c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000006d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000006e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000006f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000070": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000071": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000072": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000073": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000074": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000075": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000076": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000077": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000078": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000079": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000007a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000007b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000007c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000007d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000007e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000007f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000080": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000081": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000082": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000083": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000084": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000085": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000086": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000087": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000088": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000089": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000008a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000008b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000008c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000008d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000008e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000008f": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000090": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000091": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000092": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000093": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000094": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000095": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000096": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000097": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000098": {
+ "balance": "1"
+ },
+ "0x0000000000000000000000000000000000000099": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000009a": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000009b": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000009c": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000009d": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000009e": {
+ "balance": "1"
+ },
+ "0x000000000000000000000000000000000000009f": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a0": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a1": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a2": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a3": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a4": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a5": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a6": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a7": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a8": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000a9": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000aa": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ab": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ac": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ad": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ae": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000af": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b0": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b1": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b2": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b3": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b4": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b5": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b6": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b7": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b8": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000b9": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ba": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000bb": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000bc": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000bd": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000be": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000bf": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c0": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c1": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c2": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c3": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c4": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c5": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c6": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c7": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c8": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000c9": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ca": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000cb": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000cc": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000cd": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ce": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000cf": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d0": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d1": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d2": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d3": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d4": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d5": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d6": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d7": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d8": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000d9": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000da": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000db": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000dc": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000dd": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000de": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000df": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e0": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e1": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e2": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e3": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e4": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e5": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e6": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e7": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e8": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000e9": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ea": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000eb": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ec": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ed": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ee": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ef": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f0": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f1": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f2": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f3": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f4": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f5": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f6": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f7": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f8": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000f9": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000fa": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000fb": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000fc": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000fd": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000fe": {
+ "balance": "1"
+ },
+ "0x00000000000000000000000000000000000000ff": {
+ "balance": "1"
+ },
+ "0x4242424242424242424242424242424242424242": {
+ "balance": "0",
+ "code": "0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033",
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b",
+ "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71",
+ "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c",
+ "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c",
+ "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30",
+ "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1",
+ "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c",
+ "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193",
+ "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1",
+ "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b",
+ "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220",
+ "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f",
+ "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e",
+ "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784",
+ "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb",
+ "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb",
+ "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab",
+ "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4",
+ "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f",
+ "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa",
+ "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c",
+ "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167",
+ "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7",
+ "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0",
+ "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544",
+ "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765",
+ "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4",
+ "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1",
+ "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636",
+ "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c",
+ "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7"
+ }
+ },
+ "0xf97e180c050e5Ab072211Ad2C213Eb5AEE4DF134": {
+ "balance": "10000000000000000000000000"
+ },
+ "0x2cA5F489CC1Fd1CEC24747B64E8dE0F4A6A850E1": {
+ "balance": "10000000000000000000000000"
+ },
+ "0x7203bd333a874D9d329050ecE393820fCD501eaA": {
+ "balance": "10000000000000000000000000"
+ },
+ "0xA51918aA40D78Ff8be939bf0E8404252875c6aDF": {
+ "balance": "10000000000000000000000000"
+ },
+ "0xAA81078e6b2121dd7A846690DFdD6b10d7658d8B": {
+ "balance": "10000000000000000000000000"
+ },
+ "0xFA2d31D8f21c1D1633E9BEB641dF77D21D63ccDd": {
+ "balance": "10000000000000000000000000"
+ },
+ "0xf751C9c6d60614226fE57D2cAD6e10C856a2ddA3": {
+ "balance": "10000000000000000000000000"
+ },
+ "0x9cD16887f6A808AEaa65D3c840f059EeA4ca1319": {
+ "balance": "10000000000000000000000000"
+ },
+ "0x2E07043584F11BFF0AC39c927665DF6c6ebaffFB": {
+ "balance": "10000000000000000000000000"
+ },
+ "0x60e771E5eCA8E26690920de669520Da210D64A9B": {
+ "balance": "10000000000000000000000000"
+ },
+ "0xFC4db92C2Cf77CE02fBfd7Da0346d2CbFA66aD59": {
+ "balance": "10000000000000000000000000"
+ }
+ },
+ "coinbase": "0x0000000000000000000000000000000000000000",
+ "difficulty": "0x01",
+ "extraData": "",
+ "gasLimit": "0x400000",
+ "nonce": "0x1234",
+ "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0"
+ }`
diff --git a/core/genesis_test.go b/core/genesis_test.go
index f3d6b23e5fe0..e8010e3d4ebd 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -213,3 +213,33 @@ func TestGenesis_Commit(t *testing.T) {
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
}
}
+
+func TestReadWriteGenesisAlloc(t *testing.T) {
+ var (
+ db = rawdb.NewMemoryDatabase()
+ alloc = &GenesisAlloc{
+ {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
+ {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
+ }
+ hash = common.HexToHash("0xdeadbeef")
+ )
+ alloc.write(db, hash)
+
+ var reload GenesisAlloc
+ err := reload.UnmarshalJSON(rawdb.ReadGenesisState(db, hash))
+ if err != nil {
+ t.Fatalf("Failed to load genesis state %v", err)
+ }
+ if len(reload) != len(*alloc) {
+ t.Fatal("Unexpected genesis allocation")
+ }
+ for addr, account := range reload {
+ want, ok := (*alloc)[addr]
+ if !ok {
+ t.Fatal("Account is not found")
+ }
+ if !reflect.DeepEqual(want, account) {
+ t.Fatal("Unexpected account")
+ }
+ }
+}
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 8e9706ea6fdb..f9c224dfa8f8 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -83,8 +83,8 @@ type NumberHash struct {
Hash common.Hash
}
-// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
-// both canonical and reorged forks included.
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
+// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
var (
@@ -776,7 +776,7 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteHeader(db, block.Header())
}
-// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
+// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
var (
tdSum = new(big.Int).Set(td)
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index 3b0fcf0f2d1f..f5a161adb688 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -81,6 +81,19 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
}
}
+// ReadGenesisState retrieves the genesis state based on the given genesis hash.
+func ReadGenesisState(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(genesisKey(hash))
+ return data
+}
+
+// WriteGenesisState writes the genesis state into the disk.
+func WriteGenesisState(db ethdb.KeyValueWriter, hash common.Hash, data []byte) {
+ if err := db.Put(genesisKey(hash), data); err != nil {
+ log.Crit("Failed to store genesis state", "err", err)
+ }
+}
+
// crashList is a list of unclean-shutdown-markers, for rlp-encoding to the
// database
type crashList struct {
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index 1c828662c150..3c82b3f73141 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -115,7 +115,7 @@ func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash com
// IterateStorageSnapshots returns an iterator for walking the entire storage
// space of a specific account.
func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
- return db.NewIterator(storageSnapshotsKey(accountHash), nil)
+ return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
}
// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 6112de03ad53..41e21b6ca40b 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -28,29 +28,16 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
-// WritePreimages writes the provided set of preimages to the database.
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
- for hash, preimage := range preimages {
- if err := db.Put(preimageKey(hash), preimage); err != nil {
- log.Crit("Failed to store trie preimage", "err", err)
- }
- }
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(len(preimages)))
-}
-
// ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- // Try with the legacy code scheme first, if not then try with current
- // scheme. Since most of the code will be found with legacy scheme.
- //
- // todo(rjl493456442) change the order when we forcibly upgrade the code
- // scheme with snapshot.
- data, _ := db.Get(hash[:])
+ // Try with the prefixed code scheme first, if not then try with legacy
+ // scheme.
+ data := ReadCodeWithPrefix(db, hash)
if len(data) != 0 {
return data
}
- return ReadCodeWithPrefix(db, hash)
+ data, _ = db.Get(hash.Bytes())
+ return data
}
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
@@ -61,24 +48,54 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
-// WriteCode writes the provided contract code database.
-func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
- if err := db.Put(codeKey(hash), code); err != nil {
- log.Crit("Failed to store contract code", "err", err)
+// ReadTrieNode retrieves the trie node of the provided hash.
+func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(hash.Bytes())
+ return data
+}
+
+// HasCode checks if the contract code corresponding to the
+// provided code hash is present in the db.
+func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ // Try with the prefixed code scheme first, if not then try with legacy
+ // scheme.
+ if ok := HasCodeWithPrefix(db, hash); ok {
+ return true
}
+ ok, _ := db.Has(hash.Bytes())
+ return ok
}
-// DeleteCode deletes the specified contract code from the database.
-func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(codeKey(hash)); err != nil {
- log.Crit("Failed to delete contract code", "err", err)
+// HasCodeWithPrefix checks if the contract code corresponding to the
+// provided code hash is present in the db. This function will only check
+// presence using the prefix-scheme.
+func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(codeKey(hash))
+ return ok
+}
+
+// HasTrieNode checks if the trie node with the provided hash is present in db.
+func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
+ for hash, preimage := range preimages {
+ if err := db.Put(preimageKey(hash), preimage); err != nil {
+ log.Crit("Failed to store trie preimage", "err", err)
+ }
}
+ preimageCounter.Inc(int64(len(preimages)))
+ preimageHitCounter.Inc(int64(len(preimages)))
}
-// ReadTrieNode retrieves the trie node of the provided hash.
-func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(hash.Bytes())
- return data
+// WriteCode writes the provided contract code database.
+func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+ if err := db.Put(codeKey(hash), code); err != nil {
+ log.Crit("Failed to store contract code", "err", err)
+ }
}
// WriteTrieNode writes the provided trie node database.
@@ -88,6 +105,13 @@ func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
}
}
+// DeleteCode deletes the specified contract code from the database.
+func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(codeKey(hash)); err != nil {
+ log.Crit("Failed to delete contract code", "err", err)
+ }
+}
+
// DeleteTrieNode deletes the specified trie node from the database.
func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(hash.Bytes()); err != nil {
diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go
new file mode 100644
index 000000000000..50dfb848e4e0
--- /dev/null
+++ b/core/rawdb/accessors_sync.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
+func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(skeletonSyncStatusKey)
+ return data
+}
+
+// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
+func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
+ if err := db.Put(skeletonSyncStatusKey, status); err != nil {
+ log.Crit("Failed to store skeleton sync status", "err", err)
+ }
+}
+
+// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
+// shutdown
+func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
+ if err := db.Delete(skeletonSyncStatusKey); err != nil {
+ log.Crit("Failed to remove skeleton sync status", "err", err)
+ }
+}
+
+// ReadSkeletonHeader retrieves a block header from the skeleton sync store,
+func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
+ data, _ := db.Get(skeletonHeaderKey(number))
+ if len(data) == 0 {
+ return nil
+ }
+ header := new(types.Header)
+ if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+ log.Error("Invalid skeleton header RLP", "number", number, "err", err)
+ return nil
+ }
+ return header
+}
+
+// WriteSkeletonHeader stores a block header into the skeleton sync store.
+func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
+ data, err := rlp.EncodeToBytes(header)
+ if err != nil {
+ log.Crit("Failed to RLP encode header", "err", err)
+ }
+ key := skeletonHeaderKey(header.Number.Uint64())
+ if err := db.Put(key, data); err != nil {
+ log.Crit("Failed to store skeleton header", "err", err)
+ }
+}
+
+// DeleteSkeletonHeader removes all block header data associated with a hash.
+func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Delete(skeletonHeaderKey(number)); err != nil {
+ log.Crit("Failed to delete skeleton header", "err", err)
+ }
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 5ef64d26a205..5d645b61dbee 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -99,6 +99,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported
}
+// Tail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Tail() (uint64, error) {
+ return 0, errNotSupported
+}
+
// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
return 0, errNotSupported
@@ -109,8 +114,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
return 0, errNotSupported
}
-// TruncateAncients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateAncients(items uint64) error {
+// TruncateHead returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateHead(items uint64) error {
+ return errNotSupported
+}
+
+// TruncateTail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateTail(items uint64) error {
return errNotSupported
}
@@ -135,6 +145,12 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (e
return fn(db)
}
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
+ return errNotSupported
+}
+
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -211,7 +227,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
// Block #1 is still in the database, we're allowed to init a new feezer
}
// Otherwise, the head header is still the genesis, we're allowed to init a new
- // feezer.
+ // freezer.
}
}
// Freezer is consistent with the key-value database, permit combining the two
@@ -321,6 +337,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
storageSnaps stat
preimages stat
bloomBits stat
+ beaconHeaders stat
cliqueSnaps stat
// Ancient store statistics
@@ -375,10 +392,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
preimages.Add(size)
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
metadata.Add(size)
+ case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
+ metadata.Add(size)
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
bloomBits.Add(size)
case bytes.HasPrefix(key, BloomBitsIndexPrefix):
bloomBits.Add(size)
+ case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
+ beaconHeaders.Add(size)
case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
cliqueSnaps.Add(size)
case bytes.HasPrefix(key, []byte("cht-")) ||
@@ -395,7 +416,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
- uncleanShutdownKey, badBlockKey, transitionStatusKey,
+ uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
} {
if bytes.Equal(key, meta) {
metadata.Add(size)
@@ -441,6 +462,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
+ {"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index e19c202adc84..8266933ee655 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -19,6 +19,7 @@ package rawdb
import (
"errors"
"fmt"
+ "io/ioutil"
"math"
"os"
"path/filepath"
@@ -66,7 +67,7 @@ const (
freezerTableSize = 2 * 1000 * 1000 * 1000
)
-// freezer is an memory mapped append-only database to store immutable chain data
+// freezer is a memory mapped append-only database to store immutable chain data
// into flat files:
//
// - The append only nature ensures that disk writes are minimized.
@@ -78,6 +79,7 @@ type freezer struct {
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
frozen uint64 // Number of blocks already frozen
+ tail uint64 // Number of the first stored item in the freezer
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
// This lock synchronizes writers and the truncate operation, as well as
@@ -133,7 +135,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
// Create the tables.
for name, disableSnappy := range tables {
- table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
+ table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
if err != nil {
for _, table := range freezer.tables {
table.Close()
@@ -144,8 +146,15 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
freezer.tables[name] = table
}
- // Truncate all tables to common length.
- if err := freezer.repair(); err != nil {
+ if freezer.readonly {
+ // In readonly mode only validate, don't truncate.
+ // validate also sets `freezer.frozen`.
+ err = freezer.validate()
+ } else {
+ // Truncate all tables to common length.
+ err = freezer.repair()
+ }
+ if err != nil {
for _, table := range freezer.tables {
table.Close()
}
@@ -219,6 +228,11 @@ func (f *freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil
}
+// Tail returns the number of first stored item in the freezer.
+func (f *freezer) Tail() (uint64, error) {
+ return atomic.LoadUint64(&f.tail), nil
+}
+
// AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields.
@@ -254,7 +268,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
if err != nil {
// The write operation has failed. Go back to the previous item position.
for name, table := range f.tables {
- err := table.truncate(prevItem)
+ err := table.truncateHead(prevItem)
if err != nil {
log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
}
@@ -274,8 +288,8 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
return writeSize, nil
}
-// TruncateAncients discards any recent data above the provided threshold number.
-func (f *freezer) TruncateAncients(items uint64) error {
+// TruncateHead discards any recent data above the provided threshold number.
+func (f *freezer) TruncateHead(items uint64) error {
if f.readonly {
return errReadOnly
}
@@ -286,7 +300,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
return nil
}
for _, table := range f.tables {
- if err := table.truncate(items); err != nil {
+ if err := table.truncateHead(items); err != nil {
return err
}
}
@@ -294,6 +308,26 @@ func (f *freezer) TruncateAncients(items uint64) error {
return nil
}
+// TruncateTail discards any recent data below the provided threshold number.
+func (f *freezer) TruncateTail(tail uint64) error {
+ if f.readonly {
+ return errReadOnly
+ }
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
+ if atomic.LoadUint64(&f.tail) >= tail {
+ return nil
+ }
+ for _, table := range f.tables {
+ if err := table.truncateTail(tail); err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint64(&f.tail, tail)
+ return nil
+}
+
// Sync flushes all data tables to disk.
func (f *freezer) Sync() error {
var errs []error
@@ -308,21 +342,59 @@ func (f *freezer) Sync() error {
return nil
}
+// validate checks that every table has the same length.
+// Used instead of `repair` in readonly mode.
+func (f *freezer) validate() error {
+ if len(f.tables) == 0 {
+ return nil
+ }
+ var (
+ length uint64
+ name string
+ )
+ // Hack to get length of any table
+ for kind, table := range f.tables {
+ length = atomic.LoadUint64(&table.items)
+ name = kind
+ break
+ }
+ // Now check every table against that length
+ for kind, table := range f.tables {
+ items := atomic.LoadUint64(&table.items)
+ if length != items {
+ return fmt.Errorf("freezer tables %s and %s have differing lengths: %d != %d", kind, name, items, length)
+ }
+ }
+ atomic.StoreUint64(&f.frozen, length)
+ return nil
+}
+
// repair truncates all data tables to the same length.
func (f *freezer) repair() error {
- min := uint64(math.MaxUint64)
+ var (
+ head = uint64(math.MaxUint64)
+ tail = uint64(0)
+ )
for _, table := range f.tables {
items := atomic.LoadUint64(&table.items)
- if min > items {
- min = items
+ if head > items {
+ head = items
+ }
+ hidden := atomic.LoadUint64(&table.itemHidden)
+ if hidden > tail {
+ tail = hidden
}
}
for _, table := range f.tables {
- if err := table.truncate(min); err != nil {
+ if err := table.truncateHead(head); err != nil {
+ return err
+ }
+ if err := table.truncateTail(tail); err != nil {
return err
}
}
- atomic.StoreUint64(&f.frozen, min)
+ atomic.StoreUint64(&f.frozen, head)
+ atomic.StoreUint64(&f.tail, tail)
return nil
}
@@ -546,3 +618,116 @@ func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []
return hashes, err
}
+
+// convertLegacyFn takes a raw freezer entry in an older format and
+// returns it in the new format.
+type convertLegacyFn = func([]byte) ([]byte, error)
+
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
+ if f.readonly {
+ return errReadOnly
+ }
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
+ table, ok := f.tables[kind]
+ if !ok {
+ return errUnknownTable
+ }
+ // forEach iterates every entry in the table serially and in order, calling `fn`
+ // with the item as argument. If `fn` returns an error the iteration stops
+ // and that error will be returned.
+ forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
+ var (
+ items = atomic.LoadUint64(&t.items)
+ batchSize = uint64(1024)
+ maxBytes = uint64(1024 * 1024)
+ )
+ for i := offset; i < items; {
+ if i+batchSize > items {
+ batchSize = items - i
+ }
+ data, err := t.RetrieveItems(i, batchSize, maxBytes)
+ if err != nil {
+ return err
+ }
+ for j, item := range data {
+ if err := fn(i+uint64(j), item); err != nil {
+ return err
+ }
+ }
+ i += uint64(len(data))
+ }
+ return nil
+ }
+ // TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
+ // process assumes no deletion at tail and needs to be modified to account for that.
+ if table.itemOffset > 0 || table.itemHidden > 0 {
+ return fmt.Errorf("migration not supported for tail-deleted freezers")
+ }
+ ancientsPath := filepath.Dir(table.index.Name())
+ // Set up new dir for the migrated table, the content of which
+ // we'll at the end move over to the ancients dir.
+ migrationPath := filepath.Join(ancientsPath, "migration")
+ newTable, err := NewFreezerTable(migrationPath, kind, FreezerNoSnappy[kind], false)
+ if err != nil {
+ return err
+ }
+ var (
+ batch = newTable.newBatch()
+ out []byte
+ start = time.Now()
+ logged = time.Now()
+ offset = newTable.items
+ )
+ if offset > 0 {
+ log.Info("found previous migration attempt", "migrated", offset)
+ }
+ // Iterate through entries and transform them
+ if err := forEach(table, offset, func(i uint64, blob []byte) error {
+ if i%10000 == 0 && time.Since(logged) > 16*time.Second {
+ log.Info("Processing legacy elements", "count", i, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ out, err = convert(blob)
+ if err != nil {
+ return err
+ }
+ if err := batch.AppendRaw(i, out); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ if err := batch.commit(); err != nil {
+ return err
+ }
+ log.Info("Replacing old table files with migrated ones", "elapsed", common.PrettyDuration(time.Since(start)))
+ // Release and delete old table files. Note this won't
+ // delete the index file.
+ table.releaseFilesAfter(0, true)
+
+ if err := newTable.Close(); err != nil {
+ return err
+ }
+ files, err := ioutil.ReadDir(migrationPath)
+ if err != nil {
+ return err
+ }
+ // Move migrated files to ancients dir.
+ for _, f := range files {
+ // This will replace the old index file as a side-effect.
+ if err := os.Rename(filepath.Join(migrationPath, f.Name()), filepath.Join(ancientsPath, f.Name())); err != nil {
+ return err
+ }
+ }
+ // Delete by now empty dir.
+ if err := os.Remove(migrationPath); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index 762fa8f25f19..864a7f5e98bf 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -191,7 +191,7 @@ func (batch *freezerTableBatch) commit() error {
dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0]
- // Write index.
+ // Write indices.
_, err = batch.t.index.Write(batch.indexBuffer)
if err != nil {
return err
diff --git a/core/rawdb/freezer_meta.go b/core/rawdb/freezer_meta.go
new file mode 100644
index 000000000000..d0bd2f954436
--- /dev/null
+++ b/core/rawdb/freezer_meta.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package rawdb
+
+import (
+ "io"
+ "os"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+const freezerVersion = 1 // The initial version tag of freezer table metadata
+
+// freezerTableMeta wraps all the metadata of the freezer table.
+type freezerTableMeta struct {
+ // Version is the versioning descriptor of the freezer table.
+ Version uint16
+
+ // VirtualTail indicates how many items have been marked as deleted.
+ // Its value is equal to the number of items removed from the table
+ // plus the number of items hidden in the table, so it should never
+ // be lower than the "actual tail".
+ VirtualTail uint64
+}
+
+// newMetadata initializes the metadata object with the given virtual tail.
+func newMetadata(tail uint64) *freezerTableMeta {
+ return &freezerTableMeta{
+ Version: freezerVersion,
+ VirtualTail: tail,
+ }
+}
+
+// readMetadata reads the metadata of the freezer table from the
+// given metadata file.
+func readMetadata(file *os.File) (*freezerTableMeta, error) {
+ _, err := file.Seek(0, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ var meta freezerTableMeta
+ if err := rlp.Decode(file, &meta); err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// writeMetadata writes the metadata of the freezer table into the
+// given metadata file.
+func writeMetadata(file *os.File, meta *freezerTableMeta) error {
+ _, err := file.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ return rlp.Encode(file, meta)
+}
+
+// loadMetadata loads the metadata from the given metadata file.
+// Initializes the metadata file with the given "actual tail" if
+// it's empty.
+func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ // Write the metadata with the given actual tail into metadata file
+ // if it's non-existent. There are two possible scenarios here:
+ // - the freezer table is empty
+ // - the freezer table is legacy
+ // In both cases, write the meta into the file with the actual tail
+ // as the virtual tail.
+ if stat.Size() == 0 {
+ m := newMetadata(tail)
+ if err := writeMetadata(file, m); err != nil {
+ return nil, err
+ }
+ return m, nil
+ }
+ m, err := readMetadata(file)
+ if err != nil {
+ return nil, err
+ }
+ // Update the virtual tail with the given actual tail if it's even
+ // lower than it. Theoretically it shouldn't happen at all, print
+ // a warning here.
+ if m.VirtualTail < tail {
+ log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
+ m.VirtualTail = tail
+ if err := writeMetadata(file, m); err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
diff --git a/core/rawdb/freezer_meta_test.go b/core/rawdb/freezer_meta_test.go
new file mode 100644
index 000000000000..191744a75410
--- /dev/null
+++ b/core/rawdb/freezer_meta_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package rawdb
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestReadWriteFreezerTableMeta(t *testing.T) {
+ f, err := ioutil.TempFile(os.TempDir(), "*")
+ if err != nil {
+ t.Fatalf("Failed to create file %v", err)
+ }
+ err = writeMetadata(f, newMetadata(100))
+ if err != nil {
+ t.Fatalf("Failed to write metadata %v", err)
+ }
+ meta, err := readMetadata(f)
+ if err != nil {
+ t.Fatalf("Failed to read metadata %v", err)
+ }
+ if meta.Version != freezerVersion {
+ t.Fatalf("Unexpected version field")
+ }
+ if meta.VirtualTail != uint64(100) {
+ t.Fatalf("Unexpected virtual tail field")
+ }
+}
+
+func TestInitializeFreezerTableMeta(t *testing.T) {
+ f, err := ioutil.TempFile(os.TempDir(), "*")
+ if err != nil {
+ t.Fatalf("Failed to create file %v", err)
+ }
+ meta, err := loadMetadata(f, uint64(100))
+ if err != nil {
+ t.Fatalf("Failed to read metadata %v", err)
+ }
+ if meta.Version != freezerVersion {
+ t.Fatalf("Unexpected version field")
+ }
+ if meta.VirtualTail != uint64(100) {
+ t.Fatalf("Unexpected virtual tail field")
+ }
+}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index 22405cf9b4f8..01867ee8c242 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -47,20 +47,19 @@ var (
)
// indexEntry contains the number/id of the file that the data resides in, aswell as the
-// offset within the file to the end of the data
+// offset within the file to the end of the data.
// In serialized form, the filenum is stored as uint16.
type indexEntry struct {
- filenum uint32 // stored as uint16 ( 2 bytes)
- offset uint32 // stored as uint32 ( 4 bytes)
+ filenum uint32 // stored as uint16 ( 2 bytes )
+ offset uint32 // stored as uint32 ( 4 bytes )
}
const indexEntrySize = 6
// unmarshalBinary deserializes binary b into the rawIndex entry.
-func (i *indexEntry) unmarshalBinary(b []byte) error {
+func (i *indexEntry) unmarshalBinary(b []byte) {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6])
- return nil
}
// append adds the encoded entry to the end of b.
@@ -75,14 +74,14 @@ func (i *indexEntry) append(b []byte) []byte {
// bounds returns the start- and end- offsets, and the file number of where to
// read there data item marked by the two index entries. The two entries are
// assumed to be sequential.
-func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
- if start.filenum != end.filenum {
+func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
+ if i.filenum != end.filenum {
// If a piece of data 'crosses' a data-file,
// it's actually in one piece on the second data-file.
// We return a zero-indexEntry for the second file as start
return 0, end.offset, end.filenum
}
- return start.offset, end.offset, end.filenum
+ return i.offset, end.offset, end.filenum
}
// freezerTable represents a single chained data table within the freezer (e.g. blocks).
@@ -92,22 +91,28 @@ type freezerTable struct {
// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- items uint64 // Number of items stored in the table (including items removed from tail)
-
- noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+ items uint64 // Number of items stored in the table (including items removed from tail)
+ itemOffset uint64 // Number of items removed from the table
+
+ // itemHidden is the number of items marked as deleted. Tail deletion is
+ // only supported at file level which means the actual deletion will be
+ // delayed until the entire data file is marked as deleted. Before that
+ // these items will be hidden to prevent being visited again. The value
+ // should never be lower than itemOffset.
+ itemHidden uint64
+
+ noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+ readonly bool
maxFileSize uint32 // Max file size for data-files
name string
path string
head *os.File // File descriptor for the data head of the table
+ index *os.File // File descriptor for the indexEntry file of the table
+ meta *os.File // File descriptor for metadata of the table
files map[uint32]*os.File // open files
headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file
- index *os.File // File descriptor for the indexEntry file of the table
-
- // In the case that old items are deleted (from the tail), we use itemOffset
- // to count how many historic items have gone missing.
- itemOffset uint32 // Offset (number of discarded items)
headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read
@@ -119,71 +124,61 @@ type freezerTable struct {
}
// NewFreezerTable opens the given path as a freezer table.
-func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
- return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
-}
-
-// openFreezerFileForAppend opens a freezer table file and seeks to the end
-func openFreezerFileForAppend(filename string) (*os.File, error) {
- // Open the file without the O_APPEND flag
- // because it has differing behaviour during Truncate operations
- // on different OS's
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return nil, err
- }
- // Seek to end for append
- if _, err = file.Seek(0, io.SeekEnd); err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// openFreezerFileForReadOnly opens a freezer table file for read only access
-func openFreezerFileForReadOnly(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDONLY, 0644)
-}
-
-// openFreezerFileTruncated opens a freezer table making sure it is truncated
-func openFreezerFileTruncated(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-}
-
-// truncateFreezerFile resizes a freezer table file and seeks to the end
-func truncateFreezerFile(file *os.File, size int64) error {
- if err := file.Truncate(size); err != nil {
- return err
- }
- // Seek to end for append
- if _, err := file.Seek(0, io.SeekEnd); err != nil {
- return err
- }
- return nil
+func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
+ return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
}
// newTable opens a freezer table, creating the data and index files if they are
-// non existent. Both files are truncated to the shortest common length to ensure
+// non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
var idxName string
if noCompression {
- // Raw idx
- idxName = fmt.Sprintf("%s.ridx", name)
+ idxName = fmt.Sprintf("%s.ridx", name) // raw index file
} else {
- // Compressed idx
- idxName = fmt.Sprintf("%s.cidx", name)
+ idxName = fmt.Sprintf("%s.cidx", name) // compressed index file
}
- offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
- if err != nil {
- return nil, err
+ var (
+ err error
+ index *os.File
+ meta *os.File
+ )
+ if readonly {
+ // Will fail if table doesn't exist
+ index, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
+ if err != nil {
+ return nil, err
+ }
+ // TODO(rjl493456442) change it to read-only mode. Open the metadata file
+ // in rw mode. It's a temporary solution for now and should be changed
+ // whenever the tail deletion is actually used. The reason for this hack is
+ // the additional meta file for each freezer table is added in order to support
+ // tail deletion, but for most legacy nodes this file is missing. This check
+ // will suddenly break lots of database relevant commands. So the metadata file
+ // is always opened for mutation and nothing else will be written except
+ // the initialization.
+ meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
+ if err != nil {
+ return nil, err
+ }
+ meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
+ if err != nil {
+ return nil, err
+ }
}
// Create the table and repair any past inconsistency
tab := &freezerTable{
- index: offsets,
+ index: index,
+ meta: meta,
files: make(map[uint32]*os.File),
readMeter: readMeter,
writeMeter: writeMeter,
@@ -192,6 +187,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
path: path,
logger: log.New("database", path, "table", name),
noCompression: noCompression,
+ readonly: readonly,
maxFileSize: maxFilesize,
}
if err := tab.repair(); err != nil {
@@ -209,7 +205,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
return tab, nil
}
-// repair cross checks the head and the index file and truncates them to
+// repair cross-checks the head and the index file and truncates them to
// be in sync with each other after a potential crash / data loss.
func (t *freezerTable) repair() error {
// Create a temporary offset buffer to init files with and read indexEntry into
@@ -247,12 +243,32 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, 0)
firstIndex.unmarshalBinary(buffer)
+ // Assign the tail fields with the first stored index.
+ // The total removed items is represented with an uint32,
+ // which is not enough in theory but enough in practice.
+ // TODO: use uint64 to represent total removed items.
t.tailId = firstIndex.filenum
- t.itemOffset = firstIndex.offset
+ t.itemOffset = uint64(firstIndex.offset)
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
- lastIndex.unmarshalBinary(buffer)
- t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+ // Load metadata from the file
+ meta, err := loadMetadata(t.meta, t.itemOffset)
+ if err != nil {
+ return err
+ }
+ t.itemHidden = meta.VirtualTail
+
+ // Read the last index, use the default value in case the freezer is empty
+ if offsetsSize == indexEntrySize {
+ lastIndex = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ lastIndex.unmarshalBinary(buffer)
+ }
+ if t.readonly {
+ t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
+ } else {
+ t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+ }
if err != nil {
return err
}
@@ -263,7 +279,6 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset)
-
for contentExp != contentSize {
// Truncate the head file to the last offset pointer
if contentExp < contentSize {
@@ -280,9 +295,16 @@ func (t *freezerTable) repair() error {
return err
}
offsetsSize -= indexEntrySize
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+
+ // Read the new head index, use the default value in case
+ // the freezer is already empty.
var newLastIndex indexEntry
- newLastIndex.unmarshalBinary(buffer)
+ if offsetsSize == indexEntrySize {
+ newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ newLastIndex.unmarshalBinary(buffer)
+ }
// We might have slipped back into an earlier head-file here
if newLastIndex.filenum != lastIndex.filenum {
// Release earlier opened file
@@ -301,18 +323,30 @@ func (t *freezerTable) repair() error {
contentExp = int64(lastIndex.offset)
}
}
- // Ensure all reparation changes have been written to disk
- if err := t.index.Sync(); err != nil {
- return err
- }
- if err := t.head.Sync(); err != nil {
- return err
+ // Sync() fails for read-only files on windows.
+ if !t.readonly {
+ // Ensure all reparation changes have been written to disk
+ if err := t.index.Sync(); err != nil {
+ return err
+ }
+ if err := t.head.Sync(); err != nil {
+ return err
+ }
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
}
// Update the item and byte counters and return
- t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
+ t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum
+ // Delete the leftover files because of head deletion
+ t.releaseFilesAfter(t.headId, true)
+
+ // Delete the leftover files because of tail deletion
+ t.releaseFilesBefore(t.tailId, true)
+
// Close opened files and preopen all files
if err := t.preopen(); err != nil {
return err
@@ -328,27 +362,35 @@ func (t *freezerTable) repair() error {
func (t *freezerTable) preopen() (err error) {
// The repair might have already opened (some) files
t.releaseFilesAfter(0, false)
+
// Open all except head in RDONLY
for i := t.tailId; i < t.headId; i++ {
if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
return err
}
}
- // Open head in read/write
- t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+ if t.readonly {
+ t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
+ } else {
+ // Open head in read/write
+ t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+ }
return err
}
-// truncate discards any recent data above the provided threshold number.
-func (t *freezerTable) truncate(items uint64) error {
+// truncateHead discards any recent data above the provided threshold number.
+func (t *freezerTable) truncateHead(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
- // If our item count is correct, don't do anything
+ // Ensure the given truncate target falls in the correct range
existing := atomic.LoadUint64(&t.items)
if existing <= items {
return nil
}
+ if items < atomic.LoadUint64(&t.itemHidden) {
+ return errors.New("truncation below tail")
+ }
// We need to truncate, save the old size for metrics tracking
oldSize, err := t.sizeNolock()
if err != nil {
@@ -360,17 +402,24 @@ func (t *freezerTable) truncate(items uint64) error {
log = t.logger.Warn // Only loud warn if we delete multiple items
}
log("Truncating freezer table", "items", existing, "limit", items)
- if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
+
+ // Truncate the index file first, the tail position is also considered
+ // when calculating the new freezer table length.
+ length := items - atomic.LoadUint64(&t.itemOffset)
+ if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
// Calculate the new expected size of the data file and truncate it
- buffer := make([]byte, indexEntrySize)
- if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
- return err
- }
var expected indexEntry
- expected.unmarshalBinary(buffer)
-
+ if length == 0 {
+ expected = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ buffer := make([]byte, indexEntrySize)
+ if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
+ return err
+ }
+ expected.unmarshalBinary(buffer)
+ }
// We might need to truncate back to older files
if expected.filenum != t.headId {
// If already open for reading, force-reopen for writing
@@ -399,7 +448,110 @@ func (t *freezerTable) truncate(items uint64) error {
return err
}
t.sizeGauge.Dec(int64(oldSize - newSize))
+ return nil
+}
+// truncateTail discards any recent data before the provided threshold number.
+func (t *freezerTable) truncateTail(items uint64) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Ensure the given truncate target falls in the correct range
+ if atomic.LoadUint64(&t.itemHidden) >= items {
+ return nil
+ }
+ if atomic.LoadUint64(&t.items) < items {
+ return errors.New("truncation above head")
+ }
+ // Load the new tail index by the given new tail position
+ var (
+ newTailId uint32
+ buffer = make([]byte, indexEntrySize)
+ )
+ if atomic.LoadUint64(&t.items) == items {
+ newTailId = t.headId
+ } else {
+ offset := items - atomic.LoadUint64(&t.itemOffset)
+ if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
+ return err
+ }
+ var newTail indexEntry
+ newTail.unmarshalBinary(buffer)
+ newTailId = newTail.filenum
+ }
+ // Update the virtual tail marker and hidden these entries in table.
+ atomic.StoreUint64(&t.itemHidden, items)
+ if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
+ return err
+ }
+ // Hidden items still fall in the current tail file, no data file
+ // can be dropped.
+ if t.tailId == newTailId {
+ return nil
+ }
+ // Hidden items fall in the incorrect range, returns the error.
+ if t.tailId > newTailId {
+ return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
+ }
+ // Hidden items exceed the current tail file, drop the relevant
+ // data files. We need to truncate, save the old size for metrics
+ // tracking.
+ oldSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ // Count how many items can be deleted from the file.
+ var (
+ newDeleted = items
+ deleted = atomic.LoadUint64(&t.itemOffset)
+ )
+ for current := items - 1; current >= deleted; current -= 1 {
+ if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
+ return err
+ }
+ var pre indexEntry
+ pre.unmarshalBinary(buffer)
+ if pre.filenum != newTailId {
+ break
+ }
+ newDeleted = current
+ }
+ // Commit the changes of metadata file first before manipulating
+ // the indexes file.
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
+ // Truncate the deleted index entries from the index file.
+ err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
+ tailIndex := indexEntry{
+ filenum: newTailId,
+ offset: uint32(newDeleted),
+ }
+ _, err := f.Write(tailIndex.append(nil))
+ return err
+ })
+ if err != nil {
+ return err
+ }
+ // Reopen the modified index file to load the changes
+ if err := t.index.Close(); err != nil {
+ return err
+ }
+ t.index, err = openFreezerFileForAppend(t.index.Name())
+ if err != nil {
+ return err
+ }
+ // Release any files before the current tail
+ t.tailId = newTailId
+ atomic.StoreUint64(&t.itemOffset, newDeleted)
+ t.releaseFilesBefore(t.tailId, true)
+
+ // Retrieve the new size and update the total size counter
+ newSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ t.sizeGauge.Dec(int64(oldSize - newSize))
return nil
}
@@ -414,6 +566,11 @@ func (t *freezerTable) Close() error {
}
t.index = nil
+ if err := t.meta.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ t.meta = nil
+
for _, f := range t.files {
if err := f.Close(); err != nil {
errs = append(errs, err)
@@ -468,6 +625,19 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
}
}
+// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
+func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
+ for fnum, f := range t.files {
+ if fnum < num {
+ delete(t.files, fnum)
+ f.Close()
+ if remove {
+ os.Remove(f.Name())
+ }
+ }
+ }
+}
+
// getIndices returns the index entries for the given from-item, covering 'count' items.
// N.B: The actual number of returned indices for N items will always be N+1 (unless an
// error is returned).
@@ -476,7 +646,7 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
// it will return error.
func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
// Apply the table-offset
- from = from - uint64(t.itemOffset)
+ from = from - t.itemOffset
// For reading N items, we need N+1 indices.
buffer := make([]byte, (count+1)*indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@@ -561,18 +731,21 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
t.lock.RLock()
defer t.lock.RUnlock()
- // Ensure the table and the item is accessible
+ // Ensure the table and the item are accessible
if t.index == nil || t.head == nil {
return nil, nil, errClosed
}
- itemCount := atomic.LoadUint64(&t.items) // max number
+ var (
+ items = atomic.LoadUint64(&t.items) // the total items(head + 1)
+ hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
+ )
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
- if itemCount <= start || uint64(t.itemOffset) > start || count == 0 {
+ if items <= start || hidden > start || count == 0 {
return nil, nil, errOutOfBounds
}
- if start+count > itemCount {
- count = itemCount - start
+ if start+count > items {
+ count = items - start
}
var (
output = make([]byte, maxBytes) // Buffer to read data into
@@ -648,10 +821,10 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return output[:outputSize], sizes, nil
}
-// has returns an indicator whether the specified number data
-// exists in the freezer table.
+// has returns an indicator whether the specified number data is still accessible
+// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
- return atomic.LoadUint64(&t.items) > number
+ return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
}
// size returns the total data size in the freezer table.
@@ -705,6 +878,9 @@ func (t *freezerTable) Sync() error {
if err := t.index.Sync(); err != nil {
return err
}
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
return t.head.Sync()
}
@@ -722,13 +898,20 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
}
func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
+ meta, err := readMetadata(t.meta)
+ if err != nil {
+ fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
+ return
+ }
+ fmt.Fprintf(w, "Version %d deleted %d, hidden %d\n", meta.Version, atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
+
buf := make([]byte, indexEntrySize)
fmt.Fprintf(w, "| number | fileno | offset |\n")
fmt.Fprintf(w, "|--------|--------|--------|\n")
for i := uint64(start); ; i++ {
- if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
+ if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
break
}
var entry indexEntry
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 803809b5207f..0bddcf721136 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -18,13 +18,18 @@ package rawdb
import (
"bytes"
+ "encoding/binary"
"fmt"
"math/rand"
"os"
"path/filepath"
+ "reflect"
+ "sync/atomic"
"testing"
+ "testing/quick"
"time"
+ "github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/metrics"
"github.com/stretchr/testify/require"
)
@@ -40,7 +45,7 @@ func TestFreezerBasics(t *testing.T) {
// set cutoff at 50 bytes
f, err := newTable(os.TempDir(),
fmt.Sprintf("unittest-%d", rand.Uint64()),
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -85,7 +90,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
f *freezerTable
err error
)
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -99,7 +104,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
require.NoError(t, batch.commit())
f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -116,7 +121,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
}
f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -131,7 +136,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -160,7 +165,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// Now open it again
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -183,7 +188,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// Fill a table and close it
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -204,12 +209,12 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
}
// Remove everything but the first item, and leave data unaligned
// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
- idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
+ idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
idxFile.Close()
// Now open it again
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -232,7 +237,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// And if we open it, we should now be able to read all of them (new values)
{
- f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
for y := 1; y < 255; y++ {
exp := getChunk(15, ^y)
got, err := f.Retrieve(uint64(y))
@@ -254,7 +259,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -265,7 +270,7 @@ func TestSnappyDetection(t *testing.T) {
// Open without snappy
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
if err != nil {
t.Fatal(err)
}
@@ -277,7 +282,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -309,7 +314,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
// Fill a table and close it
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -345,7 +350,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
// 45, 45, 15
// with 3+3+1 items
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -366,7 +371,7 @@ func TestFreezerTruncate(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -382,12 +387,12 @@ func TestFreezerTruncate(t *testing.T) {
// Reopen, truncate
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
defer f.Close()
- f.truncate(10) // 150 bytes
+ f.truncateHead(10) // 150 bytes
if f.items != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items)
}
@@ -407,7 +412,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -440,7 +445,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// Reopen
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -475,7 +480,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -491,7 +496,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
// Reopen and read all files
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -504,7 +509,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
}
// Now, truncate back to zero
- f.truncate(0)
+ f.truncateHead(0)
// Write the data again
batch := f.newBatch()
@@ -523,7 +528,7 @@ func TestFreezerOffset(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -565,18 +570,19 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 4 ] at index zero
- tailId := uint32(2) // First file is 2
- itemOffset := uint32(4) // We have removed four items
zeroIndex := indexEntry{
- filenum: tailId,
- offset: itemOffset,
+ filenum: uint32(2), // First file is 2
+ offset: uint32(4), // We have removed four items
}
buf := zeroIndex.append(nil)
+
// Overwrite index zero
copy(indexBuf, buf)
+
// Remove the four next indices by overwriting
copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
indexFile.WriteAt(indexBuf, 0)
+
// Need to truncate the moved index items
indexFile.Truncate(indexEntrySize * (1 + 2))
indexFile.Close()
@@ -584,7 +590,7 @@ func TestFreezerOffset(t *testing.T) {
// Now open again
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -623,13 +629,12 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 1M ] at index zero
- tailId := uint32(2) // First file is 2
- itemOffset := uint32(1000000) // We have removed 1M items
zeroIndex := indexEntry{
- offset: itemOffset,
- filenum: tailId,
+ offset: uint32(1000000), // We have removed 1M items
+ filenum: uint32(2), // First file is 2
}
buf := zeroIndex.append(nil)
+
// Overwrite index zero
copy(indexBuf, buf)
indexFile.WriteAt(indexBuf, 0)
@@ -638,7 +643,7 @@ func TestFreezerOffset(t *testing.T) {
// Check that existing items have been moved to index 1M.
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -659,6 +664,171 @@ func TestFreezerOffset(t *testing.T) {
}
}
+func TestTruncateTail(t *testing.T) {
+ t.Parallel()
+ rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+ fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
+
+ // Fill table
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write 7 x 20 bytes, splitting out into four files
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+ require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
+ require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+ require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+ require.NoError(t, batch.commit())
+
+ // nothing to do, all the items should still be there.
+ f.truncateTail(0)
+ fmt.Println(f.dumpIndexString(0, 1000))
+ checkRetrieve(t, f, map[uint64][]byte{
+ 0: getChunk(20, 0xFF),
+ 1: getChunk(20, 0xEE),
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // truncate single element( item 0 ), deletion is only supported at file level
+ f.truncateTail(1)
+ fmt.Println(f.dumpIndexString(0, 1000))
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 1: getChunk(20, 0xEE),
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Reopen the table, the deletion information should be persisted as well
+ f.Close()
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 1: getChunk(20, 0xEE),
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // truncate two elements( item 0, item 1 ), the file 0 should be deleted
+ f.truncateTail(2)
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // Reopen the table, the above testing should still pass
+ f.Close()
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 2: getChunk(20, 0xdd),
+ 3: getChunk(20, 0xcc),
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+
+ // truncate all, the entire freezer should be deleted
+ f.truncateTail(7)
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ 2: errOutOfBounds,
+ 3: errOutOfBounds,
+ 4: errOutOfBounds,
+ 5: errOutOfBounds,
+ 6: errOutOfBounds,
+ })
+}
+
+func TestTruncateHead(t *testing.T) {
+ t.Parallel()
+ rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+ fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
+
+ // Fill table
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write 7 x 20 bytes, splitting out into four files
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+ require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
+ require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+ require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+ require.NoError(t, batch.commit())
+
+ f.truncateTail(4) // Tail = 4
+
+ // NewHead is required to be 3, the entire table should be truncated
+ f.truncateHead(4)
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds, // Deleted by tail
+ 1: errOutOfBounds, // Deleted by tail
+ 2: errOutOfBounds, // Deleted by tail
+ 3: errOutOfBounds, // Deleted by tail
+ 4: errOutOfBounds, // Deleted by Head
+ 5: errOutOfBounds, // Deleted by Head
+ 6: errOutOfBounds, // Deleted by Head
+ })
+
+ // Append new items
+ batch = f.newBatch()
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+ require.NoError(t, batch.commit())
+
+ checkRetrieve(t, f, map[uint64][]byte{
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x11),
+ })
+}
+
func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
t.Helper()
@@ -726,7 +896,7 @@ func TestSequentialRead(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
{ // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -736,7 +906,7 @@ func TestSequentialRead(t *testing.T) {
f.Close()
}
{ // Open it, iterate, verify iteration
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -757,7 +927,7 @@ func TestSequentialRead(t *testing.T) {
}
{ // Open it, iterate, verify byte limit. The byte limit is less than item
// size, so each lookup should only return one item
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -786,7 +956,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
{ // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
if err != nil {
t.Fatal(err)
}
@@ -808,7 +978,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
{100, 109, 10},
} {
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
if err != nil {
t.Fatal(err)
}
@@ -829,3 +999,298 @@ func TestSequentialReadByteLimit(t *testing.T) {
}
}
}
+
+func TestFreezerReadonly(t *testing.T) {
+ tmpdir := os.TempDir()
+ // Case 1: Check it fails on non-existent file.
+ _, err := newTable(tmpdir,
+ fmt.Sprintf("readonlytest-%d", rand.Uint64()),
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err == nil {
+ t.Fatal("readonly table instantiation should fail for non-existent table")
+ }
+
+ // Case 2: Check that it fails on invalid index length.
+ fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
+ idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
+ if err != nil {
+ t.Errorf("Failed to open index file: %v\n", err)
+ }
+ // size should not be a multiple of indexEntrySize.
+ idxFile.Write(make([]byte, 17))
+ idxFile.Close()
+ _, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err == nil {
+ t.Errorf("readonly table instantiation should fail for invalid index size")
+ }
+
+ // Case 3: Open table non-readonly table to write some data.
+ // Then corrupt the head file and make sure opening the table
+ // again in readonly triggers an error.
+ fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
+ f, err := newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ t.Fatalf("failed to instantiate table: %v", err)
+ }
+ writeChunks(t, f, 8, 32)
+ // Corrupt table file
+ if _, err := f.head.Write([]byte{1, 1}); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ _, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err == nil {
+ t.Errorf("readonly table instantiation should fail for corrupt table file")
+ }
+
+ // Case 4: Write some data to a table and later re-open it as readonly.
+ // Should be successful.
+ fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
+ f, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ t.Fatalf("failed to instantiate table: %v\n", err)
+ }
+ writeChunks(t, f, 32, 128)
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ f, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err := f.Retrieve(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp := getChunk(128, 10)
+ if !bytes.Equal(v, exp) {
+ t.Errorf("retrieved value is incorrect")
+ }
+
+ // Case 5: Now write some data via a batch.
+ // This should fail either during AppendRaw or Commit
+ batch := f.newBatch()
+ writeErr := batch.AppendRaw(32, make([]byte, 1))
+ if writeErr == nil {
+ writeErr = batch.commit()
+ }
+ if writeErr == nil {
+ t.Fatalf("Writing to readonly table should fail")
+ }
+}
+
+// randTest performs random freezer table operations.
+// Instances of this test are created by Generate.
+type randTest []randTestStep
+
+type randTestStep struct {
+ op int
+ items []uint64 // for append and retrieve
+ blobs [][]byte // for append
+ target uint64 // for truncate(head/tail)
+ err error // for debugging
+}
+
+const (
+ opReload = iota
+ opAppend
+ opRetrieve
+ opTruncateHead
+ opTruncateHeadAll
+ opTruncateTail
+ opTruncateTailAll
+ opCheckAll
+ opMax // boundary value, not an actual op
+)
+
+func getVals(first uint64, n int) [][]byte {
+ var ret [][]byte
+ for i := 0; i < n; i++ {
+ val := make([]byte, 8)
+ binary.BigEndian.PutUint64(val, first+uint64(i))
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
+ var (
+ deleted uint64 // The number of deleted items from tail
+ items []uint64 // The index of entries in table
+
+ // getItems retrieves the indexes for items in table.
+ getItems = func(n int) []uint64 {
+ length := len(items)
+ if length == 0 {
+ return nil
+ }
+ var ret []uint64
+ index := rand.Intn(length)
+ for i := index; len(ret) < n && i < length; i++ {
+ ret = append(ret, items[i])
+ }
+ return ret
+ }
+
+ // addItems appends the given length items into the table.
+ addItems = func(n int) []uint64 {
+ var first = deleted
+ if len(items) != 0 {
+ first = items[len(items)-1] + 1
+ }
+ var ret []uint64
+ for i := 0; i < n; i++ {
+ ret = append(ret, first+uint64(i))
+ }
+ items = append(items, ret...)
+ return ret
+ }
+ )
+
+ var steps randTest
+ for i := 0; i < size; i++ {
+ step := randTestStep{op: r.Intn(opMax)}
+ switch step.op {
+ case opReload, opCheckAll:
+ case opAppend:
+ num := r.Intn(3)
+ step.items = addItems(num)
+ if len(step.items) == 0 {
+ step.blobs = nil
+ } else {
+ step.blobs = getVals(step.items[0], num)
+ }
+ case opRetrieve:
+ step.items = getItems(r.Intn(3))
+ case opTruncateHead:
+ if len(items) == 0 {
+ step.target = deleted
+ } else {
+ index := r.Intn(len(items))
+ items = items[:index]
+ step.target = deleted + uint64(index)
+ }
+ case opTruncateHeadAll:
+ step.target = deleted
+ items = items[:0]
+ case opTruncateTail:
+ if len(items) == 0 {
+ step.target = deleted
+ } else {
+ index := r.Intn(len(items))
+ items = items[index:]
+ deleted += uint64(index)
+ step.target = deleted
+ }
+ case opTruncateTailAll:
+ step.target = deleted + uint64(len(items))
+ items = items[:0]
+ deleted = step.target
+ }
+ steps = append(steps, step)
+ }
+ return reflect.ValueOf(steps)
+}
+
+func runRandTest(rt randTest) bool {
+ fname := fmt.Sprintf("randtest-%d", rand.Uint64())
+ f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ panic("failed to initialize table")
+ }
+ var values [][]byte
+ for i, step := range rt {
+ switch step.op {
+ case opReload:
+ f.Close()
+ f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ rt[i].err = fmt.Errorf("failed to reload table %v", err)
+ }
+ case opCheckAll:
+ tail := atomic.LoadUint64(&f.itemHidden)
+ head := atomic.LoadUint64(&f.items)
+
+ if tail == head {
+ continue
+ }
+ got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
+ if err != nil {
+ rt[i].err = err
+ } else {
+ if !reflect.DeepEqual(got, values) {
+ rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
+ }
+ }
+
+ case opAppend:
+ batch := f.newBatch()
+ for i := 0; i < len(step.items); i++ {
+ batch.AppendRaw(step.items[i], step.blobs[i])
+ }
+ batch.commit()
+ values = append(values, step.blobs...)
+
+ case opRetrieve:
+ var blobs [][]byte
+ if len(step.items) == 0 {
+ continue
+ }
+ tail := atomic.LoadUint64(&f.itemHidden)
+ for i := 0; i < len(step.items); i++ {
+ blobs = append(blobs, values[step.items[i]-tail])
+ }
+ got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
+ if err != nil {
+ rt[i].err = err
+ } else {
+ if !reflect.DeepEqual(got, blobs) {
+ rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
+ }
+ }
+
+ case opTruncateHead:
+ f.truncateHead(step.target)
+
+ length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
+ values = values[:length]
+
+ case opTruncateHeadAll:
+ f.truncateHead(step.target)
+ values = nil
+
+ case opTruncateTail:
+ prev := atomic.LoadUint64(&f.itemHidden)
+ f.truncateTail(step.target)
+
+ truncated := atomic.LoadUint64(&f.itemHidden) - prev
+ values = values[truncated:]
+
+ case opTruncateTailAll:
+ f.truncateTail(step.target)
+ values = nil
+ }
+ // Abort the test on error.
+ if rt[i].err != nil {
+ return false
+ }
+ }
+ f.Close()
+ return true
+}
+
+func TestRandom(t *testing.T) {
+ if err := quick.Check(runRandTest, nil); err != nil {
+ if cerr, ok := err.(*quick.CheckError); ok {
+ t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
+ }
+ t.Fatal(err)
+ }
+}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index fa84f803068b..b3fd3059e750 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -24,6 +24,7 @@ import (
"math/big"
"math/rand"
"os"
+ "path"
"sync"
"testing"
@@ -186,7 +187,7 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
wg.Wait()
}
-// This test runs ModifyAncients and TruncateAncients concurrently with each other.
+// This test runs ModifyAncients and TruncateHead concurrently with each other.
func TestFreezerConcurrentModifyTruncate(t *testing.T) {
f, dir := newFreezerForTesting(t, freezerTestTableDef)
defer os.RemoveAll(dir)
@@ -196,7 +197,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
for i := 0; i < 1000; i++ {
// First reset and write 100 items.
- if err := f.TruncateAncients(0); err != nil {
+ if err := f.TruncateHead(0); err != nil {
t.Fatal("truncate failed:", err)
}
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -231,7 +232,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
wg.Done()
}()
go func() {
- truncateErr = f.TruncateAncients(10)
+ truncateErr = f.TruncateHead(10)
wg.Done()
}()
go func() {
@@ -253,6 +254,44 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
}
}
+func TestFreezerReadonlyValidate(t *testing.T) {
+ tables := map[string]bool{"a": true, "b": true}
+ dir, err := ioutil.TempDir("", "freezer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ // Open non-readonly freezer and fill individual tables
+ // with different amount of data.
+ f, err := newFreezer(dir, "", false, 2049, tables)
+ if err != nil {
+ t.Fatal("can't open freezer", err)
+ }
+ var item = make([]byte, 1024)
+ aBatch := f.tables["a"].newBatch()
+ require.NoError(t, aBatch.AppendRaw(0, item))
+ require.NoError(t, aBatch.AppendRaw(1, item))
+ require.NoError(t, aBatch.AppendRaw(2, item))
+ require.NoError(t, aBatch.commit())
+ bBatch := f.tables["b"].newBatch()
+ require.NoError(t, bBatch.AppendRaw(0, item))
+ require.NoError(t, bBatch.commit())
+ if f.tables["a"].items != 3 {
+ t.Fatalf("unexpected number of items in table")
+ }
+ if f.tables["b"].items != 1 {
+ t.Fatalf("unexpected number of items in table")
+ }
+ require.NoError(t, f.Close())
+
+ // Re-openening as readonly should fail when validating
+ // table lengths.
+ f, err = newFreezer(dir, "", true, 2049, tables)
+ if err == nil {
+ t.Fatal("readonly freezer should fail with differing table lengths")
+ }
+}
+
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
t.Helper()
@@ -299,3 +338,92 @@ func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
}
}
+
+func TestRenameWindows(t *testing.T) {
+ var (
+ fname = "file.bin"
+ fname2 = "file2.bin"
+ data = []byte{1, 2, 3, 4}
+ data2 = []byte{2, 3, 4, 5}
+ data3 = []byte{3, 5, 6, 7}
+ dataLen = 4
+ )
+
+ // Create 2 temp dirs
+ dir1, err := os.MkdirTemp("", "rename-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(dir1)
+ dir2, err := os.MkdirTemp("", "rename-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(dir2)
+
+ // Create file in dir1 and fill with data
+ f, err := os.Create(path.Join(dir1, fname))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f2, err := os.Create(path.Join(dir1, fname2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f3, err := os.Create(path.Join(dir2, fname2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Write(data); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f2.Write(data2); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f3.Write(data3); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := f2.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := f3.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check file contents
+ f, err = os.Open(path.Join(dir2, fname))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.Remove(f.Name())
+ buf := make([]byte, dataLen)
+ if _, err := f.Read(buf); err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf, data) {
+ t.Errorf("unexpected file contents. Got %v\n", buf)
+ }
+
+ f, err = os.Open(path.Join(dir2, fname2))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.Remove(f.Name())
+ if _, err := f.Read(buf); err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf, data2) {
+ t.Errorf("unexpected file contents. Got %v\n", buf)
+ }
+}
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
new file mode 100644
index 000000000000..5695fc0fa891
--- /dev/null
+++ b/core/rawdb/freezer_utils.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
+// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
+// Before the copy is executed, there is a callback can be registered to
+// manipulate the dest file.
+// It is perfectly valid to have destPath == srcPath.
+func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
+ // Create a temp file in the same dir where we want it to wind up
+ f, err := ioutil.TempFile(filepath.Dir(destPath), "*")
+ if err != nil {
+ return err
+ }
+ fname := f.Name()
+
+ // Clean up the leftover file
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ os.Remove(fname)
+ }()
+ // Apply the given function if it's not nil before we copy
+ // the content from the src.
+ if before != nil {
+ if err := before(f); err != nil {
+ return err
+ }
+ }
+ // Open the source file
+ src, err := os.Open(srcPath)
+ if err != nil {
+ return err
+ }
+ if _, err = src.Seek(int64(offset), 0); err != nil {
+ src.Close()
+ return err
+ }
+ // io.Copy uses 32K buffer internally.
+ _, err = io.Copy(f, src)
+ if err != nil {
+ src.Close()
+ return err
+ }
+ // Rename the temporary file to the specified dest name.
+ // src may be same as dest, so needs to be closed before
+ // we do the final move.
+ src.Close()
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+ f = nil
+
+ if err := os.Rename(fname, destPath); err != nil {
+ return err
+ }
+ return nil
+}
+
+// openFreezerFileForAppend opens a freezer table file and seeks to the end
+func openFreezerFileForAppend(filename string) (*os.File, error) {
+ // Open the file without the O_APPEND flag
+ // because it has differing behaviour during Truncate operations
+ // on different OS's
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ // Seek to end for append
+ if _, err = file.Seek(0, io.SeekEnd); err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+// openFreezerFileForReadOnly opens a freezer table file for read only access
+func openFreezerFileForReadOnly(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDONLY, 0644)
+}
+
+// openFreezerFileTruncated opens a freezer table making sure it is truncated
+func openFreezerFileTruncated(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+}
+
+// truncateFreezerFile resizes a freezer table file and seeks to the end
+func truncateFreezerFile(file *os.File, size int64) error {
+ if err := file.Truncate(size); err != nil {
+ return err
+ }
+ // Seek to end for append
+ if _, err := file.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/core/rawdb/freezer_utils_test.go b/core/rawdb/freezer_utils_test.go
new file mode 100644
index 000000000000..de8087f9b936
--- /dev/null
+++ b/core/rawdb/freezer_utils_test.go
@@ -0,0 +1,76 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestCopyFrom(t *testing.T) {
+ var (
+ content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
+ prefix = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
+ )
+ var cases = []struct {
+ src, dest string
+ offset uint64
+ writePrefix bool
+ }{
+ {"foo", "bar", 0, false},
+ {"foo", "bar", 1, false},
+ {"foo", "bar", 8, false},
+ {"foo", "foo", 0, false},
+ {"foo", "foo", 1, false},
+ {"foo", "foo", 8, false},
+ {"foo", "bar", 0, true},
+ {"foo", "bar", 1, true},
+ {"foo", "bar", 8, true},
+ }
+ for _, c := range cases {
+ ioutil.WriteFile(c.src, content, 0644)
+
+ if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
+ if !c.writePrefix {
+ return nil
+ }
+ f.Write(prefix)
+ return nil
+ }); err != nil {
+ os.Remove(c.src)
+ t.Fatalf("Failed to copy %v", err)
+ }
+
+ blob, err := ioutil.ReadFile(c.dest)
+ if err != nil {
+ os.Remove(c.src)
+ os.Remove(c.dest)
+ t.Fatalf("Failed to read %v", err)
+ }
+ want := content[c.offset:]
+ if c.writePrefix {
+ want = append(prefix, want...)
+ }
+ if !bytes.Equal(blob, want) {
+ t.Fatal("Unexpected value")
+ }
+ os.Remove(c.src)
+ os.Remove(c.dest)
+ }
+}
diff --git a/core/rawdb/key_length_iterator.go b/core/rawdb/key_length_iterator.go
new file mode 100644
index 000000000000..d1c5af269a31
--- /dev/null
+++ b/core/rawdb/key_length_iterator.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import "github.com/ethereum/go-ethereum/ethdb"
+
+// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
+// with a specific key length will be returned.
+type KeyLengthIterator struct {
+ requiredKeyLength int
+ ethdb.Iterator
+}
+
+// NewKeyLengthIterator returns a wrapped version of the iterator that will only return key-value
+// pairs where keys with a specific key length will be returned.
+func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
+ return &KeyLengthIterator{
+ Iterator: it,
+ requiredKeyLength: keyLen,
+ }
+}
+
+func (it *KeyLengthIterator) Next() bool {
+ // Return true as soon as a key with the required key length is discovered
+ for it.Iterator.Next() {
+ if len(it.Iterator.Key()) == it.requiredKeyLength {
+ return true
+ }
+ }
+
+ // Return false when we exhaust the keys in the underlying iterator.
+ return false
+}
diff --git a/core/rawdb/key_length_iterator_test.go b/core/rawdb/key_length_iterator_test.go
new file mode 100644
index 000000000000..654efc5b55b5
--- /dev/null
+++ b/core/rawdb/key_length_iterator_test.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "encoding/binary"
+ "testing"
+)
+
+func TestKeyLengthIterator(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ keyLen := 8
+ expectedKeys := make(map[string]struct{})
+ for i := 0; i < 100; i++ {
+ key := make([]byte, keyLen)
+ binary.BigEndian.PutUint64(key, uint64(i))
+ if err := db.Put(key, []byte{0x1}); err != nil {
+ t.Fatal(err)
+ }
+ expectedKeys[string(key)] = struct{}{}
+
+ longerKey := make([]byte, keyLen*2)
+ binary.BigEndian.PutUint64(longerKey, uint64(i))
+ if err := db.Put(longerKey, []byte{0x1}); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
+ for it.Next() {
+ key := it.Key()
+ _, exists := expectedKeys[string(key)]
+ if !exists {
+ t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
+ }
+ delete(expectedKeys, string(key))
+ if len(key) != keyLen {
+ t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
+ }
+ }
+
+ if len(expectedKeys) != 0 {
+ t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
+ }
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index b35fcba45f79..08f373488056 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -63,6 +63,9 @@ var (
// snapshotSyncStatusKey tracks the snapshot sync status across restarts.
snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
+ // skeletonSyncStatusKey tracks the skeleton sync status across restarts.
+ skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
+
// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail")
@@ -92,9 +95,11 @@ var (
SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
+ skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header
- PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
- configPrefix = []byte("ethereum-config-") // config prefix for the db
+ PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
+ configPrefix = []byte("ethereum-config-") // config prefix for the db
+ genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
@@ -210,6 +215,11 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
return key
}
+// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
+func skeletonHeaderKey(number uint64) []byte {
+ return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
+}
+
// preimageKey = PreimagePrefix + hash
func preimageKey(hash common.Hash) []byte {
return append(PreimagePrefix, hash.Bytes()...)
@@ -233,3 +243,8 @@ func IsCodeKey(key []byte) (bool, []byte) {
func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)
}
+
+// genesisKey = genesisPrefix + hash
+func genesisKey(hash common.Hash) []byte {
+ return append(genesisPrefix, hash.Bytes()...)
+}
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 91fc31b660d6..5eadf5f7c159 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -74,6 +74,12 @@ func (t *table) Ancients() (uint64, error) {
return t.db.Ancients()
}
+// Tail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Tail() (uint64, error) {
+ return t.db.Tail()
+}
+
// AncientSize is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientSize(kind string) (uint64, error) {
@@ -89,10 +95,16 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err err
return t.db.ReadAncients(fn)
}
-// TruncateAncients is a noop passthrough that just forwards the request to the underlying
+// TruncateHead is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateHead(items uint64) error {
+ return t.db.TruncateHead(items)
+}
+
+// TruncateTail is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) TruncateAncients(items uint64) error {
- return t.db.TruncateAncients(items)
+func (t *table) TruncateTail(items uint64) error {
+ return t.db.TruncateTail(items)
}
// Sync is a noop passthrough that just forwards the request to the underlying
@@ -101,6 +113,12 @@ func (t *table) Sync() error {
return t.db.Sync()
}
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
+ return t.db.MigrateTable(kind, convert)
+}
+
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
@@ -172,6 +190,18 @@ func (t *table) NewBatch() ethdb.Batch {
return &tableBatch{t.db.NewBatch(), t.prefix}
}
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (t *table) NewBatchWithSize(size int) ethdb.Batch {
+ return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
+}
+
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by all following mutations
+// happened on the database.
+func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
+ return t.db.NewSnapshot()
+}
+
// tableBatch is a wrapper around a database batch that prefixes each key access
// with a pre-configured string.
type tableBatch struct {
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 37772ca35c55..4e3daac669b4 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -265,7 +265,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
- if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
+ if !rawdb.HasTrieNode(p.db, root) {
// The special case is for clique based networks(rinkeby, goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
@@ -279,7 +279,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
- if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
+ if rawdb.HasTrieNode(p.db, layers[i].Root()) {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 5cfb9a9f2ad9..6836a574090c 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -66,6 +66,29 @@ type journalStorage struct {
Vals [][]byte
}
+func ParseGeneratorStatus(generatorBlob []byte) string {
+ if len(generatorBlob) == 0 {
+ return ""
+ }
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+ log.Warn("failed to decode snapshot generator", "err", err)
+ return ""
+ }
+ // Figure out whether we're after or within an account
+ var m string
+ switch marker := generator.Marker; len(marker) {
+ case common.HashLength:
+ m = fmt.Sprintf("at %#x", marker)
+ case 2 * common.HashLength:
+ m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
+ default:
+ m = fmt.Sprintf("%#x", marker)
+ }
+ return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
+ generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
+}
+
// loadAndParseJournal tries to parse the snapshot journal in latest format.
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
// Retrieve the disk layer generator. It must exist, no matter the
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 6ee6b06bb5f2..76200851e469 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -546,20 +546,19 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
for it.Next() {
- if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
- batch.Delete(key)
- base.cache.Del(key[1:])
- snapshotFlushStorageItemMeter.Mark(1)
-
- // Ensure we don't delete too much data blindly (contract can be
- // huge). It's ok to flush, the root will go missing in case of a
- // crash and we'll detect and regenerate the snapshot.
- if batch.ValueSize() > ethdb.IdealBatchSize {
- if err := batch.Write(); err != nil {
- log.Crit("Failed to write storage deletions", "err", err)
- }
- batch.Reset()
+ key := it.Key()
+ batch.Delete(key)
+ base.cache.Del(key[1:])
+ snapshotFlushStorageItemMeter.Mark(1)
+
+ // Ensure we don't delete too much data blindly (contract can be
+ // huge). It's ok to flush, the root will go missing in case of a
+ // crash and we'll detect and regenerate the snapshot.
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write storage deletions", "err", err)
}
+ batch.Reset()
}
}
it.Release()
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 138fcbdecde8..bcb6dca4f56b 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -198,25 +198,10 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
- enc []byte
- err error
- meter *time.Duration
+ enc []byte
+ err error
)
- readStart := time.Now()
- if metrics.EnabledExpensive {
- // If the snap is 'under construction', the first lookup may fail. If that
- // happens, we don't want to double-count the time elapsed. Thus this
- // dance with the metering.
- defer func() {
- if meter != nil {
- *meter += time.Since(readStart)
- }
- }()
- }
if s.db.snap != nil {
- if metrics.EnabledExpensive {
- meter = &s.db.SnapshotStorageReads
- }
// If the object was destructed in *this* block (and potentially resurrected),
// the storage has been cleared out, and we should *not* consult the previous
// snapshot about any storage values. The only possible alternatives are:
@@ -226,20 +211,20 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
return common.Hash{}
}
+ start := time.Now()
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
+ if metrics.EnabledExpensive {
+ s.db.SnapshotStorageReads += time.Since(start)
+ }
}
// If the snapshot is unavailable or reading from it fails, load from the database.
if s.db.snap == nil || err != nil {
- if meter != nil {
- // If we already spent time checking the snapshot, account for it
- // and reset the readStart
- *meter += time.Since(readStart)
- readStart = time.Now()
- }
+ start := time.Now()
+ enc, err = s.getTrie(db).TryGet(key.Bytes())
if metrics.EnabledExpensive {
- meter = &s.db.StorageReads
+ s.db.StorageReads += time.Since(start)
}
- if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
+ if err != nil {
s.setError(err)
return common.Hash{}
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index e3541339eaa5..1d31cf470be0 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -506,16 +506,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
return obj
}
// If no live objects are available, attempt to use snapshots
- var (
- data *types.StateAccount
- err error
- )
+ var data *types.StateAccount
if s.snap != nil {
+ start := time.Now()
+ acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
if metrics.EnabledExpensive {
- defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
+ s.SnapshotAccountReads += time.Since(start)
}
- var acc *snapshot.Account
- if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
+ if err == nil {
if acc == nil {
return nil
}
@@ -534,11 +532,12 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
// If snapshot unavailable or reading from it failed, load from the database
- if s.snap == nil || err != nil {
+ if data == nil {
+ start := time.Now()
+ enc, err := s.trie.TryGet(addr.Bytes())
if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
+ s.AccountReads += time.Since(start)
}
- enc, err := s.trie.TryGet(addr.Bytes())
if err != nil {
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
return nil
@@ -885,7 +884,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) Prepare(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
- s.accessList = newAccessList()
}
func (s *StateDB) clearJournalAndRefund() {
@@ -995,6 +993,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
//
// This method should only be called if Berlin/2929+2930 is applicable at the current number.
func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
+ // Clear out any leftover from previous executions
+ s.accessList = newAccessList()
+
s.AddAddressToAccessList(sender)
if dst != nil {
s.AddAddressToAccessList(*dst)
diff --git a/core/state_processor.go b/core/state_processor.go
index 16ce3f239897..872bb50eb7e9 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -19,15 +19,20 @@ package core
import (
"fmt"
"math/big"
+ "time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
)
// StateProcessor is a basic Processor, which takes care of transitioning
@@ -141,6 +146,46 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon
return receipt, err
}
+func applyTransactionWithResult(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, msgTx types.Message, usedGas *uint64, evm *vm.EVM, tracer TracerResult) (*types.Receipt, *ExecutionResult, interface{}, error) {
+ // Create a new context to be used in the EVM environment.
+ txContext := NewEVMTxContext(msg)
+ evm.Reset(txContext, statedb)
+
+ // Apply the transaction to the current state (included in the env).
+ result, err := ApplyMessage(evm, msg, gp)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ traceResult, err := tracer.GetResult()
+ // Update the state with pending changes.
+ var root []byte
+ if config.IsByzantium(header.Number) {
+ // statedb.GetRefund()
+
+ } else {
+ root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
+ }
+ *usedGas += result.UsedGas
+
+ // Create a new receipt for the transaction, storing the intermediate root and gas used
+ // by the tx.
+ receipt := &types.Receipt{Type: 0, PostState: root, CumulativeGasUsed: *usedGas}
+ if result.Failed() {
+ receipt.Status = types.ReceiptStatusFailed
+ } else {
+ receipt.Status = types.ReceiptStatusSuccessful
+ }
+ // receipt.TxHash = tx.Hash()
+ receipt.GasUsed = result.UsedGas
+
+ // Set the receipt logs and create the bloom filter.
+ receipt.BlockHash = header.Hash()
+ receipt.BlockNumber = header.Number
+ receipt.TransactionIndex = uint(statedb.TxIndex())
+ return receipt, result, traceResult, err
+}
+
// ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
@@ -155,3 +200,249 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
}
+
+func ApplyUnsignedTransactionWithResult(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, msg types.Message, usedGas *uint64, cfg vm.Config) (*types.Receipt, *ExecutionResult, interface{}, error) {
+ // Create call tracer to get JSON stack traces
+ tracer := NewCallTracer(statedb)
+
+ // Create a new context to be used in the EVM environment
+ blockContext := NewEVMBlockContext(header, bc, author)
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true})
+ return applyTransactionWithResult(msg, config, bc, author, gp, statedb, header, msg, usedGas, vmenv, tracer)
+}
+
+// StructLogRes stores a structured log emitted by the EVM while replaying a
+// transaction in debug mode
+type StructLogRes struct {
+ Pc uint64 `json:"pc"`
+ Op string `json:"op"`
+ Gas uint64 `json:"gas"`
+ GasCost uint64 `json:"gasCost"`
+ Depth int `json:"depth"`
+ Error string `json:"error,omitempty"`
+ Stack *[]string `json:"stack,omitempty"`
+ Memory *[]string `json:"memory,omitempty"`
+ Storage *map[string]string `json:"storage,omitempty"`
+}
+
+// FormatLogs formats EVM returned structured logs for json output
+func FormatLogs(logs []logger.StructLog) []StructLogRes {
+ formatted := make([]StructLogRes, len(logs))
+ for index, trace := range logs {
+ formatted[index] = StructLogRes{
+ Pc: trace.Pc,
+ Op: trace.Op.String(),
+ Gas: trace.Gas,
+ GasCost: trace.GasCost,
+ Depth: trace.Depth,
+ Error: trace.ErrorString(),
+ }
+ if trace.Stack != nil {
+ stack := make([]string, len(trace.Stack))
+ for i, stackValue := range trace.Stack {
+ stack[i] = stackValue.Hex()
+ }
+ formatted[index].Stack = &stack
+ }
+ if trace.Memory != nil {
+ memory := make([]string, 0, (len(trace.Memory)+31)/32)
+ for i := 0; i+32 <= len(trace.Memory); i += 32 {
+ memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32]))
+ }
+ formatted[index].Memory = &memory
+ }
+ if trace.Storage != nil {
+ storage := make(map[string]string)
+ for i, storageValue := range trace.Storage {
+ storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue)
+ }
+ formatted[index].Storage = &storage
+ }
+ }
+ return formatted
+}
+
+type call struct {
+ Type string `json:"type"`
+ From common.Address `json:"from"`
+ To common.Address `json:"to"`
+ Value *hexutil.Big `json:"value,omitempty"`
+ Gas hexutil.Uint64 `json:"gas"`
+ GasUsed hexutil.Uint64 `json:"gasUsed"`
+ Input hexutil.Bytes `json:"input"`
+ Output hexutil.Bytes `json:"output"`
+ Time string `json:"time,omitempty"`
+ Calls []*call `json:"calls,omitempty"`
+ Error string `json:"error,omitempty"`
+ startTime time.Time
+ outOff uint64
+ outLen uint64
+ gasIn uint64
+ gasCost uint64
+}
+
+type TracerResult interface {
+ vm.EVMLogger
+ GetResult() (interface{}, error)
+}
+
+type CallTracer struct {
+ callStack []*call
+ descended bool
+ statedb *state.StateDB
+}
+
+func NewCallTracer(statedb *state.StateDB) TracerResult {
+ return &CallTracer{
+ callStack: []*call{},
+ descended: false,
+ statedb: statedb,
+ }
+}
+
+func (tracer *CallTracer) i() int {
+ return len(tracer.callStack) - 1
+}
+
+func (tracer *CallTracer) GetResult() (interface{}, error) {
+ return tracer.callStack[0], nil
+}
+
+func (tracer *CallTracer) CaptureStart(evm *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+ hvalue := hexutil.Big(*value)
+ tracer.callStack = []*call{&call{
+ From: from,
+ To: to,
+ Value: &hvalue,
+ Gas: hexutil.Uint64(gas),
+ Input: hexutil.Bytes(input),
+ Calls: []*call{},
+ }}
+}
+func (tracer *CallTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
+ tracer.callStack[tracer.i()].GasUsed = hexutil.Uint64(gasUsed)
+ tracer.callStack[tracer.i()].Time = fmt.Sprintf("%v", t)
+ tracer.callStack[tracer.i()].Output = hexutil.Bytes(output)
+}
+
+func (tracer *CallTracer) descend(newCall *call) {
+ tracer.callStack[tracer.i()].Calls = append(tracer.callStack[tracer.i()].Calls, newCall)
+ tracer.callStack = append(tracer.callStack, newCall)
+ tracer.descended = true
+}
+
+func toAddress(value *uint256.Int) common.Address {
+ return common.BytesToAddress(value.Bytes())
+}
+
+func (tracer *CallTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+ // for depth < len(tracer.callStack) {
+ // c := tracer.callStack[tracer.i()]
+ // c.GasUsed = c.Gas - gas
+ // tracer.callStack = tracer.callStack[:tracer.i()]
+ // }
+ defer func() {
+ if r := recover(); r != nil {
+ tracer.callStack[tracer.i()].Error = "internal failure"
+ log.Warn("Panic during trace. Recovered.", "err", r)
+ }
+ }()
+ if op == vm.CREATE || op == vm.CREATE2 {
+ inOff := scope.Stack.Back(1).Uint64()
+ inLen := scope.Stack.Back(2).Uint64()
+ hvalue := hexutil.Big(*scope.Contract.Value())
+ tracer.descend(&call{
+ Type: op.String(),
+ From: scope.Contract.Caller(),
+ Input: scope.Memory.GetCopy(int64(inOff), int64(inLen)),
+ gasIn: gas,
+ gasCost: cost,
+ Value: &hvalue,
+ startTime: time.Now(),
+ })
+ return
+ }
+ if op == vm.SELFDESTRUCT {
+ hvalue := hexutil.Big(*tracer.statedb.GetBalance(scope.Contract.Caller()))
+ tracer.descend(&call{
+ Type: op.String(),
+ From: scope.Contract.Caller(),
+ To: toAddress(scope.Stack.Back(0)),
+ // TODO: Is this input correct?
+ Input: scope.Contract.Input,
+ Value: &hvalue,
+ gasIn: gas,
+ gasCost: cost,
+ startTime: time.Now(),
+ })
+ return
+ }
+ if op == vm.CALL || op == vm.CALLCODE || op == vm.DELEGATECALL || op == vm.STATICCALL {
+ toAddress := toAddress(scope.Stack.Back(1))
+ if _, isPrecompile := vm.PrecompiledContractsIstanbul[toAddress]; isPrecompile {
+ return
+ }
+ off := 1
+ if op == vm.DELEGATECALL || op == vm.STATICCALL {
+ off = 0
+ }
+ inOff := scope.Stack.Back(2 + off).Uint64()
+ inLength := scope.Stack.Back(3 + off).Uint64()
+ newCall := &call{
+ Type: op.String(),
+ From: scope.Contract.Address(),
+ To: toAddress,
+ Input: scope.Memory.GetCopy(int64(inOff), int64(inLength)),
+ gasIn: gas,
+ gasCost: cost,
+ outOff: scope.Stack.Back(4 + off).Uint64(),
+ outLen: scope.Stack.Back(5 + off).Uint64(),
+ startTime: time.Now(),
+ }
+ if off == 1 {
+ value := hexutil.Big(*new(big.Int).SetBytes(scope.Stack.Back(2).Bytes()))
+ newCall.Value = &value
+ }
+ tracer.descend(newCall)
+ return
+ }
+ if tracer.descended {
+ if depth >= len(tracer.callStack) {
+ tracer.callStack[tracer.i()].Gas = hexutil.Uint64(gas)
+ }
+ tracer.descended = false
+ }
+ if op == vm.REVERT {
+ tracer.callStack[tracer.i()].Error = "execution reverted"
+ return
+ }
+ if depth == len(tracer.callStack)-1 {
+ c := tracer.callStack[tracer.i()]
+ // c.Time = fmt.Sprintf("%v", time.Since(c.startTime))
+ tracer.callStack = tracer.callStack[:len(tracer.callStack)-1]
+ if vm.StringToOp(c.Type) == vm.CREATE || vm.StringToOp(c.Type) == vm.CREATE2 {
+ c.GasUsed = hexutil.Uint64(c.gasIn - c.gasCost - gas)
+ ret := scope.Stack.Back(0)
+ if ret.Uint64() != 0 {
+ c.To = common.BytesToAddress(ret.Bytes())
+ c.Output = tracer.statedb.GetCode(c.To)
+ } else if c.Error == "" {
+ c.Error = "internal failure"
+ }
+ } else {
+ c.GasUsed = hexutil.Uint64(c.gasIn - c.gasCost + uint64(c.Gas) - gas)
+ ret := scope.Stack.Back(0)
+ if ret.Uint64() != 0 {
+ c.Output = hexutil.Bytes(scope.Memory.GetCopy(int64(c.outOff), int64(c.outLen)))
+ } else if c.Error == "" {
+ c.Error = "internal failure"
+ }
+ }
+ }
+ return
+}
+func (tracer *CallTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.ScopeContext, depth int, err error) {
+}
+func (tracer *CallTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+func (tracer *CallTracer) CaptureExit(output []byte, gasUsed uint64, err error) {}
diff --git a/core/state_transition.go b/core/state_transition.go
index 135a9c6dbe85..05d5633075b9 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -310,7 +310,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
}
// Set up the initial access list.
- if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsBerlin {
+ if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil); rules.IsBerlin {
st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList())
}
var (
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 141410602ea5..c108355f204e 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -395,7 +395,7 @@ func (pool *TxPool) loop() {
pool.removeTx(tx.Hash(), true)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: list,
+ Txs: list,
Reason: dropOld,
})
queuedEvictionMeter.Mark(int64(len(list)))
@@ -474,7 +474,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) {
}
pool.priced.Removed(len(drop))
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: drop,
+ Txs: drop,
Reason: dropGasPriceUpdated,
})
}
@@ -728,7 +728,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
pool.removeTx(tx.Hash(), false)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: drop,
+ Txs: drop,
Reason: dropUnderpriced,
})
}
@@ -747,8 +747,8 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
pool.priced.Removed(1)
pendingReplaceMeter.Mark(1)
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: []*types.Transaction{old},
- Reason: dropReplaced,
+ Txs: []*types.Transaction{old},
+ Reason: dropReplaced,
Replacement: tx,
})
}
@@ -803,7 +803,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local boo
pool.priced.Removed(1)
queuedReplaceMeter.Mark(1)
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: []*types.Transaction{old},
+ Txs: []*types.Transaction{old},
Reason: dropReplaced,
})
} else {
@@ -863,7 +863,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
pool.priced.Removed(1)
pendingReplaceMeter.Mark(1)
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: []*types.Transaction{old},
+ Txs: []*types.Transaction{old},
Reason: dropReplaced,
})
} else {
@@ -965,7 +965,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
}
if err != nil {
pool.rejectTxFeed.Send(RejectedTxEvent{
- Tx: txs[nilSlot],
+ Tx: txs[nilSlot],
Reason: err,
})
}
@@ -1065,7 +1065,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
// Reduce the pending counter
pendingGauge.Dec(int64(1 + len(invalids)))
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: invalids,
+ Txs: invalids,
Reason: dropUnexecutable,
})
return
@@ -1375,7 +1375,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
}
log.Trace("Removed old queued transactions", "count", len(forwards))
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: forwards,
+ Txs: forwards,
Reason: dropLowNonce,
})
// Drop all transactions that are too costly (low balance or out of gas)
@@ -1387,7 +1387,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
log.Trace("Removed unpayable queued transactions", "count", len(drops))
queuedNofundsMeter.Mark(int64(len(drops)))
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: drops,
+ Txs: drops,
Reason: dropUnpayable,
})
@@ -1413,7 +1413,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
}
queuedRateLimitMeter.Mark(int64(len(caps)))
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: caps,
+ Txs: caps,
Reason: dropAccountCap,
})
}
@@ -1481,7 +1481,7 @@ func (pool *TxPool) truncatePending() {
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: caps,
+ Txs: caps,
Reason: dropAccountCap,
})
pool.priced.Removed(len(caps))
@@ -1512,7 +1512,7 @@ func (pool *TxPool) truncatePending() {
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: caps,
+ Txs: caps,
Reason: dropAccountCap,
})
pool.priced.Removed(len(caps))
@@ -1560,7 +1560,7 @@ func (pool *TxPool) truncateQueue() {
pool.removeTx(tx.Hash(), true)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: txs,
+ Txs: txs,
Reason: dropTruncating,
})
drop -= size
@@ -1574,7 +1574,7 @@ func (pool *TxPool) truncateQueue() {
drop--
queuedRateLimitMeter.Mark(1)
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: []*types.Transaction{txs[i]},
+ Txs: []*types.Transaction{txs[i]},
Reason: dropTruncating,
})
}
@@ -1601,7 +1601,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Removed old pending transaction", "hash", hash)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: olds,
+ Txs: olds,
Reason: dropLowNonce,
})
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
@@ -1612,7 +1612,7 @@ func (pool *TxPool) demoteUnexecutables() {
pool.all.Remove(hash)
}
pool.dropTxFeed.Send(DropTxsEvent{
- Txs: drops,
+ Txs: drops,
Reason: dropUnpayable,
})
pool.priced.Removed(len(olds) + len(drops))
diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go
index ee5f194b77b8..8ad5e739e9eb 100644
--- a/core/types/access_list_tx.go
+++ b/core/types/access_list_tx.go
@@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-//go:generate gencodec -type AccessTuple -out gen_access_tuple.go
+//go:generate go run github.com/fjl/gencodec@latest -type AccessTuple -out gen_access_tuple.go
// AccessList is an EIP-2930 access list.
type AccessList []AccessTuple
diff --git a/core/types/block.go b/core/types/block.go
index f38c55c1ffd7..314990dc99af 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -63,7 +63,8 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
}
-//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
+//go:generate go run github.com/fjl/gencodec@latest -type Header -field-override headerMarshaling -out gen_header_json.go
+//go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go
// Header represents a block header in the Ethereum blockchain.
type Header struct {
diff --git a/core/types/block_test.go b/core/types/block_test.go
index 5cdea3fc06e0..aa1db2f4faad 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -285,7 +285,7 @@ func makeBenchBlock() *Block {
func TestRlpDecodeParentHash(t *testing.T) {
// A minimum one
want := common.HexToHash("0x112233445566778899001122334455667788990011223344556677889900aabb")
- if rlpData, err := rlp.EncodeToBytes(Header{ParentHash: want}); err != nil {
+ if rlpData, err := rlp.EncodeToBytes(&Header{ParentHash: want}); err != nil {
t.Fatal(err)
} else {
if have := HeaderParentHashFromRLP(rlpData); have != want {
@@ -299,7 +299,7 @@ func TestRlpDecodeParentHash(t *testing.T) {
// | BaseFee | dynamic| *big.Int | 64 bits |
mainnetTd := new(big.Int)
mainnetTd.SetString("5ad3c2c71bbff854908", 16)
- if rlpData, err := rlp.EncodeToBytes(Header{
+ if rlpData, err := rlp.EncodeToBytes(&Header{
ParentHash: want,
Difficulty: mainnetTd,
Number: new(big.Int).SetUint64(math.MaxUint64),
@@ -316,7 +316,7 @@ func TestRlpDecodeParentHash(t *testing.T) {
{
// The rlp-encoding of the heder belowCauses _total_ length of 65540,
// which is the first to blow the fast-path.
- h := Header{
+ h := &Header{
ParentHash: want,
Extra: make([]byte, 65041),
}
diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go
new file mode 100644
index 000000000000..5181d884112f
--- /dev/null
+++ b/core/types/gen_account_rlp.go
@@ -0,0 +1,27 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+//go:build !norlpgen
+// +build !norlpgen
+
+package types
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *StateAccount) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteUint64(obj.Nonce)
+ if obj.Balance == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Balance.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Balance)
+ }
+ w.WriteBytes(obj.Root[:])
+ w.WriteBytes(obj.CodeHash)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go
new file mode 100644
index 000000000000..e1a687331853
--- /dev/null
+++ b/core/types/gen_header_rlp.go
@@ -0,0 +1,56 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+//go:build !norlpgen
+// +build !norlpgen
+
+package types
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *Header) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteBytes(obj.ParentHash[:])
+ w.WriteBytes(obj.UncleHash[:])
+ w.WriteBytes(obj.Coinbase[:])
+ w.WriteBytes(obj.Root[:])
+ w.WriteBytes(obj.TxHash[:])
+ w.WriteBytes(obj.ReceiptHash[:])
+ w.WriteBytes(obj.Bloom[:])
+ if obj.Difficulty == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Difficulty.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Difficulty)
+ }
+ if obj.Number == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Number.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Number)
+ }
+ w.WriteUint64(obj.GasLimit)
+ w.WriteUint64(obj.GasUsed)
+ w.WriteUint64(obj.Time)
+ w.WriteBytes(obj.Extra)
+ w.WriteBytes(obj.MixDigest[:])
+ w.WriteBytes(obj.Nonce[:])
+ _tmp1 := obj.BaseFee != nil
+ if _tmp1 {
+ if obj.BaseFee == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.BaseFee.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.BaseFee)
+ }
+ }
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go
new file mode 100644
index 000000000000..4a6c6b0094f8
--- /dev/null
+++ b/core/types/gen_log_rlp.go
@@ -0,0 +1,23 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+//go:build !norlpgen
+// +build !norlpgen
+
+package types
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *rlpLog) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteBytes(obj.Address[:])
+ _tmp1 := w.List()
+ for _, _tmp2 := range obj.Topics {
+ w.WriteBytes(_tmp2[:])
+ }
+ w.ListEnd(_tmp1)
+ w.WriteBytes(obj.Data)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
diff --git a/core/types/legacy.go b/core/types/legacy.go
new file mode 100644
index 000000000000..9254381b1e11
--- /dev/null
+++ b/core/types/legacy.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package types
+
+import (
+ "errors"
+
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// IsLegacyStoredReceipts tries to parse the RLP-encoded blob
+// first as an array of v3 stored receipt, then v4 stored receipt and
+// returns true if successful.
+func IsLegacyStoredReceipts(raw []byte) (bool, error) {
+ var v3 []v3StoredReceiptRLP
+ if err := rlp.DecodeBytes(raw, &v3); err == nil {
+ return true, nil
+ }
+ var v4 []v4StoredReceiptRLP
+ if err := rlp.DecodeBytes(raw, &v4); err == nil {
+ return true, nil
+ }
+ var v5 []storedReceiptRLP
+ // Check to see valid fresh stored receipt
+ if err := rlp.DecodeBytes(raw, &v5); err == nil {
+ return false, nil
+ }
+ return false, errors.New("value is not a valid receipt encoding")
+}
+
+// ConvertLegacyStoredReceipts takes the RLP encoding of an array of legacy
+// stored receipts and returns a fresh RLP-encoded stored receipt.
+func ConvertLegacyStoredReceipts(raw []byte) ([]byte, error) {
+ var receipts []ReceiptForStorage
+ if err := rlp.DecodeBytes(raw, &receipts); err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(&receipts)
+}
diff --git a/core/types/log.go b/core/types/log.go
index 88274e39dae0..b27c7ccbd3a9 100644
--- a/core/types/log.go
+++ b/core/types/log.go
@@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-//go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go
+//go:generate go run github.com/fjl/gencodec@latest -type Log -field-override logMarshaling -out gen_log_json.go
// Log represents a contract log event. These events are generated by the LOG opcode and
// stored/indexed by the node.
@@ -62,15 +62,14 @@ type logMarshaling struct {
Index hexutil.Uint
}
+//go:generate go run ../../rlp/rlpgen -type rlpLog -out gen_log_rlp.go
+
type rlpLog struct {
Address common.Address
Topics []common.Hash
Data []byte
}
-// rlpStorageLog is the storage encoding of a log.
-type rlpStorageLog rlpLog
-
// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields.
type legacyRlpStorageLog struct {
Address common.Address
@@ -85,7 +84,8 @@ type legacyRlpStorageLog struct {
// EncodeRLP implements rlp.Encoder.
func (l *Log) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data})
+ rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}
+ return rlp.Encode(w, &rl)
}
// DecodeRLP implements rlp.Decoder.
@@ -104,11 +104,8 @@ type LogForStorage Log
// EncodeRLP implements rlp.Encoder.
func (l *LogForStorage) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, rlpStorageLog{
- Address: l.Address,
- Topics: l.Topics,
- Data: l.Data,
- })
+ rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}
+ return rlp.Encode(w, &rl)
}
// DecodeRLP implements rlp.Decoder.
@@ -119,7 +116,7 @@ func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error {
if err != nil {
return err
}
- var dec rlpStorageLog
+ var dec rlpLog
err = rlp.DecodeBytes(blob, &dec)
if err == nil {
*l = LogForStorage{
diff --git a/core/types/receipt.go b/core/types/receipt.go
index c3588990c00f..03e2d7500e02 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -31,15 +31,14 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
+//go:generate go run github.com/fjl/gencodec@latest -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
var (
receiptStatusFailedRLP = []byte{}
receiptStatusSuccessfulRLP = []byte{0x01}
)
-// This error is returned when a typed receipt is decoded, but the string is empty.
-var errEmptyTypedReceipt = errors.New("empty typed receipt bytes")
+var errShortTypedReceipt = errors.New("typed receipt too short")
const (
// ReceiptStatusFailed is the status code of a transaction if execution failed.
@@ -182,26 +181,13 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
}
r.Type = LegacyTxType
return r.setFromRLP(dec)
- case kind == rlp.String:
+ default:
// It's an EIP-2718 typed tx receipt.
b, err := s.Bytes()
if err != nil {
return err
}
- if len(b) == 0 {
- return errEmptyTypedReceipt
- }
- r.Type = b[0]
- if r.Type == AccessListTxType || r.Type == DynamicFeeTxType {
- var dec receiptRLP
- if err := rlp.DecodeBytes(b[1:], &dec); err != nil {
- return err
- }
- return r.setFromRLP(dec)
- }
- return ErrTxTypeNotSupported
- default:
- return rlp.ErrExpectedList
+ return r.decodeTyped(b)
}
}
@@ -224,8 +210,8 @@ func (r *Receipt) UnmarshalBinary(b []byte) error {
// decodeTyped decodes a typed receipt from the canonical format.
func (r *Receipt) decodeTyped(b []byte) error {
- if len(b) == 0 {
- return errEmptyTypedReceipt
+ if len(b) <= 1 {
+ return errShortTypedReceipt
}
switch b[0] {
case DynamicFeeTxType, AccessListTxType:
@@ -287,16 +273,20 @@ type ReceiptForStorage Receipt
// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
// into an RLP stream.
-func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
- enc := &storedReceiptRLP{
- PostStateOrStatus: (*Receipt)(r).statusEncoding(),
- CumulativeGasUsed: r.CumulativeGasUsed,
- Logs: make([]*LogForStorage, len(r.Logs)),
- }
- for i, log := range r.Logs {
- enc.Logs[i] = (*LogForStorage)(log)
+func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ outerList := w.List()
+ w.WriteBytes((*Receipt)(r).statusEncoding())
+ w.WriteUint64(r.CumulativeGasUsed)
+ logList := w.List()
+ for _, log := range r.Logs {
+ if err := rlp.Encode(w, log); err != nil {
+ return err
+ }
}
- return rlp.Encode(w, enc)
+ w.ListEnd(logList)
+ w.ListEnd(outerList)
+ return w.Flush()
}
// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index 613559a6586c..bba18d2a7bf3 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -86,7 +86,7 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
input := []byte{0x80}
var r Receipt
err := rlp.DecodeBytes(input, &r)
- if err != errEmptyTypedReceipt {
+ if err != errShortTypedReceipt {
t.Fatal("wrong error:", err)
}
}
diff --git a/core/types/state_account.go b/core/types/state_account.go
index 68804bf311f4..3b01be45198f 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -22,6 +22,8 @@ import (
"github.com/ethereum/go-ethereum/common"
)
+//go:generate go run ../../rlp/rlpgen -type StateAccount -out gen_account_rlp.go
+
// StateAccount is the Ethereum consensus representation of accounts.
// These objects are stored in the main account trie.
type StateAccount struct {
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 83f1766e67e2..1ac0b712e1cd 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -37,7 +37,7 @@ var (
ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
- errEmptyTypedTx = errors.New("empty typed transaction bytes")
+ errShortTypedTx = errors.New("typed transaction too short")
)
// Transaction types.
@@ -134,7 +134,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
tx.setDecoded(&inner, int(rlp.ListSize(size)))
}
return err
- case kind == rlp.String:
+ default:
// It's an EIP-2718 typed TX envelope.
var b []byte
if b, err = s.Bytes(); err != nil {
@@ -145,8 +145,6 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
tx.setDecoded(inner, len(b))
}
return err
- default:
- return rlp.ErrExpectedList
}
}
@@ -174,8 +172,8 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
// decodeTyped decodes a typed transaction from the canonical format.
func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
- if len(b) == 0 {
- return nil, errEmptyTypedTx
+ if len(b) <= 1 {
+ return nil, errShortTypedTx
}
switch b[0] {
case AccessListTxType:
@@ -611,6 +609,7 @@ func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) {
msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.gasTipCap, baseFee), msg.gasFeeCap)
}
var err error
+ // recover sender address
msg.from, err = Sender(s, tx)
return msg, err
}
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 58c95071b288..a4755675cd25 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -76,7 +76,7 @@ func TestDecodeEmptyTypedTx(t *testing.T) {
input := []byte{0x80}
var tx Transaction
err := rlp.DecodeBytes(input, &tx)
- if err != errEmptyTypedTx {
+ if err != errShortTypedTx {
t.Fatal("wrong error:", err)
}
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 2c7880b3bf38..dd55618bf812 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -75,6 +75,7 @@ type BlockContext struct {
Time *big.Int // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY
BaseFee *big.Int // Provides information for BASEFEE
+ Random *common.Hash // Provides information for RANDOM
}
// TxContext provides the EVM with information about a transaction.
@@ -131,7 +132,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
StateDB: statedb,
Config: config,
chainConfig: chainConfig,
- chainRules: chainConfig.Rules(blockCtx.BlockNumber),
+ chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil),
}
evm.interpreter = NewEVMInterpreter(evm, config)
return evm
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 4eda3bf5317f..db507c481100 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -477,6 +477,12 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
return nil, nil
}
+func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ v := new(uint256.Int).SetBytes((interpreter.evm.Context.Random.Bytes()))
+ scope.Stack.push(v)
+ return nil, nil
+}
+
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
return nil, nil
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index e67acd83271f..36589a126957 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -654,3 +655,36 @@ func TestCreate2Addreses(t *testing.T) {
}
}
}
+
+func TestRandom(t *testing.T) {
+ type testcase struct {
+ name string
+ random common.Hash
+ }
+
+ for _, tt := range []testcase{
+ {name: "empty hash", random: common.Hash{}},
+ {name: "1", random: common.Hash{0}},
+ {name: "emptyCodeHash", random: emptyCodeHash},
+ {name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
+ } {
+ var (
+ env = NewEVM(BlockContext{Random: &tt.random}, TxContext{}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ pc = uint64(0)
+ evmInterpreter = env.interpreter
+ )
+ opRandom(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
+ if len(stack.data) != 1 {
+ t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
+ }
+ actual := stack.pop()
+ expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.random.Bytes()))
+ if overflow {
+ t.Errorf("Testcase %v: invalid overflow", tt.name)
+ }
+ if actual.Cmp(expected) != 0 {
+ t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual)
+ }
+ }
+}
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 1660e3ce0ff1..21e3c914e139 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -69,6 +69,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
// If jump table was not initialised we set the default one.
if cfg.JumpTable == nil {
switch {
+ case evm.chainRules.IsMerge:
+ cfg.JumpTable = &mergeInstructionSet
case evm.chainRules.IsLondon:
cfg.JumpTable = &londonInstructionSet
case evm.chainRules.IsBerlin:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 6dea5d81f33c..eef3b53d8c66 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -54,6 +54,7 @@ var (
istanbulInstructionSet = newIstanbulInstructionSet()
berlinInstructionSet = newBerlinInstructionSet()
londonInstructionSet = newLondonInstructionSet()
+ mergeInstructionSet = newMergeInstructionSet()
)
// JumpTable contains the EVM opcodes supported at a given fork.
@@ -77,6 +78,17 @@ func validate(jt JumpTable) JumpTable {
return jt
}
+func newMergeInstructionSet() JumpTable {
+ instructionSet := newLondonInstructionSet()
+ instructionSet[RANDOM] = &operation{
+ execute: opRandom,
+ constantGas: GasQuickStep,
+ minStack: minStack(0, 1),
+ maxStack: maxStack(0, 1),
+ }
+ return validate(instructionSet)
+}
+
// newLondonInstructionSet returns the frontier, homestead, byzantium,
// contantinople, istanbul, petersburg, berlin and london instructions.
func newLondonInstructionSet() JumpTable {
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index a1833e510966..ba70fa09d486 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -95,6 +95,7 @@ const (
TIMESTAMP OpCode = 0x42
NUMBER OpCode = 0x43
DIFFICULTY OpCode = 0x44
+ RANDOM OpCode = 0x44 // Same as DIFFICULTY
GASLIMIT OpCode = 0x45
CHAINID OpCode = 0x46
SELFBALANCE OpCode = 0x47
@@ -275,7 +276,7 @@ var opCodeToString = map[OpCode]string{
COINBASE: "COINBASE",
TIMESTAMP: "TIMESTAMP",
NUMBER: "NUMBER",
- DIFFICULTY: "DIFFICULTY",
+ DIFFICULTY: "DIFFICULTY", // TODO (MariusVanDerWijden) rename to RANDOM post merge
GASLIMIT: "GASLIMIT",
CHAINID: "CHAINID",
SELFBALANCE: "SELFBALANCE",
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index 483226eefad8..551e1f5f1188 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -214,7 +214,7 @@ var (
// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200)
- // gasSStoreEIP2539 implements gas cost for SSTORE according to EPI-2539
+ // gasSStoreEIP2539 implements gas cost for SSTORE according to EIP-2539
// Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800)
gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529)
)
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 103ce3e175ff..7861fb92dba3 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -118,7 +118,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
- if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
+ if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}
cfg.State.CreateAccount(address)
@@ -150,7 +150,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
- if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
+ if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
cfg.State.PrepareAccessList(cfg.Origin, nil, vm.ActivePrecompiles(rules), nil)
}
// Call the code with the given configuration.
@@ -176,7 +176,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
sender := cfg.State.GetOrNewStateObject(cfg.Origin)
statedb := cfg.State
- if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
+ if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
statedb.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}
// Call the code with the given configuration.
diff --git a/crypto/bls12381/bls12_381.go b/crypto/bls12381/bls12_381.go
index e204a927d1a9..1c1c97765f47 100644
--- a/crypto/bls12381/bls12_381.go
+++ b/crypto/bls12381/bls12_381.go
@@ -119,105 +119,105 @@ var g2One = PointG2{
*/
var frobeniusCoeffs61 = [6]fe2{
- fe2{
+ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
},
- fe2{
+ {
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
},
- fe2{
+ {
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
},
}
var frobeniusCoeffs62 = [6]fe2{
- fe2{
+ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x890dc9e4867545c3, 0x2af322533285a5d5, 0x50880866309b7e2c, 0xa20d1b8c7e881024, 0x14e4f04fe2db9068, 0x14e56d3f1564853a},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x43f5fffffffcaaae, 0x32b7fff2ed47fffd, 0x07e83a49a2e99d69, 0xeca8f3318332bb7a, 0xef148d1ea0f4c069, 0x040ab3263eff0206},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0xecfb361b798dba3a, 0xc100ddb891865a2c, 0x0ec08ff1232bda8e, 0xd5c13cc6f1ca4721, 0x47222a47bf7b5c04, 0x0110f184e51c5f59},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
}
var frobeniusCoeffs12 = [12]fe2{
- fe2{
+ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x07089552b319d465, 0xc6695f92b50a8313, 0x97e83cccd117228f, 0xa35baecab2dc29ee, 0x1ce393ea5daace4d, 0x08f2220fb0fb66eb},
fe{0xb2f66aad4ce5d646, 0x5842a06bfc497cec, 0xcf4895d42599d394, 0xc11b9cba40a8e8d0, 0x2e3813cbe5a0de89, 0x110eefda88847faf},
},
- fe2{
+ {
fe{0xecfb361b798dba3a, 0xc100ddb891865a2c, 0x0ec08ff1232bda8e, 0xd5c13cc6f1ca4721, 0x47222a47bf7b5c04, 0x0110f184e51c5f59},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x3e2f585da55c9ad1, 0x4294213d86c18183, 0x382844c88b623732, 0x92ad2afd19103e18, 0x1d794e4fac7cf0b9, 0x0bd592fc7d825ec8},
fe{0x7bcfa7a25aa30fda, 0xdc17dec12a927e7c, 0x2f088dd86b4ebef1, 0xd1ca2087da74d4a7, 0x2da2596696cebc1d, 0x0e2b7eedbbfd87d2},
},
- fe2{
+ {
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x3726c30af242c66c, 0x7c2ac1aad1b6fe70, 0xa04007fbba4b14a2, 0xef517c3266341429, 0x0095ba654ed2226b, 0x02e370eccc86f7dd},
fe{0x82d83cf50dbce43f, 0xa2813e53df9d018f, 0xc6f0caa53c65e181, 0x7525cf528d50fe95, 0x4a85ed50f4798a6b, 0x171da0fd6cf8eebd},
},
- fe2{
+ {
fe{0x43f5fffffffcaaae, 0x32b7fff2ed47fffd, 0x07e83a49a2e99d69, 0xeca8f3318332bb7a, 0xef148d1ea0f4c069, 0x040ab3263eff0206},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0xb2f66aad4ce5d646, 0x5842a06bfc497cec, 0xcf4895d42599d394, 0xc11b9cba40a8e8d0, 0x2e3813cbe5a0de89, 0x110eefda88847faf},
fe{0x07089552b319d465, 0xc6695f92b50a8313, 0x97e83cccd117228f, 0xa35baecab2dc29ee, 0x1ce393ea5daace4d, 0x08f2220fb0fb66eb},
},
- fe2{
+ {
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x7bcfa7a25aa30fda, 0xdc17dec12a927e7c, 0x2f088dd86b4ebef1, 0xd1ca2087da74d4a7, 0x2da2596696cebc1d, 0x0e2b7eedbbfd87d2},
fe{0x3e2f585da55c9ad1, 0x4294213d86c18183, 0x382844c88b623732, 0x92ad2afd19103e18, 0x1d794e4fac7cf0b9, 0x0bd592fc7d825ec8},
},
- fe2{
+ {
fe{0x890dc9e4867545c3, 0x2af322533285a5d5, 0x50880866309b7e2c, 0xa20d1b8c7e881024, 0x14e4f04fe2db9068, 0x14e56d3f1564853a},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
},
- fe2{
+ {
fe{0x82d83cf50dbce43f, 0xa2813e53df9d018f, 0xc6f0caa53c65e181, 0x7525cf528d50fe95, 0x4a85ed50f4798a6b, 0x171da0fd6cf8eebd},
fe{0x3726c30af242c66c, 0x7c2ac1aad1b6fe70, 0xa04007fbba4b14a2, 0xef517c3266341429, 0x0095ba654ed2226b, 0x02e370eccc86f7dd},
},
diff --git a/crypto/bls12381/isogeny.go b/crypto/bls12381/isogeny.go
index 91e03936d57d..c3cb0a6f7bf0 100644
--- a/crypto/bls12381/isogeny.go
+++ b/crypto/bls12381/isogeny.go
@@ -77,149 +77,149 @@ func isogenyMapG2(e *fp2, x, y *fe2) {
}
var isogenyConstansG1 = [4][16]*fe{
- [16]*fe{
- &fe{0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4},
- &fe{0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad},
- &fe{0xa542583a480b664b, 0xfc7169c026e568c6, 0x5ba2ef314ed8b5a6, 0x5b5491c05102f0e7, 0xdf6e99707d2a0079, 0x0784151ed7605524},
- &fe{0x494e212870f72741, 0xab9be52fbda43021, 0x26f5577994e34c3d, 0x049dfee82aefbd60, 0x65dadd7828505289, 0x0e93d431ea011aeb},
- &fe{0x90ee774bd6a74d45, 0x7ada1c8a41bfb185, 0x0f1a8953b325f464, 0x104c24211be4805c, 0x169139d319ea7a8f, 0x09f20ead8e532bf6},
- &fe{0x6ddd93e2f43626b7, 0xa5482c9aa1ccd7bd, 0x143245631883f4bd, 0x2e0a94ccf77ec0db, 0xb0282d480e56489f, 0x18f4bfcbb4368929},
- &fe{0x23c5f0c953402dfd, 0x7a43ff6958ce4fe9, 0x2c390d3d2da5df63, 0xd0df5c98e1f9d70f, 0xffd89869a572b297, 0x1277ffc72f25e8fe},
- &fe{0x79f4f0490f06a8a6, 0x85f894a88030fd81, 0x12da3054b18b6410, 0xe2a57f6505880d65, 0xbba074f260e400f1, 0x08b76279f621d028},
- &fe{0xe67245ba78d5b00b, 0x8456ba9a1f186475, 0x7888bff6e6b33bb4, 0xe21585b9a30f86cb, 0x05a69cdcef55feee, 0x09e699dd9adfa5ac},
- &fe{0x0de5c357bff57107, 0x0a0db4ae6b1a10b2, 0xe256bb67b3b3cd8d, 0x8ad456574e9db24f, 0x0443915f50fd4179, 0x098c4bf7de8b6375},
- &fe{0xe6b0617e7dd929c7, 0xfe6e37d442537375, 0x1dafdeda137a489e, 0xe4efd1ad3f767ceb, 0x4a51d8667f0fe1cf, 0x054fdf4bbf1d821c},
- &fe{0x72db2a50658d767b, 0x8abf91faa257b3d5, 0xe969d6833764ab47, 0x464170142a1009eb, 0xb14f01aadb30be2f, 0x18ae6a856f40715d},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
+ {
+ {0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4},
+ {0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad},
+ {0xa542583a480b664b, 0xfc7169c026e568c6, 0x5ba2ef314ed8b5a6, 0x5b5491c05102f0e7, 0xdf6e99707d2a0079, 0x0784151ed7605524},
+ {0x494e212870f72741, 0xab9be52fbda43021, 0x26f5577994e34c3d, 0x049dfee82aefbd60, 0x65dadd7828505289, 0x0e93d431ea011aeb},
+ {0x90ee774bd6a74d45, 0x7ada1c8a41bfb185, 0x0f1a8953b325f464, 0x104c24211be4805c, 0x169139d319ea7a8f, 0x09f20ead8e532bf6},
+ {0x6ddd93e2f43626b7, 0xa5482c9aa1ccd7bd, 0x143245631883f4bd, 0x2e0a94ccf77ec0db, 0xb0282d480e56489f, 0x18f4bfcbb4368929},
+ {0x23c5f0c953402dfd, 0x7a43ff6958ce4fe9, 0x2c390d3d2da5df63, 0xd0df5c98e1f9d70f, 0xffd89869a572b297, 0x1277ffc72f25e8fe},
+ {0x79f4f0490f06a8a6, 0x85f894a88030fd81, 0x12da3054b18b6410, 0xe2a57f6505880d65, 0xbba074f260e400f1, 0x08b76279f621d028},
+ {0xe67245ba78d5b00b, 0x8456ba9a1f186475, 0x7888bff6e6b33bb4, 0xe21585b9a30f86cb, 0x05a69cdcef55feee, 0x09e699dd9adfa5ac},
+ {0x0de5c357bff57107, 0x0a0db4ae6b1a10b2, 0xe256bb67b3b3cd8d, 0x8ad456574e9db24f, 0x0443915f50fd4179, 0x098c4bf7de8b6375},
+ {0xe6b0617e7dd929c7, 0xfe6e37d442537375, 0x1dafdeda137a489e, 0xe4efd1ad3f767ceb, 0x4a51d8667f0fe1cf, 0x054fdf4bbf1d821c},
+ {0x72db2a50658d767b, 0x8abf91faa257b3d5, 0xe969d6833764ab47, 0x464170142a1009eb, 0xb14f01aadb30be2f, 0x18ae6a856f40715d},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
},
- [16]*fe{
- &fe{0xb962a077fdb0f945, 0xa6a9740fefda13a0, 0xc14d568c3ed6c544, 0xb43fc37b908b133e, 0x9c0b3ac929599016, 0x0165aa6c93ad115f},
- &fe{0x23279a3ba506c1d9, 0x92cfca0a9465176a, 0x3b294ab13755f0ff, 0x116dda1c5070ae93, 0xed4530924cec2045, 0x083383d6ed81f1ce},
- &fe{0x9885c2a6449fecfc, 0x4a2b54ccd37733f0, 0x17da9ffd8738c142, 0xa0fba72732b3fafd, 0xff364f36e54b6812, 0x0f29c13c660523e2},
- &fe{0xe349cc118278f041, 0xd487228f2f3204fb, 0xc9d325849ade5150, 0x43a92bd69c15c2df, 0x1c2c7844bc417be4, 0x12025184f407440c},
- &fe{0x587f65ae6acb057b, 0x1444ef325140201f, 0xfbf995e71270da49, 0xccda066072436a42, 0x7408904f0f186bb2, 0x13b93c63edf6c015},
- &fe{0xfb918622cd141920, 0x4a4c64423ecaddb4, 0x0beb232927f7fb26, 0x30f94df6f83a3dc2, 0xaeedd424d780f388, 0x06cc402dd594bbeb},
- &fe{0xd41f761151b23f8f, 0x32a92465435719b3, 0x64f436e888c62cb9, 0xdf70a9a1f757c6e4, 0x6933a38d5b594c81, 0x0c6f7f7237b46606},
- &fe{0x693c08747876c8f7, 0x22c9850bf9cf80f0, 0x8e9071dab950c124, 0x89bc62d61c7baf23, 0xbc6be2d8dad57c23, 0x17916987aa14a122},
- &fe{0x1be3ff439c1316fd, 0x9965243a7571dfa7, 0xc7f7f62962f5cd81, 0x32c6aa9af394361c, 0xbbc2ee18e1c227f4, 0x0c102cbac531bb34},
- &fe{0x997614c97bacbf07, 0x61f86372b99192c0, 0x5b8c95fc14353fc3, 0xca2b066c2a87492f, 0x16178f5bbf698711, 0x12a6dcd7f0f4e0e8},
- &fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
- &fe{0, 0, 0, 0, 0, 0},
+ {
+ {0xb962a077fdb0f945, 0xa6a9740fefda13a0, 0xc14d568c3ed6c544, 0xb43fc37b908b133e, 0x9c0b3ac929599016, 0x0165aa6c93ad115f},
+ {0x23279a3ba506c1d9, 0x92cfca0a9465176a, 0x3b294ab13755f0ff, 0x116dda1c5070ae93, 0xed4530924cec2045, 0x083383d6ed81f1ce},
+ {0x9885c2a6449fecfc, 0x4a2b54ccd37733f0, 0x17da9ffd8738c142, 0xa0fba72732b3fafd, 0xff364f36e54b6812, 0x0f29c13c660523e2},
+ {0xe349cc118278f041, 0xd487228f2f3204fb, 0xc9d325849ade5150, 0x43a92bd69c15c2df, 0x1c2c7844bc417be4, 0x12025184f407440c},
+ {0x587f65ae6acb057b, 0x1444ef325140201f, 0xfbf995e71270da49, 0xccda066072436a42, 0x7408904f0f186bb2, 0x13b93c63edf6c015},
+ {0xfb918622cd141920, 0x4a4c64423ecaddb4, 0x0beb232927f7fb26, 0x30f94df6f83a3dc2, 0xaeedd424d780f388, 0x06cc402dd594bbeb},
+ {0xd41f761151b23f8f, 0x32a92465435719b3, 0x64f436e888c62cb9, 0xdf70a9a1f757c6e4, 0x6933a38d5b594c81, 0x0c6f7f7237b46606},
+ {0x693c08747876c8f7, 0x22c9850bf9cf80f0, 0x8e9071dab950c124, 0x89bc62d61c7baf23, 0xbc6be2d8dad57c23, 0x17916987aa14a122},
+ {0x1be3ff439c1316fd, 0x9965243a7571dfa7, 0xc7f7f62962f5cd81, 0x32c6aa9af394361c, 0xbbc2ee18e1c227f4, 0x0c102cbac531bb34},
+ {0x997614c97bacbf07, 0x61f86372b99192c0, 0x5b8c95fc14353fc3, 0xca2b066c2a87492f, 0x16178f5bbf698711, 0x12a6dcd7f0f4e0e8},
+ {0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
},
- [16]*fe{
- &fe{0x2b567ff3e2837267, 0x1d4d9e57b958a767, 0xce028fea04bd7373, 0xcc31a30a0b6cd3df, 0x7d7b18a682692693, 0x0d300744d42a0310},
- &fe{0x99c2555fa542493f, 0xfe7f53cc4874f878, 0x5df0608b8f97608a, 0x14e03832052b49c8, 0x706326a6957dd5a4, 0x0a8dadd9c2414555},
- &fe{0x13d942922a5cf63a, 0x357e33e36e261e7d, 0xcf05a27c8456088d, 0x0000bd1de7ba50f0, 0x83d0c7532f8c1fde, 0x13f70bf38bbf2905},
- &fe{0x5c57fd95bfafbdbb, 0x28a359a65e541707, 0x3983ceb4f6360b6d, 0xafe19ff6f97e6d53, 0xb3468f4550192bf7, 0x0bb6cde49d8ba257},
- &fe{0x590b62c7ff8a513f, 0x314b4ce372cacefd, 0x6bef32ce94b8a800, 0x6ddf84a095713d5f, 0x64eace4cb0982191, 0x0386213c651b888d},
- &fe{0xa5310a31111bbcdd, 0xa14ac0f5da148982, 0xf9ad9cc95423d2e9, 0xaa6ec095283ee4a7, 0xcf5b1f022e1c9107, 0x01fddf5aed881793},
- &fe{0x65a572b0d7a7d950, 0xe25c2d8183473a19, 0xc2fcebe7cb877dbd, 0x05b2d36c769a89b0, 0xba12961be86e9efb, 0x07eb1b29c1dfde1f},
- &fe{0x93e09572f7c4cd24, 0x364e929076795091, 0x8569467e68af51b5, 0xa47da89439f5340f, 0xf4fa918082e44d64, 0x0ad52ba3e6695a79},
- &fe{0x911429844e0d5f54, 0xd03f51a3516bb233, 0x3d587e5640536e66, 0xfa86d2a3a9a73482, 0xa90ed5adf1ed5537, 0x149c9c326a5e7393},
- &fe{0x462bbeb03c12921a, 0xdc9af5fa0a274a17, 0x9a558ebde836ebed, 0x649ef8f11a4fae46, 0x8100e1652b3cdc62, 0x1862bd62c291dacb},
- &fe{0x05c9b8ca89f12c26, 0x0194160fa9b9ac4f, 0x6a643d5a6879fa2c, 0x14665bdd8846e19d, 0xbb1d0d53af3ff6bf, 0x12c7e1c3b28962e5},
- &fe{0xb55ebf900b8a3e17, 0xfedc77ec1a9201c4, 0x1f07db10ea1a4df4, 0x0dfbd15dc41a594d, 0x389547f2334a5391, 0x02419f98165871a4},
- &fe{0xb416af000745fc20, 0x8e563e9d1ea6d0f5, 0x7c763e17763a0652, 0x01458ef0159ebbef, 0x8346fe421f96bb13, 0x0d2d7b829ce324d2},
- &fe{0x93096bb538d64615, 0x6f2a2619951d823a, 0x8f66b3ea59514fa4, 0xf563e63704f7092f, 0x724b136c4cf2d9fa, 0x046959cfcfd0bf49},
- &fe{0xea748d4b6e405346, 0x91e9079c2c02d58f, 0x41064965946d9b59, 0xa06731f1d2bbe1ee, 0x07f897e267a33f1b, 0x1017290919210e5f},
- &fe{0x872aa6c17d985097, 0xeecc53161264562a, 0x07afe37afff55002, 0x54759078e5be6838, 0xc4b92d15db8acca8, 0x106d87d1b51d13b9},
+ {
+ {0x2b567ff3e2837267, 0x1d4d9e57b958a767, 0xce028fea04bd7373, 0xcc31a30a0b6cd3df, 0x7d7b18a682692693, 0x0d300744d42a0310},
+ {0x99c2555fa542493f, 0xfe7f53cc4874f878, 0x5df0608b8f97608a, 0x14e03832052b49c8, 0x706326a6957dd5a4, 0x0a8dadd9c2414555},
+ {0x13d942922a5cf63a, 0x357e33e36e261e7d, 0xcf05a27c8456088d, 0x0000bd1de7ba50f0, 0x83d0c7532f8c1fde, 0x13f70bf38bbf2905},
+ {0x5c57fd95bfafbdbb, 0x28a359a65e541707, 0x3983ceb4f6360b6d, 0xafe19ff6f97e6d53, 0xb3468f4550192bf7, 0x0bb6cde49d8ba257},
+ {0x590b62c7ff8a513f, 0x314b4ce372cacefd, 0x6bef32ce94b8a800, 0x6ddf84a095713d5f, 0x64eace4cb0982191, 0x0386213c651b888d},
+ {0xa5310a31111bbcdd, 0xa14ac0f5da148982, 0xf9ad9cc95423d2e9, 0xaa6ec095283ee4a7, 0xcf5b1f022e1c9107, 0x01fddf5aed881793},
+ {0x65a572b0d7a7d950, 0xe25c2d8183473a19, 0xc2fcebe7cb877dbd, 0x05b2d36c769a89b0, 0xba12961be86e9efb, 0x07eb1b29c1dfde1f},
+ {0x93e09572f7c4cd24, 0x364e929076795091, 0x8569467e68af51b5, 0xa47da89439f5340f, 0xf4fa918082e44d64, 0x0ad52ba3e6695a79},
+ {0x911429844e0d5f54, 0xd03f51a3516bb233, 0x3d587e5640536e66, 0xfa86d2a3a9a73482, 0xa90ed5adf1ed5537, 0x149c9c326a5e7393},
+ {0x462bbeb03c12921a, 0xdc9af5fa0a274a17, 0x9a558ebde836ebed, 0x649ef8f11a4fae46, 0x8100e1652b3cdc62, 0x1862bd62c291dacb},
+ {0x05c9b8ca89f12c26, 0x0194160fa9b9ac4f, 0x6a643d5a6879fa2c, 0x14665bdd8846e19d, 0xbb1d0d53af3ff6bf, 0x12c7e1c3b28962e5},
+ {0xb55ebf900b8a3e17, 0xfedc77ec1a9201c4, 0x1f07db10ea1a4df4, 0x0dfbd15dc41a594d, 0x389547f2334a5391, 0x02419f98165871a4},
+ {0xb416af000745fc20, 0x8e563e9d1ea6d0f5, 0x7c763e17763a0652, 0x01458ef0159ebbef, 0x8346fe421f96bb13, 0x0d2d7b829ce324d2},
+ {0x93096bb538d64615, 0x6f2a2619951d823a, 0x8f66b3ea59514fa4, 0xf563e63704f7092f, 0x724b136c4cf2d9fa, 0x046959cfcfd0bf49},
+ {0xea748d4b6e405346, 0x91e9079c2c02d58f, 0x41064965946d9b59, 0xa06731f1d2bbe1ee, 0x07f897e267a33f1b, 0x1017290919210e5f},
+ {0x872aa6c17d985097, 0xeecc53161264562a, 0x07afe37afff55002, 0x54759078e5be6838, 0xc4b92d15db8acca8, 0x106d87d1b51d13b9},
},
- [16]*fe{
- &fe{0xeb6c359d47e52b1c, 0x18ef5f8a10634d60, 0xddfa71a0889d5b7e, 0x723e71dcc5fc1323, 0x52f45700b70d5c69, 0x0a8b981ee47691f1},
- &fe{0x616a3c4f5535b9fb, 0x6f5f037395dbd911, 0xf25f4cc5e35c65da, 0x3e50dffea3c62658, 0x6a33dca523560776, 0x0fadeff77b6bfe3e},
- &fe{0x2be9b66df470059c, 0x24a2c159a3d36742, 0x115dbe7ad10c2a37, 0xb6634a652ee5884d, 0x04fe8bb2b8d81af4, 0x01c2a7a256fe9c41},
- &fe{0xf27bf8ef3b75a386, 0x898b367476c9073f, 0x24482e6b8c2f4e5f, 0xc8e0bbd6fe110806, 0x59b0c17f7631448a, 0x11037cd58b3dbfbd},
- &fe{0x31c7912ea267eec6, 0x1dbf6f1c5fcdb700, 0xd30d4fe3ba86fdb1, 0x3cae528fbee9a2a4, 0xb1cce69b6aa9ad9a, 0x044393bb632d94fb},
- &fe{0xc66ef6efeeb5c7e8, 0x9824c289dd72bb55, 0x71b1a4d2f119981d, 0x104fc1aafb0919cc, 0x0e49df01d942a628, 0x096c3a09773272d4},
- &fe{0x9abc11eb5fadeff4, 0x32dca50a885728f0, 0xfb1fa3721569734c, 0xc4b76271ea6506b3, 0xd466a75599ce728e, 0x0c81d4645f4cb6ed},
- &fe{0x4199f10e5b8be45b, 0xda64e495b1e87930, 0xcb353efe9b33e4ff, 0x9e9efb24aa6424c6, 0xf08d33680a237465, 0x0d3378023e4c7406},
- &fe{0x7eb4ae92ec74d3a5, 0xc341b4aa9fac3497, 0x5be603899e907687, 0x03bfd9cca75cbdeb, 0x564c2935a96bfa93, 0x0ef3c33371e2fdb5},
- &fe{0x7ee91fd449f6ac2e, 0xe5d5bd5cb9357a30, 0x773a8ca5196b1380, 0xd0fda172174ed023, 0x6cb95e0fa776aead, 0x0d22d5a40cec7cff},
- &fe{0xf727e09285fd8519, 0xdc9d55a83017897b, 0x7549d8bd057894ae, 0x178419613d90d8f8, 0xfce95ebdeb5b490a, 0x0467ffaef23fc49e},
- &fe{0xc1769e6a7c385f1b, 0x79bc930deac01c03, 0x5461c75a23ede3b5, 0x6e20829e5c230c45, 0x828e0f1e772a53cd, 0x116aefa749127bff},
- &fe{0x101c10bf2744c10a, 0xbbf18d053a6a3154, 0xa0ecf39ef026f602, 0xfc009d4996dc5153, 0xb9000209d5bd08d3, 0x189e5fe4470cd73c},
- &fe{0x7ebd546ca1575ed2, 0xe47d5a981d081b55, 0x57b2b625b6d4ca21, 0xb0a1ba04228520cc, 0x98738983c2107ff3, 0x13dddbc4799d81d6},
- &fe{0x09319f2e39834935, 0x039e952cbdb05c21, 0x55ba77a9a2f76493, 0xfd04e3dfc6086467, 0xfb95832e7d78742e, 0x0ef9c24eccaf5e0e},
- &fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
+ {
+ {0xeb6c359d47e52b1c, 0x18ef5f8a10634d60, 0xddfa71a0889d5b7e, 0x723e71dcc5fc1323, 0x52f45700b70d5c69, 0x0a8b981ee47691f1},
+ {0x616a3c4f5535b9fb, 0x6f5f037395dbd911, 0xf25f4cc5e35c65da, 0x3e50dffea3c62658, 0x6a33dca523560776, 0x0fadeff77b6bfe3e},
+ {0x2be9b66df470059c, 0x24a2c159a3d36742, 0x115dbe7ad10c2a37, 0xb6634a652ee5884d, 0x04fe8bb2b8d81af4, 0x01c2a7a256fe9c41},
+ {0xf27bf8ef3b75a386, 0x898b367476c9073f, 0x24482e6b8c2f4e5f, 0xc8e0bbd6fe110806, 0x59b0c17f7631448a, 0x11037cd58b3dbfbd},
+ {0x31c7912ea267eec6, 0x1dbf6f1c5fcdb700, 0xd30d4fe3ba86fdb1, 0x3cae528fbee9a2a4, 0xb1cce69b6aa9ad9a, 0x044393bb632d94fb},
+ {0xc66ef6efeeb5c7e8, 0x9824c289dd72bb55, 0x71b1a4d2f119981d, 0x104fc1aafb0919cc, 0x0e49df01d942a628, 0x096c3a09773272d4},
+ {0x9abc11eb5fadeff4, 0x32dca50a885728f0, 0xfb1fa3721569734c, 0xc4b76271ea6506b3, 0xd466a75599ce728e, 0x0c81d4645f4cb6ed},
+ {0x4199f10e5b8be45b, 0xda64e495b1e87930, 0xcb353efe9b33e4ff, 0x9e9efb24aa6424c6, 0xf08d33680a237465, 0x0d3378023e4c7406},
+ {0x7eb4ae92ec74d3a5, 0xc341b4aa9fac3497, 0x5be603899e907687, 0x03bfd9cca75cbdeb, 0x564c2935a96bfa93, 0x0ef3c33371e2fdb5},
+ {0x7ee91fd449f6ac2e, 0xe5d5bd5cb9357a30, 0x773a8ca5196b1380, 0xd0fda172174ed023, 0x6cb95e0fa776aead, 0x0d22d5a40cec7cff},
+ {0xf727e09285fd8519, 0xdc9d55a83017897b, 0x7549d8bd057894ae, 0x178419613d90d8f8, 0xfce95ebdeb5b490a, 0x0467ffaef23fc49e},
+ {0xc1769e6a7c385f1b, 0x79bc930deac01c03, 0x5461c75a23ede3b5, 0x6e20829e5c230c45, 0x828e0f1e772a53cd, 0x116aefa749127bff},
+ {0x101c10bf2744c10a, 0xbbf18d053a6a3154, 0xa0ecf39ef026f602, 0xfc009d4996dc5153, 0xb9000209d5bd08d3, 0x189e5fe4470cd73c},
+ {0x7ebd546ca1575ed2, 0xe47d5a981d081b55, 0x57b2b625b6d4ca21, 0xb0a1ba04228520cc, 0x98738983c2107ff3, 0x13dddbc4799d81d6},
+ {0x09319f2e39834935, 0x039e952cbdb05c21, 0x55ba77a9a2f76493, 0xfd04e3dfc6086467, 0xfb95832e7d78742e, 0x0ef9c24eccaf5e0e},
+ {0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
},
}
var isogenyConstantsG2 = [4][4]*fe2{
- [4]*fe2{
- &fe2{
+ {
+ {
fe{0x47f671c71ce05e62, 0x06dd57071206393e, 0x7c80cd2af3fd71a2, 0x048103ea9e6cd062, 0xc54516acc8d037f6, 0x13808f550920ea41},
fe{0x47f671c71ce05e62, 0x06dd57071206393e, 0x7c80cd2af3fd71a2, 0x048103ea9e6cd062, 0xc54516acc8d037f6, 0x13808f550920ea41},
},
- &fe2{
+ {
fe{0, 0, 0, 0, 0, 0},
fe{0x5fe55555554c71d0, 0x873fffdd236aaaa3, 0x6a6b4619b26ef918, 0x21c2888408874945, 0x2836cda7028cabc5, 0x0ac73310a7fd5abd},
},
- &fe2{
+ {
fe{0x0a0c5555555971c3, 0xdb0c00101f9eaaae, 0xb1fb2f941d797997, 0xd3960742ef416e1c, 0xb70040e2c20556f4, 0x149d7861e581393b},
fe{0xaff2aaaaaaa638e8, 0x439fffee91b55551, 0xb535a30cd9377c8c, 0x90e144420443a4a2, 0x941b66d3814655e2, 0x0563998853fead5e},
},
- &fe2{
+ {
fe{0x40aac71c71c725ed, 0x190955557a84e38e, 0xd817050a8f41abc3, 0xd86485d4c87f6fb1, 0x696eb479f885d059, 0x198e1a74328002d2},
fe{0, 0, 0, 0, 0, 0},
},
},
- [4]*fe2{
- &fe2{
+ {
+ {
fe{0, 0, 0, 0, 0, 0},
fe{0x1f3affffff13ab97, 0xf25bfc611da3ff3e, 0xca3757cb3819b208, 0x3e6427366f8cec18, 0x03977bc86095b089, 0x04f69db13f39a952},
},
- &fe2{
+ {
fe{0x447600000027552e, 0xdcb8009a43480020, 0x6f7ee9ce4a6e8b59, 0xb10330b7c0a95bc6, 0x6140b1fcfb1e54b7, 0x0381be097f0bb4e1},
fe{0x7588ffffffd8557d, 0x41f3ff646e0bffdf, 0xf7b1e8d2ac426aca, 0xb3741acd32dbb6f8, 0xe9daf5b9482d581f, 0x167f53e0ba7431b8},
},
- &fe2{
+ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0, 0, 0, 0, 0, 0},
},
- &fe2{
+ {
fe{0, 0, 0, 0, 0, 0},
fe{0, 0, 0, 0, 0, 0},
},
},
- [4]*fe2{
- &fe2{
+ {
+ {
fe{0x96d8f684bdfc77be, 0xb530e4f43b66d0e2, 0x184a88ff379652fd, 0x57cb23ecfae804e1, 0x0fd2e39eada3eba9, 0x08c8055e31c5d5c3},
fe{0x96d8f684bdfc77be, 0xb530e4f43b66d0e2, 0x184a88ff379652fd, 0x57cb23ecfae804e1, 0x0fd2e39eada3eba9, 0x08c8055e31c5d5c3},
},
- &fe2{
+ {
fe{0, 0, 0, 0, 0, 0},
fe{0xbf0a71c71c91b406, 0x4d6d55d28b7638fd, 0x9d82f98e5f205aee, 0xa27aa27b1d1a18d5, 0x02c3b2b2d2938e86, 0x0c7d13420b09807f},
},
- &fe2{
+ {
fe{0xd7f9555555531c74, 0x21cffff748daaaa8, 0x5a9ad1866c9bbe46, 0x4870a2210221d251, 0x4a0db369c0a32af1, 0x02b1ccc429ff56af},
fe{0xe205aaaaaaac8e37, 0xfcdc000768795556, 0x0c96011a8a1537dd, 0x1c06a963f163406e, 0x010df44c82a881e6, 0x174f45260f808feb},
},
- &fe2{
+ {
fe{0xa470bda12f67f35c, 0xc0fe38e23327b425, 0xc9d3d0f2c6f0678d, 0x1c55c9935b5a982e, 0x27f6c0e2f0746764, 0x117c5e6e28aa9054},
fe{0, 0, 0, 0, 0, 0},
},
},
- [4]*fe2{
- &fe2{
+ {
+ {
fe{0x0162fffffa765adf, 0x8f7bea480083fb75, 0x561b3c2259e93611, 0x11e19fc1a9c875d5, 0xca713efc00367660, 0x03c6a03d41da1151},
fe{0x0162fffffa765adf, 0x8f7bea480083fb75, 0x561b3c2259e93611, 0x11e19fc1a9c875d5, 0xca713efc00367660, 0x03c6a03d41da1151},
},
- &fe2{
+ {
fe{0, 0, 0, 0, 0, 0},
fe{0x5db0fffffd3b02c5, 0xd713f52358ebfdba, 0x5ea60761a84d161a, 0xbb2c75a34ea6c44a, 0x0ac6735921c1119b, 0x0ee3d913bdacfbf6},
},
- &fe2{
+ {
fe{0x66b10000003affc5, 0xcb1400e764ec0030, 0xa73e5eb56fa5d106, 0x8984c913a0fe09a9, 0x11e10afb78ad7f13, 0x05429d0e3e918f52},
fe{0x534dffffffc4aae6, 0x5397ff174c67ffcf, 0xbff273eb870b251d, 0xdaf2827152870915, 0x393a9cbaca9e2dc3, 0x14be74dbfaee5748},
},
- &fe2{
+ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0, 0, 0, 0, 0, 0},
},
diff --git a/crypto/bls12381/swu.go b/crypto/bls12381/swu.go
index 40d8c9154db5..e78753b2403a 100644
--- a/crypto/bls12381/swu.go
+++ b/crypto/bls12381/swu.go
@@ -17,7 +17,7 @@
package bls12381
// swuMapG1 is implementation of Simplified Shallue-van de Woestijne-Ulas Method
-// follows the implmentation at draft-irtf-cfrg-hash-to-curve-06.
+// follows the implementation at draft-irtf-cfrg-hash-to-curve-06.
func swuMapG1(u *fe) (*fe, *fe) {
var params = swuParamsForG1
var tv [4]*fe
diff --git a/crypto/bn256/cloudflare/gfp_amd64.s b/crypto/bn256/cloudflare/gfp_amd64.s
index bdb4ffb78707..64c97eaed951 100644
--- a/crypto/bn256/cloudflare/gfp_amd64.s
+++ b/crypto/bn256/cloudflare/gfp_amd64.s
@@ -49,7 +49,7 @@ TEXT ·gfpNeg(SB),0,$0-16
SBBQ 24(DI), R11
MOVQ $0, AX
- gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,R15,BX)
+ gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,CX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
@@ -68,7 +68,7 @@ TEXT ·gfpAdd(SB),0,$0-24
ADCQ 24(SI), R11
ADCQ $0, R12
- gfpCarry(R8,R9,R10,R11,R12, R13,R14,R15,AX,BX)
+ gfpCarry(R8,R9,R10,R11,R12, R13,R14,CX,AX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
@@ -83,7 +83,7 @@ TEXT ·gfpSub(SB),0,$0-24
MOVQ ·p2+0(SB), R12
MOVQ ·p2+8(SB), R13
MOVQ ·p2+16(SB), R14
- MOVQ ·p2+24(SB), R15
+ MOVQ ·p2+24(SB), CX
MOVQ $0, AX
SUBQ 0(SI), R8
@@ -94,12 +94,12 @@ TEXT ·gfpSub(SB),0,$0-24
CMOVQCC AX, R12
CMOVQCC AX, R13
CMOVQCC AX, R14
- CMOVQCC AX, R15
+ CMOVQCC AX, CX
ADDQ R12, R8
ADCQ R13, R9
ADCQ R14, R10
- ADCQ R15, R11
+ ADCQ CX, R11
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
@@ -115,7 +115,7 @@ TEXT ·gfpMul(SB),0,$160-24
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
storeBlock( R8, R9,R10,R11, 0(SP))
- storeBlock(R12,R13,R14,R15, 32(SP))
+ storeBlock(R12,R13,R14,CX, 32(SP))
gfpReduceBMI2()
JMP end
@@ -125,5 +125,5 @@ nobmi2Mul:
end:
MOVQ c+0(FP), DI
- storeBlock(R12,R13,R14,R15, 0(DI))
+ storeBlock(R12,R13,R14,CX, 0(DI))
RET
diff --git a/crypto/bn256/cloudflare/mul_amd64.h b/crypto/bn256/cloudflare/mul_amd64.h
index bab5da8313b6..9d8e4b37dbe7 100644
--- a/crypto/bn256/cloudflare/mul_amd64.h
+++ b/crypto/bn256/cloudflare/mul_amd64.h
@@ -165,7 +165,7 @@
\
\ // Add the 512-bit intermediate to m*N
loadBlock(96+stack, R8,R9,R10,R11) \
- loadBlock(128+stack, R12,R13,R14,R15) \
+ loadBlock(128+stack, R12,R13,R14,CX) \
\
MOVQ $0, AX \
ADDQ 0+stack, R8 \
@@ -175,7 +175,7 @@
ADCQ 32+stack, R12 \
ADCQ 40+stack, R13 \
ADCQ 48+stack, R14 \
- ADCQ 56+stack, R15 \
+ ADCQ 56+stack, CX \
ADCQ $0, AX \
\
- gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
+ gfpCarry(R12,R13,R14,CX,AX, R8,R9,R10,R11,BX)
diff --git a/crypto/bn256/cloudflare/mul_bmi2_amd64.h b/crypto/bn256/cloudflare/mul_bmi2_amd64.h
index 71ad0499afd5..403566c6fad8 100644
--- a/crypto/bn256/cloudflare/mul_bmi2_amd64.h
+++ b/crypto/bn256/cloudflare/mul_bmi2_amd64.h
@@ -29,7 +29,7 @@
ADCQ $0, R14 \
\
MOVQ a2, DX \
- MOVQ $0, R15 \
+ MOVQ $0, CX \
MULXQ 0+rb, AX, BX \
ADDQ AX, R10 \
ADCQ BX, R11 \
@@ -43,7 +43,7 @@
MULXQ 24+rb, AX, BX \
ADCQ AX, R13 \
ADCQ BX, R14 \
- ADCQ $0, R15 \
+ ADCQ $0, CX \
\
MOVQ a3, DX \
MULXQ 0+rb, AX, BX \
@@ -52,13 +52,13 @@
MULXQ 16+rb, AX, BX \
ADCQ AX, R13 \
ADCQ BX, R14 \
- ADCQ $0, R15 \
+ ADCQ $0, CX \
MULXQ 8+rb, AX, BX \
ADDQ AX, R12 \
ADCQ BX, R13 \
MULXQ 24+rb, AX, BX \
ADCQ AX, R14 \
- ADCQ BX, R15
+ ADCQ BX, CX
#define gfpReduceBMI2() \
\ // m = (T * N') mod R, store m in R8:R9:R10:R11
@@ -106,7 +106,7 @@
ADCQ 32(SP), R12 \
ADCQ 40(SP), R13 \
ADCQ 48(SP), R14 \
- ADCQ 56(SP), R15 \
+ ADCQ 56(SP), CX \
ADCQ $0, AX \
\
- gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
+ gfpCarry(R12,R13,R14,CX,AX, R8,R9,R10,R11,BX)
diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go
index 0a6aeb2b5175..96e33da006fb 100644
--- a/crypto/ecies/ecies_test.go
+++ b/crypto/ecies/ecies_test.go
@@ -279,7 +279,7 @@ var testCases = []testCase{
{
Curve: elliptic.P384(),
Name: "P384",
- Expected: ECIES_AES256_SHA384,
+ Expected: ECIES_AES192_SHA384,
},
{
Curve: elliptic.P521(),
diff --git a/crypto/ecies/params.go b/crypto/ecies/params.go
index 0bd3877ddd6f..39e7c8947373 100644
--- a/crypto/ecies/params.go
+++ b/crypto/ecies/params.go
@@ -80,6 +80,14 @@ var (
KeyLen: 16,
}
+ ECIES_AES192_SHA384 = &ECIESParams{
+ Hash: sha512.New384,
+ hashAlgo: crypto.SHA384,
+ Cipher: aes.NewCipher,
+ BlockSize: aes.BlockSize,
+ KeyLen: 24,
+ }
+
ECIES_AES256_SHA256 = &ECIESParams{
Hash: sha256.New,
hashAlgo: crypto.SHA256,
@@ -108,7 +116,7 @@ var (
var paramsFromCurve = map[elliptic.Curve]*ECIESParams{
ethcrypto.S256(): ECIES_AES128_SHA256,
elliptic.P256(): ECIES_AES128_SHA256,
- elliptic.P384(): ECIES_AES256_SHA384,
+ elliptic.P384(): ECIES_AES192_SHA384,
elliptic.P521(): ECIES_AES256_SHA512,
}
diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go
index fd1e66c7e6fa..3e48e51e84eb 100644
--- a/crypto/signature_nocgo.go
+++ b/crypto/signature_nocgo.go
@@ -24,37 +24,48 @@ import (
"crypto/elliptic"
"errors"
"fmt"
- "math/big"
- "github.com/btcsuite/btcd/btcec"
+ "github.com/btcsuite/btcd/btcec/v2"
+ btc_ecdsa "github.com/btcsuite/btcd/btcec/v2/ecdsa"
)
// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
- pub, err := SigToPub(hash, sig)
+ pub, err := sigToPub(hash, sig)
if err != nil {
return nil, err
}
- bytes := (*btcec.PublicKey)(pub).SerializeUncompressed()
+ bytes := pub.SerializeUncompressed()
return bytes, err
}
-// SigToPub returns the public key that created the given signature.
-func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
+func sigToPub(hash, sig []byte) (*btcec.PublicKey, error) {
+ if len(sig) != SignatureLength {
+ return nil, errors.New("invalid signature")
+ }
// Convert to btcec input format with 'recovery id' v at the beginning.
btcsig := make([]byte, SignatureLength)
- btcsig[0] = sig[64] + 27
+ btcsig[0] = sig[RecoveryIDOffset] + 27
copy(btcsig[1:], sig)
- pub, _, err := btcec.RecoverCompact(btcec.S256(), btcsig, hash)
- return (*ecdsa.PublicKey)(pub), err
+ pub, _, err := btc_ecdsa.RecoverCompact(btcsig, hash)
+ return pub, err
+}
+
+// SigToPub returns the public key that created the given signature.
+func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
+ pub, err := sigToPub(hash, sig)
+ if err != nil {
+ return nil, err
+ }
+ return pub.ToECDSA(), nil
}
// Sign calculates an ECDSA signature.
//
// This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must
-// be aware that the given hash cannot be chosen by an adversery. Common
+// be aware that the given hash cannot be chosen by an adversary. Common
// solution is to hash any input before calculating the signature.
//
// The produced signature is in the [R || S || V] format where V is 0 or 1.
@@ -65,14 +76,20 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
if prv.Curve != btcec.S256() {
return nil, fmt.Errorf("private key curve is not secp256k1")
}
- sig, err := btcec.SignCompact(btcec.S256(), (*btcec.PrivateKey)(prv), hash, false)
+ // ecdsa.PrivateKey -> btcec.PrivateKey
+ var priv btcec.PrivateKey
+ if overflow := priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() {
+ return nil, fmt.Errorf("invalid private key")
+ }
+ defer priv.Zero()
+ sig, err := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
if err != nil {
return nil, err
}
// Convert to Ethereum signature format with 'recovery id' v at the end.
v := sig[0] - 27
copy(sig, sig[1:])
- sig[64] = v
+ sig[RecoveryIDOffset] = v
return sig, nil
}
@@ -83,13 +100,20 @@ func VerifySignature(pubkey, hash, signature []byte) bool {
if len(signature) != 64 {
return false
}
- sig := &btcec.Signature{R: new(big.Int).SetBytes(signature[:32]), S: new(big.Int).SetBytes(signature[32:])}
- key, err := btcec.ParsePubKey(pubkey, btcec.S256())
+ var r, s btcec.ModNScalar
+ if r.SetByteSlice(signature[:32]) {
+ return false // overflow
+ }
+ if s.SetByteSlice(signature[32:]) {
+ return false
+ }
+ sig := btc_ecdsa.NewSignature(&r, &s)
+ key, err := btcec.ParsePubKey(pubkey)
if err != nil {
return false
}
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
- if sig.S.Cmp(secp256k1halfN) > 0 {
+ if s.IsOverHalfOrder() {
return false
}
return sig.Verify(hash, key)
@@ -100,16 +124,26 @@ func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
if len(pubkey) != 33 {
return nil, errors.New("invalid compressed public key length")
}
- key, err := btcec.ParsePubKey(pubkey, btcec.S256())
+ key, err := btcec.ParsePubKey(pubkey)
if err != nil {
return nil, err
}
return key.ToECDSA(), nil
}
-// CompressPubkey encodes a public key to the 33-byte compressed format.
+// CompressPubkey encodes a public key to the 33-byte compressed format. The
+// provided PublicKey must be valid. Namely, the coordinates must not be larger
+// than 32 bytes each, they must be less than the field prime, and it must be a
+// point on the secp256k1 curve. This is the case for a PublicKey constructed by
+// elliptic.Unmarshal (see UnmarshalPubkey), or by ToECDSA and ecdsa.GenerateKey
+// when constructing a PrivateKey.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
- return (*btcec.PublicKey)(pubkey).SerializeCompressed()
+ // NOTE: the coordinates may be validated with
+ // btcec.ParsePubKey(FromECDSAPub(pubkey))
+ var x, y btcec.FieldVal
+ x.SetByteSlice(pubkey.X.Bytes())
+ y.SetByteSlice(pubkey.Y.Bytes())
+ return btcec.NewPublicKey(&x, &y).SerializeCompressed()
}
// S256 returns an instance of the secp256k1 curve.
diff --git a/docs/postmortems/2021-08-22-split-postmortem.md b/docs/postmortems/2021-08-22-split-postmortem.md
index 429f22d70afb..2004f0f2870d 100644
--- a/docs/postmortems/2021-08-22-split-postmortem.md
+++ b/docs/postmortems/2021-08-22-split-postmortem.md
@@ -5,7 +5,7 @@ This is a post-mortem concerning the minority split that occurred on Ethereum ma
## Timeline
-- 2021-08-17: Guido Vranken submitted bounty report. Investigation started, root cause identified, patch variations discussed.
+- 2021-08-17: Guido Vranken submitted a bounty report. Investigation started, root cause identified, patch variations discussed.
- 2021-08-18: Made public announcement over twitter about upcoming security release upcoming Tuesday. Downstream projects were also notified about the upcoming patch-release.
- 2021-08-24: Released [v1.10.8](https://github.com/ethereum/go-ethereum/releases/tag/v1.10.8) containing the fix on Tuesday morning (CET). Erigon released [v2021.08.04](https://github.com/ledgerwatch/erigon/releases/tag/v2021.08.04).
- 2021-08-27: At 12:50:07 UTC, issue exploited. Analysis started roughly 30m later,
@@ -51,7 +51,7 @@ A memory-corruption bug within the EVM can cause a consensus error, where vulner
#### Handling
-On the evening of 17th, we discussed options how to handle it. We made a state test to reproduce the issue, and verified that neither `openethereum`, `nethermind` nor `besu` were affected by the same vulnerability, and started a full-sync with a patched version of `geth`.
+On the evening of 17th, we discussed options on how to handle it. We made a state test to reproduce the issue, and verified that neither `openethereum`, `nethermind` nor `besu` were affected by the same vulnerability, and started a full-sync with a patched version of `geth`.
It was decided that in this specific instance, it would be possible to make a public announcement and a patch release:
diff --git a/eth/backend.go b/eth/backend.go
index a53982166d78..273f8b3b2ef8 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -220,21 +220,21 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
checkpoint = params.TrustedCheckpoints[genesisHash]
}
if eth.handler, err = newHandler(&handlerConfig{
- Database: chainDb,
- Chain: eth.blockchain,
- TxPool: eth.txPool,
- Merger: merger,
- Network: config.NetworkId,
- Sync: config.SyncMode,
- BloomCache: uint64(cacheLimit),
- EventMux: eth.eventMux,
- Checkpoint: checkpoint,
- Whitelist: config.Whitelist,
+ Database: chainDb,
+ Chain: eth.blockchain,
+ TxPool: eth.txPool,
+ Merger: merger,
+ Network: config.NetworkId,
+ Sync: config.SyncMode,
+ BloomCache: uint64(cacheLimit),
+ EventMux: eth.eventMux,
+ Checkpoint: checkpoint,
+ PeerRequiredBlocks: config.PeerRequiredBlocks,
}); err != nil {
return nil, err
}
- eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, merger)
+ eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)
eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
@@ -292,7 +292,7 @@ func makeExtraData(extra []byte) []byte {
// APIs return the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *Ethereum) APIs() []rpc.API {
- apis := ethapi.GetAPIs(s.APIBackend)
+ apis := ethapi.GetAPIs(s.APIBackend, s.BlockChain())
// Append any APIs exposed explicitly by the consensus engine
apis = append(apis, s.engine.APIs(s.BlockChain())...)
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 3c0b6d9e43fc..45f233df6dfa 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -22,35 +22,17 @@ import (
"encoding/binary"
"errors"
"fmt"
- "math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/misc"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/beacon"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- chainParams "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/trie"
-)
-
-var (
- VALID = GenericStringResponse{"VALID"}
- SUCCESS = GenericStringResponse{"SUCCESS"}
- INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
- SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
- GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
- UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
- InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
- InvalidPayloadID = rpc.CustomError{Code: 1, ValidationError: "invalid payload id"}
)
// Register adds catalyst APIs to the full node.
@@ -58,406 +40,314 @@ func Register(stack *node.Node, backend *eth.Ethereum) error {
log.Warn("Catalyst mode enabled", "protocol", "eth")
stack.RegisterAPIs([]rpc.API{
{
- Namespace: "engine",
- Version: "1.0",
- Service: NewConsensusAPI(backend, nil),
- Public: true,
+ Namespace: "engine",
+ Version: "1.0",
+ Service: NewConsensusAPI(backend),
+ Public: true,
+ Authenticated: true,
},
- })
- return nil
-}
-
-// RegisterLight adds catalyst APIs to the light client.
-func RegisterLight(stack *node.Node, backend *les.LightEthereum) error {
- log.Warn("Catalyst mode enabled", "protocol", "les")
- stack.RegisterAPIs([]rpc.API{
{
- Namespace: "engine",
- Version: "1.0",
- Service: NewConsensusAPI(nil, backend),
- Public: true,
+ Namespace: "engine",
+ Version: "1.0",
+ Service: NewConsensusAPI(backend),
+ Public: true,
+ Authenticated: false,
},
})
return nil
}
type ConsensusAPI struct {
- light bool
- eth *eth.Ethereum
- les *les.LightEthereum
- engine consensus.Engine // engine is the post-merge consensus engine, only for block creation
- preparedBlocks map[uint64]*ExecutableDataV1
+ eth *eth.Ethereum
+ remoteBlocks *headerQueue // Cache of remote payloads received
+ localBlocks *payloadQueue // Cache of local payloads generated
}
-func NewConsensusAPI(eth *eth.Ethereum, les *les.LightEthereum) *ConsensusAPI {
- var engine consensus.Engine
- if eth == nil {
- if les.BlockChain().Config().TerminalTotalDifficulty == nil {
- panic("Catalyst started without valid total difficulty")
- }
- if b, ok := les.Engine().(*beacon.Beacon); ok {
- engine = beacon.New(b.InnerEngine())
- } else {
- engine = beacon.New(les.Engine())
- }
- } else {
- if eth.BlockChain().Config().TerminalTotalDifficulty == nil {
- panic("Catalyst started without valid total difficulty")
- }
- if b, ok := eth.Engine().(*beacon.Beacon); ok {
- engine = beacon.New(b.InnerEngine())
- } else {
- engine = beacon.New(eth.Engine())
- }
+// NewConsensusAPI creates a new consensus api for the given backend.
+// The underlying blockchain needs to have a valid terminal total difficulty set.
+func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
+ if eth.BlockChain().Config().TerminalTotalDifficulty == nil {
+ panic("Catalyst started without valid total difficulty")
}
return &ConsensusAPI{
- light: eth == nil,
- eth: eth,
- les: les,
- engine: engine,
- preparedBlocks: make(map[uint64]*ExecutableDataV1),
+ eth: eth,
+ remoteBlocks: newHeaderQueue(),
+ localBlocks: newPayloadQueue(),
}
}
-// blockExecutionEnv gathers all the data required to execute
-// a block, either when assembling it or when inserting it.
-type blockExecutionEnv struct {
- chain *core.BlockChain
- state *state.StateDB
- tcount int
- gasPool *core.GasPool
-
- header *types.Header
- txs []*types.Transaction
- receipts []*types.Receipt
-}
-
-func (env *blockExecutionEnv) commitTransaction(tx *types.Transaction, coinbase common.Address) error {
- vmconfig := *env.chain.GetVMConfig()
- snap := env.state.Snapshot()
- receipt, err := core.ApplyTransaction(env.chain.Config(), env.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, vmconfig)
- if err != nil {
- env.state.RevertToSnapshot(snap)
- return err
+// ForkchoiceUpdatedV1 has several responsibilities:
+// If the method is called with an empty head block:
+// we return success, which can be used to check if the catalyst mode is enabled
+// If the total difficulty was not reached:
+// we return INVALID
+// If the finalizedBlockHash is set:
+// we check if we have the finalizedBlockHash in our db, if not we start a sync
+// We try to set our blockchain to the headBlock
+// If there are payloadAttributes:
+// we try to assemble a block with the payloadAttributes and return its payloadID
+func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
+ log.Trace("Engine API request received", "method", "ForkchoiceUpdated", "head", update.HeadBlockHash, "finalized", update.FinalizedBlockHash, "safe", update.SafeBlockHash)
+ if update.HeadBlockHash == (common.Hash{}) {
+ log.Warn("Forkchoice requested update to zero hash")
+ return beacon.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this?
+ }
+ // Check whether we have the block yet in our database or not. If not, we'll
+ // need to either trigger a sync, or to reject this forkchoice update for a
+ // reason.
+ block := api.eth.BlockChain().GetBlockByHash(update.HeadBlockHash)
+ if block == nil {
+ // If the head hash is unknown (was not given to us in a newPayload request),
+ // we cannot resolve the header, so not much to do. This could be extended in
+ // the future to resolve from the `eth` network, but it's an unexpected case
+ // that should be fixed, not papered over.
+ header := api.remoteBlocks.get(update.HeadBlockHash)
+ if header == nil {
+ log.Warn("Forkchoice requested unknown head", "hash", update.HeadBlockHash)
+ return beacon.STATUS_SYNCING, nil
+ }
+ // Header advertised via a past newPayload request. Start syncing to it.
+ // Before we do however, make sure any legacy sync in switched off so we
+ // don't accidentally have 2 cycles running.
+ if merger := api.eth.Merger(); !merger.TDDReached() {
+ merger.ReachTTD()
+ api.eth.Downloader().Cancel()
+ }
+ log.Info("Forkchoice requested sync to new head", "number", header.Number, "hash", header.Hash())
+ if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), header); err != nil {
+ return beacon.STATUS_SYNCING, err
+ }
+ return beacon.STATUS_SYNCING, nil
+ }
+ // Block is known locally, just sanity check that the beacon client does not
+ // attempt to push us back to before the merge.
+ if block.Difficulty().BitLen() > 0 || block.NumberU64() == 0 {
+ var (
+ td = api.eth.BlockChain().GetTd(update.HeadBlockHash, block.NumberU64())
+ ptd = api.eth.BlockChain().GetTd(block.ParentHash(), block.NumberU64()-1)
+ ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
+ )
+ if td == nil || (block.NumberU64() > 0 && ptd == nil) {
+ log.Error("TDs unavailable for TTD check", "number", block.NumberU64(), "hash", update.HeadBlockHash, "td", td, "parent", block.ParentHash(), "ptd", ptd)
+ return beacon.STATUS_INVALID, errors.New("TDs unavailable for TDD check")
+ }
+ if td.Cmp(ttd) < 0 || (block.NumberU64() > 0 && ptd.Cmp(ttd) > 0) {
+ log.Error("Refusing beacon update to pre-merge", "number", block.NumberU64(), "hash", update.HeadBlockHash, "diff", block.Difficulty(), "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
+ return beacon.ForkChoiceResponse{PayloadStatus: beacon.PayloadStatusV1{Status: beacon.INVALIDTERMINALBLOCK}, PayloadID: nil}, nil
+ }
}
- env.txs = append(env.txs, tx)
- env.receipts = append(env.receipts, receipt)
- return nil
-}
-func (api *ConsensusAPI) makeEnv(parent *types.Block, header *types.Header) (*blockExecutionEnv, error) {
- // The parent state might be missing. It can be the special scenario
- // that consensus layer tries to build a new block based on the very
- // old side chain block and the relevant state is already pruned. So
- // try to retrieve the live state from the chain, if it's not existent,
- // do the necessary recovery work.
- var (
- err error
- state *state.StateDB
- )
- if api.eth.BlockChain().HasState(parent.Root()) {
- state, err = api.eth.BlockChain().StateAt(parent.Root())
+ if rawdb.ReadCanonicalHash(api.eth.ChainDb(), block.NumberU64()) != update.HeadBlockHash {
+ // Block is not canonical, set head.
+ if err := api.eth.BlockChain().SetChainHead(block); err != nil {
+ return beacon.STATUS_INVALID, err
+ }
} else {
- // The maximum acceptable reorg depth can be limited by the
- // finalised block somehow. TODO(rjl493456442) fix the hard-
- // coded number here later.
- state, err = api.eth.StateAtBlock(parent, 1000, nil, false, false)
- }
- if err != nil {
- return nil, err
- }
- env := &blockExecutionEnv{
- chain: api.eth.BlockChain(),
- state: state,
- header: header,
- gasPool: new(core.GasPool).AddGas(header.GasLimit),
- }
- return env, nil
-}
-
-func (api *ConsensusAPI) GetPayloadV1(payloadID hexutil.Bytes) (*ExecutableDataV1, error) {
- hash := []byte(payloadID)
- if len(hash) < 8 {
- return nil, &InvalidPayloadID
- }
- id := binary.BigEndian.Uint64(hash[:8])
- data, ok := api.preparedBlocks[id]
- if !ok {
- return nil, &UnknownPayload
+ // If the head block is already in our canonical chain, the beacon client is
+ // probably resyncing. Ignore the update.
+ log.Info("Ignoring beacon update to old head", "number", block.NumberU64(), "hash", update.HeadBlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)), "have", api.eth.BlockChain().CurrentBlock().NumberU64())
}
- return data, nil
-}
+ api.eth.SetSynced()
-func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads ForkchoiceStateV1, PayloadAttributes *PayloadAttributesV1) (ForkChoiceResponse, error) {
- if heads.HeadBlockHash == (common.Hash{}) {
- return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil
- }
- if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
- if block := api.eth.BlockChain().GetBlockByHash(heads.HeadBlockHash); block == nil {
- // TODO (MariusVanDerWijden) trigger sync
- return SYNCING, nil
+ // If the beacon client also advertised a finalized block, mark the local
+ // chain final and completely in PoS mode.
+ if update.FinalizedBlockHash != (common.Hash{}) {
+ if merger := api.eth.Merger(); !merger.PoSFinalized() {
+ merger.FinalizePoS()
}
- return INVALID, err
- }
- // If the finalized block is set, check if it is in our blockchain
- if heads.FinalizedBlockHash != (common.Hash{}) {
- if block := api.eth.BlockChain().GetBlockByHash(heads.FinalizedBlockHash); block == nil {
- // TODO (MariusVanDerWijden) trigger sync
- return SYNCING, nil
+ // TODO (MariusVanDerWijden): If the finalized block is not in our canonical tree, somethings wrong
+ finalBlock := api.eth.BlockChain().GetBlockByHash(update.FinalizedBlockHash)
+ if finalBlock == nil {
+ log.Warn("Final block not available in database", "hash", update.FinalizedBlockHash)
+ return beacon.STATUS_INVALID, errors.New("final block not available")
+ } else if rawdb.ReadCanonicalHash(api.eth.ChainDb(), finalBlock.NumberU64()) != update.FinalizedBlockHash {
+ log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", update.HeadBlockHash)
+ return beacon.STATUS_INVALID, errors.New("final block not canonical")
}
}
- // SetHead
- if err := api.setHead(heads.HeadBlockHash); err != nil {
- return INVALID, err
+ // TODO (MariusVanDerWijden): Check if the safe block hash is in our canonical tree, if not somethings wrong
+ if update.SafeBlockHash != (common.Hash{}) {
+ safeBlock := api.eth.BlockChain().GetBlockByHash(update.SafeBlockHash)
+ if safeBlock == nil {
+ log.Warn("Safe block not available in database")
+ return beacon.STATUS_INVALID, errors.New("safe head not available")
+ }
+ if rawdb.ReadCanonicalHash(api.eth.ChainDb(), safeBlock.NumberU64()) != update.SafeBlockHash {
+ log.Warn("Safe block not in canonical chain")
+ return beacon.STATUS_INVALID, errors.New("safe head not canonical")
+ }
}
- // Assemble block (if needed)
- if PayloadAttributes != nil {
- data, err := api.assembleBlock(heads.HeadBlockHash, PayloadAttributes)
+ // If payload generation was requested, create a new block to be potentially
+ // sealed by the beacon client. The payload will be requested later, and we
+ // might replace it arbitrarily many times in between.
+ if payloadAttributes != nil {
+ log.Info("Creating new payload for sealing")
+ start := time.Now()
+
+ data, err := api.assembleBlock(update.HeadBlockHash, payloadAttributes)
if err != nil {
- return INVALID, err
+ log.Error("Failed to create sealing payload", "err", err)
+ return api.validForkChoiceResponse(nil), err // valid setHead, invalid payload
}
- hash := computePayloadId(heads.HeadBlockHash, PayloadAttributes)
- id := binary.BigEndian.Uint64(hash)
- api.preparedBlocks[id] = data
- log.Info("Created payload", "payloadid", id)
- // TODO (MariusVanDerWijden) do something with the payloadID?
- hex := hexutil.Bytes(hash)
- return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: &hex}, nil
- }
- return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil
-}
+ id := computePayloadId(update.HeadBlockHash, payloadAttributes)
+ api.localBlocks.put(id, data)
-func computePayloadId(headBlockHash common.Hash, params *PayloadAttributesV1) []byte {
- // Hash
- hasher := sha256.New()
- hasher.Write(headBlockHash[:])
- binary.Write(hasher, binary.BigEndian, params.Timestamp)
- hasher.Write(params.Random[:])
- hasher.Write(params.SuggestedFeeRecipient[:])
- return hasher.Sum([]byte{})[:8]
+ log.Info("Created payload for sealing", "id", id, "elapsed", time.Since(start))
+ return api.validForkChoiceResponse(&id), nil
+ }
+ return api.validForkChoiceResponse(nil), nil
}
-func (api *ConsensusAPI) invalid() ExecutePayloadResponse {
- if api.light {
- return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.les.BlockChain().CurrentHeader().Hash()}
+// validForkChoiceResponse returns the ForkChoiceResponse{VALID}
+// with the latest valid hash and an optional payloadID.
+func (api *ConsensusAPI) validForkChoiceResponse(id *beacon.PayloadID) beacon.ForkChoiceResponse {
+ currentHash := api.eth.BlockChain().CurrentBlock().Hash()
+ return beacon.ForkChoiceResponse{
+ PayloadStatus: beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: ¤tHash},
+ PayloadID: id,
}
- return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.eth.BlockChain().CurrentHeader().Hash()}
}
-// ExecutePayload creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
-func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePayloadResponse, error) {
- block, err := ExecutableDataToBlock(params)
- if err != nil {
- return api.invalid(), err
- }
- if api.light {
- parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
- if parent == nil {
- return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
- }
- if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
- return api.invalid(), err
- }
- return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil
- }
- if !api.eth.BlockChain().HasBlock(block.ParentHash(), block.NumberU64()-1) {
- /*
- TODO (MariusVanDerWijden) reenable once sync is merged
- if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil {
- return SYNCING, err
- }
- */
- // TODO (MariusVanDerWijden) we should return nil here not empty hash
- return ExecutePayloadResponse{Status: SYNCING.Status, LatestValidHash: common.Hash{}}, nil
+// ExchangeTransitionConfigurationV1 checks the given configuration against
+// the configuration of the node.
+func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config beacon.TransitionConfigurationV1) (*beacon.TransitionConfigurationV1, error) {
+ if config.TerminalTotalDifficulty == nil {
+ return nil, errors.New("invalid terminal total difficulty")
}
- parent := api.eth.BlockChain().GetBlockByHash(params.ParentHash)
- td := api.eth.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
ttd := api.eth.BlockChain().Config().TerminalTotalDifficulty
- if td.Cmp(ttd) < 0 {
- return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
- }
- if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil {
- return api.invalid(), err
- }
-
- if merger := api.merger(); !merger.TDDReached() {
- merger.ReachTTD()
+ if ttd.Cmp(config.TerminalTotalDifficulty.ToInt()) != 0 {
+ log.Warn("Invalid TTD configured", "geth", ttd, "beacon", config.TerminalTotalDifficulty)
+ return nil, fmt.Errorf("invalid ttd: execution %v consensus %v", ttd, config.TerminalTotalDifficulty)
+ }
+
+ if config.TerminalBlockHash != (common.Hash{}) {
+ if hash := api.eth.BlockChain().GetCanonicalHash(uint64(config.TerminalBlockNumber)); hash == config.TerminalBlockHash {
+ return &beacon.TransitionConfigurationV1{
+ TerminalTotalDifficulty: (*hexutil.Big)(ttd),
+ TerminalBlockHash: config.TerminalBlockHash,
+ TerminalBlockNumber: config.TerminalBlockNumber,
+ }, nil
+ }
+ return nil, fmt.Errorf("invalid terminal block hash")
}
- return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil
+ return &beacon.TransitionConfigurationV1{TerminalTotalDifficulty: (*hexutil.Big)(ttd)}, nil
}
-// AssembleBlock creates a new block, inserts it into the chain, and returns the "execution
-// data" required for eth2 clients to process the new block.
-func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *PayloadAttributesV1) (*ExecutableDataV1, error) {
- if api.light {
- return nil, errors.New("not supported")
+// GetPayloadV1 returns a cached payload by id.
+func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
+ log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
+ data := api.localBlocks.get(payloadID)
+ if data == nil {
+ return nil, &beacon.UnknownPayload
}
- log.Info("Producing block", "parentHash", parentHash)
+ return data, nil
+}
- bc := api.eth.BlockChain()
- parent := bc.GetBlockByHash(parentHash)
+// NewPayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
+func (api *ConsensusAPI) NewPayloadV1(params beacon.ExecutableDataV1) (beacon.PayloadStatusV1, error) {
+ log.Trace("Engine API request received", "method", "ExecutePayload", "number", params.Number, "hash", params.BlockHash)
+ block, err := beacon.ExecutableDataToBlock(params)
+ if err != nil {
+ log.Debug("Invalid NewPayload params", "params", params, "error", err)
+ return beacon.PayloadStatusV1{Status: beacon.INVALIDBLOCKHASH}, nil
+ }
+ // If we already have the block locally, ignore the entire execution and just
+ // return a fake success.
+ if block := api.eth.BlockChain().GetBlockByHash(params.BlockHash); block != nil {
+ log.Warn("Ignoring already known beacon payload", "number", params.Number, "hash", params.BlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
+ hash := block.Hash()
+ return beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: &hash}, nil
+ }
+ // If the parent is missing, we - in theory - could trigger a sync, but that
+ // would also entail a reorg. That is problematic if multiple sibling blocks
+ // are being fed to us, and even more so, if some semi-distant uncle shortens
+ // our live chain. As such, payload execution will not permit reorgs and thus
+ // will not trigger a sync cycle. That is fine though, if we get a fork choice
+ // update after legit payload executions.
+ parent := api.eth.BlockChain().GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- log.Warn("Cannot assemble block with parent hash to unknown block", "parentHash", parentHash)
- return nil, fmt.Errorf("cannot assemble block with unknown parent %s", parentHash)
- }
-
- if params.Timestamp < parent.Time() {
- return nil, fmt.Errorf("child timestamp lower than parent's: %d < %d", params.Timestamp, parent.Time())
- }
- if now := uint64(time.Now().Unix()); params.Timestamp > now+1 {
- diff := time.Duration(params.Timestamp-now) * time.Second
- log.Warn("Producing block too far in the future", "diff", common.PrettyDuration(diff))
- }
- pending := api.eth.TxPool().Pending(true)
- coinbase := params.SuggestedFeeRecipient
- num := parent.Number()
- header := &types.Header{
- ParentHash: parent.Hash(),
- Number: num.Add(num, common.Big1),
- Coinbase: coinbase,
- GasLimit: parent.GasLimit(), // Keep the gas limit constant in this prototype
- Extra: []byte{}, // TODO (MariusVanDerWijden) properly set extra data
- Time: params.Timestamp,
- }
- if config := api.eth.BlockChain().Config(); config.IsLondon(header.Number) {
- header.BaseFee = misc.CalcBaseFee(config, parent.Header())
+ // Stash the block away for a potential forced forckchoice update to it
+ // at a later time.
+ api.remoteBlocks.put(block.Hash(), block.Header())
+
+ // Although we don't want to trigger a sync, if there is one already in
+ // progress, try to extend if with the current payload request to relieve
+ // some strain from the forkchoice update.
+ if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil {
+ log.Debug("Payload accepted for sync extension", "number", params.Number, "hash", params.BlockHash)
+ return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil
+ }
+ // Either no beacon sync was started yet, or it rejected the delivered
+ // payload as non-integratable on top of the existing sync. We'll just
+ // have to rely on the beacon client to forcefully update the head with
+ // a forkchoice update request.
+ log.Warn("Ignoring payload with missing parent", "number", params.Number, "hash", params.BlockHash, "parent", params.ParentHash)
+ return beacon.PayloadStatusV1{Status: beacon.ACCEPTED}, nil
+ }
+ // We have an existing parent, do some sanity checks to avoid the beacon client
+ // triggering too early
+ var (
+ td = api.eth.BlockChain().GetTd(parent.Hash(), parent.NumberU64())
+ ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
+ )
+ if td.Cmp(ttd) < 0 {
+ log.Warn("Ignoring pre-merge payload", "number", params.Number, "hash", params.BlockHash, "td", td, "ttd", ttd)
+ return beacon.PayloadStatusV1{Status: beacon.INVALIDTERMINALBLOCK}, nil
}
- if err := api.engine.Prepare(bc, header); err != nil {
- return nil, err
+ if block.Time() <= parent.Time() {
+ log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time())
+ return api.invalid(errors.New("invalid timestamp")), nil
}
- env, err := api.makeEnv(parent, header)
- if err != nil {
- return nil, err
+ if !api.eth.BlockChain().HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
+ api.remoteBlocks.put(block.Hash(), block.Header())
+ log.Warn("State not available, ignoring new payload")
+ return beacon.PayloadStatusV1{Status: beacon.ACCEPTED}, nil
}
- var (
- signer = types.MakeSigner(bc.Config(), header.Number)
- txHeap = types.NewTransactionsByPriceAndNonce(signer, pending, nil)
- transactions []*types.Transaction
- )
- for {
- if env.gasPool.Gas() < chainParams.TxGas {
- log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", chainParams.TxGas)
- break
- }
- tx := txHeap.Peek()
- if tx == nil {
- break
- }
-
- // The sender is only for logging purposes, and it doesn't really matter if it's correct.
- from, _ := types.Sender(signer, tx)
-
- // Execute the transaction
- env.state.Prepare(tx.Hash(), env.tcount)
- err = env.commitTransaction(tx, coinbase)
- switch err {
- case core.ErrGasLimitReached:
- // Pop the current out-of-gas transaction without shifting in the next from the account
- log.Trace("Gas limit exceeded for current block", "sender", from)
- txHeap.Pop()
-
- case core.ErrNonceTooLow:
- // New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
- txHeap.Shift()
-
- case core.ErrNonceTooHigh:
- // Reorg notification data race between the transaction pool and miner, skip account =
- log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
- txHeap.Pop()
-
- case nil:
- // Everything ok, collect the logs and shift in the next transaction from the same account
- env.tcount++
- txHeap.Shift()
- transactions = append(transactions, tx)
-
- default:
- // Strange error, discard the transaction and get the next in line (note, the
- // nonce-too-high clause will prevent us from executing in vain).
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
- txHeap.Shift()
- }
+ log.Trace("Inserting block without sethead", "hash", block.Hash(), "number", block.Number)
+ if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil {
+ log.Warn("NewPayloadV1: inserting block failed", "error", err)
+ return api.invalid(err), nil
}
- // Create the block.
- block, err := api.engine.FinalizeAndAssemble(bc, header, env.state, transactions, nil /* uncles */, env.receipts)
- if err != nil {
- return nil, err
+ // We've accepted a valid payload from the beacon client. Mark the local
+ // chain transitions to notify other subsystems (e.g. downloader) of the
+ // behavioral change.
+ if merger := api.eth.Merger(); !merger.TDDReached() {
+ merger.ReachTTD()
+ api.eth.Downloader().Cancel()
}
- return BlockToExecutableData(block, params.Random), nil
+ hash := block.Hash()
+ return beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: &hash}, nil
}
-func encodeTransactions(txs []*types.Transaction) [][]byte {
- var enc = make([][]byte, len(txs))
- for i, tx := range txs {
- enc[i], _ = tx.MarshalBinary()
- }
- return enc
+// computePayloadId computes a pseudo-random payloadid, based on the parameters.
+func computePayloadId(headBlockHash common.Hash, params *beacon.PayloadAttributesV1) beacon.PayloadID {
+ // Hash
+ hasher := sha256.New()
+ hasher.Write(headBlockHash[:])
+ binary.Write(hasher, binary.BigEndian, params.Timestamp)
+ hasher.Write(params.Random[:])
+ hasher.Write(params.SuggestedFeeRecipient[:])
+ var out beacon.PayloadID
+ copy(out[:], hasher.Sum(nil)[:8])
+ return out
}
-func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
- var txs = make([]*types.Transaction, len(enc))
- for i, encTx := range enc {
- var tx types.Transaction
- if err := tx.UnmarshalBinary(encTx); err != nil {
- return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
- }
- txs[i] = &tx
- }
- return txs, nil
+// invalid returns a response "INVALID" with the latest valid hash set to the current head.
+func (api *ConsensusAPI) invalid(err error) beacon.PayloadStatusV1 {
+ currentHash := api.eth.BlockChain().CurrentHeader().Hash()
+ errorMsg := err.Error()
+ return beacon.PayloadStatusV1{Status: beacon.INVALID, LatestValidHash: ¤tHash, ValidationError: &errorMsg}
}
-func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
- txs, err := decodeTransactions(params.Transactions)
+// assembleBlock creates a new block and returns the "execution
+// data" required for beacon clients to process the new block.
+func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *beacon.PayloadAttributesV1) (*beacon.ExecutableDataV1, error) {
+ log.Info("Producing block", "parentHash", parentHash)
+ block, err := api.eth.Miner().GetSealingBlock(parentHash, params.Timestamp, params.SuggestedFeeRecipient, params.Random)
if err != nil {
return nil, err
}
- if len(params.ExtraData) > 32 {
- return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
- }
- number := big.NewInt(0)
- number.SetUint64(params.Number)
- header := &types.Header{
- ParentHash: params.ParentHash,
- UncleHash: types.EmptyUncleHash,
- Coinbase: params.FeeRecipient,
- Root: params.StateRoot,
- TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
- ReceiptHash: params.ReceiptsRoot,
- Bloom: types.BytesToBloom(params.LogsBloom),
- Difficulty: common.Big0,
- Number: number,
- GasLimit: params.GasLimit,
- GasUsed: params.GasUsed,
- Time: params.Timestamp,
- BaseFee: params.BaseFeePerGas,
- Extra: params.ExtraData,
- // TODO (MariusVanDerWijden) add params.Random to header once required
- }
- block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
- if block.Hash() != params.BlockHash {
- return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
- }
- return block, nil
-}
-
-func BlockToExecutableData(block *types.Block, random common.Hash) *ExecutableDataV1 {
- return &ExecutableDataV1{
- BlockHash: block.Hash(),
- ParentHash: block.ParentHash(),
- FeeRecipient: block.Coinbase(),
- StateRoot: block.Root(),
- Number: block.NumberU64(),
- GasLimit: block.GasLimit(),
- GasUsed: block.GasUsed(),
- BaseFeePerGas: block.BaseFee(),
- Timestamp: block.Time(),
- ReceiptsRoot: block.ReceiptHash(),
- LogsBloom: block.Bloom().Bytes(),
- Transactions: encodeTransactions(block.Transactions()),
- Random: random,
- ExtraData: block.Extra(),
- }
+ return beacon.BlockToExecutableData(block), nil
}
// Used in tests to add a the list of transactions from a block to the tx pool.
@@ -467,70 +357,3 @@ func (api *ConsensusAPI) insertTransactions(txs types.Transactions) error {
}
return nil
}
-
-func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
- // shortcut if we entered PoS already
- if api.merger().PoSFinalized() {
- return nil
- }
- // make sure the parent has enough terminal total difficulty
- newHeadBlock := api.eth.BlockChain().GetBlockByHash(head)
- if newHeadBlock == nil {
- return &GenericServerError
- }
- td := api.eth.BlockChain().GetTd(newHeadBlock.Hash(), newHeadBlock.NumberU64())
- if td != nil && td.Cmp(api.eth.BlockChain().Config().TerminalTotalDifficulty) < 0 {
- return &InvalidTB
- }
- return nil
-}
-
-// setHead is called to perform a force choice.
-func (api *ConsensusAPI) setHead(newHead common.Hash) error {
- log.Info("Setting head", "head", newHead)
- if api.light {
- headHeader := api.les.BlockChain().CurrentHeader()
- if headHeader.Hash() == newHead {
- return nil
- }
- newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
- if newHeadHeader == nil {
- return &GenericServerError
- }
- if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil {
- return err
- }
- // Trigger the transition if it's the first `NewHead` event.
- merger := api.merger()
- if !merger.PoSFinalized() {
- merger.FinalizePoS()
- }
- return nil
- }
- headBlock := api.eth.BlockChain().CurrentBlock()
- if headBlock.Hash() == newHead {
- return nil
- }
- newHeadBlock := api.eth.BlockChain().GetBlockByHash(newHead)
- if newHeadBlock == nil {
- return &GenericServerError
- }
- if err := api.eth.BlockChain().SetChainHead(newHeadBlock); err != nil {
- return err
- }
- // Trigger the transition if it's the first `NewHead` event.
- if merger := api.merger(); !merger.PoSFinalized() {
- merger.FinalizePoS()
- }
- // TODO (MariusVanDerWijden) are we really synced now?
- api.eth.SetSynced()
- return nil
-}
-
-// Helper function, return the merger instance.
-func (api *ConsensusAPI) merger() *consensus.Merger {
- if api.light {
- return api.les.Merger()
- }
- return api.eth.Merger()
-}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index 6e52c4fea27d..de2e58a4f1e5 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -17,6 +17,7 @@
package catalyst
import (
+ "fmt"
"math/big"
"testing"
"time"
@@ -25,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -48,11 +50,12 @@ func generatePreMergeChain(n int) (*core.Genesis, []*types.Block) {
db := rawdb.NewMemoryDatabase()
config := params.AllEthashProtocolChanges
genesis := &core.Genesis{
- Config: config,
- Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}},
- ExtraData: []byte("test genesis"),
- Timestamp: 9000,
- BaseFee: big.NewInt(params.InitialBaseFee),
+ Config: config,
+ Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}},
+ ExtraData: []byte("test genesis"),
+ Timestamp: 9000,
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ Difficulty: big.NewInt(0),
}
testNonce := uint64(0)
generate := func(i int, g *core.BlockGen) {
@@ -78,14 +81,14 @@ func TestEth2AssembleBlock(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks)
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
+ api := NewConsensusAPI(ethservice)
signer := types.NewEIP155Signer(ethservice.BlockChain().Config().ChainID)
tx, err := types.SignTx(types.NewTransaction(uint64(10), blocks[9].Coinbase(), big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey)
if err != nil {
t.Fatalf("error signing transaction, err=%v", err)
}
ethservice.TxPool().AddLocal(tx)
- blockParams := PayloadAttributesV1{
+ blockParams := beacon.PayloadAttributesV1{
Timestamp: blocks[9].Time() + 5,
}
execData, err := api.assembleBlock(blocks[9].Hash(), &blockParams)
@@ -102,11 +105,11 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks[:9])
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
+ api := NewConsensusAPI(ethservice)
// Put the 10th block's tx in the pool and produce a new block
api.insertTransactions(blocks[9].Transactions())
- blockParams := PayloadAttributesV1{
+ blockParams := beacon.PayloadAttributesV1{
Timestamp: blocks[8].Time() + 5,
}
execData, err := api.assembleBlock(blocks[8].Hash(), &blockParams)
@@ -123,48 +126,61 @@ func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks)
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
- fcState := ForkchoiceStateV1{
+ api := NewConsensusAPI(ethservice)
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: blocks[5].Hash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
}
- if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil {
- t.Errorf("fork choice updated before total terminal difficulty should fail")
+ if resp, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ t.Errorf("fork choice updated should not error: %v", err)
+ } else if resp.PayloadStatus.Status != beacon.INVALIDTERMINALBLOCK {
+ t.Errorf("fork choice updated before total terminal difficulty should be INVALID")
}
}
func TestEth2PrepareAndGetPayload(t *testing.T) {
- genesis, blocks := generatePreMergeChain(10)
- // We need to properly set the terminal total difficulty
- genesis.Config.TerminalTotalDifficulty.Sub(genesis.Config.TerminalTotalDifficulty, blocks[9].Difficulty())
- n, ethservice := startEthService(t, genesis, blocks[:9])
- defer n.Close()
+ // TODO (MariusVanDerWijden) TestEth2PrepareAndGetPayload is currently broken, fixed in upcoming merge-kiln-v2 pr
+ /*
+ genesis, blocks := generatePreMergeChain(10)
+ // We need to properly set the terminal total difficulty
+ genesis.Config.TerminalTotalDifficulty.Sub(genesis.Config.TerminalTotalDifficulty, blocks[9].Difficulty())
+ n, ethservice := startEthService(t, genesis, blocks[:9])
+ defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
+ api := NewConsensusAPI(ethservice)
- // Put the 10th block's tx in the pool and produce a new block
- api.insertTransactions(blocks[9].Transactions())
- blockParams := PayloadAttributesV1{
- Timestamp: blocks[8].Time() + 5,
- }
- fcState := ForkchoiceStateV1{
- HeadBlockHash: blocks[8].Hash(),
- SafeBlockHash: common.Hash{},
- FinalizedBlockHash: common.Hash{},
- }
- _, err := api.ForkchoiceUpdatedV1(fcState, &blockParams)
- if err != nil {
- t.Fatalf("error preparing payload, err=%v", err)
- }
- payloadID := computePayloadId(fcState.HeadBlockHash, &blockParams)
- execData, err := api.GetPayloadV1(hexutil.Bytes(payloadID))
- if err != nil {
- t.Fatalf("error getting payload, err=%v", err)
- }
- if len(execData.Transactions) != blocks[9].Transactions().Len() {
- t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
- }
+ // Put the 10th block's tx in the pool and produce a new block
+ api.insertTransactions(blocks[9].Transactions())
+ blockParams := beacon.PayloadAttributesV1{
+ Timestamp: blocks[8].Time() + 5,
+ }
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: blocks[8].Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ _, err := api.ForkchoiceUpdatedV1(fcState, &blockParams)
+ if err != nil {
+ t.Fatalf("error preparing payload, err=%v", err)
+ }
+ payloadID := computePayloadId(fcState.HeadBlockHash, &blockParams)
+ execData, err := api.GetPayloadV1(payloadID)
+ if err != nil {
+ t.Fatalf("error getting payload, err=%v", err)
+ }
+ if len(execData.Transactions) != blocks[9].Transactions().Len() {
+ t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
+ }
+ // Test invalid payloadID
+ var invPayload beacon.PayloadID
+ copy(invPayload[:], payloadID[:])
+ invPayload[0] = ^invPayload[0]
+ _, err = api.GetPayloadV1(invPayload)
+ if err == nil {
+ t.Fatal("expected error retrieving invalid payload")
+ }
+ */
}
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan core.RemovedLogsEvent, wantNew, wantRemoved int) {
@@ -185,6 +201,51 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan co
}
}
+func TestInvalidPayloadTimestamp(t *testing.T) {
+ genesis, preMergeBlocks := generatePreMergeChain(10)
+ n, ethservice := startEthService(t, genesis, preMergeBlocks)
+ ethservice.Merger().ReachTTD()
+ defer n.Close()
+ var (
+ api = NewConsensusAPI(ethservice)
+ parent = ethservice.BlockChain().CurrentBlock()
+ )
+ tests := []struct {
+ time uint64
+ shouldErr bool
+ }{
+ {0, true},
+ {parent.Time(), true},
+ {parent.Time() - 1, true},
+
+ // TODO (MariusVanDerWijden) following tests are currently broken,
+ // fixed in upcoming merge-kiln-v2 pr
+ //{parent.Time() + 1, false},
+ //{uint64(time.Now().Unix()) + uint64(time.Minute), false},
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("Timestamp test: %v", i), func(t *testing.T) {
+ params := beacon.PayloadAttributesV1{
+ Timestamp: test.time,
+ Random: crypto.Keccak256Hash([]byte{byte(123)}),
+ SuggestedFeeRecipient: parent.Coinbase(),
+ }
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: parent.Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ _, err := api.ForkchoiceUpdatedV1(fcState, ¶ms)
+ if test.shouldErr && err == nil {
+ t.Fatalf("expected error preparing payload with invalid timestamp, err=%v", err)
+ } else if !test.shouldErr && err != nil {
+ t.Fatalf("error preparing payload with valid timestamp, err=%v", err)
+ }
+ })
+ }
+}
+
func TestEth2NewBlock(t *testing.T) {
genesis, preMergeBlocks := generatePreMergeChain(10)
n, ethservice := startEthService(t, genesis, preMergeBlocks)
@@ -192,7 +253,7 @@ func TestEth2NewBlock(t *testing.T) {
defer n.Close()
var (
- api = NewConsensusAPI(ethservice, nil)
+ api = NewConsensusAPI(ethservice)
parent = preMergeBlocks[len(preMergeBlocks)-1]
// This EVM code generates a log when the contract is created.
@@ -210,17 +271,17 @@ func TestEth2NewBlock(t *testing.T) {
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().AddLocal(tx)
- execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{
+ execData, err := api.assembleBlock(parent.Hash(), &beacon.PayloadAttributesV1{
Timestamp: parent.Time() + 5,
})
if err != nil {
t.Fatalf("Failed to create the executable data %v", err)
}
- block, err := ExecutableDataToBlock(*execData)
+ block, err := beacon.ExecutableDataToBlock(*execData)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
- newResp, err := api.ExecutePayloadV1(*execData)
+ newResp, err := api.NewPayloadV1(*execData)
if err != nil || newResp.Status != "VALID" {
t.Fatalf("Failed to insert block: %v", err)
}
@@ -228,7 +289,7 @@ func TestEth2NewBlock(t *testing.T) {
t.Fatalf("Chain head shouldn't be updated")
}
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
- fcState := ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: block.Hash(),
SafeBlockHash: block.Hash(),
FinalizedBlockHash: block.Hash(),
@@ -250,17 +311,17 @@ func TestEth2NewBlock(t *testing.T) {
)
parent = preMergeBlocks[len(preMergeBlocks)-1]
for i := 0; i < 10; i++ {
- execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{
+ execData, err := api.assembleBlock(parent.Hash(), &beacon.PayloadAttributesV1{
Timestamp: parent.Time() + 6,
})
if err != nil {
t.Fatalf("Failed to create the executable data %v", err)
}
- block, err := ExecutableDataToBlock(*execData)
+ block, err := beacon.ExecutableDataToBlock(*execData)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
- newResp, err := api.ExecutePayloadV1(*execData)
+ newResp, err := api.NewPayloadV1(*execData)
if err != nil || newResp.Status != "VALID" {
t.Fatalf("Failed to insert block: %v", err)
}
@@ -268,7 +329,7 @@ func TestEth2NewBlock(t *testing.T) {
t.Fatalf("Chain head shouldn't be updated")
}
- fcState := ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: block.Hash(),
SafeBlockHash: block.Hash(),
FinalizedBlockHash: block.Hash(),
@@ -362,7 +423,7 @@ func TestFullAPI(t *testing.T) {
ethservice.Merger().ReachTTD()
defer n.Close()
var (
- api = NewConsensusAPI(ethservice, nil)
+ api = NewConsensusAPI(ethservice)
parent = ethservice.BlockChain().CurrentBlock()
// This EVM code generates a log when the contract is created.
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
@@ -373,12 +434,13 @@ func TestFullAPI(t *testing.T) {
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().AddLocal(tx)
- params := PayloadAttributesV1{
+ params := beacon.PayloadAttributesV1{
Timestamp: parent.Time() + 1,
Random: crypto.Keccak256Hash([]byte{byte(i)}),
SuggestedFeeRecipient: parent.Coinbase(),
}
- fcState := ForkchoiceStateV1{
+
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: parent.Hash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
@@ -387,22 +449,21 @@ func TestFullAPI(t *testing.T) {
if err != nil {
t.Fatalf("error preparing payload, err=%v", err)
}
- if resp.Status != SUCCESS.Status {
- t.Fatalf("error preparing payload, invalid status: %v", resp.Status)
+ if resp.PayloadStatus.Status != beacon.VALID {
+ t.Fatalf("error preparing payload, invalid status: %v", resp.PayloadStatus.Status)
}
- payloadID := computePayloadId(parent.Hash(), ¶ms)
- payload, err := api.GetPayloadV1(hexutil.Bytes(payloadID))
+ payload, err := api.GetPayloadV1(*resp.PayloadID)
if err != nil {
t.Fatalf("can't get payload: %v", err)
}
- execResp, err := api.ExecutePayloadV1(*payload)
+ execResp, err := api.NewPayloadV1(*payload)
if err != nil {
t.Fatalf("can't execute payload: %v", err)
}
- if execResp.Status != VALID.Status {
+ if execResp.Status != beacon.VALID {
t.Fatalf("invalid status: %v", execResp.Status)
}
- fcState = ForkchoiceStateV1{
+ fcState = beacon.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: payload.ParentHash,
FinalizedBlockHash: payload.ParentHash,
@@ -414,6 +475,51 @@ func TestFullAPI(t *testing.T) {
t.Fatalf("Chain head should be updated")
}
parent = ethservice.BlockChain().CurrentBlock()
+ }
+}
+func TestExchangeTransitionConfig(t *testing.T) {
+ genesis, preMergeBlocks := generatePreMergeChain(10)
+ n, ethservice := startEthService(t, genesis, preMergeBlocks)
+ ethservice.Merger().ReachTTD()
+ defer n.Close()
+ var (
+ api = NewConsensusAPI(ethservice)
+ )
+ // invalid ttd
+ config := beacon.TransitionConfigurationV1{
+ TerminalTotalDifficulty: (*hexutil.Big)(big.NewInt(0)),
+ TerminalBlockHash: common.Hash{},
+ TerminalBlockNumber: 0,
+ }
+ if _, err := api.ExchangeTransitionConfigurationV1(config); err == nil {
+ t.Fatal("expected error on invalid config, invalid ttd")
+ }
+ // invalid terminal block hash
+ config = beacon.TransitionConfigurationV1{
+ TerminalTotalDifficulty: (*hexutil.Big)(genesis.Config.TerminalTotalDifficulty),
+ TerminalBlockHash: common.Hash{1},
+ TerminalBlockNumber: 0,
+ }
+ if _, err := api.ExchangeTransitionConfigurationV1(config); err == nil {
+ t.Fatal("expected error on invalid config, invalid hash")
+ }
+ // valid config
+ config = beacon.TransitionConfigurationV1{
+ TerminalTotalDifficulty: (*hexutil.Big)(genesis.Config.TerminalTotalDifficulty),
+ TerminalBlockHash: common.Hash{},
+ TerminalBlockNumber: 0,
+ }
+ if _, err := api.ExchangeTransitionConfigurationV1(config); err != nil {
+ t.Fatalf("expected no error on valid config, got %v", err)
+ }
+ // valid config
+ config = beacon.TransitionConfigurationV1{
+ TerminalTotalDifficulty: (*hexutil.Big)(genesis.Config.TerminalTotalDifficulty),
+ TerminalBlockHash: preMergeBlocks[5].Hash(),
+ TerminalBlockNumber: 6,
+ }
+ if _, err := api.ExchangeTransitionConfigurationV1(config); err != nil {
+ t.Fatalf("expected no error on valid config, got %v", err)
}
}
diff --git a/eth/catalyst/api_types.go b/eth/catalyst/api_types.go
deleted file mode 100644
index 1f6703030a7c..000000000000
--- a/eth/catalyst/api_types.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package catalyst
-
-import (
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
-
-// Structure described at https://github.com/ethereum/execution-apis/pull/74
-type PayloadAttributesV1 struct {
- Timestamp uint64 `json:"timestamp" gencodec:"required"`
- Random common.Hash `json:"random" gencodec:"required"`
- SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
-}
-
-// JSON type overrides for PayloadAttributesV1.
-type payloadAttributesMarshaling struct {
- Timestamp hexutil.Uint64
-}
-
-//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
-
-// Structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
-type ExecutableDataV1 struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom []byte `json:"logsBloom" gencodec:"required"`
- Random common.Hash `json:"random" gencodec:"required"`
- Number uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp uint64 `json:"timestamp" gencodec:"required"`
- ExtraData []byte `json:"extraData" gencodec:"required"`
- BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash" gencodec:"required"`
- Transactions [][]byte `json:"transactions" gencodec:"required"`
-}
-
-// JSON type overrides for executableData.
-type executableDataMarshaling struct {
- Number hexutil.Uint64
- GasLimit hexutil.Uint64
- GasUsed hexutil.Uint64
- Timestamp hexutil.Uint64
- BaseFeePerGas *hexutil.Big
- ExtraData hexutil.Bytes
- LogsBloom hexutil.Bytes
- Transactions []hexutil.Bytes
-}
-
-//go:generate go run github.com/fjl/gencodec -type PayloadResponse -field-override payloadResponseMarshaling -out gen_payload.go
-
-type PayloadResponse struct {
- PayloadID uint64 `json:"payloadId"`
-}
-
-// JSON type overrides for payloadResponse.
-type payloadResponseMarshaling struct {
- PayloadID hexutil.Uint64
-}
-
-type NewBlockResponse struct {
- Valid bool `json:"valid"`
-}
-
-type GenericResponse struct {
- Success bool `json:"success"`
-}
-
-type GenericStringResponse struct {
- Status string `json:"status"`
-}
-
-type ExecutePayloadResponse struct {
- Status string `json:"status"`
- LatestValidHash common.Hash `json:"latestValidHash"`
-}
-
-type ConsensusValidatedParams struct {
- BlockHash common.Hash `json:"blockHash"`
- Status string `json:"status"`
-}
-
-type ForkChoiceResponse struct {
- Status string `json:"status"`
- PayloadID *hexutil.Bytes `json:"payloadId"`
-}
-
-type ForkchoiceStateV1 struct {
- HeadBlockHash common.Hash `json:"headBlockHash"`
- SafeBlockHash common.Hash `json:"safeBlockHash"`
- FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
-}
diff --git a/eth/catalyst/gen_payload.go b/eth/catalyst/gen_payload.go
deleted file mode 100644
index a0b00fcfd8c3..000000000000
--- a/eth/catalyst/gen_payload.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-
-package catalyst
-
-import (
- "encoding/json"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-var _ = (*payloadResponseMarshaling)(nil)
-
-// MarshalJSON marshals as JSON.
-func (p PayloadResponse) MarshalJSON() ([]byte, error) {
- type PayloadResponse struct {
- PayloadID hexutil.Uint64 `json:"payloadId"`
- }
- var enc PayloadResponse
- enc.PayloadID = hexutil.Uint64(p.PayloadID)
- return json.Marshal(&enc)
-}
-
-// UnmarshalJSON unmarshals from JSON.
-func (p *PayloadResponse) UnmarshalJSON(input []byte) error {
- type PayloadResponse struct {
- PayloadID *hexutil.Uint64 `json:"payloadId"`
- }
- var dec PayloadResponse
- if err := json.Unmarshal(input, &dec); err != nil {
- return err
- }
- if dec.PayloadID != nil {
- p.PayloadID = uint64(*dec.PayloadID)
- }
- return nil
-}
diff --git a/eth/catalyst/queue.go b/eth/catalyst/queue.go
new file mode 100644
index 000000000000..ffb2f56bf430
--- /dev/null
+++ b/eth/catalyst/queue.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package catalyst
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/beacon"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// maxTrackedPayloads is the maximum number of prepared payloads the execution
+// engine tracks before evicting old ones. Ideally we should only ever track the
+// latest one; but have a slight wiggle room for non-ideal conditions.
+const maxTrackedPayloads = 10
+
+// maxTrackedHeaders is the maximum number of executed payloads the execution
+// engine tracks before evicting old ones. Ideally we should only ever track the
+// latest one; but have a slight wiggle room for non-ideal conditions.
+const maxTrackedHeaders = 10
+
+// payloadQueueItem represents an id->payload tuple to store until it's retrieved
+// or evicted.
+type payloadQueueItem struct {
+ id beacon.PayloadID
+ payload *beacon.ExecutableDataV1
+}
+
+// payloadQueue tracks the latest handful of constructed payloads to be retrieved
+// by the beacon chain if block production is requested.
+type payloadQueue struct {
+ payloads []*payloadQueueItem
+ lock sync.RWMutex
+}
+
+// newPayloadQueue creates a pre-initialized queue with a fixed number of slots
+// all containing empty items.
+func newPayloadQueue() *payloadQueue {
+ return &payloadQueue{
+ payloads: make([]*payloadQueueItem, maxTrackedPayloads),
+ }
+}
+
+// put inserts a new payload into the queue at the given id.
+func (q *payloadQueue) put(id beacon.PayloadID, data *beacon.ExecutableDataV1) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ copy(q.payloads[1:], q.payloads)
+ q.payloads[0] = &payloadQueueItem{
+ id: id,
+ payload: data,
+ }
+}
+
+// get retrieves a previously stored payload item or nil if it does not exist.
+func (q *payloadQueue) get(id beacon.PayloadID) *beacon.ExecutableDataV1 {
+ q.lock.RLock()
+ defer q.lock.RUnlock()
+
+ for _, item := range q.payloads {
+ if item == nil {
+ return nil // no more items
+ }
+ if item.id == id {
+ return item.payload
+ }
+ }
+ return nil
+}
+
+// headerQueueItem represents an hash->header tuple to store until it's retrieved
+// or evicted.
+type headerQueueItem struct {
+ hash common.Hash
+ header *types.Header
+}
+
+// headerQueue tracks the latest handful of constructed headers to be retrieved
+// by the beacon chain if block production is requested.
+type headerQueue struct {
+ headers []*headerQueueItem
+ lock sync.RWMutex
+}
+
+// newHeaderQueue creates a pre-initialized queue with a fixed number of slots
+// all containing empty items.
+func newHeaderQueue() *headerQueue {
+ return &headerQueue{
+ headers: make([]*headerQueueItem, maxTrackedHeaders),
+ }
+}
+
+// put inserts a new header into the queue at the given hash.
+func (q *headerQueue) put(hash common.Hash, data *types.Header) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ copy(q.headers[1:], q.headers)
+ q.headers[0] = &headerQueueItem{
+ hash: hash,
+ header: data,
+ }
+}
+
+// get retrieves a previously stored header item or nil if it does not exist.
+func (q *headerQueue) get(hash common.Hash) *types.Header {
+ q.lock.RLock()
+ defer q.lock.RUnlock()
+
+ for _, item := range q.headers {
+ if item == nil {
+ return nil // no more items
+ }
+ if item.hash == hash {
+ return item.header
+ }
+ }
+ return nil
+}
diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go
new file mode 100644
index 000000000000..d8ea58c239fc
--- /dev/null
+++ b/eth/downloader/beaconsync.go
@@ -0,0 +1,308 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package downloader
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// beaconBackfiller is the chain and state backfilling that can be commenced once
+// the skeleton syncer has successfully reverse downloaded all the headers up to
+// the genesis block or an existing header in the database. Its operation is fully
+// directed by the skeleton sync's head/tail events.
+type beaconBackfiller struct {
+ downloader *Downloader // Downloader to direct via this callback implementation
+ syncMode SyncMode // Sync mode to use for backfilling the skeleton chains
+ success func() // Callback to run on successful sync cycle completion
+ filling bool // Flag whether the downloader is backfilling or not
+ started chan struct{} // Notification channel whether the downloader inited
+ lock sync.Mutex // Mutex protecting the sync lock
+}
+
+// newBeaconBackfiller is a helper method to create the backfiller.
+func newBeaconBackfiller(dl *Downloader, success func()) backfiller {
+ return &beaconBackfiller{
+ downloader: dl,
+ success: success,
+ }
+}
+
+// suspend cancels any background downloader threads.
+func (b *beaconBackfiller) suspend() {
+ // If no filling is running, don't waste cycles
+ b.lock.Lock()
+ filling := b.filling
+ started := b.started
+ b.lock.Unlock()
+
+ if !filling {
+ return
+ }
+ // A previous filling should be running, though it may happen that it hasn't
+ // yet started (being done on a new goroutine). Many concurrent beacon head
+ // announcements can lead to sync start/stop thrashing. In that case we need
+ // to wait for initialization before we can safely cancel it. It is safe to
+ // read this channel multiple times, it gets closed on startup.
+ <-started
+
+ // Now that we're sure the downloader successfully started up, we can cancel
+ // it safely without running the risk of data races.
+ b.downloader.Cancel()
+}
+
+// resume starts the downloader threads for backfilling state and chain data.
+func (b *beaconBackfiller) resume() {
+ b.lock.Lock()
+ if b.filling {
+ // If a previous filling cycle is still running, just ignore this start
+ // request. // TODO(karalabe): We should make this channel driven
+ b.lock.Unlock()
+ return
+ }
+ b.filling = true
+ b.started = make(chan struct{})
+ mode := b.syncMode
+ b.lock.Unlock()
+
+ // Start the backfilling on its own thread since the downloader does not have
+ // its own lifecycle runloop.
+ go func() {
+ // Set the backfiller to non-filling when download completes
+ defer func() {
+ b.lock.Lock()
+ b.filling = false
+ b.lock.Unlock()
+ }()
+ // If the downloader fails, report an error as in beacon chain mode there
+ // should be no errors as long as the chain we're syncing to is valid.
+ if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil {
+ log.Error("Beacon backfilling failed", "err", err)
+ return
+ }
+ // Synchronization succeeded. Since this happens async, notify the outer
+ // context to disable snap syncing and enable transaction propagation.
+ if b.success != nil {
+ b.success()
+ }
+ }()
+}
+
+// setMode updates the sync mode from the current one to the requested one. If
+// there's an active sync in progress, it will be cancelled and restarted.
+func (b *beaconBackfiller) setMode(mode SyncMode) {
+ // Update the old sync mode and track if it was changed
+ b.lock.Lock()
+ updated := b.syncMode != mode
+ filling := b.filling
+ b.syncMode = mode
+ b.lock.Unlock()
+
+ // If the sync mode was changed mid-sync, restart. This should never ever
+ // really happen, we just handle it to detect programming errors.
+ if !updated || !filling {
+ return
+ }
+ log.Error("Downloader sync mode changed mid-run", "old", mode.String(), "new", mode.String())
+ b.suspend()
+ b.resume()
+}
+
+// BeaconSync is the post-merge version of the chain synchronization, where the
+// chain is not downloaded from genesis onward, rather from trusted head announces
+// backwards.
+//
+// Internally backfilling and state sync is done the same way, but the header
+// retrieval and scheduling is replaced.
+func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header) error {
+ return d.beaconSync(mode, head, true)
+}
+
+// BeaconExtend is an optimistic version of BeaconSync, where an attempt is made
+// to extend the current beacon chain with a new header, but in case of a mismatch,
+// the old sync will not be terminated and reorged, rather the new head is dropped.
+//
+// This is useful if a beacon client is feeding us large chunks of payloads to run,
+// but is not setting the head after each.
+func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error {
+ return d.beaconSync(mode, head, false)
+}
+
+// beaconSync is the post-merge version of the chain synchronization, where the
+// chain is not downloaded from genesis onward, rather from trusted head announces
+// backwards.
+//
+// Internally backfilling and state sync is done the same way, but the header
+// retrieval and scheduling is replaced.
+func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, force bool) error {
+ // When the downloader starts a sync cycle, it needs to be aware of the sync
+ // mode to use (full, snap). To keep the skeleton chain oblivious, inject the
+ // mode into the backfiller directly.
+ //
+ // Super crazy dangerous type cast. Should be fine (TM), we're only using a
+ // different backfiller implementation for skeleton tests.
+ d.skeleton.filler.(*beaconBackfiller).setMode(mode)
+
+ // Signal the skeleton sync to switch to a new head, however it wants
+ if err := d.skeleton.Sync(head, force); err != nil {
+ return err
+ }
+ return nil
+}
+
+// findBeaconAncestor tries to locate the common ancestor link of the local chain
+// and the beacon chain just requested. In the general case when our node was in
+// sync and on the correct chain, checking the top N links should already get us
+// a match. In the rare scenario when we ended up on a long reorganisation (i.e.
+// none of the head links match), we do a binary search to find the ancestor.
+func (d *Downloader) findBeaconAncestor() (uint64, error) {
+ // Figure out the current local head position
+ var chainHead *types.Header
+
+ switch d.getMode() {
+ case FullSync:
+ chainHead = d.blockchain.CurrentBlock().Header()
+ case SnapSync:
+ chainHead = d.blockchain.CurrentFastBlock().Header()
+ default:
+ chainHead = d.lightchain.CurrentHeader()
+ }
+ number := chainHead.Number.Uint64()
+
+ // Retrieve the skeleton bounds and ensure they are linked to the local chain
+ beaconHead, beaconTail, err := d.skeleton.Bounds()
+ if err != nil {
+ // This is a programming error. The chain backfiller was called with an
+ // invalid beacon sync state. Ideally we would panic here, but erroring
+ // gives us at least a remote chance to recover. It's still a big fault!
+ log.Error("Failed to retrieve beacon bounds", "err", err)
+ return 0, err
+ }
+ var linked bool
+ switch d.getMode() {
+ case FullSync:
+ linked = d.blockchain.HasBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
+ case SnapSync:
+ linked = d.blockchain.HasFastBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
+ default:
+ linked = d.blockchain.HasHeader(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
+ }
+ if !linked {
+ // This is a programming error. The chain backfiller was called with a
+ // tail that's not linked to the local chain. Whilst this should never
+ // happen, there might be some weirdnesses if beacon sync backfilling
+ // races with the user (or beacon client) calling setHead. Whilst panic
+ // would be the ideal thing to do, it is safer long term to attempt a
+ // recovery and fix any noticed issue after the fact.
+ log.Error("Beacon sync linkup unavailable", "number", beaconTail.Number.Uint64()-1, "hash", beaconTail.ParentHash)
+ return 0, fmt.Errorf("beacon linkup unavailable locally: %d [%x]", beaconTail.Number.Uint64()-1, beaconTail.ParentHash)
+ }
+ // Binary search to find the ancestor
+ start, end := beaconTail.Number.Uint64()-1, number
+ if number := beaconHead.Number.Uint64(); end > number {
+ // This shouldn't really happen in a healty network, but if the consensus
+ // clients feeds us a shorter chain as the canonical, we should not attempt
+ // to access non-existent skeleton items.
+ log.Warn("Beacon head lower than local chain", "beacon", number, "local", end)
+ end = number
+ }
+ for start+1 < end {
+ // Split our chain interval in two, and request the hash to cross check
+ check := (start + end) / 2
+
+ h := d.skeleton.Header(check)
+ n := h.Number.Uint64()
+
+ var known bool
+ switch d.getMode() {
+ case FullSync:
+ known = d.blockchain.HasBlock(h.Hash(), n)
+ case SnapSync:
+ known = d.blockchain.HasFastBlock(h.Hash(), n)
+ default:
+ known = d.lightchain.HasHeader(h.Hash(), n)
+ }
+ if !known {
+ end = check
+ continue
+ }
+ start = check
+ }
+ return start, nil
+}
+
+// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling
+// until sync errors or is finished.
+func (d *Downloader) fetchBeaconHeaders(from uint64) error {
+ head, _, err := d.skeleton.Bounds()
+ if err != nil {
+ return err
+ }
+ for {
+ // Retrieve a batch of headers and feed it to the header processor
+ var (
+ headers = make([]*types.Header, 0, maxHeadersProcess)
+ hashes = make([]common.Hash, 0, maxHeadersProcess)
+ )
+ for i := 0; i < maxHeadersProcess && from <= head.Number.Uint64(); i++ {
+ headers = append(headers, d.skeleton.Header(from))
+ hashes = append(hashes, headers[i].Hash())
+ from++
+ }
+ if len(headers) > 0 {
+ log.Trace("Scheduling new beacon headers", "count", len(headers), "from", from-uint64(len(headers)))
+ select {
+ case d.headerProcCh <- &headerTask{
+ headers: headers,
+ hashes: hashes,
+ }:
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ }
+ // If we still have headers to import, loop and keep pushing them
+ if from <= head.Number.Uint64() {
+ continue
+ }
+ // If the pivot block is committed, signal header sync termination
+ if atomic.LoadInt32(&d.committed) == 1 {
+ select {
+ case d.headerProcCh <- nil:
+ return nil
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ }
+ // State sync still going, wait a bit for new headers and retry
+ log.Trace("Pivot not yet committed, waiting...")
+ select {
+ case <-time.After(fsHeaderContCheck):
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ head, _, err = d.skeleton.Bounds()
+ if err != nil {
+ return err
+ }
+ }
+}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 28ad18b81579..ebd414105f42 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -30,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -79,6 +78,7 @@ var (
errCanceled = errors.New("syncing canceled (requested)")
errTooOld = errors.New("peer's protocol version too old")
errNoAncestorFound = errors.New("no common ancestor found")
+ ErrMergeTransition = errors.New("legacy sync reached the merge")
)
// peerDropFn is a callback type for dropping a peer detected as malicious.
@@ -123,6 +123,9 @@ type Downloader struct {
// Channels
headerProcCh chan *headerTask // Channel to feed the header processor new tasks
+ // Skeleton sync
+ skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode)
+
// State sync
pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
pivotLock sync.RWMutex // Lock protecting pivot header reads from updates
@@ -201,7 +204,7 @@ type BlockChain interface {
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
+func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader {
if lightchain == nil {
lightchain = chain
}
@@ -219,6 +222,8 @@ func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain Bl
SnapSyncer: snap.NewSyncer(stateDb),
stateSyncStart: make(chan *stateSync),
}
+ dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
+
go dl.stateFetcher()
return dl
}
@@ -318,10 +323,10 @@ func (d *Downloader) UnregisterPeer(id string) error {
return nil
}
-// Synchronise tries to sync up our local block chain with a remote peer, both
+// LegacySync tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
-func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
- err := d.synchronise(id, head, td, mode)
+func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error {
+ err := d.synchronise(id, head, td, ttd, mode, false, nil)
switch err {
case nil, errBusy, errCanceled:
@@ -340,6 +345,9 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
}
return err
}
+ if errors.Is(err, ErrMergeTransition) {
+ return err // This is an expected fault, don't keep printing it in a spin-loop
+ }
log.Warn("Synchronisation failed, retrying", "err", err)
return err
}
@@ -347,7 +355,21 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
-func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
+func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error {
+ // The beacon header syncer is async. It will start this synchronization and
+ // will continue doing other tasks. However, if synchornization needs to be
+ // cancelled, the syncer needs to know if we reached the startup point (and
+ // inited the cancel cannel) or not yet. Make sure that we'll signal even in
+ // case of a failure.
+ if beaconPing != nil {
+ defer func() {
+ select {
+ case <-beaconPing: // already notified
+ default:
+ close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing)
+ }
+ }()
+ }
// Mock out the synchronisation if testing
if d.synchroniseMock != nil {
return d.synchroniseMock(id, hash)
@@ -362,9 +384,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
log.Info("Block synchronisation started")
}
- // If snap sync was requested, create the snap scheduler and switch to snap
- // sync mode. Long term we could drop snap sync or merge the two together,
- // but until snap becomes prevalent, we should support both. TODO(karalabe).
if mode == SnapSync {
// Snap sync uses the snapshot namespace to store potentially flakey data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean-
@@ -402,11 +421,17 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
atomic.StoreUint32(&d.mode, uint32(mode))
// Retrieve the origin peer and initiate the downloading process
- p := d.peers.Peer(id)
- if p == nil {
- return errUnknownPeer
+ var p *peerConnection
+ if !beaconMode { // Beacon mode doesn't need a peer to sync from
+ p = d.peers.Peer(id)
+ if p == nil {
+ return errUnknownPeer
+ }
+ }
+ if beaconPing != nil {
+ close(beaconPing)
}
- return d.syncWithPeer(p, hash, td)
+ return d.syncWithPeer(p, hash, td, ttd, beaconMode)
}
func (d *Downloader) getMode() SyncMode {
@@ -415,7 +440,7 @@ func (d *Downloader) getMode() SyncMode {
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
-func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
+func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) {
d.mux.Post(StartEvent{})
defer func() {
// reset on error
@@ -426,33 +451,57 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
d.mux.Post(DoneEvent{latest})
}
}()
- if p.version < eth.ETH66 {
- return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH66)
- }
mode := d.getMode()
- log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
+ if !beaconMode {
+ log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
+ } else {
+ log.Debug("Backfilling with the network", "mode", mode)
+ }
defer func(start time.Time) {
log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
}(time.Now())
// Look up the sync boundaries: the common ancestor and the target block
- latest, pivot, err := d.fetchHead(p)
- if err != nil {
- return err
+ var latest, pivot *types.Header
+ if !beaconMode {
+ // In legacy mode, use the master peer to retrieve the headers from
+ latest, pivot, err = d.fetchHead(p)
+ if err != nil {
+ return err
+ }
+ } else {
+ // In beacon mode, user the skeleton chain to retrieve the headers from
+ latest, _, err = d.skeleton.Bounds()
+ if err != nil {
+ return err
+ }
+ if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
+ pivot = d.skeleton.Header(latest.Number.Uint64() - uint64(fsMinFullBlocks))
+ }
}
+ // If no pivot block was returned, the head is below the min full block
+ // threshold (i.e. new chain). In that case we won't really snap sync
+ // anyway, but still need a valid pivot block to avoid some code hitting
+ // nil panics on access.
if mode == SnapSync && pivot == nil {
- // If no pivot block was returned, the head is below the min full block
- // threshold (i.e. new chain). In that case we won't really snap sync
- // anyway, but still need a valid pivot block to avoid some code hitting
- // nil panics on an access.
pivot = d.blockchain.CurrentBlock().Header()
}
height := latest.Number.Uint64()
- origin, err := d.findAncestor(p, latest)
- if err != nil {
- return err
+ var origin uint64
+ if !beaconMode {
+ // In legacy mode, reach out to the network and find the ancestor
+ origin, err = d.findAncestor(p, latest)
+ if err != nil {
+ return err
+ }
+ } else {
+ // In beacon mode, use the skeleton chain for the ancestor lookup
+ origin, err = d.findBeaconAncestor()
+ if err != nil {
+ return err
+ }
}
d.syncStatsLock.Lock()
if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
@@ -523,11 +572,19 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
if d.syncInitHook != nil {
d.syncInitHook(origin, height)
}
+ var headerFetcher func() error
+ if !beaconMode {
+ // In legacy mode, headers are retrieved from the network
+ headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }
+ } else {
+ // In beacon mode, headers are served by the skeleton syncer
+ headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) }
+ }
fetchers := []func() error{
- func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }, // Headers are always retrieved
- func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and snap sync
- func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync
- func() error { return d.processHeaders(origin+1, td) },
+ headerFetcher, // Headers are always retrieved
+ func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync
+ func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync
+ func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) },
}
if mode == SnapSync {
d.pivotLock.Lock()
@@ -536,7 +593,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
} else if mode == FullSync {
- fetchers = append(fetchers, d.processFullSyncContent)
+ fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) })
}
return d.spawnSync(fetchers)
}
@@ -602,6 +659,9 @@ func (d *Downloader) Terminate() {
case <-d.quitCh:
default:
close(d.quitCh)
+
+ // Terminate the internal beacon syncer
+ d.skeleton.Terminate()
}
d.quitLock.Unlock()
@@ -1127,7 +1187,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
log.Debug("Filling up skeleton", "from", from)
d.queue.ScheduleSkeleton(from, skeleton)
- err := d.concurrentFetch((*headerQueue)(d))
+ err := d.concurrentFetch((*headerQueue)(d), false)
if err != nil {
log.Debug("Skeleton fill failed", "err", err)
}
@@ -1141,9 +1201,9 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
-func (d *Downloader) fetchBodies(from uint64) error {
+func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error {
log.Debug("Downloading block bodies", "origin", from)
- err := d.concurrentFetch((*bodyQueue)(d))
+ err := d.concurrentFetch((*bodyQueue)(d), beaconMode)
log.Debug("Block body download terminated", "err", err)
return err
@@ -1152,9 +1212,9 @@ func (d *Downloader) fetchBodies(from uint64) error {
// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
-func (d *Downloader) fetchReceipts(from uint64) error {
+func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error {
log.Debug("Downloading receipts", "origin", from)
- err := d.concurrentFetch((*receiptQueue)(d))
+ err := d.concurrentFetch((*receiptQueue)(d), beaconMode)
log.Debug("Receipt download terminated", "err", err)
return err
@@ -1163,7 +1223,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
-func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
+func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error {
// Keep a count of uncertain headers to roll back
var (
rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis)
@@ -1211,35 +1271,40 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
case <-d.cancelCh:
}
}
- // If no headers were retrieved at all, the peer violated its TD promise that it had a
- // better chain compared to ours. The only exception is if its promised blocks were
- // already imported by other means (e.g. fetcher):
- //
- // R , L : Both at block 10
- // R: Mine block 11, and propagate it to L
- // L: Queue block 11 for import
- // L: Notice that R's head and TD increased compared to ours, start sync
- // L: Import of block 11 finishes
- // L: Sync begins, and finds common ancestor at 11
- // L: Request new headers up from 11 (R's TD was higher, it must have something)
- // R: Nothing to give
- if mode != LightSync {
- head := d.blockchain.CurrentBlock()
- if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
- return errStallingPeer
+ // If we're in legacy sync mode, we need to check total difficulty
+ // violations from malicious peers. That is not needed in beacon
+ // mode and we can skip to terminating sync.
+ if !beaconMode {
+ // If no headers were retrieved at all, the peer violated its TD promise that it had a
+ // better chain compared to ours. The only exception is if its promised blocks were
+ // already imported by other means (e.g. fetcher):
+ //
+ // R , L : Both at block 10
+ // R: Mine block 11, and propagate it to L
+ // L: Queue block 11 for import
+ // L: Notice that R's head and TD increased compared to ours, start sync
+ // L: Import of block 11 finishes
+ // L: Sync begins, and finds common ancestor at 11
+ // L: Request new headers up from 11 (R's TD was higher, it must have something)
+ // R: Nothing to give
+ if mode != LightSync {
+ head := d.blockchain.CurrentBlock()
+ if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
+ return errStallingPeer
+ }
}
- }
- // If snap or light syncing, ensure promised headers are indeed delivered. This is
- // needed to detect scenarios where an attacker feeds a bad pivot and then bails out
- // of delivering the post-pivot blocks that would flag the invalid content.
- //
- // This check cannot be executed "as is" for full imports, since blocks may still be
- // queued for processing when the header download completes. However, as long as the
- // peer gave us something useful, we're already happy/progressed (above check).
- if mode == SnapSync || mode == LightSync {
- head := d.lightchain.CurrentHeader()
- if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
- return errStallingPeer
+ // If snap or light syncing, ensure promised headers are indeed delivered. This is
+ // needed to detect scenarios where an attacker feeds a bad pivot and then bails out
+ // of delivering the post-pivot blocks that would flag the invalid content.
+ //
+ // This check cannot be executed "as is" for full imports, since blocks may still be
+ // queued for processing when the header download completes. However, as long as the
+ // peer gave us something useful, we're already happy/progressed (above check).
+ if mode == SnapSync || mode == LightSync {
+ head := d.lightchain.CurrentHeader()
+ if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
+ return errStallingPeer
+ }
}
}
// Disable any rollback and return
@@ -1281,24 +1346,64 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
- if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
- rollbackErr = err
-
- // If some headers were inserted, track them as uncertain
- if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
- rollback = chunkHeaders[0].Number.Uint64()
+ // Although the received headers might be all valid, a legacy
+ // PoW/PoA sync must not accept post-merge headers. Make sure
+ // that any transition is rejected at this point.
+ var (
+ rejected []*types.Header
+ td *big.Int
+ )
+ if !beaconMode && ttd != nil {
+ td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1)
+ if td == nil {
+ // This should never really happen, but handle gracefully for now
+ log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash)
+ return fmt.Errorf("%w: parent TD missing", errInvalidChain)
+ }
+ for i, header := range chunkHeaders {
+ td = new(big.Int).Add(td, header.Difficulty)
+ if td.Cmp(ttd) >= 0 {
+ // Terminal total difficulty reached, allow the last header in
+ if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 {
+ chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:]
+ if len(rejected) > 0 {
+ // Make a nicer user log as to the first TD truly rejected
+ td = new(big.Int).Add(td, rejected[0].Difficulty)
+ }
+ } else {
+ chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:]
+ }
+ break
+ }
}
- log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
- return fmt.Errorf("%w: %v", errInvalidChain, err)
}
- // All verifications passed, track all headers within the alloted limits
- if mode == SnapSync {
- head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
- if head-rollback > uint64(fsHeaderSafetyNet) {
- rollback = head - uint64(fsHeaderSafetyNet)
- } else {
- rollback = 1
+ if len(chunkHeaders) > 0 {
+ if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
+ rollbackErr = err
+
+ // If some headers were inserted, track them as uncertain
+ if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
+ rollback = chunkHeaders[0].Number.Uint64()
+ }
+ log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
+ // All verifications passed, track all headers within the allowed limits
+ if mode == SnapSync {
+ head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
+ if head-rollback > uint64(fsHeaderSafetyNet) {
+ rollback = head - uint64(fsHeaderSafetyNet)
+ } else {
+ rollback = 1
+ }
+ }
+ }
+ if len(rejected) != 0 {
+ // Merge threshold reached, stop importing, but don't roll back
+ rollback = 0
+
+ log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
+ return ErrMergeTransition
}
}
// Unless we're doing light chains, schedule the headers for associated content retrieval
@@ -1342,7 +1447,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
}
// processFullSyncContent takes fetch results from the queue and imports them into the chain.
-func (d *Downloader) processFullSyncContent() error {
+func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error {
for {
results := d.queue.Results(true)
if len(results) == 0 {
@@ -1351,9 +1456,44 @@ func (d *Downloader) processFullSyncContent() error {
if d.chainInsertHook != nil {
d.chainInsertHook(results)
}
+ // Although the received blocks might be all valid, a legacy PoW/PoA sync
+ // must not accept post-merge blocks. Make sure that pre-merge blocks are
+ // imported, but post-merge ones are rejected.
+ var (
+ rejected []*fetchResult
+ td *big.Int
+ )
+ if !beaconMode && ttd != nil {
+ td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1)
+ if td == nil {
+ // This should never really happen, but handle gracefully for now
+ log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash)
+ return fmt.Errorf("%w: parent TD missing", errInvalidChain)
+ }
+ for i, result := range results {
+ td = new(big.Int).Add(td, result.Header.Difficulty)
+ if td.Cmp(ttd) >= 0 {
+ // Terminal total difficulty reached, allow the last block in
+ if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 {
+ results, rejected = results[:i+1], results[i+1:]
+ if len(rejected) > 0 {
+ // Make a nicer user log as to the first TD truly rejected
+ td = new(big.Int).Add(td, rejected[0].Header.Difficulty)
+ }
+ } else {
+ results, rejected = results[:i], results[i:]
+ }
+ break
+ }
+ }
+ }
if err := d.importBlockResults(results); err != nil {
return err
}
+ if len(rejected) != 0 {
+ log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd)
+ return ErrMergeTransition
+ }
}
}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 70c6a51215b5..6989252c11ac 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -75,7 +75,7 @@ func newTester() *downloadTester {
chain: chain,
peers: make(map[string]*downloadTesterPeer),
}
- tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer)
+ tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil)
return tester
}
@@ -96,7 +96,7 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
}
// Synchronise with the chosen peer and ensure proper cleanup afterwards
- err := dl.downloader.synchronise(id, head.Hash(), td, mode)
+ err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
select {
case <-dl.downloader.cancelCh:
// Ok, downloader fully cancelled after sync cycle
@@ -971,7 +971,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Simulate a synchronisation and check the required result
tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
- tester.downloader.Synchronise(id, tester.chain.Genesis().Hash(), big.NewInt(1000), FullSync)
+ tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
if _, ok := tester.peers[id]; !ok != tt.drop {
t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
}
diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go
index 4bade2b4c3dd..a0aa197175a3 100644
--- a/eth/downloader/fetchers_concurrent.go
+++ b/eth/downloader/fetchers_concurrent.go
@@ -76,7 +76,7 @@ type typedQueue interface {
// concurrentFetch iteratively downloads scheduled block parts, taking available
// peers, reserving a chunk of fetch requests for each and waiting for delivery
// or timeouts.
-func (d *Downloader) concurrentFetch(queue typedQueue) error {
+func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error {
// Create a delivery channel to accept responses from all peers
responses := make(chan *eth.Response)
@@ -127,7 +127,7 @@ func (d *Downloader) concurrentFetch(queue typedQueue) error {
finished := false
for {
// Short circuit if we lost all our peers
- if d.peers.Len() == 0 {
+ if d.peers.Len() == 0 && !beaconMode {
return errNoPeers
}
// If there's nothing more to fetch, wait or terminate
@@ -209,7 +209,7 @@ func (d *Downloader) concurrentFetch(queue typedQueue) error {
}
// Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error
- if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 {
+ if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 && !beaconMode {
return errPeersUnavailable
}
}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 324fdb9cd51f..d74d23e74d55 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -294,19 +294,19 @@ func (ps *peerSet) AllPeers() []*peerConnection {
// peerCapacitySort implements sort.Interface.
// It sorts peer connections by capacity (descending).
type peerCapacitySort struct {
- p []*peerConnection
- tp []int
+ peers []*peerConnection
+ caps []int
}
func (ps *peerCapacitySort) Len() int {
- return len(ps.p)
+ return len(ps.peers)
}
func (ps *peerCapacitySort) Less(i, j int) bool {
- return ps.tp[i] > ps.tp[j]
+ return ps.caps[i] > ps.caps[j]
}
func (ps *peerCapacitySort) Swap(i, j int) {
- ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
- ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
+ ps.peers[i], ps.peers[j] = ps.peers[j], ps.peers[i]
+ ps.caps[i], ps.caps[j] = ps.caps[j], ps.caps[i]
}
diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go
new file mode 100644
index 000000000000..bebf273da52e
--- /dev/null
+++ b/eth/downloader/skeleton.go
@@ -0,0 +1,1063 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package downloader
+
+import (
+ "encoding/json"
+ "errors"
+ "math/rand"
+ "sort"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// scratchHeaders is the number of headers to store in a scratch space to allow
+// concurrent downloads. A header is about 0.5KB in size, so there is no worry
+// about using too much memory. The only catch is that we can only validate gaps
+// afer they're linked to the head, so the bigger the scratch space, the larger
+// potential for invalid headers.
+//
+// The current scratch space of 131072 headers is expected to use 64MB RAM.
+const scratchHeaders = 131072
+
+// requestHeaders is the number of header to request from a remote peer in a single
+// network packet. Although the skeleton downloader takes into consideration peer
+// capacities when picking idlers, the packet size was decided to remain constant
+// since headers are relatively small and it's easier to work with fixed batches
+// vs. dynamic interval fillings.
+const requestHeaders = 512
+
+// errSyncLinked is an internal helper error to signal that the current sync
+// cycle linked up to the genesis block, this the skeleton syncer should ping
+// the backfiller to resume. Since we already have that logic on sync start,
+// piggie-back on that instead of 2 entrypoints.
+var errSyncLinked = errors.New("sync linked")
+
+// errSyncMerged is an internal helper error to signal that the current sync
+// cycle merged with a previously aborted subchain, thus the skeleton syncer
+// should abort and restart with the new state.
+var errSyncMerged = errors.New("sync merged")
+
+// errSyncReorged is an internal helper error to signal that the head chain of
+// the current sync cycle was (partially) reorged, thus the skeleton syncer
+// should abort and restart with the new state.
+var errSyncReorged = errors.New("sync reorged")
+
+// errTerminated is returned if the sync mechanism was terminated for this run of
+// the process. This is usually the case when Geth is shutting down and some events
+// might still be propagating.
+var errTerminated = errors.New("terminated")
+
+// errReorgDenied is returned if an attempt is made to extend the beacon chain
+// with a new header, but it does not link up to the existing sync.
+var errReorgDenied = errors.New("non-forced head reorg denied")
+
+func init() {
+ // Tuning parameters is nice, but the scratch space must be assignable in
+ // full to peers. It's a useless cornercase to support a dangling half-group.
+ if scratchHeaders%requestHeaders != 0 {
+ panic("Please make scratchHeaders divisible by requestHeaders")
+ }
+}
+
+// subchain is a contiguous header chain segment that is backed by the database,
+// but may not be linked to the live chain. The skeleton downloader may produce
+// a new one of these every time it is restarted until the subchain grows large
+// enough to connect with a previous subchain.
+//
+// The subchains use the exact same database namespace and are not disjoint from
+// each other. As such, extending one to overlap the other entails reducing the
+// second one first. This combined buffer model is used to avoid having to move
+// data on disk when two subchains are joined together.
+type subchain struct {
+ Head uint64 // Block number of the newest header in the subchain
+ Tail uint64 // Block number of the oldest header in the subchain
+ Next common.Hash // Block hash of the next oldest header in the subchain
+}
+
+// skeletonProgress is a database entry to allow suspending and resuming a chain
+// sync. As the skeleton header chain is downloaded backwards, restarts can and
+// will produce temporarily disjoint subchains. There is no way to restart a
+// suspended skeleton sync without prior knowledge of all prior suspension points.
+type skeletonProgress struct {
+ Subchains []*subchain // Disjoint subchains downloaded until now
+}
+
+// headUpdate is a notification that the beacon sync should switch to a new target.
+// The update might request whether to forcefully change the target, or only try to
+// extend it and fail if it's not possible.
+type headUpdate struct {
+ header *types.Header // Header to update the sync target to
+ force bool // Whether to force the update or only extend if possible
+ errc chan error // Channel to signal acceptance of the new head
+}
+
+// headerRequest tracks a pending header request to ensure responses are to
+// actual requests and to validate any security constraints.
+//
+// Concurrency note: header requests and responses are handled concurrently from
+// the main runloop to allow Keccak256 hash verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. subchains).
+// That is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type headerRequest struct {
+ peer string // Peer to which this request is assigned
+ id uint64 // Request ID of this request
+
+ deliver chan *headerResponse // Channel to deliver successful response on
+ revert chan *headerRequest // Channel to deliver request failure on
+ cancel chan struct{} // Channel to track sync cancellation
+ stale chan struct{} // Channel to signal the request was dropped
+
+ head uint64 // Head number of the requested batch of headers
+}
+
+// headerResponse is an already verified remote response to a header request.
+type headerResponse struct {
+ peer *peerConnection // Peer from which this response originates
+ reqid uint64 // Request ID that this response fulfils
+ headers []*types.Header // Chain of headers
+}
+
+// backfiller is a callback interface through which the skeleton sync can tell
+// the downloader that it should suspend or resume backfilling on specific head
+// events (e.g. suspend on forks or gaps, resume on successful linkups).
+type backfiller interface {
+ // suspend requests the backfiller to abort any running full or snap sync
+ // based on the skeleton chain as it might be invalid. The backfiller should
+ // gracefully handle multiple consecutive suspends without a resume, even
+ // on initial sartup.
+ suspend()
+
+ // resume requests the backfiller to start running fill or snap sync based on
+ // the skeleton chain as it has successfully been linked. Appending new heads
+ // to the end of the chain will not result in suspend/resume cycles.
+ resume()
+}
+
+// skeleton represents a header chain synchronized after the merge where blocks
+// aren't validated any more via PoW in a forward fashion, rather are dictated
+// and extended at the head via the beacon chain and backfilled on the original
+// Ethereum block sync protocol.
+//
+// Since the skeleton is grown backwards from head to genesis, it is handled as
+// a separate entity, not mixed in with the logical sequential transition of the
+// blocks. Once the skeleton is connected to an existing, validated chain, the
+// headers will be moved into the main downloader for filling and execution.
+//
+// Opposed to the original Ethereum block synchronization which is trustless (and
+// uses a master peer to minimize the attack surface), post-merge block sync starts
+// from a trusted head. As such, there is no need for a master peer any more and
+// headers can be requested fully concurrently (though some batches might be
+// discarded if they don't link up correctly).
+//
+// Although a skeleton is part of a sync cycle, it is not recreated, rather stays
+// alive throughout the lifetime of the downloader. This allows it to be extended
+// concurrently with the sync cycle, since extensions arrive from an API surface,
+// not from within (vs. legacy Ethereum sync).
+//
+// Since the skeleton tracks the entire header chain until it is consumed by the
+// forward block filling, it needs 0.5KB/block storage. At current mainnet sizes
+// this is only possible with a disk backend. Since the skeleton is separate from
+// the node's header chain, storing the headers ephemerally until sync finishes
+// is wasted disk IO, but it's a price we're going to pay to keep things simple
+// for now.
+type skeleton struct {
+ db ethdb.Database // Database backing the skeleton
+ filler backfiller // Chain syncer suspended/resumed by head events
+
+ peers *peerSet // Set of peers we can sync from
+ idles map[string]*peerConnection // Set of idle peers in the current sync cycle
+ drop peerDropFn // Drops a peer for misbehaving
+
+ progress *skeletonProgress // Sync progress tracker for resumption and metrics
+ started time.Time // Timestamp when the skeleton syncer was created
+ logged time.Time // Timestamp when progress was last logged to the user
+ pulled uint64 // Number of headers downloaded in this run
+
+ scratchSpace []*types.Header // Scratch space to accumulate headers in (first = recent)
+ scratchOwners []string // Peer IDs owning chunks of the scratch space (pend or delivered)
+ scratchHead uint64 // Block number of the first item in the scratch space
+
+ requests map[uint64]*headerRequest // Header requests currently running
+
+ headEvents chan *headUpdate // Notification channel for new heads
+ terminate chan chan error // Termination channel to abort sync
+ terminated chan struct{} // Channel to signal that the syner is dead
+
+ // Callback hooks used during testing
+ syncStarting func() // callback triggered after a sync cycle is inited but before started
+}
+
+// newSkeleton creates a new sync skeleton that tracks a potentially dangling
+// header chain until it's linked into an existing set of blocks.
+func newSkeleton(db ethdb.Database, peers *peerSet, drop peerDropFn, filler backfiller) *skeleton {
+ sk := &skeleton{
+ db: db,
+ filler: filler,
+ peers: peers,
+ drop: drop,
+ requests: make(map[uint64]*headerRequest),
+ headEvents: make(chan *headUpdate),
+ terminate: make(chan chan error),
+ terminated: make(chan struct{}),
+ }
+ go sk.startup()
+ return sk
+}
+
+// startup is an initial background loop which waits for an event to start or
+// tear the syncer down. This is required to make the skeleton sync loop once
+// per process but at the same time not start before the beacon chain announces
+// a new (existing) head.
+func (s *skeleton) startup() {
+ // Close a notification channel so anyone sending us events will know if the
+ // sync loop was torn down for good.
+ defer close(s.terminated)
+
+ // Wait for startup or teardown. This wait might loop a few times if a beacon
+ // client requests sync head extensions, but not forced reorgs (i.e. they are
+ // giving us new payloads without setting a starting head initially).
+ for {
+ select {
+ case errc := <-s.terminate:
+ // No head was announced but Geth is shutting down
+ errc <- nil
+ return
+
+ case event := <-s.headEvents:
+ // New head announced, start syncing to it, looping every time a current
+ // cycle is terminated due to a chain event (head reorg, old chain merge).
+ if !event.force {
+ event.errc <- errors.New("forced head needed for startup")
+ continue
+ }
+ event.errc <- nil // forced head accepted for startup
+ head := event.header
+ s.started = time.Now()
+
+ for {
+ // If the sync cycle terminated or was terminated, propagate up when
+ // higher layers request termination. There's no fancy explicit error
+ // signalling as the sync loop should never terminate (TM).
+ newhead, err := s.sync(head)
+ switch {
+ case err == errSyncLinked:
+ // Sync cycle linked up to the genesis block. Tear down the loop
+ // and restart it so, it can properly notify the backfiller. Don't
+ // account a new head.
+ head = nil
+
+ case err == errSyncMerged:
+ // Subchains were merged, we just need to reinit the internal
+ // start to continue on the tail of the merged chain. Don't
+ // announce a new head,
+ head = nil
+
+ case err == errSyncReorged:
+ // The subchain being synced got modified at the head in a
+ // way that requires resyncing it. Restart sync with the new
+ // head to force a cleanup.
+ head = newhead
+
+ case err == errTerminated:
+ // Sync was requested to be terminated from within, stop and
+ // return (no need to pass a message, was already done internally)
+ return
+
+ default:
+ // Sync either successfully terminated or failed with an unhandled
+ // error. Abort and wait until Geth requests a termination.
+ errc := <-s.terminate
+ errc <- err
+ return
+ }
+ }
+ }
+ }
+}
+
+// Terminate tears down the syncer indefinitely.
+func (s *skeleton) Terminate() error {
+ // Request termination and fetch any errors
+ errc := make(chan error)
+ s.terminate <- errc
+ err := <-errc
+
+ // Wait for full shutdown (not necessary, but cleaner)
+ <-s.terminated
+ return err
+}
+
+// Sync starts or resumes a previous sync cycle to download and maintain a reverse
+// header chain starting at the head and leading towards genesis to an available
+// ancestor.
+//
+// This method does not block, rather it just waits until the syncer receives the
+// fed header. What the syncer does with it is the syncer's problem.
+func (s *skeleton) Sync(head *types.Header, force bool) error {
+ log.Trace("New skeleton head announced", "number", head.Number, "hash", head.Hash(), "force", force)
+ errc := make(chan error)
+
+ select {
+ case s.headEvents <- &headUpdate{header: head, force: force, errc: errc}:
+ return <-errc
+ case <-s.terminated:
+ return errTerminated
+ }
+}
+
+// sync is the internal version of Sync that executes a single sync cycle, either
+// until some termination condition is reached, or until the current cycle merges
+// with a previously aborted run.
+func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
+ // If we're continuing a previous merge interrupt, just access the existing
+ // old state without initing from disk.
+ if head == nil {
+ head = rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[0].Head)
+ } else {
+ // Otherwise, initialize the sync, trimming and previous leftovers until
+ // we're consistent with the newly requested chain head
+ s.initSync(head)
+ }
+ // Create the scratch space to fill with concurrently downloaded headers
+ s.scratchSpace = make([]*types.Header, scratchHeaders)
+ defer func() { s.scratchSpace = nil }() // don't hold on to references after sync
+
+ s.scratchOwners = make([]string, scratchHeaders/requestHeaders)
+ defer func() { s.scratchOwners = nil }() // don't hold on to references after sync
+
+ s.scratchHead = s.progress.Subchains[0].Tail - 1 // tail must not be 0!
+
+ // If the sync is already done, resume the backfiller. When the loop stops,
+ // terminate the backfiller too.
+ linked := len(s.progress.Subchains) == 1 &&
+ rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
+ rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead)
+ if linked {
+ s.filler.resume()
+ }
+ defer s.filler.suspend()
+
+ // Create a set of unique channels for this sync cycle. We need these to be
+ // ephemeral so a data race doesn't accidentally deliver something stale on
+ // a persistent channel across syncs (yup, this happened)
+ var (
+ requestFails = make(chan *headerRequest)
+ responses = make(chan *headerResponse)
+ )
+ cancel := make(chan struct{})
+ defer close(cancel)
+
+ log.Debug("Starting reverse header sync cycle", "head", head.Number, "hash", head.Hash(), "cont", s.scratchHead)
+
+ // Whether sync completed or not, disregard any future packets
+ defer func() {
+ log.Debug("Terminating reverse header sync cycle", "head", head.Number, "hash", head.Hash(), "cont", s.scratchHead)
+ s.requests = make(map[uint64]*headerRequest)
+ }()
+
+ // Start tracking idle peers for task assignments
+ peering := make(chan *peeringEvent, 64) // arbitrary buffer, just some burst protection
+
+ peeringSub := s.peers.SubscribeEvents(peering)
+ defer peeringSub.Unsubscribe()
+
+ s.idles = make(map[string]*peerConnection)
+ for _, peer := range s.peers.AllPeers() {
+ s.idles[peer.id] = peer
+ }
+ // Nofity any tester listening for startup events
+ if s.syncStarting != nil {
+ s.syncStarting()
+ }
+ for {
+ // Something happened, try to assign new tasks to any idle peers
+ if !linked {
+ s.assignTasks(responses, requestFails, cancel)
+ }
+ // Wait for something to happen
+ select {
+ case event := <-peering:
+ // A peer joined or left, the tasks queue and allocations need to be
+ // checked for potential assignment or reassignment
+ peerid := event.peer.id
+ if event.join {
+ log.Debug("Joining skeleton peer", "id", peerid)
+ s.idles[peerid] = event.peer
+ } else {
+ log.Debug("Leaving skeleton peer", "id", peerid)
+ s.revertRequests(peerid)
+ delete(s.idles, peerid)
+ }
+
+ case errc := <-s.terminate:
+ errc <- nil
+ return nil, errTerminated
+
+ case event := <-s.headEvents:
+ // New head was announced, try to integrate it. If successful, nothing
+ // needs to be done as the head simply extended the last range. For now
+ // we don't seamlessly integrate reorgs to keep things simple. If the
+ // network starts doing many mini reorgs, it might be worthwhile handling
+ // a limited depth without an error.
+ if reorged := s.processNewHead(event.header, event.force); reorged {
+ // If a reorg is needed, and we're forcing the new head, signal
+ // the syncer to tear down and start over. Otherwise, drop the
+ // non-force reorg.
+ if event.force {
+ event.errc <- nil // forced head reorg accepted
+ return event.header, errSyncReorged
+ }
+ event.errc <- errReorgDenied
+ continue
+ }
+ event.errc <- nil // head extension accepted
+
+ // New head was integrated into the skeleton chain. If the backfiller
+ // is still running, it will pick it up. If it already terminated,
+ // a new cycle needs to be spun up.
+ if linked {
+ s.filler.resume()
+ }
+
+ case req := <-requestFails:
+ s.revertRequest(req)
+
+ case res := <-responses:
+ // Process the batch of headers. If though processing we managed to
+ // link the current subchain to a previously downloaded one, abort the
+ // sync and restart with the merged subchains.
+ //
+ // If we managed to link to the existing local chain or genesis block,
+ // abort sync altogether.
+ linked, merged := s.processResponse(res)
+ if linked {
+ log.Debug("Beacon sync linked to local chain")
+ return nil, errSyncLinked
+ }
+ if merged {
+ log.Debug("Beacon sync merged subchains")
+ return nil, errSyncMerged
+ }
+ // We still have work to do, loop and repeat
+ }
+ }
+}
+
+// initSync attempts to get the skeleton sync into a consistent state wrt any
+// past state on disk and the newly requested head to sync to. If the new head
+// is nil, the method will return and continue from the previous head.
+func (s *skeleton) initSync(head *types.Header) {
+ // Extract the head number, we'll need it all over
+ number := head.Number.Uint64()
+
+ // Retrieve the previously saved sync progress
+ if status := rawdb.ReadSkeletonSyncStatus(s.db); len(status) > 0 {
+ s.progress = new(skeletonProgress)
+ if err := json.Unmarshal(status, s.progress); err != nil {
+ log.Error("Failed to decode skeleton sync status", "err", err)
+ } else {
+ // Previous sync was available, print some continuation logs
+ for _, subchain := range s.progress.Subchains {
+ log.Debug("Restarting skeleton subchain", "head", subchain.Head, "tail", subchain.Tail)
+ }
+ // Create a new subchain for the head (unless the last can be extended),
+ // trimming anything it would overwrite
+ headchain := &subchain{
+ Head: number,
+ Tail: number,
+ Next: head.ParentHash,
+ }
+ for len(s.progress.Subchains) > 0 {
+ // If the last chain is above the new head, delete altogether
+ lastchain := s.progress.Subchains[0]
+ if lastchain.Tail >= headchain.Tail {
+ log.Debug("Dropping skeleton subchain", "head", lastchain.Head, "tail", lastchain.Tail)
+ s.progress.Subchains = s.progress.Subchains[1:]
+ continue
+ }
+ // Otherwise truncate the last chain if needed and abort trimming
+ if lastchain.Head >= headchain.Tail {
+ log.Debug("Trimming skeleton subchain", "oldhead", lastchain.Head, "newhead", headchain.Tail-1, "tail", lastchain.Tail)
+ lastchain.Head = headchain.Tail - 1
+ }
+ break
+ }
+ // If the last subchain can be extended, we're lucky. Otherwise create
+ // a new subchain sync task.
+ var extended bool
+ if n := len(s.progress.Subchains); n > 0 {
+ lastchain := s.progress.Subchains[0]
+ if lastchain.Head == headchain.Tail-1 {
+ lasthead := rawdb.ReadSkeletonHeader(s.db, lastchain.Head)
+ if lasthead.Hash() == head.ParentHash {
+ log.Debug("Extended skeleton subchain with new head", "head", headchain.Tail, "tail", lastchain.Tail)
+ lastchain.Head = headchain.Tail
+ extended = true
+ }
+ }
+ }
+ if !extended {
+ log.Debug("Created new skeleton subchain", "head", number, "tail", number)
+ s.progress.Subchains = append([]*subchain{headchain}, s.progress.Subchains...)
+ }
+ // Update the database with the new sync stats and insert the new
+ // head header. We won't delete any trimmed skeleton headers since
+ // those will be outside the index space of the many subchains and
+ // the database space will be reclaimed eventually when processing
+ // blocks above the current head (TODO(karalabe): don't forget).
+ batch := s.db.NewBatch()
+
+ rawdb.WriteSkeletonHeader(batch, head)
+ s.saveSyncStatus(batch)
+
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write skeleton sync status", "err", err)
+ }
+ return
+ }
+ }
+ // Either we've failed to decode the previus state, or there was none. Start
+ // a fresh sync with a single subchain represented by the currently sent
+ // chain head.
+ s.progress = &skeletonProgress{
+ Subchains: []*subchain{
+ {
+ Head: number,
+ Tail: number,
+ Next: head.ParentHash,
+ },
+ },
+ }
+ batch := s.db.NewBatch()
+
+ rawdb.WriteSkeletonHeader(batch, head)
+ s.saveSyncStatus(batch)
+
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write initial skeleton sync status", "err", err)
+ }
+ log.Debug("Created initial skeleton subchain", "head", number, "tail", number)
+}
+
+// saveSyncStatus marshals the remaining sync tasks into leveldb.
+func (s *skeleton) saveSyncStatus(db ethdb.KeyValueWriter) {
+ status, err := json.Marshal(s.progress)
+ if err != nil {
+ panic(err) // This can only fail during implementation
+ }
+ rawdb.WriteSkeletonSyncStatus(db, status)
+}
+
+// processNewHead does the internal shuffling for a new head marker and either
+// accepts and integrates it into the skeleton or requests a reorg. Upon reorg,
+// the syncer will tear itself down and restart with a fresh head. It is simpler
+// to reconstruct the sync state than to mutate it and hope for the best.
+func (s *skeleton) processNewHead(head *types.Header, force bool) bool {
+ // If the header cannot be inserted without interruption, return an error for
+ // the outer loop to tear down the skeleton sync and restart it
+ number := head.Number.Uint64()
+
+ lastchain := s.progress.Subchains[0]
+ if lastchain.Tail >= number {
+ if force {
+ log.Warn("Beacon chain reorged", "tail", lastchain.Tail, "newHead", number)
+ }
+ return true
+ }
+ if lastchain.Head+1 < number {
+ if force {
+ log.Warn("Beacon chain gapped", "head", lastchain.Head, "newHead", number)
+ }
+ return true
+ }
+ if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash {
+ if force {
+ log.Warn("Beacon chain forked", "ancestor", parent.Number, "hash", parent.Hash(), "want", head.ParentHash)
+ }
+ return true
+ }
+ // New header seems to be in the last subchain range. Unwind any extra headers
+ // from the chain tip and insert the new head. We won't delete any trimmed
+ // skeleton headers since those will be outside the index space of the many
+ // subchains and the database space will be reclaimed eventually when processing
+ // blocks above the current head (TODO(karalabe): don't forget).
+ batch := s.db.NewBatch()
+
+ rawdb.WriteSkeletonHeader(batch, head)
+ lastchain.Head = number
+ s.saveSyncStatus(batch)
+
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write skeleton sync status", "err", err)
+ }
+ return false
+}
+
+// assignTasks attempts to match idle peers to pending header retrievals.
+func (s *skeleton) assignTasks(success chan *headerResponse, fail chan *headerRequest, cancel chan struct{}) {
+ // Sort the peers by download capacity to use faster ones if many available
+ idlers := &peerCapacitySort{
+ peers: make([]*peerConnection, 0, len(s.idles)),
+ caps: make([]int, 0, len(s.idles)),
+ }
+ targetTTL := s.peers.rates.TargetTimeout()
+ for _, peer := range s.idles {
+ idlers.peers = append(idlers.peers, peer)
+ idlers.caps = append(idlers.caps, s.peers.rates.Capacity(peer.id, eth.BlockHeadersMsg, targetTTL))
+ }
+ if len(idlers.peers) == 0 {
+ return
+ }
+ sort.Sort(idlers)
+
+ // Find header regions not yet downloading and fill them
+ for task, owner := range s.scratchOwners {
+ // If we're out of idle peers, stop assigning tasks
+ if len(idlers.peers) == 0 {
+ return
+ }
+ // Skip any tasks already filling
+ if owner != "" {
+ continue
+ }
+ // If we've reached the genesis, stop assigning tasks
+ if uint64(task*requestHeaders) >= s.scratchHead {
+ return
+ }
+ // Found a task and have peers available, assign it
+ idle := idlers.peers[0]
+
+ idlers.peers = idlers.peers[1:]
+ idlers.caps = idlers.caps[1:]
+
+ // Matched a pending task to an idle peer, allocate a unique request id
+ var reqid uint64
+ for {
+ reqid = uint64(rand.Int63())
+ if reqid == 0 {
+ continue
+ }
+ if _, ok := s.requests[reqid]; ok {
+ continue
+ }
+ break
+ }
+ // Generate the network query and send it to the peer
+ req := &headerRequest{
+ peer: idle.id,
+ id: reqid,
+ deliver: success,
+ revert: fail,
+ cancel: cancel,
+ stale: make(chan struct{}),
+ head: s.scratchHead - uint64(task*requestHeaders),
+ }
+ s.requests[reqid] = req
+ delete(s.idles, idle.id)
+
+ // Generate the network query and send it to the peer
+ go s.executeTask(idle, req)
+
+ // Inject the request into the task to block further assignments
+ s.scratchOwners[task] = idle.id
+ }
+}
+
+// executeTask executes a single fetch request, blocking until either a result
+// arrives or a timeouts / cancellation is triggered. The method should be run
+// on its own goroutine and will deliver on the requested channels.
+func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {
+ start := time.Now()
+ resCh := make(chan *eth.Response)
+
+ // Figure out how many headers to fetch. Usually this will be a full batch,
+ // but for the very tail of the chain, trim the request to the number left.
+ // Since nodes may or may not return the genesis header for a batch request,
+ // don't even request it. The parent hash of block #1 is enough to link.
+ requestCount := requestHeaders
+ if req.head < requestHeaders {
+ requestCount = int(req.head)
+ }
+ peer.log.Trace("Fetching skeleton headers", "from", req.head, "count", requestCount)
+ netreq, err := peer.peer.RequestHeadersByNumber(req.head, requestCount, 0, true, resCh)
+ if err != nil {
+ peer.log.Trace("Failed to request headers", "err", err)
+ s.scheduleRevertRequest(req)
+ return
+ }
+ defer netreq.Close()
+
+ // Wait until the response arrives, the request is cancelled or times out
+ ttl := s.peers.rates.TargetTimeout()
+
+ timeoutTimer := time.NewTimer(ttl)
+ defer timeoutTimer.Stop()
+
+ select {
+ case <-req.cancel:
+ peer.log.Debug("Header request cancelled")
+ s.scheduleRevertRequest(req)
+
+ case <-timeoutTimer.C:
+ // Header retrieval timed out, update the metrics
+ peer.log.Warn("Header request timed out, dropping peer", "elapsed", ttl)
+ headerTimeoutMeter.Mark(1)
+ s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, 0, 0)
+ s.scheduleRevertRequest(req)
+
+ // At this point we either need to drop the offending peer, or we need a
+ // mechanism to allow waiting for the response and not cancel it. For now
+ // lets go with dropping since the header sizes are deterministic and the
+ // beacon sync runs exclusive (downloader is idle) so there should be no
+ // other load to make timeouts probable. If we notice that timeouts happen
+ // more often than we'd like, we can introduce a tracker for the requests
+ // gone stale and monitor them. However, in that case too, we need a way
+ // to protect against malicious peers never responding, so it would need
+ // a second, hard-timeout mechanism.
+ s.drop(peer.id)
+
+ case res := <-resCh:
+ // Headers successfully retrieved, update the metrics
+ headers := *res.Res.(*eth.BlockHeadersPacket)
+
+ headerReqTimer.Update(time.Since(start))
+ s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers))
+
+ // Cross validate the headers with the requests
+ switch {
+ case len(headers) == 0:
+ // No headers were delivered, reject the response and reschedule
+ peer.log.Debug("No headers delivered")
+ res.Done <- errors.New("no headers delivered")
+ s.scheduleRevertRequest(req)
+
+ case headers[0].Number.Uint64() != req.head:
+ // Header batch anchored at non-requested number
+ peer.log.Debug("Invalid header response head", "have", headers[0].Number, "want", req.head)
+ res.Done <- errors.New("invalid header batch anchor")
+ s.scheduleRevertRequest(req)
+
+ case req.head >= requestHeaders && len(headers) != requestHeaders:
+ // Invalid number of non-genesis headers delivered, reject the response and reschedule
+ peer.log.Debug("Invalid non-genesis header count", "have", len(headers), "want", requestHeaders)
+ res.Done <- errors.New("not enough non-genesis headers delivered")
+ s.scheduleRevertRequest(req)
+
+ case req.head < requestHeaders && uint64(len(headers)) != req.head:
+ // Invalid number of genesis headers delivered, reject the response and reschedule
+ peer.log.Debug("Invalid genesis header count", "have", len(headers), "want", headers[0].Number.Uint64())
+ res.Done <- errors.New("not enough genesis headers delivered")
+ s.scheduleRevertRequest(req)
+
+ default:
+ // Packet seems structurally valid, check hash progression and if it
+ // is correct too, deliver for storage
+ for i := 0; i < len(headers)-1; i++ {
+ if headers[i].ParentHash != headers[i+1].Hash() {
+ peer.log.Debug("Invalid hash progression", "index", i, "wantparenthash", headers[i].ParentHash, "haveparenthash", headers[i+1].Hash())
+ res.Done <- errors.New("invalid hash progression")
+ s.scheduleRevertRequest(req)
+ return
+ }
+ }
+ // Hash chain is valid. The delivery might still be junk as we're
+ // downloading batches concurrently (so no way to link the headers
+ // until gaps are filled); in that case, we'll nuke the peer when
+ // we detect the fault.
+ res.Done <- nil
+
+ select {
+ case req.deliver <- &headerResponse{
+ peer: peer,
+ reqid: req.id,
+ headers: headers,
+ }:
+ case <-req.cancel:
+ }
+ }
+ }
+}
+
+// revertRequests locates all the currently pending reuqests from a particular
+// peer and reverts them, rescheduling for others to fulfill.
+func (s *skeleton) revertRequests(peer string) {
+ // Gather the requests first, revertals need the lock too
+ var requests []*headerRequest
+ for _, req := range s.requests {
+ if req.peer == peer {
+ requests = append(requests, req)
+ }
+ }
+ // Revert all the requests matching the peer
+ for _, req := range requests {
+ s.revertRequest(req)
+ }
+}
+
+// scheduleRevertRequest asks the event loop to clean up a request and return
+// all failed retrieval tasks to the scheduler for reassignment.
+func (s *skeleton) scheduleRevertRequest(req *headerRequest) {
+ select {
+ case req.revert <- req:
+ // Sync event loop notified
+ case <-req.cancel:
+ // Sync cycle got cancelled
+ case <-req.stale:
+ // Request already reverted
+ }
+}
+
+// revertRequest cleans up a request and returns all failed retrieval tasks to
+// the scheduler for reassignment.
+//
+// Note, this needs to run on the event runloop thread to reschedule to idle peers.
+// On peer threads, use scheduleRevertRequest.
+func (s *skeleton) revertRequest(req *headerRequest) {
+ log.Trace("Reverting header request", "peer", req.peer, "reqid", req.id)
+ select {
+ case <-req.stale:
+ log.Trace("Header request already reverted", "peer", req.peer, "reqid", req.id)
+ return
+ default:
+ }
+ close(req.stale)
+
+ // Remove the request from the tracked set
+ delete(s.requests, req.id)
+
+ // Remove the request from the tracked set and mark the task as not-pending,
+ // ready for resheduling
+ s.scratchOwners[(s.scratchHead-req.head)/requestHeaders] = ""
+}
+
+func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged bool) {
+ res.peer.log.Trace("Processing header response", "head", res.headers[0].Number, "hash", res.headers[0].Hash(), "count", len(res.headers))
+
+ // Whether the response is valid, we can mark the peer as idle and notify
+ // the scheduler to assign a new task. If the response is invalid, we'll
+ // drop the peer in a bit.
+ s.idles[res.peer.id] = res.peer
+
+ // Ensure the response is for a valid request
+ if _, ok := s.requests[res.reqid]; !ok {
+ // Some internal accounting is broken. A request either times out or it
+ // gets fulfilled successfully. It should not be possible to deliver a
+ // response to a non-existing request.
+ res.peer.log.Error("Unexpected header packet")
+ return false, false
+ }
+ delete(s.requests, res.reqid)
+
+ // Insert the delivered headers into the scratch space independent of the
+ // content or continuation; those will be validated in a moment
+ head := res.headers[0].Number.Uint64()
+ copy(s.scratchSpace[s.scratchHead-head:], res.headers)
+
+ // If there's still a gap in the head of the scratch space, abort
+ if s.scratchSpace[0] == nil {
+ return false, false
+ }
+ // Try to consume any head headers, validating the boundary conditions
+ batch := s.db.NewBatch()
+ for s.scratchSpace[0] != nil {
+ // Next batch of headers available, cross-reference with the subchain
+ // we are extending and either accept or discard
+ if s.progress.Subchains[0].Next != s.scratchSpace[0].Hash() {
+ // Print a log messages to track what's going on
+ tail := s.progress.Subchains[0].Tail
+ want := s.progress.Subchains[0].Next
+ have := s.scratchSpace[0].Hash()
+
+ log.Warn("Invalid skeleton headers", "peer", s.scratchOwners[0], "number", tail-1, "want", want, "have", have)
+
+ // The peer delivered junk, or at least not the subchain we are
+ // syncing to. Free up the scratch space and assignment, reassign
+ // and drop the original peer.
+ for i := 0; i < requestHeaders; i++ {
+ s.scratchSpace[i] = nil
+ }
+ s.drop(s.scratchOwners[0])
+ s.scratchOwners[0] = ""
+ break
+ }
+ // Scratch delivery matches required subchain, deliver the batch of
+ // headers and push the subchain forward
+ var consumed int
+ for _, header := range s.scratchSpace[:requestHeaders] {
+ if header != nil { // nil when the genesis is reached
+ consumed++
+
+ rawdb.WriteSkeletonHeader(batch, header)
+ s.pulled++
+
+ s.progress.Subchains[0].Tail--
+ s.progress.Subchains[0].Next = header.ParentHash
+
+ // If we've reached an existing block in the chain, stop retrieving
+ // headers. Note, if we want to support light clients with the same
+ // code we'd need to switch here based on the downloader mode. That
+ // said, there's no such functionality for now, so don't complicate.
+ //
+ // In the case of full sync it would be enough to check for the body,
+ // but even a full syncing node will generate a receipt once block
+ // processing is done, so it's just one more "needless" check.
+ var (
+ hasBody = rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1)
+ hasReceipt = rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1)
+ )
+ if hasBody && hasReceipt {
+ linked = true
+ break
+ }
+ }
+ }
+ head := s.progress.Subchains[0].Head
+ tail := s.progress.Subchains[0].Tail
+ next := s.progress.Subchains[0].Next
+
+ log.Trace("Primary subchain extended", "head", head, "tail", tail, "next", next)
+
+ // If the beacon chain was linked to the local chain, completely swap out
+ // all internal progress and abort header synchronization.
+ if linked {
+ // Note, linking into the local chain should also mean that there are
+ // no leftover subchains, but just in case there's some junk due to
+ // strange conditions or bugs, clean up all internal state.
+ if len(s.progress.Subchains) > 1 {
+ log.Error("Cleaning up leftovers after beacon link")
+ s.progress.Subchains = s.progress.Subchains[:1]
+ }
+ break
+ }
+ // Batch of headers consumed, shift the download window forward
+ copy(s.scratchSpace, s.scratchSpace[requestHeaders:])
+ for i := 0; i < requestHeaders; i++ {
+ s.scratchSpace[scratchHeaders-i-1] = nil
+ }
+ copy(s.scratchOwners, s.scratchOwners[1:])
+ s.scratchOwners[scratchHeaders/requestHeaders-1] = ""
+
+ s.scratchHead -= uint64(consumed)
+
+ // If the subchain extended into the next subchain, we need to handle
+ // the overlap. Since there could be many overlaps (come on), do this
+ // in a loop.
+ for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail {
+ // Extract some stats from the second subchain
+ head := s.progress.Subchains[1].Head
+ tail := s.progress.Subchains[1].Tail
+ next := s.progress.Subchains[1].Next
+
+ // Since we just overwrote part of the next subchain, we need to trim
+ // its head independent of matching or mismatching content
+ if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail {
+ // Fully overwritten, get rid of the subchain as a whole
+ log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next)
+ s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
+ continue
+ } else {
+ // Partially overwritten, trim the head to the overwritten size
+ log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next)
+ s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1
+ }
+ // If the old subchain is an extension of the new one, merge the two
+ // and let the skeleton syncer restart (to clean internal state)
+ if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next {
+ log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next)
+ s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail
+ s.progress.Subchains[0].Next = s.progress.Subchains[1].Next
+
+ s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
+ merged = true
+ }
+ }
+ // If subchains were merged, all further available headers in the scratch
+ // space are invalid since we skipped ahead. Stop processing the scratch
+ // space to avoid dropping peers thinking they delivered invalid data.
+ if merged {
+ break
+ }
+ }
+ s.saveSyncStatus(batch)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write skeleton headers and progress", "err", err)
+ }
+ // Print a progress report making the UX a bit nicer
+ left := s.progress.Subchains[0].Tail - 1
+ if linked {
+ left = 0
+ }
+ if time.Since(s.logged) > 8*time.Second || left == 0 {
+ s.logged = time.Now()
+
+ if s.pulled == 0 {
+ log.Info("Beacon sync starting", "left", left)
+ } else {
+ eta := float64(time.Since(s.started)) / float64(s.pulled) * float64(left)
+ log.Info("Syncing beacon headers", "downloaded", s.pulled, "left", left, "eta", common.PrettyDuration(eta))
+ }
+ }
+ return linked, merged
+}
+
+// Bounds retrieves the current head and tail tracked by the skeleton syncer.
+// This method is used by the backfiller, whose life cycle is controlled by the
+// skeleton syncer.
+//
+// Note, the method will not use the internal state of the skeleton, but will
+// rather blindly pull stuff from the database. This is fine, because the back-
+// filler will only run when the skeleton chain is fully downloaded and stable.
+// There might be new heads appended, but those are atomic from the perspective
+// of this method. Any head reorg will first tear down the backfiller and only
+// then make the modification.
+func (s *skeleton) Bounds() (head *types.Header, tail *types.Header, err error) {
+ // Read the current sync progress from disk and figure out the current head.
+ // Although there's a lot of error handling here, these are mostly as sanity
+ // checks to avoid crashing if a programming error happens. These should not
+ // happen in live code.
+ status := rawdb.ReadSkeletonSyncStatus(s.db)
+ if len(status) == 0 {
+ return nil, nil, errors.New("beacon sync not yet started")
+ }
+ progress := new(skeletonProgress)
+ if err := json.Unmarshal(status, progress); err != nil {
+ return nil, nil, err
+ }
+ head = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Head)
+ tail = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Tail)
+
+ return head, tail, nil
+}
+
+// Header retrieves a specific header tracked by the skeleton syncer. This method
+// is meant to be used by the backfiller, whose life cycle is controlled by the
+// skeleton syncer.
+//
+// Note, outside the permitted runtimes, this method might return nil results and
+// subsequent calls might return headers from different chains.
+func (s *skeleton) Header(number uint64) *types.Header {
+ return rawdb.ReadSkeletonHeader(s.db, number)
+}
diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go
new file mode 100644
index 000000000000..cbe0d51d3716
--- /dev/null
+++ b/eth/downloader/skeleton_test.go
@@ -0,0 +1,896 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package downloader
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "os"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// hookedBackfiller is a tester backfiller with all interface methods mocked and
+// hooked so tests can implement only the things they need.
+type hookedBackfiller struct {
+ // suspendHook is an optional hook to be called when the filler is requested
+ // to be suspended.
+ suspendHook func()
+
+ // resumeHook is an optional hook to be called when the filler is requested
+ // to be resumed.
+ resumeHook func()
+}
+
+// newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
+// essentially acting as a noop.
+func newHookedBackfiller() backfiller {
+ return new(hookedBackfiller)
+}
+
+// suspend requests the backfiller to abort any running full or snap sync
+// based on the skeleton chain as it might be invalid. The backfiller should
+// gracefully handle multiple consecutive suspends without a resume, even
+// on initial sartup.
+func (hf *hookedBackfiller) suspend() {
+ if hf.suspendHook != nil {
+ hf.suspendHook()
+ }
+}
+
+// resume requests the backfiller to start running fill or snap sync based on
+// the skeleton chain as it has successfully been linked. Appending new heads
+// to the end of the chain will not result in suspend/resume cycles.
+func (hf *hookedBackfiller) resume() {
+ if hf.resumeHook != nil {
+ hf.resumeHook()
+ }
+}
+
+// skeletonTestPeer is a mock peer that can only serve header requests from a
+// pre-perated header chain (which may be arbitrarily wrong for testing).
+//
+// Requesting anything else from these peers will hard panic. Note, do *not*
+// implement any other methods. We actually want to make sure that the skeleton
+// syncer only depends on - and will only ever do so - on header requests.
+type skeletonTestPeer struct {
+ id string // Unique identifier of the mock peer
+ headers []*types.Header // Headers to serve when requested
+
+ serve func(origin uint64) []*types.Header // Hook to allow custom responses
+
+ served uint64 // Number of headers served by this peer
+ dropped uint64 // Flag whether the peer was dropped (stop responding)
+}
+
+// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
+func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
+ return &skeletonTestPeer{
+ id: id,
+ headers: headers,
+ }
+}
+
+// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with,
+// and sets an optional serve hook that can return headers for delivery instead
+// of the predefined chain. Useful for emulating malicious behavior that would
+// otherwise require dedicated peer types.
+func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
+ return &skeletonTestPeer{
+ id: id,
+ headers: headers,
+ serve: serve,
+ }
+}
+
+// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
+// origin; associated with a particular peer in the download tester. The returned
+// function can be used to retrieve batches of headers from the particular peer.
+func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
+ // Since skeleton test peer are in-memory mocks, dropping the does not make
+ // them inaccepssible. As such, check a local `dropped` field to see if the
+ // peer has been dropped and should not respond any more.
+ if atomic.LoadUint64(&p.dropped) != 0 {
+ return nil, errors.New("peer already dropped")
+ }
+ // Skeleton sync retrieves batches of headers going backward without gaps.
+ // This ensures we can follow a clean parent progression without any reorg
+ // hiccups. There is no need for any other type of header retrieval, so do
+ // panic if there's such a request.
+ if !reverse || skip != 0 {
+ // Note, if other clients want to do these kinds of requests, it's their
+ // problem, it will still work. We just don't want *us* making complicated
+ // requests without a very strong reason to.
+ panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
+ }
+ // If the skeleton syncer requests the genesis block, panic. Whilst it could
+ // be considered a valid request, our code specifically should not request it
+ // ever since we want to link up headers to an existing local chain, which at
+ // worse will be the genesis.
+ if int64(origin)-int64(amount) < 0 {
+ panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
+ }
+ // To make concurrency easier, the skeleton syncer always requests fixed size
+ // batches of headers. Panic if the peer is requested an amount other than the
+ // configured batch size (apart from the request leading to the genesis).
+ if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
+ panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
+ }
+ // Simple reverse header retrieval. Fill from the peer's chain and return.
+ // If the tester has a serve hook set, try to use that before falling back
+ // to the default behavior.
+ var headers []*types.Header
+ if p.serve != nil {
+ headers = p.serve(origin)
+ }
+ if headers == nil {
+ headers = make([]*types.Header, 0, amount)
+ if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
+ for i := 0; i < amount; i++ {
+ // Consider nil headers as a form of attack and withhold them. Nil
+ // cannot be decoded from RLP, so it's not possible to produce an
+ // attack by sending/receiving those over eth.
+ header := p.headers[int(origin)-i]
+ if header == nil {
+ continue
+ }
+ headers = append(headers, header)
+ }
+ }
+ }
+ atomic.AddUint64(&p.served, uint64(len(headers)))
+
+ hashes := make([]common.Hash, len(headers))
+ for i, header := range headers {
+ hashes[i] = header.Hash()
+ }
+ // Deliver the headers to the downloader
+ req := ð.Request{
+ Peer: p.id,
+ }
+ res := ð.Response{
+ Req: req,
+ Res: (*eth.BlockHeadersPacket)(&headers),
+ Meta: hashes,
+ Time: 1,
+ Done: make(chan error),
+ }
+ go func() {
+ sink <- res
+ if err := <-res.Done; err != nil {
+ log.Warn("Skeleton test peer response rejected", "err", err)
+ atomic.AddUint64(&p.dropped, 1)
+ }
+ }()
+ return req, nil
+}
+
+func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
+ panic("skeleton sync must not request the remote head")
+}
+
+func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
+ panic("skeleton sync must not request headers by hash")
+}
+
+func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
+ panic("skeleton sync must not request block bodies")
+}
+
+func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
+ panic("skeleton sync must not request receipts")
+}
+
+// Tests various sync initialzations based on previous leftovers in the database
+// and announced heads.
+func TestSkeletonSyncInit(t *testing.T) {
+ // Create a few key headers
+ var (
+ genesis = &types.Header{Number: big.NewInt(0)}
+ block49 = &types.Header{Number: big.NewInt(49)}
+ block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
+ block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
+ )
+ tests := []struct {
+ headers []*types.Header // Database content (beside the genesis)
+ oldstate []*subchain // Old sync state with various interrupted subchains
+ head *types.Header // New head header to announce to reorg to
+ newstate []*subchain // Expected sync state after the reorg
+ }{
+ // Completely empty database with only the genesis set. The sync is expected
+ // to create a single subchain with the requested head.
+ {
+ head: block50,
+ newstate: []*subchain{{Head: 50, Tail: 50}},
+ },
+ // Empty database with only the genesis set with a leftover empty sync
+ // progess. This is a synthetic case, just for the sake of covering things.
+ {
+ oldstate: []*subchain{},
+ head: block50,
+ newstate: []*subchain{{Head: 50, Tail: 50}},
+ },
+ // A single leftover subchain is present, older than the new head. The
+ // old subchain should be left as is and a new one appended to the sync
+ // status.
+ {
+ oldstate: []*subchain{{Head: 10, Tail: 5}},
+ head: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ {Head: 10, Tail: 5},
+ },
+ },
+ // Multiple leftover subchains are present, older than the new head. The
+ // old subchains should be left as is and a new one appended to the sync
+ // status.
+ {
+ oldstate: []*subchain{
+ {Head: 20, Tail: 15},
+ {Head: 10, Tail: 5},
+ },
+ head: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ {Head: 20, Tail: 15},
+ {Head: 10, Tail: 5},
+ },
+ },
+ // A single leftover subchain is present, newer than the new head. The
+ // newer subchain should be deleted and a fresh one created for the head.
+ {
+ oldstate: []*subchain{{Head: 65, Tail: 60}},
+ head: block50,
+ newstate: []*subchain{{Head: 50, Tail: 50}},
+ },
+ // Multiple leftover subchain is present, newer than the new head. The
+ // newer subchains should be deleted and a fresh one created for the head.
+ {
+ oldstate: []*subchain{
+ {Head: 75, Tail: 70},
+ {Head: 65, Tail: 60},
+ },
+ head: block50,
+ newstate: []*subchain{{Head: 50, Tail: 50}},
+ },
+
+ // Two leftover subchains are present, one fully older and one fully
+ // newer than the announced head. The head should delete the newer one,
+ // keeping the older one.
+ {
+ oldstate: []*subchain{
+ {Head: 65, Tail: 60},
+ {Head: 10, Tail: 5},
+ },
+ head: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ {Head: 10, Tail: 5},
+ },
+ },
+ // Multiple leftover subchains are present, some fully older and some
+ // fully newer than the announced head. The head should delete the newer
+ // ones, keeping the older ones.
+ {
+ oldstate: []*subchain{
+ {Head: 75, Tail: 70},
+ {Head: 65, Tail: 60},
+ {Head: 20, Tail: 15},
+ {Head: 10, Tail: 5},
+ },
+ head: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ {Head: 20, Tail: 15},
+ {Head: 10, Tail: 5},
+ },
+ },
+ // A single leftover subchain is present and the new head is extending
+ // it with one more header. We expect the subchain head to be pushed
+ // forward.
+ {
+ headers: []*types.Header{block49},
+ oldstate: []*subchain{{Head: 49, Tail: 5}},
+ head: block50,
+ newstate: []*subchain{{Head: 50, Tail: 5}},
+ },
+ // A single leftover subchain is present and although the new head does
+ // extend it number wise, the hash chain does not link up. We expect a
+ // new subchain to be created for the dangling head.
+ {
+ headers: []*types.Header{block49B},
+ oldstate: []*subchain{{Head: 49, Tail: 5}},
+ head: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ {Head: 49, Tail: 5},
+ },
+ },
+ // A single leftover subchain is present. A new head is announced that
+ // links into the middle of it, correctly anchoring into an existing
+ // header. We expect the old subchain to be truncated and extended with
+ // the new head.
+ {
+ headers: []*types.Header{block49},
+ oldstate: []*subchain{{Head: 100, Tail: 5}},
+ head: block50,
+ newstate: []*subchain{{Head: 50, Tail: 5}},
+ },
+ // A single leftover subchain is present. A new head is announced that
+ // links into the middle of it, but does not anchor into an existing
+ // header. We expect the old subchain to be truncated and a new chain
+ // be created for the dangling head.
+ {
+ headers: []*types.Header{block49B},
+ oldstate: []*subchain{{Head: 100, Tail: 5}},
+ head: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ {Head: 49, Tail: 5},
+ },
+ },
+ }
+ for i, tt := range tests {
+ // Create a fresh database and initialize it with the starting state
+ db := rawdb.NewMemoryDatabase()
+
+ rawdb.WriteHeader(db, genesis)
+ for _, header := range tt.headers {
+ rawdb.WriteSkeletonHeader(db, header)
+ }
+ if tt.oldstate != nil {
+ blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
+ rawdb.WriteSkeletonSyncStatus(db, blob)
+ }
+ // Create a skeleton sync and run a cycle
+ wait := make(chan struct{})
+
+ skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
+ skeleton.syncStarting = func() { close(wait) }
+ skeleton.Sync(tt.head, true)
+
+ <-wait
+ skeleton.Terminate()
+
+ // Ensure the correct resulting sync status
+ var progress skeletonProgress
+ json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+
+ if len(progress.Subchains) != len(tt.newstate) {
+ t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
+ continue
+ }
+ for j := 0; j < len(progress.Subchains); j++ {
+ if progress.Subchains[j].Head != tt.newstate[j].Head {
+ t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
+ }
+ if progress.Subchains[j].Tail != tt.newstate[j].Tail {
+ t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
+ }
+ }
+ }
+}
+
+// Tests that a running skeleton sync can be extended with properly linked up
+// headers but not with side chains.
+func TestSkeletonSyncExtend(t *testing.T) {
+ // Create a few key headers
+ var (
+ genesis = &types.Header{Number: big.NewInt(0)}
+ block49 = &types.Header{Number: big.NewInt(49)}
+ block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
+ block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
+ block51 = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
+ )
+ tests := []struct {
+ head *types.Header // New head header to announce to reorg to
+ extend *types.Header // New head header to announce to extend with
+ newstate []*subchain // Expected sync state after the reorg
+ err error // Whether extension succeeds or not
+ }{
+ // Initialize a sync and try to extend it with a subsequent block.
+ {
+ head: block49,
+ extend: block50,
+ newstate: []*subchain{
+ {Head: 50, Tail: 49},
+ },
+ },
+ // Initialize a sync and try to extend it with the existing head block.
+ {
+ head: block49,
+ extend: block49,
+ newstate: []*subchain{
+ {Head: 49, Tail: 49},
+ },
+ err: errReorgDenied,
+ },
+ // Initialize a sync and try to extend it with a sibling block.
+ {
+ head: block49,
+ extend: block49B,
+ newstate: []*subchain{
+ {Head: 49, Tail: 49},
+ },
+ err: errReorgDenied,
+ },
+ // Initialize a sync and try to extend it with a number-wise sequential
+ // header, but a hash wise non-linking one.
+ {
+ head: block49B,
+ extend: block50,
+ newstate: []*subchain{
+ {Head: 49, Tail: 49},
+ },
+ err: errReorgDenied,
+ },
+ // Initialize a sync and try to extend it with a non-linking future block.
+ {
+ head: block49,
+ extend: block51,
+ newstate: []*subchain{
+ {Head: 49, Tail: 49},
+ },
+ err: errReorgDenied,
+ },
+ // Initialize a sync and try to extend it with a past canonical block.
+ {
+ head: block50,
+ extend: block49,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ },
+ err: errReorgDenied,
+ },
+ // Initialize a sync and try to extend it with a past sidechain block.
+ {
+ head: block50,
+ extend: block49B,
+ newstate: []*subchain{
+ {Head: 50, Tail: 50},
+ },
+ err: errReorgDenied,
+ },
+ }
+ for i, tt := range tests {
+ // Create a fresh database and initialize it with the starting state
+ db := rawdb.NewMemoryDatabase()
+ rawdb.WriteHeader(db, genesis)
+
+ // Create a skeleton sync and run a cycle
+ wait := make(chan struct{})
+
+ skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
+ skeleton.syncStarting = func() { close(wait) }
+ skeleton.Sync(tt.head, true)
+
+ <-wait
+ if err := skeleton.Sync(tt.extend, false); err != tt.err {
+ t.Errorf("extension failure mismatch: have %v, want %v", err, tt.err)
+ }
+ skeleton.Terminate()
+
+ // Ensure the correct resulting sync status
+ var progress skeletonProgress
+ json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+
+ if len(progress.Subchains) != len(tt.newstate) {
+ t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
+ continue
+ }
+ for j := 0; j < len(progress.Subchains); j++ {
+ if progress.Subchains[j].Head != tt.newstate[j].Head {
+ t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
+ }
+ if progress.Subchains[j].Tail != tt.newstate[j].Tail {
+ t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
+ }
+ }
+ }
+}
+
+// Tests that the skeleton sync correctly retrieves headers from one or more
+// peers without duplicates or other strange side effects.
+func TestSkeletonSyncRetrievals(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Since skeleton headers don't need to be meaningful, beyond a parent hash
+ // progression, create a long fake chain to test with.
+ chain := []*types.Header{{Number: big.NewInt(0)}}
+ for i := 1; i < 10000; i++ {
+ chain = append(chain, &types.Header{
+ ParentHash: chain[i-1].Hash(),
+ Number: big.NewInt(int64(i)),
+ })
+ }
+ tests := []struct {
+ headers []*types.Header // Database content (beside the genesis)
+ oldstate []*subchain // Old sync state with various interrupted subchains
+
+ head *types.Header // New head header to announce to reorg to
+ peers []*skeletonTestPeer // Initial peer set to start the sync with
+ midstate []*subchain // Expected sync state after initial cycle
+ midserve uint64 // Expected number of header retrievals after initial cycle
+ middrop uint64 // Expectd number of peers dropped after initial cycle
+
+ newHead *types.Header // New header to annount on top of the old one
+ newPeer *skeletonTestPeer // New peer to join the skeleton syncer
+ endstate []*subchain // Expected sync state after the post-init event
+ endserve uint64 // Expected number of header retrievals after the post-init event
+ enddrop uint64 // Expectd number of peers dropped after the post-init event
+ }{
+ // Completely empty database with only the genesis set. The sync is expected
+ // to create a single subchain with the requested head. No peers however, so
+ // the sync should be stuck without any progression.
+ //
+ // When a new peer is added, it should detect the join and fill the headers
+ // to the genesis block.
+ {
+ head: chain[len(chain)-1],
+ midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
+
+ newPeer: newSkeletonTestPeer("test-peer", chain),
+ endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+ endserve: uint64(len(chain) - 2), // len - head - genesis
+ },
+ // Completely empty database with only the genesis set. The sync is expected
+ // to create a single subchain with the requested head. With one valid peer,
+ // the sync is expected to complete already in the initial round.
+ //
+ // Adding a second peer should not have any effect.
+ {
+ head: chain[len(chain)-1],
+ peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
+ midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+ midserve: uint64(len(chain) - 2), // len - head - genesis
+
+ newPeer: newSkeletonTestPeer("test-peer-2", chain),
+ endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+ endserve: uint64(len(chain) - 2), // len - head - genesis
+ },
+ // Completely empty database with only the genesis set. The sync is expected
+ // to create a single subchain with the requested head. With many valid peers,
+ // the sync is expected to complete already in the initial round.
+ //
+ // Adding a new peer should not have any effect.
+ {
+ head: chain[len(chain)-1],
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("test-peer-1", chain),
+ newSkeletonTestPeer("test-peer-2", chain),
+ newSkeletonTestPeer("test-peer-3", chain),
+ },
+ midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+ midserve: uint64(len(chain) - 2), // len - head - genesis
+
+ newPeer: newSkeletonTestPeer("test-peer-4", chain),
+ endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+ endserve: uint64(len(chain) - 2), // len - head - genesis
+ },
+ // This test checks if a peer tries to withhold a header - *on* the sync
+ // boundary - instead of sending the requested amount. The malicious short
+ // package should not be accepted.
+ //
+ // Joining with a new peer should however unblock the sync.
+ {
+ head: chain[requestHeaders+100],
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
+ },
+ midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+ midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
+ middrop: 1, // penalize shortened header deliveries
+
+ newPeer: newSkeletonTestPeer("good-peer", chain),
+ endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+ endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
+ enddrop: 1, // no new drops
+ },
+ // This test checks if a peer tries to withhold a header - *off* the sync
+ // boundary - instead of sending the requested amount. The malicious short
+ // package should not be accepted.
+ //
+ // Joining with a new peer should however unblock the sync.
+ {
+ head: chain[requestHeaders+100],
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
+ },
+ midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+ midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
+ middrop: 1, // penalize shortened header deliveries
+
+ newPeer: newSkeletonTestPeer("good-peer", chain),
+ endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+ endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
+ enddrop: 1, // no new drops
+ },
+ // This test checks if a peer tries to duplicate a header - *on* the sync
+ // boundary - instead of sending the correct sequence. The malicious duped
+ // package should not be accepted.
+ //
+ // Joining with a new peer should however unblock the sync.
+ {
+ head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
+ },
+ midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+ midserve: requestHeaders + 101 - 2, // len - head - genesis
+ middrop: 1, // penalize invalid header sequences
+
+ newPeer: newSkeletonTestPeer("good-peer", chain),
+ endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+ endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+ enddrop: 1, // no new drops
+ },
+ // This test checks if a peer tries to duplicate a header - *off* the sync
+ // boundary - instead of sending the correct sequence. The malicious duped
+ // package should not be accepted.
+ //
+ // Joining with a new peer should however unblock the sync.
+ {
+ head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
+ },
+ midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+ midserve: requestHeaders + 101 - 2, // len - head - genesis
+ middrop: 1, // penalize invalid header sequences
+
+ newPeer: newSkeletonTestPeer("good-peer", chain),
+ endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+ endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+ enddrop: 1, // no new drops
+ },
+ // This test checks if a peer tries to inject a different header - *on*
+ // the sync boundary - instead of sending the correct sequence. The bad
+ // package should not be accepted.
+ //
+ // Joining with a new peer should however unblock the sync.
+ {
+ head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("header-changer",
+ append(
+ append(
+ append([]*types.Header{}, chain[:99]...),
+ &types.Header{
+ ParentHash: chain[98].Hash(),
+ Number: big.NewInt(int64(99)),
+ GasLimit: 1,
+ },
+ ), chain[100:]...,
+ ),
+ ),
+ },
+ midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+ midserve: requestHeaders + 101 - 2, // len - head - genesis
+ middrop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync?
+
+ newPeer: newSkeletonTestPeer("good-peer", chain),
+ endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+ endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+ enddrop: 1, // no new drops
+ },
+ // This test checks if a peer tries to inject a different header - *off*
+ // the sync boundary - instead of sending the correct sequence. The bad
+ // package should not be accepted.
+ //
+ // Joining with a new peer should however unblock the sync.
+ {
+ head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeer("header-changer",
+ append(
+ append(
+ append([]*types.Header{}, chain[:50]...),
+ &types.Header{
+ ParentHash: chain[49].Hash(),
+ Number: big.NewInt(int64(50)),
+ GasLimit: 1,
+ },
+ ), chain[51:]...,
+ ),
+ ),
+ },
+ midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+ midserve: requestHeaders + 101 - 2, // len - head - genesis
+ middrop: 1, // different set of headers, drop
+
+ newPeer: newSkeletonTestPeer("good-peer", chain),
+ endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+ endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+ enddrop: 1, // no new drops
+ },
+ // This test reproduces a bug caught during review (kudos to @holiman)
+ // where a subchain is merged with a previously interrupted one, causing
+ // pending data in the scratch space to become "invalid" (since we jump
+ // ahead during subchain merge). In that case it is expected to ignore
+ // the queued up data instead of trying to process on top of a shifted
+ // task set.
+ //
+ // The test is a bit convoluted since it needs to trigger a concurrency
+ // issue. First we sync up an initial chain of 2x512 items. Then announce
+ // 2x512+2 as head and delay delivering the head batch to fill the scratch
+ // space first. The delivery head should merge with the previous download
+ // and the scratch space must not be consumed further.
+ {
+ head: chain[2*requestHeaders],
+ peers: []*skeletonTestPeer{
+ newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
+ if origin == chain[2*requestHeaders+1].Number.Uint64() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ return nil // Fallback to default behavior, just delayed
+ }),
+ newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
+ if origin == chain[2*requestHeaders+1].Number.Uint64() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ return nil // Fallback to default behavior, just delayed
+ }),
+ },
+ midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
+ midserve: 2*requestHeaders - 1, // len - head - genesis
+
+ newHead: chain[2*requestHeaders+2],
+ endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
+ endserve: 4 * requestHeaders,
+ },
+ }
+ for i, tt := range tests {
+ // Create a fresh database and initialize it with the starting state
+ db := rawdb.NewMemoryDatabase()
+ rawdb.WriteHeader(db, chain[0])
+
+ // Create a peer set to feed headers through
+ peerset := newPeerSet()
+ for _, peer := range tt.peers {
+ peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
+ }
+ // Create a peer dropper to track malicious peers
+ dropped := make(map[string]int)
+ drop := func(peer string) {
+ if p := peerset.Peer(peer); p != nil {
+ atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
+ }
+ peerset.Unregister(peer)
+ dropped[peer]++
+ }
+ // Create a skeleton sync and run a cycle
+ skeleton := newSkeleton(db, peerset, drop, newHookedBackfiller())
+ skeleton.Sync(tt.head, true)
+
+ var progress skeletonProgress
+ // Wait a bit (bleah) for the initial sync loop to go to idle. This might
+ // be either a finish or a never-start hence why there's no event to hook.
+ check := func() error {
+ if len(progress.Subchains) != len(tt.midstate) {
+ return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
+
+ }
+ for j := 0; j < len(progress.Subchains); j++ {
+ if progress.Subchains[j].Head != tt.midstate[j].Head {
+ return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
+ }
+ if progress.Subchains[j].Tail != tt.midstate[j].Tail {
+ return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
+ }
+ }
+ return nil
+ }
+
+ waitStart := time.Now()
+ for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
+ time.Sleep(waitTime)
+ // Check the post-init end state if it matches the required results
+ json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+ if err := check(); err == nil {
+ break
+ }
+ }
+ if err := check(); err != nil {
+ t.Error(err)
+ continue
+ }
+ var served uint64
+ for _, peer := range tt.peers {
+ served += atomic.LoadUint64(&peer.served)
+ }
+ if served != tt.midserve {
+ t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
+ }
+ var drops uint64
+ for _, peer := range tt.peers {
+ drops += atomic.LoadUint64(&peer.dropped)
+ }
+ if drops != tt.middrop {
+ t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
+ }
+ // Apply the post-init events if there's any
+ if tt.newHead != nil {
+ skeleton.Sync(tt.newHead, true)
+ }
+ if tt.newPeer != nil {
+ if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
+ t.Errorf("test %d: failed to register new peer: %v", i, err)
+ }
+ }
+ // Wait a bit (bleah) for the second sync loop to go to idle. This might
+ // be either a finish or a never-start hence why there's no event to hook.
+ check = func() error {
+ if len(progress.Subchains) != len(tt.endstate) {
+ return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
+ }
+ for j := 0; j < len(progress.Subchains); j++ {
+ if progress.Subchains[j].Head != tt.endstate[j].Head {
+ return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
+ }
+ if progress.Subchains[j].Tail != tt.endstate[j].Tail {
+ return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
+ }
+ }
+ return nil
+ }
+ waitStart = time.Now()
+ for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
+ time.Sleep(waitTime)
+ // Check the post-init end state if it matches the required results
+ json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+ if err := check(); err == nil {
+ break
+ }
+ }
+ if err := check(); err != nil {
+ t.Error(err)
+ continue
+ }
+ // Check that the peers served no more headers than we actually needed
+ served = 0
+ for _, peer := range tt.peers {
+ served += atomic.LoadUint64(&peer.served)
+ }
+ if tt.newPeer != nil {
+ served += atomic.LoadUint64(&tt.newPeer.served)
+ }
+ if served != tt.endserve {
+ t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
+ }
+ drops = 0
+ for _, peer := range tt.peers {
+ drops += atomic.LoadUint64(&peer.dropped)
+ }
+ if tt.newPeer != nil {
+ drops += atomic.LoadUint64(&tt.newPeer.dropped)
+ }
+ if drops != tt.middrop {
+ t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
+ }
+ // Clean up any leftover skeleton sync resources
+ skeleton.Terminate()
+ }
+}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 1dbd5a7f1fd8..ddc0e9e82977 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -138,8 +138,10 @@ type Config struct {
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
- // Whitelist of required block number -> hash values to accept
- Whitelist map[uint64]common.Hash `toml:"-"`
+ // PeerRequiredBlocks is a set of block number -> hash mappings which must be in the
+ // canonical chain of all remote peers. Setting the option makes geth verify the
+ // presence of these blocks for every new peer connection.
+ PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
// Light client options
LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 70a9649bff83..874e30dffdac 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -26,7 +26,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
NoPruning bool
NoPrefetch bool
TxLookupLimit uint64 `toml:",omitempty"`
- Whitelist map[uint64]common.Hash `toml:"-"`
+ PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
LightServ int `toml:",omitempty"`
LightIngress int `toml:",omitempty"`
LightEgress int `toml:",omitempty"`
@@ -71,7 +71,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.NoPruning = c.NoPruning
enc.NoPrefetch = c.NoPrefetch
enc.TxLookupLimit = c.TxLookupLimit
- enc.Whitelist = c.Whitelist
+ enc.PeerRequiredBlocks = c.PeerRequiredBlocks
enc.LightServ = c.LightServ
enc.LightIngress = c.LightIngress
enc.LightEgress = c.LightEgress
@@ -120,7 +120,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
NoPruning *bool
NoPrefetch *bool
TxLookupLimit *uint64 `toml:",omitempty"`
- Whitelist map[uint64]common.Hash `toml:"-"`
+ PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
LightServ *int `toml:",omitempty"`
LightIngress *int `toml:",omitempty"`
LightEgress *int `toml:",omitempty"`
@@ -184,8 +184,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.TxLookupLimit != nil {
c.TxLookupLimit = *dec.TxLookupLimit
}
- if dec.Whitelist != nil {
- c.Whitelist = dec.Whitelist
+ if dec.PeerRequiredBlocks != nil {
+ c.PeerRequiredBlocks = dec.PeerRequiredBlocks
}
if dec.LightServ != nil {
c.LightServ = *dec.LightServ
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index 628a5650424d..06c61ae55d20 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -364,6 +364,7 @@ func testSequentialAnnouncements(t *testing.T, light bool) {
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester(light)
+ defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -743,7 +744,7 @@ func testInvalidNumberAnnouncement(t *testing.T, light bool) {
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
imported := make(chan interface{})
- announced := make(chan interface{})
+ announced := make(chan interface{}, 2)
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
if light {
if header == nil {
@@ -806,6 +807,7 @@ func TestEmptyBlockShortCircuit(t *testing.T) {
hashes, blocks := makeChain(32, 0, genesis)
tester := newTester(false)
+ defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index 970dfd4467a5..4113089afb1e 100644
--- a/eth/gasprice/feehistory.go
+++ b/eth/gasprice/feehistory.go
@@ -117,7 +117,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
reward, _ := tx.EffectiveGasTip(bf.block.BaseFee())
sorter[i] = txGasAndReward{gasUsed: bf.receipts[i].GasUsed, reward: reward}
}
- sort.Sort(sorter)
+ sort.Stable(sorter)
var txIndex int
sumGasUsed := sorter[0].gasUsed
diff --git a/eth/handler.go b/eth/handler.go
index 55ca869c773a..40edfa2d1758 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -86,7 +86,8 @@ type handlerConfig struct {
BloomCache uint64 // Megabytes to alloc for snap sync bloom
EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
- Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged
+
+ PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
}
type handler struct {
@@ -115,7 +116,7 @@ type handler struct {
txsSub event.Subscription
minedBlockSub *event.TypeMuxSubscription
- whitelist map[uint64]common.Hash
+ peerRequiredBlocks map[uint64]common.Hash
// channels for fetcher, syncer, txsyncLoop
quitSync chan struct{}
@@ -132,16 +133,16 @@ func newHandler(config *handlerConfig) (*handler, error) {
config.EventMux = new(event.TypeMux) // Nicety initialization for tests
}
h := &handler{
- networkID: config.Network,
- forkFilter: forkid.NewFilter(config.Chain),
- eventMux: config.EventMux,
- database: config.Database,
- txpool: config.TxPool,
- chain: config.Chain,
- peers: newPeerSet(),
- merger: config.Merger,
- whitelist: config.Whitelist,
- quitSync: make(chan struct{}),
+ networkID: config.Network,
+ forkFilter: forkid.NewFilter(config.Chain),
+ eventMux: config.EventMux,
+ database: config.Database,
+ txpool: config.TxPool,
+ chain: config.Chain,
+ peers: newPeerSet(),
+ merger: config.Merger,
+ peerRequiredBlocks: config.PeerRequiredBlocks,
+ quitSync: make(chan struct{}),
}
if config.Sync == downloader.FullSync {
// The database seems empty as the current block is the genesis. Yet the snap
@@ -171,10 +172,30 @@ func newHandler(config *handlerConfig) (*handler, error) {
h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1
h.checkpointHash = config.Checkpoint.SectionHead
}
+ // If sync succeeds, pass a callback to potentially disable snap sync mode
+ // and enable transaction propagation.
+ success := func() {
+ // If we were running snap sync and it finished, disable doing another
+ // round on next sync cycle
+ if atomic.LoadUint32(&h.snapSync) == 1 {
+ log.Info("Snap sync complete, auto disabling")
+ atomic.StoreUint32(&h.snapSync, 0)
+ }
+ // If we've successfully finished a sync cycle and passed any required
+ // checkpoint, enable accepting transactions from the network
+ head := h.chain.CurrentBlock()
+ if head.NumberU64() >= h.checkpointNumber {
+ // Checkpoint passed, sanity check the timestamp to have a fallback mechanism
+ // for non-checkpointed (number = 0) private networks.
+ if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
+ atomic.StoreUint32(&h.acceptTxs, 1)
+ }
+ }
+ }
// Construct the downloader (long sync) and its backing state bloom if snap
// sync is requested. The downloader is responsible for deallocating the state
// bloom when it's done.
- h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer)
+ h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success)
// Construct the fetcher (short sync)
validator := func(header *types.Header) error {
@@ -403,8 +424,8 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
}
}()
}
- // If we have any explicit whitelist block hashes, request them
- for number, hash := range h.whitelist {
+ // If we have any explicit peer required block hashes, request them
+ for number := range h.peerRequiredBlocks {
resCh := make(chan *eth.Response)
if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
return err
@@ -417,25 +438,25 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
case res := <-resCh:
headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
if len(headers) == 0 {
- // Whitelisted blocks are allowed to be missing if the remote
+ // Required blocks are allowed to be missing if the remote
// node is not yet synced
res.Done <- nil
return
}
// Validate the header and either drop the peer or continue
if len(headers) > 1 {
- res.Done <- errors.New("too many headers in whitelist response")
+ res.Done <- errors.New("too many headers in required block response")
return
}
if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
- peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
- res.Done <- errors.New("whitelist block mismatch")
+ peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
+ res.Done <- errors.New("required block mismatch")
return
}
- peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
-
+ peer.Log().Debug("Peer required block verified", "number", number, "hash", hash)
+ res.Done <- nil
case <-timeout.C:
- peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
+ peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
h.removePeer(peer.ID())
}
}(number, hash)
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index 6e1c57cb6c45..7d5027ae77f6 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -570,7 +570,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
t.Fatalf("failed to answer challenge: %v", err)
}
} else {
- responseRlp, _ := rlp.EncodeToBytes(types.Header{Number: response.Number})
+ responseRlp, _ := rlp.EncodeToBytes(&types.Header{Number: response.Number})
if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
t.Fatalf("failed to answer challenge: %v", err)
}
diff --git a/eth/peerset.go b/eth/peerset.go
index 1e864a8e46f2..3e54a481e36b 100644
--- a/eth/peerset.go
+++ b/eth/peerset.go
@@ -230,7 +230,7 @@ func (ps *peerSet) snapLen() int {
}
// peerWithHighestTD retrieves the known peer with the currently highest total
-// difficulty.
+// difficulty, but below the given PoS switchover threshold.
func (ps *peerSet) peerWithHighestTD() *eth.Peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index 7d9b378839b3..55e612b801e0 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -264,11 +264,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
RequestId: 123,
GetBlockHeadersPacket: tt.query,
})
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{
RequestId: 123,
BlockHeadersPacket: headers,
}); err != nil {
@@ -279,14 +279,12 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
- p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
RequestId: 456,
GetBlockHeadersPacket: tt.query,
})
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
- RequestId: 456,
- BlockHeadersPacket: headers,
- }); err != nil {
+ expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers}
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
t.Errorf("test %d by hash: headers mismatch: %v", i, err)
}
}
@@ -364,11 +362,11 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
}
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockBodiesMsg, GetBlockBodiesPacket66{
+ p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
RequestId: 123,
GetBlockBodiesPacket: hashes,
})
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, BlockBodiesPacket66{
+ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{
RequestId: 123,
BlockBodiesPacket: bodies,
}); err != nil {
@@ -436,7 +434,7 @@ func testGetNodeData(t *testing.T, protocol uint) {
it.Release()
// Request all hashes.
- p2p.Send(peer.app, GetNodeDataMsg, GetNodeDataPacket66{
+ p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
RequestId: 123,
GetNodeDataPacket: hashes,
})
@@ -546,11 +544,11 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetReceiptsMsg, GetReceiptsPacket66{
+ p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{
RequestId: 123,
GetReceiptsPacket: hashes,
})
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, ReceiptsPacket66{
+ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{
RequestId: 123,
ReceiptsPacket: receipts,
}); err != nil {
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index 4161420f3a35..a8af9640bb89 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -241,7 +241,7 @@ func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs [
p.knownTxs.Add(hashes...)
// Not packed into PooledTransactionsPacket to avoid RLP decoding
- return p2p.Send(p.rw, PooledTransactionsMsg, PooledTransactionsRLPPacket66{
+ return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{
RequestId: id,
PooledTransactionsRLPPacket: txs,
})
@@ -298,7 +298,7 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
// ReplyBlockHeaders is the eth/66 version of SendBlockHeaders.
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
- return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersRLPPacket66{
+ return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{
RequestId: id,
BlockHeadersRLPPacket: headers,
})
@@ -307,7 +307,7 @@ func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
// ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
// Not packed into BlockBodiesPacket to avoid RLP decoding
- return p2p.Send(p.rw, BlockBodiesMsg, BlockBodiesRLPPacket66{
+ return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
RequestId: id,
BlockBodiesRLPPacket: bodies,
})
@@ -315,7 +315,7 @@ func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
// ReplyNodeData is the eth/66 response to GetNodeData.
func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
- return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket66{
+ return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
RequestId: id,
NodeDataPacket: data,
})
@@ -323,7 +323,7 @@ func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
- return p2p.Send(p.rw, ReceiptsMsg, ReceiptsRLPPacket66{
+ return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{
RequestId: id,
ReceiptsRLPPacket: receipts,
})
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index 0a1ee2637fc3..314776dffe89 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -299,7 +299,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
size uint64
last common.Hash
)
- for it.Next() && size < req.Bytes {
+ for it.Next() {
hash, account := it.Hash(), common.CopyBytes(it.Account())
// Track the returned interval for the Merkle proofs
@@ -315,6 +315,9 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
break
}
+ if size > req.Bytes {
+ break
+ }
}
it.Release()
@@ -464,7 +467,7 @@ func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [
// Peers should not request the empty code, but if they do, at
// least sent them back a correct response without db lookups
codes = append(codes, []byte{})
- } else if blob, err := chain.ContractCode(hash); err == nil {
+ } else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
codes = append(codes, blob)
bytes += uint64(len(blob))
}
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index be8644a5a4fb..665d7601cfe2 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -168,7 +168,7 @@ type bytecodeResponse struct {
// to actual requests and to validate any security constraints.
//
// Concurrency note: storage requests and responses are handled concurrently from
-// the main runloop to allow Merkel proof verifications on the peer's thread and
+// the main runloop to allow Merkle proof verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. tasks). That
// is only included to allow the runloop to match a response to the task being
@@ -1781,7 +1781,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
for i, account := range res.accounts {
// Check if the account is a contract with an unknown code
if !bytes.Equal(account.CodeHash, emptyCode[:]) {
- if code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)); code == nil {
+ if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
res.task.needCode[i] = true
res.task.pend++
@@ -1789,7 +1789,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
}
// Check if the account is a contract with an unknown storage trie
if account.Root != emptyRoot {
- if node, err := s.db.Get(account.Root[:]); err != nil || node == nil {
+ if ok, err := s.db.Has(account.Root[:]); err != nil || !ok {
// If there was a previous large state retrieval in progress,
// don't restart it from scratch. This happens if a sync cycle
// is interrupted and resumed later. However, *do* update the
@@ -2826,7 +2826,10 @@ func (s *Syncer) reportSyncProgress(force bool) {
new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
accountFills,
).Uint64())
-
+ // Don't report anything until we have a meaningful progress
+ if estBytes < 1.0 {
+ return
+ }
elapsed := time.Since(s.startTime)
estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 47ab1f026dd2..879ce8b6b2db 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1349,7 +1349,7 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
accTrie, _ := trie.New(common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1394,7 +1394,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
}
// Fill boundary accounts
for i := 0; i < len(boundaries); i++ {
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1406,7 +1406,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
}
// Fill other accounts if required
for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1442,7 +1442,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
stRoot := stTrie.Hash()
stTrie.Commit(nil)
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
@@ -1489,7 +1489,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
if code {
codehash = getCodeHash(i)
}
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
diff --git a/eth/sync.go b/eth/sync.go
index b8ac67d3b2d1..d67d2311d0d9 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -17,6 +17,7 @@
package eth
import (
+ "errors"
"math/big"
"sync/atomic"
"time"
@@ -65,6 +66,7 @@ type chainSyncer struct {
handler *handler
force *time.Timer
forced bool // true when force timer fired
+ warned time.Time
peerEventCh chan struct{}
doneCh chan error // non-nil when sync is running
}
@@ -119,10 +121,18 @@ func (cs *chainSyncer) loop() {
select {
case <-cs.peerEventCh:
// Peer information changed, recheck.
- case <-cs.doneCh:
+ case err := <-cs.doneCh:
cs.doneCh = nil
cs.force.Reset(forceSyncCycle)
cs.forced = false
+
+ // If we've reached the merge transition but no beacon client is available, or
+ // it has not yet switched us over, keep warning the user that their infra is
+ // potentially flaky.
+ if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second {
+ log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
+ cs.warned = time.Now()
+ }
case <-cs.force.C:
cs.forced = true
@@ -143,9 +153,16 @@ func (cs *chainSyncer) loop() {
// nextSyncOp determines whether sync is required at this time.
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
if cs.doneCh != nil {
- return nil // Sync already running.
+ return nil // Sync already running
}
- // Disable the td based sync trigger after the transition
+ // If a beacon client once took over control, disable the entire legacy sync
+ // path from here on end. Note, there is a slight "race" between reaching TTD
+ // and the beacon client taking over. The downloader will enforce that nothing
+ // above the first TTD will be delivered to the chain for import.
+ //
+ // An alternative would be to check the local chain for exceeding the TTD and
+ // avoid triggering a sync in that case, but that could also miss sibling or
+ // other family TTD block being accepted.
if cs.handler.merger.TDDReached() {
return nil
}
@@ -159,16 +176,24 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
if cs.handler.peers.len() < minPeers {
return nil
}
- // We have enough peers, check TD
+ // We have enough peers, pick the one with the highest TD, but avoid going
+ // over the terminal total difficulty. Above that we expect the consensus
+ // clients to direct the chain head to sync to.
peer := cs.handler.peers.peerWithHighestTD()
if peer == nil {
return nil
}
mode, ourTD := cs.modeAndLocalHead()
-
op := peerToSyncOp(mode, peer)
if op.td.Cmp(ourTD) <= 0 {
- return nil // We're in sync.
+ // We seem to be in sync according to the legacy rules. In the merge
+ // world, it can also mean we're stuck on the merge block, waiting for
+ // a beacon client. In the latter case, notify the user.
+ if ttd := cs.handler.chain.Config().TerminalTotalDifficulty; ttd != nil && ourTD.Cmp(ttd) >= 0 && time.Since(cs.warned) > 10*time.Second {
+ log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
+ cs.warned = time.Now()
+ }
+ return nil // We're in sync
}
return op
}
@@ -227,7 +252,7 @@ func (h *handler) doSync(op *chainSyncOp) error {
}
}
// Run the sync cycle, and disable snap sync if we're past the pivot block
- err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)
+ err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode)
if err != nil {
return err
}
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 6ba40f85af9f..3b73498ac98c 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -452,7 +452,7 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *
// TraceBlock returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
-func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) {
+func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) {
block := new(types.Block)
if err := rlp.Decode(bytes.NewReader(blob), block); err != nil {
return nil, fmt.Errorf("could not decode block: %v", err)
@@ -592,11 +592,11 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
if threads > len(txs) {
threads = len(txs)
}
- blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
blockHash := block.Hash()
for th := 0; th < threads; th++ {
pend.Add(1)
go func() {
+ blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
defer pend.Done()
// Fetch and execute the next transaction trace tasks
for task := range jobs {
@@ -617,6 +617,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
}
// Feed the transactions into the tracers and return
var failed error
+ blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
for i, tx := range txs {
// Send the trace task over for execution
jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 7521a98f2406..cf7c1e6c0d0e 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -130,10 +130,6 @@ func TestCallTracerLegacy(t *testing.T) {
testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
}
-func TestCallTracerJs(t *testing.T) {
- testCallTracer("callTracerJs", "call_tracer", t)
-}
-
func TestCallTracerNative(t *testing.T) {
testCallTracer("callTracer", "call_tracer", t)
}
diff --git a/eth/tracers/js/internal/tracers/assets.go b/eth/tracers/js/internal/tracers/assets.go
index a2bb69dee401..a117c9f06e35 100644
--- a/eth/tracers/js/internal/tracers/assets.go
+++ b/eth/tracers/js/internal/tracers/assets.go
@@ -2,12 +2,11 @@
// sources:
// 4byte_tracer_legacy.js (2.933kB)
// bigram_tracer.js (1.712kB)
-// call_tracer_js.js (3.497kB)
// call_tracer_legacy.js (8.956kB)
// evmdis_tracer.js (4.215kB)
-// noop_tracer.js (1.271kB)
+// noop_tracer_legacy.js (1.271kB)
// opcount_tracer.js (1.372kB)
-// prestate_tracer.js (4.287kB)
+// prestate_tracer_legacy.js (4.483kB)
// trigram_tracer.js (1.788kB)
// unigram_tracer.js (1.469kB)
@@ -118,26 +117,6 @@ func bigram_tracerJs() (*asset, error) {
return a, nil
}
-var _call_tracer_jsJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\x5f\x6f\xdb\x38\x0c\x7f\x8e\x3f\x05\xaf\x0f\x4b\x82\x65\x71\xbb\x03\xf6\xd0\x2d\x03\x72\x45\xbb\x05\xe8\xb5\x45\x9a\xde\x50\x14\x7d\x50\x6c\xda\xd6\xa6\x48\x86\x44\x37\xcd\x6d\xfd\xee\x07\x4a\x76\x6a\x67\x59\x6f\x2f\x06\x2c\x92\x3f\xfe\xfb\x51\x54\x1c\xc3\x89\x29\x37\x56\xe6\x05\xc1\xdb\xc3\xb7\x47\xb0\x28\x10\x72\xf3\x06\xa9\x40\x8b\xd5\x0a\xa6\x15\x15\xc6\xba\x28\x8e\x61\x51\x48\x07\x99\x54\x08\xd2\x41\x29\x2c\x81\xc9\x80\x76\xf4\x95\x5c\x5a\x61\x37\xe3\x28\x8e\x83\xcd\x5e\x31\x23\x64\x16\x11\x9c\xc9\x68\x2d\x2c\x1e\xc3\xc6\x54\x90\x08\x0d\x16\x53\xe9\xc8\xca\x65\x45\x08\x92\x40\xe8\x34\x36\x16\x56\x26\x95\xd9\x86\x21\x25\x41\xa5\x53\xb4\xde\x35\xa1\x5d\xb9\x26\x8e\x4f\x17\x37\x70\x8e\xce\xa1\x85\x4f\xa8\xd1\x0a\x05\x57\xd5\x52\xc9\x04\xce\x65\x82\xda\x21\x08\x07\x25\x9f\xb8\x02\x53\x58\x7a\x38\x36\x3c\xe3\x50\xae\xeb\x50\xe0\xcc\x54\x3a\x15\x24\x8d\x1e\x01\x4a\x8e\x1c\x1e\xd0\x3a\x69\x34\xfc\xd9\xb8\xaa\x01\x47\x60\x2c\x83\x0c\x04\x71\x02\x16\x4c\xc9\x76\x43\x10\x7a\x03\x4a\xd0\xb3\xe9\x6f\x14\xe4\x39\xef\x14\xa4\xf6\x6e\x0a\x53\x22\x50\x21\x88\xb3\x5e\x4b\xa5\x60\x89\x50\x39\xcc\x2a\x35\x62\xb4\x65\x45\xf0\x65\xb6\xf8\x7c\x79\xb3\x80\xe9\xc5\x2d\x7c\x99\xce\xe7\xd3\x8b\xc5\xed\x7b\x58\x4b\x2a\x4c\x45\x80\x0f\x18\xa0\xe4\xaa\x54\x12\x53\x58\x0b\x6b\x85\xa6\x0d\x98\x8c\x11\xfe\x3e\x9d\x9f\x7c\x9e\x5e\x2c\xa6\x7f\xcd\xce\x67\x8b\x5b\x30\x16\xce\x66\x8b\x8b\xd3\xeb\x6b\x38\xbb\x9c\xc3\x14\xae\xa6\xf3\xc5\xec\xe4\xe6\x7c\x3a\x87\xab\x9b\xf9\xd5\xe5\xf5\xe9\x18\xae\x91\xa3\x42\xb6\xff\xff\x9a\x67\xbe\x7b\x16\x21\x45\x12\x52\xb9\xa6\x12\xb7\xa6\x02\x57\x98\x4a\xa5\x50\x88\x07\x04\x8b\x09\xca\x07\x4c\x41\x40\x62\xca\xcd\x6f\x37\x95\xb1\x84\x32\x3a\xf7\x39\xff\x92\x90\x30\xcb\x40\x1b\x1a\x81\x43\x84\x0f\x05\x51\x79\x1c\xc7\xeb\xf5\x7a\x9c\xeb\x6a\x6c\x6c\x1e\xab\x00\xe7\xe2\x8f\xe3\x28\x62\xd0\x44\x28\x75\x66\xc5\x0a\x17\x56\x24\x68\xb9\xee\xce\xc3\x6b\x5c\x7b\x21\x64\x2c\x05\xb2\x22\x91\x3a\x87\x15\x52\x61\x52\x07\x64\xc0\x62\x69\x2c\xd5\x9d\x02\xa9\x33\x63\x57\x9e\x51\x3e\xd8\x25\x37\x46\x6a\x42\xab\x85\x82\x15\x3a\x27\x72\xf4\x2c\x16\x0c\xa6\x9d\x48\xc8\x53\xe6\x7b\xd4\x63\x3f\x8e\x44\xf2\xed\x18\xee\xbe\x3f\xdd\x8f\xa2\x5e\x26\x2a\x45\xc7\x90\x55\xda\x6b\x0d\x94\xc9\x47\x90\x2e\x87\xf0\xfd\x69\x14\xf5\x2c\xba\xae\x38\xa1\xc7\x5a\x1c\xf5\x7a\x71\x0c\x57\x16\x4b\x66\xb9\xa9\x98\x9d\xb5\x73\x1f\x62\xd4\xeb\x3d\x08\x0b\x01\x01\x26\xde\xa0\x47\x9b\x12\x8f\x01\x00\x12\x7a\x1c\xf3\xcf\x88\x4f\x33\x6b\x56\xfe\x94\xcc\x67\x7c\x64\x1f\x63\x3e\x1a\x7a\x21\x19\x2f\x6a\x0b\xc9\x04\xd1\x83\x50\x95\x87\xeb\x1f\x3e\xf6\xe1\xb5\x07\xf5\x67\x63\x32\xd7\x64\xa5\xce\x07\x47\xef\x82\x6a\x2e\x5c\x80\xa9\x55\x97\x32\x9f\x69\xf2\x68\xb9\x70\xc3\xbd\x06\x37\x0e\xd3\xe3\xfd\x06\x2c\xda\x63\x24\x75\x59\xd1\x71\x27\x56\x7f\x14\xa4\xa6\xa2\x20\x7e\x96\x86\x23\x2f\x7e\x8a\x7a\x3d\x99\xc1\x80\x0a\xe9\xc6\xdb\x3e\xdd\x1d\xde\x87\x1f\xf8\x63\x32\xf1\x37\x55\x26\x35\xa6\xa1\xfe\x75\x7b\x6a\x85\x09\xfc\xc2\xf4\x45\x70\xb4\xd6\xd8\x97\xc0\x83\xc2\x3e\x70\x2f\x61\x70\x40\xe5\x10\x18\x9f\x73\xfa\x6d\xc4\xad\x72\x2b\xc0\x8e\x4a\x07\x03\x5e\xbd\xda\x23\x3e\xc0\x47\x4c\x2a\xa6\x26\x58\x7c\x40\x4b\x98\x1e\xc0\x8f\x1f\x35\xed\xea\xfa\xc2\x64\x32\x39\x38\x7c\x3c\x18\xd6\x71\xa4\xa8\x90\xb0\xab\xe3\x63\x88\x38\x46\xaa\xac\x0e\xd9\x66\x52\x0b\x25\xff\xc5\xda\xed\x30\xea\xf1\x4c\x20\x8f\x5a\x6b\x24\xfc\xd8\x06\x64\x26\xbc\x1f\xe5\x0e\xdd\xbd\xc2\x38\x47\x5a\x6c\x4a\x1c\x0c\x5b\x94\x0f\x44\xd8\xca\xcf\xac\x59\x0d\x86\xcf\xb4\xdf\x11\x2f\x4c\x23\xac\x79\xb6\x23\x9f\xf1\x69\xa3\xe2\x09\xdf\xe5\xee\x56\xf1\x93\x70\x83\x61\x8b\xbe\xfd\xa3\x77\xfd\x0e\x07\xb7\x9a\xff\xf0\x34\x0d\x86\x3b\xdd\xf4\xb9\x71\x9e\x61\xda\x26\xbf\x70\x53\x1b\x77\xe7\xa4\xf6\xd2\x65\xd3\xb8\xac\x5c\x31\xe0\xdf\xa6\xc6\x8f\x92\x76\x4b\x3c\x0f\x4d\xd8\x16\x5a\xa1\xfe\x89\x96\x63\x85\x3a\xa7\xa2\x4e\x83\x35\x3e\xc2\x51\xdd\xf5\x56\x73\x76\xbd\x9b\x72\x30\xdc\xe6\x54\x8f\x37\x4c\xf6\x95\x2f\x04\x51\x17\x91\xd5\x7e\x2e\x64\xe3\xab\xa1\xf9\x8e\xdd\x29\x1f\x07\x77\x1c\x63\xad\xb5\x67\x5a\x42\x34\x0d\x83\xdb\xcd\x7e\x06\xbb\xf4\xd2\xc1\xd0\xc3\xd5\x73\xd8\x32\x6e\x42\x68\xa6\x2c\xb8\xf4\x22\xa6\xa6\x77\xdb\x3f\x99\x9f\x4e\x17\xa7\x7d\x9e\x9a\xbd\x92\xb7\xfd\x26\xa0\x66\x70\x82\x9a\xf1\x67\x4f\x51\xf3\xe1\x6a\xbf\x99\xc0\x51\x93\xd9\xce\x85\xa1\x50\xbf\x39\x6a\x2e\xb3\xbd\xf9\xbe\x68\x00\x77\xf7\x5b\x4f\x2f\x28\x76\x98\xc4\xda\xcc\xa6\x38\x86\x66\x94\xf9\x5d\x60\x51\x10\x3a\x7e\x18\x30\x1b\xcc\xf2\x2b\x26\xbc\x5c\x79\xe9\xf2\x3e\xf6\xaa\x90\xa2\x93\x16\x53\xc8\x24\xaa\x14\x0c\xbf\x10\xf9\xe9\xf1\xd5\x19\xed\x01\x1d\x5a\xc9\x88\x7e\x0f\x8f\xc3\x6b\x56\x32\xa8\x96\x09\xd2\x06\x32\x14\x54\x59\xe4\xf5\x5d\x0a\xe7\x60\x85\x42\x4b\x9d\x67\x95\x52\x1b\x30\x36\x45\x06\x0f\xf7\x8a\xf3\x80\x64\x78\xc1\x5b\x07\xeb\xc2\x40\x6a\x74\xbf\x5e\xea\xa5\x45\x7e\xaf\x8d\xe0\x6b\xe5\x88\x5f\x75\xa5\x12\x1b\x90\x34\x8e\x7a\x4d\x52\xed\xfd\xcc\x99\x6f\x47\xc4\x19\xbe\x10\x7f\x5e\xbe\x4d\x9b\xbb\xdb\xd7\x1f\xf3\x5f\x77\xef\xd6\xdd\xee\x6e\xdc\xe7\xe9\xef\xae\xd7\x66\x82\xba\x3b\xb4\x3d\x57\xdd\x45\xe9\x25\xfe\xaf\xbb\x22\x5b\xdc\xf7\x02\xcf\xe0\xad\x81\xff\x0b\x51\xca\x55\x3b\x27\xb9\x0a\xf1\x78\x2e\x6c\xd5\xfd\x5f\x73\xbf\x71\x17\x07\x5c\x9c\x6f\xb8\xe1\x87\x71\xa8\x51\xcd\x41\xe6\x6d\x38\xb8\xfb\x86\x9b\xfb\xfd\x3c\xad\xa7\xa0\xa5\xd7\x30\xb3\xb9\x3f\x83\xe8\x85\xc5\xbd\x0d\x42\x4e\x0e\xdf\x83\xfc\xd0\x36\xa8\xef\xb0\xf7\x20\x5f\xbf\x6e\x5c\xb6\xe5\x77\xf2\xbe\xb9\xc2\xb6\x0b\x6a\x47\x3e\x6c\x07\x54\x6f\xb4\xa0\x12\xf5\x9e\xa2\xa7\xe8\xbf\x00\x00\x00\xff\xff\x2a\xac\x9f\xff\xa9\x0d\x00\x00")
-
-func call_tracer_jsJsBytes() ([]byte, error) {
- return bindataRead(
- _call_tracer_jsJs,
- "call_tracer_js.js",
- )
-}
-
-func call_tracer_jsJs() (*asset, error) {
- bytes, err := call_tracer_jsJsBytes()
- if err != nil {
- return nil, err
- }
-
- info := bindataFileInfo{name: "call_tracer_js.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x42, 0x13, 0x7a, 0x14, 0xbf, 0xa7, 0x49, 0x4f, 0xb4, 0x4f, 0x45, 0x1, 0xbc, 0x9e, 0xd1, 0x8e, 0xc7, 0xee, 0x61, 0xfa, 0x82, 0x52, 0xa4, 0x78, 0xfe, 0xff, 0xb1, 0x68, 0x1d, 0xcc, 0x1d, 0x8e}}
- return a, nil
-}
-
var _call_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x89\x1f\x6a\x09\x51\x24\x39\xe9\xb7\x5f\xc0\xae\x7a\x50\x1d\x25\x35\xe0\xc6\x81\xad\x34\x08\x82\x3c\x50\xbb\xb3\x12\x6b\x8a\xdc\x92\x5c\xc9\xba\xd6\xff\xfb\x61\x86\xdc\xd5\xae\x24\x3b\xbe\x5e\x71\xe8\xbd\x69\x97\x33\xc3\xe1\xcc\x67\x7e\x71\x35\x18\xc0\xb9\xc9\x37\x56\xce\x17\x1e\x5e\x0e\x4f\xfe\x1f\xa6\x0b\x84\xb9\x79\x81\x7e\x81\x16\x8b\x25\x8c\x0b\xbf\x30\xd6\xb5\x07\x03\x98\x2e\xa4\x83\x4c\x2a\x04\xe9\x20\x17\xd6\x83\xc9\xc0\xef\xd0\x2b\x39\xb3\xc2\x6e\xfa\xed\xc1\x20\xf0\x1c\x5c\x26\x09\x99\x45\x04\x67\x32\xbf\x16\x16\x4f\x61\x63\x0a\x48\x84\x06\x8b\xa9\x74\xde\xca\x59\xe1\x11\xa4\x07\xa1\xd3\x81\xb1\xb0\x34\xa9\xcc\x36\x24\x52\x7a\x28\x74\x8a\x96\xb7\xf6\x68\x97\xae\xd4\xe3\xed\xbb\x0f\x70\x89\xce\xa1\x85\xb7\xa8\xd1\x0a\x05\xef\x8b\x99\x92\x09\x5c\xca\x04\xb5\x43\x10\x0e\x72\x7a\xe3\x16\x98\xc2\x8c\xc5\x11\xe3\x1b\x52\xe5\x26\xaa\x02\x6f\x4c\xa1\x53\xe1\xa5\xd1\x3d\x40\x49\x9a\xc3\x0a\xad\x93\x46\xc3\xab\x72\xab\x28\xb0\x07\xc6\x92\x90\x8e\xf0\x74\x00\x0b\x26\x27\xbe\x2e\x08\xbd\x01\x25\xfc\x96\xf5\x09\x06\xd9\x9e\x3b\x05\xa9\x79\x9b\x85\xc9\x11\xfc\x42\x78\x3a\xf5\x5a\x2a\x05\x33\x84\xc2\x61\x56\xa8\x1e\x49\x9b\x15\x1e\x3e\x5e\x4c\x7f\xba\xfa\x30\x85\xf1\xbb\x4f\xf0\x71\x7c\x7d\x3d\x7e\x37\xfd\x74\x06\x6b\xe9\x17\xa6\xf0\x80\x2b\x0c\xa2\xe4\x32\x57\x12\x53\x58\x0b\x6b\x85\xf6\x1b\x30\x19\x49\xf8\x79\x72\x7d\xfe\xd3\xf8\xdd\x74\xfc\xe3\xc5\xe5\xc5\xf4\x13\x18\x0b\x6f\x2e\xa6\xef\x26\x37\x37\xf0\xe6\xea\x1a\xc6\xf0\x7e\x7c\x3d\xbd\x38\xff\x70\x39\xbe\x86\xf7\x1f\xae\xdf\x5f\xdd\x4c\xfa\x70\x83\xa4\x15\x12\xff\xd7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe5\x4a\x4b\x7c\x32\x05\xb8\x85\x29\x54\x0a\x0b\xb1\x42\xb0\x98\xa0\x5c\x61\x0a\x02\x12\x93\x6f\x9e\xec\x54\x92\x25\x94\xd1\x73\x3e\xf3\x83\x80\x84\x8b\x0c\xb4\xf1\x3d\x70\x88\xf0\xfd\xc2\xfb\xfc\x74\x30\x58\xaf\xd7\xfd\xb9\x2e\xfa\xc6\xce\x07\x2a\x88\x73\x83\x1f\xfa\x6d\x92\x99\x08\xa5\xa6\x56\x24\x68\xc9\x39\x02\xb2\x82\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x77\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xdc\x58\xfa\xad\x54\x89\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\xb0\x14\x29\xc2\x6c\x03\xa2\x2e\xb0\x57\x3f\x0c\xc1\x28\xb8\x1b\xa4\xce\x8c\x5d\x32\x2c\xfb\xed\xdf\xdb\xad\xa8\xa1\xf3\x22\xb9\x25\x05\x49\x7e\x52\x58\x8b\xda\x93\x29\x0b\xeb\xe4\x0a\x99\x04\x02\x4d\xb4\xe7\xe4\x97\x9f\x01\xef\x30\x29\x82\xa4\x56\x25\xe4\x14\x3e\xff\x7e\xff\xa5\xd7\x66\xd1\x29\xba\x04\x75\x8a\x29\x9f\xef\xd6\xc1\x7a\xc1\x16\x85\x35\x1e\xaf\x10\x7e\x2d\x9c\xaf\xd1\x64\xd6\x2c\x41\x68\x30\x05\x21\xbe\x6e\x1d\xa9\xbd\x61\x81\x82\x7e\x6b\xb4\xac\x51\xbf\xdd\xaa\x98\x4f\x21\x13\xca\x61\xdc\xd7\x79\xcc\xe9\x34\x52\xaf\xcc\x2d\x49\x36\x96\x20\x6c\x37\x60\xf2\xc4\xa4\x31\x18\xe8\x1c\xd5\x31\xd0\xf5\xdb\x2d\xe2\x3b\x85\xac\xd0\xbc\x6d\x47\x99\x79\x0f\xd2\x59\x17\x7e\x6f\xb7\x48\xec\xb9\xc8\x7d\x61\x91\xed\x89\xd6\x1a\xeb\x40\x2e\x97\x98\x4a\xe1\x51\x6d\xda\xad\xd6\x4a\xd8\xb0\x00\x23\x50\x66\xde\x9f\xa3\x9f\xd0\x63\xa7\x7b\xd6\x6e\xb5\x64\x06\x9d\xb0\xfa\x6c\x34\xe2\xec\x93\x49\x8d\x69\x10\xdf\xf2\x0b\xe9\xfa\x99\x28\x94\xaf\xf6\x25\xa6\x96\x45\x5f\x58\x4d\x3f\xef\x83\x16\x1f\x11\x8c\x56\x1b\x48\x28\xcb\x88\x19\x85\xa7\xdb\x38\x8f\xcb\x78\x38\xd7\x83\x4c\x38\x32\xa1\xcc\x60\x8d\x90\x5b\x7c\x91\x2c\x90\x7c\xa7\x13\x8c\x5a\xba\x8d\x63\xa7\x8e\x80\x76\xeb\x9b\xbc\xef\xcd\xbb\x62\x39\x43\xdb\xe9\xc2\x37\x30\xbc\xcb\x86\x5d\x18\x8d\xf8\x47\xa9\x7b\xe4\x89\xfa\x92\x14\x93\xc7\x83\x32\xff\x8d\xb7\x52\xcf\xc3\x59\xa3\xae\x17\x19\x08\xd0\xb8\x86\xc4\x68\x06\x35\x79\x65\x86\x52\xcf\x21\xb1\x28\x3c\xa6\x3d\x10\x69\x0a\xde\x04\xe4\x55\x38\x6b\x6e\x09\xdf\x7c\x03\x1d\xda\x6c\x04\xc7\xe7\xd7\x93\xf1\x74\x72\x0c\x7f\xfc\x01\xe1\xcd\x51\x78\xf3\xf2\xa8\x5b\xd3\x4c\xea\xab\x2c\x8b\xca\xb1\xc0\x7e\x8e\x78\xdb\x39\xe9\xf6\x57\x42\x15\x78\x95\x05\x35\x23\xed\x44\xa7\x30\x8a\x3c\xcf\x77\x79\x5e\x36\x78\x88\x69\x30\x80\xb1\x73\xb8\x9c\x29\xdc\x0f\xc8\x18\xb1\x1c\xbc\xce\x53\xc6\x22\xf4\x25\x66\x99\x2b\x24\x54\x95\xbb\x46\xf3\xb3\xc6\x2d\xbf\xc9\xf1\x14\x00\xc0\xe4\x3d\x7e\x41\xb1\xc0\x2f\xbc\xf9\x09\xef\xd8\x47\xa5\x09\x09\x55\xe3\x34\xb5\xe8\x5c\xa7\xdb\x0d\xe4\x52\xe7\x85\x3f\x6d\x90\x2f\x71\x69\xec\xa6\xef\x28\x21\x75\xf8\x68\xbd\x70\xd2\x92\x67\x2e\xdc\x85\x26\x9e\x88\xd4\xb7\xc2\x75\xb6\x4b\xe7\xc6\xf9\xd3\x72\x89\x1e\xca\x35\xb6\x05\xb1\x1d\x0f\xef\x8e\xf7\xad\x35\xec\x6e\x91\x70\xf2\x5d\x97\x58\xee\xcf\x2a\x7c\x57\x69\xa2\x9f\x17\x6e\xd1\x61\x38\x6d\x57\xb7\xa9\x60\x04\xde\x16\x78\x10\xfe\x0c\xa9\x7d\x38\x39\x54\x19\xe5\x12\x6f\x8b\x84\x61\x35\x17\x9c\x69\x38\xd2\x05\x65\x5e\x57\xcc\xd8\xe6\xde\x98\x7d\x74\x45\x70\xdd\x4c\x2e\xdf\xbc\x9e\xdc\x4c\xaf\x3f\x9c\x4f\x8f\x6b\x70\x52\x98\x79\x52\xaa\x79\x06\x85\x7a\xee\x17\xac\x3f\x89\x6b\xae\x7e\x26\x9e\x17\x27\x5f\xc2\x1b\x18\x1d\x08\xf9\xd6\xe3\x1c\xf0\xf9\x0b\xcb\xbe\xdf\x37\x5f\x93\x34\x18\xf3\xaf\x41\x92\x37\x4c\x5c\x92\x7b\x53\x12\x3c\xee\xe7\xbf\x18\x54\xe9\x8c\x28\x7e\x14\x4a\xe8\x04\x1f\xd1\x79\x1f\x6b\xf5\xa4\x79\x20\x0f\x2d\xd1\x2f\x4c\xca\x85\x21\x11\xa1\xb6\x94\x08\x4a\x8d\xc6\x7f\x3f\x1b\x8d\x2f\x2f\x6b\xb9\x88\x9f\xcf\xaf\x5e\xd7\xf3\xd3\xf1\xeb\xc9\xe5\xe4\xed\x78\x3a\xd9\xa5\xbd\x99\x8e\xa7\x17\xe7\xfc\xb6\x4c\x5d\x83\x01\xdc\xdc\xca\x9c\x2b\x0c\xe7\x6d\xb3\xcc\xb9\x55\xae\xf4\x75\x3d\xf0\x0b\x43\x4d\xa8\x8d\x05\x34\x13\x3a\x29\x0b\x9b\x2b\x01\xeb\x0d\xc1\xf5\x21\xe7\x9d\xec\x38\xaf\x82\xb0\x74\xef\x2d\xc6\x4d\xd3\x8e\x37\xa5\x5e\x5b\x83\x06\x34\x72\xf2\xe7\x04\xdb\x79\xfa\x21\xe1\x1f\x30\x84\x53\x38\x89\x59\xf4\x91\x34\xfd\x12\x9e\x93\xf8\x3f\x91\xac\x5f\x1d\xe0\xfc\x7b\xa6\xec\xbd\x40\xfb\xef\xa7\x72\x53\xf8\xab\x2c\x3b\x85\x5d\x23\x7e\xbb\x67\xc4\x8a\xfe\x12\xf5\x3e\xfd\xff\xed\xd1\x6f\xd3\x3e\xa1\xca\xe4\xf0\x6c\x0f\x22\x21\xe9\x3e\xdb\x89\x83\x68\x5c\x6e\xef\x58\x1a\x8c\x1e\x28\x34\x2f\x9b\x18\x7e\x28\x53\xfe\x47\x85\xe6\x60\x9b\x4a\xcd\x68\xb3\x11\xed\x81\x45\x6f\x25\xae\x68\xd4\x3c\x76\x2c\x92\x1a\x76\xb3\xa6\xf4\xd5\x87\x8f\x18\x24\x6a\x44\x4e\x2e\xb1\xc1\xa7\xfe\x8c\x7b\x5e\x6a\xd2\xe3\xa8\xc6\x10\x13\xdc\x87\x5b\x84\xa5\xd8\xd0\xa8\x96\x15\xfa\x76\x03\x73\xe1\x20\xdd\x68\xb1\x94\x89\x0b\xf2\xb8\xb9\xb7\x38\x17\x96\xc5\x5a\xfc\xad\x40\x47\x73\x1f\x01\x59\x24\xbe\x10\x4a\x6d\x60\x2e\x69\x78\x23\xee\xce\xcb\x57\xc3\x21\x38\x2f\x73\xd4\x69\x0f\xbe\x7b\x35\xf8\xee\x5b\xb0\x85\xc2\x6e\xbf\x5d\x2b\x61\xd5\x51\xa3\x37\x68\x21\xa2\xe7\x35\xe6\x7e\xd1\xe9\xc2\x0f\x0f\xd4\xc2\x07\x0a\xdb\x41\x5a\x78\x01\x27\x5f\xfa\xa4\xd7\xa8\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\x81\xf7\xea\xf5\x55\xe7\x56\x58\xa1\xc4\x0c\xbb\xa7\x3c\x00\xb3\xad\xd6\x22\x4e\x40\xe4\x14\xc8\x95\x90\x1a\x44\x92\x98\x42\x7b\x32\x7c\x39\xcc\xa8\x0d\xe5\xf7\x63\x5f\xca\xe3\x59\x51\x24\x09\x3a\x57\xa6\x7b\xf6\x1a\xa9\x23\x96\xc4\x0d\x52\x3b\x99\x62\xcd\x2b\x94\x1d\x0c\xa7\xe6\x48\x41\xa3\x74\x29\x70\x69\x1c\x6d\x32\x43\x58\x5b\x1a\xbc\x9c\xd4\x09\xdf\x3c\xa4\x48\xd6\x76\x60\x34\x08\x50\x86\xaf\x3b\x38\xc6\x41\xd8\xb9\xeb\x87\x7c\x4f\xdb\x52\xce\xd1\x66\xdd\x6f\x02\xb9\x0e\x55\x1e\x71\x76\x5a\x21\x0d\x78\x27\x9d\xe7\x8e\x9a\xb4\x94\x0e\x02\x92\xa5\x9e\xf7\x20\x37\x39\xe7\xe9\xaf\x95\xb3\x98\xac\xaf\x27\xbf\x4c\xae\xab\xc6\xe7\xe9\x4e\x2c\x67\x9e\xa3\x6a\x24\x04\x4b\xf3\x96\xc7\xf4\xe8\xc0\x10\x73\x00\x50\xa3\x07\x00\x45\xf2\xb7\xb5\xf1\x7d\xed\x38\x4a\x38\xbf\x75\xcc\x1c\xc3\x3c\x57\x57\xc0\x15\xca\xbb\x9d\xdc\xbd\x9b\x1c\x4c\x5e\x56\x08\x52\x8a\xd3\x0e\x25\xf6\xdd\x49\xa3\xb1\xb0\x1d\x38\xb6\xf8\xbc\xa8\xd9\x78\xcd\xed\x66\x20\xaa\xa5\x06\x5e\x2f\xfb\x56\x11\xaa\x01\xeb\x6e\x0a\x4f\x70\xa0\xfa\xbd\x4d\x7e\x73\xe1\x3e\x38\xf6\x7a\x4c\x7f\x33\x39\xbf\xd0\xbe\x53\x2e\x5e\x68\x78\x01\xe5\x03\x25\x75\x78\xd1\x88\xa2\x03\xd9\xb1\x95\xa2\x42\x8f\xb0\x15\x71\x06\x3b\xaf\x48\x50\x30\x07\x1b\xcd\xa2\xdf\x2f\xce\xc3\x28\x8d\x0c\xf6\xcc\xa2\xef\xe3\x6f\x85\x50\xae\x33\xac\x9a\x85\x70\x02\x6f\xb8\xbc\x8d\xf6\x3a\x49\xe2\x69\xf6\x8e\x67\x35\xb6\x68\x8d\x92\x2d\x74\x82\xe7\x26\xc5\x47\x25\x44\x11\x31\x6d\x54\xbe\x8c\xc0\x3c\xd4\x7b\xb7\xea\x04\x70\x54\x35\x04\x99\x90\xaa\xb0\x78\x74\x06\x07\xd2\x8e\x2b\x6c\x26\x12\xf6\xa5\x43\xe0\x69\xdd\x81\x33\x4b\x5c\x98\x75\x50\xe0\x50\xf2\xda\x07\x47\x85\x83\x9d\xf2\xc1\xd7\x4e\xc2\x41\xe1\xc4\x1c\x6b\xe0\xa8\x0c\x5e\x3a\xea\xe0\x15\xc2\x9f\x86\xce\xf3\xea\xf1\x09\x28\xba\xff\x6b\xe0\xb1\xe3\xe7\xbd\x3e\xa7\x24\xe2\x6e\xa7\xf6\x50\x2a\x1b\x9a\x91\xbf\x97\xe3\x9f\x1c\x61\xbb\xb4\xe1\x68\x4d\xe2\x70\xc0\x6d\x5f\xf3\x75\xf7\x57\xab\x0f\x79\xfe\xa1\x96\x89\x30\xaa\x7f\xc5\xc4\x6f\x71\xca\x5d\x0e\x3d\xe5\x16\x57\xd2\x14\x54\xc0\xf0\x7f\x69\x1c\xae\x5a\xbe\xfb\x76\xeb\x3e\xde\x0b\xb2\xdf\xea\x17\x83\xeb\x45\xbc\xd7\x0e\xdd\x52\xad\x7c\x18\xae\xad\xf1\xba\x30\x0b\x37\xce\x2d\xe6\x7f\xe4\x82\x30\x06\xba\x37\x39\xb5\x03\xb1\x3a\x29\x8b\x22\xdd\x54\x05\xb1\x17\x1a\x11\x58\x08\x9d\xc6\x61\x44\xa4\xa9\x24\x79\x0c\x42\xd2\x50\xcc\x85\xd4\xed\x83\x66\xfc\x6a\x15\x3e\x84\x8c\xbd\xde\xb6\x5e\x48\xe3\x10\x49\x13\x1f\x6b\xdc\x7e\x42\xc1\xdc\x09\xa2\xdd\xbb\xce\x78\x5d\x6a\xb4\x2b\x96\xdc\x09\x83\x58\x09\xa9\x04\x4d\x5f\xdc\x61\xe9\x14\x12\x85\x42\x87\x2f\x1c\x98\x79\xb3\x42\xeb\xda\x4f\x00\xf9\x9f\xc1\xf8\x4e\x56\x2c\x1f\xa3\x39\x9e\x1e\xb3\x4f\x8d\xd8\x70\xfc\x37\x4a\x78\x1f\xe1\x55\x33\x6f\x88\x2c\xe9\xf9\xe3\x17\x6a\xdf\x7e\x5a\x48\x71\xcf\x44\x34\x3f\xc0\xb0\xd6\x97\xff\x5d\x82\x6c\x1f\x62\x97\x55\x7f\x16\x0f\xef\x8d\xe9\x81\x42\xc1\x53\x52\xf9\x69\xaa\xec\x47\x1f\x1b\xda\xca\xe8\x0d\x1d\xdd\x5e\xf8\xf2\x9d\xde\x02\xcb\x1b\x90\xd0\xda\xcf\x10\x35\x48\x8f\x56\xd0\x3c\x44\xe8\x8a\x5f\x53\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\x9f\x36\xa8\x30\x4b\x3d\xef\xb7\x5b\xe1\x7d\x2d\xde\x13\x7f\xb7\x8d\xf7\x50\x01\x99\x33\xde\x09\x54\x57\x02\x89\xbf\xe3\x6e\x91\xc7\xe6\x9d\x7b\x01\x5a\xa3\x57\x61\xa6\xde\xb9\x05\x60\xc6\x78\x13\xb0\x7b\x27\x46\x6b\xfc\xae\x01\x70\x26\x9d\x0b\x17\xc4\xec\x84\x84\xbf\xdb\x8f\x88\x92\x81\x82\xe1\xf4\x30\x03\x2d\x1d\x60\xda\xb9\x99\x20\x62\x7e\x15\x56\x43\x3d\x3f\xad\xaf\x86\x57\xf1\xa0\x72\x59\xb3\x8d\x5c\xb2\x6d\xee\xcf\x0e\x27\xb9\x61\x89\xc7\xc3\xc9\x8c\x6c\x5e\x01\xf6\x01\xd6\xfa\xac\xb1\x4f\xf2\x58\xaa\x64\xe9\x65\x66\x7b\x80\x95\xa5\xd7\x5a\x0e\x7f\xf7\x74\x91\x15\x71\x5d\xc5\x06\x4d\x43\x08\xdf\x36\xee\x2d\x1f\x9a\xb4\x68\x50\x89\x84\x65\x73\x35\x1a\x1d\x0d\xef\xaa\x0f\x23\x31\x57\x35\x68\x4a\x25\x42\x64\x84\xf3\x72\x54\xc8\x7f\x62\xdc\xb6\x1e\x83\xe5\x12\x58\x0c\x1f\x70\xb8\x9b\xa5\x10\x34\x33\x6e\x20\x0a\x47\xa3\xe8\x36\xb6\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xa0\xfb\xab\x33\x3a\x7c\xaa\x43\x2b\x49\x62\xf8\x24\x19\xfe\x1d\xc0\x1f\x4a\xb5\x4c\xd0\x6f\x20\x43\xc1\xdf\xdc\xbc\x81\x5c\x38\x07\x4b\x14\x34\xda\x66\x85\x52\x1b\x30\x36\x45\x12\x5e\xcd\x7a\x14\xd6\x06\x0a\x87\xd6\xc1\x7a\x61\x62\xa9\xe5\x16\x2f\xa7\x6e\x55\xfa\x5e\xbc\xce\x91\x2e\x57\x62\x03\xd2\x53\x59\x8f\x87\xaa\x47\x7a\xf5\xa1\x8b\xbf\x96\x19\x32\xf0\x7e\x98\x97\x53\x61\x33\xce\xf9\x35\x3d\x35\x23\x3c\x0e\x45\xcd\xd8\xde\x5e\x74\x35\x03\xb9\x2c\x3d\xcd\x68\xad\x17\xb2\x66\x48\xf2\x0a\x3f\x35\x83\xb1\xd6\x6a\xf3\x02\x23\xa8\x62\xe0\xa7\x9d\xf0\x64\x2d\x63\x7c\x86\xcf\xba\x15\x39\x3f\xf5\x22\x60\xc8\x8b\x1d\x32\xce\x2d\x6e\x28\x9b\x07\x1b\xd5\x4a\x53\x78\xf1\xf9\x16\x37\x5f\x0e\x57\xa2\x08\xc7\x1a\x5d\x55\x7a\xca\xb0\x08\x6b\x8f\x24\x83\x4a\x0b\x39\x1a\x9e\x81\xfc\xbe\xce\x50\x56\x4f\x90\xcf\x9f\x97\x7b\xd6\xd7\x3f\xcb\x2f\x65\x84\x57\x88\xdf\x59\xef\x36\x34\x8a\x31\x12\x68\x28\x28\xda\xf7\xed\x7f\x05\x00\x00\xff\xff\xfb\x65\x93\x4f\xfc\x22\x00\x00")
func call_tracer_legacyJsBytes() ([]byte, error) {
@@ -178,22 +157,22 @@ func evmdis_tracerJs() (*asset, error) {
return a, nil
}
-var _noop_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x4f\x6f\xdb\x46\x10\xc5\xcf\xe6\xa7\x78\xc7\x04\x50\xc5\xfe\x39\x14\x70\x8a\x02\xac\x61\x27\x2a\x1c\xdb\x90\xe8\x06\x3e\x0e\xc9\xa1\xb8\xe9\x6a\x87\x9d\x9d\x95\x22\x18\xfe\xee\xc5\x92\x12\x12\x14\x69\x9b\x9b\xb0\xd2\xfb\xbd\x37\xf3\x46\x65\x89\x2b\x19\x8f\xea\xb6\x83\xe1\xc7\xef\x7f\xf8\x19\xf5\xc0\xd8\xca\x77\x6c\x03\x2b\xa7\x1d\xaa\x64\x83\x68\x2c\xca\x12\xf5\xe0\x22\x7a\xe7\x19\x2e\x62\x24\x35\x48\x0f\xfb\xc7\xef\xbd\x6b\x94\xf4\xb8\x2c\xca\x72\xd6\x7c\xf5\xeb\x4c\xe8\x95\x19\x51\x7a\x3b\x90\xf2\x25\x8e\x92\xd0\x52\x80\x72\xe7\xa2\xa9\x6b\x92\x31\x9c\x81\x42\x57\x8a\x62\x27\x9d\xeb\x8f\x19\xe9\x0c\x29\x74\xac\x93\xb5\xb1\xee\xe2\x39\xc7\xdb\xbb\x47\xdc\x72\x8c\xac\x78\xcb\x81\x95\x3c\x1e\x52\xe3\x5d\x8b\x5b\xd7\x72\x88\x0c\x8a\x18\xf3\x4b\x1c\xb8\x43\x33\xe1\xb2\xf0\x26\x47\xd9\x9c\xa2\xe0\x46\x52\xe8\xc8\x9c\x84\x05\xd8\xe5\xe4\xd8\xb3\x46\x27\x01\x3f\x9d\xad\x4e\xc0\x05\x44\x33\xe4\x15\x59\x1e\x40\x21\x63\xd6\xbd\x06\x85\x23\x3c\xd9\x67\xe9\x37\x2c\xe4\xf3\xdc\x1d\x5c\x98\x6c\x06\x19\x19\x36\x90\xe5\xa9\x0f\xce\x7b\x34\x8c\x14\xb9\x4f\x7e\x91\x69\x4d\x32\x7c\x58\xd5\xef\xee\x1f\x6b\x54\x77\x4f\xf8\x50\xad\xd7\xd5\x5d\xfd\xf4\x06\x07\x67\x83\x24\x03\xef\x79\x46\xb9\xdd\xe8\x1d\x77\x38\x90\x2a\x05\x3b\x42\xfa\x4c\x78\x7f\xbd\xbe\x7a\x57\xdd\xd5\xd5\x6f\xab\xdb\x55\xfd\x04\x51\xdc\xac\xea\xbb\xeb\xcd\x06\x37\xf7\x6b\x54\x78\xa8\xd6\xf5\xea\xea\xf1\xb6\x5a\xe3\xe1\x71\xfd\x70\xbf\xb9\x5e\x62\xc3\x39\x15\x67\xfd\xff\xef\xbc\x9f\xda\x53\x46\xc7\x46\xce\xc7\xf3\x26\x9e\x24\x21\x0e\x92\x7c\x87\x81\xf6\x0c\xe5\x96\xdd\x9e\x3b\x10\x5a\x19\x8f\xdf\x5c\x6a\x66\x91\x97\xb0\x9d\x66\xfe\xd7\x83\xc4\xaa\x47\x10\x5b\x20\x32\xe3\x97\xc1\x6c\xbc\x2c\xcb\xc3\xe1\xb0\xdc\x86\xb4\x14\xdd\x96\x7e\xc6\xc5\xf2\xd7\x65\x91\x99\x41\x64\xac\x95\x5a\xd6\x5c\xce\xc7\x14\x6d\x62\x37\xa4\xdc\x48\x60\x34\xe2\x3c\xeb\x98\x5b\x46\x2b\x5d\x1e\xe0\xaf\xe4\x94\x3b\xf4\x2a\x3b\x10\x7e\xa7\x3d\x6d\x5a\x75\xa3\x65\x9c\x34\x1f\xb9\x35\x98\xcc\x15\x52\xe3\xa7\x73\x24\x98\x52\x88\xd4\xe6\xbb\xc9\x9f\x5b\xd6\x65\xf1\x5c\x5c\x94\x25\xa2\xf1\x98\xbd\x5d\xd8\xcb\x9f\x99\x2b\x9a\xfb\xd4\x23\x64\x9c\x1c\xa7\xcb\xc8\xa1\xfe\x78\x0f\xfe\xc4\x6d\x32\x8e\xcb\xe2\x22\xeb\x2e\xd1\xa7\x30\x41\x5f\x79\xd9\x2e\xd0\x35\xaf\xf1\x8c\x97\x45\x31\x91\x7b\x4a\xde\xbe\x44\x1f\x86\xd3\x99\x50\x6b\x89\xfc\x89\x96\x23\x49\x0f\x0a\x67\xc3\x7e\x2e\xf0\x62\xd2\xff\xb7\x85\x72\xfc\x9a\x07\x79\x3f\xf9\xcc\xc0\x38\x57\xdf\x30\x07\x38\x63\xa5\x7c\xfb\xb2\x67\xcd\x7f\x7b\x28\x5b\xd2\x10\x27\x5c\xd6\xf4\x2e\x90\x3f\x83\x4f\xe7\x91\x37\xe6\xc2\x76\x59\x5c\xcc\xef\x5f\x84\x6a\xed\xd3\x39\xd4\x4c\xc2\xf3\xcb\x1b\xbc\x14\x2f\xc5\xdf\x01\x00\x00\xff\xff\x77\x56\xe7\x1a\xf7\x04\x00\x00")
+var _noop_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x4f\x6f\xdb\x46\x10\xc5\xcf\xe6\xa7\x78\xc7\x04\x50\xc5\xfe\x39\x14\x70\x8a\x02\xac\x61\x27\x2a\x1c\xdb\x90\xe8\x06\x3e\x0e\xc9\xa1\xb8\xe9\x6a\x87\x9d\x9d\x95\x22\x18\xfe\xee\xc5\x92\x12\x12\x14\x69\x9b\x9b\xb0\xd2\xfb\xbd\x37\xf3\x46\x65\x89\x2b\x19\x8f\xea\xb6\x83\xe1\xc7\xef\x7f\xf8\x19\xf5\xc0\xd8\xca\x77\x6c\x03\x2b\xa7\x1d\xaa\x64\x83\x68\x2c\xca\x12\xf5\xe0\x22\x7a\xe7\x19\x2e\x62\x24\x35\x48\x0f\xfb\xc7\xef\xbd\x6b\x94\xf4\xb8\x2c\xca\x72\xd6\x7c\xf5\xeb\x4c\xe8\x95\x19\x51\x7a\x3b\x90\xf2\x25\x8e\x92\xd0\x52\x80\x72\xe7\xa2\xa9\x6b\x92\x31\x9c\x81\x42\x57\x8a\x62\x27\x9d\xeb\x8f\x19\xe9\x0c\x29\x74\xac\x93\xb5\xb1\xee\xe2\x39\xc7\xdb\xbb\x47\xdc\x72\x8c\xac\x78\xcb\x81\x95\x3c\x1e\x52\xe3\x5d\x8b\x5b\xd7\x72\x88\x0c\x8a\x18\xf3\x4b\x1c\xb8\x43\x33\xe1\xb2\xf0\x26\x47\xd9\x9c\xa2\xe0\x46\x52\xe8\xc8\x9c\x84\x05\xd8\xe5\xe4\xd8\xb3\x46\x27\x01\x3f\x9d\xad\x4e\xc0\x05\x44\x33\xe4\x15\x59\x1e\x40\x21\x63\xd6\xbd\x06\x85\x23\x3c\xd9\x67\xe9\x37\x2c\xe4\xf3\xdc\x1d\x5c\x98\x6c\x06\x19\x19\x36\x90\xe5\xa9\x0f\xce\x7b\x34\x8c\x14\xb9\x4f\x7e\x91\x69\x4d\x32\x7c\x58\xd5\xef\xee\x1f\x6b\x54\x77\x4f\xf8\x50\xad\xd7\xd5\x5d\xfd\xf4\x06\x07\x67\x83\x24\x03\xef\x79\x46\xb9\xdd\xe8\x1d\x77\x38\x90\x2a\x05\x3b\x42\xfa\x4c\x78\x7f\xbd\xbe\x7a\x57\xdd\xd5\xd5\x6f\xab\xdb\x55\xfd\x04\x51\xdc\xac\xea\xbb\xeb\xcd\x06\x37\xf7\x6b\x54\x78\xa8\xd6\xf5\xea\xea\xf1\xb6\x5a\xe3\xe1\x71\xfd\x70\xbf\xb9\x5e\x62\xc3\x39\x15\x67\xfd\xff\xef\xbc\x9f\xda\x53\x46\xc7\x46\xce\xc7\xf3\x26\x9e\x24\x21\x0e\x92\x7c\x87\x81\xf6\x0c\xe5\x96\xdd\x9e\x3b\x10\x5a\x19\x8f\xdf\x5c\x6a\x66\x91\x97\xb0\x9d\x66\xfe\xd7\x83\xc4\xaa\x47\x10\x5b\x20\x32\xe3\x97\xc1\x6c\xbc\x2c\xcb\xc3\xe1\xb0\xdc\x86\xb4\x14\xdd\x96\x7e\xc6\xc5\xf2\xd7\x65\x91\x99\x41\x64\xac\x95\x5a\xd6\x5c\xce\xc7\x14\x6d\x62\x37\xa4\xdc\x48\x60\x34\xe2\x3c\xeb\x98\x5b\x46\x2b\x5d\x1e\xe0\xaf\xe4\x94\x3b\xf4\x2a\x3b\x10\x7e\xa7\x3d\x6d\x5a\x75\xa3\x65\x9c\x34\x1f\xb9\x35\x98\xcc\x15\x52\xe3\xa7\x73\x24\x98\x52\x88\xd4\xe6\xbb\xc9\x9f\x5b\xd6\x65\xf1\x5c\x5c\x94\x25\xa2\xf1\x98\xbd\x5d\xd8\xcb\x9f\x99\x2b\x9a\xfb\xd4\x23\x64\x9c\x1c\xa7\xcb\xc8\xa1\xfe\x78\x0f\xfe\xc4\x6d\x32\x8e\xcb\xe2\x22\xeb\x2e\xd1\xa7\x30\x41\x5f\x79\xd9\x2e\xd0\x35\xaf\xf1\x8c\x97\x45\x31\x91\x7b\x4a\xde\xbe\x44\x1f\x86\xd3\x99\x50\x6b\x89\xfc\x89\x96\x23\x49\x0f\x0a\x67\xc3\x7e\x2e\xf0\x62\xd2\xff\xb7\x85\x72\xfc\x9a\x07\x79\x3f\xf9\xcc\xc0\x38\x57\xdf\x30\x07\x38\x63\xa5\x7c\xfb\xb2\x67\xcd\x7f\x7b\x28\x5b\xd2\x10\x27\x5c\xd6\xf4\x2e\x90\x3f\x83\x4f\xe7\x91\x37\xe6\xc2\x76\x59\x5c\xcc\xef\x5f\x84\x6a\xed\xd3\x39\xd4\x4c\xc2\xf3\xcb\x1b\xbc\x14\x2f\xc5\xdf\x01\x00\x00\xff\xff\x77\x56\xe7\x1a\xf7\x04\x00\x00")
-func noop_tracerJsBytes() ([]byte, error) {
+func noop_tracer_legacyJsBytes() ([]byte, error) {
return bindataRead(
- _noop_tracerJs,
- "noop_tracer.js",
+ _noop_tracer_legacyJs,
+ "noop_tracer_legacy.js",
)
}
-func noop_tracerJs() (*asset, error) {
- bytes, err := noop_tracerJsBytes()
+func noop_tracer_legacyJs() (*asset, error) {
+ bytes, err := noop_tracer_legacyJsBytes()
if err != nil {
return nil, err
}
- info := bindataFileInfo{name: "noop_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+ info := bindataFileInfo{name: "noop_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xf, 0x1c, 0x6f, 0x65, 0xaf, 0x90, 0x31, 0xab, 0xf, 0xe0, 0xca, 0x54, 0x7, 0xfd, 0xd3, 0xa1, 0x4a, 0x14, 0x1, 0x2a, 0x9d, 0xdc, 0xb9, 0x64, 0x69, 0x83, 0x30, 0xb1, 0x2a, 0xbd, 0xfb}}
return a, nil
}
@@ -218,23 +197,23 @@ func opcount_tracerJs() (*asset, error) {
return a, nil
}
-var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\xb6\xfe\x8a\x41\x5f\x6c\x5d\x5d\xb9\xcd\x02\x7b\x80\x73\x39\x40\x75\xdd\x36\x40\x36\x09\x6c\xe7\x72\xb9\xc5\x3e\x50\xe4\x48\xe6\x9a\x26\x05\x92\xb2\xe3\x2b\xf2\xbf\x1f\x86\xfa\xf0\x47\x93\xa6\x7b\x6f\x16\x39\xfc\xcd\xf7\x6f\xc6\xa3\x11\x4c\x4c\xb9\xb3\xb2\x58\x7a\x38\x7b\xff\xe1\xef\xb0\x58\x22\x14\xe6\x1d\xfa\x25\x5a\xac\xd6\x90\x56\x7e\x69\xac\x8b\x46\x23\x58\x2c\xa5\x83\x5c\x2a\x04\xe9\xa0\x64\xd6\x83\xc9\xc1\x9f\xc8\x2b\x99\x59\x66\x77\x49\x34\x1a\xd5\x6f\x9e\xbd\x26\x84\xdc\x22\x82\x33\xb9\xdf\x32\x8b\x63\xd8\x99\x0a\x38\xd3\x60\x51\x48\xe7\xad\xcc\x2a\x8f\x20\x3d\x30\x2d\x46\xc6\xc2\xda\x08\x99\xef\x08\x52\x7a\xa8\xb4\x40\x1b\x54\x7b\xb4\x6b\xd7\xda\xf1\xe5\xfa\x0e\xae\xd0\x39\xb4\xf0\x05\x35\x5a\xa6\xe0\xb6\xca\x94\xe4\x70\x25\x39\x6a\x87\xc0\x1c\x94\x74\xe2\x96\x28\x20\x0b\x70\xf4\xf0\x33\x99\x32\x6f\x4c\x81\xcf\xa6\xd2\x82\x79\x69\xf4\x10\x50\x92\xe5\xb0\x41\xeb\xa4\xd1\xf0\x4b\xab\xaa\x01\x1c\x82\xb1\x04\x32\x60\x9e\x1c\xb0\x60\x4a\x7a\x17\x03\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\x2f\x17\x5f\x6f\xee\x16\x90\x5e\x3f\xc0\x7d\x3a\x9b\xa5\xd7\x8b\x87\x73\xd8\x4a\xbf\x34\x95\x07\xdc\x60\x0d\x25\xd7\xa5\x92\x28\x60\xcb\xac\x65\xda\xef\xc0\xe4\x84\xf0\xdb\x74\x36\xf9\x9a\x5e\x2f\xd2\x8f\x97\x57\x97\x8b\x07\x30\x16\x3e\x5f\x2e\xae\xa7\xf3\x39\x7c\xbe\x99\x41\x0a\xb7\xe9\x6c\x71\x39\xb9\xbb\x4a\x67\x70\x7b\x37\xbb\xbd\x99\x4f\x13\x98\x23\x59\x85\xf4\xfe\xf5\x98\xe7\x21\x7b\x16\x41\xa0\x67\x52\xb9\x36\x12\x0f\xa6\x02\xb7\x34\x95\x12\xb0\x64\x1b\x04\x8b\x1c\xe5\x06\x05\x30\xe0\xa6\xdc\xfd\x74\x52\x09\x8b\x29\xa3\x8b\xe0\xf3\x8b\x05\x09\x97\x39\x68\xe3\x87\xe0\x10\xe1\x1f\x4b\xef\xcb\xf1\x68\xb4\xdd\x6e\x93\x42\x57\x89\xb1\xc5\x48\xd5\x70\x6e\xf4\xcf\x24\x22\xcc\xd2\xa2\xf3\xcc\xe3\xc2\x32\x8e\x16\x4c\xe5\xcb\xca\x3b\x70\x55\x9e\x4b\x2e\x51\x7b\x90\x3a\x37\x76\x1d\x2a\x05\xbc\x01\x6e\x91\x79\x04\x06\xca\x70\xa6\x00\x1f\x91\x57\xe1\xae\x8e\x74\x28\x57\xcb\xb4\x63\x3c\x9c\xe6\xd6\xac\xc9\xd7\xca\x79\xfa\xe1\x1c\xae\x33\x85\x02\x0a\xd4\xe8\xa4\x83\x4c\x19\xbe\x4a\xa2\x6f\x51\xef\xc0\x18\xaa\x93\xe0\x61\x23\x14\x6a\x63\x8b\x7d\x8b\x90\x55\x52\x09\xa9\x8b\x24\xea\xb5\xd2\x63\xd0\x95\x52\xc3\x28\x40\x28\x63\x56\x55\x99\x72\x6e\xaa\x60\xfb\x9f\xc8\x7d\x0d\xe6\x4a\xe4\x32\xa7\xe2\x60\xdd\xad\x37\xe1\xaa\xd3\x6b\x32\x92\x4f\xa2\xde\x11\xcc\x18\xf2\x4a\x07\x77\x06\x4c\x08\x3b\x04\x91\xc5\xdf\xa2\x5e\x6f\xc3\x2c\x61\xc1\x05\x78\xf3\x15\x1f\xc3\x65\x7c\x1e\xf5\x7a\x32\x87\x81\x5f\x4a\x97\xb4\xc0\xbf\x33\xce\xff\x80\x8b\x8b\x8b\xd0\xd4\xb9\xd4\x28\x62\x20\x88\xde\x73\x62\xf5\x4d\x2f\x63\x8a\x69\x8e\x63\xe8\xbf\x7f\xec\xc3\x5b\x10\x59\x52\xa0\xff\x58\x9f\xd6\xca\x12\x6f\xe6\xde\x4a\x5d\x0c\x3e\xfc\x1a\x0f\xc3\x2b\x6d\xc2\x1b\x68\xc4\xaf\x4d\x27\x5c\xdf\x73\x23\xc2\x75\x63\x73\x2d\x35\x31\xa2\x11\x6a\xa4\x9c\x37\x96\x15\x38\x86\x6f\x4f\xf4\xfd\x44\x5e\x3d\x45\xbd\xa7\xa3\x28\xcf\x6b\xa1\x17\xa2\xdc\x40\x00\x6a\x6f\xbb\x3a\x2f\x24\x75\xea\x61\x02\x02\xde\x8f\x92\x30\x6f\x4d\x39\x49\xc2\x0a\x77\xaf\x67\x82\x2e\xa4\x78\xec\x2e\x56\xb8\x8b\xcf\xa3\x17\x53\x94\x34\x46\xff\x2e\xc5\xe3\xcf\xe6\xeb\xe4\xcd\x51\x5c\xe7\x24\xb5\xb7\x37\x8e\x4f\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x37\x4c\x55\xd8\xaf\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\x47\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x8f\x4c\xc1\x05\x64\xb2\xb8\xd4\xfe\x24\x79\x75\xd0\xdb\xa7\xf1\x1f\x49\xd3\x3c\x89\x23\xc2\x1b\x9c\xc5\x43\xf8\xf0\x6b\x57\x11\xde\x10\x14\xbc\x0e\xe6\xcd\xcb\x50\xd1\x69\x31\x3c\xff\x2c\xa8\xa1\x0e\x7e\x1b\xb4\x26\xae\xca\x28\x1d\xb5\x9f\x21\x8e\xc7\x5d\x7c\xfe\x03\xdc\x63\xdf\x5a\xdc\x26\x34\x09\x13\xe2\x10\x94\x3e\xc3\x77\xc1\xdc\x9d\x43\x01\x6f\x81\xbe\xa4\x26\x55\x4e\xf2\x2f\xcc\xc5\xf0\x37\x68\x24\x6e\xad\xe4\xdf\x59\x52\xe7\xf5\x13\x72\x8b\x6b\x1a\x05\x94\x3a\xce\x94\x42\xdb\x77\x10\x88\x66\xd8\xd4\x60\x48\x32\xae\x4b\xbf\x6b\x07\x84\x67\xb6\x40\xef\x5e\xf7\x26\xe0\xbc\x7b\xd7\xf2\x66\x88\xdf\xae\x44\xb8\xb8\x80\xfe\x64\x36\x4d\x17\xd3\x7e\xd3\x7b\xa3\x11\xdc\x63\x58\x9f\x32\x25\x33\xa1\x76\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x5c\x3b\x1e\x19\xd2\x1e\x44\x1b\x0a\x3e\x4a\xe7\xa5\x2e\xa0\xa6\x97\x2d\x0d\xe3\x06\x2e\x34\x16\x67\x15\x85\xe7\x74\x72\x79\x43\x6b\x88\x45\x22\x23\x1a\x1a\xa1\x47\x99\x92\xdd\xda\x92\x4b\xeb\x3c\x94\x8a\x71\x4c\x08\xaf\x33\xe6\xe5\xa2\x68\xda\x9f\x54\xcf\x42\xdf\x06\xa0\xfd\x54\x64\x8a\xa6\x2a\xa9\x77\x30\x68\x31\xe2\xa8\xd7\xb3\xad\xf4\x01\xf6\xf9\x9e\x47\x9c\xc7\xf2\x90\x45\x68\x1b\xc1\x0d\x12\xef\x06\x0a\xa9\x27\x28\xe9\xfa\xd7\x6f\xcd\xc8\x46\x97\x44\x3d\x7a\x77\x40\x06\xca\x14\xc7\x64\x20\xea\xb0\xf0\xca\x5a\xca\x7f\xc7\xdb\x39\x11\xc3\x9f\x95\xf3\x14\x53\x4b\xe1\x69\x28\xe6\x39\x66\x0d\x3c\x4a\x23\x3a\xfe\x9e\x41\x69\xd8\x85\xe1\x42\xea\x9a\xd1\x56\xaf\x80\xa5\xf1\xa8\xbd\x64\x4a\xed\x28\x0f\x5b\x4b\xbb\x0f\x6d\x3b\x43\x70\x92\xa4\x02\x4d\x05\x51\xa9\xb9\xaa\x44\x5d\x06\xa1\xf8\x1b\x3c\x17\x6c\x3e\x5e\x9a\xd6\xe8\x1c\x2b\x30\xa1\x4a\xca\xe5\x63\xb3\x76\x6a\xe8\xd7\xcc\x38\x88\xfb\x49\x67\xe4\x31\x2f\x29\x53\x24\x6d\x91\x11\xb7\xa7\x42\x58\x74\x6e\x10\x37\x44\xd5\x65\xf6\x7e\x89\x9a\x82\x0f\x1a\xb7\xd0\xed\x33\x8c\x73\xda\xef\xc4\x10\x98\x10\xc4\x87\x27\xbb\x47\xd4\xeb\xb9\xad\xf4\x7c\x09\x41\x93\x29\xf7\xbd\x18\x37\xf5\xcf\x99\x43\x78\x33\xfd\xf7\x62\x72\xf3\x69\x3a\xb9\xb9\x7d\x78\x33\x86\xa3\xb3\xf9\xe5\x7f\xa6\xdd\xd9\xc7\xf4\x2a\xbd\x9e\x4c\xdf\x8c\xc3\x40\x7f\xc6\x21\x6f\x5a\x17\x48\xa1\xf3\x8c\xaf\x92\x12\x71\x35\x78\x7f\xcc\x03\x7b\x07\x7b\xbd\xcc\x22\x5b\x9d\xef\x8d\xa9\x1b\xb4\xd1\xd1\xf2\x34\x5c\xc0\x8b\xc1\x3a\x7f\xd9\x9a\x49\x23\x3f\x68\xd9\x7f\xbf\xbf\x04\xaa\x78\xdd\x8e\xb3\xbf\x6c\x48\xe8\x1d\xc6\x57\x63\x70\x4c\xd1\xda\x2c\xff\x4b\x7f\x77\xf2\xdc\xa1\x1f\x02\x6a\x61\xb6\xc4\x7c\x1d\x6a\x7d\xd3\xe0\x1e\x84\xec\x43\x5c\xd3\xee\x4d\x3e\x88\x3b\x61\x02\xfb\x5e\xf4\xec\x39\x51\xd4\x02\x2e\x5a\xf4\xb7\xe1\xe5\xeb\x81\x3a\x6b\x22\x75\xa2\xe0\x97\x93\xb5\x30\xdc\xaf\x71\x6d\xec\xae\x99\x61\x07\xfe\xfd\x38\xaa\xe9\xd5\x55\x57\x4f\xf4\x41\x45\xd6\x1d\x7c\x9a\x5e\x4d\xbf\xa4\x8b\xe9\x91\xd4\x7c\x91\x2e\x2e\x27\xf5\xd1\x5f\x2e\xbc\x0f\x3f\x5d\x78\xfd\xf9\x7c\x71\x33\x9b\xf6\xc7\xcd\xd7\xd5\x4d\xfa\xa9\xff\x9d\xc2\x66\x75\xfc\x51\xeb\x7a\x73\x6f\xac\xf8\x7f\x3a\xe0\x60\x8d\xcb\xd9\x73\x5b\x5c\xa0\x76\xee\xab\x93\x7f\x49\xc0\x74\xcb\xca\x79\xfd\x4f\xb1\x17\xde\x3f\xcb\xc3\x4f\xd1\x53\xf4\xbf\x00\x00\x00\xff\xff\x3a\xb7\x37\x41\xbf\x10\x00\x00")
+var _prestate_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdb\x6e\x1b\x39\x12\x7d\x56\x7f\x45\x21\x2f\x92\x36\x4a\x2b\xf6\x00\xb3\x80\xbc\x5e\xa0\xa3\x28\xb1\x00\x8f\x6d\x48\xf2\x66\xbd\x83\x79\x60\x93\xd5\x2d\x8e\x28\xb2\x41\xb2\x25\x6b\x03\xff\xfb\xa2\xd8\x17\x5d\xe2\x4b\x66\xdf\xd4\x64\xf1\x54\xd5\x61\xd5\x61\x69\x38\x84\xb1\x29\x76\x56\xe6\x4b\x0f\xe7\x1f\xcf\xfe\x0e\x8b\x25\x42\x6e\x3e\xa0\x5f\xa2\xc5\x72\x0d\x49\xe9\x97\xc6\xba\x68\x38\x84\xc5\x52\x3a\xc8\xa4\x42\x90\x0e\x0a\x66\x3d\x98\x0c\xfc\x89\xbd\x92\xa9\x65\x76\x17\x47\xc3\x61\x75\xe6\xd9\x6d\x42\xc8\x2c\x22\x38\x93\xf9\x2d\xb3\x38\x82\x9d\x29\x81\x33\x0d\x16\x85\x74\xde\xca\xb4\xf4\x08\xd2\x03\xd3\x62\x68\x2c\xac\x8d\x90\xd9\x8e\x20\xa5\x87\x52\x0b\xb4\xc1\xb5\x47\xbb\x76\x4d\x1c\x5f\x6f\xee\xe1\x1a\x9d\x43\x0b\x5f\x51\xa3\x65\x0a\xee\xca\x54\x49\x0e\xd7\x92\xa3\x76\x08\xcc\x41\x41\x2b\x6e\x89\x02\xd2\x00\x47\x07\xbf\x50\x28\xf3\x3a\x14\xf8\x62\x4a\x2d\x98\x97\x46\x0f\x00\x25\x45\x0e\x1b\xb4\x4e\x1a\x0d\xbf\x34\xae\x6a\xc0\x01\x18\x4b\x20\x3d\xe6\x29\x01\x0b\xa6\xa0\x73\x7d\x60\x7a\x07\x8a\xf9\xfd\xd1\x9f\x20\x64\x9f\xb7\x00\xa9\x83\x9b\xa5\x29\x10\xfc\x92\x79\xca\x7a\x2b\x95\x82\x14\xa1\x74\x98\x95\x6a\x40\x68\x69\xe9\xe1\xdb\x74\x71\x75\x7b\xbf\x80\xe4\xe6\x01\xbe\x25\xb3\x59\x72\xb3\x78\xb8\x80\xad\xf4\x4b\x53\x7a\xc0\x0d\x56\x50\x72\x5d\x28\x89\x02\xb6\xcc\x5a\xa6\xfd\x0e\x4c\x46\x08\xbf\x4d\x66\xe3\xab\xe4\x66\x91\x7c\x9a\x5e\x4f\x17\x0f\x60\x2c\x7c\x99\x2e\x6e\x26\xf3\x39\x7c\xb9\x9d\x41\x02\x77\xc9\x6c\x31\x1d\xdf\x5f\x27\x33\xb8\xbb\x9f\xdd\xdd\xce\x27\x31\xcc\x91\xa2\x42\x3a\xff\x36\xe7\x59\xb8\x3d\x8b\x20\xd0\x33\xa9\x5c\xc3\xc4\x83\x29\xc1\x2d\x4d\xa9\x04\x2c\xd9\x06\xc1\x22\x47\xb9\x41\x01\x0c\xb8\x29\x76\x3f\x7d\xa9\x84\xc5\x94\xd1\x79\xc8\xf9\xc5\x82\x84\x69\x06\xda\xf8\x01\x38\x44\xf8\xc7\xd2\xfb\x62\x34\x1c\x6e\xb7\xdb\x38\xd7\x65\x6c\x6c\x3e\x54\x15\x9c\x1b\xfe\x33\x8e\x08\xb3\xb0\xe8\x3c\xf3\xb8\xb0\x8c\xa3\x05\x53\xfa\xa2\xf4\x0e\x5c\x99\x65\x92\x4b\xd4\x1e\xa4\xce\x8c\x5d\x87\x4a\x01\x6f\x80\x5b\x64\x1e\x81\x81\x32\x9c\x29\xc0\x47\xe4\x65\xd8\xab\x98\x0e\xe5\x6a\x99\x76\x8c\x87\xd5\xcc\x9a\x35\xe5\x5a\x3a\x4f\x3f\x9c\xc3\x75\xaa\x50\x40\x8e\x1a\x9d\x74\x90\x2a\xc3\x57\x71\xf4\x3d\xea\x1c\x04\x43\x75\x12\x32\xac\x8d\x42\x6d\x6c\xb1\x6b\x11\xd2\x52\x2a\x21\x75\x1e\x47\x9d\xc6\x7a\x04\xba\x54\x6a\x10\x05\x08\x65\xcc\xaa\x2c\x12\xce\x4d\x19\x62\xff\x13\xb9\xaf\xc0\x5c\x81\x5c\x66\x54\x1c\xac\xdd\xf5\x26\x6c\xb5\x7e\x4d\x4a\xf6\x71\xd4\x39\x82\x19\x41\x56\xea\x90\x4e\x8f\x09\x61\x07\x20\xd2\xfe\xf7\xa8\xd3\xd9\x30\x4b\x58\x70\x09\xde\x5c\xe1\x63\xd8\xec\x5f\x44\x9d\x8e\xcc\xa0\xe7\x97\xd2\xc5\x0d\xf0\xef\x8c\xf3\x3f\xe0\xf2\xf2\x32\x34\x75\x26\x35\x8a\x3e\x10\x44\xe7\x39\xb3\x6a\xa7\x93\x32\xc5\x34\xc7\x11\x74\x3f\x3e\x76\xe1\x3d\x88\x34\xce\xd1\x7f\xaa\x56\x2b\x67\xb1\x37\x73\x6f\xa5\xce\x7b\x67\xbf\xf6\x07\xe1\x94\x36\xe1\x0c\xd4\xe6\x37\xa6\x35\xae\xf6\xb9\x11\x61\xbb\x8e\xb9\xb2\x1a\x1b\x51\x1b\xd5\x56\xce\x1b\xcb\x72\x1c\xc1\xf7\x27\xfa\x7e\xa2\xac\x9e\xa2\xce\xd3\x11\xcb\xf3\xca\xe8\x05\x96\x6b\x08\x40\xed\x6d\x5b\xe7\xb9\xa4\x4e\x3d\xbc\x80\x80\xf7\xda\x25\xcc\x9b\x50\x4e\x2e\x61\x85\xbb\xb7\x6f\x82\x36\xa4\x78\x6c\x37\x56\xb8\xeb\x5f\x44\x2f\x5e\x51\x5c\x07\xfd\xbb\x14\x8f\x3f\x7b\x5f\x27\x67\x8e\x78\x9d\x93\xd5\x3e\xde\x7e\xff\x84\x47\x8b\xae\x54\x9e\xca\x5d\xea\x8d\x59\x91\x70\x2d\x89\x1f\xa5\x02\x25\xa6\xa0\xdb\x72\x95\x72\xa4\x88\x1a\xa4\x47\xcb\x48\x3a\xcd\x06\x2d\xbd\x1a\x60\xd1\x97\x56\xbb\x96\xc6\x4c\x6a\xa6\x1a\xe0\x9a\x75\x6f\x19\xaf\x7a\xa6\x5a\x3f\xe0\x92\xfb\xc7\xc0\x62\xc8\xee\x07\x52\x02\x05\xd4\x5d\xcf\x65\x4f\x85\x1a\x0a\x83\x5c\x4f\x33\xf0\x8f\xa1\x6f\xa9\xf9\x33\xb4\x1f\x8c\x56\xbb\x41\x70\x6f\x91\xcb\x22\x68\x49\x7d\xf1\xf5\x99\x25\x73\xba\xeb\xab\xc4\x0a\x53\x94\xf4\x94\x88\xb8\xf5\x73\xd4\x83\x14\x68\xec\x4d\x88\xb5\x22\x31\x0a\x18\x89\x07\x32\x86\xc2\x48\xed\x07\xb0\x45\xd0\x88\x82\x84\x4a\xa0\x28\xb9\x0f\x01\x74\x37\x4c\x95\xd8\xad\xc4\x88\x24\x3d\x1c\x35\x25\xbd\x5c\x07\x62\x35\x08\x84\xae\xcd\x26\x3c\xc9\x29\xe3\x2b\xa8\x05\xc2\x58\x99\x4b\x1d\xbd\x18\x18\x01\xd7\xa1\xd5\x45\x47\x2b\x9f\x98\x82\x4b\x48\x65\x3e\xd5\xfe\xa4\xd8\xaa\x22\x69\x8e\xf6\xff\x88\xeb\x66\x8f\x1d\x09\x74\xef\xbc\x3f\x80\xb3\x5f\xdb\x0a\xf6\x86\xa0\xe0\x6d\x30\x6f\x5e\x86\x8a\x4e\x8b\xf7\xf9\x63\xc1\x0d\x29\xce\xfb\xe0\x35\x76\x65\x4a\xe5\x53\xe5\x19\x78\x3c\x56\x9d\x8b\x57\x70\x8f\x73\x6b\x70\x6b\x6a\x62\x26\xc4\x21\x28\x7d\x86\xef\x9c\xb9\x7b\x87\x02\xde\x03\x7d\x49\x4d\xae\x9c\xe4\x5f\x99\xeb\xc3\xdf\xa0\xb6\xb8\xb3\x92\xff\x10\x49\x75\xaf\x9f\x91\x5b\x5c\x53\xb9\xd1\xd5\x71\xa6\x14\xda\xae\x83\x20\x8c\x83\xba\x67\xc2\x25\xe3\xba\xf0\xbb\xe6\x41\xf3\xcc\xe6\xe8\xdd\xdb\xd9\x04\x9c\x0f\x1f\x1a\x9d\x0f\xfc\xed\x0a\x6a\x15\xe8\x8e\x67\x93\x64\x31\xe9\xd6\xdd\x32\x1c\xc2\x37\x0c\xe3\x5e\xaa\x64\x2a\xd4\x0e\x04\x2a\xf4\x58\xc5\x65\x74\xe0\xb5\xd5\xbd\x01\xcd\x6d\x34\x51\xe1\xa3\x74\x5e\xea\x1c\xaa\x2e\xdb\xd2\xf0\xd0\x76\xcc\x86\x00\x4b\xa2\xe7\xf4\xa5\xf5\x86\xc6\x26\x8b\x24\x9e\xf4\xc8\x05\x4d\x61\x4a\xb6\x63\x56\x26\xad\xf3\x50\x28\xc6\x31\x34\x59\x1b\xcc\xcb\x45\x51\xcb\x15\xb9\x9e\x05\x9d\x09\x40\xfb\x57\x9c\x29\x9a\x02\xc8\xbd\x83\x5e\x83\xd1\x8f\x3a\x1d\xdb\x58\x1f\x60\x5f\xec\x75\xcf\x79\x2c\x0e\x55\x8f\xa6\x27\xdc\x20\xbd\x13\x41\xf2\xaa\x17\x9f\x7c\xfd\xeb\xb7\x7a\xc4\x40\x17\x47\x1d\x3a\x77\x20\x5e\xca\xe4\x7b\xf1\x22\x31\x10\x15\x2d\xbc\xb4\xf6\x40\x6e\x40\x66\x24\x0c\x7f\x96\xce\x13\xa7\x96\xe8\xa9\x25\xf1\x75\xd1\x7b\x43\xf3\xea\xa7\xb8\x1a\x59\x0b\xe3\x51\x7b\xc9\x94\xda\xd1\x3d\x6c\x2d\xcd\x6a\x34\x9d\x0d\xc0\x49\xb2\x0a\x32\x15\x4c\xa5\xe6\xaa\x14\x55\x19\x84\xe2\xaf\xf1\x5c\x88\xf9\x78\xc8\x5b\xa3\x73\x2c\xc7\x98\x2a\x29\x93\x8f\xf5\x98\xac\xa1\x5b\x29\x79\xaf\xdf\x7d\x49\x30\x95\xc9\xe3\xa6\xc8\xe8\x2d\x4a\x84\xb0\xe8\x5c\xaf\x7f\xa0\xa1\x55\x8d\x2e\x51\x13\xf9\xa0\x71\x0b\xed\xfc\xc5\x38\xa7\x79\x54\x0c\x80\x09\x41\x7a\x78\x32\x2b\x45\x9d\x8e\xdb\x4a\xcf\x97\x10\x3c\x99\x62\xdf\x8b\xfd\xba\xfe\x39\x73\x08\xef\x26\xff\x5e\x8c\x6f\x3f\x4f\xc6\xb7\x77\x0f\xef\x46\x70\xb4\x36\x9f\xfe\x67\x72\xba\x76\x95\xcc\xaf\xda\xb5\x4f\xc9\x75\x72\x33\x9e\xbc\x1b\x85\xa1\xe4\x99\x24\xbd\x69\xd2\xa2\x20\x9c\x67\x7c\x15\x17\x88\xab\xde\xc7\x63\x6d\xd8\x27\xdd\xe9\xa4\x16\xd9\xea\x62\x1f\x60\xd5\xb4\xb5\x8f\x46\xbb\xe1\x12\x5e\x24\xf0\xe2\xe5\x68\xc6\xb5\x7d\xaf\x79\x11\xf6\x33\x58\x90\x8f\xb7\xe3\x38\xff\xcb\x81\x84\x7e\x62\x7c\x35\x02\xc7\x14\x8d\xfe\xf2\xbf\xf4\x97\x2d\xcb\x1c\xfa\x01\xa0\x16\x66\x4b\x6a\xd8\xa2\x56\x3b\x35\xee\x01\x65\x67\xfd\x4a\x8a\x6f\xb3\x5e\xbf\x35\x26\xb0\x1f\x4d\xcf\x9f\x33\x45\x2d\xe0\xb2\x41\x7f\x1f\x4e\xbe\x4d\xd4\x79\xcd\xd4\x89\x83\x5f\x4e\x46\xdb\xb0\xbf\xc6\xb5\xb1\xbb\xfa\x5d\x3b\xc8\xef\x75\x56\x93\xeb\xeb\xb6\x9e\xe8\x83\x8a\xac\x5d\xf8\x3c\xb9\x9e\x7c\x4d\x16\x93\x23\xab\xf9\x22\x59\x4c\xc7\xd5\xd2\x5f\x2e\xbc\xb3\x9f\x2e\xbc\xee\x7c\xbe\xb8\x9d\x4d\xba\xa3\xfa\xeb\xfa\x36\xf9\xdc\xfd\xc1\x61\x3d\xfe\xbe\xd6\xce\xde\x7c\x33\x56\xfc\x3f\x1d\x70\x30\x8a\x66\xec\xb9\x49\x34\xc8\x3d\xf7\xe5\xc9\x3f\x3d\x60\xba\x51\xea\xac\xfa\xb7\xdb\x09\xe7\x9f\xd5\xe6\xa7\xe8\x29\xfa\x5f\x00\x00\x00\xff\xff\xfa\x53\xfa\x80\x83\x11\x00\x00")
-func prestate_tracerJsBytes() ([]byte, error) {
+func prestate_tracer_legacyJsBytes() ([]byte, error) {
return bindataRead(
- _prestate_tracerJs,
- "prestate_tracer.js",
+ _prestate_tracer_legacyJs,
+ "prestate_tracer_legacy.js",
)
}
-func prestate_tracerJs() (*asset, error) {
- bytes, err := prestate_tracerJsBytes()
+func prestate_tracer_legacyJs() (*asset, error) {
+ bytes, err := prestate_tracer_legacyJsBytes()
if err != nil {
return nil, err
}
- info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x9, 0xf9, 0x44, 0x13, 0x31, 0x89, 0xf7, 0x35, 0x9a, 0xc6, 0xf0, 0x86, 0x9d, 0xb2, 0xe3, 0x57, 0xe2, 0xc0, 0xde, 0xc9, 0x3a, 0x4c, 0x4a, 0x94, 0x90, 0xa5, 0x92, 0x2f, 0xbf, 0xc0, 0xb8}}
+ info := bindataFileInfo{name: "prestate_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x94, 0xcf, 0x10, 0x37, 0xae, 0x8f, 0xd5, 0xfe, 0xf3, 0x25, 0x15, 0x25, 0x9b, 0x6b, 0x56, 0x7b, 0x3c, 0xa9, 0xda, 0xe8, 0xa2, 0xd3, 0x5, 0x96, 0x9c, 0xfd, 0x23, 0x68, 0xa2, 0x5, 0xca, 0x16}}
return a, nil
}
@@ -369,16 +348,15 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
- "4byte_tracer_legacy.js": _4byte_tracer_legacyJs,
- "bigram_tracer.js": bigram_tracerJs,
- "call_tracer_js.js": call_tracer_jsJs,
- "call_tracer_legacy.js": call_tracer_legacyJs,
- "evmdis_tracer.js": evmdis_tracerJs,
- "noop_tracer.js": noop_tracerJs,
- "opcount_tracer.js": opcount_tracerJs,
- "prestate_tracer.js": prestate_tracerJs,
- "trigram_tracer.js": trigram_tracerJs,
- "unigram_tracer.js": unigram_tracerJs,
+ "4byte_tracer_legacy.js": _4byte_tracer_legacyJs,
+ "bigram_tracer.js": bigram_tracerJs,
+ "call_tracer_legacy.js": call_tracer_legacyJs,
+ "evmdis_tracer.js": evmdis_tracerJs,
+ "noop_tracer_legacy.js": noop_tracer_legacyJs,
+ "opcount_tracer.js": opcount_tracerJs,
+ "prestate_tracer_legacy.js": prestate_tracer_legacyJs,
+ "trigram_tracer.js": trigram_tracerJs,
+ "unigram_tracer.js": unigram_tracerJs,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
@@ -425,16 +403,15 @@ type bintree struct {
}
var _bintree = &bintree{nil, map[string]*bintree{
- "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}},
- "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}},
- "call_tracer_js.js": {call_tracer_jsJs, map[string]*bintree{}},
- "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}},
- "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}},
- "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}},
- "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}},
- "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}},
- "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}},
- "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}},
+ "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}},
+ "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}},
+ "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}},
+ "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}},
+ "noop_tracer_legacy.js": {noop_tracer_legacyJs, map[string]*bintree{}},
+ "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}},
+ "prestate_tracer_legacy.js": {prestate_tracer_legacyJs, map[string]*bintree{}},
+ "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}},
+ "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
diff --git a/eth/tracers/js/internal/tracers/call_tracer_js.js b/eth/tracers/js/internal/tracers/call_tracer_js.js
deleted file mode 100644
index 7da7bf216a25..000000000000
--- a/eth/tracers/js/internal/tracers/call_tracer_js.js
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-
-// callFrameTracer uses the new call frame tracing methods to report useful information
-// about internal messages of a transaction.
-{
- callstack: [{}],
- fault: function(log, db) {},
- result: function(ctx, db) {
- // Prepare outer message info
- var result = {
- type: ctx.type,
- from: toHex(ctx.from),
- to: toHex(ctx.to),
- value: '0x' + ctx.value.toString(16),
- gas: '0x' + bigInt(ctx.gas).toString(16),
- gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
- input: toHex(ctx.input),
- output: toHex(ctx.output),
- }
- if (this.callstack[0].calls !== undefined) {
- result.calls = this.callstack[0].calls
- }
- if (this.callstack[0].error !== undefined) {
- result.error = this.callstack[0].error
- } else if (ctx.error !== undefined) {
- result.error = ctx.error
- }
- if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
- delete result.output
- }
-
- return this.finalize(result)
- },
- enter: function(frame) {
- var call = {
- type: frame.getType(),
- from: toHex(frame.getFrom()),
- to: toHex(frame.getTo()),
- input: toHex(frame.getInput()),
- gas: '0x' + bigInt(frame.getGas()).toString('16'),
- }
- if (frame.getValue() !== undefined){
- call.value='0x' + bigInt(frame.getValue()).toString(16)
- }
- this.callstack.push(call)
- },
- exit: function(frameResult) {
- var len = this.callstack.length
- if (len > 1) {
- var call = this.callstack.pop()
- call.gasUsed = '0x' + bigInt(frameResult.getGasUsed()).toString('16')
- var error = frameResult.getError()
- if (error === undefined) {
- call.output = toHex(frameResult.getOutput())
- } else {
- call.error = error
- if (call.type === 'CREATE' || call.type === 'CREATE2') {
- delete call.to
- }
- }
- len -= 1
- if (this.callstack[len-1].calls === undefined) {
- this.callstack[len-1].calls = []
- }
- this.callstack[len-1].calls.push(call)
- }
- },
- // finalize recreates a call object using the final desired field oder for json
- // serialization. This is a nicety feature to pass meaningfully ordered results
- // to users who don't interpret it, just display it.
- finalize: function(call) {
- var sorted = {
- type: call.type,
- from: call.from,
- to: call.to,
- value: call.value,
- gas: call.gas,
- gasUsed: call.gasUsed,
- input: call.input,
- output: call.output,
- error: call.error,
- time: call.time,
- calls: call.calls,
- }
- for (var key in sorted) {
- if (sorted[key] === undefined) {
- delete sorted[key]
- }
- }
- if (sorted.calls !== undefined) {
- for (var i=0; i.
+
+package native
+
+import (
+ "encoding/json"
+ "math/big"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/tracers"
+)
+
+func init() {
+ register("prestateTracer", newPrestateTracer)
+}
+
+type prestate = map[common.Address]*account
+type account struct {
+ Balance string `json:"balance"`
+ Nonce uint64 `json:"nonce"`
+ Code string `json:"code"`
+ Storage map[common.Hash]common.Hash `json:"storage"`
+}
+
+type prestateTracer struct {
+ env *vm.EVM
+ prestate prestate
+ create bool
+ to common.Address
+ interrupt uint32 // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
+}
+
+func newPrestateTracer() tracers.Tracer {
+ // First callframe contains tx context info
+ // and is populated on start and end.
+ return &prestateTracer{prestate: prestate{}}
+}
+
+// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
+func (t *prestateTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+ t.env = env
+ t.create = create
+ t.to = to
+
+ // Compute intrinsic gas
+ isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber)
+ isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber)
+ intrinsicGas, err := core.IntrinsicGas(input, nil, create, isHomestead, isIstanbul)
+ if err != nil {
+ return
+ }
+
+ t.lookupAccount(from)
+ t.lookupAccount(to)
+
+ // The recipient balance includes the value transferred.
+ toBal := hexutil.MustDecodeBig(t.prestate[to].Balance)
+ toBal = new(big.Int).Sub(toBal, value)
+ t.prestate[to].Balance = hexutil.EncodeBig(toBal)
+
+ // The sender balance is after reducing: value, gasLimit, intrinsicGas.
+ // We need to re-add them to get the pre-tx balance.
+ fromBal := hexutil.MustDecodeBig(t.prestate[from].Balance)
+ gasPrice := env.TxContext.GasPrice
+ consumedGas := new(big.Int).Mul(
+ gasPrice,
+ new(big.Int).Add(
+ new(big.Int).SetUint64(intrinsicGas),
+ new(big.Int).SetUint64(gas),
+ ),
+ )
+ fromBal.Add(fromBal, new(big.Int).Add(value, consumedGas))
+ t.prestate[from].Balance = hexutil.EncodeBig(fromBal)
+ t.prestate[from].Nonce--
+}
+
+// CaptureEnd is called after the call finishes to finalize the tracing.
+func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) {
+ if t.create {
+ // Exclude created contract.
+ delete(t.prestate, t.to)
+ }
+}
+
+// CaptureState implements the EVMLogger interface to trace a single step of VM execution.
+func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+ stack := scope.Stack
+ stackData := stack.Data()
+ stackLen := len(stackData)
+ switch {
+ case stackLen >= 1 && (op == vm.SLOAD || op == vm.SSTORE):
+ slot := common.Hash(stackData[stackLen-1].Bytes32())
+ t.lookupStorage(scope.Contract.Address(), slot)
+ case stackLen >= 1 && (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT):
+ addr := common.Address(stackData[stackLen-1].Bytes20())
+ t.lookupAccount(addr)
+ case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE):
+ addr := common.Address(stackData[stackLen-2].Bytes20())
+ t.lookupAccount(addr)
+ case op == vm.CREATE:
+ addr := scope.Contract.Address()
+ nonce := t.env.StateDB.GetNonce(addr)
+ t.lookupAccount(crypto.CreateAddress(addr, nonce))
+ case stackLen >= 4 && op == vm.CREATE2:
+ offset := stackData[stackLen-2]
+ size := stackData[stackLen-3]
+ init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
+ inithash := crypto.Keccak256(init)
+ salt := stackData[stackLen-4]
+ t.lookupAccount(crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), inithash))
+ }
+}
+
+// CaptureFault implements the EVMLogger interface to trace an execution fault.
+func (t *prestateTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
+}
+
+// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
+func (t *prestateTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+// CaptureExit is called when EVM exits a scope, even if the scope didn't
+// execute any code.
+func (t *prestateTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
+}
+
+// GetResult returns the json-encoded nested list of call traces, and any
+// error arising from the encoding or forceful termination (via `Stop`).
+func (t *prestateTracer) GetResult() (json.RawMessage, error) {
+ res, err := json.Marshal(t.prestate)
+ if err != nil {
+ return nil, err
+ }
+ return json.RawMessage(res), t.reason
+}
+
+// Stop terminates execution of the tracer at the first opportune moment.
+func (t *prestateTracer) Stop(err error) {
+ t.reason = err
+ atomic.StoreUint32(&t.interrupt, 1)
+}
+
+// lookupAccount fetches details of an account and adds it to the prestate
+// if it doesn't exist there.
+func (t *prestateTracer) lookupAccount(addr common.Address) {
+ if _, ok := t.prestate[addr]; ok {
+ return
+ }
+ t.prestate[addr] = &account{
+ Balance: bigToHex(t.env.StateDB.GetBalance(addr)),
+ Nonce: t.env.StateDB.GetNonce(addr),
+ Code: bytesToHex(t.env.StateDB.GetCode(addr)),
+ Storage: make(map[common.Hash]common.Hash),
+ }
+}
+
+// lookupStorage fetches the requested storage slot and adds
+// it to the prestate of the given contract. It assumes `lookupAccount`
+// has been performed on the contract before.
+func (t *prestateTracer) lookupStorage(addr common.Address, key common.Hash) {
+ if _, ok := t.prestate[addr].Storage[key]; ok {
+ return
+ }
+ t.prestate[addr].Storage[key] = t.env.StateDB.GetState(addr, key)
+}
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index e6a93c96f6a0..68389efbf437 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -456,6 +456,17 @@ func (ec *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockN
return hex, nil
}
+// CallContractAtHash is almost the same as CallContract except that it selects
+// the block by block hash instead of block height.
+func (ec *Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) {
+ var hex hexutil.Bytes
+ err := ec.c.CallContext(ctx, &hex, "eth_call", toCallArg(msg), rpc.BlockNumberOrHashWithHash(blockHash, false))
+ if err != nil {
+ return nil, err
+ }
+ return hex, nil
+}
+
// PendingCallContract executes a message call transaction using the EVM.
// The state seen by the contract call is the pending state.
func (ec *Client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) {
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index d56febc91d74..4a8727b37478 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -285,6 +285,9 @@ func TestEthClient(t *testing.T) {
"CallContract": {
func(t *testing.T) { testCallContract(t, client) },
},
+ "CallContractAtHash": {
+ func(t *testing.T) { testCallContractAtHash(t, client) },
+ },
"AtFunctions": {
func(t *testing.T) { testAtFunctions(t, client) },
},
@@ -507,6 +510,33 @@ func testStatusFunctions(t *testing.T, client *rpc.Client) {
}
}
+func testCallContractAtHash(t *testing.T, client *rpc.Client) {
+ ec := NewClient(client)
+
+ // EstimateGas
+ msg := ethereum.CallMsg{
+ From: testAddr,
+ To: &common.Address{},
+ Gas: 21000,
+ Value: big.NewInt(1),
+ }
+ gas, err := ec.EstimateGas(context.Background(), msg)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if gas != 21000 {
+ t.Fatalf("unexpected gas price: %v", gas)
+ }
+ block, err := ec.HeaderByNumber(context.Background(), big.NewInt(1))
+ if err != nil {
+ t.Fatalf("BlockByNumber error: %v", err)
+ }
+ // CallContract
+ if _, err := ec.CallContractAtHash(context.Background(), msg, block.Hash()); err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
func testCallContract(t *testing.T, client *rpc.Client) {
ec := NewClient(client)
diff --git a/ethdb/batch.go b/ethdb/batch.go
index 1353693318a6..541f40c838d2 100644
--- a/ethdb/batch.go
+++ b/ethdb/batch.go
@@ -43,6 +43,9 @@ type Batcher interface {
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called.
NewBatch() Batch
+
+ // NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+ NewBatchWithSize(size int) Batch
}
// HookedBatch wraps an arbitrary batch where each operation may be hooked into
diff --git a/ethdb/database.go b/ethdb/database.go
index 0a5729c6c1ec..b2e7c7228a0a 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -64,6 +64,7 @@ type KeyValueStore interface {
Iteratee
Stater
Compacter
+ Snapshotter
io.Closer
}
@@ -86,6 +87,10 @@ type AncientReader interface {
// Ancients returns the ancient item numbers in the ancient store.
Ancients() (uint64, error)
+ // Tail returns the number of first stored item in the freezer.
+ // This number can also be interpreted as the total deleted item numbers.
+ Tail() (uint64, error)
+
// AncientSize returns the ancient size of the specified category.
AncientSize(kind string) (uint64, error)
}
@@ -106,11 +111,24 @@ type AncientWriter interface {
// The integer return value is the total size of the written data.
ModifyAncients(func(AncientWriteOp) error) (int64, error)
- // TruncateAncients discards all but the first n ancient data from the ancient store.
- TruncateAncients(n uint64) error
+ // TruncateHead discards all but the first n ancient data from the ancient store.
+ // After the truncation, the latest item can be accessed it item_n-1(start from 0).
+ TruncateHead(n uint64) error
+
+ // TruncateTail discards the first n ancient data from the ancient store. The already
+ // deleted items are ignored. After the truncation, the earliest item can be accessed
+ // is item_n(start from 0). The deleted items may not be removed from the ancient store
+ // immediately, but only when the accumulated deleted data reach the threshold then
+ // will be removed all together.
+ TruncateTail(n uint64) error
// Sync flushes all in-memory ancient store data to disk.
Sync() error
+
+ // MigrateTable processes and migrates entries of a given table to a new format.
+ // The second argument is a function that takes a raw entry and returns it
+ // in the newest format.
+ MigrateTable(string, func([]byte) ([]byte, error)) error
}
// AncientWriteOp is given to the function argument of ModifyAncients.
@@ -153,5 +171,6 @@ type Database interface {
Iteratee
Stater
Compacter
+ Snapshotter
io.Closer
}
diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
index 06ee2211e6f8..6b206af48d5e 100644
--- a/ethdb/dbtest/testsuite.go
+++ b/ethdb/dbtest/testsuite.go
@@ -313,6 +313,68 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
}
})
+ t.Run("Snapshot", func(t *testing.T) {
+ db := New()
+ defer db.Close()
+
+ initial := map[string]string{
+ "k1": "v1", "k2": "v2", "k3": "", "k4": "",
+ }
+ for k, v := range initial {
+ db.Put([]byte(k), []byte(v))
+ }
+ snapshot, err := db.NewSnapshot()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k, v := range initial {
+ got, err := snapshot.Get([]byte(k))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, []byte(v)) {
+ t.Fatalf("Unexpected value want: %v, got %v", v, got)
+ }
+ }
+
+ // Flush more modifications into the database, ensure the snapshot
+ // isn't affected.
+ var (
+ update = map[string]string{"k1": "v1-b", "k3": "v3-b"}
+ insert = map[string]string{"k5": "v5-b"}
+ delete = map[string]string{"k2": ""}
+ )
+ for k, v := range update {
+ db.Put([]byte(k), []byte(v))
+ }
+ for k, v := range insert {
+ db.Put([]byte(k), []byte(v))
+ }
+ for k := range delete {
+ db.Delete([]byte(k))
+ }
+ for k, v := range initial {
+ got, err := snapshot.Get([]byte(k))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, []byte(v)) {
+ t.Fatalf("Unexpected value want: %v, got %v", v, got)
+ }
+ }
+ for k := range insert {
+ got, err := snapshot.Get([]byte(k))
+ if err == nil || len(got) != 0 {
+ t.Fatal("Unexpected value")
+ }
+ }
+ for k := range delete {
+ got, err := snapshot.Get([]byte(k))
+ if err != nil || len(got) == 0 {
+ t.Fatal("Unexpected deletion")
+ }
+ }
+ })
}
func iterateKeys(it ethdb.Iterator) []string {
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 9a782dedbe14..15bd4e6eb3b5 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -213,6 +213,14 @@ func (db *Database) NewBatch() ethdb.Batch {
}
}
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+ return &batch{
+ db: db.db,
+ b: leveldb.MakeBatch(size),
+ }
+}
+
// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
@@ -220,6 +228,19 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by all following mutations
+// happened on the database.
+// Note don't forget to release the snapshot once it's used up, otherwise
+// the stale data will never be cleaned up by the underlying compactor.
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+ snap, err := db.db.GetSnapshot()
+ if err != nil {
+ return nil, err
+ }
+ return &snapshot{db: snap}, nil
+}
+
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return db.db.GetProperty(property)
@@ -519,3 +540,26 @@ func bytesPrefixRange(prefix, start []byte) *util.Range {
r.Start = append(r.Start, start...)
return r
}
+
+// snapshot wraps a leveldb snapshot for implementing the Snapshot interface.
+type snapshot struct {
+ db *leveldb.Snapshot
+}
+
+// Has retrieves if a key is present in the snapshot backing by a key-value
+// data store.
+func (snap *snapshot) Has(key []byte) (bool, error) {
+ return snap.db.Has(key, nil)
+}
+
+// Get retrieves the given key if it's present in the snapshot backing by
+// key-value data store.
+func (snap *snapshot) Get(key []byte) ([]byte, error) {
+ return snap.db.Get(key, nil)
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (snap *snapshot) Release() {
+ snap.db.Release()
+}
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index 78181e860c15..95ec9bb8aa46 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -35,6 +35,10 @@ var (
// errMemorydbNotFound is returned if a key is requested that is not found in
// the provided memory database.
errMemorydbNotFound = errors.New("not found")
+
+ // errSnapshotReleased is returned if callers want to retrieve data from a
+ // released snapshot.
+ errSnapshotReleased = errors.New("snapshot released")
)
// Database is an ephemeral key-value store. Apart from basic data storage
@@ -53,7 +57,7 @@ func New() *Database {
}
}
-// NewWithCap returns a wrapped map pre-allocated to the provided capcity with
+// NewWithCap returns a wrapped map pre-allocated to the provided capacity with
// all the required database interface methods implemented.
func NewWithCap(size int) *Database {
return &Database{
@@ -129,6 +133,13 @@ func (db *Database) NewBatch() ethdb.Batch {
}
}
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+ return &batch{
+ db: db,
+ }
+}
+
// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
@@ -163,6 +174,13 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
}
}
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by all following mutations
+// happened on the database.
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+ return newSnapshot(db), nil
+}
+
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", errors.New("unknown property")
@@ -313,3 +331,59 @@ func (it *iterator) Value() []byte {
func (it *iterator) Release() {
it.keys, it.values = nil, nil
}
+
+// snapshot wraps a batch of key-value entries deep copied from the in-memory
+// database for implementing the Snapshot interface.
+type snapshot struct {
+ db map[string][]byte
+ lock sync.RWMutex
+}
+
+// newSnapshot initializes the snapshot with the given database instance.
+func newSnapshot(db *Database) *snapshot {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ copied := make(map[string][]byte)
+ for key, val := range db.db {
+ copied[key] = common.CopyBytes(val)
+ }
+ return &snapshot{db: copied}
+}
+
+// Has retrieves if a key is present in the snapshot backing by a key-value
+// data store.
+func (snap *snapshot) Has(key []byte) (bool, error) {
+ snap.lock.RLock()
+ defer snap.lock.RUnlock()
+
+ if snap.db == nil {
+ return false, errSnapshotReleased
+ }
+ _, ok := snap.db[string(key)]
+ return ok, nil
+}
+
+// Get retrieves the given key if it's present in the snapshot backing by
+// key-value data store.
+func (snap *snapshot) Get(key []byte) ([]byte, error) {
+ snap.lock.RLock()
+ defer snap.lock.RUnlock()
+
+ if snap.db == nil {
+ return nil, errSnapshotReleased
+ }
+ if entry, ok := snap.db[string(key)]; ok {
+ return common.CopyBytes(entry), nil
+ }
+ return nil, errMemorydbNotFound
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (snap *snapshot) Release() {
+ snap.lock.Lock()
+ defer snap.lock.Unlock()
+
+ snap.db = nil
+}
diff --git a/ethdb/snapshot.go b/ethdb/snapshot.go
new file mode 100644
index 000000000000..753e0f6b1f1e
--- /dev/null
+++ b/ethdb/snapshot.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package ethdb
+
+type Snapshot interface {
+ // Has retrieves if a key is present in the snapshot backing by a key-value
+ // data store.
+ Has(key []byte) (bool, error)
+
+ // Get retrieves the given key if it's present in the snapshot backing by
+ // key-value data store.
+ Get(key []byte) ([]byte, error)
+
+ // Release releases associated resources. Release should always succeed and can
+ // be called multiple times without causing error.
+ Release()
+}
+
+// Snapshotter wraps the Snapshot method of a backing data store.
+type Snapshotter interface {
+ // NewSnapshot creates a database snapshot based on the current state.
+ // The created snapshot will not be affected by all following mutations
+ // happened on the database.
+ // Note don't forget to release the snapshot once it's used up, otherwise
+ // the stale data will never be cleaned up by the underlying compactor.
+ NewSnapshot() (Snapshot, error)
+}
diff --git a/go.mod b/go.mod
index ef6b365b2d1b..7d0c3abdf862 100644
--- a/go.mod
+++ b/go.mod
@@ -3,21 +3,19 @@ module github.com/ethereum/go-ethereum
go 1.15
require (
- github.com/Azure/azure-pipeline-go v0.2.2 // indirect
- github.com/Azure/azure-storage-blob-go v0.7.0
- github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0
github.com/aws/aws-sdk-go-v2 v1.2.0
github.com/aws/aws-sdk-go-v2/config v1.1.1
github.com/aws/aws-sdk-go-v2/credentials v1.1.1
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1
- github.com/btcsuite/btcd v0.20.1-beta
+ github.com/btcsuite/btcd/btcec/v2 v2.1.2
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.14.0
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f
github.com/davecgh/go-spew v1.1.1
- github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
+ github.com/deckarep/golang-set v1.8.0
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48
@@ -27,24 +25,25 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-stack/stack v1.8.0
+ github.com/golang-jwt/jwt/v4 v4.3.0
github.com/golang/protobuf v1.4.3
github.com/golang/snappy v0.0.4
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
- github.com/google/uuid v1.1.5
+ github.com/google/uuid v1.2.0
github.com/gorilla/websocket v1.4.2
- github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
+ github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.0
- github.com/huin/goupnp v1.0.2
+ github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
- github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
+ github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/julienschmidt/httprouter v1.2.0
- github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559
+ github.com/karalabe/usb v0.0.2
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.12
@@ -62,11 +61,11 @@ require (
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
- golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
- golang.org/x/text v0.3.6
+ golang.org/x/text v0.3.7
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
+ golang.org/x/tools v0.1.0
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
gopkg.in/urfave/cli.v1 v1.20.0
diff --git a/go.sum b/go.sum
index 36eb9e508454..95ff558f6c4c 100644
--- a/go.sum
+++ b/go.sum
@@ -18,27 +18,12 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck=
-github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -47,7 +32,6 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIO
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
-github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -77,15 +61,10 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
-github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
-github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/btcsuite/btcd/btcec/v2 v2.1.2 h1:YoYoC9J0jwfukodSBMzZYUVQ8PTiYg4BnOWiJVzTmLs=
+github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0 h1:MSskdM4/xJYcFzy0altH/C/xHopifpWzHUi1JeVI34Q=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
@@ -107,21 +86,26 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
+github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
+github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ=
@@ -167,6 +151,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
+github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -208,16 +194,16 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I=
-github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
+github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -229,8 +215,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
-github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
+github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204 h1:+EYBkW+dbi3F/atB+LSQZSWh7+HNrV3A/N0y6DSoy9k=
+github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -248,14 +234,12 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -265,11 +249,10 @@ github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
-github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY=
-github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
+github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
@@ -299,9 +282,6 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
@@ -319,6 +299,7 @@ github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjU
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
@@ -331,11 +312,9 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -421,7 +400,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -460,6 +438,7 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -479,10 +458,12 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -496,7 +477,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -547,8 +527,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -580,6 +561,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -653,8 +635,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index 16e0eb654d97..cbd76465d668 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/filters"
@@ -100,6 +101,14 @@ func (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {
}
func (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {
+ // Ask transaction pool for the nonce which includes pending transactions
+ if blockNr, ok := a.blockNrOrHash.Number(); ok && blockNr == rpc.PendingBlockNumber {
+ nonce, err := a.backend.GetPoolNonce(ctx, a.address)
+ if err != nil {
+ return 0, err
+ }
+ return hexutil.Uint64(nonce), nil
+ }
state, err := a.getState(ctx)
if err != nil {
return 0, err
@@ -245,6 +254,10 @@ func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, erro
if err != nil || tx == nil {
return nil, err
}
+ // Pending tx
+ if t.block == nil {
+ return nil, nil
+ }
header, err := t.block.resolveHeader(ctx)
if err != nil || header == nil {
return nil, err
@@ -285,6 +298,30 @@ func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, e
}
}
+func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) {
+ tx, err := t.resolve(ctx)
+ if err != nil || tx == nil {
+ return nil, err
+ }
+ // Pending tx
+ if t.block == nil {
+ return nil, nil
+ }
+ header, err := t.block.resolveHeader(ctx)
+ if err != nil || header == nil {
+ return nil, err
+ }
+ if header.BaseFee == nil {
+ return (*hexutil.Big)(tx.GasPrice()), nil
+ }
+
+ tip, err := tx.EffectiveGasTip(header.BaseFee)
+ if err != nil {
+ return nil, err
+ }
+ return (*hexutil.Big)(tip), nil
+}
+
func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {
tx, err := t.resolve(ctx)
if err != nil || tx == nil {
@@ -598,6 +635,22 @@ func (b *Block) BaseFeePerGas(ctx context.Context) (*hexutil.Big, error) {
return (*hexutil.Big)(header.BaseFee), nil
}
+func (b *Block) NextBaseFeePerGas(ctx context.Context) (*hexutil.Big, error) {
+ header, err := b.resolveHeader(ctx)
+ if err != nil {
+ return nil, err
+ }
+ chaincfg := b.backend.ChainConfig()
+ if header.BaseFee == nil {
+ // Make sure next block doesn't enable EIP-1559
+ if !chaincfg.IsLondon(new(big.Int).Add(header.Number, common.Big1)) {
+ return nil, nil
+ }
+ }
+ nextBaseFee := misc.CalcBaseFee(chaincfg, header)
+ return (*hexutil.Big)(nextBaseFee), nil
+}
+
func (b *Block) Parent(ctx context.Context) (*Block, error) {
if _, err := b.resolveHeader(ctx); err != nil {
return nil, err
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 4e0f099e4208..a0b797906927 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -48,6 +48,7 @@ func TestBuildSchema(t *testing.T) {
conf := node.DefaultConfig
conf.DataDir = ddir
stack, err := node.New(&conf)
+ defer stack.Close()
if err != nil {
t.Fatalf("could not create new node: %v", err)
}
diff --git a/graphql/schema.go b/graphql/schema.go
index 86060cd2388c..0013e7bae75c 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -69,7 +69,7 @@ const schema string = `
transaction: Transaction!
}
- #EIP-2718
+ #EIP-2718
type AccessTuple{
address: Address!
storageKeys : [Bytes32!]!
@@ -94,10 +94,12 @@ const schema string = `
value: BigInt!
# GasPrice is the price offered to miners for gas, in wei per unit.
gasPrice: BigInt!
- # MaxFeePerGas is the maximum fee per gas offered to include a transaction, in wei.
- maxFeePerGas: BigInt
- # MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei.
- maxPriorityFeePerGas: BigInt
+ # MaxFeePerGas is the maximum fee per gas offered to include a transaction, in wei.
+ maxFeePerGas: BigInt
+ # MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei.
+ maxPriorityFeePerGas: BigInt
+ # EffectiveTip is the actual amount of reward going to miner after considering the max fee cap.
+ effectiveTip: BigInt
# Gas is the maximum amount of gas this transaction can consume.
gas: Long!
# InputData is the data supplied to the target of the transaction.
@@ -187,8 +189,10 @@ const schema string = `
gasLimit: Long!
# GasUsed is the amount of gas that was used executing transactions in this block.
gasUsed: Long!
- # BaseFeePerGas is the fee perunit of gas burned by the protocol in this block.
- baseFeePerGas: BigInt
+ # BaseFeePerGas is the fee per unit of gas burned by the protocol in this block.
+ baseFeePerGas: BigInt
+ # NextBaseFeePerGas is the fee per unit of gas which needs to be burned in the next block.
+ nextBaseFeePerGas: BigInt
# Timestamp is the unix timestamp at which this block was mined.
timestamp: Long!
# LogsBloom is a bloom filter that can be used to check if a block may
@@ -244,10 +248,10 @@ const schema string = `
gas: Long
# GasPrice is the price, in wei, offered for each unit of gas.
gasPrice: BigInt
- # MaxFeePerGas is the maximum fee per gas offered, in wei.
- maxFeePerGas: BigInt
- # MaxPriorityFeePerGas is the maximum miner tip per gas offered, in wei.
- maxPriorityFeePerGas: BigInt
+ # MaxFeePerGas is the maximum fee per gas offered, in wei.
+ maxFeePerGas: BigInt
+ # MaxPriorityFeePerGas is the maximum miner tip per gas offered, in wei.
+ maxPriorityFeePerGas: BigInt
# Value is the value, in wei, sent along with the call.
value: BigInt
# Data is the data sent to the callee.
diff --git a/graphql/service.go b/graphql/service.go
index bcb0a4990d64..29d98ad74683 100644
--- a/graphql/service.go
+++ b/graphql/service.go
@@ -74,7 +74,7 @@ func newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string)
return err
}
h := handler{Schema: s}
- handler := node.NewHTTPHandlerStack(h, cors, vhosts)
+ handler := node.NewHTTPHandlerStack(h, cors, vhosts, nil)
stack.RegisterHandler("GraphQL UI", "/graphql/ui", GraphiQL{})
stack.RegisterHandler("GraphQL", "/graphql", handler)
diff --git a/internal/build/azure.go b/internal/build/azure.go
index 9c9cc2dcc5f1..9d1c4f300a88 100644
--- a/internal/build/azure.go
+++ b/internal/build/azure.go
@@ -19,10 +19,9 @@ package build
import (
"context"
"fmt"
- "net/url"
"os"
- "github.com/Azure/azure-storage-blob-go/azblob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)
// AzureBlobstoreConfig is an authentication and configuration struct containing
@@ -49,15 +48,11 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
if err != nil {
return err
}
-
- pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-
- u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
- service := azblob.NewServiceURL(*u, pipeline)
-
- container := service.NewContainerURL(config.Container)
- blockblob := container.NewBlockBlobURL(name)
-
+ u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
+ container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ if err != nil {
+ return err
+ }
// Stream the file to upload into the designated blobstore container
in, err := os.Open(path)
if err != nil {
@@ -65,49 +60,41 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
}
defer in.Close()
- _, err = blockblob.Upload(context.Background(), in, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+ blockblob := container.NewBlockBlobClient(name)
+ _, err = blockblob.Upload(context.Background(), in, nil)
return err
}
// AzureBlobstoreList lists all the files contained within an azure blobstore.
-func AzureBlobstoreList(config AzureBlobstoreConfig) ([]azblob.BlobItem, error) {
- credential := azblob.NewAnonymousCredential()
- if len(config.Token) > 0 {
- c, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
- if err != nil {
- return nil, err
- }
- credential = c
+func AzureBlobstoreList(config AzureBlobstoreConfig) ([]*azblob.BlobItemInternal, error) {
+ // Create an authenticated client against the Azure cloud
+ credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
+ if err != nil {
+ return nil, err
}
- pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-
- u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
- service := azblob.NewServiceURL(*u, pipeline)
-
- var allBlobs []azblob.BlobItem
- // List all the blobs from the container and return them
- container := service.NewContainerURL(config.Container)
- nextMarker := azblob.Marker{}
- for nextMarker.NotDone() {
- res, err := container.ListBlobsFlatSegment(context.Background(), nextMarker, azblob.ListBlobsSegmentOptions{
- MaxResults: 5000, // The server only gives max 5K items
- })
- if err != nil {
- return nil, err
- }
- allBlobs = append(allBlobs, res.Segment.BlobItems...)
- nextMarker = res.NextMarker
-
+ u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
+ container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ if err != nil {
+ return nil, err
+ }
+ var maxResults int32 = 5000
+ pager := container.ListBlobsFlat(&azblob.ContainerListBlobFlatSegmentOptions{
+ Maxresults: &maxResults,
+ })
+ var allBlobs []*azblob.BlobItemInternal
+ for pager.NextPage(context.Background()) {
+ res := pager.PageResponse()
+ allBlobs = append(allBlobs, res.ContainerListBlobFlatSegmentResult.Segment.BlobItems...)
}
- return allBlobs, nil
+ return allBlobs, pager.Err()
}
// AzureBlobstoreDelete iterates over a list of files to delete and removes them
// from the blobstore.
-func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []azblob.BlobItem) error {
+func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*azblob.BlobItemInternal) error {
if *DryRunFlag {
for _, blob := range blobs {
- fmt.Printf("would delete %s (%s) from %s/%s\n", blob.Name, blob.Properties.LastModified, config.Account, config.Container)
+ fmt.Printf("would delete %s (%s) from %s/%s\n", *blob.Name, blob.Properties.LastModified, config.Account, config.Container)
}
return nil
}
@@ -116,21 +103,18 @@ func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []azblob.BlobItem)
if err != nil {
return err
}
-
- pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-
- u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
- service := azblob.NewServiceURL(*u, pipeline)
-
- container := service.NewContainerURL(config.Container)
-
+ u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
+ container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ if err != nil {
+ return err
+ }
// Iterate over the blobs and delete them
for _, blob := range blobs {
- blockblob := container.NewBlockBlobURL(blob.Name)
- if _, err := blockblob.Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
+ blockblob := container.NewBlockBlobClient(*blob.Name)
+ if _, err := blockblob.Delete(context.Background(), &azblob.DeleteBlobOptions{}); err != nil {
return err
}
- fmt.Printf("deleted %s (%s)\n", blob.Name, blob.Properties.LastModified)
+ fmt.Printf("deleted %s (%s)\n", *blob.Name, blob.Properties.LastModified)
}
return nil
}
diff --git a/internal/build/download.go b/internal/build/download.go
index 0ed0b5e130d1..efb223b32701 100644
--- a/internal/build/download.go
+++ b/internal/build/download.go
@@ -58,7 +58,7 @@ func (db *ChecksumDB) Verify(path string) error {
}
fileHash := hex.EncodeToString(h.Sum(nil))
if !db.findHash(filepath.Base(path), fileHash) {
- return fmt.Errorf("invalid file hash %s", fileHash)
+ return fmt.Errorf("invalid file hash %s for %s", fileHash, filepath.Base(path))
}
return nil
}
diff --git a/internal/build/util.go b/internal/build/util.go
index 2bdced82ee2f..cd6db09d006b 100644
--- a/internal/build/util.go
+++ b/internal/build/util.go
@@ -17,6 +17,7 @@
package build
import (
+ "bufio"
"bytes"
"flag"
"fmt"
@@ -31,6 +32,7 @@ import (
"path/filepath"
"strings"
"text/template"
+ "time"
)
var DryRunFlag = flag.Bool("n", false, "dry run, don't execute commands")
@@ -115,7 +117,6 @@ func render(tpl *template.Template, outputFile string, outputPerm os.FileMode, x
// the form sftp://[user@]host[:port].
func UploadSFTP(identityFile, host, dir string, files []string) error {
sftp := exec.Command("sftp")
- sftp.Stdout = nil
sftp.Stderr = os.Stderr
if identityFile != "" {
sftp.Args = append(sftp.Args, "-i", identityFile)
@@ -130,6 +131,10 @@ func UploadSFTP(identityFile, host, dir string, files []string) error {
if err != nil {
return fmt.Errorf("can't create stdin pipe for sftp: %v", err)
}
+ stdout, err := sftp.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("can't create stdout pipe for sftp: %v", err)
+ }
if err := sftp.Start(); err != nil {
return err
}
@@ -137,8 +142,35 @@ func UploadSFTP(identityFile, host, dir string, files []string) error {
for _, f := range files {
fmt.Fprintln(in, "put", f, path.Join(dir, filepath.Base(f)))
}
+ fmt.Fprintln(in, "exit")
+ // Some issue with the PPA sftp server makes it so the server does not
+ // respond properly to a 'bye', 'exit' or 'quit' from the client.
+ // To work around that, we check the output, and when we see the client
+ // exit command, we do a hard exit.
+ // See
+ // https://github.com/kolban-google/sftp-gcs/issues/23
+ // https://github.com/mscdex/ssh2/pull/1111
+ aborted := false
+ go func() {
+ scanner := bufio.NewScanner(stdout)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fmt.Println(txt)
+ if txt == "sftp> exit" {
+ // Give it .5 seconds to exit (server might be fixed), then
+ // hard kill it from the outside
+ time.Sleep(500 * time.Millisecond)
+ aborted = true
+ sftp.Process.Kill()
+ }
+ }
+ }()
stdin.Close()
- return sftp.Wait()
+ err = sftp.Wait()
+ if aborted {
+ return nil
+ }
+ return err
}
// FindMainPackages finds all 'main' packages in the given directory and returns their
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 65e34752bf41..f352e9b42ad6 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -18,6 +18,8 @@ package ethapi
import (
"context"
+ "crypto/rand"
+ "encoding/hex"
"errors"
"fmt"
"math/big"
@@ -287,7 +289,7 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
}
}
-// listAccounts will return a list of addresses for accounts this node manages.
+// ListAccounts will return a list of addresses for accounts this node manages.
func (s *PrivateAccountAPI) ListAccounts() []common.Address {
return s.am.Accounts()
}
@@ -767,8 +769,7 @@ func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Ha
return nil, err
}
-// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index. When fullTx is true
-// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
+// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index.
func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByNumber(ctx, blockNr)
if block != nil {
@@ -783,8 +784,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
return nil, err
}
-// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index. When fullTx is true
-// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
+// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index.
func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByHash(ctx, blockHash)
if block != nil {
@@ -1432,8 +1432,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
} else {
to = crypto.CreateAddress(args.from(), uint64(*args.Nonce))
}
+ isPostMerge := header.Difficulty.Cmp(common.Big0) == 0
// Retrieve the precompiles since they don't need to be added to the access list
- precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number))
+ precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge))
// Create an initial tracer
prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
@@ -1657,7 +1658,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha
fields["status"] = hexutil.Uint(receipt.Status)
}
if receipt.Logs == nil {
- fields["logs"] = [][]*types.Log{}
+ fields["logs"] = []*types.Log{}
}
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) {
@@ -2083,3 +2084,162 @@ func toHexSlice(b [][]byte) []string {
}
return r
}
+
+// PrivateTxBundleAPI offers an API for accepting bundled transactions
+type PrivateTxBundleAPI struct {
+ b Backend
+}
+
+// NewPrivateTxBundleAPI creates a new Tx Bundle API instance.
+func NewPrivateTxBundleAPI(b Backend) *PrivateTxBundleAPI {
+ return &PrivateTxBundleAPI{b}
+}
+
+// BundleAPI offers an API for accepting bundled transactions
+type BundleAPI struct {
+ b Backend
+ chain *core.BlockChain
+}
+
+// NewBundleAPI creates a new Tx Bundle API instance.
+func NewBundleAPI(b Backend, chain *core.BlockChain) *BundleAPI {
+ return &BundleAPI{b, chain}
+}
+
+// CallBundleArgs represents the arguments for a bundle of calls.
+type CallBundleArgs struct {
+ Txs []TransactionArgs `json:"txs"`
+ Coinbase *string `json:"coinbase"`
+ Timestamp *uint64 `json:"timestamp"`
+ Timeout *int64 `json:"timeout"`
+ GasLimit *uint64 `json:"gasLimit"`
+ Difficulty *big.Int `json:"difficulty"`
+ BaseFee *big.Int `json:"baseFee"`
+}
+
+//
+// CallBundle will simulate a bundle of transactions on top of
+// the most recent block. Partially follows flashbots spec v0.5.
+func (s *BundleAPI) CallBundle(ctx context.Context, args CallBundleArgs) (map[string]interface{}, error) {
+ if len(args.Txs) == 0 {
+ return nil, errors.New("bundle missing unsigned txs")
+ }
+
+ defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
+
+ timeoutMilliSeconds := int64(5000)
+ if args.Timeout != nil {
+ timeoutMilliSeconds = *args.Timeout
+ }
+ timeout := time.Millisecond * time.Duration(timeoutMilliSeconds)
+ blockNumberRPC := rpc.BlockNumber(-1)
+ state, parent, err := s.b.StateAndHeaderByNumber(ctx, blockNumberRPC)
+
+ if state == nil || err != nil {
+ return nil, err
+ }
+
+ timestamp := parent.Time + 1
+ if args.Timestamp != nil {
+ timestamp = *args.Timestamp
+ }
+ coinbase := parent.Coinbase
+ if args.Coinbase != nil {
+ coinbase = common.HexToAddress(*args.Coinbase)
+ }
+ difficulty := parent.Difficulty
+ if args.Difficulty != nil {
+ difficulty = args.Difficulty
+ }
+ gasLimit := parent.GasLimit
+ if args.GasLimit != nil {
+ gasLimit = *args.GasLimit
+ }
+ var baseFee *big.Int
+ // Assume bn simulaton occur after london hardfork
+
+ baseFee = misc.CalcBaseFee(s.b.ChainConfig(), parent)
+ header := &types.Header{
+ ParentHash: parent.Hash(),
+ Number: big.NewInt(parent.Number.Int64()),
+ GasLimit: gasLimit,
+ Time: timestamp,
+ Difficulty: difficulty,
+ Coinbase: coinbase,
+ BaseFee: baseFee,
+ }
+ // Setup context so it may be cancelled the call has completed
+ // or, in case of unmetered gas, setup a context with a timeout.
+ var cancel context.CancelFunc
+ if timeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+ // Make sure the context is cancelled when the call has completed
+ // this makes sure resources are cleaned up.
+ defer cancel()
+
+ vmconfig := vm.Config{}
+
+ // Setup the gas pool (also for unmetered requests)
+ // and apply the message.
+ gp := new(core.GasPool).AddGas(math.MaxUint64)
+
+ results := []map[string]interface{}{}
+
+ var totalGasUsed uint64
+ gasFees := new(big.Int)
+
+ // RPC Call gas cap
+ globalGasCap := s.b.RPCGasCap()
+
+ for i, tx := range args.Txs {
+ // Since its a txCall we'll just prepare the
+ // state with a random hash
+ var randomHash common.Hash
+ rand.Read(randomHash[:])
+ // New random hash since its a call
+ state.Prepare(randomHash, i)
+
+ msg, err := tx.ToMessage(globalGasCap, header.BaseFee)
+ if err != nil {
+ return nil, err
+ }
+
+ receipt, result, traceResult, err := core.ApplyUnsignedTransactionWithResult(s.b.ChainConfig(), s.chain, &coinbase, gp, state, header, msg, &header.GasUsed, vmconfig)
+ if err != nil {
+ return nil, fmt.Errorf("err: %w; txhash %s", err, tx.From)
+ }
+
+ jsonResult := map[string]interface{}{
+ "gasUsed": receipt.GasUsed,
+ "fromAddress": tx.from(),
+ "toAddress": tx.To,
+ "traceResult": traceResult,
+ }
+ totalGasUsed += receipt.GasUsed
+ if result.Err != nil {
+ jsonResult["error"] = result.Err.Error()
+ revert := result.Revert()
+ if len(revert) > 0 {
+ jsonResult["revert"] = string(revert)
+ }
+ } else {
+ dst := make([]byte, hex.EncodedLen(len(result.Return())))
+ hex.Encode(dst, result.Return())
+ jsonResult["value"] = "0x" + string(dst)
+ }
+
+ results = append(results, jsonResult)
+ }
+
+ ret := map[string]interface{}{}
+ ret["results"] = results
+ ret["gasFees"] = gasFees.String()
+ ret["gasUsed"] = totalGasUsed
+ ret["blockNumber"] = parent.Number.Int64()
+
+ ret["args"] = header
+ return ret, nil
+}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 362b1bf02d35..93aebf337625 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -97,7 +97,7 @@ type Backend interface {
Engine() consensus.Engine
}
-func GetAPIs(apiBackend Backend) []rpc.API {
+func GetAPIs(apiBackend Backend, chain *core.BlockChain) []rpc.API {
nonceLock := new(AddrLocker)
return []rpc.API{
{
@@ -139,6 +139,11 @@ func GetAPIs(apiBackend Backend) []rpc.API {
Version: "1.0",
Service: NewPrivateAccountAPI(apiBackend, nonceLock),
Public: false,
+ }, {
+ Namespace: "eth",
+ Version: "1.0",
+ Service: NewBundleAPI(apiBackend, chain),
+ Public: true,
},
}
}
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 2d08d3008f38..9c5950af58fe 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -55,20 +55,20 @@ type TransactionArgs struct {
}
// from retrieves the transaction sender address.
-func (arg *TransactionArgs) from() common.Address {
- if arg.From == nil {
+func (args *TransactionArgs) from() common.Address {
+ if args.From == nil {
return common.Address{}
}
- return *arg.From
+ return *args.From
}
// data retrieves the transaction calldata. Input field is preferred.
-func (arg *TransactionArgs) data() []byte {
- if arg.Input != nil {
- return *arg.Input
+func (args *TransactionArgs) data() []byte {
+ if args.Input != nil {
+ return *args.Input
}
- if arg.Data != nil {
- return *arg.Data
+ if args.Data != nil {
+ return *args.Data
}
return nil
}
diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go
index 43bbcf0201d8..095df03807d7 100644
--- a/internal/flags/helpers.go
+++ b/internal/flags/helpers.go
@@ -51,7 +51,7 @@ OPTIONS:
AppHelpTemplate = `NAME:
{{.App.Name}} - {{.App.Usage}}
- Copyright 2013-2021 The go-ethereum Authors
+ Copyright 2013-2022 The go-ethereum Authors
USAGE:
{{.App.HelpName}} [options]{{if .App.Commands}} [command] [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}}
@@ -77,7 +77,7 @@ COPYRIGHT:
ClefAppHelpTemplate = `NAME:
{{.App.Name}} - {{.App.Usage}}
- Copyright 2013-2021 The go-ethereum Authors
+ Copyright 2013-2022 The go-ethereum Authors
USAGE:
{{.App.HelpName}} [options]{{if .App.Commands}} command [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}}
@@ -143,6 +143,7 @@ func FlagCategory(flag cli.Flag, flagGroups []FlagGroup) string {
// NewApp creates an app with sane defaults.
func NewApp(gitCommit, gitDate, usage string) *cli.App {
app := cli.NewApp()
+ app.EnableBashCompletion = true
app.Name = filepath.Base(os.Args[0])
app.Author = ""
app.Email = ""
diff --git a/internal/jsre/jsre_test.go b/internal/jsre/jsre_test.go
index bc38f7a44a86..57acdaed90ae 100644
--- a/internal/jsre/jsre_test.go
+++ b/internal/jsre/jsre_test.go
@@ -83,20 +83,20 @@ func TestNatto(t *testing.T) {
err := jsre.Exec("test.js")
if err != nil {
- t.Errorf("expected no error, got %v", err)
+ t.Fatalf("expected no error, got %v", err)
}
time.Sleep(100 * time.Millisecond)
val, err := jsre.Run("msg")
if err != nil {
- t.Errorf("expected no error, got %v", err)
+ t.Fatalf("expected no error, got %v", err)
}
if val.ExportType().Kind() != reflect.String {
- t.Errorf("expected string value, got %v", val)
+ t.Fatalf("expected string value, got %v", val)
}
exp := "testMsg"
got := val.ToString().String()
if exp != got {
- t.Errorf("expected '%v', got '%v'", exp, got)
+ t.Fatalf("expected '%v', got '%v'", exp, got)
}
jsre.Stop(false)
}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index c4bdbaeb8d20..87bf464157ba 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -576,6 +576,11 @@ web3._extend({
params: 3,
inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter, null]
}),
+ new web3._extend.Method({
+ name: 'getLogs',
+ call: 'eth_getLogs',
+ params: 1,
+ }),
],
properties: [
new web3._extend.Property({
diff --git a/les/catalyst/api.go b/les/catalyst/api.go
new file mode 100644
index 000000000000..141df0585b01
--- /dev/null
+++ b/les/catalyst/api.go
@@ -0,0 +1,189 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// Package catalyst implements the temporary eth1/eth2 RPC integration.
+package catalyst
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/beacon"
+ "github.com/ethereum/go-ethereum/les"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// Register adds catalyst APIs to the light client.
+func Register(stack *node.Node, backend *les.LightEthereum) error {
+ log.Warn("Catalyst mode enabled", "protocol", "les")
+ stack.RegisterAPIs([]rpc.API{
+ {
+ Namespace: "engine",
+ Version: "1.0",
+ Service: NewConsensusAPI(backend),
+ Public: true,
+ Authenticated: true,
+ },
+ })
+ return nil
+}
+
+type ConsensusAPI struct {
+ les *les.LightEthereum
+}
+
+// NewConsensusAPI creates a new consensus api for the given backend.
+// The underlying blockchain needs to have a valid terminal total difficulty set.
+func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI {
+ if les.BlockChain().Config().TerminalTotalDifficulty == nil {
+ panic("Catalyst started without valid total difficulty")
+ }
+ return &ConsensusAPI{les: les}
+}
+
+// ForkchoiceUpdatedV1 has several responsibilities:
+// If the method is called with an empty head block:
+// we return success, which can be used to check if the catalyst mode is enabled
+// If the total difficulty was not reached:
+// we return INVALID
+// If the finalizedBlockHash is set:
+// we check if we have the finalizedBlockHash in our db, if not we start a sync
+// We try to set our blockchain to the headBlock
+// If there are payloadAttributes:
+// we return an error since block creation is not supported in les mode
+func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
+ if heads.HeadBlockHash == (common.Hash{}) {
+ log.Warn("Forkchoice requested update to zero hash")
+ return beacon.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this?
+ }
+ if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
+ if header := api.les.BlockChain().GetHeaderByHash(heads.HeadBlockHash); header == nil {
+ // TODO (MariusVanDerWijden) trigger sync
+ return beacon.STATUS_SYNCING, nil
+ }
+ return beacon.STATUS_INVALID, err
+ }
+ // If the finalized block is set, check if it is in our blockchain
+ if heads.FinalizedBlockHash != (common.Hash{}) {
+ if header := api.les.BlockChain().GetHeaderByHash(heads.FinalizedBlockHash); header == nil {
+ // TODO (MariusVanDerWijden) trigger sync
+ return beacon.STATUS_SYNCING, nil
+ }
+ }
+ // SetHead
+ if err := api.setHead(heads.HeadBlockHash); err != nil {
+ return beacon.STATUS_INVALID, err
+ }
+ if payloadAttributes != nil {
+ return beacon.STATUS_INVALID, errors.New("not supported")
+ }
+ return api.validForkChoiceResponse(), nil
+}
+
+// GetPayloadV1 returns a cached payload by id. It's not supported in les mode.
+func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
+ return nil, &beacon.GenericServerError
+}
+
+// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
+func (api *ConsensusAPI) ExecutePayloadV1(params beacon.ExecutableDataV1) (beacon.PayloadStatusV1, error) {
+ block, err := beacon.ExecutableDataToBlock(params)
+ if err != nil {
+ return api.invalid(), err
+ }
+ if !api.les.BlockChain().HasHeader(block.ParentHash(), block.NumberU64()-1) {
+ /*
+ TODO (MariusVanDerWijden) reenable once sync is merged
+ if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil {
+ return SYNCING, err
+ }
+ */
+ // TODO (MariusVanDerWijden) we should return nil here not empty hash
+ return beacon.PayloadStatusV1{Status: beacon.SYNCING, LatestValidHash: nil}, nil
+ }
+ parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
+ if parent == nil {
+ return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
+ }
+ td := api.les.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
+ ttd := api.les.BlockChain().Config().TerminalTotalDifficulty
+ if td.Cmp(ttd) < 0 {
+ return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
+ }
+ if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
+ return api.invalid(), err
+ }
+ if merger := api.les.Merger(); !merger.TDDReached() {
+ merger.ReachTTD()
+ }
+ hash := block.Hash()
+ return beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: &hash}, nil
+}
+
+func (api *ConsensusAPI) validForkChoiceResponse() beacon.ForkChoiceResponse {
+ currentHash := api.les.BlockChain().CurrentHeader().Hash()
+ return beacon.ForkChoiceResponse{
+ PayloadStatus: beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: ¤tHash},
+ }
+}
+
+// invalid returns a response "INVALID" with the latest valid hash set to the current head.
+func (api *ConsensusAPI) invalid() beacon.PayloadStatusV1 {
+ currentHash := api.les.BlockChain().CurrentHeader().Hash()
+ return beacon.PayloadStatusV1{Status: beacon.INVALID, LatestValidHash: ¤tHash}
+}
+
+func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
+ // shortcut if we entered PoS already
+ if api.les.Merger().PoSFinalized() {
+ return nil
+ }
+ // make sure the parent has enough terminal total difficulty
+ header := api.les.BlockChain().GetHeaderByHash(head)
+ if header == nil {
+ return &beacon.GenericServerError
+ }
+ td := api.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+ if td != nil && td.Cmp(api.les.BlockChain().Config().TerminalTotalDifficulty) < 0 {
+ return &beacon.InvalidTB
+ }
+ return nil
+}
+
+// setHead is called to perform a force choice.
+func (api *ConsensusAPI) setHead(newHead common.Hash) error {
+ log.Info("Setting head", "head", newHead)
+
+ headHeader := api.les.BlockChain().CurrentHeader()
+ if headHeader.Hash() == newHead {
+ return nil
+ }
+ newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
+ if newHeadHeader == nil {
+ return &beacon.GenericServerError
+ }
+ if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil {
+ return err
+ }
+ // Trigger the transition if it's the first `NewHead` event.
+ if merger := api.les.Merger(); !merger.PoSFinalized() {
+ merger.FinalizePoS()
+ }
+ return nil
+}
diff --git a/les/catalyst/api_test.go b/les/catalyst/api_test.go
new file mode 100644
index 000000000000..c1cbf645ccc8
--- /dev/null
+++ b/les/catalyst/api_test.go
@@ -0,0 +1,244 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package catalyst
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/beacon"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/les"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+var (
+ // testKey is a private key to use for funding a tester account.
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+
+ // testAddr is the Ethereum address of the tester account.
+ testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+
+ testBalance = big.NewInt(2e18)
+)
+
+func generatePreMergeChain(n int) (*core.Genesis, []*types.Header, []*types.Block) {
+ db := rawdb.NewMemoryDatabase()
+ config := params.AllEthashProtocolChanges
+ genesis := &core.Genesis{
+ Config: config,
+ Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}},
+ ExtraData: []byte("test genesis"),
+ Timestamp: 9000,
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ gblock := genesis.ToBlock(db)
+ engine := ethash.NewFaker()
+ blocks, _ := core.GenerateChain(config, gblock, engine, db, n, nil)
+ totalDifficulty := big.NewInt(0)
+
+ var headers []*types.Header
+ for _, b := range blocks {
+ totalDifficulty.Add(totalDifficulty, b.Difficulty())
+ headers = append(headers, b.Header())
+ }
+ config.TerminalTotalDifficulty = totalDifficulty
+
+ return genesis, headers, blocks
+}
+
+func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
+ genesis, headers, blocks := generatePreMergeChain(10)
+ n, lesService := startLesService(t, genesis, headers)
+ defer n.Close()
+
+ api := NewConsensusAPI(lesService)
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: blocks[5].Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil {
+ t.Errorf("fork choice updated before total terminal difficulty should fail")
+ }
+}
+
+func TestExecutePayloadV1(t *testing.T) {
+ genesis, headers, blocks := generatePreMergeChain(10)
+ n, lesService := startLesService(t, genesis, headers[:9])
+ lesService.Merger().ReachTTD()
+ defer n.Close()
+
+ api := NewConsensusAPI(lesService)
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: blocks[8].Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ t.Errorf("Failed to update head %v", err)
+ }
+ block := blocks[9]
+
+ fakeBlock := types.NewBlock(&types.Header{
+ ParentHash: block.ParentHash(),
+ UncleHash: crypto.Keccak256Hash(nil),
+ Coinbase: block.Coinbase(),
+ Root: block.Root(),
+ TxHash: crypto.Keccak256Hash(nil),
+ ReceiptHash: crypto.Keccak256Hash(nil),
+ Bloom: block.Bloom(),
+ Difficulty: big.NewInt(0),
+ Number: block.Number(),
+ GasLimit: block.GasLimit(),
+ GasUsed: block.GasUsed(),
+ Time: block.Time(),
+ Extra: block.Extra(),
+ MixDigest: block.MixDigest(),
+ Nonce: types.BlockNonce{},
+ BaseFee: block.BaseFee(),
+ }, nil, nil, nil, trie.NewStackTrie(nil))
+
+ _, err := api.ExecutePayloadV1(beacon.ExecutableDataV1{
+ ParentHash: fakeBlock.ParentHash(),
+ FeeRecipient: fakeBlock.Coinbase(),
+ StateRoot: fakeBlock.Root(),
+ ReceiptsRoot: fakeBlock.ReceiptHash(),
+ LogsBloom: fakeBlock.Bloom().Bytes(),
+ Random: fakeBlock.MixDigest(),
+ Number: fakeBlock.NumberU64(),
+ GasLimit: fakeBlock.GasLimit(),
+ GasUsed: fakeBlock.GasUsed(),
+ Timestamp: fakeBlock.Time(),
+ ExtraData: fakeBlock.Extra(),
+ BaseFeePerGas: fakeBlock.BaseFee(),
+ BlockHash: fakeBlock.Hash(),
+ Transactions: encodeTransactions(fakeBlock.Transactions()),
+ })
+ if err != nil {
+ t.Errorf("Failed to execute payload %v", err)
+ }
+ headHeader := api.les.BlockChain().CurrentHeader()
+ if headHeader.Number.Uint64() != fakeBlock.NumberU64()-1 {
+ t.Fatal("Unexpected chain head update")
+ }
+ fcState = beacon.ForkchoiceStateV1{
+ HeadBlockHash: fakeBlock.Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ t.Fatal("Failed to update head")
+ }
+ headHeader = api.les.BlockChain().CurrentHeader()
+ if headHeader.Number.Uint64() != fakeBlock.NumberU64() {
+ t.Fatal("Failed to update chain head")
+ }
+}
+
+func TestEth2DeepReorg(t *testing.T) {
+ // TODO (MariusVanDerWijden) TestEth2DeepReorg is currently broken, because it tries to reorg
+ // before the totalTerminalDifficulty threshold
+ /*
+ genesis, preMergeBlocks := generatePreMergeChain(core.TriesInMemory * 2)
+ n, ethservice := startEthService(t, genesis, preMergeBlocks)
+ defer n.Close()
+
+ var (
+ api = NewConsensusAPI(ethservice, nil)
+ parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1]
+ head = ethservice.BlockChain().CurrentBlock().NumberU64()
+ )
+ if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) {
+ t.Errorf("Block %d not pruned", parent.NumberU64())
+ }
+ for i := 0; i < 10; i++ {
+ execData, err := api.assembleBlock(AssembleBlockParams{
+ ParentHash: parent.Hash(),
+ Timestamp: parent.Time() + 5,
+ })
+ if err != nil {
+ t.Fatalf("Failed to create the executable data %v", err)
+ }
+ block, err := ExecutableDataToBlock(ethservice.BlockChain().Config(), parent.Header(), *execData)
+ if err != nil {
+ t.Fatalf("Failed to convert executable data to block %v", err)
+ }
+ newResp, err := api.ExecutePayload(*execData)
+ if err != nil || newResp.Status != "VALID" {
+ t.Fatalf("Failed to insert block: %v", err)
+ }
+ if ethservice.BlockChain().CurrentBlock().NumberU64() != head {
+ t.Fatalf("Chain head shouldn't be updated")
+ }
+ if err := api.setHead(block.Hash()); err != nil {
+ t.Fatalf("Failed to set head: %v", err)
+ }
+ if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() {
+ t.Fatalf("Chain head should be updated")
+ }
+ parent, head = block, block.NumberU64()
+ }
+ */
+}
+
+// startEthService creates a full node instance for testing.
+func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Header) (*node.Node, *les.LightEthereum) {
+ t.Helper()
+
+ n, err := node.New(&node.Config{})
+ if err != nil {
+ t.Fatal("can't create node:", err)
+ }
+ ethcfg := ðconfig.Config{
+ Genesis: genesis,
+ Ethash: ethash.Config{PowMode: ethash.ModeFake},
+ SyncMode: downloader.LightSync,
+ TrieDirtyCache: 256,
+ TrieCleanCache: 256,
+ LightPeers: 10,
+ }
+ lesService, err := les.New(n, ethcfg)
+ if err != nil {
+ t.Fatal("can't create eth service:", err)
+ }
+ if err := n.Start(); err != nil {
+ t.Fatal("can't start node:", err)
+ }
+ if _, err := lesService.BlockChain().InsertHeaderChain(headers, 0); err != nil {
+ n.Close()
+ t.Fatal("can't import test headers:", err)
+ }
+ return n, lesService
+}
+
+func encodeTransactions(txs []*types.Transaction) [][]byte {
+ var enc = make([][]byte, len(txs))
+ for i, tx := range txs {
+ enc[i], _ = tx.MarshalBinary()
+ }
+ return enc
+}
diff --git a/les/client.go b/les/client.go
index 43207f3443ec..922c51627824 100644
--- a/les/client.go
+++ b/les/client.go
@@ -282,7 +282,7 @@ func (s *LightDummyAPI) Mining() bool {
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *LightEthereum) APIs() []rpc.API {
- apis := ethapi.GetAPIs(s.ApiBackend)
+ apis := ethapi.GetAPIs(s.ApiBackend, nil)
apis = append(apis, s.engine.APIs(s.BlockChain().HeaderChain())...)
return append(apis, []rpc.API{
{
diff --git a/les/peer.go b/les/peer.go
index c6c672942b59..499429739d23 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -213,7 +213,7 @@ func (p *peerCommons) sendReceiveHandshake(sendList keyValueList) (keyValueList,
)
// Send out own handshake in a new thread
go func() {
- errc <- p2p.Send(p.rw, StatusMsg, sendList)
+ errc <- p2p.Send(p.rw, StatusMsg, &sendList)
}()
go func() {
// In the mean time retrieve the remote status message
@@ -421,7 +421,7 @@ func sendRequest(w p2p.MsgWriter, msgcode, reqID uint64, data interface{}) error
ReqID uint64
Data interface{}
}
- return p2p.Send(w, msgcode, req{reqID, data})
+ return p2p.Send(w, msgcode, &req{reqID, data})
}
func (p *serverPeer) sendRequest(msgcode, reqID uint64, data interface{}, amount int) error {
@@ -871,7 +871,7 @@ func (r *reply) send(bv uint64) error {
ReqID, BV uint64
Data rlp.RawValue
}
- return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
+ return p2p.Send(r.w, r.msgcode, &resp{r.reqID, bv, r.data})
}
// size returns the RLP encoded size of the message data
diff --git a/les/test_helper.go b/les/test_helper.go
index 10367ea800c4..480d249dca40 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -356,7 +356,7 @@ func (p *testPeer) handshakeWithServer(t *testing.T, td *big.Int, head common.Ha
if err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil {
t.Fatalf("status recv: %v", err)
}
- if err := p2p.Send(p.app, StatusMsg, sendList); err != nil {
+ if err := p2p.Send(p.app, StatusMsg, &sendList); err != nil {
t.Fatalf("status send: %v", err)
}
}
@@ -389,7 +389,7 @@ func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Ha
if err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil {
t.Fatalf("status recv: %v", err)
}
- if err := p2p.Send(p.app, StatusMsg, sendList); err != nil {
+ if err := p2p.Send(p.app, StatusMsg, &sendList); err != nil {
t.Fatalf("status send: %v", err)
}
}
diff --git a/les/vflux/client/fillset_test.go b/les/vflux/client/fillset_test.go
index 58240682c60d..ca5af8f07ecc 100644
--- a/les/vflux/client/fillset_test.go
+++ b/les/vflux/client/fillset_test.go
@@ -34,16 +34,20 @@ type testIter struct {
}
func (i *testIter) Next() bool {
- i.waitCh <- struct{}{}
+ if _, ok := <-i.waitCh; !ok {
+ return false
+ }
i.node = <-i.nodeCh
- return i.node != nil
+ return true
}
func (i *testIter) Node() *enode.Node {
return i.node
}
-func (i *testIter) Close() {}
+func (i *testIter) Close() {
+ close(i.waitCh)
+}
func (i *testIter) push() {
var id enode.ID
@@ -53,7 +57,7 @@ func (i *testIter) push() {
func (i *testIter) waiting(timeout time.Duration) bool {
select {
- case <-i.waitCh:
+ case i.waitCh <- struct{}{}:
return true
case <-time.After(timeout):
return false
diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go
index 87d783ebab2a..805de2d41b03 100644
--- a/les/vflux/server/clientpool.go
+++ b/les/vflux/server/clientpool.go
@@ -34,7 +34,7 @@ import (
var (
ErrNotConnected = errors.New("client not connected")
ErrNoPriority = errors.New("priority too low to raise capacity")
- ErrCantFindMaximum = errors.New("Unable to find maximum allowed capacity")
+ ErrCantFindMaximum = errors.New("unable to find maximum allowed capacity")
)
// ClientPool implements a client database that assigns a priority to each client
@@ -177,7 +177,7 @@ func (cp *ClientPool) Unregister(peer clientPeer) {
cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
}
-// setConnectedBias sets the connection bias, which is applied to already connected clients
+// SetConnectedBias sets the connection bias, which is applied to already connected clients
// So that already connected client won't be kicked out very soon and we can ensure all
// connected clients can have enough time to request or sync some data.
func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
diff --git a/light/lightchain.go b/light/lightchain.go
index 61309ce35601..0cc88b46e7e0 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -419,6 +419,9 @@ func (lc *LightChain) SetChainHead(header *types.Header) error {
// In the case of a light chain, InsertHeaderChain also creates and posts light
// chain events when necessary.
func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+ if len(chain) == 0 {
+ return 0, nil
+ }
if atomic.LoadInt32(&lc.disableCheckFreq) == 1 {
checkFreq = 0
}
diff --git a/miner/miner.go b/miner/miner.go
index c8aaa5b92842..20e12c240e12 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -35,10 +35,12 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-// Backend wraps all methods required for mining.
+// Backend wraps all methods required for mining. Only full node is capable
+// to offer all the functions here.
type Backend interface {
BlockChain() *core.BlockChain
TxPool() *core.TxPool
+ StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error)
}
// Config is the configuration parameters of mining.
@@ -68,7 +70,7 @@ type Miner struct {
wg sync.WaitGroup
}
-func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool, merger *consensus.Merger) *Miner {
+func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool) *Miner {
miner := &Miner{
eth: eth,
mux: mux,
@@ -76,7 +78,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
exitCh: make(chan struct{}),
startCh: make(chan common.Address),
stopCh: make(chan struct{}),
- worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, merger),
+ worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true),
}
miner.wg.Add(1)
go miner.update()
@@ -233,6 +235,12 @@ func (miner *Miner) DisablePreseal() {
miner.worker.disablePreseal()
}
+// GetSealingBlock retrieves a sealing block based on the given parameters.
+// The returned block is not sealed but all other fields should be filled.
+func (miner *Miner) GetSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
+ return miner.worker.getSealingBlock(parent, timestamp, coinbase, random)
+}
+
// SubscribePendingLogs starts delivering logs from pending transactions
// to the given channel.
func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
diff --git a/miner/miner_test.go b/miner/miner_test.go
index de7ca73e260e..cf619845dd47 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -18,11 +18,11 @@
package miner
import (
+ "errors"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -55,6 +55,10 @@ func (m *mockBackend) TxPool() *core.TxPool {
return m.txPool
}
+func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
+ return nil, errors.New("not supported")
+}
+
type testBlockChain struct {
statedb *state.StateDB
gasLimit uint64
@@ -80,7 +84,8 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent)
}
func TestMiner(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
// Start the downloader
@@ -107,7 +112,8 @@ func TestMiner(t *testing.T) {
// An initial FailedEvent should allow mining to stop on a subsequent
// downloader StartEvent.
func TestMinerDownloaderFirstFails(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
// Start the downloader
@@ -138,8 +144,8 @@ func TestMinerDownloaderFirstFails(t *testing.T) {
}
func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
- miner, mux := createMiner(t)
-
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
// Start the downloader
@@ -161,7 +167,8 @@ func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
}
func TestStartWhileDownload(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
@@ -174,16 +181,19 @@ func TestStartWhileDownload(t *testing.T) {
}
func TestStartStopMiner(t *testing.T) {
- miner, _ := createMiner(t)
+ miner, _, cleanup := createMiner(t)
+ defer cleanup(false)
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
miner.Stop()
waitForMiningState(t, miner, false)
+
}
func TestCloseMiner(t *testing.T) {
- miner, _ := createMiner(t)
+ miner, _, cleanup := createMiner(t)
+ defer cleanup(true)
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
@@ -195,7 +205,8 @@ func TestCloseMiner(t *testing.T) {
// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't
// possible at the moment
func TestMinerSetEtherbase(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
// Start with a 'bad' mining address
miner.Start(common.HexToAddress("0xdead"))
waitForMiningState(t, miner, true)
@@ -230,7 +241,7 @@ func waitForMiningState(t *testing.T, m *Miner, mining bool) {
t.Fatalf("Mining() == %t, want %t", state, mining)
}
-func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
+func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
// Create Ethash config
config := Config{
Etherbase: common.HexToAddress("123456789"),
@@ -246,7 +257,6 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
// Create consensus engine
engine := clique.New(chainConfig.Clique, chainDB)
// Create Ethereum backend
- merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("can't create new chain %v", err)
@@ -259,5 +269,14 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
// Create event Mux
mux := new(event.TypeMux)
// Create Miner
- return New(backend, &config, chainConfig, mux, engine, nil, merger), mux
+ miner := New(backend, &config, chainConfig, mux, engine, nil)
+ cleanup := func(skipMiner bool) {
+ bc.Stop()
+ engine.Close()
+ pool.Stop()
+ if !skipMiner {
+ miner.Close()
+ }
+ }
+ return miner, mux, cleanup
}
diff --git a/miner/stress/beacon/main.go b/miner/stress/beacon/main.go
index 70005e20dbe9..ccb7279b047e 100644
--- a/miner/stress/beacon/main.go
+++ b/miner/stress/beacon/main.go
@@ -32,13 +32,15 @@ import (
"github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/catalyst"
+ ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/les"
+ lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
@@ -88,24 +90,26 @@ var (
type ethNode struct {
typ nodetype
- api *catalyst.ConsensusAPI
- ethBackend *eth.Ethereum
- lesBackend *les.LightEthereum
stack *node.Node
enode *enode.Node
+ api *ethcatalyst.ConsensusAPI
+ ethBackend *eth.Ethereum
+ lapi *lescatalyst.ConsensusAPI
+ lesBackend *les.LightEthereum
}
func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode {
var (
err error
- api *catalyst.ConsensusAPI
+ api *ethcatalyst.ConsensusAPI
+ lapi *lescatalyst.ConsensusAPI
stack *node.Node
ethBackend *eth.Ethereum
lesBackend *les.LightEthereum
)
// Start the node and wait until it's up
if typ == eth2LightClient {
- stack, lesBackend, api, err = makeLightNode(genesis)
+ stack, lesBackend, lapi, err = makeLightNode(genesis)
} else {
stack, ethBackend, api, err = makeFullNode(genesis)
}
@@ -131,20 +135,27 @@ func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode
typ: typ,
api: api,
ethBackend: ethBackend,
+ lapi: lapi,
lesBackend: lesBackend,
stack: stack,
enode: enode,
}
}
-func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*catalyst.ExecutableDataV1, error) {
+func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*beacon.ExecutableDataV1, error) {
if n.typ != eth2MiningNode {
return nil, errors.New("invalid node type")
}
- payloadAttribute := catalyst.PayloadAttributesV1{
- Timestamp: uint64(time.Now().Unix()),
+ timestamp := uint64(time.Now().Unix())
+ if timestamp <= parentTimestamp {
+ timestamp = parentTimestamp + 1
+ }
+ payloadAttribute := beacon.PayloadAttributesV1{
+ Timestamp: timestamp,
+ Random: common.Hash{},
+ SuggestedFeeRecipient: common.HexToAddress("0xdeadbeef"),
}
- fcState := catalyst.ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: parentHash,
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
@@ -156,39 +167,62 @@ func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64)
return n.api.GetPayloadV1(*payload.PayloadID)
}
-func (n *ethNode) insertBlock(eb catalyst.ExecutableDataV1) error {
+func (n *ethNode) insertBlock(eb beacon.ExecutableDataV1) error {
if !eth2types(n.typ) {
return errors.New("invalid node type")
}
- newResp, err := n.api.ExecutePayloadV1(eb)
- if err != nil {
- return err
- } else if newResp.Status != "VALID" {
- return errors.New("failed to insert block")
+ switch n.typ {
+ case eth2NormalNode, eth2MiningNode:
+ newResp, err := n.api.NewPayloadV1(eb)
+ if err != nil {
+ return err
+ } else if newResp.Status != "VALID" {
+ return errors.New("failed to insert block")
+ }
+ return nil
+ case eth2LightClient:
+ newResp, err := n.lapi.ExecutePayloadV1(eb)
+ if err != nil {
+ return err
+ } else if newResp.Status != "VALID" {
+ return errors.New("failed to insert block")
+ }
+ return nil
+ default:
+ return errors.New("undefined node")
}
- return nil
}
-func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed catalyst.ExecutableDataV1) error {
+func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed beacon.ExecutableDataV1) error {
if !eth2types(n.typ) {
return errors.New("invalid node type")
}
if err := n.insertBlock(ed); err != nil {
return err
}
- block, err := catalyst.ExecutableDataToBlock(ed)
+ block, err := beacon.ExecutableDataToBlock(ed)
if err != nil {
return err
}
- fcState := catalyst.ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: block.ParentHash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
}
- if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
- return err
+ switch n.typ {
+ case eth2NormalNode, eth2MiningNode:
+ if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ return err
+ }
+ return nil
+ case eth2LightClient:
+ if _, err := n.lapi.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ return err
+ }
+ return nil
+ default:
+ return errors.New("undefined node")
}
- return nil
}
type nodeManager struct {
@@ -284,12 +318,15 @@ func (mgr *nodeManager) run() {
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
for _, node := range append(nodes) {
- fcState := catalyst.ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: oldest.Hash(),
SafeBlockHash: common.Hash{},
- FinalizedBlockHash: common.Hash{},
+ FinalizedBlockHash: oldest.Hash(),
}
- node.api.ForkchoiceUpdatedV1(fcState, nil)
+ // TODO(rjl493456442) finalization doesn't work properly, FIX IT
+ _ = fcState
+ _ = node
+ //node.api.ForkchoiceUpdatedV1(fcState, nil)
}
log.Info("Finalised eth2 block", "number", oldest.NumberU64(), "hash", oldest.Hash())
waitFinalise = waitFinalise[1:]
@@ -327,12 +364,11 @@ func (mgr *nodeManager) run() {
log.Error("Failed to assemble the block", "err", err)
continue
}
- block, _ := catalyst.ExecutableDataToBlock(*ed)
+ block, _ := beacon.ExecutableDataToBlock(*ed)
nodes := mgr.getNodes(eth2MiningNode)
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
-
for _, node := range nodes {
if err := node.insertBlockAndSetHead(parentBlock.Header(), *ed); err != nil {
log.Error("Failed to insert block", "type", node.typ, "err", err)
@@ -410,9 +446,8 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
genesis.Difficulty = params.MinimumDifficulty
genesis.GasLimit = 25000000
- genesis.Config.ChainID = big.NewInt(18)
- genesis.Config.EIP150Hash = common.Hash{}
genesis.BaseFee = big.NewInt(params.InitialBaseFee)
+ genesis.Config = params.AllEthashProtocolChanges
genesis.Config.TerminalTotalDifficulty = transitionDifficulty
genesis.Alloc = core.GenesisAlloc{}
@@ -424,7 +459,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
return genesis
}
-func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.ConsensusAPI, error) {
+func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *ethcatalyst.ConsensusAPI, error) {
// Define the basic configurations for the Ethereum node
datadir, _ := ioutil.TempDir("", "")
@@ -472,10 +507,10 @@ func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.C
log.Crit("Failed to create the LES server", "err", err)
}
err = stack.Start()
- return stack, ethBackend, catalyst.NewConsensusAPI(ethBackend, nil), err
+ return stack, ethBackend, ethcatalyst.NewConsensusAPI(ethBackend), err
}
-func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *catalyst.ConsensusAPI, error) {
+func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *lescatalyst.ConsensusAPI, error) {
// Define the basic configurations for the Ethereum node
datadir, _ := ioutil.TempDir("", "")
@@ -510,7 +545,7 @@ func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *cata
return nil, nil, nil, err
}
err = stack.Start()
- return stack, lesBackend, catalyst.NewConsensusAPI(nil, lesBackend), err
+ return stack, lesBackend, lescatalyst.NewConsensusAPI(lesBackend), err
}
func eth2types(typ nodetype) bool {
diff --git a/miner/worker.go b/miner/worker.go
index 2c576ad08272..c6927a1ca1e8 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -17,8 +17,8 @@
package miner
import (
- "bytes"
"errors"
+ "fmt"
"math/big"
"sync"
"sync/atomic"
@@ -54,14 +54,14 @@ const (
// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
resubmitAdjustChanSize = 10
- // miningLogAtDepth is the number of confirmations before logging successful mining.
- miningLogAtDepth = 7
+ // sealingLogAtDepth is the number of confirmations before logging successful sealing.
+ sealingLogAtDepth = 7
- // minRecommitInterval is the minimal time interval to recreate the mining block with
+ // minRecommitInterval is the minimal time interval to recreate the sealing block with
// any newly arrived transactions.
minRecommitInterval = 1 * time.Second
- // maxRecommitInterval is the maximum time interval to recreate the mining block with
+ // maxRecommitInterval is the maximum time interval to recreate the sealing block with
// any newly arrived transactions.
maxRecommitInterval = 15 * time.Second
@@ -77,20 +77,68 @@ const (
staleThreshold = 7
)
-// environment is the worker's current environment and holds all of the current state information.
+// environment is the worker's current environment and holds all
+// information of the sealing block generation.
type environment struct {
signer types.Signer
state *state.StateDB // apply state changes here
ancestors mapset.Set // ancestor set (used for checking uncle parent validity)
family mapset.Set // family set (used for checking uncle invalidity)
- uncles mapset.Set // uncle set
tcount int // tx count in cycle
gasPool *core.GasPool // available gas used to pack transactions
+ coinbase common.Address
header *types.Header
txs []*types.Transaction
receipts []*types.Receipt
+ uncles map[common.Hash]*types.Header
+}
+
+// copy creates a deep copy of environment.
+func (env *environment) copy() *environment {
+ cpy := &environment{
+ signer: env.signer,
+ state: env.state.Copy(),
+ ancestors: env.ancestors.Clone(),
+ family: env.family.Clone(),
+ tcount: env.tcount,
+ coinbase: env.coinbase,
+ header: types.CopyHeader(env.header),
+ receipts: copyReceipts(env.receipts),
+ }
+ if env.gasPool != nil {
+ gasPool := *env.gasPool
+ cpy.gasPool = &gasPool
+ }
+ // The content of txs and uncles are immutable, unnecessary
+ // to do the expensive deep copy for them.
+ cpy.txs = make([]*types.Transaction, len(env.txs))
+ copy(cpy.txs, env.txs)
+ cpy.uncles = make(map[common.Hash]*types.Header)
+ for hash, uncle := range env.uncles {
+ cpy.uncles[hash] = uncle
+ }
+ return cpy
+}
+
+// unclelist returns the contained uncles as the list format.
+func (env *environment) unclelist() []*types.Header {
+ var uncles []*types.Header
+ for _, uncle := range env.uncles {
+ uncles = append(uncles, uncle)
+ }
+ return uncles
+}
+
+// discard terminates the background prefetcher go-routine. It should
+// always be called for all created environment instances otherwise
+// the go-routine leak can happen.
+func (env *environment) discard() {
+ if env.state == nil {
+ return
+ }
+ env.state.StopPrefetcher()
}
// task contains all information for consensus engine sealing and result submitting.
@@ -114,6 +162,13 @@ type newWorkReq struct {
timestamp int64
}
+// getWorkReq represents a request for getting a new sealing work with provided parameters.
+type getWorkReq struct {
+ params *generateParams
+ err error
+ result chan *types.Block
+}
+
// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
ratio float64
@@ -128,7 +183,6 @@ type worker struct {
engine consensus.Engine
eth Backend
chain *core.BlockChain
- merger *consensus.Merger
// Feeds
pendingLogsFeed event.Feed
@@ -144,6 +198,7 @@ type worker struct {
// Channels
newWorkCh chan *newWorkReq
+ getWorkCh chan *getWorkReq
taskCh chan *task
resultCh chan *types.Block
startCh chan struct{}
@@ -191,7 +246,7 @@ type worker struct {
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}
-func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, merger *consensus.Merger) *worker {
+func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
worker := &worker{
config: config,
chainConfig: chainConfig,
@@ -199,16 +254,16 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
eth: eth,
mux: mux,
chain: eth.BlockChain(),
- merger: merger,
isLocalBlock: isLocalBlock,
localUncles: make(map[common.Hash]*types.Block),
remoteUncles: make(map[common.Hash]*types.Block),
- unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
+ unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
pendingTasks: make(map[common.Hash]*task),
txsCh: make(chan core.NewTxsEvent, txChanSize),
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
newWorkCh: make(chan *newWorkReq),
+ getWorkCh: make(chan *getWorkReq),
taskCh: make(chan *task),
resultCh: make(chan *types.Block, resultQueueSize),
exitCh: make(chan struct{}),
@@ -264,15 +319,18 @@ func (w *worker) setExtra(extra []byte) {
// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
- w.resubmitIntervalCh <- interval
+ select {
+ case w.resubmitIntervalCh <- interval:
+ case <-w.exitCh:
+ }
}
-// disablePreseal disables pre-sealing mining feature
+// disablePreseal disables pre-sealing feature
func (w *worker) disablePreseal() {
atomic.StoreUint32(&w.noempty, 1)
}
-// enablePreseal enables pre-sealing mining feature
+// enablePreseal enables pre-sealing feature
func (w *worker) enablePreseal() {
atomic.StoreUint32(&w.noempty, 0)
}
@@ -350,13 +408,13 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t
return time.Duration(int64(next))
}
-// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
+// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
defer w.wg.Done()
var (
interrupt *int32
minRecommit = recommit // minimal resubmit interval specified by user.
- timestamp int64 // timestamp for each round of mining.
+ timestamp int64 // timestamp for each round of sealing.
)
timer := time.NewTimer(0)
@@ -401,7 +459,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
commit(false, commitInterruptNewHead)
case <-timer.C:
- // If mining is running resubmit a new work cycle periodically to pull in
+ // If sealing is running resubmit a new work cycle periodically to pull in
// higher priced transactions. Disable this overhead for pending blocks.
if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
// Short circuit if no new transaction arrives.
@@ -448,22 +506,36 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
}
}
-// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
+// mainLoop is responsible for generating and submitting sealing work based on
+// the received event. It can support two modes: automatically generate task and
+// submit it or return task according to given parameters for various proposes.
func (w *worker) mainLoop() {
defer w.wg.Done()
defer w.txsSub.Unsubscribe()
defer w.chainHeadSub.Unsubscribe()
defer w.chainSideSub.Unsubscribe()
defer func() {
- if w.current != nil && w.current.state != nil {
- w.current.state.StopPrefetcher()
+ if w.current != nil {
+ w.current.discard()
}
}()
+ cleanTicker := time.NewTicker(time.Second * 10)
+ defer cleanTicker.Stop()
+
for {
select {
case req := <-w.newWorkCh:
- w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
+ w.commitWork(req.interrupt, req.noempty, req.timestamp)
+
+ case req := <-w.getWorkCh:
+ block, err := w.generateWork(req.params)
+ if err != nil {
+ req.err = err
+ req.result <- nil
+ } else {
+ req.result <- block
+ }
case ev := <-w.chainSideCh:
// Short circuit for duplicate side blocks
@@ -479,46 +551,40 @@ func (w *worker) mainLoop() {
} else {
w.remoteUncles[ev.Block.Hash()] = ev.Block
}
- // If our mining block contains less than 2 uncle blocks,
- // add the new uncle block if valid and regenerate a mining block.
- if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
+ // If our sealing block contains less than 2 uncle blocks,
+ // add the new uncle block if valid and regenerate a new
+ // sealing block for higher profit.
+ if w.isRunning() && w.current != nil && len(w.current.uncles) < 2 {
start := time.Now()
if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
- var uncles []*types.Header
- w.current.uncles.Each(func(item interface{}) bool {
- hash, ok := item.(common.Hash)
- if !ok {
- return false
- }
- uncle, exist := w.localUncles[hash]
- if !exist {
- uncle, exist = w.remoteUncles[hash]
- }
- if !exist {
- return false
- }
- uncles = append(uncles, uncle.Header())
- return false
- })
- w.commit(uncles, nil, true, start)
+ w.commit(w.current.copy(), nil, true, start)
+ }
+ }
+
+ case <-cleanTicker.C:
+ chainHead := w.chain.CurrentBlock()
+ for hash, uncle := range w.localUncles {
+ if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
+ delete(w.localUncles, hash)
+ }
+ }
+ for hash, uncle := range w.remoteUncles {
+ if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
+ delete(w.remoteUncles, hash)
}
}
case ev := <-w.txsCh:
- // Apply transactions to the pending state if we're not mining.
+ // Apply transactions to the pending state if we're not sealing
//
// Note all transactions received may not be continuous with transactions
- // already included in the current mining block. These transactions will
+ // already included in the current sealing block. These transactions will
// be automatically eliminated.
if !w.isRunning() && w.current != nil {
// If block is already full, abort
if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
continue
}
- w.mu.RLock()
- coinbase := w.coinbase
- w.mu.RUnlock()
-
txs := make(map[common.Address]types.Transactions)
for _, tx := range ev.Txs {
acc, _ := types.Sender(w.current.signer, tx)
@@ -526,18 +592,19 @@ func (w *worker) mainLoop() {
}
txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
tcount := w.current.tcount
- w.commitTransactions(txset, coinbase, nil)
- // Only update the snapshot if any new transactons were added
+ w.commitTransactions(w.current, txset, nil)
+
+ // Only update the snapshot if any new transactions were added
// to the pending block
if tcount != w.current.tcount {
- w.updateSnapshot()
+ w.updateSnapshot(w.current)
}
} else {
// Special case, if the consensus engine is 0 period clique(dev mode),
- // submit mining work here since all empty submission will be rejected
+ // submit sealing work here since all empty submission will be rejected
// by clique. Of course the advance sealing(empty submission) is disabled.
if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
- w.commitNewWork(nil, true, time.Now().Unix())
+ w.commitWork(nil, true, time.Now().Unix())
}
}
atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
@@ -679,23 +746,35 @@ func (w *worker) resultLoop() {
}
}
-// makeCurrent creates a new environment for the current cycle.
-func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
+// makeEnv creates a new environment for the sealing block.
+func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) {
// Retrieve the parent state to execute on top and start a prefetcher for
- // the miner to speed block sealing up a bit
+ // the miner to speed block sealing up a bit.
state, err := w.chain.StateAt(parent.Root())
if err != nil {
- return err
+ // Note since the sealing block can be created upon the arbitrary parent
+ // block, but the state of parent block may already be pruned, so the necessary
+ // state recovery is needed here in the future.
+ //
+ // The maximum acceptable reorg depth can be limited by the finalised block
+ // somehow. TODO(rjl493456442) fix the hard-coded number here later.
+ state, err = w.eth.StateAtBlock(parent, 1024, nil, false, false)
+ log.Warn("Recovered mining state", "root", parent.Root(), "err", err)
+ }
+ if err != nil {
+ return nil, err
}
state.StartPrefetcher("miner")
+ // Note the passed coinbase may be different with header.Coinbase.
env := &environment{
signer: types.MakeSigner(w.chainConfig, header.Number),
state: state,
+ coinbase: coinbase,
ancestors: mapset.NewSet(),
family: mapset.NewSet(),
- uncles: mapset.NewSet(),
header: header,
+ uncles: make(map[common.Hash]*types.Header),
}
// when 08 is processed ancestors contain 07 (quick block)
for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
@@ -707,20 +786,16 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
}
// Keep track of transactions which return errors so they can be removed
env.tcount = 0
-
- // Swap out the old work with the new one, terminating any leftover prefetcher
- // processes in the mean time and starting a new one.
- if w.current != nil && w.current.state != nil {
- w.current.state.StopPrefetcher()
- }
- w.current = env
- return nil
+ return env, nil
}
// commitUncle adds the given block to uncle block set, returns error if failed to add.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
+ if w.isTTDReached(env.header) {
+ return errors.New("ignore uncle for beacon block")
+ }
hash := uncle.Hash()
- if env.uncles.Contains(hash) {
+ if _, exist := env.uncles[hash]; exist {
return errors.New("uncle not unique")
}
if env.header.ParentHash == uncle.ParentHash {
@@ -732,82 +807,58 @@ func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
if env.family.Contains(hash) {
return errors.New("uncle already included")
}
- env.uncles.Add(uncle.Hash())
+ env.uncles[hash] = uncle
return nil
}
-// updateSnapshot updates pending snapshot block and state.
-// Note this function assumes the current variable is thread safe.
-func (w *worker) updateSnapshot() {
+// updateSnapshot updates pending snapshot block, receipts and state.
+func (w *worker) updateSnapshot(env *environment) {
w.snapshotMu.Lock()
defer w.snapshotMu.Unlock()
- var uncles []*types.Header
- w.current.uncles.Each(func(item interface{}) bool {
- hash, ok := item.(common.Hash)
- if !ok {
- return false
- }
- uncle, exist := w.localUncles[hash]
- if !exist {
- uncle, exist = w.remoteUncles[hash]
- }
- if !exist {
- return false
- }
- uncles = append(uncles, uncle.Header())
- return false
- })
-
w.snapshotBlock = types.NewBlock(
- w.current.header,
- w.current.txs,
- uncles,
- w.current.receipts,
+ env.header,
+ env.txs,
+ env.unclelist(),
+ env.receipts,
trie.NewStackTrie(nil),
)
- w.snapshotReceipts = copyReceipts(w.current.receipts)
- w.snapshotState = w.current.state.Copy()
+ w.snapshotReceipts = copyReceipts(env.receipts)
+ w.snapshotState = env.state.Copy()
}
-func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
- snap := w.current.state.Snapshot()
+func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
+ snap := env.state.Snapshot()
- receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
+ receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
- w.current.state.RevertToSnapshot(snap)
+ env.state.RevertToSnapshot(snap)
return nil, err
}
- w.current.txs = append(w.current.txs, tx)
- w.current.receipts = append(w.current.receipts, receipt)
+ env.txs = append(env.txs, tx)
+ env.receipts = append(env.receipts, receipt)
return receipt.Logs, nil
}
-func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
- // Short circuit if current is nil
- if w.current == nil {
- return true
+func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) bool {
+ gasLimit := env.header.GasLimit
+ if env.gasPool == nil {
+ env.gasPool = new(core.GasPool).AddGas(gasLimit)
}
-
- gasLimit := w.current.header.GasLimit
- if w.current.gasPool == nil {
- w.current.gasPool = new(core.GasPool).AddGas(gasLimit)
- }
-
var coalescedLogs []*types.Log
for {
// In the following three cases, we will interrupt the execution of the transaction.
// (1) new head block event arrival, the interrupt signal is 1
// (2) worker start or restart, the interrupt signal is 1
- // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2.
+ // (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2.
// For the first two cases, the semi-finished work will be discarded.
// For the third case, the semi-finished work will be submitted to the consensus engine.
if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
- ratio := float64(gasLimit-w.current.gasPool.Gas()) / float64(gasLimit)
+ ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
if ratio < 0.1 {
ratio = 0.1
}
@@ -819,8 +870,8 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
return atomic.LoadInt32(interrupt) == commitInterruptNewHead
}
// If we don't have enough gas for any further transactions then we're done
- if w.current.gasPool.Gas() < params.TxGas {
- log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
+ if env.gasPool.Gas() < params.TxGas {
+ log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
break
}
// Retrieve the next transaction and abort if all done
@@ -832,19 +883,19 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
// during transaction acceptance is the transaction pool.
//
// We use the eip155 signer regardless of the current hf.
- from, _ := types.Sender(w.current.signer, tx)
+ from, _ := types.Sender(env.signer, tx)
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
- if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) {
+ if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
txs.Pop()
continue
}
// Start executing the transaction
- w.current.state.Prepare(tx.Hash(), w.current.tcount)
+ env.state.Prepare(tx.Hash(), env.tcount)
- logs, err := w.commitTransaction(tx, coinbase)
+ logs, err := w.commitTransaction(env, tx)
switch {
case errors.Is(err, core.ErrGasLimitReached):
// Pop the current out-of-gas transaction without shifting in the next from the account
@@ -864,7 +915,7 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account
coalescedLogs = append(coalescedLogs, logs...)
- w.current.tcount++
+ env.tcount++
txs.Shift()
case errors.Is(err, core.ErrTxTypeNotSupported):
@@ -881,8 +932,8 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
}
if !w.isRunning() && len(coalescedLogs) > 0 {
- // We don't push the pendingLogsEvent while we are mining. The reason is that
- // when we are mining, the worker will regenerate a mining block every 3 seconds.
+ // We don't push the pendingLogsEvent while we are sealing. The reason is that
+ // when we are sealing, the worker will regenerate a sealing block every 3 seconds.
// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.
// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
@@ -903,24 +954,56 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
return false
}
-// commitNewWork generates several new sealing tasks based on the parent block.
-func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
+// generateParams wraps various of settings for generating sealing task.
+type generateParams struct {
+ timestamp uint64 // The timstamp for sealing task
+ forceTime bool // Flag whether the given timestamp is immutable or not
+ parentHash common.Hash // Parent block hash, empty means the latest chain head
+ coinbase common.Address // The fee recipient address for including transaction
+ random common.Hash // The randomness generated by beacon chain, empty before the merge
+ noUncle bool // Flag whether the uncle block inclusion is allowed
+ noExtra bool // Flag whether the extra field assignment is allowed
+}
+
+// prepareWork constructs the sealing task according to the given parameters,
+// either based on the last chain head or specified parent. In this function
+// the pending transactions are not filled yet, only the empty task returned.
+func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
w.mu.RLock()
defer w.mu.RUnlock()
- tstart := time.Now()
+ // Find the parent block for sealing task
parent := w.chain.CurrentBlock()
-
- if parent.Time() >= uint64(timestamp) {
- timestamp = int64(parent.Time() + 1)
+ if genParams.parentHash != (common.Hash{}) {
+ parent = w.chain.GetBlockByHash(genParams.parentHash)
+ }
+ if parent == nil {
+ return nil, fmt.Errorf("missing parent")
+ }
+ // Sanity check the timestamp correctness, recap the timestamp
+ // to parent+1 if the mutation is allowed.
+ timestamp := genParams.timestamp
+ if parent.Time() >= timestamp {
+ if genParams.forceTime {
+ return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp)
+ }
+ timestamp = parent.Time() + 1
}
+ // Construct the sealing block header, set the extra field if it's allowed
num := parent.Number()
header := &types.Header{
ParentHash: parent.Hash(),
Number: num.Add(num, common.Big1),
GasLimit: core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil),
- Extra: w.extra,
- Time: uint64(timestamp),
+ Time: timestamp,
+ Coinbase: genParams.coinbase,
+ }
+ if !genParams.noExtra && len(w.extra) != 0 {
+ header.Extra = w.extra
+ }
+ // Set the randomness field from the beacon chain if it's available.
+ if genParams.random != (common.Hash{}) {
+ header.MixDigest = genParams.random
}
// Set baseFee and GasLimit if we are on an EIP-1559 chain
if w.chainConfig.IsLondon(header.Number) {
@@ -930,83 +1013,47 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
}
}
- // Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
- if w.isRunning() {
- if w.coinbase == (common.Address{}) {
- log.Error("Refusing to mine without etherbase")
- return
- }
- header.Coinbase = w.coinbase
- }
+ // Run the consensus preparation with the default or customized consensus engine.
if err := w.engine.Prepare(w.chain, header); err != nil {
- log.Error("Failed to prepare header for mining", "err", err)
- return
- }
- // If we are care about TheDAO hard-fork check whether to override the extra-data or not
- if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
- // Check whether the block is among the fork extra-override range
- limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
- if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
- // Depending whether we support or oppose the fork, override differently
- if w.chainConfig.DAOForkSupport {
- header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
- } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
- header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
- }
- }
+ log.Error("Failed to prepare header for sealing", "err", err)
+ return nil, err
}
// Could potentially happen if starting to mine in an odd state.
- err := w.makeCurrent(parent, header)
+ // Note genParams.coinbase can be different with header.Coinbase
+ // since clique algorithm can modify the coinbase field in header.
+ env, err := w.makeEnv(parent, header, genParams.coinbase)
if err != nil {
- log.Error("Failed to create mining context", "err", err)
- return
- }
- // Create the current work task and check any fork transitions needed
- env := w.current
- if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 {
- misc.ApplyDAOHardFork(env.state)
+ log.Error("Failed to create sealing context", "err", err)
+ return nil, err
}
- // Accumulate the uncles for the current block
- uncles := make([]*types.Header, 0, 2)
- commitUncles := func(blocks map[common.Hash]*types.Block) {
- // Clean up stale uncle blocks first
- for hash, uncle := range blocks {
- if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
- delete(blocks, hash)
- }
- }
- for hash, uncle := range blocks {
- if len(uncles) == 2 {
- break
- }
- if err := w.commitUncle(env, uncle.Header()); err != nil {
- log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
- } else {
- log.Debug("Committing new uncle to block", "hash", hash)
- uncles = append(uncles, uncle.Header())
+ // Accumulate the uncles for the sealing work only if it's allowed.
+ if !genParams.noUncle {
+ commitUncles := func(blocks map[common.Hash]*types.Block) {
+ for hash, uncle := range blocks {
+ if len(env.uncles) == 2 {
+ break
+ }
+ if err := w.commitUncle(env, uncle.Header()); err != nil {
+ log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
+ } else {
+ log.Debug("Committing new uncle to block", "hash", hash)
+ }
}
}
+ // Prefer to locally generated uncle
+ commitUncles(w.localUncles)
+ commitUncles(w.remoteUncles)
}
- // Prefer to locally generated uncle
- commitUncles(w.localUncles)
- commitUncles(w.remoteUncles)
-
- // Create an empty block based on temporary copied state for
- // sealing in advance without waiting block execution finished.
- if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
- w.commit(uncles, nil, false, tstart)
- }
+ return env, nil
+}
+// fillTransactions retrieves the pending transactions from the txpool and fills them
+// into the given sealing block. The transaction selection and ordering strategy can
+// be customized with the plugin in the future.
+func (w *worker) fillTransactions(interrupt *int32, env *environment) {
+ // Split the pending transactions into locals and remotes
// Fill the block with all available pending transactions.
pending := w.eth.TxPool().Pending(true)
- // Short circuit if there is no available pending transactions.
- // But if we disable empty precommit already, ignore it. Since
- // empty block is necessary to keep the liveness of the network.
- if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
- w.updateSnapshot()
- return
- }
- // Split the pending transactions into locals and remotes
localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
@@ -1015,57 +1062,139 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
}
}
if len(localTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs, header.BaseFee)
- if w.commitTransactions(txs, w.coinbase, interrupt) {
+ txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
+ if w.commitTransactions(env, txs, interrupt) {
return
}
}
if len(remoteTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs, header.BaseFee)
- if w.commitTransactions(txs, w.coinbase, interrupt) {
+ txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
+ if w.commitTransactions(env, txs, interrupt) {
return
}
}
- w.commit(uncles, w.fullTaskHook, true, tstart)
}
-// commit runs any post-transaction state modifications, assembles the final block
-// and commits new work if consensus engine is running.
-func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
- // Deep copy receipts here to avoid interaction between different tasks.
- receipts := copyReceipts(w.current.receipts)
- s := w.current.state.Copy()
- block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
+// generateWork generates a sealing block based on the given parameters.
+func (w *worker) generateWork(params *generateParams) (*types.Block, error) {
+ work, err := w.prepareWork(params)
if err != nil {
- return err
+ return nil, err
}
+ defer work.discard()
+
+ w.fillTransactions(nil, work)
+ return w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts)
+}
+
+// commitWork generates several new sealing tasks based on the parent block
+// and submit them to the sealer.
+func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
+ start := time.Now()
+
+ // Set the coinbase if the worker is running or it's required
+ var coinbase common.Address
+ if w.isRunning() {
+ if w.coinbase == (common.Address{}) {
+ log.Error("Refusing to mine without etherbase")
+ return
+ }
+ coinbase = w.coinbase // Use the preset address as the fee recipient
+ }
+ work, err := w.prepareWork(&generateParams{
+ timestamp: uint64(timestamp),
+ coinbase: coinbase,
+ })
+ if err != nil {
+ return
+ }
+ // Create an empty block based on temporary copied state for
+ // sealing in advance without waiting block execution finished.
+ if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
+ w.commit(work.copy(), nil, false, start)
+ }
+ // Fill pending transactions from the txpool
+ w.fillTransactions(interrupt, work)
+ w.commit(work.copy(), w.fullTaskHook, true, start)
+
+ // Swap out the old work with the new one, terminating any leftover
+ // prefetcher processes in the mean time and starting a new one.
+ if w.current != nil {
+ w.current.discard()
+ }
+ w.current = work
+}
+
+// commit runs any post-transaction state modifications, assembles the final block
+// and commits new work if consensus engine is running.
+// Note the assumption is held that the mutation is allowed to the passed env, do
+// the deep copy first.
+func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
if w.isRunning() {
if interval != nil {
interval()
}
- // If we're post merge, just ignore
- td, ttd := w.chain.GetTd(block.ParentHash(), block.NumberU64()-1), w.chain.Config().TerminalTotalDifficulty
- if td != nil && ttd != nil && td.Cmp(ttd) >= 0 {
- return nil
+ // Create a local environment copy, avoid the data race with snapshot state.
+ // https://github.com/ethereum/go-ethereum/issues/24299
+ env := env.copy()
+ block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, env.unclelist(), env.receipts)
+ if err != nil {
+ return err
}
- select {
- case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
- w.unconfirmed.Shift(block.NumberU64() - 1)
- log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
- "uncles", len(uncles), "txs", w.current.tcount,
- "gas", block.GasUsed(), "fees", totalFees(block, receipts),
- "elapsed", common.PrettyDuration(time.Since(start)))
-
- case <-w.exitCh:
- log.Info("Worker has exited")
+ // If we're post merge, just ignore
+ if !w.isTTDReached(block.Header()) {
+ select {
+ case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
+ w.unconfirmed.Shift(block.NumberU64() - 1)
+ log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
+ "uncles", len(env.uncles), "txs", env.tcount,
+ "gas", block.GasUsed(), "fees", totalFees(block, env.receipts),
+ "elapsed", common.PrettyDuration(time.Since(start)))
+
+ case <-w.exitCh:
+ log.Info("Worker has exited")
+ }
}
}
if update {
- w.updateSnapshot()
+ w.updateSnapshot(env)
}
return nil
}
+// getSealingBlock generates the sealing block based on the given parameters.
+func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
+ req := &getWorkReq{
+ params: &generateParams{
+ timestamp: timestamp,
+ forceTime: true,
+ parentHash: parent,
+ coinbase: coinbase,
+ random: random,
+ noUncle: true,
+ noExtra: true,
+ },
+ result: make(chan *types.Block, 1),
+ }
+ select {
+ case w.getWorkCh <- req:
+ block := <-req.result
+ if block == nil {
+ return nil, req.err
+ }
+ return block, nil
+ case <-w.exitCh:
+ return nil, errors.New("miner closed")
+ }
+}
+
+// isTTDReached returns the indicator if the given block has reached the total
+// terminal difficulty for The Merge transition.
+func (w *worker) isTTDReached(header *types.Header) bool {
+ td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty
+ return td != nil && ttd != nil && td.Cmp(ttd) >= 0
+}
+
// copyReceipts makes a deep copy of the given receipts.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
result := make([]*types.Receipt, len(receipts))
diff --git a/miner/worker_test.go b/miner/worker_test.go
index c8ddd2c320b8..dd029433b8bf 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -17,6 +17,7 @@
package miner
import (
+ "errors"
"math/big"
"math/rand"
"sync/atomic"
@@ -30,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@@ -166,6 +168,9 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool }
+func (b *testWorkerBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
+ return nil, errors.New("not supported")
+}
func (b *testWorkerBackend) newRandomUncle() *types.Block {
var parent *types.Block
@@ -197,7 +202,7 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) {
backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
backend.txPool.AddLocals(pendingTxs)
- w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, consensus.NewMerger(rawdb.NewMemoryDatabase()))
+ w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
w.setEtherbase(testBankAddress)
return w, backend
}
@@ -382,7 +387,7 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en
w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
defer w.close()
- var taskCh = make(chan struct{})
+ var taskCh = make(chan struct{}, 3)
taskIndex := 0
w.newTaskHook = func(task *task) {
@@ -521,3 +526,144 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
t.Error("interval reset timeout")
}
}
+
+func TestGetSealingWorkEthash(t *testing.T) {
+ testGetSealingWork(t, ethashChainConfig, ethash.NewFaker(), false)
+}
+
+func TestGetSealingWorkClique(t *testing.T) {
+ testGetSealingWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase()), false)
+}
+
+func TestGetSealingWorkPostMerge(t *testing.T) {
+ local := new(params.ChainConfig)
+ *local = *ethashChainConfig
+ local.TerminalTotalDifficulty = big.NewInt(0)
+ testGetSealingWork(t, local, ethash.NewFaker(), true)
+}
+
+func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, postMerge bool) {
+ defer engine.Close()
+
+ w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
+ defer w.close()
+
+ w.setExtra([]byte{0x01, 0x02})
+ w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock})
+
+ w.skipSealHook = func(task *task) bool {
+ return true
+ }
+ w.fullTaskHook = func() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ timestamp := uint64(time.Now().Unix())
+ assertBlock := func(block *types.Block, number uint64, coinbase common.Address, random common.Hash) {
+ if block.Time() != timestamp {
+ // Sometime the timestamp will be mutated if the timestamp
+ // is even smaller than parent block's. It's OK.
+ t.Logf("Invalid timestamp, want %d, get %d", timestamp, block.Time())
+ }
+ if len(block.Uncles()) != 0 {
+ t.Error("Unexpected uncle block")
+ }
+ _, isClique := engine.(*clique.Clique)
+ if !isClique {
+ if len(block.Extra()) != 0 {
+ t.Error("Unexpected extra field")
+ }
+ if block.Coinbase() != coinbase {
+ t.Errorf("Unexpected coinbase got %x want %x", block.Coinbase(), coinbase)
+ }
+ } else {
+ if block.Coinbase() != (common.Address{}) {
+ t.Error("Unexpected coinbase")
+ }
+ }
+ if !isClique {
+ if block.MixDigest() != random {
+ t.Error("Unexpected mix digest")
+ }
+ }
+ if block.Nonce() != 0 {
+ t.Error("Unexpected block nonce")
+ }
+ if block.NumberU64() != number {
+ t.Errorf("Mismatched block number, want %d got %d", number, block.NumberU64())
+ }
+ }
+ var cases = []struct {
+ parent common.Hash
+ coinbase common.Address
+ random common.Hash
+ expectNumber uint64
+ expectErr bool
+ }{
+ {
+ b.chain.Genesis().Hash(),
+ common.HexToAddress("0xdeadbeef"),
+ common.HexToHash("0xcafebabe"),
+ uint64(1),
+ false,
+ },
+ {
+ b.chain.CurrentBlock().Hash(),
+ common.HexToAddress("0xdeadbeef"),
+ common.HexToHash("0xcafebabe"),
+ b.chain.CurrentBlock().NumberU64() + 1,
+ false,
+ },
+ {
+ b.chain.CurrentBlock().Hash(),
+ common.Address{},
+ common.HexToHash("0xcafebabe"),
+ b.chain.CurrentBlock().NumberU64() + 1,
+ false,
+ },
+ {
+ b.chain.CurrentBlock().Hash(),
+ common.Address{},
+ common.Hash{},
+ b.chain.CurrentBlock().NumberU64() + 1,
+ false,
+ },
+ {
+ common.HexToHash("0xdeadbeef"),
+ common.HexToAddress("0xdeadbeef"),
+ common.HexToHash("0xcafebabe"),
+ 0,
+ true,
+ },
+ }
+
+ // This API should work even when the automatic sealing is not enabled
+ for _, c := range cases {
+ block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
+ if c.expectErr {
+ if err == nil {
+ t.Error("Expect error but get nil")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ assertBlock(block, c.expectNumber, c.coinbase, c.random)
+ }
+ }
+
+ // This API should work even when the automatic sealing is enabled
+ w.start()
+ for _, c := range cases {
+ block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
+ if c.expectErr {
+ if err == nil {
+ t.Error("Expect error but get nil")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ assertBlock(block, c.expectNumber, c.coinbase, c.random)
+ }
+ }
+}
diff --git a/mobile/geth.go b/mobile/geth.go
index bad9e0589f92..709b68cbded8 100644
--- a/mobile/geth.go
+++ b/mobile/geth.go
@@ -220,14 +220,6 @@ func (n *Node) Start() error {
return n.node.Start()
}
-// Stop terminates a running node along with all its services. If the node was not started,
-// an error is returned. It is not possible to restart a stopped node.
-//
-// Deprecated: use Close()
-func (n *Node) Stop() error {
- return n.node.Close()
-}
-
// GetEthereumClient retrieves a client to access the Ethereum subsystem.
func (n *Node) GetEthereumClient() (client *EthereumClient, _ error) {
rpc, err := n.node.Attach()
diff --git a/node/api.go b/node/api.go
index a685ecd6b334..1b32399f635c 100644
--- a/node/api.go
+++ b/node/api.go
@@ -274,11 +274,12 @@ func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *str
}
// Enable WebSocket on the server.
- server := api.node.wsServerForPort(*port)
+ server := api.node.wsServerForPort(*port, false)
if err := server.setListenAddr(*host, *port); err != nil {
return false, err
}
- if err := server.enableWS(api.node.rpcAPIs, config); err != nil {
+ openApis, _ := api.node.GetAPIs()
+ if err := server.enableWS(openApis, config); err != nil {
return false, err
}
if err := server.start(); err != nil {
diff --git a/node/config.go b/node/config.go
index 26f00cd678ab..853190c95f71 100644
--- a/node/config.go
+++ b/node/config.go
@@ -36,6 +36,7 @@ import (
const (
datadirPrivateKey = "nodekey" // Path within the datadir to the node's private key
+ datadirJWTKey = "jwtsecret" // Path within the datadir to the node's jwt secret
datadirDefaultKeyStore = "keystore" // Path within the datadir to the keystore
datadirStaticNodes = "static-nodes.json" // Path within the datadir to the static node list
datadirTrustedNodes = "trusted-nodes.json" // Path within the datadir to the trusted node list
@@ -138,6 +139,16 @@ type Config struct {
// HTTPPathPrefix specifies a path prefix on which http-rpc is to be served.
HTTPPathPrefix string `toml:",omitempty"`
+ // AuthAddr is the listening address on which authenticated APIs are provided.
+ AuthAddr string `toml:",omitempty"`
+
+ // AuthPort is the port number on which authenticated APIs are provided.
+ AuthPort int `toml:",omitempty"`
+
+ // AuthVirtualHosts is the list of virtual hostnames which are allowed on incoming requests
+ // for the authenticated api. This is by default {'localhost'}.
+ AuthVirtualHosts []string `toml:",omitempty"`
+
// WSHost is the host interface on which to start the websocket RPC server. If
// this field is empty, no websocket API endpoint will be started.
WSHost string
@@ -190,6 +201,9 @@ type Config struct {
// AllowUnprotectedTxs allows non EIP-155 protected transactions to be send over RPC.
AllowUnprotectedTxs bool `toml:",omitempty"`
+
+ // JWTSecret is the hex-encoded jwt secret.
+ JWTSecret string `toml:",omitempty"`
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -248,7 +262,7 @@ func (c *Config) HTTPEndpoint() string {
// DefaultHTTPEndpoint returns the HTTP endpoint used by default.
func DefaultHTTPEndpoint() string {
- config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort}
+ config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort, AuthPort: DefaultAuthPort}
return config.HTTPEndpoint()
}
diff --git a/node/defaults.go b/node/defaults.go
index c685dde5d127..fd0277e29dc9 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -34,12 +34,25 @@ const (
DefaultWSPort = 8546 // Default TCP port for the websocket RPC server
DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server
DefaultGraphQLPort = 8547 // Default TCP port for the GraphQL server
+ DefaultAuthHost = "localhost" // Default host interface for the authenticated apis
+ DefaultAuthPort = 8551 // Default port for the authenticated apis
+)
+
+var (
+ DefaultAuthCors = []string{"localhost"} // Default cors domain for the authenticated apis
+ DefaultAuthVhosts = []string{"localhost"} // Default virtual hosts for the authenticated apis
+ DefaultAuthOrigins = []string{"localhost"} // Default origins for the authenticated apis
+ DefaultAuthPrefix = "" // Default prefix for the authenticated apis
+ DefaultAuthModules = []string{"eth", "engine"}
)
// DefaultConfig contains reasonable default settings.
var DefaultConfig = Config{
DataDir: DefaultDataDir(),
HTTPPort: DefaultHTTPPort,
+ AuthAddr: DefaultAuthHost,
+ AuthPort: DefaultAuthPort,
+ AuthVirtualHosts: DefaultAuthVhosts,
HTTPModules: []string{"net", "web3"},
HTTPVirtualHosts: []string{"localhost"},
HTTPTimeouts: rpc.DefaultHTTPTimeouts,
diff --git a/node/endpoints.go b/node/endpoints.go
index 1f85a5213168..166e39adb46f 100644
--- a/node/endpoints.go
+++ b/node/endpoints.go
@@ -60,8 +60,10 @@ func checkModuleAvailability(modules []string, apis []rpc.API) (bad, available [
}
}
for _, name := range modules {
- if _, ok := availableSet[name]; !ok && name != rpc.MetadataApi {
- bad = append(bad, name)
+ if _, ok := availableSet[name]; !ok {
+ if name != rpc.MetadataApi && name != rpc.EngineApi {
+ bad = append(bad, name)
+ }
}
}
return bad, available
diff --git a/node/jwt_handler.go b/node/jwt_handler.go
new file mode 100644
index 000000000000..28d5b87c60bc
--- /dev/null
+++ b/node/jwt_handler.go
@@ -0,0 +1,78 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package node
+
+import (
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/golang-jwt/jwt/v4"
+)
+
+type jwtHandler struct {
+ keyFunc func(token *jwt.Token) (interface{}, error)
+ next http.Handler
+}
+
+// newJWTHandler creates a http.Handler with jwt authentication support.
+func newJWTHandler(secret []byte, next http.Handler) http.Handler {
+ return &jwtHandler{
+ keyFunc: func(token *jwt.Token) (interface{}, error) {
+ return secret, nil
+ },
+ next: next,
+ }
+}
+
+// ServeHTTP implements http.Handler
+func (handler *jwtHandler) ServeHTTP(out http.ResponseWriter, r *http.Request) {
+ var (
+ strToken string
+ claims jwt.RegisteredClaims
+ )
+ if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "Bearer ") {
+ strToken = strings.TrimPrefix(auth, "Bearer ")
+ }
+ if len(strToken) == 0 {
+ http.Error(out, "missing token", http.StatusForbidden)
+ return
+ }
+ // We explicitly set only HS256 allowed, and also disables the
+ // claim-check: the RegisteredClaims internally requires 'iat' to
+ // be no later than 'now', but we allow for a bit of drift.
+ token, err := jwt.ParseWithClaims(strToken, &claims, handler.keyFunc,
+ jwt.WithValidMethods([]string{"HS256"}),
+ jwt.WithoutClaimsValidation())
+
+ switch {
+ case err != nil:
+ http.Error(out, err.Error(), http.StatusForbidden)
+ case !token.Valid:
+ http.Error(out, "invalid token", http.StatusForbidden)
+ case !claims.VerifyExpiresAt(time.Now(), false): // optional
+ http.Error(out, "token is expired", http.StatusForbidden)
+ case claims.IssuedAt == nil:
+ http.Error(out, "missing issued-at", http.StatusForbidden)
+ case time.Since(claims.IssuedAt.Time) > 5*time.Second:
+ http.Error(out, "stale token", http.StatusForbidden)
+ case time.Until(claims.IssuedAt.Time) > 5*time.Second:
+ http.Error(out, "future token", http.StatusForbidden)
+ default:
+ handler.next.ServeHTTP(out, r)
+ }
+}
diff --git a/node/node.go b/node/node.go
index eb55c8376155..904f2bdd1b51 100644
--- a/node/node.go
+++ b/node/node.go
@@ -17,6 +17,7 @@
package node
import (
+ crand "crypto/rand"
"errors"
"fmt"
"net/http"
@@ -27,6 +28,8 @@ import (
"sync"
"github.com/ethereum/go-ethereum/accounts"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -56,6 +59,8 @@ type Node struct {
rpcAPIs []rpc.API // List of APIs currently provided by the node
http *httpServer //
ws *httpServer //
+ httpAuth *httpServer //
+ wsAuth *httpServer //
ipc *ipcServer // Stores information about the ipc http server
inprocHandler *rpc.Server // In-process RPC request handler to process the API requests
@@ -149,7 +154,9 @@ func New(conf *Config) (*Node, error) {
// Configure RPC servers.
node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
+ node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts)
node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
+ node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
node.ipc = newIPCServer(node.log, conf.IPCEndpoint())
return node, nil
@@ -337,7 +344,41 @@ func (n *Node) closeDataDir() {
}
}
-// configureRPC is a helper method to configure all the various RPC endpoints during node
+// obtainJWTSecret loads the jwt-secret, either from the provided config,
+// or from the default location. If neither of those are present, it generates
+// a new secret and stores to the default location.
+func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
+ fileName := cliParam
+ if len(fileName) == 0 {
+ // no path provided, use default
+ fileName = n.ResolvePath(datadirJWTKey)
+ }
+ // try reading from file
+ log.Debug("Reading JWT secret", "path", fileName)
+ if data, err := os.ReadFile(fileName); err == nil {
+ jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
+ if len(jwtSecret) == 32 {
+ return jwtSecret, nil
+ }
+ log.Error("Invalid JWT secret", "path", fileName, "length", len(jwtSecret))
+ return nil, errors.New("invalid JWT secret")
+ }
+ // Need to generate one
+ jwtSecret := make([]byte, 32)
+ crand.Read(jwtSecret)
+ // if we're in --dev mode, don't bother saving, just show it
+ if fileName == "" {
+ log.Info("Generated ephemeral JWT secret", "secret", hexutil.Encode(jwtSecret))
+ return jwtSecret, nil
+ }
+ if err := os.WriteFile(fileName, []byte(hexutil.Encode(jwtSecret)), 0600); err != nil {
+ return nil, err
+ }
+ log.Info("Generated JWT secret", "path", fileName)
+ return jwtSecret, nil
+}
+
+// startRPC is a helper method to configure all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC() error {
@@ -351,55 +392,125 @@ func (n *Node) startRPC() error {
return err
}
}
+ var (
+ servers []*httpServer
+ open, all = n.GetAPIs()
+ )
- // Configure HTTP.
- if n.config.HTTPHost != "" {
- config := httpConfig{
+ initHttp := func(server *httpServer, apis []rpc.API, port int) error {
+ if err := server.setListenAddr(n.config.HTTPHost, port); err != nil {
+ return err
+ }
+ if err := server.enableRPC(apis, httpConfig{
CorsAllowedOrigins: n.config.HTTPCors,
Vhosts: n.config.HTTPVirtualHosts,
Modules: n.config.HTTPModules,
prefix: n.config.HTTPPathPrefix,
+ }); err != nil {
+ return err
}
- if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil {
+ servers = append(servers, server)
+ return nil
+ }
+
+ initWS := func(apis []rpc.API, port int) error {
+ server := n.wsServerForPort(port, false)
+ if err := server.setListenAddr(n.config.WSHost, port); err != nil {
return err
}
- if err := n.http.enableRPC(n.rpcAPIs, config); err != nil {
+ if err := server.enableWS(n.rpcAPIs, wsConfig{
+ Modules: n.config.WSModules,
+ Origins: n.config.WSOrigins,
+ prefix: n.config.WSPathPrefix,
+ }); err != nil {
return err
}
+ servers = append(servers, server)
+ return nil
}
+ initAuth := func(apis []rpc.API, port int, secret []byte) error {
+ // Enable auth via HTTP
+ server := n.httpAuth
+ if err := server.setListenAddr(n.config.AuthAddr, port); err != nil {
+ return err
+ }
+ if err := server.enableRPC(apis, httpConfig{
+ CorsAllowedOrigins: DefaultAuthCors,
+ Vhosts: n.config.AuthVirtualHosts,
+ Modules: DefaultAuthModules,
+ prefix: DefaultAuthPrefix,
+ jwtSecret: secret,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ // Enable auth via WS
+ server = n.wsServerForPort(port, true)
+ if err := server.setListenAddr(n.config.AuthAddr, port); err != nil {
+ return err
+ }
+ if err := server.enableWS(apis, wsConfig{
+ Modules: DefaultAuthModules,
+ Origins: DefaultAuthOrigins,
+ prefix: DefaultAuthPrefix,
+ jwtSecret: secret,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ return nil
+ }
+
+ // Set up HTTP.
+ if n.config.HTTPHost != "" {
+ // Configure legacy unauthenticated HTTP.
+ if err := initHttp(n.http, open, n.config.HTTPPort); err != nil {
+ return err
+ }
+ }
// Configure WebSocket.
if n.config.WSHost != "" {
- server := n.wsServerForPort(n.config.WSPort)
- config := wsConfig{
- Modules: n.config.WSModules,
- Origins: n.config.WSOrigins,
- prefix: n.config.WSPathPrefix,
+ // legacy unauthenticated
+ if err := initWS(open, n.config.WSPort); err != nil {
+ return err
}
- if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil {
+ }
+ // Configure authenticated API
+ if len(open) != len(all) {
+ jwtSecret, err := n.obtainJWTSecret(n.config.JWTSecret)
+ if err != nil {
return err
}
- if err := server.enableWS(n.rpcAPIs, config); err != nil {
+ if err := initAuth(all, n.config.AuthPort, jwtSecret); err != nil {
return err
}
}
-
- if err := n.http.start(); err != nil {
- return err
+ // Start the servers
+ for _, server := range servers {
+ if err := server.start(); err != nil {
+ return err
+ }
}
- return n.ws.start()
+ return nil
}
-func (n *Node) wsServerForPort(port int) *httpServer {
- if n.config.HTTPHost == "" || n.http.port == port {
- return n.http
+func (n *Node) wsServerForPort(port int, authenticated bool) *httpServer {
+ httpServer, wsServer := n.http, n.ws
+ if authenticated {
+ httpServer, wsServer = n.httpAuth, n.wsAuth
}
- return n.ws
+ if n.config.HTTPHost == "" || httpServer.port == port {
+ return httpServer
+ }
+ return wsServer
}
func (n *Node) stopRPC() {
n.http.stop()
n.ws.stop()
+ n.httpAuth.stop()
+ n.wsAuth.stop()
n.ipc.stop()
n.stopInProc()
}
@@ -460,6 +571,17 @@ func (n *Node) RegisterAPIs(apis []rpc.API) {
n.rpcAPIs = append(n.rpcAPIs, apis...)
}
+// GetAPIs return two sets of APIs, both the ones that do not require
+// authentication, and the complete set
+func (n *Node) GetAPIs() (unauthenticated, all []rpc.API) {
+ for _, api := range n.rpcAPIs {
+ if !api.Authenticated {
+ unauthenticated = append(unauthenticated, api)
+ }
+ }
+ return unauthenticated, n.rpcAPIs
+}
+
// RegisterHandler mounts a handler on the given path on the canonical HTTP server.
//
// The name of the handler is shown in a log message when the HTTP server starts
diff --git a/node/node_test.go b/node/node_test.go
index e10463060004..84f61f0c44c4 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -393,7 +393,7 @@ func TestLifecycleTerminationGuarantee(t *testing.T) {
// on the given prefix
func TestRegisterHandler_Successful(t *testing.T) {
node := createNode(t, 7878, 7979)
-
+ defer node.Close()
// create and mount handler
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("success"))
@@ -577,13 +577,13 @@ func (test rpcPrefixTest) check(t *testing.T, node *Node) {
}
}
for _, path := range test.wantWS {
- err := wsRequest(t, wsBase+path, "")
+ err := wsRequest(t, wsBase+path)
if err != nil {
t.Errorf("Error: %s: WebSocket connection failed: %v", path, err)
}
}
for _, path := range test.wantNoWS {
- err := wsRequest(t, wsBase+path, "")
+ err := wsRequest(t, wsBase+path)
if err == nil {
t.Errorf("Error: %s: WebSocket connection succeeded for path in wantNoWS", path)
}
diff --git a/node/rpcstack.go b/node/rpcstack.go
index 2c55a070b229..d9c41cca5781 100644
--- a/node/rpcstack.go
+++ b/node/rpcstack.go
@@ -40,13 +40,15 @@ type httpConfig struct {
CorsAllowedOrigins []string
Vhosts []string
prefix string // path prefix on which to mount http handler
+ jwtSecret []byte // optional JWT secret
}
// wsConfig is the JSON-RPC/Websocket configuration
type wsConfig struct {
- Origins []string
- Modules []string
- prefix string // path prefix on which to mount ws handler
+ Origins []string
+ Modules []string
+ prefix string // path prefix on which to mount ws handler
+ jwtSecret []byte // optional JWT secret
}
type rpcHandler struct {
@@ -157,7 +159,7 @@ func (h *httpServer) start() error {
}
// Log http endpoint.
h.log.Info("HTTP server started",
- "endpoint", listener.Addr(),
+ "endpoint", listener.Addr(), "auth", (h.httpConfig.jwtSecret != nil),
"prefix", h.httpConfig.prefix,
"cors", strings.Join(h.httpConfig.CorsAllowedOrigins, ","),
"vhosts", strings.Join(h.httpConfig.Vhosts, ","),
@@ -285,7 +287,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
}
h.httpConfig = config
h.httpHandler.Store(&rpcHandler{
- Handler: NewHTTPHandlerStack(srv, config.CorsAllowedOrigins, config.Vhosts),
+ Handler: NewHTTPHandlerStack(srv, config.CorsAllowedOrigins, config.Vhosts, config.jwtSecret),
server: srv,
})
return nil
@@ -309,7 +311,6 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
if h.wsAllowed() {
return fmt.Errorf("JSON-RPC over WebSocket is already enabled")
}
-
// Create RPC server and handler.
srv := rpc.NewServer()
if err := RegisterApis(apis, config.Modules, srv, false); err != nil {
@@ -317,7 +318,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
}
h.wsConfig = config
h.wsHandler.Store(&rpcHandler{
- Handler: srv.WebsocketHandler(config.Origins),
+ Handler: NewWSHandlerStack(srv.WebsocketHandler(config.Origins), config.jwtSecret),
server: srv,
})
return nil
@@ -362,13 +363,24 @@ func isWebsocket(r *http.Request) bool {
}
// NewHTTPHandlerStack returns wrapped http-related handlers
-func NewHTTPHandlerStack(srv http.Handler, cors []string, vhosts []string) http.Handler {
+func NewHTTPHandlerStack(srv http.Handler, cors []string, vhosts []string, jwtSecret []byte) http.Handler {
// Wrap the CORS-handler within a host-handler
handler := newCorsHandler(srv, cors)
handler = newVHostHandler(vhosts, handler)
+ if len(jwtSecret) != 0 {
+ handler = newJWTHandler(jwtSecret, handler)
+ }
return newGzipHandler(handler)
}
+// NewWSHandlerStack returns a wrapped ws-related handler.
+func NewWSHandlerStack(srv http.Handler, jwtSecret []byte) http.Handler {
+ if len(jwtSecret) != 0 {
+ return newJWTHandler(jwtSecret, srv)
+ }
+ return srv
+}
+
func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
// disable CORS support if user has not specified a custom CORS configuration
if len(allowedOrigins) == 0 {
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index f92f0ba39693..60fcab5a9001 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -24,10 +24,12 @@ import (
"strconv"
"strings"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/internal/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/golang-jwt/jwt/v4"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
)
@@ -146,12 +148,12 @@ func TestWebsocketOrigins(t *testing.T) {
srv := createAndStartServer(t, &httpConfig{}, true, &wsConfig{Origins: splitAndTrim(tc.spec)})
url := fmt.Sprintf("ws://%v", srv.listenAddr())
for _, origin := range tc.expOk {
- if err := wsRequest(t, url, origin); err != nil {
+ if err := wsRequest(t, url, "Origin", origin); err != nil {
t.Errorf("spec '%v', origin '%v': expected ok, got %v", tc.spec, origin, err)
}
}
for _, origin := range tc.expFail {
- if err := wsRequest(t, url, origin); err == nil {
+ if err := wsRequest(t, url, "Origin", origin); err == nil {
t.Errorf("spec '%v', origin '%v': expected not to allow, got ok", tc.spec, origin)
}
}
@@ -243,13 +245,18 @@ func createAndStartServer(t *testing.T, conf *httpConfig, ws bool, wsConf *wsCon
}
// wsRequest attempts to open a WebSocket connection to the given URL.
-func wsRequest(t *testing.T, url, browserOrigin string) error {
+func wsRequest(t *testing.T, url string, extraHeaders ...string) error {
t.Helper()
- t.Logf("checking WebSocket on %s (origin %q)", url, browserOrigin)
+ //t.Logf("checking WebSocket on %s (origin %q)", url, browserOrigin)
headers := make(http.Header)
- if browserOrigin != "" {
- headers.Set("Origin", browserOrigin)
+ // Apply extra headers.
+ if len(extraHeaders)%2 != 0 {
+ panic("odd extraHeaders length")
+ }
+ for i := 0; i < len(extraHeaders); i += 2 {
+ key, value := extraHeaders[i], extraHeaders[i+1]
+ headers.Set(key, value)
}
conn, _, err := websocket.DefaultDialer.Dial(url, headers)
if conn != nil {
@@ -291,3 +298,79 @@ func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response
}
return resp
}
+
+type testClaim map[string]interface{}
+
+func (testClaim) Valid() error {
+ return nil
+}
+
+func TestJWT(t *testing.T) {
+ var secret = []byte("secret")
+ issueToken := func(secret []byte, method jwt.SigningMethod, input map[string]interface{}) string {
+ if method == nil {
+ method = jwt.SigningMethodHS256
+ }
+ ss, _ := jwt.NewWithClaims(method, testClaim(input)).SignedString(secret)
+ return ss
+ }
+ expOk := []string{
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + 4})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() - 4})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{
+ "iat": time.Now().Unix(),
+ "exp": time.Now().Unix() + 2,
+ })),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{
+ "iat": time.Now().Unix(),
+ "bar": "baz",
+ })),
+ }
+ expFail := []string{
+ // future
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + 6})),
+ // stale
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() - 6})),
+ // wrong algo
+ fmt.Sprintf("Bearer %v", issueToken(secret, jwt.SigningMethodHS512, testClaim{"iat": time.Now().Unix() + 4})),
+ // expired
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix(), "exp": time.Now().Unix()})),
+ // missing mandatory iat
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{})),
+ // wrong secret
+ fmt.Sprintf("Bearer %v", issueToken([]byte("wrong"), nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken([]byte{}, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(nil, nil, testClaim{"iat": time.Now().Unix()})),
+ // Various malformed syntax
+ fmt.Sprintf("%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer: %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer:%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer\t%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer \t%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ }
+ srv := createAndStartServer(t, &httpConfig{jwtSecret: []byte("secret")},
+ true, &wsConfig{Origins: []string{"*"}, jwtSecret: []byte("secret")})
+ wsUrl := fmt.Sprintf("ws://%v", srv.listenAddr())
+ htUrl := fmt.Sprintf("http://%v", srv.listenAddr())
+
+ for i, token := range expOk {
+ if err := wsRequest(t, wsUrl, "Authorization", token); err != nil {
+ t.Errorf("test %d-ws, token '%v': expected ok, got %v", i, token, err)
+ }
+ if resp := rpcRequest(t, htUrl, "Authorization", token); resp.StatusCode != 200 {
+ t.Errorf("test %d-http, token '%v': expected ok, got %v", i, token, resp.StatusCode)
+ }
+ }
+ for i, token := range expFail {
+ if err := wsRequest(t, wsUrl, "Authorization", token); err == nil {
+ t.Errorf("tc %d-ws, token '%v': expected not to allow, got ok", i, token)
+ }
+ if resp := rpcRequest(t, htUrl, "Authorization", token); resp.StatusCode != 403 {
+ t.Errorf("tc %d-http, token '%v': expected not to allow, got %v", i, token, resp.StatusCode)
+ }
+ }
+ srv.stop()
+}
diff --git a/p2p/peer.go b/p2p/peer.go
index 8f564e776de5..257027a5b74d 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -332,11 +332,11 @@ func (p *Peer) handle(msg Msg) error {
msg.Discard()
go SendItems(p.rw, pongMsg)
case msg.Code == discMsg:
- var reason [1]DiscReason
// This is the last message. We don't need to discard or
// check errors because, the connection will be closed after it.
- rlp.Decode(msg.Payload, &reason)
- return reason[0]
+ var m struct{ R DiscReason }
+ rlp.Decode(msg.Payload, &m)
+ return m.R
case msg.Code < baseProtocolLength:
// ignore other base protocol messages
return msg.Discard()
diff --git a/p2p/peer_error.go b/p2p/peer_error.go
index 393cc86b0970..aad1a65c7ac3 100644
--- a/p2p/peer_error.go
+++ b/p2p/peer_error.go
@@ -54,7 +54,7 @@ func (pe *peerError) Error() string {
var errProtocolReturned = errors.New("protocol returned")
-type DiscReason uint
+type DiscReason uint8
const (
DiscRequested DiscReason = iota
diff --git a/p2p/server.go b/p2p/server.go
index bcfc1bd10bd7..138975e54bf5 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -943,9 +943,8 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
}
// If dialing, figure out the remote public key.
- var dialPubkey *ecdsa.PublicKey
if dialDest != nil {
- dialPubkey = new(ecdsa.PublicKey)
+ dialPubkey := new(ecdsa.PublicKey)
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
err = errors.New("dial destination doesn't have a secp256k1 public key")
srv.log.Trace("Setting up connection failed", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go
index 6d7f0b6d7a31..f5172f3f23db 100644
--- a/p2p/simulations/http_test.go
+++ b/p2p/simulations/http_test.go
@@ -141,7 +141,7 @@ func (t *testService) Stop() error {
// message with the given code
func (t *testService) handshake(rw p2p.MsgReadWriter, code uint64) error {
errc := make(chan error, 2)
- go func() { errc <- p2p.Send(rw, code, struct{}{}) }()
+ go func() { errc <- p2p.SendItems(rw, code) }()
go func() { errc <- p2p.ExpectMsg(rw, code, struct{}{}) }()
for i := 0; i < 2; i++ {
if err := <-errc; err != nil {
diff --git a/params/bootnodes.go b/params/bootnodes.go
index e3b5570d55b5..ed52e8ee64b1 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -24,12 +24,12 @@ var MainnetBootnodes = []string{
// Ethereum Foundation Go Bootnodes
"enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001
"enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001
- "enode://ca6de62fce278f96aea6ec5a2daadb877e51651247cb96ee310a318def462913b653963c155a0ef6c7d50048bba6e6cea881130857413d9f50a621546b590758@34.255.23.113:30303", // bootnode-aws-eu-west-1-001
- "enode://279944d8dcd428dffaa7436f25ca0ca43ae19e7bcf94a8fb7d1641651f92d121e972ac2e8f381414b80cc8e5555811c2ec6e1a99bb009b3f53c4c69923e11bd8@35.158.244.151:30303", // bootnode-aws-eu-central-1-001
"enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303", // bootnode-azure-australiaeast-001
"enode://103858bdb88756c71f15e9b5e09b56dc1be52f0a5021d46301dbbfb7e130029cc9d0d6f73f693bc29b665770fff7da4d34f3c6379fe12721b5d7a0bcb5ca1fc1@191.234.162.198:30303", // bootnode-azure-brazilsouth-001
"enode://715171f50508aba88aecd1250af392a45a330af91d7b90701c436b618c86aaa1589c9184561907bebbb56439b8f8787bc01f49a7c77276c58c1b09822d75e8e8@52.231.165.108:30303", // bootnode-azure-koreasouth-001
"enode://5d6d7cd20d6da4bb83a1d28cadb5d409b64edf314c0335df658c1a54e32c7c4a7ab7823d57c39b6a757556e68ff1df17c748b698544a55cb488b52479a92b60f@104.42.217.25:30303", // bootnode-azure-westus-001
+ "enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303", // bootnode-hetzner-hel
+ "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn
}
// RopstenBootnodes are the enode URLs of the P2P bootstrap nodes running on the
@@ -76,6 +76,13 @@ var GoerliBootnodes = []string{
"enode://a59e33ccd2b3e52d578f1fbd70c6f9babda2650f0760d6ff3b37742fdcdfdb3defba5d56d315b40c46b70198c7621e63ffa3f987389c7118634b0fefbbdfa7fd@51.15.119.157:40303",
}
+var KilnBootnodes = []string{
+ "enode://c354db99124f0faf677ff0e75c3cbbd568b2febc186af664e0c51ac435609badedc67a18a63adb64dacc1780a28dcefebfc29b83fd1a3f4aa3c0eb161364cf94@164.92.130.5:30303",
+ "enode://d41af1662434cad0a88fe3c7c92375ec5719f4516ab6d8cb9695e0e2e815382c767038e72c224e04040885157da47422f756c040a9072676c6e35c5b1a383cce@138.68.66.103:30303",
+ "enode://91a745c3fb069f6b99cad10b75c463d527711b106b622756e9ef9f12d2631b6cb885f831d1c8731b9bc7177cae5e1ea1f1be087f86d7d30b590a91f22bc041b0@165.232.180.230:30303",
+ "enode://b74bd2e8a9f0c53f0c93bcce80818f2f19439fd807af5c7fbc3efb10130c6ee08be8f3aaec7dc0a057ad7b2a809c8f34dc62431e9b6954b07a6548cc59867884@164.92.140.200:30303",
+}
+
var V5Bootnodes = []string{
// Teku team's bootnode
"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA",
diff --git a/params/config.go b/params/config.go
index 36482f2380a4..aee5b7e1c855 100644
--- a/params/config.go
+++ b/params/config.go
@@ -32,6 +32,7 @@ var (
SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9")
RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
+ KilnGenesisHash = common.HexToHash("0x51c7fe41be669f69c45c33a56982cbde405313342d9e2b00d7c91a7b284dd4f8")
)
// TrustedCheckpoints associates each known checkpoint with the genesis hash of
@@ -267,7 +268,7 @@ var (
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
- TestRules = TestChainConfig.Rules(new(big.Int))
+ TestRules = TestChainConfig.Rules(new(big.Int), false)
)
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
@@ -387,7 +388,7 @@ func (c *ChainConfig) String() string {
default:
engine = "unknown"
}
- return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, MergeFork: %v, Engine: %v}",
+ return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, MergeFork: %v, Terminal TD: %v, Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
@@ -404,6 +405,7 @@ func (c *ChainConfig) String() string {
c.LondonBlock,
c.ArrowGlacierBlock,
c.MergeForkBlock,
+ c.TerminalTotalDifficulty,
engine,
)
}
@@ -668,10 +670,11 @@ type Rules struct {
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
IsBerlin, IsLondon bool
+ IsMerge bool
}
// Rules ensures c's ChainID is not nil.
-func (c *ChainConfig) Rules(num *big.Int) Rules {
+func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules {
chainID := c.ChainID
if chainID == nil {
chainID = new(big.Int)
@@ -688,5 +691,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
IsIstanbul: c.IsIstanbul(num),
IsBerlin: c.IsBerlin(num),
IsLondon: c.IsLondon(num),
+ IsMerge: isMerge,
}
}
diff --git a/params/version.go b/params/version.go
index 9c463da27e22..4c7c625dbdf2 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 10 // Minor version component of the current release
- VersionPatch = 15 // Patch version component of the current release
+ VersionPatch = 17 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/rlp/decode.go b/rlp/decode.go
index 5f2e5ad5fea0..9214dbfb3720 100644
--- a/rlp/decode.go
+++ b/rlp/decode.go
@@ -27,6 +27,8 @@ import (
"reflect"
"strings"
"sync"
+
+ "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
)
//lint:ignore ST1012 EOL is not an error.
@@ -148,7 +150,7 @@ var (
bigInt = reflect.TypeOf(big.Int{})
)
-func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {
+func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) {
kind := typ.Kind()
switch {
case typ == rawValueType:
@@ -220,55 +222,20 @@ func decodeBigIntNoPtr(s *Stream, val reflect.Value) error {
}
func decodeBigInt(s *Stream, val reflect.Value) error {
- var buffer []byte
- kind, size, err := s.Kind()
- switch {
- case err != nil:
- return wrapStreamError(err, val.Type())
- case kind == List:
- return wrapStreamError(ErrExpectedString, val.Type())
- case kind == Byte:
- buffer = s.uintbuf[:1]
- buffer[0] = s.byteval
- s.kind = -1 // re-arm Kind
- case size == 0:
- // Avoid zero-length read.
- s.kind = -1
- case size <= uint64(len(s.uintbuf)):
- // For integers smaller than s.uintbuf, allocating a buffer
- // can be avoided.
- buffer = s.uintbuf[:size]
- if err := s.readFull(buffer); err != nil {
- return wrapStreamError(err, val.Type())
- }
- // Reject inputs where single byte encoding should have been used.
- if size == 1 && buffer[0] < 128 {
- return wrapStreamError(ErrCanonSize, val.Type())
- }
- default:
- // For large integers, a temporary buffer is needed.
- buffer = make([]byte, size)
- if err := s.readFull(buffer); err != nil {
- return wrapStreamError(err, val.Type())
- }
- }
-
- // Reject leading zero bytes.
- if len(buffer) > 0 && buffer[0] == 0 {
- return wrapStreamError(ErrCanonInt, val.Type())
- }
-
- // Set the integer bytes.
i := val.Interface().(*big.Int)
if i == nil {
i = new(big.Int)
val.Set(reflect.ValueOf(i))
}
- i.SetBytes(buffer)
+
+ err := s.decodeBigInt(i)
+ if err != nil {
+ return wrapStreamError(err, val.Type())
+ }
return nil
}
-func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
+func makeListDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) {
etype := typ.Elem()
if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) {
if typ.Kind() == reflect.Array {
@@ -276,7 +243,7 @@ func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
}
return decodeByteSlice, nil
}
- etypeinfo := theTC.infoWhileGenerating(etype, tags{})
+ etypeinfo := theTC.infoWhileGenerating(etype, rlpstruct.Tags{})
if etypeinfo.decoderErr != nil {
return nil, etypeinfo.decoderErr
}
@@ -286,7 +253,7 @@ func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
dec = func(s *Stream, val reflect.Value) error {
return decodeListArray(s, val, etypeinfo.decoder)
}
- case tag.tail:
+ case tag.Tail:
// A slice with "tail" tag can occur as the last field
// of a struct and is supposed to swallow all remaining
// list elements. The struct decoder already called s.List,
@@ -451,16 +418,16 @@ func zeroFields(structval reflect.Value, fields []field) {
}
// makePtrDecoder creates a decoder that decodes into the pointer's element type.
-func makePtrDecoder(typ reflect.Type, tag tags) (decoder, error) {
+func makePtrDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) {
etype := typ.Elem()
- etypeinfo := theTC.infoWhileGenerating(etype, tags{})
+ etypeinfo := theTC.infoWhileGenerating(etype, rlpstruct.Tags{})
switch {
case etypeinfo.decoderErr != nil:
return nil, etypeinfo.decoderErr
- case !tag.nilOK:
+ case !tag.NilOK:
return makeSimplePtrDecoder(etype, etypeinfo), nil
default:
- return makeNilPtrDecoder(etype, etypeinfo, tag.nilKind), nil
+ return makeNilPtrDecoder(etype, etypeinfo, tag), nil
}
}
@@ -481,9 +448,13 @@ func makeSimplePtrDecoder(etype reflect.Type, etypeinfo *typeinfo) decoder {
// values are decoded into a value of the element type, just like makePtrDecoder does.
//
// This decoder is used for pointer-typed struct fields with struct tag "nil".
-func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, nilKind Kind) decoder {
+func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, ts rlpstruct.Tags) decoder {
typ := reflect.PtrTo(etype)
nilPtr := reflect.Zero(typ)
+
+ // Determine the value kind that results in nil pointer.
+ nilKind := typeNilKind(etype, ts)
+
return func(s *Stream, val reflect.Value) (err error) {
kind, size, err := s.Kind()
if err != nil {
@@ -659,6 +630,37 @@ func (s *Stream) Bytes() ([]byte, error) {
}
}
+// ReadBytes decodes the next RLP value and stores the result in b.
+// The value size must match len(b) exactly.
+func (s *Stream) ReadBytes(b []byte) error {
+ kind, size, err := s.Kind()
+ if err != nil {
+ return err
+ }
+ switch kind {
+ case Byte:
+ if len(b) != 1 {
+ return fmt.Errorf("input value has wrong size 1, want %d", len(b))
+ }
+ b[0] = s.byteval
+ s.kind = -1 // rearm Kind
+ return nil
+ case String:
+ if uint64(len(b)) != size {
+ return fmt.Errorf("input value has wrong size %d, want %d", size, len(b))
+ }
+ if err = s.readFull(b); err != nil {
+ return err
+ }
+ if size == 1 && b[0] < 128 {
+ return ErrCanonSize
+ }
+ return nil
+ default:
+ return ErrExpectedString
+ }
+}
+
// Raw reads a raw encoded value including RLP type information.
func (s *Stream) Raw() ([]byte, error) {
kind, size, err := s.Kind()
@@ -687,10 +689,31 @@ func (s *Stream) Raw() ([]byte, error) {
// Uint reads an RLP string of up to 8 bytes and returns its contents
// as an unsigned integer. If the input does not contain an RLP string, the
// returned error will be ErrExpectedString.
+//
+// Deprecated: use s.Uint64 instead.
func (s *Stream) Uint() (uint64, error) {
return s.uint(64)
}
+func (s *Stream) Uint64() (uint64, error) {
+ return s.uint(64)
+}
+
+func (s *Stream) Uint32() (uint32, error) {
+ i, err := s.uint(32)
+ return uint32(i), err
+}
+
+func (s *Stream) Uint16() (uint16, error) {
+ i, err := s.uint(16)
+ return uint16(i), err
+}
+
+func (s *Stream) Uint8() (uint8, error) {
+ i, err := s.uint(8)
+ return uint8(i), err
+}
+
func (s *Stream) uint(maxbits int) (uint64, error) {
kind, size, err := s.Kind()
if err != nil {
@@ -781,6 +804,65 @@ func (s *Stream) ListEnd() error {
return nil
}
+// MoreDataInList reports whether the current list context contains
+// more data to be read.
+func (s *Stream) MoreDataInList() bool {
+ _, listLimit := s.listLimit()
+ return listLimit > 0
+}
+
+// BigInt decodes an arbitrary-size integer value.
+func (s *Stream) BigInt() (*big.Int, error) {
+ i := new(big.Int)
+ if err := s.decodeBigInt(i); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func (s *Stream) decodeBigInt(dst *big.Int) error {
+ var buffer []byte
+ kind, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case kind == List:
+ return ErrExpectedString
+ case kind == Byte:
+ buffer = s.uintbuf[:1]
+ buffer[0] = s.byteval
+ s.kind = -1 // re-arm Kind
+ case size == 0:
+ // Avoid zero-length read.
+ s.kind = -1
+ case size <= uint64(len(s.uintbuf)):
+ // For integers smaller than s.uintbuf, allocating a buffer
+ // can be avoided.
+ buffer = s.uintbuf[:size]
+ if err := s.readFull(buffer); err != nil {
+ return err
+ }
+ // Reject inputs where single byte encoding should have been used.
+ if size == 1 && buffer[0] < 128 {
+ return ErrCanonSize
+ }
+ default:
+ // For large integers, a temporary buffer is needed.
+ buffer = make([]byte, size)
+ if err := s.readFull(buffer); err != nil {
+ return err
+ }
+ }
+
+ // Reject leading zero bytes.
+ if len(buffer) > 0 && buffer[0] == 0 {
+ return ErrCanonInt
+ }
+ // Set the integer bytes.
+ dst.SetBytes(buffer)
+ return nil
+}
+
// Decode decodes a value and stores the result in the value pointed
// to by val. Please see the documentation for the Decode function
// to learn about the decoding rules.
diff --git a/rlp/decode_test.go b/rlp/decode_test.go
index 7c3dafeac44d..babdf3891bba 100644
--- a/rlp/decode_test.go
+++ b/rlp/decode_test.go
@@ -286,6 +286,47 @@ func TestStreamRaw(t *testing.T) {
}
}
+func TestStreamReadBytes(t *testing.T) {
+ tests := []struct {
+ input string
+ size int
+ err string
+ }{
+ // kind List
+ {input: "C0", size: 1, err: "rlp: expected String or Byte"},
+ // kind Byte
+ {input: "04", size: 0, err: "input value has wrong size 1, want 0"},
+ {input: "04", size: 1},
+ {input: "04", size: 2, err: "input value has wrong size 1, want 2"},
+ // kind String
+ {input: "820102", size: 0, err: "input value has wrong size 2, want 0"},
+ {input: "820102", size: 1, err: "input value has wrong size 2, want 1"},
+ {input: "820102", size: 2},
+ {input: "820102", size: 3, err: "input value has wrong size 2, want 3"},
+ }
+
+ for _, test := range tests {
+ test := test
+ name := fmt.Sprintf("input_%s/size_%d", test.input, test.size)
+ t.Run(name, func(t *testing.T) {
+ s := NewStream(bytes.NewReader(unhex(test.input)), 0)
+ b := make([]byte, test.size)
+ err := s.ReadBytes(b)
+ if test.err == "" {
+ if err != nil {
+ t.Errorf("unexpected error %q", err)
+ }
+ } else {
+ if err == nil {
+ t.Errorf("expected error, got nil")
+ } else if err.Error() != test.err {
+ t.Errorf("wrong error %q", err)
+ }
+ }
+ })
+ }
+}
+
func TestDecodeErrors(t *testing.T) {
r := bytes.NewReader(nil)
@@ -990,7 +1031,7 @@ func TestInvalidOptionalField(t *testing.T) {
v interface{}
err string
}{
- {v: new(invalid1), err: `rlp: struct field rlp.invalid1.B needs "optional" tag`},
+ {v: new(invalid1), err: `rlp: invalid struct tag "" for rlp.invalid1.B (must be optional because preceding field "A" is optional)`},
{v: new(invalid2), err: `rlp: invalid struct tag "optional" for rlp.invalid2.T (also has "tail" tag)`},
{v: new(invalid3), err: `rlp: invalid struct tag "tail" for rlp.invalid3.T (also has "optional" tag)`},
}
diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go
new file mode 100644
index 000000000000..289e7448c6ca
--- /dev/null
+++ b/rlp/encbuffer.go
@@ -0,0 +1,382 @@
+package rlp
+
+import (
+ "io"
+ "math/big"
+ "reflect"
+ "sync"
+)
+
+type encBuffer struct {
+ str []byte // string data, contains everything except list headers
+ lheads []listhead // all list headers
+ lhsize int // sum of sizes of all encoded list headers
+ sizebuf [9]byte // auxiliary buffer for uint encoding
+}
+
+// The global encBuffer pool.
+var encBufferPool = sync.Pool{
+ New: func() interface{} { return new(encBuffer) },
+}
+
+func getEncBuffer() *encBuffer {
+ buf := encBufferPool.Get().(*encBuffer)
+ buf.reset()
+ return buf
+}
+
+func (buf *encBuffer) reset() {
+ buf.lhsize = 0
+ buf.str = buf.str[:0]
+ buf.lheads = buf.lheads[:0]
+}
+
+// size returns the length of the encoded data.
+func (buf *encBuffer) size() int {
+ return len(buf.str) + buf.lhsize
+}
+
+// makeBytes creates the encoder output.
+func (w *encBuffer) makeBytes() []byte {
+ out := make([]byte, w.size())
+ w.copyTo(out)
+ return out
+}
+
+func (w *encBuffer) copyTo(dst []byte) {
+ strpos := 0
+ pos := 0
+ for _, head := range w.lheads {
+ // write string data before header
+ n := copy(dst[pos:], w.str[strpos:head.offset])
+ pos += n
+ strpos += n
+ // write the header
+ enc := head.encode(dst[pos:])
+ pos += len(enc)
+ }
+ // copy string data after the last list header
+ copy(dst[pos:], w.str[strpos:])
+}
+
+// writeTo writes the encoder output to w.
+func (buf *encBuffer) writeTo(w io.Writer) (err error) {
+ strpos := 0
+ for _, head := range buf.lheads {
+ // write string data before header
+ if head.offset-strpos > 0 {
+ n, err := w.Write(buf.str[strpos:head.offset])
+ strpos += n
+ if err != nil {
+ return err
+ }
+ }
+ // write the header
+ enc := head.encode(buf.sizebuf[:])
+ if _, err = w.Write(enc); err != nil {
+ return err
+ }
+ }
+ if strpos < len(buf.str) {
+ // write string data after the last list header
+ _, err = w.Write(buf.str[strpos:])
+ }
+ return err
+}
+
+// Write implements io.Writer and appends b directly to the output.
+func (buf *encBuffer) Write(b []byte) (int, error) {
+ buf.str = append(buf.str, b...)
+ return len(b), nil
+}
+
+// writeBool writes b as the integer 0 (false) or 1 (true).
+func (buf *encBuffer) writeBool(b bool) {
+ if b {
+ buf.str = append(buf.str, 0x01)
+ } else {
+ buf.str = append(buf.str, 0x80)
+ }
+}
+
+func (buf *encBuffer) writeUint64(i uint64) {
+ if i == 0 {
+ buf.str = append(buf.str, 0x80)
+ } else if i < 128 {
+ // fits single byte
+ buf.str = append(buf.str, byte(i))
+ } else {
+ s := putint(buf.sizebuf[1:], i)
+ buf.sizebuf[0] = 0x80 + byte(s)
+ buf.str = append(buf.str, buf.sizebuf[:s+1]...)
+ }
+}
+
+func (buf *encBuffer) writeBytes(b []byte) {
+ if len(b) == 1 && b[0] <= 0x7F {
+ // fits single byte, no string header
+ buf.str = append(buf.str, b[0])
+ } else {
+ buf.encodeStringHeader(len(b))
+ buf.str = append(buf.str, b...)
+ }
+}
+
+func (buf *encBuffer) writeString(s string) {
+ buf.writeBytes([]byte(s))
+}
+
+// wordBytes is the number of bytes in a big.Word
+const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
+
+// writeBigInt writes i as an integer.
+func (w *encBuffer) writeBigInt(i *big.Int) {
+ bitlen := i.BitLen()
+ if bitlen <= 64 {
+ w.writeUint64(i.Uint64())
+ return
+ }
+ // Integer is larger than 64 bits, encode from i.Bits().
+ // The minimal byte length is bitlen rounded up to the next
+ // multiple of 8, divided by 8.
+ length := ((bitlen + 7) & -8) >> 3
+ w.encodeStringHeader(length)
+ w.str = append(w.str, make([]byte, length)...)
+ index := length
+ buf := w.str[len(w.str)-length:]
+ for _, d := range i.Bits() {
+ for j := 0; j < wordBytes && index > 0; j++ {
+ index--
+ buf[index] = byte(d)
+ d >>= 8
+ }
+ }
+}
+
+// list adds a new list header to the header stack. It returns the index of the header.
+// Call listEnd with this index after encoding the content of the list.
+func (buf *encBuffer) list() int {
+ buf.lheads = append(buf.lheads, listhead{offset: len(buf.str), size: buf.lhsize})
+ return len(buf.lheads) - 1
+}
+
+func (buf *encBuffer) listEnd(index int) {
+ lh := &buf.lheads[index]
+ lh.size = buf.size() - lh.offset - lh.size
+ if lh.size < 56 {
+ buf.lhsize++ // length encoded into kind tag
+ } else {
+ buf.lhsize += 1 + intsize(uint64(lh.size))
+ }
+}
+
+func (buf *encBuffer) encode(val interface{}) error {
+ rval := reflect.ValueOf(val)
+ writer, err := cachedWriter(rval.Type())
+ if err != nil {
+ return err
+ }
+ return writer(rval, buf)
+}
+
+func (buf *encBuffer) encodeStringHeader(size int) {
+ if size < 56 {
+ buf.str = append(buf.str, 0x80+byte(size))
+ } else {
+ sizesize := putint(buf.sizebuf[1:], uint64(size))
+ buf.sizebuf[0] = 0xB7 + byte(sizesize)
+ buf.str = append(buf.str, buf.sizebuf[:sizesize+1]...)
+ }
+}
+
+// encReader is the io.Reader returned by EncodeToReader.
+// It releases its encbuf at EOF.
+type encReader struct {
+ buf *encBuffer // the buffer we're reading from. this is nil when we're at EOF.
+ lhpos int // index of list header that we're reading
+ strpos int // current position in string buffer
+ piece []byte // next piece to be read
+}
+
+func (r *encReader) Read(b []byte) (n int, err error) {
+ for {
+ if r.piece = r.next(); r.piece == nil {
+ // Put the encode buffer back into the pool at EOF when it
+ // is first encountered. Subsequent calls still return EOF
+ // as the error but the buffer is no longer valid.
+ if r.buf != nil {
+ encBufferPool.Put(r.buf)
+ r.buf = nil
+ }
+ return n, io.EOF
+ }
+ nn := copy(b[n:], r.piece)
+ n += nn
+ if nn < len(r.piece) {
+ // piece didn't fit, see you next time.
+ r.piece = r.piece[nn:]
+ return n, nil
+ }
+ r.piece = nil
+ }
+}
+
+// next returns the next piece of data to be read.
+// it returns nil at EOF.
+func (r *encReader) next() []byte {
+ switch {
+ case r.buf == nil:
+ return nil
+
+ case r.piece != nil:
+ // There is still data available for reading.
+ return r.piece
+
+ case r.lhpos < len(r.buf.lheads):
+ // We're before the last list header.
+ head := r.buf.lheads[r.lhpos]
+ sizebefore := head.offset - r.strpos
+ if sizebefore > 0 {
+ // String data before header.
+ p := r.buf.str[r.strpos:head.offset]
+ r.strpos += sizebefore
+ return p
+ }
+ r.lhpos++
+ return head.encode(r.buf.sizebuf[:])
+
+ case r.strpos < len(r.buf.str):
+ // String data at the end, after all list headers.
+ p := r.buf.str[r.strpos:]
+ r.strpos = len(r.buf.str)
+ return p
+
+ default:
+ return nil
+ }
+}
+
+func encBufferFromWriter(w io.Writer) *encBuffer {
+ switch w := w.(type) {
+ case EncoderBuffer:
+ return w.buf
+ case *EncoderBuffer:
+ return w.buf
+ case *encBuffer:
+ return w
+ default:
+ return nil
+ }
+}
+
+// EncoderBuffer is a buffer for incremental encoding.
+//
+// The zero value is NOT ready for use. To get a usable buffer,
+// create it using NewEncoderBuffer or call Reset.
+type EncoderBuffer struct {
+ buf *encBuffer
+ dst io.Writer
+
+ ownBuffer bool
+}
+
+// NewEncoderBuffer creates an encoder buffer.
+func NewEncoderBuffer(dst io.Writer) EncoderBuffer {
+ var w EncoderBuffer
+ w.Reset(dst)
+ return w
+}
+
+// Reset truncates the buffer and sets the output destination.
+func (w *EncoderBuffer) Reset(dst io.Writer) {
+ if w.buf != nil && !w.ownBuffer {
+ panic("can't Reset derived EncoderBuffer")
+ }
+
+ // If the destination writer has an *encBuffer, use it.
+ // Note that w.ownBuffer is left false here.
+ if dst != nil {
+ if outer := encBufferFromWriter(dst); outer != nil {
+ *w = EncoderBuffer{outer, nil, false}
+ return
+ }
+ }
+
+ // Get a fresh buffer.
+ if w.buf == nil {
+ w.buf = encBufferPool.Get().(*encBuffer)
+ w.ownBuffer = true
+ }
+ w.buf.reset()
+ w.dst = dst
+}
+
+// Flush writes encoded RLP data to the output writer. This can only be called once.
+// If you want to re-use the buffer after Flush, you must call Reset.
+func (w *EncoderBuffer) Flush() error {
+ var err error
+ if w.dst != nil {
+ err = w.buf.writeTo(w.dst)
+ }
+ // Release the internal buffer.
+ if w.ownBuffer {
+ encBufferPool.Put(w.buf)
+ }
+ *w = EncoderBuffer{}
+ return err
+}
+
+// ToBytes returns the encoded bytes.
+func (w *EncoderBuffer) ToBytes() []byte {
+ return w.buf.makeBytes()
+}
+
+// AppendToBytes appends the encoded bytes to dst.
+func (w *EncoderBuffer) AppendToBytes(dst []byte) []byte {
+ size := w.buf.size()
+ out := append(dst, make([]byte, size)...)
+ w.buf.copyTo(out[len(dst):])
+ return out
+}
+
+// Write appends b directly to the encoder output.
+func (w EncoderBuffer) Write(b []byte) (int, error) {
+ return w.buf.Write(b)
+}
+
+// WriteBool writes b as the integer 0 (false) or 1 (true).
+func (w EncoderBuffer) WriteBool(b bool) {
+ w.buf.writeBool(b)
+}
+
+// WriteUint64 encodes an unsigned integer.
+func (w EncoderBuffer) WriteUint64(i uint64) {
+ w.buf.writeUint64(i)
+}
+
+// WriteBigInt encodes a big.Int as an RLP string.
+// Note: Unlike with Encode, the sign of i is ignored.
+func (w EncoderBuffer) WriteBigInt(i *big.Int) {
+ w.buf.writeBigInt(i)
+}
+
+// WriteBytes encodes b as an RLP string.
+func (w EncoderBuffer) WriteBytes(b []byte) {
+ w.buf.writeBytes(b)
+}
+
+// WriteBytes encodes s as an RLP string.
+func (w EncoderBuffer) WriteString(s string) {
+ w.buf.writeString(s)
+}
+
+// List starts a list. It returns an internal index. Call EndList with
+// this index after encoding the content to finish the list.
+func (w EncoderBuffer) List() int {
+ return w.buf.list()
+}
+
+// ListEnd finishes the given list.
+func (w EncoderBuffer) ListEnd(index int) {
+ w.buf.listEnd(index)
+}
diff --git a/rlp/encbuffer_example_test.go b/rlp/encbuffer_example_test.go
new file mode 100644
index 000000000000..ee15d82a77b9
--- /dev/null
+++ b/rlp/encbuffer_example_test.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rlp_test
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func ExampleEncoderBuffer() {
+ var w bytes.Buffer
+
+ // Encode [4, [5, 6]] to w.
+ buf := rlp.NewEncoderBuffer(&w)
+ l1 := buf.List()
+ buf.WriteUint64(4)
+ l2 := buf.List()
+ buf.WriteUint64(5)
+ buf.WriteUint64(6)
+ buf.ListEnd(l2)
+ buf.ListEnd(l1)
+
+ if err := buf.Flush(); err != nil {
+ panic(err)
+ }
+ fmt.Printf("%X\n", w.Bytes())
+ // Output:
+ // C404C20506
+}
diff --git a/rlp/encode.go b/rlp/encode.go
index 1623e97a3e9e..b96505f56dfe 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -17,11 +17,13 @@
package rlp
import (
+ "errors"
"fmt"
"io"
"math/big"
"reflect"
- "sync"
+
+ "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
)
var (
@@ -31,6 +33,8 @@ var (
EmptyList = []byte{0xC0}
)
+var ErrNegativeBigInt = errors.New("rlp: cannot encode negative big.Int")
+
// Encoder is implemented by types that require custom
// encoding rules or want to encode private fields.
type Encoder interface {
@@ -51,30 +55,29 @@ type Encoder interface {
//
// Please see package-level documentation of encoding rules.
func Encode(w io.Writer, val interface{}) error {
- if outer, ok := w.(*encbuf); ok {
- // Encode was called by some type's EncodeRLP.
- // Avoid copying by writing to the outer encbuf directly.
- return outer.encode(val)
+ // Optimization: reuse *encBuffer when called by EncodeRLP.
+ if buf := encBufferFromWriter(w); buf != nil {
+ return buf.encode(val)
}
- eb := encbufPool.Get().(*encbuf)
- defer encbufPool.Put(eb)
- eb.reset()
- if err := eb.encode(val); err != nil {
+
+ buf := getEncBuffer()
+ defer encBufferPool.Put(buf)
+ if err := buf.encode(val); err != nil {
return err
}
- return eb.toWriter(w)
+ return buf.writeTo(w)
}
// EncodeToBytes returns the RLP encoding of val.
// Please see package-level documentation for the encoding rules.
func EncodeToBytes(val interface{}) ([]byte, error) {
- eb := encbufPool.Get().(*encbuf)
- defer encbufPool.Put(eb)
- eb.reset()
- if err := eb.encode(val); err != nil {
+ buf := getEncBuffer()
+ defer encBufferPool.Put(buf)
+
+ if err := buf.encode(val); err != nil {
return nil, err
}
- return eb.toBytes(), nil
+ return buf.makeBytes(), nil
}
// EncodeToReader returns a reader from which the RLP encoding of val
@@ -83,12 +86,15 @@ func EncodeToBytes(val interface{}) ([]byte, error) {
//
// Please see the documentation of Encode for the encoding rules.
func EncodeToReader(val interface{}) (size int, r io.Reader, err error) {
- eb := encbufPool.Get().(*encbuf)
- eb.reset()
- if err := eb.encode(val); err != nil {
+ buf := getEncBuffer()
+ if err := buf.encode(val); err != nil {
+ encBufferPool.Put(buf)
return 0, nil, err
}
- return eb.size(), &encReader{buf: eb}, nil
+ // Note: can't put the reader back into the pool here
+ // because it is held by encReader. The reader puts it
+ // back when it has been fully consumed.
+ return buf.size(), &encReader{buf: buf}, nil
}
type listhead struct {
@@ -123,207 +129,10 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
return sizesize + 1
}
-type encbuf struct {
- str []byte // string data, contains everything except list headers
- lheads []listhead // all list headers
- lhsize int // sum of sizes of all encoded list headers
- sizebuf [9]byte // auxiliary buffer for uint encoding
-}
-
-// encbufs are pooled.
-var encbufPool = sync.Pool{
- New: func() interface{} { return new(encbuf) },
-}
-
-func (w *encbuf) reset() {
- w.lhsize = 0
- w.str = w.str[:0]
- w.lheads = w.lheads[:0]
-}
-
-// encbuf implements io.Writer so it can be passed it into EncodeRLP.
-func (w *encbuf) Write(b []byte) (int, error) {
- w.str = append(w.str, b...)
- return len(b), nil
-}
-
-func (w *encbuf) encode(val interface{}) error {
- rval := reflect.ValueOf(val)
- writer, err := cachedWriter(rval.Type())
- if err != nil {
- return err
- }
- return writer(rval, w)
-}
-
-func (w *encbuf) encodeStringHeader(size int) {
- if size < 56 {
- w.str = append(w.str, 0x80+byte(size))
- } else {
- sizesize := putint(w.sizebuf[1:], uint64(size))
- w.sizebuf[0] = 0xB7 + byte(sizesize)
- w.str = append(w.str, w.sizebuf[:sizesize+1]...)
- }
-}
-
-func (w *encbuf) encodeString(b []byte) {
- if len(b) == 1 && b[0] <= 0x7F {
- // fits single byte, no string header
- w.str = append(w.str, b[0])
- } else {
- w.encodeStringHeader(len(b))
- w.str = append(w.str, b...)
- }
-}
-
-func (w *encbuf) encodeUint(i uint64) {
- if i == 0 {
- w.str = append(w.str, 0x80)
- } else if i < 128 {
- // fits single byte
- w.str = append(w.str, byte(i))
- } else {
- s := putint(w.sizebuf[1:], i)
- w.sizebuf[0] = 0x80 + byte(s)
- w.str = append(w.str, w.sizebuf[:s+1]...)
- }
-}
-
-// list adds a new list header to the header stack. It returns the index
-// of the header. The caller must call listEnd with this index after encoding
-// the content of the list.
-func (w *encbuf) list() int {
- w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize})
- return len(w.lheads) - 1
-}
-
-func (w *encbuf) listEnd(index int) {
- lh := &w.lheads[index]
- lh.size = w.size() - lh.offset - lh.size
- if lh.size < 56 {
- w.lhsize++ // length encoded into kind tag
- } else {
- w.lhsize += 1 + intsize(uint64(lh.size))
- }
-}
-
-func (w *encbuf) size() int {
- return len(w.str) + w.lhsize
-}
-
-func (w *encbuf) toBytes() []byte {
- out := make([]byte, w.size())
- strpos := 0
- pos := 0
- for _, head := range w.lheads {
- // write string data before header
- n := copy(out[pos:], w.str[strpos:head.offset])
- pos += n
- strpos += n
- // write the header
- enc := head.encode(out[pos:])
- pos += len(enc)
- }
- // copy string data after the last list header
- copy(out[pos:], w.str[strpos:])
- return out
-}
-
-func (w *encbuf) toWriter(out io.Writer) (err error) {
- strpos := 0
- for _, head := range w.lheads {
- // write string data before header
- if head.offset-strpos > 0 {
- n, err := out.Write(w.str[strpos:head.offset])
- strpos += n
- if err != nil {
- return err
- }
- }
- // write the header
- enc := head.encode(w.sizebuf[:])
- if _, err = out.Write(enc); err != nil {
- return err
- }
- }
- if strpos < len(w.str) {
- // write string data after the last list header
- _, err = out.Write(w.str[strpos:])
- }
- return err
-}
-
-// encReader is the io.Reader returned by EncodeToReader.
-// It releases its encbuf at EOF.
-type encReader struct {
- buf *encbuf // the buffer we're reading from. this is nil when we're at EOF.
- lhpos int // index of list header that we're reading
- strpos int // current position in string buffer
- piece []byte // next piece to be read
-}
-
-func (r *encReader) Read(b []byte) (n int, err error) {
- for {
- if r.piece = r.next(); r.piece == nil {
- // Put the encode buffer back into the pool at EOF when it
- // is first encountered. Subsequent calls still return EOF
- // as the error but the buffer is no longer valid.
- if r.buf != nil {
- encbufPool.Put(r.buf)
- r.buf = nil
- }
- return n, io.EOF
- }
- nn := copy(b[n:], r.piece)
- n += nn
- if nn < len(r.piece) {
- // piece didn't fit, see you next time.
- r.piece = r.piece[nn:]
- return n, nil
- }
- r.piece = nil
- }
-}
-
-// next returns the next piece of data to be read.
-// it returns nil at EOF.
-func (r *encReader) next() []byte {
- switch {
- case r.buf == nil:
- return nil
-
- case r.piece != nil:
- // There is still data available for reading.
- return r.piece
-
- case r.lhpos < len(r.buf.lheads):
- // We're before the last list header.
- head := r.buf.lheads[r.lhpos]
- sizebefore := head.offset - r.strpos
- if sizebefore > 0 {
- // String data before header.
- p := r.buf.str[r.strpos:head.offset]
- r.strpos += sizebefore
- return p
- }
- r.lhpos++
- return head.encode(r.buf.sizebuf[:])
-
- case r.strpos < len(r.buf.str):
- // String data at the end, after all list headers.
- p := r.buf.str[r.strpos:]
- r.strpos = len(r.buf.str)
- return p
-
- default:
- return nil
- }
-}
-
var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
// makeWriter creates a writer function for the given type.
-func makeWriter(typ reflect.Type, ts tags) (writer, error) {
+func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
kind := typ.Kind()
switch {
case typ == rawValueType:
@@ -357,71 +166,45 @@ func makeWriter(typ reflect.Type, ts tags) (writer, error) {
}
}
-func writeRawValue(val reflect.Value, w *encbuf) error {
+func writeRawValue(val reflect.Value, w *encBuffer) error {
w.str = append(w.str, val.Bytes()...)
return nil
}
-func writeUint(val reflect.Value, w *encbuf) error {
- w.encodeUint(val.Uint())
+func writeUint(val reflect.Value, w *encBuffer) error {
+ w.writeUint64(val.Uint())
return nil
}
-func writeBool(val reflect.Value, w *encbuf) error {
- if val.Bool() {
- w.str = append(w.str, 0x01)
- } else {
- w.str = append(w.str, 0x80)
- }
+func writeBool(val reflect.Value, w *encBuffer) error {
+ w.writeBool(val.Bool())
return nil
}
-func writeBigIntPtr(val reflect.Value, w *encbuf) error {
+func writeBigIntPtr(val reflect.Value, w *encBuffer) error {
ptr := val.Interface().(*big.Int)
if ptr == nil {
w.str = append(w.str, 0x80)
return nil
}
- return writeBigInt(ptr, w)
+ if ptr.Sign() == -1 {
+ return ErrNegativeBigInt
+ }
+ w.writeBigInt(ptr)
+ return nil
}
-func writeBigIntNoPtr(val reflect.Value, w *encbuf) error {
+func writeBigIntNoPtr(val reflect.Value, w *encBuffer) error {
i := val.Interface().(big.Int)
- return writeBigInt(&i, w)
-}
-
-// wordBytes is the number of bytes in a big.Word
-const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
-
-func writeBigInt(i *big.Int, w *encbuf) error {
if i.Sign() == -1 {
- return fmt.Errorf("rlp: cannot encode negative *big.Int")
- }
- bitlen := i.BitLen()
- if bitlen <= 64 {
- w.encodeUint(i.Uint64())
- return nil
- }
- // Integer is larger than 64 bits, encode from i.Bits().
- // The minimal byte length is bitlen rounded up to the next
- // multiple of 8, divided by 8.
- length := ((bitlen + 7) & -8) >> 3
- w.encodeStringHeader(length)
- w.str = append(w.str, make([]byte, length)...)
- index := length
- buf := w.str[len(w.str)-length:]
- for _, d := range i.Bits() {
- for j := 0; j < wordBytes && index > 0; j++ {
- index--
- buf[index] = byte(d)
- d >>= 8
- }
+ return ErrNegativeBigInt
}
+ w.writeBigInt(&i)
return nil
}
-func writeBytes(val reflect.Value, w *encbuf) error {
- w.encodeString(val.Bytes())
+func writeBytes(val reflect.Value, w *encBuffer) error {
+ w.writeBytes(val.Bytes())
return nil
}
@@ -433,7 +216,7 @@ func makeByteArrayWriter(typ reflect.Type) writer {
return writeLengthOneByteArray
default:
length := typ.Len()
- return func(val reflect.Value, w *encbuf) error {
+ return func(val reflect.Value, w *encBuffer) error {
if !val.CanAddr() {
// Getting the byte slice of val requires it to be addressable. Make it
// addressable by copying.
@@ -449,12 +232,12 @@ func makeByteArrayWriter(typ reflect.Type) writer {
}
}
-func writeLengthZeroByteArray(val reflect.Value, w *encbuf) error {
+func writeLengthZeroByteArray(val reflect.Value, w *encBuffer) error {
w.str = append(w.str, 0x80)
return nil
}
-func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
+func writeLengthOneByteArray(val reflect.Value, w *encBuffer) error {
b := byte(val.Index(0).Uint())
if b <= 0x7f {
w.str = append(w.str, b)
@@ -464,7 +247,7 @@ func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
return nil
}
-func writeString(val reflect.Value, w *encbuf) error {
+func writeString(val reflect.Value, w *encBuffer) error {
s := val.String()
if len(s) == 1 && s[0] <= 0x7f {
// fits single byte, no string header
@@ -476,7 +259,7 @@ func writeString(val reflect.Value, w *encbuf) error {
return nil
}
-func writeInterface(val reflect.Value, w *encbuf) error {
+func writeInterface(val reflect.Value, w *encBuffer) error {
if val.IsNil() {
// Write empty list. This is consistent with the previous RLP
// encoder that we had and should therefore avoid any
@@ -492,17 +275,17 @@ func writeInterface(val reflect.Value, w *encbuf) error {
return writer(eval, w)
}
-func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
- etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
+func makeSliceWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
+ etypeinfo := theTC.infoWhileGenerating(typ.Elem(), rlpstruct.Tags{})
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
var wfn writer
- if ts.tail {
+ if ts.Tail {
// This is for struct tail slices.
// w.list is not called for them.
- wfn = func(val reflect.Value, w *encbuf) error {
+ wfn = func(val reflect.Value, w *encBuffer) error {
vlen := val.Len()
for i := 0; i < vlen; i++ {
if err := etypeinfo.writer(val.Index(i), w); err != nil {
@@ -513,7 +296,7 @@ func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
}
} else {
// This is for regular slices and arrays.
- wfn = func(val reflect.Value, w *encbuf) error {
+ wfn = func(val reflect.Value, w *encBuffer) error {
vlen := val.Len()
if vlen == 0 {
w.str = append(w.str, 0xC0)
@@ -547,7 +330,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
firstOptionalField := firstOptionalField(fields)
if firstOptionalField == len(fields) {
// This is the writer function for structs without any optional fields.
- writer = func(val reflect.Value, w *encbuf) error {
+ writer = func(val reflect.Value, w *encBuffer) error {
lh := w.list()
for _, f := range fields {
if err := f.info.writer(val.Field(f.index), w); err != nil {
@@ -560,7 +343,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
} else {
// If there are any "optional" fields, the writer needs to perform additional
// checks to determine the output list length.
- writer = func(val reflect.Value, w *encbuf) error {
+ writer = func(val reflect.Value, w *encBuffer) error {
lastField := len(fields) - 1
for ; lastField >= firstOptionalField; lastField-- {
if !val.Field(fields[lastField].index).IsZero() {
@@ -580,33 +363,18 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
return writer, nil
}
-// nilEncoding returns the encoded value of a nil pointer.
-func nilEncoding(typ reflect.Type, ts tags) uint8 {
- var nilKind Kind
- if ts.nilOK {
- nilKind = ts.nilKind // use struct tag if provided
- } else {
- nilKind = defaultNilKind(typ.Elem())
+func makePtrWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
+ nilEncoding := byte(0xC0)
+ if typeNilKind(typ.Elem(), ts) == String {
+ nilEncoding = 0x80
}
- switch nilKind {
- case String:
- return 0x80
- case List:
- return 0xC0
- default:
- panic(fmt.Errorf("rlp: invalid nil kind %d", nilKind))
- }
-}
-
-func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
- etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
+ etypeinfo := theTC.infoWhileGenerating(typ.Elem(), rlpstruct.Tags{})
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
- nilEncoding := nilEncoding(typ, ts)
- writer := func(val reflect.Value, w *encbuf) error {
+ writer := func(val reflect.Value, w *encBuffer) error {
if ev := val.Elem(); ev.IsValid() {
return etypeinfo.writer(ev, w)
}
@@ -618,11 +386,11 @@ func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
func makeEncoderWriter(typ reflect.Type) writer {
if typ.Implements(encoderInterface) {
- return func(val reflect.Value, w *encbuf) error {
+ return func(val reflect.Value, w *encBuffer) error {
return val.Interface().(Encoder).EncodeRLP(w)
}
}
- w := func(val reflect.Value, w *encbuf) error {
+ w := func(val reflect.Value, w *encBuffer) error {
if !val.CanAddr() {
// package json simply doesn't call MarshalJSON for this case, but encodes the
// value as if it didn't implement the interface. We don't want to handle it that
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index a63743440d39..1d715e3776c3 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -145,7 +145,8 @@ var encTests = []encTest{
{val: *big.NewInt(0xFFFFFF), output: "83FFFFFF"},
// negative ints are not supported
- {val: big.NewInt(-1), error: "rlp: cannot encode negative *big.Int"},
+ {val: big.NewInt(-1), error: "rlp: cannot encode negative big.Int"},
+ {val: *big.NewInt(-1), error: "rlp: cannot encode negative big.Int"},
// byte arrays
{val: [0]byte{}, output: "80"},
@@ -398,6 +399,21 @@ func TestEncodeToBytes(t *testing.T) {
runEncTests(t, EncodeToBytes)
}
+func TestEncodeAppendToBytes(t *testing.T) {
+ buffer := make([]byte, 20)
+ runEncTests(t, func(val interface{}) ([]byte, error) {
+ w := NewEncoderBuffer(nil)
+ defer w.Flush()
+
+ err := Encode(w, val)
+ if err != nil {
+ return nil, err
+ }
+ output := w.AppendToBytes(buffer[:0])
+ return output, nil
+ })
+}
+
func TestEncodeToReader(t *testing.T) {
runEncTests(t, func(val interface{}) ([]byte, error) {
_, r, err := EncodeToReader(val)
diff --git a/rlp/encoder_example_test.go b/rlp/encoder_example_test.go
index 42c1c5c89064..4cd3cb867375 100644
--- a/rlp/encoder_example_test.go
+++ b/rlp/encoder_example_test.go
@@ -14,11 +14,13 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package rlp
+package rlp_test
import (
"fmt"
"io"
+
+ "github.com/ethereum/go-ethereum/rlp"
)
type MyCoolType struct {
@@ -28,16 +30,16 @@ type MyCoolType struct {
// EncodeRLP writes x as RLP list [a, b] that omits the Name field.
func (x *MyCoolType) EncodeRLP(w io.Writer) (err error) {
- return Encode(w, []uint{x.a, x.b})
+ return rlp.Encode(w, []uint{x.a, x.b})
}
func ExampleEncoder() {
var t *MyCoolType // t is nil pointer to MyCoolType
- bytes, _ := EncodeToBytes(t)
+ bytes, _ := rlp.EncodeToBytes(t)
fmt.Printf("%v → %X\n", t, bytes)
t = &MyCoolType{Name: "foobar", a: 5, b: 6}
- bytes, _ = EncodeToBytes(t)
+ bytes, _ = rlp.EncodeToBytes(t)
fmt.Printf("%v → %X\n", t, bytes)
// Output:
diff --git a/rlp/internal/rlpstruct/rlpstruct.go b/rlp/internal/rlpstruct/rlpstruct.go
new file mode 100644
index 000000000000..1ebaa960e38c
--- /dev/null
+++ b/rlp/internal/rlpstruct/rlpstruct.go
@@ -0,0 +1,213 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// Package rlpstruct implements struct processing for RLP encoding/decoding.
+//
+// In particular, this package handles all rules around field filtering,
+// struct tags and nil value determination.
+package rlpstruct
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Field represents a struct field.
+type Field struct {
+ Name string
+ Index int
+ Exported bool
+ Type Type
+ Tag string
+}
+
+// Type represents the attributes of a Go type.
+type Type struct {
+ Name string
+ Kind reflect.Kind
+ IsEncoder bool // whether type implements rlp.Encoder
+ IsDecoder bool // whether type implements rlp.Decoder
+ Elem *Type // non-nil for Kind values of Ptr, Slice, Array
+}
+
+// defaultNilValue determines whether a nil pointer to t encodes/decodes
+// as an empty string or empty list.
+func (t Type) DefaultNilValue() NilKind {
+ k := t.Kind
+ if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(t) {
+ return NilKindString
+ }
+ return NilKindList
+}
+
+// NilKind is the RLP value encoded in place of nil pointers.
+type NilKind uint8
+
+const (
+ NilKindString NilKind = 0x80
+ NilKindList NilKind = 0xC0
+)
+
+// Tags represents struct tags.
+type Tags struct {
+ // rlp:"nil" controls whether empty input results in a nil pointer.
+ // nilKind is the kind of empty value allowed for the field.
+ NilKind NilKind
+ NilOK bool
+
+ // rlp:"optional" allows for a field to be missing in the input list.
+ // If this is set, all subsequent fields must also be optional.
+ Optional bool
+
+ // rlp:"tail" controls whether this field swallows additional list elements. It can
+ // only be set for the last field, which must be of slice type.
+ Tail bool
+
+ // rlp:"-" ignores fields.
+ Ignored bool
+}
+
+// TagError is raised for invalid struct tags.
+type TagError struct {
+ StructType string
+
+ // These are set by this package.
+ Field string
+ Tag string
+ Err string
+}
+
+func (e TagError) Error() string {
+ field := "field " + e.Field
+ if e.StructType != "" {
+ field = e.StructType + "." + e.Field
+ }
+ return fmt.Sprintf("rlp: invalid struct tag %q for %s (%s)", e.Tag, field, e.Err)
+}
+
+// ProcessFields filters the given struct fields, returning only fields
+// that should be considered for encoding/decoding.
+func ProcessFields(allFields []Field) ([]Field, []Tags, error) {
+ lastPublic := lastPublicField(allFields)
+
+ // Gather all exported fields and their tags.
+ var fields []Field
+ var tags []Tags
+ for _, field := range allFields {
+ if !field.Exported {
+ continue
+ }
+ ts, err := parseTag(field, lastPublic)
+ if err != nil {
+ return nil, nil, err
+ }
+ if ts.Ignored {
+ continue
+ }
+ fields = append(fields, field)
+ tags = append(tags, ts)
+ }
+
+ // Verify optional field consistency. If any optional field exists,
+ // all fields after it must also be optional. Note: optional + tail
+ // is supported.
+ var anyOptional bool
+ var firstOptionalName string
+ for i, ts := range tags {
+ name := fields[i].Name
+ if ts.Optional || ts.Tail {
+ if !anyOptional {
+ firstOptionalName = name
+ }
+ anyOptional = true
+ } else {
+ if anyOptional {
+ msg := fmt.Sprintf("must be optional because preceding field %q is optional", firstOptionalName)
+ return nil, nil, TagError{Field: name, Err: msg}
+ }
+ }
+ }
+ return fields, tags, nil
+}
+
+func parseTag(field Field, lastPublic int) (Tags, error) {
+ name := field.Name
+ tag := reflect.StructTag(field.Tag)
+ var ts Tags
+ for _, t := range strings.Split(tag.Get("rlp"), ",") {
+ switch t = strings.TrimSpace(t); t {
+ case "":
+ // empty tag is allowed for some reason
+ case "-":
+ ts.Ignored = true
+ case "nil", "nilString", "nilList":
+ ts.NilOK = true
+ if field.Type.Kind != reflect.Ptr {
+ return ts, TagError{Field: name, Tag: t, Err: "field is not a pointer"}
+ }
+ switch t {
+ case "nil":
+ ts.NilKind = field.Type.Elem.DefaultNilValue()
+ case "nilString":
+ ts.NilKind = NilKindString
+ case "nilList":
+ ts.NilKind = NilKindList
+ }
+ case "optional":
+ ts.Optional = true
+ if ts.Tail {
+ return ts, TagError{Field: name, Tag: t, Err: `also has "tail" tag`}
+ }
+ case "tail":
+ ts.Tail = true
+ if field.Index != lastPublic {
+ return ts, TagError{Field: name, Tag: t, Err: "must be on last field"}
+ }
+ if ts.Optional {
+ return ts, TagError{Field: name, Tag: t, Err: `also has "optional" tag`}
+ }
+ if field.Type.Kind != reflect.Slice {
+ return ts, TagError{Field: name, Tag: t, Err: "field type is not slice"}
+ }
+ default:
+ return ts, TagError{Field: name, Tag: t, Err: "unknown tag"}
+ }
+ }
+ return ts, nil
+}
+
+func lastPublicField(fields []Field) int {
+ last := 0
+ for _, f := range fields {
+ if f.Exported {
+ last = f.Index
+ }
+ }
+ return last
+}
+
+func isUint(k reflect.Kind) bool {
+ return k >= reflect.Uint && k <= reflect.Uintptr
+}
+
+func isByte(typ Type) bool {
+ return typ.Kind == reflect.Uint8 && !typ.IsEncoder
+}
+
+func isByteArray(typ Type) bool {
+ return (typ.Kind == reflect.Slice || typ.Kind == reflect.Array) && isByte(*typ.Elem)
+}
diff --git a/rlp/rlpgen/gen.go b/rlp/rlpgen/gen.go
new file mode 100644
index 000000000000..b36b26947834
--- /dev/null
+++ b/rlp/rlpgen/gen.go
@@ -0,0 +1,735 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/types"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
+)
+
+// buildContext keeps the data needed for make*Op.
+type buildContext struct {
+ topType *types.Named // the type we're creating methods for
+
+ encoderIface *types.Interface
+ decoderIface *types.Interface
+ rawValueType *types.Named
+
+ typeToStructCache map[types.Type]*rlpstruct.Type
+}
+
+func newBuildContext(packageRLP *types.Package) *buildContext {
+ enc := packageRLP.Scope().Lookup("Encoder").Type().Underlying()
+ dec := packageRLP.Scope().Lookup("Decoder").Type().Underlying()
+ rawv := packageRLP.Scope().Lookup("RawValue").Type()
+ return &buildContext{
+ typeToStructCache: make(map[types.Type]*rlpstruct.Type),
+ encoderIface: enc.(*types.Interface),
+ decoderIface: dec.(*types.Interface),
+ rawValueType: rawv.(*types.Named),
+ }
+}
+
+func (bctx *buildContext) isEncoder(typ types.Type) bool {
+ return types.Implements(typ, bctx.encoderIface)
+}
+
+func (bctx *buildContext) isDecoder(typ types.Type) bool {
+ return types.Implements(typ, bctx.decoderIface)
+}
+
+// typeToStructType converts typ to rlpstruct.Type.
+func (bctx *buildContext) typeToStructType(typ types.Type) *rlpstruct.Type {
+ if prev := bctx.typeToStructCache[typ]; prev != nil {
+ return prev // short-circuit for recursive types.
+ }
+
+ // Resolve named types to their underlying type, but keep the name.
+ name := types.TypeString(typ, nil)
+ for {
+ utype := typ.Underlying()
+ if utype == typ {
+ break
+ }
+ typ = utype
+ }
+
+ // Create the type and store it in cache.
+ t := &rlpstruct.Type{
+ Name: name,
+ Kind: typeReflectKind(typ),
+ IsEncoder: bctx.isEncoder(typ),
+ IsDecoder: bctx.isDecoder(typ),
+ }
+ bctx.typeToStructCache[typ] = t
+
+ // Assign element type.
+ switch typ.(type) {
+ case *types.Array, *types.Slice, *types.Pointer:
+ etype := typ.(interface{ Elem() types.Type }).Elem()
+ t.Elem = bctx.typeToStructType(etype)
+ }
+ return t
+}
+
+// genContext is passed to the gen* methods of op when generating
+// the output code. It tracks packages to be imported by the output
+// file and assigns unique names of temporary variables.
+type genContext struct {
+ inPackage *types.Package
+ imports map[string]struct{}
+ tempCounter int
+}
+
+func newGenContext(inPackage *types.Package) *genContext {
+ return &genContext{
+ inPackage: inPackage,
+ imports: make(map[string]struct{}),
+ }
+}
+
+func (ctx *genContext) temp() string {
+ v := fmt.Sprintf("_tmp%d", ctx.tempCounter)
+ ctx.tempCounter++
+ return v
+}
+
+func (ctx *genContext) resetTemp() {
+ ctx.tempCounter = 0
+}
+
+func (ctx *genContext) addImport(path string) {
+ if path == ctx.inPackage.Path() {
+ return // avoid importing the package that we're generating in.
+ }
+ // TODO: renaming?
+ ctx.imports[path] = struct{}{}
+}
+
+// importsList returns all packages that need to be imported.
+func (ctx *genContext) importsList() []string {
+ imp := make([]string, 0, len(ctx.imports))
+ for k := range ctx.imports {
+ imp = append(imp, k)
+ }
+ sort.Strings(imp)
+ return imp
+}
+
+// qualify is the types.Qualifier used for printing types.
+func (ctx *genContext) qualify(pkg *types.Package) string {
+ if pkg.Path() == ctx.inPackage.Path() {
+ return ""
+ }
+ ctx.addImport(pkg.Path())
+ // TODO: renaming?
+ return pkg.Name()
+}
+
+type op interface {
+ // genWrite creates the encoder. The generated code should write v,
+ // which is any Go expression, to the rlp.EncoderBuffer 'w'.
+ genWrite(ctx *genContext, v string) string
+
+ // genDecode creates the decoder. The generated code should read
+ // a value from the rlp.Stream 'dec' and store it to dst.
+ genDecode(ctx *genContext) (string, string)
+}
+
+// basicOp handles basic types bool, uint*, string.
+type basicOp struct {
+ typ types.Type
+ writeMethod string // calle write the value
+ writeArgType types.Type // parameter type of writeMethod
+ decMethod string
+ decResultType types.Type // return type of decMethod
+ decUseBitSize bool // if true, result bit size is appended to decMethod
+}
+
+func (*buildContext) makeBasicOp(typ *types.Basic) (op, error) {
+ op := basicOp{typ: typ}
+ kind := typ.Kind()
+ switch {
+ case kind == types.Bool:
+ op.writeMethod = "WriteBool"
+ op.writeArgType = types.Typ[types.Bool]
+ op.decMethod = "Bool"
+ op.decResultType = types.Typ[types.Bool]
+ case kind >= types.Uint8 && kind <= types.Uint64:
+ op.writeMethod = "WriteUint64"
+ op.writeArgType = types.Typ[types.Uint64]
+ op.decMethod = "Uint"
+ op.decResultType = typ
+ op.decUseBitSize = true
+ case kind == types.String:
+ op.writeMethod = "WriteString"
+ op.writeArgType = types.Typ[types.String]
+ op.decMethod = "String"
+ op.decResultType = types.Typ[types.String]
+ default:
+ return nil, fmt.Errorf("unhandled basic type: %v", typ)
+ }
+ return op, nil
+}
+
+func (*buildContext) makeByteSliceOp(typ *types.Slice) op {
+ if !isByte(typ.Elem()) {
+ panic("non-byte slice type in makeByteSliceOp")
+ }
+ bslice := types.NewSlice(types.Typ[types.Uint8])
+ return basicOp{
+ typ: typ,
+ writeMethod: "WriteBytes",
+ writeArgType: bslice,
+ decMethod: "Bytes",
+ decResultType: bslice,
+ }
+}
+
+func (bctx *buildContext) makeRawValueOp() op {
+ bslice := types.NewSlice(types.Typ[types.Uint8])
+ return basicOp{
+ typ: bctx.rawValueType,
+ writeMethod: "Write",
+ writeArgType: bslice,
+ decMethod: "Raw",
+ decResultType: bslice,
+ }
+}
+
+func (op basicOp) writeNeedsConversion() bool {
+ return !types.AssignableTo(op.typ, op.writeArgType)
+}
+
+func (op basicOp) decodeNeedsConversion() bool {
+ return !types.AssignableTo(op.decResultType, op.typ)
+}
+
+func (op basicOp) genWrite(ctx *genContext, v string) string {
+ if op.writeNeedsConversion() {
+ v = fmt.Sprintf("%s(%s)", op.writeArgType, v)
+ }
+ return fmt.Sprintf("w.%s(%s)\n", op.writeMethod, v)
+}
+
+func (op basicOp) genDecode(ctx *genContext) (string, string) {
+ var (
+ resultV = ctx.temp()
+ result = resultV
+ method = op.decMethod
+ )
+ if op.decUseBitSize {
+ // Note: For now, this only works for platform-independent integer
+ // sizes. makeBasicOp forbids the platform-dependent types.
+ var sizes types.StdSizes
+ method = fmt.Sprintf("%s%d", op.decMethod, sizes.Sizeof(op.typ)*8)
+ }
+
+ // Call the decoder method.
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s, err := dec.%s()\n", resultV, method)
+ fmt.Fprintf(&b, "if err != nil { return err }\n")
+ if op.decodeNeedsConversion() {
+ conv := ctx.temp()
+ fmt.Fprintf(&b, "%s := %s(%s)\n", conv, types.TypeString(op.typ, ctx.qualify), resultV)
+ result = conv
+ }
+ return result, b.String()
+}
+
+// byteArrayOp handles [...]byte.
+type byteArrayOp struct {
+ typ types.Type
+ name types.Type // name != typ for named byte array types (e.g. common.Address)
+}
+
+func (bctx *buildContext) makeByteArrayOp(name *types.Named, typ *types.Array) byteArrayOp {
+ nt := types.Type(name)
+ if name == nil {
+ nt = typ
+ }
+ return byteArrayOp{typ, nt}
+}
+
+func (op byteArrayOp) genWrite(ctx *genContext, v string) string {
+ return fmt.Sprintf("w.WriteBytes(%s[:])\n", v)
+}
+
+func (op byteArrayOp) genDecode(ctx *genContext) (string, string) {
+ var resultV = ctx.temp()
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(op.name, ctx.qualify))
+ fmt.Fprintf(&b, "if err := dec.ReadBytes(%s[:]); err != nil { return err }\n", resultV)
+ return resultV, b.String()
+}
+
+// bigIntNoPtrOp handles non-pointer big.Int.
+// This exists because big.Int has it's own decoder operation on rlp.Stream,
+// but the decode method returns *big.Int, so it needs to be dereferenced.
+type bigIntOp struct {
+ pointer bool
+}
+
+func (op bigIntOp) genWrite(ctx *genContext, v string) string {
+ var b bytes.Buffer
+
+ fmt.Fprintf(&b, "if %s.Sign() == -1 {\n", v)
+ fmt.Fprintf(&b, " return rlp.ErrNegativeBigInt\n")
+ fmt.Fprintf(&b, "}\n")
+ dst := v
+ if !op.pointer {
+ dst = "&" + v
+ }
+ fmt.Fprintf(&b, "w.WriteBigInt(%s)\n", dst)
+
+ // Wrap with nil check.
+ if op.pointer {
+ code := b.String()
+ b.Reset()
+ fmt.Fprintf(&b, "if %s == nil {\n", v)
+ fmt.Fprintf(&b, " w.Write(rlp.EmptyString)")
+ fmt.Fprintf(&b, "} else {\n")
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, "}\n")
+ }
+
+ return b.String()
+}
+
+func (op bigIntOp) genDecode(ctx *genContext) (string, string) {
+ var resultV = ctx.temp()
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s, err := dec.BigInt()\n", resultV)
+ fmt.Fprintf(&b, "if err != nil { return err }\n")
+
+ result := resultV
+ if !op.pointer {
+ result = "(*" + resultV + ")"
+ }
+ return result, b.String()
+}
+
+// encoderDecoderOp handles rlp.Encoder and rlp.Decoder.
+// In order to be used with this, the type must implement both interfaces.
+// This restriction may be lifted in the future by creating separate ops for
+// encoding and decoding.
+type encoderDecoderOp struct {
+ typ types.Type
+}
+
+func (op encoderDecoderOp) genWrite(ctx *genContext, v string) string {
+ return fmt.Sprintf("if err := %s.EncodeRLP(w); err != nil { return err }\n", v)
+}
+
+func (op encoderDecoderOp) genDecode(ctx *genContext) (string, string) {
+ // DecodeRLP must have pointer receiver, and this is verified in makeOp.
+ etyp := op.typ.(*types.Pointer).Elem()
+ var resultV = ctx.temp()
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s := new(%s)\n", resultV, types.TypeString(etyp, ctx.qualify))
+ fmt.Fprintf(&b, "if err := %s.DecodeRLP(dec); err != nil { return err }\n", resultV)
+ return resultV, b.String()
+}
+
+// ptrOp handles pointer types.
+type ptrOp struct {
+ elemTyp types.Type
+ elem op
+ nilOK bool
+ nilValue rlpstruct.NilKind
+}
+
+func (bctx *buildContext) makePtrOp(elemTyp types.Type, tags rlpstruct.Tags) (op, error) {
+ elemOp, err := bctx.makeOp(nil, elemTyp, rlpstruct.Tags{})
+ if err != nil {
+ return nil, err
+ }
+ op := ptrOp{elemTyp: elemTyp, elem: elemOp}
+
+ // Determine nil value.
+ if tags.NilOK {
+ op.nilOK = true
+ op.nilValue = tags.NilKind
+ } else {
+ styp := bctx.typeToStructType(elemTyp)
+ op.nilValue = styp.DefaultNilValue()
+ }
+ return op, nil
+}
+
+func (op ptrOp) genWrite(ctx *genContext, v string) string {
+ // Note: in writer functions, accesses to v are read-only, i.e. v is any Go
+ // expression. To make all accesses work through the pointer, we substitute
+ // v with (*v). This is required for most accesses including `v`, `call(v)`,
+ // and `v[index]` on slices.
+ //
+ // For `v.field` and `v[:]` on arrays, the dereference operation is not required.
+ var vv string
+ _, isStruct := op.elem.(structOp)
+ _, isByteArray := op.elem.(byteArrayOp)
+ if isStruct || isByteArray {
+ vv = v
+ } else {
+ vv = fmt.Sprintf("(*%s)", v)
+ }
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "if %s == nil {\n", v)
+ fmt.Fprintf(&b, " w.Write([]byte{0x%X})\n", op.nilValue)
+ fmt.Fprintf(&b, "} else {\n")
+ fmt.Fprintf(&b, " %s", op.elem.genWrite(ctx, vv))
+ fmt.Fprintf(&b, "}\n")
+ return b.String()
+}
+
+func (op ptrOp) genDecode(ctx *genContext) (string, string) {
+ result, code := op.elem.genDecode(ctx)
+ if !op.nilOK {
+ // If nil pointers are not allowed, we can just decode the element.
+ return "&" + result, code
+ }
+
+ // nil is allowed, so check the kind and size first.
+ // If size is zero and kind matches the nilKind of the type,
+ // the value decodes as a nil pointer.
+ var (
+ resultV = ctx.temp()
+ kindV = ctx.temp()
+ sizeV = ctx.temp()
+ wantKind string
+ )
+ if op.nilValue == rlpstruct.NilKindList {
+ wantKind = "rlp.List"
+ } else {
+ wantKind = "rlp.String"
+ }
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(types.NewPointer(op.elemTyp), ctx.qualify))
+ fmt.Fprintf(&b, "if %s, %s, err := dec.Kind(); err != nil {\n", kindV, sizeV)
+ fmt.Fprintf(&b, " return err\n")
+ fmt.Fprintf(&b, "} else if %s != 0 || %s != %s {\n", sizeV, kindV, wantKind)
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, " %s = &%s\n", resultV, result)
+ fmt.Fprintf(&b, "}\n")
+ return resultV, b.String()
+}
+
+// structOp handles struct types.
+type structOp struct {
+ named *types.Named
+ typ *types.Struct
+ fields []*structField
+ optionalFields []*structField
+}
+
+type structField struct {
+ name string
+ typ types.Type
+ elem op
+}
+
+func (bctx *buildContext) makeStructOp(named *types.Named, typ *types.Struct) (op, error) {
+ // Convert fields to []rlpstruct.Field.
+ var allStructFields []rlpstruct.Field
+ for i := 0; i < typ.NumFields(); i++ {
+ f := typ.Field(i)
+ allStructFields = append(allStructFields, rlpstruct.Field{
+ Name: f.Name(),
+ Exported: f.Exported(),
+ Index: i,
+ Tag: typ.Tag(i),
+ Type: *bctx.typeToStructType(f.Type()),
+ })
+ }
+
+ // Filter/validate fields.
+ fields, tags, err := rlpstruct.ProcessFields(allStructFields)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create field ops.
+ var op = structOp{named: named, typ: typ}
+ for i, field := range fields {
+ // Advanced struct tags are not supported yet.
+ tag := tags[i]
+ if err := checkUnsupportedTags(field.Name, tag); err != nil {
+ return nil, err
+ }
+ typ := typ.Field(field.Index).Type()
+ elem, err := bctx.makeOp(nil, typ, tags[i])
+ if err != nil {
+ return nil, fmt.Errorf("field %s: %v", field.Name, err)
+ }
+ f := &structField{name: field.Name, typ: typ, elem: elem}
+ if tag.Optional {
+ op.optionalFields = append(op.optionalFields, f)
+ } else {
+ op.fields = append(op.fields, f)
+ }
+ }
+ return op, nil
+}
+
+func checkUnsupportedTags(field string, tag rlpstruct.Tags) error {
+ if tag.Tail {
+ return fmt.Errorf(`field %s has unsupported struct tag "tail"`, field)
+ }
+ return nil
+}
+
+func (op structOp) genWrite(ctx *genContext, v string) string {
+ var b bytes.Buffer
+ var listMarker = ctx.temp()
+ fmt.Fprintf(&b, "%s := w.List()\n", listMarker)
+ for _, field := range op.fields {
+ selector := v + "." + field.name
+ fmt.Fprint(&b, field.elem.genWrite(ctx, selector))
+ }
+ op.writeOptionalFields(&b, ctx, v)
+ fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker)
+ return b.String()
+}
+
+func (op structOp) writeOptionalFields(b *bytes.Buffer, ctx *genContext, v string) {
+ if len(op.optionalFields) == 0 {
+ return
+ }
+ // First check zero-ness of all optional fields.
+ var zeroV = make([]string, len(op.optionalFields))
+ for i, field := range op.optionalFields {
+ selector := v + "." + field.name
+ zeroV[i] = ctx.temp()
+ fmt.Fprintf(b, "%s := %s\n", zeroV[i], nonZeroCheck(selector, field.typ, ctx.qualify))
+ }
+ // Now write the fields.
+ for i, field := range op.optionalFields {
+ selector := v + "." + field.name
+ cond := ""
+ for j := i; j < len(op.optionalFields); j++ {
+ if j > i {
+ cond += " || "
+ }
+ cond += zeroV[j]
+ }
+ fmt.Fprintf(b, "if %s {\n", cond)
+ fmt.Fprint(b, field.elem.genWrite(ctx, selector))
+ fmt.Fprintf(b, "}\n")
+ }
+}
+
+func (op structOp) genDecode(ctx *genContext) (string, string) {
+ // Get the string representation of the type.
+ // Here, named types are handled separately because the output
+ // would contain a copy of the struct definition otherwise.
+ var typeName string
+ if op.named != nil {
+ typeName = types.TypeString(op.named, ctx.qualify)
+ } else {
+ typeName = types.TypeString(op.typ, ctx.qualify)
+ }
+
+ // Create struct object.
+ var resultV = ctx.temp()
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", resultV, typeName)
+
+ // Decode fields.
+ fmt.Fprintf(&b, "{\n")
+ fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n")
+ for _, field := range op.fields {
+ result, code := field.elem.genDecode(ctx)
+ fmt.Fprintf(&b, "// %s:\n", field.name)
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, "%s.%s = %s\n", resultV, field.name, result)
+ }
+ op.decodeOptionalFields(&b, ctx, resultV)
+ fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n")
+ fmt.Fprintf(&b, "}\n")
+ return resultV, b.String()
+}
+
+func (op structOp) decodeOptionalFields(b *bytes.Buffer, ctx *genContext, resultV string) {
+ var suffix bytes.Buffer
+ for _, field := range op.optionalFields {
+ result, code := field.elem.genDecode(ctx)
+ fmt.Fprintf(b, "// %s:\n", field.name)
+ fmt.Fprintf(b, "if dec.MoreDataInList() {\n")
+ fmt.Fprint(b, code)
+ fmt.Fprintf(b, "%s.%s = %s\n", resultV, field.name, result)
+ fmt.Fprintf(&suffix, "}\n")
+ }
+ suffix.WriteTo(b)
+}
+
+// sliceOp handles slice types.
+type sliceOp struct {
+ typ *types.Slice
+ elemOp op
+}
+
+func (bctx *buildContext) makeSliceOp(typ *types.Slice) (op, error) {
+ elemOp, err := bctx.makeOp(nil, typ.Elem(), rlpstruct.Tags{})
+ if err != nil {
+ return nil, err
+ }
+ return sliceOp{typ: typ, elemOp: elemOp}, nil
+}
+
+func (op sliceOp) genWrite(ctx *genContext, v string) string {
+ var (
+ listMarker = ctx.temp() // holds return value of w.List()
+ iterElemV = ctx.temp() // iteration variable
+ elemCode = op.elemOp.genWrite(ctx, iterElemV)
+ )
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s := w.List()\n", listMarker)
+ fmt.Fprintf(&b, "for _, %s := range %s {\n", iterElemV, v)
+ fmt.Fprint(&b, elemCode)
+ fmt.Fprintf(&b, "}\n")
+ fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker)
+ return b.String()
+}
+
+func (op sliceOp) genDecode(ctx *genContext) (string, string) {
+ var sliceV = ctx.temp() // holds the output slice
+ elemResult, elemCode := op.elemOp.genDecode(ctx)
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", sliceV, types.TypeString(op.typ, ctx.qualify))
+ fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n")
+ fmt.Fprintf(&b, "for dec.MoreDataInList() {\n")
+ fmt.Fprintf(&b, " %s", elemCode)
+ fmt.Fprintf(&b, " %s = append(%s, %s)\n", sliceV, sliceV, elemResult)
+ fmt.Fprintf(&b, "}\n")
+ fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n")
+ return sliceV, b.String()
+}
+
+func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstruct.Tags) (op, error) {
+ switch typ := typ.(type) {
+ case *types.Named:
+ if isBigInt(typ) {
+ return bigIntOp{}, nil
+ }
+ if typ == bctx.rawValueType {
+ return bctx.makeRawValueOp(), nil
+ }
+ if bctx.isDecoder(typ) {
+ return nil, fmt.Errorf("type %v implements rlp.Decoder with non-pointer receiver", typ)
+ }
+ // TODO: same check for encoder?
+ return bctx.makeOp(typ, typ.Underlying(), tags)
+ case *types.Pointer:
+ if isBigInt(typ.Elem()) {
+ return bigIntOp{pointer: true}, nil
+ }
+ // Encoder/Decoder interfaces.
+ if bctx.isEncoder(typ) {
+ if bctx.isDecoder(typ) {
+ return encoderDecoderOp{typ}, nil
+ }
+ return nil, fmt.Errorf("type %v implements rlp.Encoder but not rlp.Decoder", typ)
+ }
+ if bctx.isDecoder(typ) {
+ return nil, fmt.Errorf("type %v implements rlp.Decoder but not rlp.Encoder", typ)
+ }
+ // Default pointer handling.
+ return bctx.makePtrOp(typ.Elem(), tags)
+ case *types.Basic:
+ return bctx.makeBasicOp(typ)
+ case *types.Struct:
+ return bctx.makeStructOp(name, typ)
+ case *types.Slice:
+ etyp := typ.Elem()
+ if isByte(etyp) && !bctx.isEncoder(etyp) {
+ return bctx.makeByteSliceOp(typ), nil
+ }
+ return bctx.makeSliceOp(typ)
+ case *types.Array:
+ etyp := typ.Elem()
+ if isByte(etyp) && !bctx.isEncoder(etyp) {
+ return bctx.makeByteArrayOp(name, typ), nil
+ }
+ return nil, fmt.Errorf("unhandled array type: %v", typ)
+ default:
+ return nil, fmt.Errorf("unhandled type: %v", typ)
+ }
+}
+
+// generateDecoder generates the DecodeRLP method on 'typ'.
+func generateDecoder(ctx *genContext, typ string, op op) []byte {
+ ctx.resetTemp()
+ ctx.addImport(pathOfPackageRLP)
+
+ result, code := op.genDecode(ctx)
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "func (obj *%s) DecodeRLP(dec *rlp.Stream) error {\n", typ)
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, " *obj = %s\n", result)
+ fmt.Fprintf(&b, " return nil\n")
+ fmt.Fprintf(&b, "}\n")
+ return b.Bytes()
+}
+
+// generateEncoder generates the EncodeRLP method on 'typ'.
+func generateEncoder(ctx *genContext, typ string, op op) []byte {
+ ctx.resetTemp()
+ ctx.addImport("io")
+ ctx.addImport(pathOfPackageRLP)
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ)
+ fmt.Fprintf(&b, " w := rlp.NewEncoderBuffer(_w)\n")
+ fmt.Fprint(&b, op.genWrite(ctx, "obj"))
+ fmt.Fprintf(&b, " return w.Flush()\n")
+ fmt.Fprintf(&b, "}\n")
+ return b.Bytes()
+}
+
+func (bctx *buildContext) generate(typ *types.Named, encoder, decoder bool) ([]byte, error) {
+ bctx.topType = typ
+
+ pkg := typ.Obj().Pkg()
+ op, err := bctx.makeOp(nil, typ, rlpstruct.Tags{})
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ ctx = newGenContext(pkg)
+ encSource []byte
+ decSource []byte
+ )
+ if encoder {
+ encSource = generateEncoder(ctx, typ.Obj().Name(), op)
+ }
+ if decoder {
+ decSource = generateDecoder(ctx, typ.Obj().Name(), op)
+ }
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "package %s\n\n", pkg.Name())
+ for _, imp := range ctx.importsList() {
+ fmt.Fprintf(&b, "import %q\n", imp)
+ }
+ if encoder {
+ fmt.Fprintln(&b)
+ b.Write(encSource)
+ }
+ if decoder {
+ fmt.Fprintln(&b)
+ b.Write(decSource)
+ }
+
+ source := b.Bytes()
+ // fmt.Println(string(source))
+ return format.Source(source)
+}
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
new file mode 100644
index 000000000000..9940db188da9
--- /dev/null
+++ b/rlp/rlpgen/gen_test.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+// Package RLP is loaded only once and reused for all tests.
+var (
+ testFset = token.NewFileSet()
+ testImporter = importer.ForCompiler(testFset, "source", nil).(types.ImporterFrom)
+ testPackageRLP *types.Package
+)
+
+func init() {
+ cwd, err := os.Getwd()
+ if err != nil {
+ panic(err)
+ }
+ testPackageRLP, err = testImporter.ImportFrom(pathOfPackageRLP, cwd, 0)
+ if err != nil {
+ panic(fmt.Errorf("can't load package RLP: %v", err))
+ }
+}
+
+var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint"}
+
+func TestOutput(t *testing.T) {
+ for _, test := range tests {
+ test := test
+ t.Run(test, func(t *testing.T) {
+ inputFile := filepath.Join("testdata", test+".in.txt")
+ outputFile := filepath.Join("testdata", test+".out.txt")
+ bctx, typ, err := loadTestSource(inputFile, "Test")
+ if err != nil {
+ t.Fatal("error loading test source:", err)
+ }
+ output, err := bctx.generate(typ, true, true)
+ if err != nil {
+ t.Fatal("error in generate:", err)
+ }
+
+ // Set this environment variable to regenerate the test outputs.
+ if os.Getenv("WRITE_TEST_FILES") != "" {
+ ioutil.WriteFile(outputFile, output, 0644)
+ }
+
+ // Check if output matches.
+ wantOutput, err := ioutil.ReadFile(outputFile)
+ if err != nil {
+ t.Fatal("error loading expected test output:", err)
+ }
+ if !bytes.Equal(output, wantOutput) {
+ t.Fatal("output mismatch:\n", string(output))
+ }
+ })
+ }
+}
+
+func loadTestSource(file string, typeName string) (*buildContext, *types.Named, error) {
+ // Load the test input.
+ content, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, nil, err
+ }
+ f, err := parser.ParseFile(testFset, file, content, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ conf := types.Config{Importer: testImporter}
+ pkg, err := conf.Check("test", testFset, []*ast.File{f}, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Find the test struct.
+ bctx := newBuildContext(testPackageRLP)
+ typ, err := lookupStructType(pkg.Scope(), typeName)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't find type %s: %v", typeName, err)
+ }
+ return bctx, typ, nil
+}
diff --git a/rlp/rlpgen/main.go b/rlp/rlpgen/main.go
new file mode 100644
index 000000000000..5b240bfd85fb
--- /dev/null
+++ b/rlp/rlpgen/main.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package main
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/types"
+ "io/ioutil"
+ "os"
+
+ "golang.org/x/tools/go/packages"
+)
+
+const pathOfPackageRLP = "github.com/ethereum/go-ethereum/rlp"
+
+func main() {
+ var (
+ pkgdir = flag.String("dir", ".", "input package")
+ output = flag.String("out", "-", "output file (default is stdout)")
+ genEncoder = flag.Bool("encoder", true, "generate EncodeRLP?")
+ genDecoder = flag.Bool("decoder", false, "generate DecodeRLP?")
+ typename = flag.String("type", "", "type to generate methods for")
+ )
+ flag.Parse()
+
+ cfg := Config{
+ Dir: *pkgdir,
+ Type: *typename,
+ GenerateEncoder: *genEncoder,
+ GenerateDecoder: *genDecoder,
+ }
+ code, err := cfg.process()
+ if err != nil {
+ fatal(err)
+ }
+ if *output == "-" {
+ os.Stdout.Write(code)
+ } else if err := ioutil.WriteFile(*output, code, 0644); err != nil {
+ fatal(err)
+ }
+}
+
+func fatal(args ...interface{}) {
+ fmt.Fprintln(os.Stderr, args...)
+ os.Exit(1)
+}
+
+type Config struct {
+ Dir string // input package directory
+ Type string
+
+ GenerateEncoder bool
+ GenerateDecoder bool
+}
+
+// process generates the Go code.
+func (cfg *Config) process() (code []byte, err error) {
+ // Load packages.
+ pcfg := &packages.Config{
+ Mode: packages.NeedName | packages.NeedTypes | packages.NeedImports | packages.NeedDeps,
+ Dir: cfg.Dir,
+ BuildFlags: []string{"-tags", "norlpgen"},
+ }
+ ps, err := packages.Load(pcfg, pathOfPackageRLP, ".")
+ if err != nil {
+ return nil, err
+ }
+ if len(ps) == 0 {
+ return nil, fmt.Errorf("no Go package found in %s", cfg.Dir)
+ }
+ packages.PrintErrors(ps)
+
+ // Find the packages that were loaded.
+ var (
+ pkg *types.Package
+ packageRLP *types.Package
+ )
+ for _, p := range ps {
+ if len(p.Errors) > 0 {
+ return nil, fmt.Errorf("package %s has errors", p.PkgPath)
+ }
+ if p.PkgPath == pathOfPackageRLP {
+ packageRLP = p.Types
+ } else {
+ pkg = p.Types
+ }
+ }
+ bctx := newBuildContext(packageRLP)
+
+ // Find the type and generate.
+ typ, err := lookupStructType(pkg.Scope(), cfg.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't find %s in %s: %v", typ, pkg, err)
+ }
+ code, err = bctx.generate(typ, cfg.GenerateEncoder, cfg.GenerateDecoder)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add build comments.
+ // This is done here to avoid processing these lines with gofmt.
+ var header bytes.Buffer
+ fmt.Fprint(&header, "// Code generated by rlpgen. DO NOT EDIT.\n\n")
+ fmt.Fprint(&header, "//go:build !norlpgen\n")
+ fmt.Fprint(&header, "// +build !norlpgen\n\n")
+ return append(header.Bytes(), code...), nil
+}
+
+func lookupStructType(scope *types.Scope, name string) (*types.Named, error) {
+ typ, err := lookupType(scope, name)
+ if err != nil {
+ return nil, err
+ }
+ _, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return nil, errors.New("not a struct type")
+ }
+ return typ, nil
+}
+
+func lookupType(scope *types.Scope, name string) (*types.Named, error) {
+ obj := scope.Lookup(name)
+ if obj == nil {
+ return nil, errors.New("no such identifier")
+ }
+ typ, ok := obj.(*types.TypeName)
+ if !ok {
+ return nil, errors.New("not a type")
+ }
+ return typ.Type().(*types.Named), nil
+}
diff --git a/rlp/rlpgen/testdata/bigint.in.txt b/rlp/rlpgen/testdata/bigint.in.txt
new file mode 100644
index 000000000000..d23d84a28763
--- /dev/null
+++ b/rlp/rlpgen/testdata/bigint.in.txt
@@ -0,0 +1,10 @@
+// -*- mode: go -*-
+
+package test
+
+import "math/big"
+
+type Test struct {
+ Int *big.Int
+ IntNoPtr big.Int
+}
diff --git a/rlp/rlpgen/testdata/bigint.out.txt b/rlp/rlpgen/testdata/bigint.out.txt
new file mode 100644
index 000000000000..f54d1faa15f7
--- /dev/null
+++ b/rlp/rlpgen/testdata/bigint.out.txt
@@ -0,0 +1,49 @@
+package test
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ if obj.Int == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Int.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Int)
+ }
+ if obj.IntNoPtr.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(&obj.IntNoPtr)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Int:
+ _tmp1, err := dec.BigInt()
+ if err != nil {
+ return err
+ }
+ _tmp0.Int = _tmp1
+ // IntNoPtr:
+ _tmp2, err := dec.BigInt()
+ if err != nil {
+ return err
+ }
+ _tmp0.IntNoPtr = (*_tmp2)
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/nil.in.txt b/rlp/rlpgen/testdata/nil.in.txt
new file mode 100644
index 000000000000..a28ff344874d
--- /dev/null
+++ b/rlp/rlpgen/testdata/nil.in.txt
@@ -0,0 +1,30 @@
+// -*- mode: go -*-
+
+package test
+
+type Aux struct{
+ A uint32
+}
+
+type Test struct{
+ Uint8 *byte `rlp:"nil"`
+ Uint8List *byte `rlp:"nilList"`
+
+ Uint32 *uint32 `rlp:"nil"`
+ Uint32List *uint32 `rlp:"nilList"`
+
+ Uint64 *uint64 `rlp:"nil"`
+ Uint64List *uint64 `rlp:"nilList"`
+
+ String *string `rlp:"nil"`
+ StringList *string `rlp:"nilList"`
+
+ ByteArray *[3]byte `rlp:"nil"`
+ ByteArrayList *[3]byte `rlp:"nilList"`
+
+ ByteSlice *[]byte `rlp:"nil"`
+ ByteSliceList *[]byte `rlp:"nilList"`
+
+ Struct *Aux `rlp:"nil"`
+ StructString *Aux `rlp:"nilString"`
+}
diff --git a/rlp/rlpgen/testdata/nil.out.txt b/rlp/rlpgen/testdata/nil.out.txt
new file mode 100644
index 000000000000..e0d5dcebad3b
--- /dev/null
+++ b/rlp/rlpgen/testdata/nil.out.txt
@@ -0,0 +1,289 @@
+package test
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ if obj.Uint8 == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint8)))
+ }
+ if obj.Uint8List == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint8List)))
+ }
+ if obj.Uint32 == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint32)))
+ }
+ if obj.Uint32List == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint32List)))
+ }
+ if obj.Uint64 == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64((*obj.Uint64))
+ }
+ if obj.Uint64List == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteUint64((*obj.Uint64List))
+ }
+ if obj.String == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteString((*obj.String))
+ }
+ if obj.StringList == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteString((*obj.StringList))
+ }
+ if obj.ByteArray == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteBytes(obj.ByteArray[:])
+ }
+ if obj.ByteArrayList == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteBytes(obj.ByteArrayList[:])
+ }
+ if obj.ByteSlice == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteBytes((*obj.ByteSlice))
+ }
+ if obj.ByteSliceList == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteBytes((*obj.ByteSliceList))
+ }
+ if obj.Struct == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ _tmp1 := w.List()
+ w.WriteUint64(uint64(obj.Struct.A))
+ w.ListEnd(_tmp1)
+ }
+ if obj.StructString == nil {
+ w.Write([]byte{0x80})
+ } else {
+ _tmp2 := w.List()
+ w.WriteUint64(uint64(obj.StructString.A))
+ w.ListEnd(_tmp2)
+ }
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Uint8:
+ var _tmp2 *byte
+ if _tmp3, _tmp4, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp4 != 0 || _tmp3 != rlp.String {
+ _tmp1, err := dec.Uint8()
+ if err != nil {
+ return err
+ }
+ _tmp2 = &_tmp1
+ }
+ _tmp0.Uint8 = _tmp2
+ // Uint8List:
+ var _tmp6 *byte
+ if _tmp7, _tmp8, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp8 != 0 || _tmp7 != rlp.List {
+ _tmp5, err := dec.Uint8()
+ if err != nil {
+ return err
+ }
+ _tmp6 = &_tmp5
+ }
+ _tmp0.Uint8List = _tmp6
+ // Uint32:
+ var _tmp10 *uint32
+ if _tmp11, _tmp12, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp12 != 0 || _tmp11 != rlp.String {
+ _tmp9, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp10 = &_tmp9
+ }
+ _tmp0.Uint32 = _tmp10
+ // Uint32List:
+ var _tmp14 *uint32
+ if _tmp15, _tmp16, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp16 != 0 || _tmp15 != rlp.List {
+ _tmp13, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp14 = &_tmp13
+ }
+ _tmp0.Uint32List = _tmp14
+ // Uint64:
+ var _tmp18 *uint64
+ if _tmp19, _tmp20, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp20 != 0 || _tmp19 != rlp.String {
+ _tmp17, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp18 = &_tmp17
+ }
+ _tmp0.Uint64 = _tmp18
+ // Uint64List:
+ var _tmp22 *uint64
+ if _tmp23, _tmp24, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp24 != 0 || _tmp23 != rlp.List {
+ _tmp21, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp22 = &_tmp21
+ }
+ _tmp0.Uint64List = _tmp22
+ // String:
+ var _tmp26 *string
+ if _tmp27, _tmp28, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp28 != 0 || _tmp27 != rlp.String {
+ _tmp25, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp26 = &_tmp25
+ }
+ _tmp0.String = _tmp26
+ // StringList:
+ var _tmp30 *string
+ if _tmp31, _tmp32, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp32 != 0 || _tmp31 != rlp.List {
+ _tmp29, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp30 = &_tmp29
+ }
+ _tmp0.StringList = _tmp30
+ // ByteArray:
+ var _tmp34 *[3]byte
+ if _tmp35, _tmp36, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp36 != 0 || _tmp35 != rlp.String {
+ var _tmp33 [3]byte
+ if err := dec.ReadBytes(_tmp33[:]); err != nil {
+ return err
+ }
+ _tmp34 = &_tmp33
+ }
+ _tmp0.ByteArray = _tmp34
+ // ByteArrayList:
+ var _tmp38 *[3]byte
+ if _tmp39, _tmp40, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp40 != 0 || _tmp39 != rlp.List {
+ var _tmp37 [3]byte
+ if err := dec.ReadBytes(_tmp37[:]); err != nil {
+ return err
+ }
+ _tmp38 = &_tmp37
+ }
+ _tmp0.ByteArrayList = _tmp38
+ // ByteSlice:
+ var _tmp42 *[]byte
+ if _tmp43, _tmp44, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp44 != 0 || _tmp43 != rlp.String {
+ _tmp41, err := dec.Bytes()
+ if err != nil {
+ return err
+ }
+ _tmp42 = &_tmp41
+ }
+ _tmp0.ByteSlice = _tmp42
+ // ByteSliceList:
+ var _tmp46 *[]byte
+ if _tmp47, _tmp48, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp48 != 0 || _tmp47 != rlp.List {
+ _tmp45, err := dec.Bytes()
+ if err != nil {
+ return err
+ }
+ _tmp46 = &_tmp45
+ }
+ _tmp0.ByteSliceList = _tmp46
+ // Struct:
+ var _tmp51 *Aux
+ if _tmp52, _tmp53, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp53 != 0 || _tmp52 != rlp.List {
+ var _tmp49 Aux
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp50, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp49.A = _tmp50
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp51 = &_tmp49
+ }
+ _tmp0.Struct = _tmp51
+ // StructString:
+ var _tmp56 *Aux
+ if _tmp57, _tmp58, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp58 != 0 || _tmp57 != rlp.String {
+ var _tmp54 Aux
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp55, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp54.A = _tmp55
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp56 = &_tmp54
+ }
+ _tmp0.StructString = _tmp56
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/optional.in.txt b/rlp/rlpgen/testdata/optional.in.txt
new file mode 100644
index 000000000000..f1ac9f7899d1
--- /dev/null
+++ b/rlp/rlpgen/testdata/optional.in.txt
@@ -0,0 +1,17 @@
+// -*- mode: go -*-
+
+package test
+
+type Aux struct {
+ A uint64
+}
+
+type Test struct {
+ Uint64 uint64 `rlp:"optional"`
+ Pointer *uint64 `rlp:"optional"`
+ String string `rlp:"optional"`
+ Slice []uint64 `rlp:"optional"`
+ Array [3]byte `rlp:"optional"`
+ NamedStruct Aux `rlp:"optional"`
+ AnonStruct struct{ A string } `rlp:"optional"`
+}
diff --git a/rlp/rlpgen/testdata/optional.out.txt b/rlp/rlpgen/testdata/optional.out.txt
new file mode 100644
index 000000000000..02df8e457f94
--- /dev/null
+++ b/rlp/rlpgen/testdata/optional.out.txt
@@ -0,0 +1,153 @@
+package test
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ _tmp1 := obj.Uint64 != 0
+ _tmp2 := obj.Pointer != nil
+ _tmp3 := obj.String != ""
+ _tmp4 := len(obj.Slice) > 0
+ _tmp5 := obj.Array != ([3]byte{})
+ _tmp6 := obj.NamedStruct != (Aux{})
+ _tmp7 := obj.AnonStruct != (struct{ A string }{})
+ if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ w.WriteUint64(obj.Uint64)
+ }
+ if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ if obj.Pointer == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64((*obj.Pointer))
+ }
+ }
+ if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ w.WriteString(obj.String)
+ }
+ if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ _tmp8 := w.List()
+ for _, _tmp9 := range obj.Slice {
+ w.WriteUint64(_tmp9)
+ }
+ w.ListEnd(_tmp8)
+ }
+ if _tmp5 || _tmp6 || _tmp7 {
+ w.WriteBytes(obj.Array[:])
+ }
+ if _tmp6 || _tmp7 {
+ _tmp10 := w.List()
+ w.WriteUint64(obj.NamedStruct.A)
+ w.ListEnd(_tmp10)
+ }
+ if _tmp7 {
+ _tmp11 := w.List()
+ w.WriteString(obj.AnonStruct.A)
+ w.ListEnd(_tmp11)
+ }
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Uint64:
+ if dec.MoreDataInList() {
+ _tmp1, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp0.Uint64 = _tmp1
+ // Pointer:
+ if dec.MoreDataInList() {
+ _tmp2, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp0.Pointer = &_tmp2
+ // String:
+ if dec.MoreDataInList() {
+ _tmp3, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp0.String = _tmp3
+ // Slice:
+ if dec.MoreDataInList() {
+ var _tmp4 []uint64
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ _tmp5, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp4 = append(_tmp4, _tmp5)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.Slice = _tmp4
+ // Array:
+ if dec.MoreDataInList() {
+ var _tmp6 [3]byte
+ if err := dec.ReadBytes(_tmp6[:]); err != nil {
+ return err
+ }
+ _tmp0.Array = _tmp6
+ // NamedStruct:
+ if dec.MoreDataInList() {
+ var _tmp7 Aux
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp8, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp7.A = _tmp8
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp0.NamedStruct = _tmp7
+ // AnonStruct:
+ if dec.MoreDataInList() {
+ var _tmp9 struct{ A string }
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp10, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp9.A = _tmp10
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp0.AnonStruct = _tmp9
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/rawvalue.in.txt b/rlp/rlpgen/testdata/rawvalue.in.txt
new file mode 100644
index 000000000000..3a657bc907bb
--- /dev/null
+++ b/rlp/rlpgen/testdata/rawvalue.in.txt
@@ -0,0 +1,11 @@
+// -*- mode: go -*-
+
+package test
+
+import "github.com/ethereum/go-ethereum/rlp"
+
+type Test struct {
+ RawValue rlp.RawValue
+ PointerToRawValue *rlp.RawValue
+ SliceOfRawValue []rlp.RawValue
+}
diff --git a/rlp/rlpgen/testdata/rawvalue.out.txt b/rlp/rlpgen/testdata/rawvalue.out.txt
new file mode 100644
index 000000000000..3607c9863676
--- /dev/null
+++ b/rlp/rlpgen/testdata/rawvalue.out.txt
@@ -0,0 +1,64 @@
+package test
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.Write(obj.RawValue)
+ if obj.PointerToRawValue == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.Write((*obj.PointerToRawValue))
+ }
+ _tmp1 := w.List()
+ for _, _tmp2 := range obj.SliceOfRawValue {
+ w.Write(_tmp2)
+ }
+ w.ListEnd(_tmp1)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // RawValue:
+ _tmp1, err := dec.Raw()
+ if err != nil {
+ return err
+ }
+ _tmp0.RawValue = _tmp1
+ // PointerToRawValue:
+ _tmp2, err := dec.Raw()
+ if err != nil {
+ return err
+ }
+ _tmp0.PointerToRawValue = &_tmp2
+ // SliceOfRawValue:
+ var _tmp3 []rlp.RawValue
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ _tmp4, err := dec.Raw()
+ if err != nil {
+ return err
+ }
+ _tmp3 = append(_tmp3, _tmp4)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.SliceOfRawValue = _tmp3
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/uints.in.txt b/rlp/rlpgen/testdata/uints.in.txt
new file mode 100644
index 000000000000..8095da997d96
--- /dev/null
+++ b/rlp/rlpgen/testdata/uints.in.txt
@@ -0,0 +1,10 @@
+// -*- mode: go -*-
+
+package test
+
+type Test struct{
+ A uint8
+ B uint16
+ C uint32
+ D uint64
+}
diff --git a/rlp/rlpgen/testdata/uints.out.txt b/rlp/rlpgen/testdata/uints.out.txt
new file mode 100644
index 000000000000..1a354956a409
--- /dev/null
+++ b/rlp/rlpgen/testdata/uints.out.txt
@@ -0,0 +1,53 @@
+package test
+
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteUint64(uint64(obj.A))
+ w.WriteUint64(uint64(obj.B))
+ w.WriteUint64(uint64(obj.C))
+ w.WriteUint64(obj.D)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp1, err := dec.Uint8()
+ if err != nil {
+ return err
+ }
+ _tmp0.A = _tmp1
+ // B:
+ _tmp2, err := dec.Uint16()
+ if err != nil {
+ return err
+ }
+ _tmp0.B = _tmp2
+ // C:
+ _tmp3, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp0.C = _tmp3
+ // D:
+ _tmp4, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp0.D = _tmp4
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/types.go b/rlp/rlpgen/types.go
new file mode 100644
index 000000000000..5926a801eecb
--- /dev/null
+++ b/rlp/rlpgen/types.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+ "fmt"
+ "go/types"
+ "reflect"
+)
+
+// typeReflectKind gives the reflect.Kind that represents typ.
+func typeReflectKind(typ types.Type) reflect.Kind {
+ switch typ := typ.(type) {
+ case *types.Basic:
+ k := typ.Kind()
+ if k >= types.Bool && k <= types.Complex128 {
+ // value order matches for Bool..Complex128
+ return reflect.Bool + reflect.Kind(k-types.Bool)
+ }
+ if k == types.String {
+ return reflect.String
+ }
+ if k == types.UnsafePointer {
+ return reflect.UnsafePointer
+ }
+ panic(fmt.Errorf("unhandled BasicKind %v", k))
+ case *types.Array:
+ return reflect.Array
+ case *types.Chan:
+ return reflect.Chan
+ case *types.Interface:
+ return reflect.Interface
+ case *types.Map:
+ return reflect.Map
+ case *types.Pointer:
+ return reflect.Ptr
+ case *types.Signature:
+ return reflect.Func
+ case *types.Slice:
+ return reflect.Slice
+ case *types.Struct:
+ return reflect.Struct
+ default:
+ panic(fmt.Errorf("unhandled type %T", typ))
+ }
+}
+
+// nonZeroCheck returns the expression that checks whether 'v' is a non-zero value of type 'vtyp'.
+func nonZeroCheck(v string, vtyp types.Type, qualify types.Qualifier) string {
+ // Resolve type name.
+ typ := resolveUnderlying(vtyp)
+ switch typ := typ.(type) {
+ case *types.Basic:
+ k := typ.Kind()
+ switch {
+ case k == types.Bool:
+ return v
+ case k >= types.Uint && k <= types.Complex128:
+ return fmt.Sprintf("%s != 0", v)
+ case k == types.String:
+ return fmt.Sprintf(`%s != ""`, v)
+ default:
+ panic(fmt.Errorf("unhandled BasicKind %v", k))
+ }
+ case *types.Array, *types.Struct:
+ return fmt.Sprintf("%s != (%s{})", v, types.TypeString(vtyp, qualify))
+ case *types.Interface, *types.Pointer, *types.Signature:
+ return fmt.Sprintf("%s != nil", v)
+ case *types.Slice, *types.Map:
+ return fmt.Sprintf("len(%s) > 0", v)
+ default:
+ panic(fmt.Errorf("unhandled type %T", typ))
+ }
+}
+
+// isBigInt checks whether 'typ' is "math/big".Int.
+func isBigInt(typ types.Type) bool {
+ named, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ name := named.Obj()
+ return name.Pkg().Path() == "math/big" && name.Name() == "Int"
+}
+
+// isByte checks whether the underlying type of 'typ' is uint8.
+func isByte(typ types.Type) bool {
+ basic, ok := resolveUnderlying(typ).(*types.Basic)
+ return ok && basic.Kind() == types.Uint8
+}
+
+func resolveUnderlying(typ types.Type) types.Type {
+ for {
+ t := typ.Underlying()
+ if t == typ {
+ return t
+ }
+ typ = t
+ }
+}
diff --git a/rlp/typecache.go b/rlp/typecache.go
index 62553d3b55c1..3e37c9d2fcc7 100644
--- a/rlp/typecache.go
+++ b/rlp/typecache.go
@@ -19,9 +19,10 @@ package rlp
import (
"fmt"
"reflect"
- "strings"
"sync"
"sync/atomic"
+
+ "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
)
// typeinfo is an entry in the type cache.
@@ -32,35 +33,16 @@ type typeinfo struct {
writerErr error // error from makeWriter
}
-// tags represents struct tags.
-type tags struct {
- // rlp:"nil" controls whether empty input results in a nil pointer.
- // nilKind is the kind of empty value allowed for the field.
- nilKind Kind
- nilOK bool
-
- // rlp:"optional" allows for a field to be missing in the input list.
- // If this is set, all subsequent fields must also be optional.
- optional bool
-
- // rlp:"tail" controls whether this field swallows additional list elements. It can
- // only be set for the last field, which must be of slice type.
- tail bool
-
- // rlp:"-" ignores fields.
- ignored bool
-}
-
// typekey is the key of a type in typeCache. It includes the struct tags because
// they might generate a different decoder.
type typekey struct {
reflect.Type
- tags
+ rlpstruct.Tags
}
type decoder func(*Stream, reflect.Value) error
-type writer func(reflect.Value, *encbuf) error
+type writer func(reflect.Value, *encBuffer) error
var theTC = newTypeCache()
@@ -95,10 +77,10 @@ func (c *typeCache) info(typ reflect.Type) *typeinfo {
}
// Not in the cache, need to generate info for this type.
- return c.generate(typ, tags{})
+ return c.generate(typ, rlpstruct.Tags{})
}
-func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo {
+func (c *typeCache) generate(typ reflect.Type, tags rlpstruct.Tags) *typeinfo {
c.mu.Lock()
defer c.mu.Unlock()
@@ -122,7 +104,7 @@ func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo {
return info
}
-func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags tags) *typeinfo {
+func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags rlpstruct.Tags) *typeinfo {
key := typekey{typ, tags}
if info := c.next[key]; info != nil {
return info
@@ -144,35 +126,40 @@ type field struct {
// structFields resolves the typeinfo of all public fields in a struct type.
func structFields(typ reflect.Type) (fields []field, err error) {
- var (
- lastPublic = lastPublicField(typ)
- anyOptional = false
- )
+ // Convert fields to rlpstruct.Field.
+ var allStructFields []rlpstruct.Field
for i := 0; i < typ.NumField(); i++ {
- if f := typ.Field(i); f.PkgPath == "" { // exported
- tags, err := parseStructTag(typ, i, lastPublic)
- if err != nil {
- return nil, err
- }
-
- // Skip rlp:"-" fields.
- if tags.ignored {
- continue
- }
- // If any field has the "optional" tag, subsequent fields must also have it.
- if tags.optional || tags.tail {
- anyOptional = true
- } else if anyOptional {
- return nil, fmt.Errorf(`rlp: struct field %v.%s needs "optional" tag`, typ, f.Name)
- }
- info := theTC.infoWhileGenerating(f.Type, tags)
- fields = append(fields, field{i, info, tags.optional})
+ rf := typ.Field(i)
+ allStructFields = append(allStructFields, rlpstruct.Field{
+ Name: rf.Name,
+ Index: i,
+ Exported: rf.PkgPath == "",
+ Tag: string(rf.Tag),
+ Type: *rtypeToStructType(rf.Type, nil),
+ })
+ }
+
+ // Filter/validate fields.
+ structFields, structTags, err := rlpstruct.ProcessFields(allStructFields)
+ if err != nil {
+ if tagErr, ok := err.(rlpstruct.TagError); ok {
+ tagErr.StructType = typ.String()
+ return nil, tagErr
}
+ return nil, err
+ }
+
+ // Resolve typeinfo.
+ for i, sf := range structFields {
+ typ := typ.Field(sf.Index).Type
+ tags := structTags[i]
+ info := theTC.infoWhileGenerating(typ, tags)
+ fields = append(fields, field{sf.Index, info, tags.Optional})
}
return fields, nil
}
-// anyOptionalFields returns the index of the first field with "optional" tag.
+// firstOptionalField returns the index of the first field with "optional" tag.
func firstOptionalField(fields []field) int {
for i, f := range fields {
if f.optional {
@@ -192,82 +179,56 @@ func (e structFieldError) Error() string {
return fmt.Sprintf("%v (struct field %v.%s)", e.err, e.typ, e.typ.Field(e.field).Name)
}
-type structTagError struct {
- typ reflect.Type
- field, tag, err string
+func (i *typeinfo) generate(typ reflect.Type, tags rlpstruct.Tags) {
+ i.decoder, i.decoderErr = makeDecoder(typ, tags)
+ i.writer, i.writerErr = makeWriter(typ, tags)
}
-func (e structTagError) Error() string {
- return fmt.Sprintf("rlp: invalid struct tag %q for %v.%s (%s)", e.tag, e.typ, e.field, e.err)
-}
+// rtypeToStructType converts typ to rlpstruct.Type.
+func rtypeToStructType(typ reflect.Type, rec map[reflect.Type]*rlpstruct.Type) *rlpstruct.Type {
+ k := typ.Kind()
+ if k == reflect.Invalid {
+ panic("invalid kind")
+ }
-func parseStructTag(typ reflect.Type, fi, lastPublic int) (tags, error) {
- f := typ.Field(fi)
- var ts tags
- for _, t := range strings.Split(f.Tag.Get("rlp"), ",") {
- switch t = strings.TrimSpace(t); t {
- case "":
- case "-":
- ts.ignored = true
- case "nil", "nilString", "nilList":
- ts.nilOK = true
- if f.Type.Kind() != reflect.Ptr {
- return ts, structTagError{typ, f.Name, t, "field is not a pointer"}
- }
- switch t {
- case "nil":
- ts.nilKind = defaultNilKind(f.Type.Elem())
- case "nilString":
- ts.nilKind = String
- case "nilList":
- ts.nilKind = List
- }
- case "optional":
- ts.optional = true
- if ts.tail {
- return ts, structTagError{typ, f.Name, t, `also has "tail" tag`}
- }
- case "tail":
- ts.tail = true
- if fi != lastPublic {
- return ts, structTagError{typ, f.Name, t, "must be on last field"}
- }
- if ts.optional {
- return ts, structTagError{typ, f.Name, t, `also has "optional" tag`}
- }
- if f.Type.Kind() != reflect.Slice {
- return ts, structTagError{typ, f.Name, t, "field type is not slice"}
- }
- default:
- return ts, fmt.Errorf("rlp: unknown struct tag %q on %v.%s", t, typ, f.Name)
- }
+ if prev := rec[typ]; prev != nil {
+ return prev // short-circuit for recursive types
+ }
+ if rec == nil {
+ rec = make(map[reflect.Type]*rlpstruct.Type)
}
- return ts, nil
-}
-func lastPublicField(typ reflect.Type) int {
- last := 0
- for i := 0; i < typ.NumField(); i++ {
- if typ.Field(i).PkgPath == "" {
- last = i
- }
+ t := &rlpstruct.Type{
+ Name: typ.String(),
+ Kind: k,
+ IsEncoder: typ.Implements(encoderInterface),
+ IsDecoder: typ.Implements(decoderInterface),
+ }
+ rec[typ] = t
+ if k == reflect.Array || k == reflect.Slice || k == reflect.Ptr {
+ t.Elem = rtypeToStructType(typ.Elem(), rec)
}
- return last
+ return t
}
-func (i *typeinfo) generate(typ reflect.Type, tags tags) {
- i.decoder, i.decoderErr = makeDecoder(typ, tags)
- i.writer, i.writerErr = makeWriter(typ, tags)
-}
+// typeNilKind gives the RLP value kind for nil pointers to 'typ'.
+func typeNilKind(typ reflect.Type, tags rlpstruct.Tags) Kind {
+ styp := rtypeToStructType(typ, nil)
-// defaultNilKind determines whether a nil pointer to typ encodes/decodes
-// as an empty string or empty list.
-func defaultNilKind(typ reflect.Type) Kind {
- k := typ.Kind()
- if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(typ) {
+ var nk rlpstruct.NilKind
+ if tags.NilOK {
+ nk = tags.NilKind
+ } else {
+ nk = styp.DefaultNilValue()
+ }
+ switch nk {
+ case rlpstruct.NilKindString:
return String
+ case rlpstruct.NilKindList:
+ return List
+ default:
+ panic("invalid nil kind value")
}
- return List
}
func isUint(k reflect.Kind) bool {
@@ -277,7 +238,3 @@ func isUint(k reflect.Kind) bool {
func isByte(typ reflect.Type) bool {
return typ.Kind() == reflect.Uint8 && !typ.Implements(encoderInterface)
}
-
-func isByteArray(typ reflect.Type) bool {
- return (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Array) && isByte(typ.Elem())
-}
diff --git a/rpc/client.go b/rpc/client.go
index e43760c22c5c..d3ce0297754c 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -58,12 +58,6 @@ const (
maxClientSubscriptionBuffer = 20000
)
-const (
- httpScheme = "http"
- wsScheme = "ws"
- ipcScheme = "ipc"
-)
-
// BatchElem is an element in a batch request.
type BatchElem struct {
Method string
@@ -80,7 +74,7 @@ type BatchElem struct {
// Client represents a connection to an RPC server.
type Client struct {
idgen func() ID // for subscriptions
- scheme string // connection type: http, ws or ipc
+ isHTTP bool // connection type: http, ws or ipc
services *serviceRegistry
idCounter uint32
@@ -115,11 +109,9 @@ type clientConn struct {
}
func (c *Client) newClientConn(conn ServerCodec) *clientConn {
- ctx := context.WithValue(context.Background(), clientContextKey{}, c)
- // Http connections have already set the scheme
- if !c.isHTTP() && c.scheme != "" {
- ctx = context.WithValue(ctx, "scheme", c.scheme)
- }
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, clientContextKey{}, c)
+ ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo())
handler := newHandler(ctx, conn, c.idgen, c.services)
return &clientConn{conn, handler}
}
@@ -145,7 +137,7 @@ func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, erro
select {
case <-ctx.Done():
// Send the timeout to dispatch so it can remove the request IDs.
- if !c.isHTTP() {
+ if !c.isHTTP {
select {
case c.reqTimeout <- op:
case <-c.closing:
@@ -194,7 +186,7 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
}
}
-// Client retrieves the client from the context, if any. This can be used to perform
+// ClientFromContext retrieves the client from the context, if any. This can be used to perform
// 'reverse calls' in a handler method.
func ClientFromContext(ctx context.Context) (*Client, bool) {
client, ok := ctx.Value(clientContextKey{}).(*Client)
@@ -212,18 +204,10 @@ func newClient(initctx context.Context, connect reconnectFunc) (*Client, error)
}
func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *Client {
- scheme := ""
- switch conn.(type) {
- case *httpConn:
- scheme = httpScheme
- case *websocketCodec:
- scheme = wsScheme
- case *jsonCodec:
- scheme = ipcScheme
- }
+ _, isHTTP := conn.(*httpConn)
c := &Client{
+ isHTTP: isHTTP,
idgen: idgen,
- scheme: scheme,
services: services,
writeConn: conn,
close: make(chan struct{}),
@@ -236,7 +220,7 @@ func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *C
reqSent: make(chan error, 1),
reqTimeout: make(chan *requestOp),
}
- if !c.isHTTP() {
+ if !isHTTP {
go c.dispatch(conn)
}
return c
@@ -267,7 +251,7 @@ func (c *Client) SupportedModules() (map[string]string, error) {
// Close closes the client, aborting any in-flight requests.
func (c *Client) Close() {
- if c.isHTTP() {
+ if c.isHTTP {
return
}
select {
@@ -281,7 +265,7 @@ func (c *Client) Close() {
// This method only works for clients using HTTP, it doesn't have
// any effect for clients using another transport.
func (c *Client) SetHeader(key, value string) {
- if !c.isHTTP() {
+ if !c.isHTTP {
return
}
conn := c.writeConn.(*httpConn)
@@ -315,7 +299,7 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
}
op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)}
- if c.isHTTP() {
+ if c.isHTTP {
err = c.sendHTTP(ctx, op, msg)
} else {
err = c.send(ctx, op, msg)
@@ -349,7 +333,7 @@ func (c *Client) BatchCall(b []BatchElem) error {
return c.BatchCallContext(ctx, b)
}
-// BatchCall sends all given requests as a single batch and waits for the server
+// BatchCallContext sends all given requests as a single batch and waits for the server
// to return a response for all of them. The wait duration is bounded by the
// context's deadline.
//
@@ -378,7 +362,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
}
var err error
- if c.isHTTP() {
+ if c.isHTTP {
err = c.sendBatchHTTP(ctx, op, msgs)
} else {
err = c.send(ctx, op, msgs)
@@ -417,7 +401,7 @@ func (c *Client) Notify(ctx context.Context, method string, args ...interface{})
}
msg.ID = nil
- if c.isHTTP() {
+ if c.isHTTP {
return c.sendHTTP(ctx, op, msg)
}
return c.send(ctx, op, msg)
@@ -450,12 +434,12 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf
// Check type of channel first.
chanVal := reflect.ValueOf(channel)
if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 {
- panic("first argument to Subscribe must be a writable channel")
+ panic(fmt.Sprintf("channel argument of Subscribe has type %T, need writable channel", channel))
}
if chanVal.IsNil() {
panic("channel given to Subscribe must not be nil")
}
- if c.isHTTP() {
+ if c.isHTTP {
return nil, ErrNotificationsUnsupported
}
@@ -509,8 +493,8 @@ func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error
}
func (c *Client) write(ctx context.Context, msg interface{}, retry bool) error {
- // The previous write failed. Try to establish a new connection.
if c.writeConn == nil {
+ // The previous write failed. Try to establish a new connection.
if err := c.reconnect(ctx); err != nil {
return err
}
@@ -657,7 +641,3 @@ func (c *Client) read(codec ServerCodec) {
c.readOp <- readOp{msgs, batch}
}
}
-
-func (c *Client) isHTTP() bool {
- return c.scheme == httpScheme
-}
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 224eb0c5c828..fa6010bb199c 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -615,6 +615,7 @@ func TestClientReconnect(t *testing.T) {
// Start a server and corresponding client.
s1, l1 := startServer("127.0.0.1:0")
client, err := DialContext(ctx, "ws://"+l1.Addr().String())
+ defer client.Close()
if err != nil {
t.Fatal("can't dial", err)
}
diff --git a/rpc/http.go b/rpc/http.go
index 32f4e7d90a25..18404c060a86 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -48,11 +48,18 @@ type httpConn struct {
headers http.Header
}
-// httpConn is treated specially by Client.
+// httpConn implements ServerCodec, but it is treated specially by Client
+// and some methods don't work. The panic() stubs here exist to ensure
+// this special treatment is correct.
+
func (hc *httpConn) writeJSON(context.Context, interface{}) error {
panic("writeJSON called on httpConn")
}
+func (hc *httpConn) peerInfo() PeerInfo {
+ panic("peerInfo called on httpConn")
+}
+
func (hc *httpConn) remoteAddr() string {
return hc.url
}
@@ -174,6 +181,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos
return nil, err
}
req.ContentLength = int64(len(body))
+ req.GetBody = func() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewReader(body)), nil }
// set headers
hc.mu.Lock()
@@ -236,20 +244,19 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), code)
return
}
+
+ // Create request-scoped context.
+ connInfo := PeerInfo{Transport: "http", RemoteAddr: r.RemoteAddr}
+ connInfo.HTTP.Version = r.Proto
+ connInfo.HTTP.Host = r.Host
+ connInfo.HTTP.Origin = r.Header.Get("Origin")
+ connInfo.HTTP.UserAgent = r.Header.Get("User-Agent")
+ ctx := r.Context()
+ ctx = context.WithValue(ctx, peerInfoContextKey{}, connInfo)
+
// All checks passed, create a codec that reads directly from the request body
// until EOF, writes the response to w, and orders the server to process a
// single request.
- ctx := r.Context()
- ctx = context.WithValue(ctx, "remote", r.RemoteAddr)
- ctx = context.WithValue(ctx, "scheme", r.Proto)
- ctx = context.WithValue(ctx, "local", r.Host)
- if ua := r.Header.Get("User-Agent"); ua != "" {
- ctx = context.WithValue(ctx, "User-Agent", ua)
- }
- if origin := r.Header.Get("Origin"); origin != "" {
- ctx = context.WithValue(ctx, "Origin", origin)
- }
-
w.Header().Set("content-type", contentType)
codec := newHTTPServerConn(r, w)
defer codec.close()
diff --git a/rpc/http_test.go b/rpc/http_test.go
index 97f8d44c39bc..c84d7705f205 100644
--- a/rpc/http_test.go
+++ b/rpc/http_test.go
@@ -162,3 +162,39 @@ func TestHTTPErrorResponse(t *testing.T) {
t.Error("unexpected error message", errMsg)
}
}
+
+func TestHTTPPeerInfo(t *testing.T) {
+ s := newTestServer()
+ defer s.Stop()
+ ts := httptest.NewServer(s)
+ defer ts.Close()
+
+ c, err := Dial(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.SetHeader("user-agent", "ua-testing")
+ c.SetHeader("origin", "origin.example.com")
+
+ // Request peer information.
+ var info PeerInfo
+ if err := c.Call(&info, "test_peerInfo"); err != nil {
+ t.Fatal(err)
+ }
+
+ if info.RemoteAddr == "" {
+ t.Error("RemoteAddr not set")
+ }
+ if info.Transport != "http" {
+ t.Errorf("wrong Transport %q", info.Transport)
+ }
+ if info.HTTP.Version != "HTTP/1.1" {
+ t.Errorf("wrong HTTP.Version %q", info.HTTP.Version)
+ }
+ if info.HTTP.UserAgent != "ua-testing" {
+ t.Errorf("wrong HTTP.UserAgent %q", info.HTTP.UserAgent)
+ }
+ if info.HTTP.Origin != "origin.example.com" {
+ t.Errorf("wrong HTTP.Origin %q", info.HTTP.UserAgent)
+ }
+}
diff --git a/rpc/json.go b/rpc/json.go
index 1daee3db82af..6024f1e7dc9b 100644
--- a/rpc/json.go
+++ b/rpc/json.go
@@ -198,6 +198,11 @@ func NewCodec(conn Conn) ServerCodec {
return NewFuncCodec(conn, enc.Encode, dec.Decode)
}
+func (c *jsonCodec) peerInfo() PeerInfo {
+ // This returns "ipc" because all other built-in transports have a separate codec type.
+ return PeerInfo{Transport: "ipc", RemoteAddr: c.remote}
+}
+
func (c *jsonCodec) remoteAddr() string {
return c.remote
}
diff --git a/rpc/server.go b/rpc/server.go
index 64e078a7fd1b..babc5688e264 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -26,6 +26,7 @@ import (
)
const MetadataApi = "rpc"
+const EngineApi = "engine"
// CodecOption specifies which type of messages a codec supports.
//
@@ -145,3 +146,38 @@ func (s *RPCService) Modules() map[string]string {
}
return modules
}
+
+// PeerInfo contains information about the remote end of the network connection.
+//
+// This is available within RPC method handlers through the context. Call
+// PeerInfoFromContext to get information about the client connection related to
+// the current method call.
+type PeerInfo struct {
+ // Transport is name of the protocol used by the client.
+ // This can be "http", "ws" or "ipc".
+ Transport string
+
+ // Address of client. This will usually contain the IP address and port.
+ RemoteAddr string
+
+ // Addditional information for HTTP and WebSocket connections.
+ HTTP struct {
+ // Protocol version, i.e. "HTTP/1.1". This is not set for WebSocket.
+ Version string
+ // Header values sent by the client.
+ UserAgent string
+ Origin string
+ Host string
+ }
+}
+
+type peerInfoContextKey struct{}
+
+// PeerInfoFromContext returns information about the client's network connection.
+// Use this with the context passed to RPC method handler functions.
+//
+// The zero value is returned if no connection info is present in ctx.
+func PeerInfoFromContext(ctx context.Context) PeerInfo {
+ info, _ := ctx.Value(peerInfoContextKey{}).(PeerInfo)
+ return info
+}
diff --git a/rpc/server_test.go b/rpc/server_test.go
index 6a2b09e44940..e67893710dc2 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -45,7 +45,7 @@ func TestServerRegisterName(t *testing.T) {
t.Fatalf("Expected service calc to be registered")
}
- wantCallbacks := 9
+ wantCallbacks := 10
if len(svc.callbacks) != wantCallbacks {
t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks))
}
@@ -134,7 +134,7 @@ func TestServerShortLivedConn(t *testing.T) {
if err != nil {
t.Fatal("can't dial:", err)
}
- defer conn.Close()
+
conn.SetDeadline(deadline)
// Write the request, then half-close the connection so the server stops reading.
conn.Write([]byte(request))
@@ -142,6 +142,8 @@ func TestServerShortLivedConn(t *testing.T) {
// Now try to get the response.
buf := make([]byte, 2000)
n, err := conn.Read(buf)
+ conn.Close()
+
if err != nil {
t.Fatal("read error:", err)
}
diff --git a/rpc/subscription.go b/rpc/subscription.go
index 942e764e5d6c..d7ba784fc532 100644
--- a/rpc/subscription.go
+++ b/rpc/subscription.go
@@ -34,7 +34,7 @@ import (
var (
// ErrNotificationsUnsupported is returned when the connection doesn't support notifications
ErrNotificationsUnsupported = errors.New("notifications not supported")
- // ErrNotificationNotFound is returned when the notification for the given id is not found
+ // ErrSubscriptionNotFound is returned when the notification for the given id is not found
ErrSubscriptionNotFound = errors.New("subscription not found")
)
diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go
index 62afc1df44f4..253e26328900 100644
--- a/rpc/testservice_test.go
+++ b/rpc/testservice_test.go
@@ -80,6 +80,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args *
return echoResult{str, i, args}
}
+func (s *testService) PeerInfo(ctx context.Context) PeerInfo {
+ return PeerInfoFromContext(ctx)
+}
+
func (s *testService) Sleep(ctx context.Context, duration time.Duration) {
time.Sleep(duration)
}
diff --git a/rpc/types.go b/rpc/types.go
index ca52d474d93b..46b08caf6800 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -30,18 +30,21 @@ import (
// API describes the set of methods offered over the RPC interface
type API struct {
- Namespace string // namespace under which the rpc methods of Service are exposed
- Version string // api version for DApp's
- Service interface{} // receiver instance which holds the methods
- Public bool // indication if the methods must be considered safe for public use
+ Namespace string // namespace under which the rpc methods of Service are exposed
+ Version string // api version for DApp's
+ Service interface{} // receiver instance which holds the methods
+ Public bool // indication if the methods must be considered safe for public use
+ Authenticated bool // whether the api should only be available behind authentication.
}
// ServerCodec implements reading, parsing and writing RPC messages for the server side of
// a RPC session. Implementations must be go-routine safe since the codec can be called in
// multiple go-routines concurrently.
type ServerCodec interface {
+ peerInfo() PeerInfo
readBatch() (msgs []*jsonrpcMessage, isBatch bool, err error)
close()
+
jsonWriter
}
diff --git a/rpc/websocket.go b/rpc/websocket.go
index ce7eda34ddba..74f955464d72 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -59,7 +59,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler {
log.Debug("WebSocket upgrade failed", "err", err)
return
}
- codec := newWebsocketCodec(conn)
+ codec := newWebsocketCodec(conn, r.Host, r.Header)
s.ServeCodec(codec, 0)
})
}
@@ -196,7 +196,7 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale
}
return nil, hErr
}
- return newWebsocketCodec(conn), nil
+ return newWebsocketCodec(conn, endpoint, header), nil
})
}
@@ -234,18 +234,28 @@ func wsClientHeaders(endpoint, origin string) (string, http.Header, error) {
type websocketCodec struct {
*jsonCodec
conn *websocket.Conn
+ info PeerInfo
wg sync.WaitGroup
pingReset chan struct{}
}
-func newWebsocketCodec(conn *websocket.Conn) ServerCodec {
+func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec {
conn.SetReadLimit(wsMessageSizeLimit)
wc := &websocketCodec{
jsonCodec: NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON).(*jsonCodec),
conn: conn,
pingReset: make(chan struct{}, 1),
+ info: PeerInfo{
+ Transport: "ws",
+ RemoteAddr: conn.RemoteAddr().String(),
+ },
}
+ // Fill in connection details.
+ wc.info.HTTP.Host = host
+ wc.info.HTTP.Origin = req.Get("Origin")
+ wc.info.HTTP.UserAgent = req.Get("User-Agent")
+ // Start pinger.
wc.wg.Add(1)
go wc.pingLoop()
return wc
@@ -256,6 +266,10 @@ func (wc *websocketCodec) close() {
wc.wg.Wait()
}
+func (wc *websocketCodec) peerInfo() PeerInfo {
+ return wc.info
+}
+
func (wc *websocketCodec) writeJSON(ctx context.Context, v interface{}) error {
err := wc.jsonCodec.writeJSON(ctx, v)
if err == nil {
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index 8e1bfb587e73..fb9357605b8b 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -72,7 +72,7 @@ func TestWebsocketOriginCheck(t *testing.T) {
// Connections without origin header should work.
client, err = DialWebsocket(context.Background(), wsURL, "")
if err != nil {
- t.Fatal("error for empty origin")
+ t.Fatalf("error for empty origin: %v", err)
}
client.Close()
}
@@ -113,6 +113,41 @@ func TestWebsocketLargeCall(t *testing.T) {
}
}
+func TestWebsocketPeerInfo(t *testing.T) {
+ var (
+ s = newTestServer()
+ ts = httptest.NewServer(s.WebsocketHandler([]string{"origin.example.com"}))
+ tsurl = "ws:" + strings.TrimPrefix(ts.URL, "http:")
+ )
+ defer s.Stop()
+ defer ts.Close()
+
+ ctx := context.Background()
+ c, err := DialWebsocket(ctx, tsurl, "origin.example.com")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Request peer information.
+ var connInfo PeerInfo
+ if err := c.Call(&connInfo, "test_peerInfo"); err != nil {
+ t.Fatal(err)
+ }
+
+ if connInfo.RemoteAddr == "" {
+ t.Error("RemoteAddr not set")
+ }
+ if connInfo.Transport != "ws" {
+ t.Errorf("wrong Transport %q", connInfo.Transport)
+ }
+ if connInfo.HTTP.UserAgent != "Go-http-client/1.1" {
+ t.Errorf("wrong HTTP.UserAgent %q", connInfo.HTTP.UserAgent)
+ }
+ if connInfo.HTTP.Origin != "origin.example.com" {
+ t.Errorf("wrong HTTP.Origin %q", connInfo.HTTP.UserAgent)
+ }
+}
+
// This test checks that client handles WebSocket ping frames correctly.
func TestClientWebsocketPing(t *testing.T) {
t.Parallel()
diff --git a/signer/core/api.go b/signer/core/api.go
index 48b54b8f43dd..f06fbeb76dd1 100644
--- a/signer/core/api.go
+++ b/signer/core/api.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/ethereum/go-ethereum/signer/storage"
)
@@ -188,23 +189,24 @@ func StartClefAccountManager(ksLocation string, nousb, lightKDF bool, scpath str
// MetadataFromContext extracts Metadata from a given context.Context
func MetadataFromContext(ctx context.Context) Metadata {
+ info := rpc.PeerInfoFromContext(ctx)
+
m := Metadata{"NA", "NA", "NA", "", ""} // batman
- if v := ctx.Value("remote"); v != nil {
- m.Remote = v.(string)
- }
- if v := ctx.Value("scheme"); v != nil {
- m.Scheme = v.(string)
- }
- if v := ctx.Value("local"); v != nil {
- m.Local = v.(string)
+ if info.Transport != "" {
+ if info.Transport == "http" {
+ m.Scheme = info.HTTP.Version
+ }
+ m.Scheme = info.Transport
}
- if v := ctx.Value("Origin"); v != nil {
- m.Origin = v.(string)
+ if info.RemoteAddr != "" {
+ m.Remote = info.RemoteAddr
}
- if v := ctx.Value("User-Agent"); v != nil {
- m.UserAgent = v.(string)
+ if info.HTTP.Host != "" {
+ m.Local = info.HTTP.Host
}
+ m.Origin = info.HTTP.Origin
+ m.UserAgent = info.HTTP.UserAgent
return m
}
diff --git a/signer/core/api_test.go b/signer/core/api_test.go
index 36f12f71a52d..9f44ca319566 100644
--- a/signer/core/api_test.go
+++ b/signer/core/api_test.go
@@ -256,6 +256,9 @@ func TestSignTx(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ if len(list) == 0 {
+ t.Fatal("Unexpected empty list")
+ }
a := common.NewMixedcaseAddress(list[0])
methodSig := "test(uint)"
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index 15ab15341646..f5c2fe2f3db9 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -262,6 +262,7 @@ func (typedData *TypedData) HashStruct(primaryType string, data TypedDataMessage
// Dependencies returns an array of custom types ordered by their hierarchical reference tree
func (typedData *TypedData) Dependencies(primaryType string, found []string) []string {
+ primaryType = strings.TrimSuffix(primaryType, "[]")
includes := func(arr []string, str string) bool {
for _, obj := range arr {
if obj == str {
@@ -364,7 +365,7 @@ func (typedData *TypedData) EncodeData(primaryType string, data map[string]inter
if err != nil {
return nil, err
}
- arrayBuffer.Write(encodedData)
+ arrayBuffer.Write(crypto.Keccak256(encodedData))
} else {
bytesValue, err := typedData.EncodePrimitiveValue(parsedType, item, depth)
if err != nil {
diff --git a/signer/core/gnosis_safe.go b/signer/core/gnosis_safe.go
index 016b1fff3e78..1b88db1afe76 100644
--- a/signer/core/gnosis_safe.go
+++ b/signer/core/gnosis_safe.go
@@ -31,6 +31,7 @@ type GnosisSafeTx struct {
SafeTxGas big.Int `json:"safeTxGas"`
Nonce big.Int `json:"nonce"`
InputExpHash common.Hash `json:"safeTxHash"`
+ ChainId *math.HexOrDecimal256 `json:"chainId,omitempty"`
}
// ToTypedData converts the tx to a EIP-712 Typed Data structure for signing
@@ -39,9 +40,14 @@ func (tx *GnosisSafeTx) ToTypedData() apitypes.TypedData {
if tx.Data != nil {
data = *tx.Data
}
+ var domainType = []apitypes.Type{{Name: "verifyingContract", Type: "address"}}
+ if tx.ChainId != nil {
+ domainType = append([]apitypes.Type{{Name: "chainId", Type: "uint256"}}, domainType[0])
+ }
+
gnosisTypedData := apitypes.TypedData{
Types: apitypes.Types{
- "EIP712Domain": []apitypes.Type{{Name: "verifyingContract", Type: "address"}},
+ "EIP712Domain": domainType,
"SafeTx": []apitypes.Type{
{Name: "to", Type: "address"},
{Name: "value", Type: "uint256"},
@@ -57,6 +63,7 @@ func (tx *GnosisSafeTx) ToTypedData() apitypes.TypedData {
},
Domain: apitypes.TypedDataDomain{
VerifyingContract: tx.Safe.Address().Hex(),
+ ChainId: tx.ChainId,
},
PrimaryType: "SafeTx",
Message: apitypes.TypedDataMessage{
@@ -88,6 +95,7 @@ func (tx *GnosisSafeTx) ArgsForValidation() *apitypes.SendTxArgs {
Nonce: hexutil.Uint64(tx.Nonce.Uint64()),
Data: tx.Data,
Input: nil,
+ ChainID: (*hexutil.Big)(tx.ChainId),
}
return args
}
diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go
index 1d972d296131..fbc2903d9e1d 100644
--- a/signer/core/signed_data_test.go
+++ b/signer/core/signed_data_test.go
@@ -532,3 +532,283 @@ func TestGnosisCustomData(t *testing.T) {
t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
}
}
+
+var gnosisTypedDataWithChainId = `
+{
+ "types": {
+ "EIP712Domain": [
+ { "type": "uint256", "name": "chainId" },
+ { "type": "address", "name": "verifyingContract" }
+ ],
+ "SafeTx": [
+ { "type": "address", "name": "to" },
+ { "type": "uint256", "name": "value" },
+ { "type": "bytes", "name": "data" },
+ { "type": "uint8", "name": "operation" },
+ { "type": "uint256", "name": "safeTxGas" },
+ { "type": "uint256", "name": "baseGas" },
+ { "type": "uint256", "name": "gasPrice" },
+ { "type": "address", "name": "gasToken" },
+ { "type": "address", "name": "refundReceiver" },
+ { "type": "uint256", "name": "nonce" }
+ ]
+ },
+ "domain": {
+ "verifyingContract": "0x111dAE35D176A9607053e0c46e91F36AFbC1dc57",
+ "chainId": "4"
+ },
+ "primaryType": "SafeTx",
+ "message": {
+ "to": "0x5592EC0cfb4dbc12D3aB100b257153436a1f0FEa",
+ "value": "0",
+ "data": "0xa9059cbb00000000000000000000000099d580d3a7fe7bd183b2464517b2cd7ce5a8f15a0000000000000000000000000000000000000000000000000de0b6b3a7640000",
+ "operation": 0,
+ "safeTxGas": 0,
+ "baseGas": 0,
+ "gasPrice": "0",
+ "gasToken": "0x0000000000000000000000000000000000000000",
+ "refundReceiver": "0x0000000000000000000000000000000000000000",
+ "nonce": 15
+ }
+}`
+
+var gnosisTxWithChainId = `
+{
+ "safe": "0x111dAE35D176A9607053e0c46e91F36AFbC1dc57",
+ "to": "0x5592EC0cfb4dbc12D3aB100b257153436a1f0FEa",
+ "value": "0",
+ "data": "0xa9059cbb00000000000000000000000099d580d3a7fe7bd183b2464517b2cd7ce5a8f15a0000000000000000000000000000000000000000000000000de0b6b3a7640000",
+ "operation": 0,
+ "gasToken": "0x0000000000000000000000000000000000000000",
+ "safeTxGas": 0,
+ "baseGas": 0,
+ "gasPrice": "0",
+ "refundReceiver": "0x0000000000000000000000000000000000000000",
+ "nonce": 15,
+ "executionDate": "2022-01-10T20:00:12Z",
+ "submissionDate": "2022-01-10T19:59:59.689989Z",
+ "modified": "2022-01-10T20:00:31.903635Z",
+ "blockNumber": 9968802,
+ "transactionHash": "0xc9fef30499ee8984974ab9dddd9d15c2a97c1a4393935dceed5efc3af9fc41a4",
+ "safeTxHash": "0x6619dab5401503f2735256e12b898e69eb701d6a7e0d07abf1be4bb8aebfba29",
+ "executor": "0xbc2BB26a6d821e69A38016f3858561a1D80d4182",
+ "isExecuted": true,
+ "isSuccessful": true,
+ "ethGasPrice": "2500000009",
+ "gasUsed": 82902,
+ "fee": "207255000746118",
+ "chainId": "4",
+ "origin": null,
+ "dataDecoded": {
+ "method": "transfer",
+ "parameters": [
+ {
+ "name": "to",
+ "type": "address",
+ "value": "0x99D580d3a7FE7BD183b2464517B2cD7ce5A8F15A"
+ },
+ {
+ "name": "value",
+ "type": "uint256",
+ "value": "1000000000000000000"
+ }
+ ]
+ },
+ "confirmationsRequired": 1,
+ "confirmations": [
+ {
+ "owner": "0xbc2BB26a6d821e69A38016f3858561a1D80d4182",
+ "submissionDate": "2022-01-10T19:59:59.722500Z",
+ "transactionHash": null,
+ "signature": "0x5ca34641bcdee06e7b99143bfe34778195ca41022bd35837b96c204c7786be9d6dfa6dba43b53cd92da45ac728899e1561b232d28f38ba82df45f164caba38be1b",
+ "signatureType": "EOA"
+ }
+ ],
+ "signatures": "0x5ca34641bcdee06e7b99143bfe34778195ca41022bd35837b96c204c7786be9d6dfa6dba43b53cd92da45ac728899e1561b232d28f38ba82df45f164caba38be1b"
+}
+`
+
+func TestGnosisTypedDataWithChainId(t *testing.T) {
+ var td apitypes.TypedData
+ err := json.Unmarshal([]byte(gnosisTypedDataWithChainId), &td)
+ if err != nil {
+ t.Fatalf("unmarshalling failed '%v'", err)
+ }
+ _, sighash, err := sign(td)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expSigHash := common.FromHex("0x6619dab5401503f2735256e12b898e69eb701d6a7e0d07abf1be4bb8aebfba29")
+ if !bytes.Equal(expSigHash, sighash) {
+ t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
+ }
+}
+
+// TestGnosisCustomData tests the scenario where a user submits only the gnosis-safe
+// specific data, and we fill the TypedData struct on our side
+func TestGnosisCustomDataWithChainId(t *testing.T) {
+ var tx core.GnosisSafeTx
+ err := json.Unmarshal([]byte(gnosisTxWithChainId), &tx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var td = tx.ToTypedData()
+ _, sighash, err := sign(td)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expSigHash := common.FromHex("0x6619dab5401503f2735256e12b898e69eb701d6a7e0d07abf1be4bb8aebfba29")
+ if !bytes.Equal(expSigHash, sighash) {
+ t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
+ }
+}
+
+var complexTypedData = `
+{
+ "types": {
+ "EIP712Domain": [
+ {
+ "name": "chainId",
+ "type": "uint256"
+ },
+ {
+ "name": "name",
+ "type": "string"
+ },
+ {
+ "name": "verifyingContract",
+ "type": "address"
+ },
+ {
+ "name": "version",
+ "type": "string"
+ }
+ ],
+ "Action": [
+ {
+ "name": "action",
+ "type": "string"
+ },
+ {
+ "name": "params",
+ "type": "string"
+ }
+ ],
+ "Cell": [
+ {
+ "name": "capacity",
+ "type": "string"
+ },
+ {
+ "name": "lock",
+ "type": "string"
+ },
+ {
+ "name": "type",
+ "type": "string"
+ },
+ {
+ "name": "data",
+ "type": "string"
+ },
+ {
+ "name": "extraData",
+ "type": "string"
+ }
+ ],
+ "Transaction": [
+ {
+ "name": "DAS_MESSAGE",
+ "type": "string"
+ },
+ {
+ "name": "inputsCapacity",
+ "type": "string"
+ },
+ {
+ "name": "outputsCapacity",
+ "type": "string"
+ },
+ {
+ "name": "fee",
+ "type": "string"
+ },
+ {
+ "name": "action",
+ "type": "Action"
+ },
+ {
+ "name": "inputs",
+ "type": "Cell[]"
+ },
+ {
+ "name": "outputs",
+ "type": "Cell[]"
+ },
+ {
+ "name": "digest",
+ "type": "bytes32"
+ }
+ ]
+ },
+ "primaryType": "Transaction",
+ "domain": {
+ "chainId": "56",
+ "name": "da.systems",
+ "verifyingContract": "0x0000000000000000000000000000000020210722",
+ "version": "1"
+ },
+ "message": {
+ "DAS_MESSAGE": "SELL mobcion.bit FOR 100000 CKB",
+ "inputsCapacity": "1216.9999 CKB",
+ "outputsCapacity": "1216.9998 CKB",
+ "fee": "0.0001 CKB",
+ "digest": "0x53a6c0f19ec281604607f5d6817e442082ad1882bef0df64d84d3810dae561eb",
+ "action": {
+ "action": "start_account_sale",
+ "params": "0x00"
+ },
+ "inputs": [
+ {
+ "capacity": "218 CKB",
+ "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...",
+ "type": "account-cell-type,0x01,0x",
+ "data": "{ account: mobcion.bit, expired_at: 1670913958 }",
+ "extraData": "{ status: 0, records_hash: 0x55478d76900611eb079b22088081124ed6c8bae21a05dd1a0d197efcc7c114ce }"
+ }
+ ],
+ "outputs": [
+ {
+ "capacity": "218 CKB",
+ "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...",
+ "type": "account-cell-type,0x01,0x",
+ "data": "{ account: mobcion.bit, expired_at: 1670913958 }",
+ "extraData": "{ status: 1, records_hash: 0x55478d76900611eb079b22088081124ed6c8bae21a05dd1a0d197efcc7c114ce }"
+ },
+ {
+ "capacity": "201 CKB",
+ "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...",
+ "type": "account-sale-cell-type,0x01,0x",
+ "data": "0x1209460ef3cb5f1c68ed2c43a3e020eec2d9de6e...",
+ "extraData": ""
+ }
+ ]
+ }
+}
+`
+
+func TestComplexTypedData(t *testing.T) {
+ var td apitypes.TypedData
+ err := json.Unmarshal([]byte(complexTypedData), &td)
+ if err != nil {
+ t.Fatalf("unmarshalling failed '%v'", err)
+ }
+ _, sighash, err := sign(td)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expSigHash := common.FromHex("0x42b1aca82bb6900ff75e90a136de550a58f1a220a071704088eabd5e6ce20446")
+ if !bytes.Equal(expSigHash, sighash) {
+ t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
+ }
+}
diff --git a/signer/fourbyte/abi.go b/signer/fourbyte/abi.go
index d8fbabd3b1b3..352abc59e182 100644
--- a/signer/fourbyte/abi.go
+++ b/signer/fourbyte/abi.go
@@ -20,7 +20,6 @@ import (
"bytes"
"encoding/json"
"fmt"
- "regexp"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi"
@@ -75,42 +74,15 @@ func verifySelector(selector string, calldata []byte) (*decodedCallData, error)
return parseCallData(calldata, string(abidata))
}
-// selectorRegexp is used to validate that a 4byte database selector corresponds
-// to a valid ABI function declaration.
-//
-// Note, although uppercase letters are not part of the ABI spec, this regexp
-// still accepts it as the general format is valid. It will be rejected later
-// by the type checker.
-var selectorRegexp = regexp.MustCompile(`^([^\)]+)\(([A-Za-z0-9,\[\]]*)\)`)
-
// parseSelector converts a method selector into an ABI JSON spec. The returned
// data is a valid JSON string which can be consumed by the standard abi package.
func parseSelector(unescapedSelector string) ([]byte, error) {
- // Define a tiny fake ABI struct for JSON marshalling
- type fakeArg struct {
- Type string `json:"type"`
- }
- type fakeABI struct {
- Name string `json:"name"`
- Type string `json:"type"`
- Inputs []fakeArg `json:"inputs"`
- }
- // Validate the unescapedSelector and extract it's components
- groups := selectorRegexp.FindStringSubmatch(unescapedSelector)
- if len(groups) != 3 {
- return nil, fmt.Errorf("invalid selector %q (%v matches)", unescapedSelector, len(groups))
+ selector, err := abi.ParseSelector(unescapedSelector)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse selector: %v", err)
}
- name := groups[1]
- args := groups[2]
- // Reassemble the fake ABI and constuct the JSON
- arguments := make([]fakeArg, 0)
- if len(args) > 0 {
- for _, arg := range strings.Split(args, ",") {
- arguments = append(arguments, fakeArg{arg})
- }
- }
- return json.Marshal([]fakeABI{{name, "function", arguments}})
+ return json.Marshal([]abi.SelectorMarshaling{selector})
}
// parseCallData matches the provided call data against the ABI definition and
diff --git a/tests/evm-benchmarks b/tests/evm-benchmarks
new file mode 160000
index 000000000000..849b3e239a28
--- /dev/null
+++ b/tests/evm-benchmarks
@@ -0,0 +1 @@
+Subproject commit 849b3e239a28f236dc99574b2e10e0c720895105
diff --git a/tests/fuzzers/bn256/bn256_fuzz.go b/tests/fuzzers/bn256/bn256_fuzz.go
index 030ac19b3f52..11fd9e18df00 100644
--- a/tests/fuzzers/bn256/bn256_fuzz.go
+++ b/tests/fuzzers/bn256/bn256_fuzz.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
+//go:build gofuzz
// +build gofuzz
package bn256
diff --git a/tests/fuzzers/secp256k1/secp_fuzzer.go b/tests/fuzzers/secp256k1/secp_fuzzer.go
index 53845b643345..47083d5fe3a9 100644
--- a/tests/fuzzers/secp256k1/secp_fuzzer.go
+++ b/tests/fuzzers/secp256k1/secp_fuzzer.go
@@ -21,7 +21,7 @@ package secp256k1
import (
"fmt"
- "github.com/btcsuite/btcd/btcec"
+ "github.com/btcsuite/btcd/btcec/v2"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
fuzz "github.com/google/gofuzz"
)
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index e73ef4851a9c..9ed8bcbc51d5 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -66,6 +66,8 @@ func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement
func (s *spongeDb) Get(key []byte) ([]byte, error) { return nil, errors.New("no such elem") }
func (s *spongeDb) Delete(key []byte) error { panic("implement me") }
func (s *spongeDb) NewBatch() ethdb.Batch { return &spongeBatch{s} }
+func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch { return &spongeBatch{s} }
+func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") }
func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") }
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error { return nil }
diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go
index 0414c001ec4e..b3b523cc8243 100644
--- a/tests/fuzzers/vflux/clientpool-fuzzer.go
+++ b/tests/fuzzers/vflux/clientpool-fuzzer.go
@@ -267,9 +267,7 @@ func FuzzClientPool(input []byte) int {
bias = f.randomDelay()
requested = f.randomBool()
)
- if _, err := pool.SetCapacity(f.peers[index].node, reqCap, bias, requested); err == vfs.ErrCantFindMaximum {
- panic(nil)
- }
+ pool.SetCapacity(f.peers[index].node, reqCap, bias, requested)
doLog("Set capacity", "id", f.peers[index].node.ID(), "reqcap", reqCap, "bias", bias, "requested", requested)
case 7:
index := f.randomByte()
diff --git a/tests/gen_stenv.go b/tests/gen_stenv.go
index ecf7af850382..29fbce121385 100644
--- a/tests/gen_stenv.go
+++ b/tests/gen_stenv.go
@@ -17,7 +17,8 @@ var _ = (*stEnvMarshaling)(nil)
func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"optional"`
+ Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
@@ -26,6 +27,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+ enc.Random = (*math.HexOrDecimal256)(s.Random)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number)
enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
@@ -37,7 +39,8 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"optional"`
+ Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
@@ -51,10 +54,12 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'currentCoinbase' for stEnv")
}
s.Coinbase = common.Address(*dec.Coinbase)
- if dec.Difficulty == nil {
- return errors.New("missing required field 'currentDifficulty' for stEnv")
+ if dec.Difficulty != nil {
+ s.Difficulty = (*big.Int)(dec.Difficulty)
+ }
+ if dec.Random != nil {
+ s.Random = (*big.Int)(dec.Random)
}
- s.Difficulty = (*big.Int)(dec.Difficulty)
if dec.GasLimit == nil {
return errors.New("missing required field 'currentGasLimit' for stEnv")
}
diff --git a/tests/init_test.go b/tests/init_test.go
index 312ad8869a37..7e2f3ff7f5bc 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -41,6 +41,7 @@ var (
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
+ benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
)
func readJSON(reader io.Reader, value interface{}) error {
diff --git a/tests/state_test.go b/tests/state_test.go
index 78ecda0409a6..d2c92b211cd1 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -20,9 +20,16 @@ import (
"bufio"
"bytes"
"fmt"
+ "math/big"
+ "os"
+ "path/filepath"
"reflect"
+ "strings"
"testing"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
)
@@ -61,6 +68,7 @@ func TestState(t *testing.T) {
for _, dir := range []string{
stateTestDir,
legacyStateTestDir,
+ benchmarksDir,
} {
st.walk(t, dir, func(t *testing.T, name string, test *StateTest) {
for _, subtest := range test.Subtests() {
@@ -131,3 +139,116 @@ func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
// t.Logf("EVM output: 0x%x", tracer.Output())
// t.Logf("EVM error: %v", tracer.Error())
}
+
+func BenchmarkEVM(b *testing.B) {
+ // Walk the directory.
+ dir := benchmarksDir
+ dirinfo, err := os.Stat(dir)
+ if os.IsNotExist(err) || !dirinfo.IsDir() {
+ fmt.Fprintf(os.Stderr, "can't find test files in %s, did you clone the evm-benchmarks submodule?\n", dir)
+ b.Skip("missing test files")
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if info.IsDir() {
+ return nil
+ }
+ if ext := filepath.Ext(path); ext == ".json" {
+ name := filepath.ToSlash(strings.TrimPrefix(strings.TrimSuffix(path, ext), dir+string(filepath.Separator)))
+ b.Run(name, func(b *testing.B) { runBenchmarkFile(b, path) })
+ }
+ return nil
+ })
+ if err != nil {
+ b.Fatal(err)
+ }
+}
+
+func runBenchmarkFile(b *testing.B, path string) {
+ m := make(map[string]StateTest)
+ if err := readJSONFile(path, &m); err != nil {
+ b.Fatal(err)
+ return
+ }
+ if len(m) != 1 {
+ b.Fatal("expected single benchmark in a file")
+ return
+ }
+ for _, t := range m {
+ runBenchmark(b, &t)
+ }
+}
+
+func runBenchmark(b *testing.B, t *StateTest) {
+ for _, subtest := range t.Subtests() {
+ subtest := subtest
+ key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
+
+ b.Run(key, func(b *testing.B) {
+ vmconfig := vm.Config{}
+
+ config, eips, err := GetChainConfig(subtest.Fork)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ vmconfig.ExtraEips = eips
+ block := t.genesis(config).ToBlock(nil)
+ _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false)
+
+ var baseFee *big.Int
+ if config.IsLondon(new(big.Int)) {
+ baseFee = t.json.Env.BaseFee
+ if baseFee == nil {
+ // Retesteth uses `0x10` for genesis baseFee. Therefore, it defaults to
+ // parent - 2 : 0xa as the basefee for 'this' context.
+ baseFee = big.NewInt(0x0a)
+ }
+ }
+ post := t.json.Post[subtest.Fork][subtest.Index]
+ msg, err := t.json.Tx.toMessage(post, baseFee)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+
+ // Try to recover tx with current signer
+ if len(post.TxBytes) != 0 {
+ var ttx types.Transaction
+ err := ttx.UnmarshalBinary(post.TxBytes)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+
+ if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
+ b.Error(err)
+ return
+ }
+ }
+
+ // Prepare the EVM.
+ txContext := core.NewEVMTxContext(msg)
+ context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
+ context.GetHash = vmTestBlockHash
+ context.BaseFee = baseFee
+ evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
+
+ // Create "contract" for sender to cache code analysis.
+ sender := vm.NewContract(vm.AccountRef(msg.From()), vm.AccountRef(msg.From()),
+ nil, 0)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ // Execute the message.
+ snapshot := statedb.Snapshot()
+ _, _, err = evm.Call(sender, *msg.To(), msg.Data(), msg.Gas(), msg.Value())
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ statedb.RevertToSnapshot(snapshot)
+ }
+
+ })
+ }
+}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index f7fb08bfbc8d..4fd3cf76b210 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -80,16 +80,18 @@ type stPostState struct {
type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
- Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"`
+ Difficulty *big.Int `json:"currentDifficulty" gencodec:"optional"`
+ Random *big.Int `json:"currentRandom" gencodec:"optional"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"`
Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
- BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"`
+ BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"`
}
type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256
+ Random *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64
Timestamp math.HexOrDecimal64
@@ -218,8 +220,12 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash
context.BaseFee = baseFee
+ if t.json.Env.Random != nil {
+ rnd := common.BigToHash(t.json.Env.Random)
+ context.Random = &rnd
+ context.Difficulty = big.NewInt(0)
+ }
evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
-
// Execute the message.
snapshot := statedb.Snapshot()
gaspool := new(core.GasPool)
@@ -268,7 +274,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
}
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
- return &core.Genesis{
+ genesis := &core.Genesis{
Config: config,
Coinbase: t.json.Env.Coinbase,
Difficulty: t.json.Env.Difficulty,
@@ -277,6 +283,12 @@ func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
Timestamp: t.json.Env.Timestamp,
Alloc: t.json.Pre,
}
+ if t.json.Env.Random != nil {
+ // Post-Merge
+ genesis.Mixhash = common.BigToHash(t.json.Env.Random)
+ genesis.Difficulty = big.NewInt(0)
+ }
+ return genesis
}
func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (core.Message, error) {
diff --git a/trie/committer.go b/trie/committer.go
index 0721990a2179..db753e2fa0c4 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -44,7 +44,6 @@ type leaf struct {
// By 'some level' of parallelism, it's still the case that all leaves will be
// processed sequentially - onleaf will never be called in parallel or out of order.
type committer struct {
- tmp sliceBuffer
sha crypto.KeccakState
onleaf LeafCallback
@@ -55,7 +54,6 @@ type committer struct {
var committerPool = sync.Pool{
New: func() interface{} {
return &committer{
- tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
}
},
diff --git a/trie/database.go b/trie/database.go
index 58ca4e6f3caa..d71abeee476a 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -113,16 +113,9 @@ func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end u
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }
func (n rawFullNode) EncodeRLP(w io.Writer) error {
- var nodes [17]node
-
- for i, child := range n {
- if child != nil {
- nodes[i] = child
- } else {
- nodes[i] = nilValueNode
- }
- }
- return rlp.Encode(w, nodes)
+ eb := rlp.NewEncoderBuffer(w)
+ n.encode(eb)
+ return eb.Flush()
}
// rawShortNode represents only the useful data content of a short node, with the
@@ -164,11 +157,7 @@ func (n *cachedNode) rlp() []byte {
if node, ok := n.node.(rawNode); ok {
return node
}
- blob, err := rlp.EncodeToBytes(n.node)
- if err != nil {
- panic(err)
- }
- return blob
+ return nodeToBytes(n.node)
}
// obj returns the decoded and expanded trie node, either directly from the cache,
diff --git a/trie/hasher.go b/trie/hasher.go
index 3a62a2f1199c..7f0748c13df3 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -24,22 +24,12 @@ import (
"golang.org/x/crypto/sha3"
)
-type sliceBuffer []byte
-
-func (b *sliceBuffer) Write(data []byte) (n int, err error) {
- *b = append(*b, data...)
- return len(data), nil
-}
-
-func (b *sliceBuffer) Reset() {
- *b = (*b)[:0]
-}
-
// hasher is a type used for the trie Hash operation. A hasher has some
// internal preallocated temp space
type hasher struct {
sha crypto.KeccakState
- tmp sliceBuffer
+ tmp []byte
+ encbuf rlp.EncoderBuffer
parallel bool // Whether to use paralallel threads when hashing
}
@@ -47,8 +37,9 @@ type hasher struct {
var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
- tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
- sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ tmp: make([]byte, 0, 550), // cap is as large as a full fullNode.
+ sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ encbuf: rlp.NewEncoderBuffer(nil),
}
},
}
@@ -153,30 +144,41 @@ func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached
// into compact form for RLP encoding.
// If the rlp data is smaller than 32 bytes, `nil` is returned.
func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
- h.tmp.Reset()
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic("encode error: " + err.Error())
- }
+ n.encode(h.encbuf)
+ enc := h.encodedBytes()
- if len(h.tmp) < 32 && !force {
+ if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
- return h.hashData(h.tmp)
+ return h.hashData(enc)
}
// shortnodeToHash is used to creates a hashNode from a set of hashNodes, (which
// may contain nil values)
func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
- h.tmp.Reset()
- // Generate the RLP encoding of the node
- if err := n.EncodeRLP(&h.tmp); err != nil {
- panic("encode error: " + err.Error())
- }
+ n.encode(h.encbuf)
+ enc := h.encodedBytes()
- if len(h.tmp) < 32 && !force {
+ if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
- return h.hashData(h.tmp)
+ return h.hashData(enc)
+}
+
+// encodedBytes returns the result of the last encoding operation on h.encbuf.
+// This also resets the encoder buffer.
+//
+// All node encoding must be done like this:
+//
+// node.encode(h.encbuf)
+// enc := h.encodedBytes()
+//
+// This convention exists because node.encode can only be inlined/escape-analyzed when
+// called on a concrete receiver type.
+func (h *hasher) encodedBytes() []byte {
+ h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
+ h.encbuf.Reset(nil)
+ return h.tmp
}
// hashData hashes the provided data
diff --git a/trie/iterator.go b/trie/iterator.go
index 654772aa13b5..e0006ee05e3b 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/rlp"
)
// Iterator is a key-value trie iterator that traverses a Trie.
@@ -86,6 +85,10 @@ type NodeIterator interface {
// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
Path() []byte
+ // NodeBlob returns the rlp-encoded value of the current iterated node.
+ // If the node is an embedded node in its parent, nil is returned then.
+ NodeBlob() []byte
+
// Leaf returns true iff the current node is a leaf node.
Leaf() bool
@@ -115,7 +118,7 @@ type NodeIterator interface {
// Before adding a similar mechanism to any other place in Geth, consider
// making trie.Database an interface and wrapping at that level. It's a huge
// refactor, but it could be worth it if another occurrence arises.
- AddResolver(ethdb.KeyValueStore)
+ AddResolver(ethdb.KeyValueReader)
}
// nodeIteratorState represents the iteration state at one particular node of the
@@ -134,7 +137,7 @@ type nodeIterator struct {
path []byte // Path to the current node
err error // Failure set in case of an internal error in the iterator
- resolver ethdb.KeyValueStore // Optional intermediate resolver above the disk layer
+ resolver ethdb.KeyValueReader // Optional intermediate resolver above the disk layer
}
// errIteratorEnd is stored in nodeIterator.err when iteration is done.
@@ -151,15 +154,18 @@ func (e seekError) Error() string {
}
func newNodeIterator(trie *Trie, start []byte) NodeIterator {
- if trie.Hash() == emptyState {
- return new(nodeIterator)
+ if trie.Hash() == emptyRoot {
+ return &nodeIterator{
+ trie: trie,
+ err: errIteratorEnd,
+ }
}
it := &nodeIterator{trie: trie}
it.err = it.seek(start)
return it
}
-func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueReader) {
it.resolver = resolver
}
@@ -210,8 +216,7 @@ func (it *nodeIterator) LeafProof() [][]byte {
// Gather nodes that end up as hash nodes (or the root)
node, hashed := hasher.proofHash(item.node)
if _, ok := hashed.(hashNode); ok || i == 0 {
- enc, _ := rlp.EncodeToBytes(node)
- proofs = append(proofs, enc)
+ proofs = append(proofs, nodeToBytes(node))
}
}
return proofs
@@ -224,6 +229,18 @@ func (it *nodeIterator) Path() []byte {
return it.path
}
+func (it *nodeIterator) NodeBlob() []byte {
+ if it.Hash() == (common.Hash{}) {
+ return nil // skip the non-standalone node
+ }
+ blob, err := it.resolveBlob(it.Hash().Bytes(), it.Path())
+ if err != nil {
+ it.err = err
+ return nil
+ }
+ return blob
+}
+
func (it *nodeIterator) Error() error {
if it.err == errIteratorEnd {
return nil
@@ -362,6 +379,15 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
return resolved, err
}
+func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
+ if it.resolver != nil {
+ if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+ return blob, nil
+ }
+ }
+ return it.trie.resolveBlob(hash, path)
+}
+
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
if hash, ok := st.node.(hashNode); ok {
resolved, err := it.resolveHash(hash, path)
@@ -402,7 +428,7 @@ func findChild(n *fullNode, index int, path []byte, ancestor common.Hash) (node,
func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Hash) (*nodeIteratorState, []byte, bool) {
switch node := parent.node.(type) {
case *fullNode:
- //Full node, move to the first non-nil child.
+ // Full node, move to the first non-nil child.
if child, state, path, index := findChild(node, parent.index+1, it.path, ancestor); child != nil {
parent.index = index - 1
return state, path, true
@@ -480,8 +506,9 @@ func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []
}
func (it *nodeIterator) pop() {
- parent := it.stack[len(it.stack)-1]
- it.path = it.path[:parent.pathlen]
+ last := it.stack[len(it.stack)-1]
+ it.path = it.path[:last.pathlen]
+ it.stack[len(it.stack)-1] = nil
it.stack = it.stack[:len(it.stack)-1]
}
@@ -549,7 +576,11 @@ func (it *differenceIterator) Path() []byte {
return it.b.Path()
}
-func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *differenceIterator) NodeBlob() []byte {
+ return it.b.NodeBlob()
+}
+
+func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueReader) {
panic("not implemented")
}
@@ -660,7 +691,11 @@ func (it *unionIterator) Path() []byte {
return (*it.items)[0].Path()
}
-func (it *unionIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *unionIterator) NodeBlob() []byte {
+ return (*it.items)[0].NodeBlob()
+}
+
+func (it *unionIterator) AddResolver(resolver ethdb.KeyValueReader) {
panic("not implemented")
}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 95cafdd3bdce..9a46e9b99548 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -29,6 +29,19 @@ import (
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
+func TestEmptyIterator(t *testing.T) {
+ trie := newEmpty()
+ iter := trie.NodeIterator(nil)
+
+ seen := make(map[string]struct{})
+ for iter.Next(true) {
+ seen[string(iter.Path())] = struct{}{}
+ }
+ if len(seen) != 0 {
+ t.Fatal("Unexpected trie node iterated")
+ }
+}
+
func TestIterator(t *testing.T) {
trie := newEmpty()
vals := []struct{ k, v string }{
@@ -470,10 +483,18 @@ func (l *loggingDb) NewBatch() ethdb.Batch {
return l.backend.NewBatch()
}
+func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch {
+ return l.backend.NewBatchWithSize(size)
+}
+
func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- fmt.Printf("NewIterator\n")
return l.backend.NewIterator(prefix, start)
}
+
+func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) {
+ return l.backend.NewSnapshot()
+}
+
func (l *loggingDb) Stat(property string) (string, error) {
return l.backend.Stat(property)
}
@@ -521,3 +542,54 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
}
}
+
+func TestIteratorNodeBlob(t *testing.T) {
+ var (
+ db = memorydb.New()
+ triedb = NewDatabase(db)
+ trie, _ = New(common.Hash{}, triedb)
+ )
+ vals := []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ all := make(map[string]string)
+ for _, val := range vals {
+ all[val.k] = val.v
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ trie.Commit(nil)
+ triedb.Cap(0)
+
+ found := make(map[common.Hash][]byte)
+ it := trie.NodeIterator(nil)
+ for it.Next(true) {
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ found[it.Hash()] = it.NodeBlob()
+ }
+
+ dbIter := db.NewIterator(nil, nil)
+ defer dbIter.Release()
+
+ var count int
+ for dbIter.Next() {
+ got, present := found[common.BytesToHash(dbIter.Key())]
+ if !present {
+ t.Fatalf("Miss trie node %v", dbIter.Key())
+ }
+ if !bytes.Equal(got, dbIter.Value()) {
+ t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got)
+ }
+ count += 1
+ }
+ if count != len(found) {
+ t.Fatal("Find extra trie node via iterator")
+ }
+}
diff --git a/trie/node.go b/trie/node.go
index f4055e779a1b..bf3f024bb8a7 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -28,8 +28,9 @@ import (
var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
type node interface {
- fstring(string) string
cache() (hashNode, bool)
+ encode(w rlp.EncoderBuffer)
+ fstring(string) string
}
type (
@@ -52,16 +53,9 @@ var nilValueNode = valueNode(nil)
// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
- var nodes [17]node
-
- for i, child := range &n.Children {
- if child != nil {
- nodes[i] = child
- } else {
- nodes[i] = nilValueNode
- }
- }
- return rlp.Encode(w, nodes)
+ eb := rlp.NewEncoderBuffer(w)
+ n.encode(eb)
+ return eb.Flush()
}
func (n *fullNode) copy() *fullNode { copy := *n; return © }
diff --git a/trie/node_enc.go b/trie/node_enc.go
new file mode 100644
index 000000000000..cade35b707c2
--- /dev/null
+++ b/trie/node_enc.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func nodeToBytes(n node) []byte {
+ w := rlp.NewEncoderBuffer(nil)
+ n.encode(w)
+ result := w.ToBytes()
+ w.Flush()
+ return result
+}
+
+func (n *fullNode) encode(w rlp.EncoderBuffer) {
+ offset := w.List()
+ for _, c := range n.Children {
+ if c != nil {
+ c.encode(w)
+ } else {
+ w.Write(rlp.EmptyString)
+ }
+ }
+ w.ListEnd(offset)
+}
+
+func (n *shortNode) encode(w rlp.EncoderBuffer) {
+ offset := w.List()
+ w.WriteBytes(n.Key)
+ if n.Val != nil {
+ n.Val.encode(w)
+ } else {
+ w.Write(rlp.EmptyString)
+ }
+ w.ListEnd(offset)
+}
+
+func (n hashNode) encode(w rlp.EncoderBuffer) {
+ w.WriteBytes(n)
+}
+
+func (n valueNode) encode(w rlp.EncoderBuffer) {
+ w.WriteBytes(n)
+}
+
+func (n rawFullNode) encode(w rlp.EncoderBuffer) {
+ offset := w.List()
+ for _, c := range n {
+ if c != nil {
+ c.encode(w)
+ } else {
+ w.Write(rlp.EmptyString)
+ }
+ }
+ w.ListEnd(offset)
+}
+
+func (n *rawShortNode) encode(w rlp.EncoderBuffer) {
+ offset := w.List()
+ w.WriteBytes(n.Key)
+ if n.Val != nil {
+ n.Val.encode(w)
+ } else {
+ w.Write(rlp.EmptyString)
+ }
+ w.ListEnd(offset)
+}
+
+func (n rawNode) encode(w rlp.EncoderBuffer) {
+ w.Write(n)
+}
diff --git a/trie/proof.go b/trie/proof.go
index 51ecea0c39e6..88ca80b0e706 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
// Prove constructs a merkle proof for key. The result contains all encoded nodes
@@ -79,7 +78,7 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
if hash, ok := hn.(hashNode); ok || i == 0 {
// If the node's database encoding is a hash (or is the
// root node), it becomes a proof element.
- enc, _ := rlp.EncodeToBytes(n)
+ enc := nodeToBytes(n)
if !ok {
hash = hasher.hashData(enc)
}
@@ -406,7 +405,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
}
// hasRightElement returns the indicator whether there exists more elements
-// in the right side of the given path. The given path can point to an existent
+// on the right side of the given path. The given path can point to an existent
// key or a non-existent one. This function has the assumption that the whole
// path should already be resolved.
func hasRightElement(node node, key []byte) bool {
@@ -505,7 +504,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
if val != nil || hasRightElement(root, firstKey) {
return false, errors.New("more entries available")
}
- return hasRightElement(root, firstKey), nil
+ return false, nil
}
// Special case, there is only one element and two edge keys are same.
// In this case, we can't construct two edge paths. So handle it here.
@@ -563,7 +562,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
if tr.Hash() != rootHash {
return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
}
- return hasRightElement(root, keys[len(keys)-1]), nil
+ return hasRightElement(tr.root, keys[len(keys)-1]), nil
}
// get returns the child of the given node. Return nil if the
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 95ad6169c3bd..29866714c2d0 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -1067,3 +1067,36 @@ func nonRandomTrie(n int) (*Trie, map[string]*kv) {
}
return trie, vals
}
+
+func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
+ keys := [][]byte{
+ common.Hex2Bytes("aa10000000000000000000000000000000000000000000000000000000000000"),
+ common.Hex2Bytes("aa20000000000000000000000000000000000000000000000000000000000000"),
+ }
+ vals := [][]byte{
+ common.Hex2Bytes("02"),
+ common.Hex2Bytes("03"),
+ }
+ trie := new(Trie)
+ for i, key := range keys {
+ trie.Update(key, vals[i])
+ }
+ root := trie.Hash()
+ proof := memorydb.New()
+ start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
+ end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ if err := trie.Prove(start, 0, proof); err != nil {
+ t.Fatalf("failed to prove start: %v", err)
+ }
+ if err := trie.Prove(end, 0, proof); err != nil {
+ t.Fatalf("failed to prove end: %v", err)
+ }
+
+ more, err := VerifyRangeProof(root, start, end, keys, vals, proof)
+ if err != nil {
+ t.Fatalf("failed to verify range proof: %v", err)
+ }
+ if more != false {
+ t.Error("expected more to be false")
+ }
+}
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 76258c31123c..b38bb01b0fb3 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
var ErrCommitDisabled = errors.New("no database for committing")
@@ -224,6 +223,7 @@ func (st *StackTrie) insert(key, value []byte) {
switch st.nodeType {
case branchNode: /* Branch */
idx := int(key[0])
+
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
@@ -233,12 +233,14 @@ func (st *StackTrie) insert(key, value []byte) {
break
}
}
+
// Add new child
if st.children[idx] == nil {
st.children[idx] = newLeaf(key[1:], value, st.db)
} else {
st.children[idx].insert(key[1:], value)
}
+
case extNode: /* Ext */
// Compare both key chunks and see where they differ
diffidx := st.getDiffIndex(key)
@@ -326,10 +328,9 @@ func (st *StackTrie) insert(key, value []byte) {
p = st.children[0]
}
- // Create the two child leaves: the one containing the
- // original value and the one containing the new value
- // The child leave will be hashed directly in order to
- // free up some memory.
+ // Create the two child leaves: one containing the original
+ // value and another containing the new value. The child leaf
+ // is hashed directly in order to free up some memory.
origIdx := st.key[diffidx]
p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val, st.db)
p.children[origIdx].hash()
@@ -341,19 +342,22 @@ func (st *StackTrie) insert(key, value []byte) {
// over to the children.
st.key = st.key[:diffidx]
st.val = nil
+
case emptyNode: /* Empty */
st.nodeType = leafNode
st.key = key
st.val = value
+
case hashedNode:
panic("trying to insert into hash")
+
default:
panic("invalid type")
}
}
-// hash() hashes the node 'st' and converts it into 'hashedNode', if possible.
-// Possible outcomes:
+// hash converts st into a 'hashedNode', if possible. Possible outcomes:
+//
// 1. The rlp-encoded value was >= 32 bytes:
// - Then the 32-byte `hash` will be accessible in `st.val`.
// - And the 'st.type' will be 'hashedNode'
@@ -361,119 +365,116 @@ func (st *StackTrie) insert(key, value []byte) {
// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
// - And the 'st.type' will be 'hashedNode' AGAIN
//
-// This method will also:
-// set 'st.type' to hashedNode
-// clear 'st.key'
+// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
func (st *StackTrie) hash() {
- /* Shortcut if node is already hashed */
- if st.nodeType == hashedNode {
- return
- }
- // The 'hasher' is taken from a pool, but we don't actually
- // claim an instance until all children are done with their hashing,
- // and we actually need one
- var h *hasher
+ h := newHasher(false)
+ defer returnHasherToPool(h)
+
+ st.hashRec(h)
+}
+
+func (st *StackTrie) hashRec(hasher *hasher) {
+ // The switch below sets this to the RLP-encoding of this node.
+ var encodedNode []byte
switch st.nodeType {
+ case hashedNode:
+ return
+
+ case emptyNode:
+ st.val = emptyRoot.Bytes()
+ st.key = st.key[:0]
+ st.nodeType = hashedNode
+ return
+
case branchNode:
- var nodes [17]node
+ var nodes rawFullNode
for i, child := range st.children {
if child == nil {
nodes[i] = nilValueNode
continue
}
- child.hash()
+
+ child.hashRec(hasher)
if len(child.val) < 32 {
nodes[i] = rawNode(child.val)
} else {
nodes[i] = hashNode(child.val)
}
- st.children[i] = nil // Reclaim mem from subtree
+
+ // Release child back to pool.
+ st.children[i] = nil
returnToPool(child)
}
- nodes[16] = nilValueNode
- h = newHasher(false)
- defer returnHasherToPool(h)
- h.tmp.Reset()
- if err := rlp.Encode(&h.tmp, nodes); err != nil {
- panic(err)
- }
+
+ nodes.encode(hasher.encbuf)
+ encodedNode = hasher.encodedBytes()
+
case extNode:
- st.children[0].hash()
- h = newHasher(false)
- defer returnHasherToPool(h)
- h.tmp.Reset()
- var valuenode node
+ st.children[0].hashRec(hasher)
+
+ sz := hexToCompactInPlace(st.key)
+ n := rawShortNode{Key: st.key[:sz]}
if len(st.children[0].val) < 32 {
- valuenode = rawNode(st.children[0].val)
+ n.Val = rawNode(st.children[0].val)
} else {
- valuenode = hashNode(st.children[0].val)
- }
- n := struct {
- Key []byte
- Val node
- }{
- Key: hexToCompact(st.key),
- Val: valuenode,
- }
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic(err)
+ n.Val = hashNode(st.children[0].val)
}
+
+ n.encode(hasher.encbuf)
+ encodedNode = hasher.encodedBytes()
+
+ // Release child back to pool.
returnToPool(st.children[0])
- st.children[0] = nil // Reclaim mem from subtree
+ st.children[0] = nil
+
case leafNode:
- h = newHasher(false)
- defer returnHasherToPool(h)
- h.tmp.Reset()
st.key = append(st.key, byte(16))
sz := hexToCompactInPlace(st.key)
- n := [][]byte{st.key[:sz], st.val}
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic(err)
- }
- case emptyNode:
- st.val = emptyRoot.Bytes()
- st.key = st.key[:0]
- st.nodeType = hashedNode
- return
+ n := rawShortNode{Key: st.key[:sz], Val: valueNode(st.val)}
+
+ n.encode(hasher.encbuf)
+ encodedNode = hasher.encodedBytes()
+
default:
- panic("Invalid node type")
+ panic("invalid node type")
}
- st.key = st.key[:0]
+
st.nodeType = hashedNode
- if len(h.tmp) < 32 {
- st.val = common.CopyBytes(h.tmp)
+ st.key = st.key[:0]
+ if len(encodedNode) < 32 {
+ st.val = common.CopyBytes(encodedNode)
return
}
+
// Write the hash to the 'val'. We allocate a new val here to not mutate
// input values
- st.val = make([]byte, 32)
- h.sha.Reset()
- h.sha.Write(h.tmp)
- h.sha.Read(st.val)
+ st.val = hasher.hashData(encodedNode)
if st.db != nil {
// TODO! Is it safe to Put the slice here?
// Do all db implementations copy the value provided?
- st.db.Put(st.val, h.tmp)
+ st.db.Put(st.val, encodedNode)
}
}
-// Hash returns the hash of the current node
+// Hash returns the hash of the current node.
func (st *StackTrie) Hash() (h common.Hash) {
- st.hash()
- if len(st.val) != 32 {
- // If the node's RLP isn't 32 bytes long, the node will not
- // be hashed, and instead contain the rlp-encoding of the
- // node. For the top level node, we need to force the hashing.
- ret := make([]byte, 32)
- h := newHasher(false)
- defer returnHasherToPool(h)
- h.sha.Reset()
- h.sha.Write(st.val)
- h.sha.Read(ret)
- return common.BytesToHash(ret)
+ hasher := newHasher(false)
+ defer returnHasherToPool(hasher)
+
+ st.hashRec(hasher)
+ if len(st.val) == 32 {
+ copy(h[:], st.val)
+ return h
}
- return common.BytesToHash(st.val)
+
+ // If the node's RLP isn't 32 bytes long, the node will not
+ // be hashed, and instead contain the rlp-encoding of the
+ // node. For the top level node, we need to force the hashing.
+ hasher.sha.Reset()
+ hasher.sha.Write(st.val)
+ hasher.sha.Read(h[:])
+ return h
}
// Commit will firstly hash the entrie trie if it's still not hashed
@@ -483,23 +484,26 @@ func (st *StackTrie) Hash() (h common.Hash) {
//
// The associated database is expected, otherwise the whole commit
// functionality should be disabled.
-func (st *StackTrie) Commit() (common.Hash, error) {
+func (st *StackTrie) Commit() (h common.Hash, err error) {
if st.db == nil {
return common.Hash{}, ErrCommitDisabled
}
- st.hash()
- if len(st.val) != 32 {
- // If the node's RLP isn't 32 bytes long, the node will not
- // be hashed (and committed), and instead contain the rlp-encoding of the
- // node. For the top level node, we need to force the hashing+commit.
- ret := make([]byte, 32)
- h := newHasher(false)
- defer returnHasherToPool(h)
- h.sha.Reset()
- h.sha.Write(st.val)
- h.sha.Read(ret)
- st.db.Put(ret, st.val)
- return common.BytesToHash(ret), nil
+
+ hasher := newHasher(false)
+ defer returnHasherToPool(hasher)
+
+ st.hashRec(hasher)
+ if len(st.val) == 32 {
+ copy(h[:], st.val)
+ return h, nil
}
- return common.BytesToHash(st.val), nil
+
+ // If the node's RLP isn't 32 bytes long, the node will not
+ // be hashed (and committed), and instead contain the rlp-encoding of the
+ // node. For the top level node, we need to force the hashing+commit.
+ hasher.sha.Reset()
+ hasher.sha.Write(st.val)
+ hasher.sha.Read(h[:])
+ st.db.Put(h[:], st.val)
+ return h, nil
}
diff --git a/trie/sync.go b/trie/sync.go
index 81d38ee3a694..7eaa35244e4e 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -155,8 +155,7 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, cal
}
// If database says this is a duplicate, then at least the trie node is
// present, and we hold the assumption that it's NOT legacy contract code.
- blob := rawdb.ReadTrieNode(s.database, root)
- if len(blob) > 0 {
+ if rawdb.HasTrieNode(s.database, root) {
return
}
// Assemble the new sub-trie sync request
@@ -193,7 +192,7 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
// sync is expected to run with a fresh new node. Even there
// exists the code with legacy format, fetch and store with
// new scheme anyway.
- if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
+ if rawdb.HasCodeWithPrefix(s.database, hash) {
return
}
// Assemble the new sub-trie sync request
@@ -401,7 +400,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
}
// If database says duplicate, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
- if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
+ if rawdb.HasTrieNode(s.database, hash) {
continue
}
// Locally unknown node, schedule for retrieval
diff --git a/trie/trie.go b/trie/trie.go
index 13343112b8d7..e40b03be38c3 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -514,6 +514,15 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
return nil, &MissingNodeError{NodeHash: hash, Path: prefix}
}
+func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) {
+ hash := common.BytesToHash(n)
+ blob, _ := t.db.Node(hash)
+ if len(blob) != 0 {
+ return blob, nil
+ }
+ return nil, &MissingNodeError{NodeHash: hash, Path: prefix}
+}
+
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
diff --git a/trie/trie_test.go b/trie/trie_test.go
index be0df8a54426..a1fdc8cd58c4 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -414,8 +414,9 @@ func runRandTest(rt randTest) bool {
values := make(map[string]string) // tracks content of the trie
for i, step := range rt {
- fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
- step.op, step.key, step.value, i)
+ // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
+ // step.op, step.key, step.value, i)
+
switch step.op {
case opUpdate:
tr.Update(step.key, step.value)
@@ -675,6 +676,8 @@ func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement
func (s *spongeDb) Get(key []byte) ([]byte, error) { return nil, errors.New("no such elem") }
func (s *spongeDb) Delete(key []byte) error { panic("implement me") }
func (s *spongeDb) NewBatch() ethdb.Batch { return &spongeBatch{s} }
+func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch { return &spongeBatch{s} }
+func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") }
func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") }
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error { return nil }
@@ -884,7 +887,8 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
- fmt.Printf("root: %x\n", stRoot)
+
+ t.Logf("root: %x\n", stRoot)
if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
}