From b842d4cc9053b4bcab2307826c174df2d88eb9e1 Mon Sep 17 00:00:00 2001
From: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Date: Thu, 6 May 2021 18:58:39 +0200
Subject: [PATCH] all: implement EIP-compliant verkle trees

verkle: Implement Trie, NodeIterator and Database interfaces
Fix crash in TestDump
Fix TestDump
Fix TrieCopy
remove unnecessary traces
fix: Error() returned errIteratorEnd in verkle node iterator
rewrite the iterator and change the signature of OpenStorageTrie
add the adapter to reuse the account trie for storage
don't try to deserialize a storage leaf into an account
Fix statedb unit tests (#14)
* debug code
* Fix more unit tests
* remove traces
* Go back to the full range
One tree to rule them all
remove updateRoot, there is no root to update
store code inside the account leaf
fix build
save current state for Sina
Update go-verkle to latest
Charge WITNESS_*_COST gas on storage loads
Add witness costs for SSTORE as well
Charge witness gas in the case of code execution
corresponding code deletion
add a --verkle flag to separate verkle experiments from regular geth operations
use the snapshot to get data
stateless execution from block witness
AccessWitness functions
Add block generation test + genesis snapshot generation test
stateless block execution (#18)
* test stateless block execution
* Force tree resolution before generating the proof
increased coverage in stateless test execution (#19)
* test stateless block execution
* Force tree resolution before generating the proof
* increase coverage in stateless test execution
ensure geth compiles
fix issues in tests with verkle trees deactivated
Ensure stateless data is available when executing statelessly (#20)
* Ensure stateless data is available when executing statelessly
* Actual execution of a stateless block
* bugfixes in stateless block execution
* code cleanup
  - Reduce PR footprint by reverting NewEVM to its original signature
  - Move the access witness to the block context
  - prepare for a change in AW semantics. Need to store the initial values.
  - Use the touch helper function, DRY
* revert the signature of MustCommit to its original form (#21)
fix leaf proofs in stateless execution (#22)
* Fixes in witness pre-state
* Add the recipient's nonce to the witness
* reduce PR footprint and investigate issue in root state calculation
* quick build fix
cleanup: Remove extra parameter in ToBlock
revert ToBlock to its older signature
fix import cycle in vm tests
fix linter issue
fix appveyor build
fix nil pointers in tests
Add indices, yis and Cis to the block's Verkle proof
upgrade geth dependency to drop geth's common dep
fix cmd/devp2p tests
fix rebase issues
quell an appveyor warning
fix address touching in SLOAD and SSTORE
fix access witness for code size
touch target account data before calling
make sure the proper locations get touched in (ext)codecopy
touch all code pages in execution
add pushdata to witness
remove useless code in genesis snapshot generation
testnet: fix some of the rebase/drift issues
Fix verkle proof generation in block
fix an issue occurring when chunking past the code size
fix: ensure the code copy doesn't extend past the code size
Upgrade go-verkle to its IPA version (#24)
fixes for the IPA testnet
upgrade to latest go-verkle
update go-verkle to get more fixes
simplify code by removing all stateless references (#25)
fix verkle proof test by enforcing values alignment to 32 bytes
remove unneeded KZG tag
fix the stateless test
Move AccessWitness into StateDB (#27)
* move AccessWitness into StateDB
* set Accesses in TxContext constructor
* Ensures that a statedb is initialized with a witness
* copy AccessWitness in StateDB.Copy. use copied state in miner worker.commit.
* remove redundant line
Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Fix contract creation issue
enable verkle on cancun block: take 2 (#28)
* enable verkle on cancun block: take 2
* fix typo. make unreachable line panic message more clear
fix rebase issues
fix linter issue
merge undefined instead of panicking (#30)
initialize the new access witness if not already present
fix boundary check in PUSH
fix bound check in code chunking
fix boundary condition check in PUSH32
add circleci support (#32)
* add circleci support
* disable linter, which is broken again
* actually run tests
remove unnecessary cancun block declaration in tests (#33)
upgrade go version (#34)
fix calculation in get_tree_key_for_storage_slot (#35)
use the witness in statedb, revert applyTx signature (#36)
* use the witness in statedb, revert applyTx signature
* fix miner tests
* fix catalyst build
Remove access witness from the signature of Process (#38)
consensus/ethash: ensure uncle accounts are included in block witness (#40)
consensus/ethash: move accumulation of coinbase witness before coinbase account is credited (#41)
remove outdated comment
miner: embed verkle proof in sealing block (#39)
* miner: embed verkle proof in sealing block
* add test to ensure that verkle proof is present in mined blocks
Refactor witness-accumulation in EVM (#42)
* make push dynamically-charged. charge witness gas costs for push. refactor evm witness gas charging to move logic for touching a range of bytecode into a helper method 'touchEachChunksAndChargeGas'
* add witness gas calculation for CodeCopy, ExtCodeCopy, SLoad back to gas_table.go
* witness gas charging for CALL
* remove explicit reference to evm.TxContext
* core/vm: make touchEachChunksAndChargeGas handle nil code value
* core/vm: call implementation, separate out witnesses into touch/set
* some fixes
* remove witness touching from opCall: this will go in evm.go
* remove witness touching for call from gas_table.go
* (hopefully) fix tests
* add SSTORE witness charging that was removed mistakenly
* charge witness gas for call
* clean up and comment touchEachChunksAndChargeGas
* make suggested changes
* address remaining points
* fix build issues
* remove double-charging for contract creation witness gas charging
call onleaf in verkle commit (#45)
replace sha256 with pedersen_hash in get_key (#46)
* replace sha256 with pedersen_hash
* fix: prevent an OOB
* workaround timeout in unit test
* update go-ipa and reduce the timeout
* fix for unit tests: do not call NewAccessWitness in NewEVMTxContext (#49)
* potential fix: do not call NewAccessWitness in NewEVMTxContext
* more fixes: check for the existence of Accesses
* fix absence of witness in copy
* fix another witness issue
* workaround: ensure the prefetcher is off in verkle mode
* fix the remaining issues in tests
* review feedback
* fix witness allocation in stateless test
reactivate working lines in test
fix: don't create settings each time a key is calculated (#53)
fix: don't use rlp in the tree for slot values (#51)
* fix: don't use rlp in the tree for slot values
* fix timeout after rebase
core/vm: don't include contract deployer bytecode in AccessWitness or charge witness access costs for it (#54)
Verkle EXTCODECOPY implementation (#55)
* core/vm: verkle extcodecopy naive way (do jumpdest analysis on target contract every EXTCODECOPY)
* no double-charge
* address edge-case in touchEachChunksAndChargeGas
* simplify line
Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Use IsCancun where applicable (#56)
* replace Accesses != nil with IsCancun(...)
* fix
Charge witness gas when calling/creating a contract (#60)
* Charge witness gas when calling/creating a contract
Co-authored-by: Jared Wasinger
* gofmt
* replace checks with evm.Access!=nil with IsCancun
* remove double-charging of witness access costs for contract creation initialization
Co-authored-by: Jared Wasinger
verkle proof deserialization (#61)
* use proof serialization
* remove cruft
* save current state
* fix most issues up to this point
* fix remaining build issues
* update the go.mod to use the right branch
* remove custom-defined set type
* update go-verkle to get merged PRs
* extract key, value data from proof
* only activate precomp calculations if this is a verkle chain
Co-authored-by: Jared Wasinger
fix: build more than one block in stateless test (#66)
* reproduce the bug
* fix the nil AccessWitness when Resetting
* fix nonce management in blocks
* fix: make sure the snapshot is reused during the chain generation
Fix GetTreeKey (#65)
* trie/utils: fix GetTreeKey
* disable the faulty test, tracking issue opened
Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Implement latest stateless gas charging spec (#63)
* latest stateless gas-charging spec: add write event charging
* rename gas calculation function so that it doesn't seem it's charging gas
Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Various fixes for rust-verkle proof format compatibility (#67)
* code to extract the block
* separate proof from keys in block
* display state root of block 0
* change file name to reflect the correct block number
* use RLP instead of flat binary for keyvals in block
* update go-verkle to fix build
* fix rebase issues
* make test pass
* fix issue in map copy
Co-authored-by: Jared Wasinger
Fix contract creation aw (#68)
* fix contract creation AccessWitness
* remove tracing
* remove tracing
* remove block2.rlp
* move repeated error message into its own error type
fix a couple linter issues
Sload fix (#69)
* move SLOAD gas calculation to gasSLoadEIP2929
* re-add gasSLoad, used by SSTORE
* make requested changes
fix: witness gas costs being charged more than once (#70)
* fix: witness gas costs being charged more than once
* update test case
Co-authored-by: Jared Wasinger
re-enable VerkleTrie::ToDot
core/state: implement GetCodeSize for VerkleDB (#75)
implement core/state: implement GetCodeSize for VerkleDB
copy the pre-state, use an untouched copy for the proof (#72)
test to compare the proof format with that of rust verkle (#73)
* test to compare the proof format with that of rust verkle
* comment block extraction code
check proof in verkle tree test
don't delete from the trie when we encounter empty state object if verkle is enabled (#74)
core/state: write code to rawdb under codehash key when verkle is enabled (#76)
force the 32-byte alignment of nonce and balance (#71)
* force the 32-byte alignment of nonce and balance
* review feedback: fix endianness in output
core/state - ensure storage value is set properly in snapshot when verkle is enabled (#78)
fixes for the return type change in go-verkle (#77)
* fixes for the return type change in go-verkle
* update go-verkle
* fix endianness issue + workaround for witness fix
* add changes to miner
Co-authored-by: Jared Wasinger
calculate tree index correctly for SLOAD access event (#79)
core/vm: witness write event for SSTORE (#80)
* core/vm: witness write event for SSTORE
* remove TODO that is addressed by this branch
fix estimateGas (handle ErrInsufficientBalanceWitness).
Return proper error message when ErrInsufficientBalanceWitness is encountered (#81) Verkle/fix/perdersen hash (#84) * remove ineffective statement * fix endianness in pedersen_hash refactor: set witness account values in getStateObject() (#83) * enforce 32-byte alignment * save current state * write account values to witness from getObject * code cleanup + sanity checks * fix RLP serialization of missing keys * remove code redundancy for GetTreeKeyCodeChunk * fix stem calculation issue for code and storage * remove redundant SetLeafValue calls * Add a contract creation tx to the test * fix botched module version update * detail gas calculation cost in verkle contract deployment fix verkle trie iterator to not miss leaf values in the first index of a leaf node (#88) creation-time CODECOPY adds no code to the witness (#86) * creation-time CODECOPY adds no code to the witness * Add a more complex test to ensure EXTCODECOPY is called * Fix offset calculations in the slot function * minor tweaks * fix initial value capture for storage * fix build * fix: get the proper byte in get_tree_key_for_storage_slot * fix length calculation in getDataAndAdjustedBounds * fix verkle test small simplification in order to facilitate rebase upgrade go-ipa + go-verkle: faster precomp loading fix: place slot value, not its rlp encoding in witness fix: code chunification with incorrect header (#89) * fix: code chunification with incorrect header * add a test from rust-verkle and fix offset bug * fix ci build * complete coverage of rust-verkle chunking tests * fix: make sure the witness value is set * add another code chunking test from rust-verkle spec: don't add the trailing pushdata for a final PUSHn (#90) cleanup: don't evaluate 0 coeffs in get_tree_key (#87) ensure no overflow occurs in the chunk touch function load the full coinbase account in reward fix: previous commit broke stateless test rework the way the gas is charged per code chunk update go-verkle to grab the fix for #200 fix: don't add PUSH1-data during contract deployment (#91) * fix: don't add PUSH1-data during contract deployment * remove import cycle in tests (#92) fix: redundant proof-of-absence stems (#93) test: missing children of the same internal node produce a single extension status (#94) ParseNode with commitment parameter (#95) review feedback for rebase over kiln (#97) safe gas add (#98) * safe gas add * fix build ensure 10-byte addresses are right-aligned in pedersen_hash (#99) Co-authored-by: Tanishq Jasoria Remove the code analysis interface to reduce PR footprint (#100) * Remove the code analysis interface to reduce PR footprint * fix unit test fix: set IsDeployment=false outside of contract creation (#101) remove trailing line verkle test: count the code chunks in the witness (#102) temp fix: little endian pedersen_hash (#103) factor the amount of polynomial evaluations in access witness (#106) reuse polynomial evaluation in account update (#108) fix: remove redundant balance touching in access witness (#107) add github workflow reuse address point evaluation in code chunking (#109) --- .github/workflows/go.yml | 57 ++++ cmd/geth/snapshot.go | 4 +- common/fdlimit/fdlimit_test.go | 3 +- consensus/ethash/consensus.go | 19 ++ core/blockchain.go | 22 +- core/chain_makers.go | 102 +++++++ core/error.go | 5 + core/genesis.go | 42 ++- core/state/database.go | 84 ++++++ core/state/iterator.go | 11 +- core/state/pruner/pruner.go | 4 +- core/state/snapshot/snapshot.go | 14 +- core/state/state_object.go | 89 +++++- core/state/statedb.go | 
127 ++++++++- core/state/statedb_test.go | 7 +- core/state/sync_test.go | 5 +- core/state_processor.go | 7 + core/state_processor_test.go | 227 +++++++++++++++ core/state_transition.go | 49 +++- core/types/access_witness.go | 480 ++++++++++++++++++++++++++++++++ core/types/block.go | 10 + core/vm/common.go | 12 + core/vm/contract.go | 26 +- core/vm/evm.go | 43 +++ core/vm/gas_table.go | 140 +++++++++- core/vm/instructions.go | 112 +++++++- core/vm/interface.go | 6 + core/vm/interpreter.go | 8 + core/vm/jump_table.go | 2 + core/vm/operations_acl.go | 19 +- eth/backend.go | 8 + eth/tracers/js/tracer_test.go | 5 +- go.mod | 2 + go.sum | 5 + internal/ethapi/api.go | 2 +- light/trie.go | 2 + miner/worker.go | 24 ++ miner/worker_test.go | 100 ++++++- params/config.go | 37 ++- params/protocol_params.go | 7 + tests/state_test_util.go | 2 +- trie/database.go | 1 + trie/secure_trie.go | 4 + trie/utils/verkle.go | 246 ++++++++++++++++ trie/utils/verkle_test.go | 86 ++++++ trie/verkle.go | 379 +++++++++++++++++++++++++ trie/verkle_iterator.go | 247 ++++++++++++++++ trie/verkle_test.go | 396 ++++++++++++++++++++++++++ 48 files changed, 3212 insertions(+), 77 deletions(-) create mode 100644 .github/workflows/go.yml create mode 100644 core/types/access_witness.go create mode 100644 trie/utils/verkle.go create mode 100644 trie/utils/verkle_test.go create mode 100644 trie/verkle.go create mode 100644 trie/verkle_iterator.go create mode 100644 trie/verkle_test.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000000..eb3bcae30e5a --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,57 @@ +name: Go lint and test + +on: + push: + branches: [ master ] + pull_request: + branches: [ master, verkle-trie-proof-in-block-rebased ] + +jobs: + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: Build + run: go build -v ./... + + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: Download golangci-lint + run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s latest + + - name: Lint + run: ./bin/golangci-lint run + + - name: Vet + run: go vet + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: Download precomputed points + run: wget -nv https://github.com/gballet/go-verkle/releases/download/banderwagon/precomp + + - name: Test + run: go test ./... 
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 82206b58b8ea..2b7b71f638ab 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -203,7 +203,7 @@ func verifyState(ctx *cli.Context) error { log.Error("Failed to load head block") return errors.New("no head block") } - snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false) + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false, false) if err != nil { log.Error("Failed to open snapshot tree", "err", err) return err @@ -478,7 +478,7 @@ func dumpState(ctx *cli.Context) error { if err != nil { return err } - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false, false) if err != nil { return err } diff --git a/common/fdlimit/fdlimit_test.go b/common/fdlimit/fdlimit_test.go index 21362b8463a3..9fd5e9fc3cbd 100644 --- a/common/fdlimit/fdlimit_test.go +++ b/common/fdlimit/fdlimit_test.go @@ -17,7 +17,6 @@ package fdlimit import ( - "fmt" "testing" ) @@ -30,7 +29,7 @@ func TestFileDescriptorLimits(t *testing.T) { t.Fatal(err) } if hardlimit < target { - t.Skip(fmt.Sprintf("system limit is less than desired test target: %d < %d", hardlimit, target)) + t.Skipf("system limit is less than desired test target: %d < %d", hardlimit, target) } if limit, err := Current(); err != nil || limit <= 0 { diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 1c38b80ea59b..510491db9bc8 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" "golang.org/x/crypto/sha3" ) @@ -666,10 +667,28 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Sub(r, header.Number) r.Mul(r, blockReward) r.Div(r, big8) + + if config.IsCancun(header.Number) { + uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes()) + state.Witness().TouchAddressOnReadAndComputeGas(uncleCoinbase) + state.Witness().SetLeafValue(uncleCoinbase, state.GetBalance(uncle.Coinbase).Bytes()) + } state.AddBalance(uncle.Coinbase, r) r.Div(blockReward, big32) reward.Add(reward, r) } + if config.IsCancun(header.Number) { + coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes()) + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + coinbase[31] = 0 // mark version + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + coinbase[31] = 2 // mark nonce + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + coinbase[31] = 3 // mark code keccak + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + balance := state.GetBalance(header.Coinbase) + state.Witness().SetLeafValue(coinbase, balance.Bytes()) + } state.AddBalance(header.Coinbase, reward) } diff --git a/core/blockchain.go b/core/blockchain.go index 3b677aca6ca6..0ad0260cb172 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -228,15 +228,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par futureBlocks, _ := lru.New(maxFutureBlocks) bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - triegc: prque.New(nil), - stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ - Cache: cacheConfig.TrieCleanLimit, - Journal: 
cacheConfig.TrieCleanJournal, - Preimages: cacheConfig.Preimages, - }), + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + triegc: prque.New(nil), quit: make(chan struct{}), chainmu: syncx.NewClosableMutex(), bodyCache: bodyCache, @@ -286,6 +281,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par // Make sure the state associated with the block is available head := bc.CurrentBlock() + bc.stateCache = state.NewDatabaseWithConfig(db, &trie.Config{ + Cache: cacheConfig.TrieCleanLimit, + Journal: cacheConfig.TrieCleanJournal, + Preimages: cacheConfig.Preimages, + UseVerkle: chainConfig.IsCancun(head.Header().Number), + }) + if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { // Head state is missing, before the state recovery, find out the // disk layer point of snapshot(if it's enabled). Make sure the @@ -378,7 +380,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer) recover = true } - bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover) + bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover, chainConfig.IsCancun(head.Header().Number)) } // Start future block processor. diff --git a/core/chain_makers.go b/core/chain_makers.go index c7bf60a4b06e..495e32074666 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -24,10 +24,12 @@ import ( "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // BlockGen creates blocks for testing. 
@@ -284,6 +286,106 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return blocks, receipts } +func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) { + if config == nil { + config = params.TestChainConfig + } + blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) + chainreader := &fakeChainReader{config: config} + genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { + b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} + b.header = makeHeader(chainreader, parent, statedb, b.engine) + preState := statedb.Copy() + + // Mutate the state and block according to any hard-fork specs + if daoBlock := config.DAOForkBlock; daoBlock != nil { + limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) + if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 { + if config.DAOForkSupport { + b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra) + } + } + } + if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { + misc.ApplyDAOHardFork(statedb) + } + // Execute any user modifications to the block + if gen != nil { + gen(i, b) + } + if b.engine != nil { + // Finalize and seal the block + block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts) + if err != nil { + panic(err) + } + + // Write state changes to db + root, err := statedb.Commit(config.IsEIP158(b.header.Number)) + if err != nil { + panic(fmt.Sprintf("state write error: %v", err)) + } + if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil { + panic(fmt.Sprintf("trie write error: %v", err)) + } + + // Generate an associated verkle proof + tr := preState.GetTrie() + if !tr.IsVerkle() { + panic("tree should be verkle") + } + + vtr := tr.(*trie.VerkleTrie) + // Make sure all keys are resolved before + // building the proof. Ultimately, node + // resolution can be done with a prefetcher + // or from GetCommitmentsAlongPath. + kvs := statedb.Witness().KeyVals() + keys := statedb.Witness().Keys() + for _, key := range keys { + _, err := vtr.TryGet(key) + if err != nil { + panic(err) + } + + // Sanity check: ensure all flagged addresses have an associated + // value: keys is built from Chunks and kvs from InitialValue. 
+ if _, exists := kvs[string(key)]; !exists { + panic(fmt.Sprintf("address not in access witness: %x", key)) + } + } + + // sanity check: ensure all values correspond to a flagged key by + // comparing the lengths of both structures: they should be equal + if len(kvs) != len(keys) { + panic("keys without a value in witness") + } + + vtr.Hash() + p, k, err := vtr.ProveAndSerialize(keys, kvs) + block.SetVerkleProof(p, k) + if err != nil { + panic(err) + } + return block, b.receipts + } + return nil, nil + } + var snaps *snapshot.Tree + for i := 0; i < n; i++ { + statedb, err := state.New(parent.Root(), state.NewDatabaseWithConfig(db, &trie.Config{UseVerkle: true}), snaps) + if err != nil { + panic(err) + } + block, receipt := genblock(i, parent, statedb) + blocks[i] = block + receipts[i] = receipt + parent = block + snaps = statedb.Snaps() + } + return blocks, receipts +} + func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { var time uint64 if parent.Time() == 0 { diff --git a/core/error.go b/core/error.go index 51ebefc137bc..238d881d14e7 100644 --- a/core/error.go +++ b/core/error.go @@ -63,6 +63,11 @@ var ( // have enough funds for transfer(topmost call only). ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer") + // ErrInsufficientBalanceWitness is returned if the transaction sender has enough + // funds to cover the transfer, but not enough to pay for witness access/modification + // costs for the transaction + ErrInsufficientBalanceWitness = errors.New("insufficient funds to cover witness access costs for transaction") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/core/genesis.go b/core/genesis.go index aa7d704ea2c7..5461f77354f8 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -237,6 +237,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } + // Just commit the new block if there is no stored genesis block. stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { @@ -252,13 +253,29 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override } return genesis.Config, block.Hash(), nil } + // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) - if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil { - if genesis == nil { - genesis = DefaultGenesisBlock() + + var trieCfg *trie.Config + + if genesis == nil { + storedcfg := rawdb.ReadChainConfig(db, stored) + if storedcfg == nil { + panic("this should never be reached: if genesis is nil, the config is already present or 'geth init' is being called which created it (in the code above, which means genesis != nil)") } + + if storedcfg.CancunBlock != nil { + if storedcfg.CancunBlock.Cmp(big.NewInt(0)) != 0 { + panic("cancun block must be 0") + } + + trieCfg = &trie.Config{UseVerkle: storedcfg.IsCancun(big.NewInt(header.Number.Int64()))} + } + } + + if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, trieCfg), nil); err != nil { // Ensure the stored genesis matches with the given one. 
hash := genesis.ToBlock(nil).Hash() if hash != stored { @@ -349,6 +366,11 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { if db == nil { db = rawdb.NewMemoryDatabase() } + var trieCfg *trie.Config + if g.Config != nil { + trieCfg = &trie.Config{UseVerkle: g.Config.IsCancun(big.NewInt(int64(g.Number)))} + } + statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, trieCfg), nil) root, err := g.Alloc.flush(db) if err != nil { panic(err) @@ -433,6 +455,20 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big return g.MustCommit(db) } +func DefaultVerkleGenesisBlock() *Genesis { + return &Genesis{ + Config: params.VerkleChainConfig, + Nonce: 86, + GasLimit: 0x2fefd8, + Difficulty: big.NewInt(1), + Alloc: map[common.Address]GenesisAccount{ + common.BytesToAddress([]byte{97, 118, 97, 209, 72, 165, 43, 239, 81, 162, 104, 199, 40, 179, 162, 27, 88, 249, 67, 6}): { + Balance: big.NewInt(0).Lsh(big.NewInt(1), 27), + }, + }, + } +} + // DefaultGenesisBlock returns the Ethereum main net genesis block. func DefaultGenesisBlock() *Genesis { return &Genesis{ diff --git a/core/state/database.go b/core/state/database.go index ce5d8d731715..3553c92a6062 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" + "github.com/gballet/go-verkle" lru "github.com/hashicorp/golang-lru" ) @@ -104,6 +105,9 @@ type Trie interface { // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error + + // IsVerkle returns true if the trie is verkle-tree based + IsVerkle() bool } // NewDatabase creates a backing store for state. The returned database is safe for @@ -118,6 +122,13 @@ func NewDatabase(db ethdb.Database) Database { // large memory cache. func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { csc, _ := lru.New(codeSizeCacheSize) + if config != nil && config.UseVerkle { + return &VerkleDB{ + db: trie.NewDatabaseWithConfig(db, config), + codeSizeCache: csc, + codeCache: fastcache.New(codeCacheSize), + } + } return &cachingDB{ db: trie.NewDatabaseWithConfig(db, config), codeSizeCache: csc, @@ -202,3 +213,76 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro func (db *cachingDB) TrieDB() *trie.Database { return db.db } + +// VerkleDB implements state.Database for a verkle tree +type VerkleDB struct { + db *trie.Database + codeSizeCache *lru.Cache + codeCache *fastcache.Cache +} + +// OpenTrie opens the main account trie. +func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) { + if root == (common.Hash{}) || root == emptyRoot { + return trie.NewVerkleTrie(verkle.New(), db.db), nil + } + payload, err := db.db.DiskDB().Get(root[:]) + if err != nil { + return nil, err + } + + r, err := verkle.ParseNode(payload, 0, root[:]) + if err != nil { + panic(err) + } + return trie.NewVerkleTrie(r, db.db), err +} + +// OpenStorageTrie opens the storage trie of an account. +func (db *VerkleDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { + // alternatively, return accTrie + panic("should not be called") +} + +// CopyTrie returns an independent copy of the given trie. 
+func (db *VerkleDB) CopyTrie(tr Trie) Trie { + t, ok := tr.(*trie.VerkleTrie) + if ok { + return t.Copy(db.db) + } + + panic("invalid tree type != VerkleTrie") +} + +// ContractCode retrieves a particular contract's code. +func (db *VerkleDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + return code, nil + } + code := rawdb.ReadCode(db.db.DiskDB(), codeHash) + if len(code) > 0 { + db.codeCache.Set(codeHash.Bytes(), code) + db.codeSizeCache.Add(codeHash, len(code)) + return code, nil + } + return nil, errors.New("not found") +} + +// ContractCodeSize retrieves a particular contracts code's size. +func (db *VerkleDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + return len(code), nil + } + code := rawdb.ReadCode(db.db.DiskDB(), codeHash) + if len(code) > 0 { + db.codeCache.Set(codeHash.Bytes(), code) + db.codeSizeCache.Add(codeHash, len(code)) + return len(code), nil + } + return 0, nil +} + +// TrieDB retrieves the low level trie database used for data storage. +func (db *VerkleDB) TrieDB() *trie.Database { + return db.db +} diff --git a/core/state/iterator.go b/core/state/iterator.go index 611df52431eb..aa8e455a23e7 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -76,6 +76,14 @@ func (it *NodeIterator) step() error { // Initialize the iterator if we've just started if it.stateIt == nil { it.stateIt = it.state.trie.NodeIterator(nil) + + // If the trie is a verkle trie, then the data and state + // are the same tree, and as a result both iterators are + // the same. This is a hack meant for both tree types to + // work. + if _, ok := it.state.trie.(*trie.VerkleTrie); ok { + it.dataIt = it.stateIt + } } // If we had data nodes previously, we surely have at least state nodes if it.dataIt != nil { @@ -100,10 +108,11 @@ func (it *NodeIterator) step() error { it.state, it.stateIt = nil, nil return nil } - // If the state trie node is an internal entry, leave as is + // If the state trie node is an internal entry, leave as is. if !it.stateIt.Leaf() { return nil } + // Otherwise we've reached an account node, initiate data iteration var account types.StateAccount if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 2f4b068d88f3..2751d78ecf20 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -89,7 +89,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6 if headBlock == nil { return nil, errors.New("Failed to load head block") } - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false, false) if err != nil { return nil, err // The relevant snapshot(s) might not exist } @@ -362,7 +362,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err // - The state HEAD is rewound already because of multiple incomplete `prune-state` // In this case, even the state HEAD is not exactly matched with snapshot, it // still feasible to recover the pruning correctly. 
- snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true, false) if err != nil { return err // The relevant snapshot(s) might not exist } diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 76200851e469..5ad832694f8f 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -24,6 +24,7 @@ import ( "sync" "sync/atomic" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" @@ -183,7 +184,7 @@ type Tree struct { // This case happens when the snapshot is 'ahead' of the state trie. // - otherwise, the entire snapshot is considered invalid and will be recreated on // a background thread. -func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) { +func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool, useVerkle bool) (*Tree, error) { // Create a new, empty snapshot tree snap := &Tree{ diskdb: diskdb, @@ -202,6 +203,17 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm } if err != nil { if rebuild { + if useVerkle { + snap.layers = map[common.Hash]snapshot{ + root: &diskLayer{ + diskdb: diskdb, + triedb: triedb, + root: root, + cache: fastcache.New(cache * 1024 * 1024), + }, + } + return snap, nil + } log.Warn("Failed to load snapshot, regenerating", "err", err) snap.Rebuild(root) return snap, nil diff --git a/core/state/state_object.go b/core/state/state_object.go index 1ffb7eb40228..e92633bbc8fa 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -18,6 +18,7 @@ package state import ( "bytes" + "encoding/binary" "fmt" "io" "math/big" @@ -28,6 +29,9 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) var emptyCodeHash = crypto.Keccak256(nil) @@ -69,6 +73,8 @@ type stateObject struct { data types.StateAccount db *StateDB + pointEval *verkle.Point + // DB error. // State objects are used by the consensus core and VM which are // unable to deal with database-level errors. Any error that occurs @@ -100,6 +106,24 @@ func (s *stateObject) empty() bool { // newObject creates a state object. 
func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject { + if db.trie.IsVerkle() { + var nonce, balance, version []byte + + // preserve nil as a balance value, it means it's not in the tree + // use is as a heuristic for the nonce being null as well + if data.Balance != nil { + nonce = make([]byte, 32) + balance = make([]byte, 32) + version = make([]byte, 32) + for i, b := range data.Balance.Bytes() { + balance[len(data.Balance.Bytes())-1-i] = b + } + + binary.LittleEndian.PutUint64(nonce[:8], data.Nonce) + } + db.witness.SetGetObjectTouchedLeaves(address.Bytes(), version, balance[:], nonce[:], data.CodeHash) + } + if data.Balance == nil { data.Balance = new(big.Int) } @@ -109,10 +133,16 @@ func newObject(db *StateDB, address common.Address, data types.StateAccount) *st if data.Root == (common.Hash{}) { data.Root = emptyRoot } + var pointEval *verkle.Point + if db.GetTrie().IsVerkle() { + pointEval = trieUtils.EvaluateAddressPoint(address.Bytes()) + } + return &stateObject{ db: db, address: address, addrHash: crypto.Keccak256Hash(address[:]), + pointEval: pointEval, data: data, originStorage: make(Storage), pendingStorage: make(Storage), @@ -219,6 +249,9 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } // If the snapshot is unavailable or reading from it fails, load from the database. if s.db.snap == nil || err != nil { + if s.db.GetTrie().IsVerkle() { + panic("verkle trees use the snapshot") + } start := time.Now() enc, err = s.getTrie(db).TryGet(key.Bytes()) if metrics.EnabledExpensive { @@ -237,6 +270,21 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } value.SetBytes(content) } + + // Capture the initial value of the location in the verkle proof witness + if s.db.GetTrie().IsVerkle() { + if err != nil { + return common.Hash{} + } + loc := new(uint256.Int).SetBytes(key[:]) + index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(s.pointEval, loc) + if len(enc) > 0 { + s.db.Witness().SetLeafValue(index, value.Bytes()) + } else { + s.db.Witness().SetLeafValue(index, nil) + } + } + s.originStorage[key] = value return value } @@ -317,7 +365,12 @@ func (s *stateObject) updateTrie(db Database) Trie { // The snapshot storage map for the object var storage map[common.Hash][]byte // Insert all the pending updates into the trie - tr := s.getTrie(db) + var tr Trie + if s.db.trie.IsVerkle() { + tr = s.db.trie + } else { + tr = s.getTrie(db) + } hasher := s.db.hasher usedStorage := make([][]byte, 0, len(s.pendingStorage)) @@ -330,12 +383,24 @@ func (s *stateObject) updateTrie(db Database) Trie { var v []byte if (value == common.Hash{}) { - s.setError(tr.TryDelete(key[:])) + if tr.IsVerkle() { + k := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(s.pointEval, new(uint256.Int).SetBytes(key[:])) + s.setError(tr.TryDelete(k)) + //s.db.db.TrieDB().DiskDB().Delete(append(s.address[:], key[:]...)) + } else { + s.setError(tr.TryDelete(key[:])) + } s.db.StorageDeleted += 1 } else { // Encoding []byte cannot fail, ok to ignore the error. 
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) - s.setError(tr.TryUpdate(key[:], v)) + if !tr.IsVerkle() { + s.setError(tr.TryUpdate(key[:], v)) + } else { + k := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(s.pointEval, new(uint256.Int).SetBytes(key[:])) + // Update the trie, with v as a value + s.setError(tr.TryUpdate(k, value[:])) + } s.db.StorageUpdated += 1 } // If state snapshotting is active, cache the data til commit @@ -459,12 +524,21 @@ func (s *stateObject) Code(db Database) []byte { return s.code } if bytes.Equal(s.CodeHash(), emptyCodeHash) { + if s.db.GetTrie().IsVerkle() { + // Mark the code size and code hash as empty + s.db.witness.SetObjectCodeTouchedLeaves(s.address.Bytes(), nil, nil) + } return nil } code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { s.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) } + if s.db.GetTrie().IsVerkle() { + var cs [32]byte + binary.LittleEndian.PutUint64(cs[:8], uint64(len(code))) + s.db.witness.SetObjectCodeTouchedLeaves(s.address.Bytes(), cs[:], s.CodeHash()) + } s.code = code return code } @@ -477,12 +551,21 @@ func (s *stateObject) CodeSize(db Database) int { return len(s.code) } if bytes.Equal(s.CodeHash(), emptyCodeHash) { + if s.db.trie.IsVerkle() { + var sz [32]byte + s.db.witness.SetLeafValuesMessageCall(s.address.Bytes(), sz[:]) + } return 0 } size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) } + if s.db.trie.IsVerkle() { + var sz [32]byte + binary.LittleEndian.PutUint64(sz[:8], uint64(size)) + s.db.witness.SetLeafValuesMessageCall(s.address.Bytes(), sz[:]) + } return size } diff --git a/core/state/statedb.go b/core/state/statedb.go index a36d65fce791..c73dd52a2744 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "encoding/binary" "errors" "fmt" "math/big" @@ -33,6 +34,8 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" ) type revision struct { @@ -102,6 +105,8 @@ type StateDB struct { // Per-transaction access list accessList *accessList + witness *types.AccessWitness + // Journal of state modifications. This is the backbone of // Snapshot and RevertToSnapshot. 
journal *journal @@ -147,6 +152,15 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) accessList: newAccessList(), hasher: crypto.NewKeccakState(), } + if tr.IsVerkle() { + sdb.witness = types.NewAccessWitness() + if sdb.snaps == nil { + sdb.snaps, err = snapshot.New(db.TrieDB().DiskDB(), db.TrieDB(), 1, root, false, true, false, true) + if err != nil { + return nil, err + } + } + } if sdb.snaps != nil { if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { sdb.snapDestructs = make(map[common.Hash]struct{}) @@ -157,6 +171,18 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) return sdb, nil } +func (s *StateDB) Snaps() *snapshot.Tree { + return s.snaps +} + +func (s *StateDB) Witness() *types.AccessWitness { + return s.witness +} + +func (s *StateDB) SetWitness(aw *types.AccessWitness) { + s.witness = aw +} + // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. @@ -165,7 +191,7 @@ func (s *StateDB) StartPrefetcher(namespace string) { s.prefetcher.close() s.prefetcher = nil } - if s.snap != nil { + if s.snap != nil && !s.trie.IsVerkle() { s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) } } @@ -269,6 +295,24 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { return common.Big0 } +func (s *StateDB) GetNonceLittleEndian(address common.Address) []byte { + var nonceBytes [8]byte + binary.LittleEndian.PutUint64(nonceBytes[:], s.GetNonce(address)) + return nonceBytes[:] +} + +func (s *StateDB) GetBalanceLittleEndian(address common.Address) []byte { + var paddedBalance [32]byte + balanceBytes := s.GetBalance(address).Bytes() + // swap to little-endian + for i, j := 0, len(balanceBytes)-1; i < j; i, j = i+1, j-1 { + balanceBytes[i], balanceBytes[j] = balanceBytes[j], balanceBytes[i] + } + + copy(paddedBalance[:len(balanceBytes)], balanceBytes) + return paddedBalance[:len(balanceBytes)] +} + func (s *StateDB) GetNonce(addr common.Address) uint64 { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -463,8 +507,33 @@ func (s *StateDB) updateStateObject(obj *stateObject) { } // Encode the account and update the account trie addr := obj.Address() + if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil { - s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) + s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err)) + } + if s.trie.IsVerkle() { + if len(obj.code) > 0 { + cs := make([]byte, 32) + binary.BigEndian.PutUint64(cs, uint64(len(obj.code))) + if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil { + s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err)) + } + + if obj.dirtyCode { + if chunks, err := trie.ChunkifyCode(obj.code); err == nil { + for i := range chunks { + s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunkWithEvaluatedAddress(obj.pointEval, uint256.NewInt(uint64(i))), chunks[i][:]) + } + } else { + s.setError(err) + } + } + } else { + cs := []byte{0} + if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil { + s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err)) + } + } } // If state snapshotting is active, cache the data til commit. 
Note, this @@ -482,10 +551,15 @@ func (s *StateDB) deleteStateObject(obj *stateObject) { if metrics.EnabledExpensive { defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) } + // Delete the account from the trie - addr := obj.Address() - if err := s.trie.TryDelete(addr[:]); err != nil { - s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) + // Post-verkle, the only case where this can occur is a static call + // to a non-existent account which creates an empty stateObject + if !s.trie.IsVerkle() { + addr := obj.Address() + if err := s.trie.TryDelete(addr[:]); err != nil { + s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) + } } } @@ -533,6 +607,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { data.Root = emptyRoot } } + } // If snapshot unavailable or reading from it failed, load from the database if data == nil { @@ -662,6 +737,9 @@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } + if s.witness != nil { + state.witness = s.witness.Copy() + } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), @@ -848,7 +926,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // to pull useful data from disk. for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot(s.db) + if s.trie.IsVerkle() { + obj.updateTrie(s.db) + } else { + obj.updateRoot(s.db) + } } } // Now we're about to start to write changes to the trie. The trie is so far @@ -898,6 +980,20 @@ func (s *StateDB) clearJournalAndRefund() { s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires } +// GetTrie returns the account trie. +func (s *StateDB) GetTrie() Trie { + return s.trie +} + +func (s *StateDB) Cap(root common.Hash) error { + if s.snaps != nil { + return s.snaps.Cap(root, 0) + } + // pre-verkle path: noop if s.snaps hasn't been + // initialized. + return nil +} + // Commit writes the state to the underlying in-memory trie database. 
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if s.dbErr != nil { @@ -911,17 +1007,26 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { codeWriter := s.db.TrieDB().DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { - // Write any contract code associated with the state object - if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) - obj.dirtyCode = false - } // Write any storage changes in the state object to its storage trie committed, err := obj.CommitTrie(s.db) if err != nil { return common.Hash{}, err } storageCommitted += committed + // Write any contract code associated with the state object + if obj.code != nil && obj.dirtyCode { + if s.trie.IsVerkle() { + if chunks, err := trie.ChunkifyCode(obj.code); err == nil { + for i := range chunks { + s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunkWithEvaluatedAddress(obj.pointEval, uint256.NewInt(uint64(i))), chunks[i][:]) + } + } else { + s.setError(err) + } + } + rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) + obj.dirtyCode = false + } } } if len(s.stateObjectsDirty) > 0 { diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index e9576d4dc44d..98d76a6de778 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -704,7 +704,10 @@ func TestMissingTrieNodes(t *testing.T) { memDb := rawdb.NewMemoryDatabase() db := NewDatabase(memDb) var root common.Hash - state, _ := New(common.Hash{}, db, nil) + state, err := New(common.Hash{}, db, nil) + if err != nil { + panic("nil stte") + } addr := common.BytesToAddress([]byte("so")) { state.SetBalance(addr, big.NewInt(1)) @@ -736,7 +739,7 @@ func TestMissingTrieNodes(t *testing.T) { } // Modify the state state.SetBalance(addr, big.NewInt(2)) - root, err := state.Commit(false) + root, err = state.Commit(false) if err == nil { t.Fatalf("expected error, got root :%x", root) } diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 83c5aa2df7a8..cb62b4a62d35 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -69,7 +69,10 @@ func makeTestState() (Database, common.Hash, []*testAccount) { state.updateStateObject(obj) accounts = append(accounts, acc) } - root, _ := state.Commit(false) + root, err := state.Commit(false) + if err != nil { + panic(err) + } // Return the generated state return db, root, accounts diff --git a/core/state_processor.go b/core/state_processor.go index d4c77ae41042..bdb21ac68664 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -95,6 +95,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) + if config.IsCancun(blockNumber) { + txContext.Accesses = types.NewAccessWitness() + } evm.Reset(txContext, statedb) // Apply the transaction to the current state (included in the env). 
@@ -128,6 +131,10 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } + if config.IsCancun(blockNumber) { + statedb.Witness().Merge(txContext.Accesses) + } + // Set the receipt logs and create the bloom filter. receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index aa8e4bebf9d4..0b4201a903a0 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -17,8 +17,13 @@ package core import ( + //"bytes" + "bytes" "crypto/ecdsa" + + //"fmt" "math/big" + //"os" "testing" "github.com/ethereum/go-ethereum/common" @@ -31,6 +36,8 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + + //"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -340,3 +347,223 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr // Assemble and return the final block for sealing return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) } + +// A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness +// will not contain that copied data. +// Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 +var ( + codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050
565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) + codeWithExtCodeCopyGas = uint64(192372) +) + +func TestProcessVerkle(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + CancunBlock: big.NewInt(0), + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. 
+ genesis := gspec.MustCommit(db) + blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + defer blockchain.Stop() + + code := common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) + txCost1 := params.WitnessBranchWriteCost*2 + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*3 + params.WitnessChunkReadCost*10 + params.TxGas + txCost2 := params.WitnessBranchWriteCost + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*2 + params.WitnessChunkReadCost*10 + params.TxGas + intrinsic, _ := IntrinsicGas(code, nil, true, true, true) + contractCreationCost := intrinsic + 17339 + blockGasUsagesExpected := []uint64{ + txCost1*2 + txCost2, + txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, + } + chain, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) { + // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) + tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + + // Add two contract creations in block #2 + if i == 1 { + tx, _ = types.SignTx(types.NewContractCreation(6, big.NewInt(16), 3000000, big.NewInt(875000000), code), signer, testKey) + gen.AddTx(tx) + + tx, _ = types.SignTx(types.NewContractCreation(7, big.NewInt(32), 3000000, big.NewInt(875000000), codeWithExtCodeCopy), signer, testKey) + gen.AddTx(tx) + } + }) + + // Uncomment to extract block #2 + //f, _ := os.Create("block2.rlp") + //defer f.Close() + //var buf bytes.Buffer + //rlp.Encode(&buf, chain[1]) + //f.Write(buf.Bytes()) + //fmt.Printf("root= %x\n", chain[0].Root()) + + _, err := blockchain.InsertChain(chain) + if err != nil { + t.Fatalf("block imported with error: %v", err) + } + + for i := 0; i < 2; i++ { + b := blockchain.GetBlockByNumber(uint64(i) + 1) + if b == nil { + t.Fatalf("expected block %d to be present in chain", i+1) + } + if b.GasUsed() != blockGasUsagesExpected[i] { + t.Fatalf("expected block txs to use %d, got %d\n", blockGasUsagesExpected[i], b.GasUsed()) + } + } +} + +func TestProcessVerkleCodeDeployExec(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + CancunBlock: big.NewInt(0), + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is 
saved into the tree+database. + genesis := gspec.MustCommit(db) + blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + defer blockchain.Stop() + + // compiled Storage.sol + contractCreationInput := common.FromHex(`608060405234801561001057600080fd5b50610150806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80632e64cec11461003b5780636057361d14610059575b600080fd5b610043610075565b60405161005091906100d9565b60405180910390f35b610073600480360381019061006e919061009d565b61007e565b005b60008054905090565b8060008190555050565b60008135905061009781610103565b92915050565b6000602082840312156100b3576100b26100fe565b5b60006100c184828501610088565b91505092915050565b6100d3816100f4565b82525050565b60006020820190506100ee60008301846100ca565b92915050565b6000819050919050565b600080fd5b61010c816100f4565b811461011757600080fd5b5056fea2646970667358221220404e37f487a89a932dca5e77faaf6ca2de3b991f93d230604b1b8daaef64766264736f6c63430008070033`) + callStoreInput := common.FromHex(`6057361d00000000000000000000000000000000000000000000000000000000deadbeef`) + contractAddr := common.HexToAddress("3a220f351252089d385b29beca14e27f204c296a") + chain, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) { + if i == 0 { + // Create the contract in block #1 + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(0), 3000000, big.NewInt(875000000), contractCreationInput), signer, testKey) + gen.AddTx(tx) + } else { + // Call the contract's `store` function in block #2 + tx, _ := types.SignTx(types.NewTransaction(1, contractAddr, big.NewInt(0), 3000000, big.NewInt(875000000), callStoreInput), signer, testKey) + gen.AddTx(tx) + } + }) + + _, err := blockchain.InsertChain(chain) + if err != nil { + t.Fatalf("block imported with error: %v", err) + } + + // Check that the location for the contract is available in the witness + // and is reported as not present. + b1 := blockchain.GetBlockByNumber(1) + if b1 == nil { + t.Fatalf("expected block %d to be present in chain", 1) + } + var ( + hascode bool + contractStem [31]byte + ) + + // Look for the stem that the contract will be deployed to + for _, kv := range b1.Header().VerkleKeyVals { + if kv.Key[31] == 0x80 { + // Make sure there is only one contract deployment + if hascode { + t.Fatalf("found two contract deployments, one at %x and one at %x. 
There can be only one.", contractStem[:], kv.Key[:31]) + } + + hascode = true + copy(contractStem[:], kv.Key[:31]) + } + } + + if !hascode { + t.Fatal("could not find a contract deployment") + } + + // Check that the code pages show up in the second block + b2 := blockchain.GetBlockByNumber(2) + if b2 == nil { + t.Fatalf("expected block %d to be present in chain", 2) + } + + hascode = false + codeCount := 0 + for _, kv := range b2.Header().VerkleKeyVals { + if bytes.Equal(contractStem[:], kv.Key[:31]) && kv.Key[31] >= 128 { + hascode = true + codeCount++ + + if len(kv.Value) == 0 { + t.Fatal("chunk value for called code should not be empty in witness") + } + + // check that the code isn't full 0s + var notallzeros bool + for _, b := range kv.Value { + notallzeros = notallzeros || (b != 0) + } + if !notallzeros { + t.Fatalf("0-filled code chunk %x", kv.Key) + } + } + } + + if !hascode { + t.Fatal("could not find contract code in the witness of the calling block") + } + + if codeCount != 10 { + t.Fatalf("got %d code chunks, expected 10", codeCount) + } +} diff --git a/core/state_transition.go b/core/state_transition.go index 3b5f81b16632..240729ffa13c 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -115,7 +115,7 @@ func (result *ExecutionResult) Revert() []byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. -func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 if isContractCreation && isHomestead { @@ -259,6 +259,19 @@ func (st *StateTransition) preCheck() error { return st.buyGas() } +// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool +// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false +// otherwise, do the subtraction setting the result in gasPool and return true +func tryConsumeGas(gasPool *uint64, gas uint64) bool { + if *gasPool < gas { + *gasPool = 0 + return false + } + + *gasPool -= gas + return true +} + // TransitionDb will transition the state by applying the current message and // returning the evm execution result with following fields. 
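Not part of the patch: a minimal sketch of how the tryConsumeGas helper defined above is meant to be used when pre-charging witness gas; gasLeft and witnessCost are illustrative names, not identifiers from this change.

    gasLeft := uint64(100000)   // gas remaining for the message
    witnessCost := uint64(2500) // hypothetical witness access cost
    if !tryConsumeGas(&gasLeft, witnessCost) {
        // gasLeft has been zeroed; the caller aborts with an
        // ErrInsufficientBalanceWitness-style error
    }
    // on success, gasLeft == 97500 and execution continues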
// @@ -310,6 +323,40 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { if st.gas < gas { return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas) } + if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber) { + var originBalance, originNonceBytes []byte + + targetAddr := msg.To() + originAddr := msg.From() + + statelessGasOrigin := st.evm.Accesses.TouchTxOriginAndComputeGas(originAddr.Bytes()) + if !tryConsumeGas(&st.gas, statelessGasOrigin) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gas, gas) + } + originBalance = st.evm.StateDB.GetBalanceLittleEndian(originAddr) + originNonce := st.evm.StateDB.GetNonce(originAddr) + originNonceBytes = st.evm.StateDB.GetNonceLittleEndian(originAddr) + st.evm.Accesses.SetTxOriginTouchedLeaves(originAddr.Bytes(), originBalance, originNonceBytes, st.evm.StateDB.GetCodeSize(originAddr)) + + if msg.To() != nil { + statelessGasDest := st.evm.Accesses.TouchTxExistingAndComputeGas(targetAddr.Bytes(), msg.Value().Sign() != 0) + if !tryConsumeGas(&st.gas, statelessGasDest) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gas, gas) + } + + // ensure the code size ends up in the access witness + st.evm.StateDB.GetCodeSize(*targetAddr) + } else { + contractAddr := crypto.CreateAddress(originAddr, originNonce) + if !tryConsumeGas(&st.gas, st.evm.Accesses.TouchAndChargeContractCreateInit(contractAddr.Bytes(), msg.Value().Sign() != 0)) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gas, gas) + } + } + + if st.gas < gas { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gas, gas) + } + } st.gas -= gas // Check clause 6 diff --git a/core/types/access_witness.go b/core/types/access_witness.go new file mode 100644 index 000000000000..2a296efc43db --- /dev/null +++ b/core/types/access_witness.go @@ -0,0 +1,480 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package types + +import ( + "encoding/binary" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" +) + +type VerkleStem [31]byte + +// Mode specifies how a tree location has been accessed +// for the byte value: +// the first bit is set if the branch has been read +// the second bit is set if the branch has been edited +type Mode byte + +const ( + AccessWitnessReadFlag = Mode(1) + AccessWitnessWriteFlag = Mode(2) +) + +// AccessWitness lists the locations of the state that are being accessed +// during the production of a block. +type AccessWitness struct { + // Branches flags if a given branch has been loaded + Branches map[VerkleStem]Mode + + // Chunks flags if a given chunk has been accessed + Chunks map[common.Hash]Mode + + // InitialValue contains either `nil` if the location + // didn't exist before it was accessed, or the value + // that a location had before the execution of this + // block. + InitialValue map[string][]byte +} + +func NewAccessWitness() *AccessWitness { + return &AccessWitness{ + Branches: make(map[VerkleStem]Mode), + Chunks: make(map[common.Hash]Mode), + InitialValue: make(map[string][]byte), + } +} + +func (aw *AccessWitness) SetLeafValue(addr []byte, value []byte) { + var stem [31]byte + copy(stem[:], addr[:31]) + + // Sanity check: ensure that the location has been declared + if _, exist := aw.InitialValue[string(addr)]; !exist { + if len(value) == 32 || len(value) == 0 { + aw.InitialValue[string(addr)] = value + } else { + var aligned [32]byte + copy(aligned[:len(value)], value) + + aw.InitialValue[string(addr)] = aligned[:] + } + } +} + +func (aw *AccessWitness) touchAddressOnWrite(addr []byte) (bool, bool, bool) { + var stem VerkleStem + var stemWrite, chunkWrite, chunkFill bool + copy(stem[:], addr[:31]) + + // NOTE: stem, selector access flags already exist in their + // respective maps because this function is called at the end of + // processing a read access event + + if (aw.Branches[stem] & AccessWitnessWriteFlag) == 0 { + stemWrite = true + aw.Branches[stem] |= AccessWitnessWriteFlag + } + + chunkValue := aw.Chunks[common.BytesToHash(addr)] + // if chunkValue.mode XOR AccessWitnessWriteFlag + if ((chunkValue & AccessWitnessWriteFlag) == 0) && ((chunkValue | AccessWitnessWriteFlag) != 0) { + chunkWrite = true + chunkValue |= AccessWitnessWriteFlag + aw.Chunks[common.BytesToHash(addr)] = chunkValue + } + + // TODO charge chunk filling costs if the leaf was previously empty in the state + /* + if chunkWrite { + if _, err := verkleDb.TryGet(addr); err != nil { + chunkFill = true + } + } + */ + + return stemWrite, chunkWrite, chunkFill +} + +// TouchAddress adds any missing addr to the witness and returns respectively +// true if the stem or the chunk weren't already present.
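As a reading aid (not part of the patch): a 32-byte tree key is treated here as a 31-byte stem plus a one-byte leaf index, which is why Branches is keyed by VerkleStem while Chunks is keyed by the full 32-byte key. A minimal sketch, assuming a key obtained from the trie/utils helpers used elsewhere in this patch:

    var key [32]byte        // e.g. the version key returned by utils.GetTreeKeyVersion
    var stem VerkleStem
    copy(stem[:], key[:31]) // Branches entry: per-stem access mode
    leaf := key[31]         // Chunks entry: per-leaf access mode, selected by this byte
    _, _ = stem, leaf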
+func (aw *AccessWitness) touchAddress(addr []byte, isWrite bool) (bool, bool, bool, bool, bool) { + var ( + stem [31]byte + stemRead, selectorRead bool + stemWrite, selectorWrite, chunkFill bool + ) + copy(stem[:], addr[:31]) + + // Check for the presence of the stem + if _, hasStem := aw.Branches[stem]; !hasStem { + stemRead = true + aw.Branches[stem] = AccessWitnessReadFlag + } + + // Check for the presence of the leaf selector + if _, hasSelector := aw.Chunks[common.BytesToHash(addr)]; !hasSelector { + selectorRead = true + aw.Chunks[common.BytesToHash(addr)] = AccessWitnessReadFlag + } + + if isWrite { + stemWrite, selectorWrite, chunkFill = aw.touchAddressOnWrite(addr) + } + + return stemRead, selectorRead, stemWrite, selectorWrite, chunkFill +} + +func (aw *AccessWitness) touchAddressAndChargeGas(addr []byte, isWrite bool) uint64 { + var gas uint64 + + stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := aw.touchAddress(addr, isWrite) + + if stemRead { + gas += params.WitnessBranchReadCost + } + if selectorRead { + gas += params.WitnessChunkReadCost + } + if stemWrite { + gas += params.WitnessBranchWriteCost + } + if selectorWrite { + gas += params.WitnessChunkWriteCost + } + if selectorFill { + gas += params.WitnessChunkFillCost + } + + return gas +} + +func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte) uint64 { + return aw.touchAddressAndChargeGas(addr, true) +} + +func (aw *AccessWitness) TouchAddressOnReadAndComputeGas(addr []byte) uint64 { + return aw.touchAddressAndChargeGas(addr, false) +} + +// Merge is used to merge the witness that got generated during the execution +// of a tx, with the accumulation of witnesses that were generated during the +// execution of all the txs preceding this one in a given block. +func (aw *AccessWitness) Merge(other *AccessWitness) { + for k := range other.Branches { + if _, ok := aw.Branches[k]; !ok { + aw.Branches[k] = other.Branches[k] + } + } + + for k, chunk := range other.Chunks { + if _, ok := aw.Chunks[k]; !ok { + aw.Chunks[k] = chunk + } + } + + for k, v := range other.InitialValue { + if _, ok := aw.InitialValue[k]; !ok { + aw.InitialValue[k] = v + } + } +} + +// Key returns, predictably, the list of keys that were touched during the +// buildup of the access witness. 
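A minimal sketch (not patch content) of how the flags above translate into charged gas, written from within this package: each access kind is only charged on the first touch of a given stem and chunk, using the params.Witness* constants referenced throughout this patch.

    aw := NewAccessWitness()
    key := make([]byte, 32)                        // some 32-byte tree key
    g1 := aw.TouchAddressOnReadAndComputeGas(key)  // WitnessBranchReadCost + WitnessChunkReadCost
    g2 := aw.TouchAddressOnReadAndComputeGas(key)  // 0: stem and chunk were already read
    g3 := aw.TouchAddressOnWriteAndComputeGas(key) // WitnessBranchWriteCost + WitnessChunkWriteCost
    _, _, _ = g1, g2, g3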
+func (aw *AccessWitness) Keys() [][]byte { + keys := make([][]byte, 0, len(aw.Chunks)) + for key := range aw.Chunks { + var k [32]byte + copy(k[:], key[:]) + keys = append(keys, k[:]) + } + return keys +} + +func (aw *AccessWitness) KeyVals() map[string][]byte { + result := make(map[string][]byte) + for k, v := range aw.InitialValue { + result[k] = v + } + return result +} + +func (aw *AccessWitness) Copy() *AccessWitness { + naw := &AccessWitness{ + Branches: make(map[VerkleStem]Mode), + Chunks: make(map[common.Hash]Mode), + InitialValue: make(map[string][]byte), + } + + naw.Merge(aw) + + return naw +} + +func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(addr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(balancekey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(noncekey[:]) + return gas +} + +func (aw *AccessWitness) TouchAndChargeMessageCall(addr []byte) uint64 { + var ( + gas uint64 + cskey [32]byte + ) + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(addr[:]) + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + return gas +} + +func (aw *AccessWitness) SetLeafValuesMessageCall(addr, codeSize []byte) { + var ( + cskey [32]byte + data [32]byte + ) + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(addr[:]) + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + aw.SetLeafValue(versionkey, data[:]) + aw.SetLeafValue(cskey[:], codeSize[:]) +} + +func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(utils.GetTreeKeyBalance(callerAddr[:])) + gas += aw.TouchAddressOnWriteAndComputeGas(utils.GetTreeKeyBalance(targetAddr[:])) + return gas +} + +// TouchAndChargeContractCreateInit charges access costs to initiate +// a contract creation +func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte, createSendsValue bool) uint64 { + var ( + balancekey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(addr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnWriteAndComputeGas(versionkey) + gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + if createSendsValue { + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + } + gas += aw.TouchAddressOnWriteAndComputeGas(ckkey[:]) + return gas +} + +// TouchAndChargeContractCreateCompleted charges access access costs after +// the completion of a contract creation to populate the created account in +// the tree +func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr 
[]byte, withValue bool) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(addr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnWriteAndComputeGas(versionkey) + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + return gas +} + +func (aw *AccessWitness) SetLeafValuesContractCreateCompleted(addr, codeSize, codeKeccak []byte) { + var ckkey [32]byte + cskey := utils.GetTreeKeyCodeSize(addr[:]) + copy(ckkey[:], cskey) + ckkey[31] = utils.CodeKeccakLeafKey + + aw.SetLeafValue(cskey, codeSize) + aw.SetLeafValue(ckkey[:], codeKeccak) +} + +func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(originAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + + return gas +} + +func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(targetAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(noncekey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(balancekey[:]) + + if sendsValue { + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + } + return gas +} + +func (aw *AccessWitness) SetTxOriginTouchedLeaves(originAddr, originBalance, originNonce []byte, codeSize int) { + var ( + balancekey, cskey, noncekey [32]byte + version [32]byte + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(originAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + + aw.SetLeafValue(versionkey, version[:]) + aw.SetLeafValue(balancekey[:], originBalance) + aw.SetLeafValue(noncekey[:], originNonce) + var cs [32]byte + binary.LittleEndian.PutUint64(cs[:8], uint64(codeSize)) + 
aw.SetLeafValue(cskey[:], cs[:]) +} + +func (aw *AccessWitness) SetTxExistingTouchedLeaves(targetAddr, targetBalance, targetNonce, targetCodeSize, targetCodeHash []byte) { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + version [32]byte + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(targetAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + aw.SetLeafValue(versionkey, version[:]) + aw.SetLeafValue(balancekey[:], targetBalance) + aw.SetLeafValue(noncekey[:], targetNonce) + aw.SetLeafValue(cskey[:], targetCodeSize) + aw.SetLeafValue(ckkey[:], targetCodeHash) +} + +func (aw *AccessWitness) SetGetObjectTouchedLeaves(targetAddr, version, targetBalance, targetNonce, targetCodeHash []byte) { + var balancekey, ckkey, noncekey [32]byte + versionkey := utils.GetTreeKeyVersion(targetAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + aw.SetLeafValue(versionkey, version[:]) + aw.SetLeafValue(balancekey[:], targetBalance) + aw.SetLeafValue(noncekey[:], targetNonce) + aw.SetLeafValue(ckkey[:], targetCodeHash) +} + +func (aw *AccessWitness) SetObjectCodeTouchedLeaves(addr, cs, ch []byte) { + var ckkey [32]byte + cskey := utils.GetTreeKeyCodeSize(addr[:]) + copy(ckkey[:], cskey) + ckkey[31] = utils.CodeKeccakLeafKey + + aw.SetLeafValue(cskey, cs) + aw.SetLeafValue(ckkey[:], ch) +} diff --git a/core/types/block.go b/core/types/block.go index 589a34cef6b5..66dfbe29af4b 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" + "github.com/gballet/go-verkle" ) var ( @@ -87,6 +88,10 @@ type Header struct { // BaseFee was added by EIP-1559 and is ignored in legacy headers. 
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` + // The verkle proof is ignored in legacy headers + VerkleProof []byte `json:"verkleProof" rlp:"optional"` + VerkleKeyVals []verkle.KeyValuePair `json:"verkleKeyVals" rlp:"optional"` + /* TODO (MariusVanDerWijden) Add this field once needed // Random was added during the merge and contains the BeaconState randomness @@ -334,6 +339,11 @@ func (b *Block) SanityCheck() error { return b.header.SanityCheck() } +func (b *Block) SetVerkleProof(vp []byte, kv []verkle.KeyValuePair) { + b.header.VerkleProof = vp + b.header.VerkleKeyVals = kv +} + type writeCounter common.StorageSize func (c *writeCounter) Write(b []byte) (int, error) { diff --git a/core/vm/common.go b/core/vm/common.go index 90ba4a4ad15b..ba75950e370b 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -63,6 +63,18 @@ func getData(data []byte, start uint64, size uint64) []byte { return common.RightPadBytes(data[start:end], int(size)) } +func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyPadded []byte, actualStart uint64, sizeNonPadded uint64) { + length := uint64(len(data)) + if start > length { + start = length + } + end := start + size + if end > length { + end = length + } + return common.RightPadBytes(data[start:end], int(size)), start, end - start +} + // toWordSize returns the ceiled word size required for memory expansion. func toWordSize(size uint64) uint64 { if size > math.MaxUint64-31 { diff --git a/core/vm/contract.go b/core/vm/contract.go index bb0902969ec7..d2074440af27 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -20,6 +20,8 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" "github.com/holiman/uint256" ) @@ -49,6 +51,7 @@ type Contract struct { CallerAddress common.Address caller ContractRef self ContractRef + addressPoint *verkle.Point jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis. analysis bitvec // Locally cached result of JUMPDEST analysis @@ -58,6 +61,9 @@ type Contract struct { CodeAddr *common.Address Input []byte + // is the execution frame represented by this object a contract deployment + IsDeployment bool + Gas uint64 value *big.Int } @@ -93,15 +99,15 @@ func (c *Contract) validJumpdest(dest *uint256.Int) bool { if OpCode(c.Code[udest]) != JUMPDEST { return false } - return c.isCode(udest) + return c.IsCode(udest) } -// isCode returns true if the provided PC location is an actual opcode, as +// IsCode returns true if the provided PC location is an actual opcode, as // opposed to a data-segment following a PUSHN operation. -func (c *Contract) isCode(udest uint64) bool { +func (c *Contract) IsCode(udest uint64) bool { // Do we already have an analysis laying around? if c.analysis != nil { - return c.analysis.codeSegment(udest) + return c.analysis.IsCode(udest) } // Do we have a contract hash already? // If we do have a hash, that means it's a 'regular' contract. For regular @@ -117,7 +123,7 @@ func (c *Contract) isCode(udest uint64) bool { } // Also stash it in current contract for faster access c.analysis = analysis - return analysis.codeSegment(udest) + return analysis.IsCode(udest) } // We don't have the code hash, most likely a piece of initcode not already // in state trie. 
In that case, we do an analysis, and save it locally, so @@ -126,7 +132,7 @@ func (c *Contract) isCode(udest uint64) bool { if c.analysis == nil { c.analysis = codeBitmap(c.Code) } - return c.analysis.codeSegment(udest) + return c.analysis.IsCode(udest) } // AsDelegate sets the contract to be a delegate call and returns the current @@ -172,6 +178,14 @@ func (c *Contract) Address() common.Address { return c.self.Address() } +func (c *Contract) AddressPoint() *verkle.Point { + if c.addressPoint == nil { + c.addressPoint = utils.EvaluateAddressPoint(c.Address().Bytes()) + } + + return c.addressPoint +} + // Value returns the contract's value (sent to it from it's caller) func (c *Contract) Value() *big.Int { return c.value diff --git a/core/vm/evm.go b/core/vm/evm.go index dd55618bf812..1909b0bf661a 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -22,6 +22,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" @@ -84,6 +85,8 @@ type TxContext struct { // Message information Origin common.Address // Provides information for ORIGIN GasPrice *big.Int // Provides information for GASPRICE + + Accesses *types.AccessWitness } // EVM is the Ethereum Virtual Machine base object and provides @@ -126,6 +129,9 @@ type EVM struct { // NewEVM returns a new EVM. The returned EVM is not thread safe and should // only ever be used *once*. func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM { + if txCtx.Accesses == nil && chainConfig.IsCancun(blockCtx.BlockNumber) { + txCtx.Accesses = types.NewAccessWitness() + } evm := &EVM{ Context: blockCtx, TxContext: txCtx, @@ -141,6 +147,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // Reset resets the EVM with a new transaction context.Reset // This is not threadsafe and should only be done very cautiously. func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { + if txCtx.Accesses == nil && evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + txCtx.Accesses = types.NewAccessWitness() + } evm.TxContext = txCtx evm.StateDB = statedb } @@ -161,6 +170,19 @@ func (evm *EVM) Interpreter() *EVMInterpreter { return evm.interpreter } +// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool +// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false +// otherwise, do the subtraction setting the result in gasPool and return true +func tryConsumeGas(gasPool *uint64, gas uint64) bool { + if *gasPool < gas { + *gasPool = 0 + return false + } + + *gasPool -= gas + return true +} + // Call executes the contract associated with the addr with the given input as // parameters. 
It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an @@ -177,8 +199,13 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas snapshot := evm.StateDB.Snapshot() p, isPrecompile := evm.precompile(addr) + var creation bool if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + // proof of absence + tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(caller.Address().Bytes())) + } // Calling a non existing account, don't do anything, but ping the tracer if evm.Config.Debug { if evm.depth == 0 { @@ -192,6 +219,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas return nil, gas, nil } evm.StateDB.CreateAccount(addr) + creation = true } evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) @@ -217,6 +245,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. code := evm.StateDB.GetCode(addr) + if len(code) == 0 { ret, err = nil, nil // gas is unchanged } else { @@ -225,6 +254,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // The depth-check is already done, and precompiles handled above contract := NewContract(caller, AccountRef(addrCopy), value, gas) contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code) + contract.IsDeployment = creation ret, err = evm.interpreter.Run(contract, input, false) gas = contract.Gas } @@ -404,6 +434,8 @@ func (c *codeAndHash) Hash() common.Hash { // create creates a new contract using code as deployment code. func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address, typ OpCode) ([]byte, common.Address, uint64, error) { + var zeroVerkleLeaf [32]byte + // Depth check execution. Fail if we're trying to execute above the // limit. if evm.depth > int(params.CallCreateDepth) { @@ -433,12 +465,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, if evm.chainRules.IsEIP158 { evm.StateDB.SetNonce(address, 1) } + evm.Context.Transfer(evm.StateDB, caller.Address(), address, value) // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. 
contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) + contract.IsDeployment = true if evm.Config.Debug { if evm.depth == 0 { @@ -485,6 +519,15 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } + if err == nil && evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:], value.Sign() != 0)) { + evm.StateDB.RevertToSnapshot(snapshot) + err = ErrOutOfGas + } else { + evm.Accesses.SetLeafValuesContractCreateCompleted(address.Bytes()[:], zeroVerkleLeaf[:], zeroVerkleLeaf[:]) + } + } + if evm.Config.Debug { if evm.depth == 0 { evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 4c2cb3e5cf79..3ad63d582e9e 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -21,7 +21,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" ) // memoryGasCost calculates the quadratic gas for memory expansion. It does so @@ -87,13 +89,88 @@ func memoryCopierGas(stackpos int) gasFunc { } var ( - gasCallDataCopy = memoryCopierGas(2) - gasCodeCopy = memoryCopierGas(2) - gasExtCodeCopy = memoryCopierGas(3) - gasReturnDataCopy = memoryCopierGas(2) + gasCallDataCopy = memoryCopierGas(2) + gasCodeCopyStateful = memoryCopierGas(2) + gasExtCodeCopyStateful = memoryCopierGas(3) + gasReturnDataCopy = memoryCopierGas(2) ) +func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + usedGas := uint64(0) + slot := stack.Back(0) + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) + usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(index) + } + + return usedGas, nil +} + +func gasCodeCopy(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + var statelessGas uint64 + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + var ( + codeOffset = stack.Back(1) + length = stack.Back(2) + ) + uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() + if overflow { + uint64CodeOffset = 0xffffffffffffffff + } + uint64Length, overflow := length.Uint64WithOverflow() + if overflow { + uint64Length = 0xffffffffffffffff + } + _, offset, nonPaddedSize := getDataAndAdjustedBounds(contract.Code, uint64CodeOffset, uint64Length) + statelessGas = touchEachChunksOnReadAndChargeGas(offset, nonPaddedSize, contract.AddressPoint(), nil, evm.Accesses, contract.IsDeployment) + } + usedGas, err := gasCodeCopyStateful(evm, contract, stack, mem, memorySize) + return usedGas + statelessGas, err +} + +func gasExtCodeCopy(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + var statelessGas uint64 + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + var ( + codeOffset = stack.Back(2) + length = stack.Back(3) + targetAddr = stack.Back(0).Bytes20() + ) + uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() + if overflow { + uint64CodeOffset = 0xffffffffffffffff + } + uint64Length, overflow := length.Uint64WithOverflow() + if overflow { + uint64Length = 0xffffffffffffffff + } + // note: we must charge witness costs for the specified range regardless of whether it + // is 
in-bounds of the actual target account code. This is because we must charge the cost + before hitting the db to be able to know what the actual code size is. This is different + behavior from CODECOPY which only charges witness access costs for the part of the range + which overlaps with the account code. TODO: clarify this is desired behavior and amend the + spec. + statelessGas = touchEachChunksOnReadAndChargeGasWithAddress(uint64CodeOffset, uint64Length, targetAddr[:], nil, evm.Accesses, contract.IsDeployment) + } + usedGas, err := gasExtCodeCopyStateful(evm, contract, stack, mem, memorySize) + return usedGas + statelessGas, err +} + +func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + usedGas := uint64(0) + + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + where := stack.Back(0) + index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), where) + usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(index) + } + + return usedGas, nil +} + func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // Apply the witness access costs, err is nil + accessGas, _ := gasSLoad(evm, contract, stack, mem, memorySize) var ( y, x = stack.Back(1), stack.Back(0) current = evm.StateDB.GetState(contract.Address(), x.Bytes32()) @@ -109,12 +186,12 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi // 3. From a non-zero to a non-zero (CHANGE) switch { case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0 - return params.SstoreSetGas, nil + return params.SstoreSetGas + accessGas, nil case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0 evm.StateDB.AddRefund(params.SstoreRefundGas) - return params.SstoreClearGas, nil + return params.SstoreClearGas + accessGas, nil default: // non 0 => non 0 (or 0 => 0) - return params.SstoreResetGas, nil + return params.SstoreResetGas + accessGas, nil } } // The new gas metering is based on net gas costs (EIP-1283): @@ -331,6 +408,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize transfersValue = !stack.Back(2).IsZero() address = common.Address(stack.Back(1).Bytes20()) ) + if evm.chainRules.IsEIP158 { if transfersValue && evm.StateDB.Empty(address) { gas += params.CallNewAccountGas @@ -357,6 +435,21 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes()[:])) + if overflow { + return 0, ErrGasUintOverflow + } + } + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeValueTransfer(contract.Address().Bytes()[:], address.Bytes()[:])) + if overflow { + return 0, ErrGasUintOverflow + } + } + } + return gas, nil } @@ -382,6 +475,15 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) +
if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -398,6 +500,15 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -414,6 +525,15 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -434,6 +554,12 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + // TODO turn this into a panic (when we are sure this method + // will never execute when verkle is enabled) + log.Warn("verkle witness accumulation not supported for selfdestruct") + } + if !evm.StateDB.HasSuicided(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 92be3bf259a3..94c5d90ba1b1 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -20,8 +20,13 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -343,7 +348,13 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() - slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))) + cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) { + index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(index) + scope.Contract.UseGas(statelessGas) + } + slot.SetUint64(cs) return nil, nil } @@ -364,12 +375,68 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ if overflow { uint64CodeOffset = 0xffffffffffffffff } - codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) { + touchEachChunksOnReadAndChargeGas(copyOffset, nonPaddedCopyLength, scope.Contract.AddressPoint(), scope.Contract.Code, 
interpreter.evm.Accesses, scope.Contract.IsDeployment) + } + scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) return nil, nil } +func touchEachChunksOnReadAndChargeGasWithAddress(offset, size uint64, address, code []byte, accesses *types.AccessWitness, deployment bool) uint64 { + addrPoint := trieUtils.EvaluateAddressPoint(address) + return touchEachChunksOnReadAndChargeGas(offset, size, addrPoint, code, accesses, deployment) +} + +// touchEachChunksAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func touchEachChunksOnReadAndChargeGas(offset, size uint64, addrPoint *verkle.Point, code []byte, accesses *types.AccessWitness, deployment bool) uint64 { + // note that in the case where the copied code is outside the range of the + // contract code but touches the last leaf with contract code in it, + // we don't include the last leaf of code in the AccessWitness. The + // reason that we do not need the last leaf is the account's code size + // is already in the AccessWitness so a stateless verifier can see that + // the code from the last leaf is not needed. + if code != nil && (size == 0 || offset > uint64(len(code))) { + return 0 + } + var ( + statelessGasCharged uint64 + endOffset uint64 + ) + if code != nil && offset+size > uint64(len(code)) { + endOffset = uint64(len(code)) + } else { + endOffset = offset + size + } + chunks, err := trie.ChunkifyCode(code) + if err != nil { + panic(err) + } + + // endOffset - 1 since if the end offset is aligned on a chunk boundary, + // the last chunk should not be included. + for i := offset / 31; i <= (endOffset-1)/31; i++ { + index := trieUtils.GetTreeKeyCodeChunkWithEvaluatedAddress(addrPoint, uint256.NewInt(i)) + + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, accesses.TouchAddressOnReadAndComputeGas(index)) + if overflow { + panic("overflow when adding gas") + } + + if len(code) > 0 { + if deployment { + accesses.SetLeafValue(index[:], nil) + } else { + accesses.SetLeafValue(index[:], chunks[i][:]) + } + } + } + + return statelessGasCharged +} + func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( stack = scope.Stack @@ -383,8 +450,15 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) uint64CodeOffset = 0xffffffffffffffff } addr := common.Address(a.Bytes20()) - codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) - scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) { + code := interpreter.evm.StateDB.GetCode(addr) + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) + touchEachChunksOnReadAndChargeGasWithAddress(copyOffset, nonPaddedCopyLength, addr[:], code, interpreter.evm.Accesses, false) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) + } else { + codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + } return nil, nil } @@ -517,6 +591,7 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by loc := scope.Stack.peek() hash := common.Hash(loc.Bytes32()) val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash) + loc.SetBytes(val.Bytes()) return nil, nil } @@ -587,6 +662,13 
@@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) { + contractAddress := crypto.CreateAddress(scope.Contract.Address(), interpreter.evm.StateDB.GetNonce(scope.Contract.Address())) + statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], value.Sign() != 0) + if !tryConsumeGas(&gas, statelessGas) { + return nil, ErrExecutionReverted + } + } if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 } @@ -634,6 +716,14 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) { + codeAndHash := &codeAndHash{code: input} + contractAddress := crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) + statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], endowment.Sign() != 0) + if !tryConsumeGas(&gas, statelessGas) { + return nil, ErrExecutionReverted + } + } // Apply EIP150 gas -= gas / 64 @@ -877,6 +967,13 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by *pc += 1 if *pc < codeLen { scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) + + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) && *pc%31 == 0 { + // touch next chunk if PUSH1 is at the boundary. if so, *pc has + // advanced past this boundary. + statelessGas := touchEachChunksOnReadAndChargeGas(*pc+1, uint64(1), scope.Contract.AddressPoint(), scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) + scope.Contract.UseGas(statelessGas) + } } else { scope.Stack.push(integer.Clear()) } @@ -898,6 +995,11 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } + if interpreter.evm.chainConfig.IsCancun(interpreter.evm.Context.BlockNumber) { + statelessGas := touchEachChunksOnReadAndChargeGas(uint64(startMin), uint64(pushByteSize), scope.Contract.AddressPoint(), scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) + scope.Contract.UseGas(statelessGas) + } + integer := new(uint256.Int) scope.Stack.push(integer.SetBytes(common.RightPadBytes( scope.Contract.Code[startMin:endMin], pushByteSize))) diff --git a/core/vm/interface.go b/core/vm/interface.go index ad9b05d666a8..37a7eab63912 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -47,6 +47,9 @@ type StateDB interface { GetState(common.Address, common.Hash) common.Hash SetState(common.Address, common.Hash, common.Hash) + GetBalanceLittleEndian(address common.Address) []byte + GetNonceLittleEndian(address common.Address) []byte + Suicide(common.Address) bool HasSuicided(common.Address) bool @@ -74,6 +77,9 @@ type StateDB interface { AddPreimage(common.Hash, []byte) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error + + Witness() *types.AccessWitness + SetWitness(*types.AccessWitness) } // CallContext provides a basic interface for the EVM calling conventions. 
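One consequence of the PUSH witness charging above, shown as a small worked sketch (offsets are illustrative, not from this patch): the chunk range is computed from the offset of the immediate data, so push data that straddles a 31-byte chunk boundary is charged for both chunks.

    // a PUSH32 at pc=28: its 32 bytes of immediate data start at code offset 29
    offset, size := uint64(29), uint64(32)
    first := offset / 31             // chunk 0
    last := (offset + size - 1) / 31 // chunk 1 -> witness gas is charged for chunks 0 and 1
    _, _ = first, last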
The EVM diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 4f1ebc43a229..bd97b2eda56c 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -18,6 +18,7 @@ package vm import ( "hash" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" @@ -183,6 +184,13 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } + + if in.evm.ChainConfig().IsCancun(in.evm.Context.BlockNumber) && !contract.IsDeployment { + // if the PC ends up in a new "page" of verkleized code, charge the + // associated witness costs. + contract.Gas -= touchEachChunksOnReadAndChargeGas(pc, 1, contract.AddressPoint(), contract.Code, in.evm.TxContext.Accesses, contract.IsDeployment) + } + // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. op = contract.GetOp(pc) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index eef3b53d8c66..4fdc7ec2cc05 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -452,6 +452,7 @@ func newFrontierInstructionSet() JumpTable { EXTCODESIZE: { execute: opExtCodeSize, constantGas: params.ExtcodeSizeGasFrontier, + dynamicGas: gasExtCodeSize, minStack: minStack(1, 1), maxStack: maxStack(1, 1), }, @@ -532,6 +533,7 @@ func newFrontierInstructionSet() JumpTable { SLOAD: { execute: opSload, constantGas: params.SloadGasFrontier, + dynamicGas: gasSLoad, minStack: minStack(1, 1), maxStack: maxStack(1, 1), }, diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 551e1f5f1188..8a8e26fe7ad4 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" ) func makeGasSStoreFunc(clearingRefund uint64) gasFunc { @@ -51,6 +52,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } value := common.Hash(y.Bytes32()) + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), x) + cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(index) + } + if current == value { // noop (1) // EIP 2200 original clause: // return params.SloadGasEIP2200, nil @@ -103,14 +109,23 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { loc := stack.peek() slot := common.Hash(loc.Bytes32()) + var gasUsed uint64 + + if evm.chainConfig.IsCancun(evm.Context.BlockNumber) { + where := stack.Back(0) + addr := contract.Address() + index := trieUtils.GetTreeKeyStorageSlot(addr[:], where) + gasUsed += evm.Accesses.TouchAddressOnReadAndComputeGas(index) + } + // Check slot presence in the access list if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { // If the caller cannot afford the cost, this change will be rolled back // If he does afford it, we can skip checking the same thing later on, during execution evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - return params.ColdSloadCostEIP2929, nil + return gasUsed + params.ColdSloadCostEIP2929, nil } - return params.WarmStorageReadCostEIP2929, nil + return gasUsed + 
params.WarmStorageReadCostEIP2929, nil } // gasExtCodeCopyEIP2929 implements extcodecopy according to EIP-2929 diff --git a/eth/backend.go b/eth/backend.go index b4bea088f5fc..565ea0f85a95 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -58,6 +58,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/gballet/go-verkle" ) // Config contains the configuration options of the ETH protocol. @@ -149,6 +150,13 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { log.Info(strings.Repeat("-", 153)) log.Info("") + // Start the precomputation of Lagrange points + // if this config supports verkle trees. + if chainConfig.CancunBlock != nil { + log.Info("Detected the use of verkle trees, rebuilding the cache") + verkle.GetConfig() + } + if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil { log.Error("Failed to recover state", "error", err) } diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go index 2863bd4451b8..3161f9c1442f 100644 --- a/eth/tracers/js/tracer_test.go +++ b/eth/tracers/js/tracer_test.go @@ -216,7 +216,7 @@ func TestNoStepExec(t *testing.T) { } func TestIsPrecompile(t *testing.T) { - chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), TerminalTotalDifficulty: nil, Ethash: new(params.EthashConfig), Clique: nil} + chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), TerminalTotalDifficulty: nil, CancunBlock: nil, Ethash: new(params.EthashConfig), Clique: nil} chaincfg.ByzantiumBlock = big.NewInt(100) chaincfg.IstanbulBlock = big.NewInt(200) chaincfg.BerlinBlock = big.NewInt(300) @@ -254,14 +254,17 @@ func TestEnterExit(t *testing.T) { if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context)); err != nil { t.Fatal(err) } + // test that the enter and exit method are correctly invoked and the values passed tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context)) if err != nil { t.Fatal(err) } + scope := &vm.ScopeContext{ Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), } + tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int)) 
tracer.CaptureExit([]byte{}, 400, nil) diff --git a/go.mod b/go.mod index e669cff88448..ba54660b5df3 100644 --- a/go.mod +++ b/go.mod @@ -78,10 +78,12 @@ require ( github.com/aws/smithy-go v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20220524122216-93013fc5e327 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect + github.com/gballet/go-verkle v0.0.0-20220630172459-c815f6f07dfc // indirect github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect diff --git a/go.sum b/go.sum index 933c6a06bfb4..139929c3efce 100644 --- a/go.sum +++ b/go.sum @@ -86,6 +86,8 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-ipa v0.0.0-20220524122216-93013fc5e327 h1:E6A+t+Jx11nqwgQwUX41rM/BYa1fQOjRQqafNSBo0DE= +github.com/crate-crypto/go-ipa v0.0.0-20220524122216-93013fc5e327/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -134,6 +136,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgx github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.0.0-20220630172459-c815f6f07dfc h1:Q2ZMQ6GqJKgaSGivgFxNGAM2r5H034iLruLDVDaH/es= +github.com/gballet/go-verkle v0.0.0-20220630172459-c815f6f07dfc/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -555,6 +559,7 @@ golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index b05c9a08d379..aa86fa27a8c6 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1095,7 +1095,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr result, err := DoCall(ctx, b, args, blockNrOrHash, nil, 0, gasCap) if err != nil { - if errors.Is(err, core.ErrIntrinsicGas) { + if errors.Is(err, core.ErrIntrinsicGas) || errors.Is(err, core.ErrInsufficientBalanceWitness) { return true, nil, nil // Special case, raise gas limit } return true, nil, err // Bail out diff --git a/light/trie.go b/light/trie.go index 931ba30cb40a..3c7790566835 100644 --- a/light/trie.go +++ b/light/trie.go @@ -163,6 +163,8 @@ func (t *odrTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter return errors.New("not implemented, needs client/server interface split") } +func (t *odrTrie) IsVerkle() bool { return false } + // do tries and retries to execute a function until it returns with no error or // an error type other than MissingNodeError func (t *odrTrie) do(key []byte, fn func() error) error { diff --git a/miner/worker.go b/miner/worker.go index 93fb6288bb45..f1356ddd00f0 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -88,6 +88,7 @@ type environment struct { signer types.Signer state *state.StateDB // apply state changes here + original *state.StateDB // verkle: keep the orignal data to prove the pre-state ancestors mapset.Set // ancestor set (used for checking uncle parent validity) family mapset.Set // family set (used for checking uncle invalidity) tcount int // tx count in cycle @@ -766,6 +767,7 @@ func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase com signer: types.MakeSigner(w.chainConfig, header.Number), state: state, coinbase: coinbase, + original: state.Copy(), ancestors: mapset.NewSet(), family: mapset.NewSet(), header: header, @@ -1146,6 +1148,28 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti if err != nil { return err } + + if tr := w.current.original.GetTrie(); tr.IsVerkle() { + vtr := tr.(*trie.VerkleTrie) + keys := s.Witness().Keys() + kvs := s.Witness().KeyVals() + for _, key := range keys { + // XXX workaround - there is a problem in the witness creation + // so fix the witness creation as well. 
+ v, err := vtr.TryGet(key) + if err != nil { + panic(err) + } + kvs[string(key)] = v + } + vtr.Hash() + p, k, err := vtr.ProveAndSerialize(s.Witness().Keys(), s.Witness().KeyVals()) + if err != nil { + return err + } + block.SetVerkleProof(p, k) + } + // If we're post merge, just ignore if !w.isTTDReached(block.Header()) { select { diff --git a/miner/worker_test.go b/miner/worker_test.go index bda0fd4899b0..2fd4d78516f3 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -117,7 +117,7 @@ type testWorkerBackend struct { uncleBlock *types.Block } -func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { +func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int, isVerkle bool) *testWorkerBackend { var gspec = core.Genesis{ Config: chainConfig, Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, @@ -152,9 +152,17 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine if n > 0 { parent = chain.GetBlockByHash(chain.CurrentBlock().ParentHash()) } - blocks, _ := core.GenerateChain(chainConfig, parent, engine, db, 1, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(testUserAddress) - }) + var blocks []*types.Block + + if isVerkle { + blocks, _ = core.GenerateVerkleChain(chainConfig, parent, engine, db, 1, func(i int, gen *core.BlockGen) { + gen.SetCoinbase(testUserAddress) + }) + } else { + blocks, _ = core.GenerateChain(chainConfig, parent, engine, db, 1, func(i int, gen *core.BlockGen) { + gen.SetCoinbase(testUserAddress) + }) + } return &testWorkerBackend{ db: db, @@ -187,6 +195,22 @@ func (b *testWorkerBackend) newRandomUncle() *types.Block { return blocks[0] } +func (b *testWorkerBackend) newRandomVerkleUncle() *types.Block { + var parent *types.Block + cur := b.chain.CurrentBlock() + if cur.NumberU64() == 0 { + parent = b.chain.Genesis() + } else { + parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash()) + } + blocks, _ := core.GenerateVerkleChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) { + var addr = make([]byte, common.AddressLength) + rand.Read(addr) + gen.SetCoinbase(common.BytesToAddress(addr)) + }) + return blocks[0] +} + func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { var tx *types.Transaction gasPrice := big.NewInt(10 * params.InitialBaseFee) @@ -198,8 +222,8 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { return tx } -func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { - backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) +func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int, isVerkle bool) (*worker, *testWorkerBackend) { + backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks, isVerkle) backend.txPool.AddLocals(pendingTxs) w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) w.setEtherbase(testBankAddress) @@ -230,7 +254,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) { } chainConfig.LondonBlock = big.NewInt(0) - w, b := newTestWorker(t, chainConfig, engine, db, 0) + w, b := newTestWorker(t, chainConfig, engine, db, 0, false) defer w.close() // This test chain imports the mined blocks. 
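The proof-embedding step added to worker.commit above can be reduced to a short, self-contained sketch. The following is only an illustration, not the miner code itself, and it assumes nothing beyond the go-verkle API exercised elsewhere in this patch (verkle.New, MakeVerkleMultiProof, SerializeProof, VerifyVerkleProof): populate the key/value witness, commit the tree, then build and serialize the multiproof that ends up in the block.

package main

import (
	"fmt"

	"github.com/gballet/go-verkle"
)

func main() {
	// Hypothetical witness: a single 32-byte key with a 32-byte value.
	key := make([]byte, 32)
	value := make([]byte, 32)
	value[0] = 42

	root := verkle.New()
	if err := root.Insert(key, value, nil); err != nil {
		panic(err)
	}
	// The witness key/value map mirrors s.Witness().KeyVals() in the miner.
	kv := map[string][]byte{string(key): value}

	// Commit the tree before proving, as worker.commit does via vtr.Hash().
	root.ComputeCommitment()

	// Build the multiproof over the accessed keys and serialize it, which is
	// what VerkleTrie.ProveAndSerialize wraps.
	proof, cis, zis, yis := verkle.MakeVerkleMultiProof(root, [][]byte{key}, kv)
	cfg, _ := verkle.GetConfig()
	if !verkle.VerifyVerkleProof(proof, cis, zis, yis, cfg) {
		panic("proof does not verify")
	}
	serialized, _, err := verkle.SerializeProof(proof)
	if err != nil {
		panic(err)
	}
	fmt.Printf("serialized verkle proof: %x\n", serialized)
}

In the miner the same flow runs over every key returned by s.Witness().Keys(), which is why the XXX workaround above first re-reads each value from the trie before proving.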
@@ -269,6 +293,60 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) { } } +func TestGenerateBlocksAndImportVerkle(t *testing.T) { + t.Skip("Skipping due to a faulty testing infrastructure, see gballet/go-verkle/issues/180 for more information.") + var ( + engine consensus.Engine + chainConfig *params.ChainConfig + db = rawdb.NewMemoryDatabase() + ) + chainConfig = params.VerkleChainConfig + engine = ethash.NewFaker() + + w, b := newTestWorker(t, chainConfig, engine, db, 0, true) + defer w.close() + + // This test chain imports the mined blocks. + db2 := rawdb.NewMemoryDatabase() + b.genesis.MustCommit(db2) + chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil, nil) + defer chain.Stop() + + // Ignore empty commit here for less noise. + /* + w.skipSealHook = func(task *task) bool { + return len(task.receipts) == 0 + } + */ + + // Wait for mined blocks. + sub := w.mux.Subscribe(core.NewMinedBlockEvent{}) + defer sub.Unsubscribe() + + // Start mining! + w.start() + + for i := 0; i < 5; i++ { + b.txPool.AddLocal(b.newRandomTx(true)) + b.txPool.AddLocal(b.newRandomTx(false)) + w.postSideBlock(core.ChainSideEvent{Block: b.newRandomVerkleUncle()}) + w.postSideBlock(core.ChainSideEvent{Block: b.newRandomVerkleUncle()}) + + select { + case ev := <-sub.Chan(): + block := ev.Data.(core.NewMinedBlockEvent).Block + if block.Header().VerkleProof == nil { + t.Fatalf("expected Verkle proof in mined block header") + } + if _, err := chain.InsertChain([]*types.Block{block}); err != nil { + t.Fatalf("failed to insert new mined block %d: %v", block.NumberU64(), err) + } + case <-time.After(3 * time.Second): // Worker needs 1s to include new changes. + t.Fatalf("timeout") + } + } +} + func TestEmptyWorkEthash(t *testing.T) { testEmptyWork(t, ethashChainConfig, ethash.NewFaker()) } @@ -279,7 +357,7 @@ func TestEmptyWorkClique(t *testing.T) { func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) + w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, false) defer w.close() var ( @@ -325,7 +403,7 @@ func TestStreamUncleBlock(t *testing.T) { ethash := ethash.NewFaker() defer ethash.Close() - w, b := newTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1) + w, b := newTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1, false) defer w.close() var taskCh = make(chan struct{}) @@ -383,7 +461,7 @@ func TestRegenerateMiningBlockClique(t *testing.T) { func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) + w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, false) defer w.close() var taskCh = make(chan struct{}, 3) @@ -443,7 +521,7 @@ func TestAdjustIntervalClique(t *testing.T) { func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) + w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, false) defer w.close() w.skipSealHook = func(task *task) bool { diff --git a/params/config.go b/params/config.go index 26c5123e7129..1b75070ce436 100644 --- a/params/config.go +++ b/params/config.go @@ -77,6 +77,26 @@ var ( Ethash: new(EthashConfig), } + 
VerkleChainConfig = &ChainConfig{ + ChainID: big.NewInt(86), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + CancunBlock: big.NewInt(0), + Ethash: new(EthashConfig), + } + // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ SectionIndex: 451, @@ -261,16 +281,16 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, new(EthashConfig), nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. 
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, new(EthashConfig), nil} TestRules = TestChainConfig.Rules(new(big.Int), false) ) @@ -361,6 +381,7 @@ type ChainConfig struct { ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // Eip-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // Eip-5133 (bomb delay) switch block (nil = no fork, 0 = already activated) MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter + CancunBlock *big.Int `json:"cancunBlock,omitempty"` // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. @@ -444,6 +465,9 @@ func (c *ChainConfig) String() string { if c.GrayGlacierBlock != nil { banner += fmt.Sprintf(" - Gray Glacier: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/gray-glacier.md)\n", c.GrayGlacierBlock) } + if c.CancunBlock != nil { + fmt.Sprintf(" - Cancun: %-8v\n", c.CancunBlock) + } banner += "\n" // Add a special section for the merge as it's non-obvious @@ -499,6 +523,10 @@ func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool { return isForked(c.MuirGlacierBlock, num) } +func (c *ChainConfig) IsCancun(num *big.Int) bool { + return isForked(c.CancunBlock, num) +} + // IsPetersburg returns whether num is either // - equal to or greater than the PetersburgBlock fork block, // - OR is nil, and Constantinople is active @@ -727,7 +755,7 @@ type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool - IsBerlin, IsLondon bool + IsBerlin, IsLondon, IsCancun bool IsMerge bool } @@ -750,5 +778,6 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules { IsBerlin: c.IsBerlin(num), IsLondon: c.IsLondon(num), IsMerge: isMerge, + IsCancun: c.IsCancun(num), } } diff --git a/params/protocol_params.go b/params/protocol_params.go index 5f154597a7fa..70cd72a0916f 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -157,6 +157,13 @@ const ( // up to half the consumed gas could be refunded. 
Redefined as 1/5th in EIP-3529 RefundQuotient uint64 = 2 RefundQuotientEIP3529 uint64 = 5 + + // Verkle tree EIP: costs associated to witness accesses + WitnessBranchReadCost = uint64(1900) + WitnessChunkReadCost = uint64(200) + WitnessBranchWriteCost = uint64(3000) + WitnessChunkWriteCost = uint64(500) + WitnessChunkFillCost = uint64(6200) ) // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations diff --git a/tests/state_test_util.go b/tests/state_test_util.go index f6d8e15001d8..4f308a084099 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -266,7 +266,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo var snaps *snapshot.Tree if snapshotter { - snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false) + snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false, false) } statedb, _ = state.New(root, sdb, snaps) return snaps, statedb diff --git a/trie/database.go b/trie/database.go index 2df2e859d7b4..95b2c0e5d08b 100644 --- a/trie/database.go +++ b/trie/database.go @@ -266,6 +266,7 @@ type Config struct { Cache int // Memory allowance (MB) to use for caching trie nodes in memory Journal string // Journal of clean cache to survive node restarts Preimages bool // Flag whether the preimage of trie key is recorded + UseVerkle bool // Flag whether the data is stored in a verkle trie } // NewDatabase creates a new trie database to store ephemeral trie content before diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 6a5cc89c9ffd..63a8dbe4f94a 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -219,3 +219,7 @@ func (t *SecureTrie) getSecKeyCache() map[string][]byte { } return t.secKeyCache } + +func (t *SecureTrie) IsVerkle() bool { + return false +} diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go new file mode 100644 index 000000000000..3a07a69c0e83 --- /dev/null +++ b/trie/utils/verkle.go @@ -0,0 +1,246 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
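As a side note on the witness gas constants introduced in params/protocol_params.go above, the following hypothetical helper (chargeChunkRead is not part of the patch) sketches how the branch and chunk read costs are meant to combine: a read touches both a stem (branch) and a leaf (chunk), and each is only charged the first time it enters the access witness.

package main

import "fmt"

const (
	witnessBranchReadCost = uint64(1900) // params.WitnessBranchReadCost
	witnessChunkReadCost  = uint64(200)  // params.WitnessChunkReadCost
)

// chargeChunkRead is a hypothetical stand-in for the accounting done by the
// access witness on reads: branchSeen and chunkSeen say whether the stem and
// the leaf are already present in the witness.
func chargeChunkRead(branchSeen, chunkSeen bool) uint64 {
	var gas uint64
	if !branchSeen {
		gas += witnessBranchReadCost
	}
	if !chunkSeen {
		gas += witnessChunkReadCost
	}
	return gas
}

func main() {
	fmt.Println(chargeChunkRead(false, false)) // 2100: new branch and new chunk
	fmt.Println(chargeChunkRead(true, false))  // 200: new chunk in a known branch
	fmt.Println(chargeChunkRead(true, true))   // 0: everything already witnessed
}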
+ +package utils + +import ( + "github.com/crate-crypto/go-ipa/bandersnatch/fr" + "github.com/gballet/go-verkle" + + "github.com/holiman/uint256" +) + +const ( + VersionLeafKey = 0 + BalanceLeafKey = 1 + NonceLeafKey = 2 + CodeKeccakLeafKey = 3 + CodeSizeLeafKey = 4 +) + +var ( + zero = uint256.NewInt(0) + HeaderStorageOffset = uint256.NewInt(64) + CodeOffset = uint256.NewInt(128) + MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) + VerkleNodeWidth = uint256.NewInt(256) + codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) + + getTreePolyIndex0Fr [1]verkle.Fr + getTreePolyIndex0Point = new(verkle.Point) +) + +func init() { + // The byte array is the Marshalled output of the point computed as such: + //cfg, _ := verkle.GetConfig() + //verkle.FromLEBytes(&getTreePolyIndex0Fr[0], []byte{2, 64}) + //= cfg.CommitToPoly(getTreePolyIndex0Fr[:], 1) + getTreePolyIndex0Point.Unmarshal([]byte{105, 89, 33, 220, 163, 177, 108, 92, 200, 80, 233, 76, 221, 99, 245, 115, 196, 103, 102, 158, 137, 206, 200, 137, 53, 208, 52, 116, 214, 189, 249, 212}) +} + +// GetTreeKey performs both the work of the spec's get_tree_key function, and that +// of pedersen_hash: it builds the polynomial in pedersen_hash without having to +// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte +// array. Since at most the first 5 coefficients of the polynomial will be non-zero, +// these 5 coefficients are created directly. +func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + var poly [5]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + // little-endian, 32-byte aligned treeIndex + var index [32]byte + for i, b := range treeIndex.Bytes() { + index[len(treeIndex.Bytes())-1-i] = b + } + verkle.FromLEBytes(&poly[3], index[:16]) + verkle.FromLEBytes(&poly[4], index[16:]) + + cfg, _ := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, getTreePolyIndex0Point) + + // The output of Byte() is big engian for banderwagon. This + // introduces an imbalance in the tree, because hashes are + // elements of a 253-bit field. This means more than half the + // tree would be empty. To avoid this problem, use a little + // endian commitment and chop the MSB. 
+ retb := ret.Bytes() + for i := 0; i < 16; i++ { + retb[31-i], retb[i] = retb[i], retb[31-i] + } + retb[31] = subIndex + return retb[:] + +} + +func GetTreeKeyAccountLeaf(address []byte, leaf byte) []byte { + return GetTreeKey(address, zero, leaf) +} + +func GetTreeKeyVersion(address []byte) []byte { + return GetTreeKey(address, zero, VersionLeafKey) +} + +func GetTreeKeyBalance(address []byte) []byte { + return GetTreeKey(address, zero, BalanceLeafKey) +} + +func GetTreeKeyNonce(address []byte) []byte { + return GetTreeKey(address, zero, NonceLeafKey) +} + +func GetTreeKeyCodeKeccak(address []byte) []byte { + return GetTreeKey(address, zero, CodeKeccakLeafKey) +} + +func GetTreeKeyCodeSize(address []byte) []byte { + return GetTreeKey(address, zero, CodeSizeLeafKey) +} + +func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth).Bytes() + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = subIndexMod[0] + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256.Int) []byte { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth).Bytes() + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = subIndexMod[0] + } + return getTreeKeyWithEvaluatedAddess(addressPoint, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte { + pos := storageKey.Clone() + if storageKey.Cmp(codeStorageDelta) < 0 { + pos.Add(HeaderStorageOffset, storageKey) + } else { + pos.Add(MainStorageOffset, storageKey) + } + treeIndex := new(uint256.Int).Div(pos, VerkleNodeWidth) + + // calculate the sub_index, i.e. the index in the stem tree. + // Because the modulus is 256, it's the last byte of treeIndex + subIndexMod := new(uint256.Int).Mod(pos, VerkleNodeWidth).Bytes() + var subIndex byte + if len(subIndexMod) != 0 { + // uint256 is broken into 4 little-endian quads, + // each with native endianness. Extract the least + // significant byte. + subIndex = subIndexMod[0] & 0xFF + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func getTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte { + var poly [5]fr.Element + + poly[0].SetZero() + poly[1].SetZero() + poly[2].SetZero() + + // little-endian, 32-byte aligned treeIndex + var index [32]byte + for i, b := range treeIndex.Bytes() { + index[len(treeIndex.Bytes())-1-i] = b + } + verkle.FromLEBytes(&poly[3], index[:16]) + verkle.FromLEBytes(&poly[4], index[16:]) + + cfg, _ := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add the pre-evaluated address + ret.Add(ret, evaluated) + + // The output of Byte() is big engian for banderwagon. This + // introduces an imbalance in the tree, because hashes are + // elements of a 253-bit field. This means more than half the + // tree would be empty. To avoid this problem, use a little + // endian commitment and chop the MSB. 
+ retb := ret.Bytes() + for i := 0; i < 16; i++ { + retb[31-i], retb[i] = retb[i], retb[31-i] + } + retb[31] = subIndex + return retb[:] + +} + +func EvaluateAddressPoint(address []byte) *verkle.Point { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + var poly [3]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + cfg, _ := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, getTreePolyIndex0Point) + + return ret +} + +func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey *uint256.Int) []byte { + pos := storageKey.Clone() + if storageKey.Cmp(codeStorageDelta) < 0 { + pos.Add(HeaderStorageOffset, storageKey) + } else { + pos.Add(MainStorageOffset, storageKey) + } + treeIndex := new(uint256.Int).Div(pos, VerkleNodeWidth) + // calculate the sub_index, i.e. the index in the stem tree. + // Because the modulus is 256, it's the last byte of treeIndex + subIndexMod := new(uint256.Int).Mod(pos, VerkleNodeWidth).Bytes() + var subIndex byte + if len(subIndexMod) != 0 { + // uint256 is broken into 4 little-endian quads, + // each with native endianness. Extract the least + // significant byte. + subIndex = subIndexMod[0] & 0xFF + } + return getTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) +} diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go new file mode 100644 index 000000000000..6a98c6acb185 --- /dev/null +++ b/trie/utils/verkle_test.go @@ -0,0 +1,86 @@ +// Copyright 2022 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
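To make the key layout implemented by trie/utils/verkle.go above concrete, here is a minimal usage sketch, assuming the patched trie/utils package is importable: all account-header leaves of an address share one stem and differ only in the final sub-index byte, and low storage slots are folded into that same header group via HeaderStorageOffset.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie/utils"
	"github.com/holiman/uint256"
)

func main() {
	// Hypothetical 20-byte address; GetTreeKey left-pads it to 32 bytes.
	var addr [20]byte
	addr[19] = 0x01

	versionKey := utils.GetTreeKeyVersion(addr[:])
	balanceKey := utils.GetTreeKeyBalance(addr[:])
	nonceKey := utils.GetTreeKeyNonce(addr[:])

	// Same stem, different last byte (0, 1 and 2 respectively).
	fmt.Printf("version: %x\n", versionKey)
	fmt.Printf("balance: %x\n", balanceKey)
	fmt.Printf("nonce:   %x\n", nonceKey)

	// Slot 0 maps to sub-index 64 (HeaderStorageOffset) of the same stem,
	// while large slots end up beyond MainStorageOffset.
	slot0 := utils.GetTreeKeyStorageSlot(addr[:], uint256.NewInt(0))
	fmt.Printf("slot 0:  %x\n", slot0)
}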
+ +package utils + +import ( + "crypto/sha256" + "math/big" + "math/rand" + "testing" + + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +func TestGetTreeKey(t *testing.T) { + var addr [32]byte + for i := 0; i < 16; i++ { + addr[1+2*i] = 0xff + } + n := uint256.NewInt(1) + n = n.Lsh(n, 129) + n.Add(n, uint256.NewInt(3)) + GetTreeKey(addr[:], n, 1) +} + +func TestConstantPoint(t *testing.T) { + cfg, _ := verkle.GetConfig() + verkle.FromLEBytes(&getTreePolyIndex0Fr[0], []byte{2, 64}) + expected := cfg.CommitToPoly(getTreePolyIndex0Fr[:], 1) + + if !verkle.Equal(expected, getTreePolyIndex0Point) { + t.Fatal("Marshalled constant value is incorrect") + } +} + +func BenchmarkPedersenHash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + GetTreeKeyCodeSize(addr[:]) + } +} + +func sha256GetTreeKeyCodeSize(addr []byte) []byte { + digest := sha256.New() + digest.Write(addr) + treeIndexBytes := new(big.Int).Bytes() + var payload [32]byte + copy(payload[:len(treeIndexBytes)], treeIndexBytes) + digest.Write(payload[:]) + h := digest.Sum(nil) + h[31] = CodeKeccakLeafKey + return h +} + +func BenchmarkSha256Hash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + sha256GetTreeKeyCodeSize(addr[:]) + } +} diff --git a/trie/verkle.go b/trie/verkle.go new file mode 100644 index 000000000000..faa424382a45 --- /dev/null +++ b/trie/verkle.go @@ -0,0 +1,379 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/crate-crypto/go-ipa/bandersnatch/fr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie +// interface so that Verkle trees can be reused verbatim. +type VerkleTrie struct { + root verkle.VerkleNode + db *Database +} + +func (vt *VerkleTrie) ToDot() string { + return verkle.ToDot(vt.root) +} + +func NewVerkleTrie(root verkle.VerkleNode, db *Database) *VerkleTrie { + return &VerkleTrie{ + root: root, + db: db, + } +} + +var errInvalidProof = errors.New("invalid proof") + +// GetKey returns the sha3 preimage of a hashed key that was previously used +// to store a value. +func (trie *VerkleTrie) GetKey(key []byte) []byte { + return key +} + +// TryGet returns the value for key stored in the trie. The value bytes must +// not be modified by the caller. If a node was not found in the database, a +// trie.MissingNodeError is returned. 
+func (trie *VerkleTrie) TryGet(key []byte) ([]byte, error) { + return trie.root.Get(key, trie.db.DiskDB().Get) +} + +func (t *VerkleTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { + var ( + err error + nonce, balance [32]byte + balancekey, cskey, ckkey, noncekey [32]byte + ) + + // Only evaluate the polynomial once + versionkey := utils.GetTreeKeyVersion(key[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + if err = t.TryUpdate(versionkey, []byte{0}); err != nil { + return fmt.Errorf("updateStateObject (%x) error: %v", key, err) + } + binary.LittleEndian.PutUint64(nonce[:], acc.Nonce) + if err = t.TryUpdate(noncekey[:], nonce[:]); err != nil { + return fmt.Errorf("updateStateObject (%x) error: %v", key, err) + } + bbytes := acc.Balance.Bytes() + if len(bbytes) > 0 { + for i, b := range bbytes { + balance[len(bbytes)-i-1] = b + } + } + if err = t.TryUpdate(balancekey[:], balance[:]); err != nil { + return fmt.Errorf("updateStateObject (%x) error: %v", key, err) + } + if err = t.TryUpdate(ckkey[:], acc.CodeHash); err != nil { + return fmt.Errorf("updateStateObject (%x) error: %v", key, err) + } + // TODO figure out if the code size needs to be updated, too + + return nil +} + +// TryUpdate associates key with value in the trie. If value has length zero, any +// existing value is deleted from the trie. The value bytes must not be modified +// by the caller while they are stored in the trie. If a node was not found in the +// database, a trie.MissingNodeError is returned. +func (trie *VerkleTrie) TryUpdate(key, value []byte) error { + return trie.root.Insert(key, value, func(h []byte) ([]byte, error) { + return trie.db.DiskDB().Get(h) + }) +} + +// TryDelete removes any existing value for key from the trie. If a node was not +// found in the database, a trie.MissingNodeError is returned. +func (trie *VerkleTrie) TryDelete(key []byte) error { + return trie.root.Delete(key) +} + +// Hash returns the root hash of the trie. It does not write to the database and +// can be used even if the trie doesn't have one. +func (trie *VerkleTrie) Hash() common.Hash { + return trie.root.ComputeCommitment().Bytes() +} + +func nodeToDBKey(n verkle.VerkleNode) []byte { + ret := n.ComputeCommitment().Bytes() + return ret[:] +} + +// Commit writes all nodes to the trie's memory database, tracking the internal +// and external (for account tries) references. +func (trie *VerkleTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) { + flush := make(chan verkle.VerkleNode) + go func() { + trie.root.(*verkle.InternalNode).Flush(func(n verkle.VerkleNode) { + if onleaf != nil { + if leaf, isLeaf := n.(*verkle.LeafNode); isLeaf { + for i := 0; i < verkle.NodeWidth; i++ { + if leaf.Value(i) != nil { + comm := n.ComputeCommitment().Bytes() + onleaf(nil, nil, leaf.Value(i), common.BytesToHash(comm[:])) + } + } + } + } + flush <- n + }) + close(flush) + }() + var commitCount int + for n := range flush { + commitCount += 1 + value, err := n.Serialize() + if err != nil { + panic(err) + } + + if err := trie.db.DiskDB().Put(nodeToDBKey(n), value); err != nil { + return common.Hash{}, commitCount, err + } + } + + return trie.Hash(), commitCount, nil +} + +// NodeIterator returns an iterator that returns nodes of the trie. 
Iteration +// starts at the key after the given start key. +func (trie *VerkleTrie) NodeIterator(startKey []byte) NodeIterator { + return newVerkleNodeIterator(trie, nil) +} + +// Prove constructs a Merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root), ending +// with the node that proves the absence of the key. +func (trie *VerkleTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { + panic("not implemented") +} + +func (trie *VerkleTrie) Copy(db *Database) *VerkleTrie { + return &VerkleTrie{ + root: trie.root.Copy(), + db: db, + } +} +func (trie *VerkleTrie) IsVerkle() bool { + return true +} + +func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte, kv map[string][]byte) ([]byte, []verkle.KeyValuePair, error) { + proof, _, _, _ := verkle.MakeVerkleMultiProof(trie.root, keys, kv) + p, kvps, err := verkle.SerializeProof(proof) + if err != nil { + return nil, nil, err + } + + return p, kvps, nil +} + +type set = map[string]struct{} + +func hasKey(s set, key []byte) bool { + _, ok := s[string(key)] + return ok +} + +func addKey(s set, key []byte) { + s[string(key)] = struct{}{} +} + +func DeserializeAndVerifyVerkleProof(serialized []byte) (map[common.Hash]common.Hash, error) { + proof, cis, indices, yis, leaves, err := deserializeVerkleProof(serialized) + if err != nil { + return nil, fmt.Errorf("could not deserialize proof: %w", err) + } + cfg, err := verkle.GetConfig() + if err != nil { + return nil, err + } + if !verkle.VerifyVerkleProof(proof, cis, indices, yis, cfg) { + return nil, errInvalidProof + } + + return leaves, nil +} + +func deserializeVerkleProof(serialized []byte) (*verkle.Proof, []*verkle.Point, []byte, []*verkle.Fr, map[common.Hash]common.Hash, error) { + var ( + indices []byte // List of zis + yis []*verkle.Fr // List of yis + seenIdx, seenComm set // Mark when a zi/yi has already been seen in deserialization + others set // Mark when an "other" stem has been seen + ) + + proof, err := verkle.DeserializeProof(serialized) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("verkle proof deserialization error: %w", err) + } + + for _, stem := range proof.PoaStems { + addKey(others, stem) + } + + keyvals := make(map[common.Hash]common.Hash) + for i, key := range proof.Keys { + keyvals[common.BytesToHash(key)] = common.BytesToHash(proof.Values[i]) + } + + if len(proof.Keys) != len(proof.Values) { + return nil, nil, nil, nil, nil, fmt.Errorf("keys and values are of different length %d != %d", len(proof.Keys), len(proof.Values)) + } + if len(proof.Keys) != len(proof.ExtStatus) { + return nil, nil, nil, nil, nil, fmt.Errorf("keys and values are of different length %d != %d", len(proof.Keys), len(proof.Values)) + } + + // Rebuild the tree, creating nodes in the lexicographic order of their path + lastcomm, lastpoa := 0, 0 + root := verkle.NewStateless() + for i, es := range proof.ExtStatus { + depth := es & 0x1F + status := es >> 5 + node := root + stem := proof.Keys[i] + + // go over the stem's bytes, in order to rebuild the internal nodes + for j := byte(0); j < depth; j++ { + // Recurse into the tree that is being rebuilt + if node.Children()[stem[j]] == nil { + node.SetChild(int(stem[j]), verkle.NewStatelessWithCommitment(proof.Cs[lastcomm])) 
+ lastcomm++ + } + + node = node.Children()[stem[j]].(*verkle.StatelessNode) + + // if that zi hasn't been encountered yet, add it to + // the list of zis sorted by path. + if !hasKey(seenIdx, stem[:j]) { + addKey(seenIdx, stem[:j]) + indices = append(indices, stem[j]) + } + + // same thing with a yi + if !hasKey(seenComm, stem[:j]) { + addKey(seenComm, stem[:j]) + var yi fr.Element + bytes := node.ComputeCommitment().Bytes() + yi.SetBytesLE(bytes[:]) + yis = append(yis, &yi) + } + } + + // Reached the end, add the extension-and-suffix tree + switch status { + case 0: + // missing stem, leave it as is + break + case 1: + // another stem is found, build it + node.SetStem(proof.PoaStems[lastpoa]) + lastpoa++ + case 2: + // stem is present + node.SetStem(stem[:31]) + default: + return nil, nil, nil, nil, nil, fmt.Errorf("verkle proof deserialization error: invalid extension status %d", status) + } + + } + + return proof, proof.Cs, indices, yis, keyvals, nil +} + +// Copy the values here so as to avoid an import cycle +const ( + PUSH1 = byte(0x60) + PUSH3 = byte(0x62) + PUSH4 = byte(0x63) + PUSH7 = byte(0x66) + PUSH21 = byte(0x74) + PUSH30 = byte(0x7d) + PUSH32 = byte(0x7f) +) + +func ChunkifyCode(code []byte) ([][32]byte, error) { + var ( + chunkOffset = 0 // offset in the chunk + chunkCount = len(code) / 31 + codeOffset = 0 // offset in the code + ) + if len(code)%31 != 0 { + chunkCount++ + } + chunks := make([][32]byte, chunkCount) + for i := range chunks { + // number of bytes to copy, 31 unless + // the end of the code has been reached. + end := 31 * (i + 1) + if len(code) < end { + end = len(code) + } + + // Copy the code itself + copy(chunks[i][1:], code[31*i:end]) + + // chunk offset = taken from the + // last chunk. + if chunkOffset > 31 { + // skip offset calculation if push + // data covers the whole chunk + chunks[i][0] = 31 + chunkOffset = 1 + continue + } + chunks[i][0] = byte(chunkOffset) + chunkOffset = 0 + + // Check each instruction and update the offset + // it should be 0 unless a PUSHn overflows. + for ; codeOffset < end; codeOffset++ { + if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { + codeOffset += int(code[codeOffset] - PUSH1 + 1) + if codeOffset+1 >= 31*(i+1) { + codeOffset++ + chunkOffset = codeOffset - 31*(i+1) + break + } + } + } + } + + return chunks, nil +} diff --git a/trie/verkle_iterator.go b/trie/verkle_iterator.go new file mode 100644 index 000000000000..ea30f17c0dc4 --- /dev/null +++ b/trie/verkle_iterator.go @@ -0,0 +1,247 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
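The ChunkifyCode helper above can be exercised on its own. The sketch below, assuming the patched trie package is importable, shows how a PUSH32 whose immediate data crosses a 31-byte chunk boundary is reflected in the leading offset byte of the following chunk.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// 40 bytes of code: a PUSH32 at offset 0, so its 32 bytes of immediate
	// data run from offset 1 to 32 and spill past the first 31-byte chunk.
	code := make([]byte, 40)
	code[0] = trie.PUSH32

	chunks, err := trie.ChunkifyCode(code)
	if err != nil {
		panic(err)
	}
	for i, c := range chunks {
		// c[0] counts the leading bytes of this chunk that are still PUSH
		// data; c[1:] holds the 31 code bytes themselves.
		fmt.Printf("chunk %d: pushdata offset=%d data=%x\n", i, c[0], c[1:])
	}
	// Expected: chunk 0 has offset 0, chunk 1 has offset 2, because code
	// offsets 31 and 32 are the tail of the PUSH32 immediate.
}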
+ +package trie + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + + "github.com/gballet/go-verkle" +) + +type verkleNodeIteratorState struct { + Node verkle.VerkleNode + Index int +} + +type verkleNodeIterator struct { + trie *VerkleTrie + current verkle.VerkleNode + lastErr error + + stack []verkleNodeIteratorState +} + +func newVerkleNodeIterator(trie *VerkleTrie, start []byte) NodeIterator { + if trie.Hash() == emptyState { + return new(nodeIterator) + } + it := &verkleNodeIterator{trie: trie, current: trie.root} + //it.err = it.seek(start) + return it +} + +// Next moves the iterator to the next node. If the parameter is false, any child +// nodes will be skipped. +func (it *verkleNodeIterator) Next(descend bool) bool { + if it.lastErr == errIteratorEnd { + it.lastErr = errIteratorEnd + return false + } + + if len(it.stack) == 0 { + it.stack = append(it.stack, verkleNodeIteratorState{Node: it.trie.root, Index: 0}) + it.current = it.trie.root + + return true + } + + switch node := it.current.(type) { + case *verkle.InternalNode: + context := &it.stack[len(it.stack)-1] + + // Look for the next non-empty child + children := node.Children() + for ; context.Index < len(children); context.Index++ { + if _, ok := children[context.Index].(verkle.Empty); !ok { + it.stack = append(it.stack, verkleNodeIteratorState{Node: children[context.Index], Index: 0}) + it.current = children[context.Index] + return it.Next(descend) + } + } + + // Reached the end of this node, go back to the parent, if + // this isn't root. + if len(it.stack) == 1 { + it.lastErr = errIteratorEnd + return false + } + it.stack = it.stack[:len(it.stack)-1] + it.current = it.stack[len(it.stack)-1].Node + it.stack[len(it.stack)-1].Index++ + return it.Next(descend) + case *verkle.LeafNode: + // Look for the next non-empty value + for i := it.stack[len(it.stack)-1].Index; i < 256; i++ { + if node.Value(i) != nil { + it.stack[len(it.stack)-1].Index = i + 1 + return true + } + } + + // go back to parent to get the next leaf + it.stack = it.stack[:len(it.stack)-1] + it.current = it.stack[len(it.stack)-1].Node + it.stack[len(it.stack)-1].Index++ + return it.Next(descend) + case *verkle.HashedNode: + // resolve the node + data, err := it.trie.db.diskdb.Get(nodeToDBKey(node)) + if err != nil { + panic(err) + } + it.current, err = verkle.ParseNode(data, byte(len(it.stack)-1), nodeToDBKey(node)) + if err != nil { + panic(err) + } + + // update the stack and parent with the resolved node + it.stack[len(it.stack)-1].Node = it.current + parent := &it.stack[len(it.stack)-2] + parent.Node.(*verkle.InternalNode).SetChild(parent.Index, it.current) + return true + default: + panic("invalid node type") + } +} + +// Error returns the error status of the iterator. +func (it *verkleNodeIterator) Error() error { + if it.lastErr == errIteratorEnd { + return nil + } + return it.lastErr +} + +// Hash returns the hash of the current node. +func (it *verkleNodeIterator) Hash() common.Hash { + return it.current.ComputeCommitment().Bytes() +} + +// Parent returns the hash of the parent of the current node. The hash may be the one +// grandparent if the immediate parent is an internal node with no hash. +func (it *verkleNodeIterator) Parent() common.Hash { + return it.stack[len(it.stack)-1].Node.ComputeCommitment().Bytes() +} + +// Path returns the hex-encoded path to the current node. +// Callers must not retain references to the return value after calling Next. 
+// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10. +func (it *verkleNodeIterator) Path() []byte { + + panic("not completely implemented") +} + +// Leaf returns true iff the current node is a leaf node. +func (it *verkleNodeIterator) Leaf() bool { + _, ok := it.current.(*verkle.LeafNode) + return ok +} + +// LeafKey returns the key of the leaf. The method panics if the iterator is not +// positioned at a leaf. Callers must not retain references to the value after +// calling Next. +func (it *verkleNodeIterator) LeafKey() []byte { + leaf, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("Leaf() called on an verkle node iterator not at a leaf location") + } + + return leaf.Key(it.stack[len(it.stack)-1].Index - 1) +} + +// LeafBlob returns the content of the leaf. The method panics if the iterator +// is not positioned at a leaf. Callers must not retain references to the value +// after calling Next. +func (it *verkleNodeIterator) LeafBlob() []byte { + leaf, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("LeafBlob() called on an verkle node iterator not at a leaf location") + } + + return leaf.Value(it.stack[len(it.stack)-1].Index - 1) +} + +// LeafProof returns the Merkle proof of the leaf. The method panics if the +// iterator is not positioned at a leaf. Callers must not retain references +// to the value after calling Next. +func (it *verkleNodeIterator) LeafProof() [][]byte { + _, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("LeafProof() called on an verkle node iterator not at a leaf location") + } + + //return it.trie.Prove(leaf.Key()) + panic("not completely implemented") +} + +// AddResolver sets an intermediate database to use for looking up trie nodes +// before reaching into the real persistent layer. +// +// This is not required for normal operation, rather is an optimization for +// cases where trie nodes can be recovered from some external mechanism without +// reading from disk. In those cases, this resolver allows short circuiting +// accesses and returning them from memory. +// +// Before adding a similar mechanism to any other place in Geth, consider +// making trie.Database an interface and wrapping at that level. It's a huge +// refactor, but it could be worth it if another occurrence arises. +func (it *verkleNodeIterator) AddResolver(ethdb.KeyValueStore) { + panic("not completely implemented") +} + +type dummy struct{} + +func (it dummy) Next(descend bool) bool { + return false +} + +func (it dummy) Error() error { + return nil +} + +func (it dummy) Hash() common.Hash { + panic("should not be called") +} + +func (it dummy) Leaf() bool { + return false +} + +func (it dummy) LeafKey() []byte { + return nil +} + +func (it dummy) LeafProof() [][]byte { + return nil +} + +func (it dummy) LeafBlob() []byte { + return nil +} + +func (it dummy) Parent() common.Hash { + return common.Hash{} +} + +func (it dummy) Path() []byte { + return nil +} + +func (it dummy) AddResolver(ethdb.KeyValueStore) { + panic("not completely implemented") +} diff --git a/trie/verkle_test.go b/trie/verkle_test.go new file mode 100644 index 000000000000..b5e17d347797 --- /dev/null +++ b/trie/verkle_test.go @@ -0,0 +1,396 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +func TestReproduceTree(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d01"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526400"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a02"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d02"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a04"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526402"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526403"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a00"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a03"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526401"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526404"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d00"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a01"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d03"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d04"), + } + + values := [][]byte{ + common.Hex2Bytes("320122e8584be00d000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0300000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("1bc176f2790c91e6000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("e703000000000000000000000000000000000000000000000000000000000000"), + } + + root := verkle.New() + kv := make(map[string][]byte) + + for i, key := 
range presentKeys { + root.Insert(key, values[i], nil) + kv[string(key)] = values[i] + } + + proof, Cs, zis, yis := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...), kv) + cfg, _ := verkle.GetConfig() + if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + t.Fatal("could not verify proof") + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %x", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.ComputeCommitment().Bytes()) +} + +func TestChunkifyCodeTestnet(t *testing.T) { + code, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea264697066735822122000382db0489577c1646ea2147a05f92f13f32336a32f1f82c6fb10b63e19f04064736f6c63430008070033") + chunks, err := ChunkifyCode(code) + if err != nil { + t.Fatal(err) + } + if len(chunks) != (len(code)+30)/31 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0][0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0]) + } + t.Logf("%x\n", chunks[0]) + for i, chunk := range chunks[1:] { + if chunk[0] != 0 && i != 4 { + t.Fatalf("invalid offset in chunk #%d %d != 0", i+1, chunk[0]) + } + if i == 4 && chunk[0] != 12 { + t.Fatalf("invalid offset in chunk #%d %d != 0", i+1, chunk[0]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code, _ = hex.DecodeString("608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220d8add45a339f741a94b4fe7f22e101b560dc8a5874cbd957a884d8c9239df86264736f6c63430008070033") + chunks, err = ChunkifyCode(code) + if err != nil { + t.Fatal(err) + } + if len(chunks) != (len(code)+30)/31 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0][0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0]) + } + t.Logf("%x\n", chunks[0]) + expected := []byte{0, 1, 0, 13, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3} + for i, chunk := range chunks[1:] { + if chunk[0] != expected[i] { + t.Fatalf("invalid offset in chunk #%d %d != %d", i+1, chunk[0], expected[i]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code, _ = hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea2646970667358221220163c79eab5630c3dbe22f7cc7692da08575198dda76698ae8ee2e3bfe62af3de64736f6c63430008070033") + chunks, err = ChunkifyCode(code) + if err 
+	code, _ = hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea2646970667358221220163c79eab5630c3dbe22f7cc7692da08575198dda76698ae8ee2e3bfe62af3de64736f6c63430008070033")
+	chunks, err = ChunkifyCode(code)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(chunks) != (len(code)+30)/31 {
+		t.Fatalf("invalid length %d", len(chunks))
+	}
+	if chunks[0][0] != 0 {
+		t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0])
+	}
+	expected = []byte{0, 0, 0, 0, 13}
+	for i, chunk := range chunks[1:] {
+		if chunk[0] != expected[i] {
+			t.Fatalf("invalid offset in chunk #%d %d != %d", i+1, chunk[0], expected[i])
+		}
+	}
+	t.Logf("code=%x, chunks=%x\n", code, chunks)
+}
+
+func TestChunkifyCodeSimple(t *testing.T) {
+	code := []byte{
+		0, PUSH4, 1, 2, 3, 4, PUSH3, 58, 68, 12, PUSH21, 1, 2, 3, 4, 5, 6,
+		7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+		// Second 31 bytes
+		0, PUSH21, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+		PUSH7, 1, 2, 3, 4, 5, 6, 7,
+		// Third 31 bytes
+		PUSH30, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+		23, 24, 25, 26, 27, 28, 29, 30,
+	}
+	t.Logf("code=%x", code)
+	chunks, err := ChunkifyCode(code)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(chunks) != 3 {
+		t.Fatalf("invalid length %d", len(chunks))
+	}
+	if chunks[0][0] != 0 {
+		t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0])
+	}
+	if chunks[1][0] != 1 {
+		t.Fatalf("invalid offset in second chunk %d != 1, chunk=%x", chunks[1][0], chunks[1])
+	}
+	if chunks[2][0] != 0 {
+		t.Fatalf("invalid offset in third chunk %d != 0", chunks[2][0])
+	}
+	t.Logf("code=%x, chunks=%x\n", code, chunks)
+}
+
+func TestChunkifyCodeFuzz(t *testing.T) {
+	code := []byte{
+		3, PUSH32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+		17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+	}
+	chunks, err := ChunkifyCode(code)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(chunks) != 1 {
+		t.Fatalf("invalid length %d", len(chunks))
+	}
+	if chunks[0][0] != 0 {
+		t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0])
+	}
+	t.Logf("code=%x, chunks=%x\n", code, chunks)
+
+	code = []byte{
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, PUSH32,
+	}
+	chunks, err = ChunkifyCode(code)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(chunks) != 1 {
+		t.Fatalf("invalid length %d", len(chunks))
+	}
+	if chunks[0][0] != 0 {
+		t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0])
+	}
+	t.Logf("code=%x, chunks=%x\n", code, chunks)
+
+	code = []byte{
+		PUSH4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	}
+	chunks, err = ChunkifyCode(code)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(chunks) != 2 {
+		t.Fatalf("invalid length %d", len(chunks))
+	}
+	if chunks[0][0] != 0 {
+		t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0])
+	}
+	if chunks[1][0] != 0 {
+		t.Fatalf("invalid offset in second chunk %d != 0, chunk=%x", chunks[1][0], chunks[1])
+	}
+	t.Logf("code=%x, chunks=%x\n", code, chunks)
+
+	code = []byte{
+		PUSH4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	}
+	chunks, err = ChunkifyCode(code)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(chunks) != 2 {
+		t.Fatalf("invalid length %d", len(chunks))
+	}
+	if chunks[0][0] != 0 {
+		t.Fatalf("invalid offset in first chunk %d != 0", chunks[0][0])
+	}
+	if chunks[1][0] != 0 {
+		t.Fatalf("invalid offset in second chunk %d != 0, chunk=%x", chunks[1][0], chunks[1])
+	}
+	t.Logf("code=%x, chunks=%x\n", code, chunks)
+}
+
+// This test case checks what happens when two keys whose absence is being proven start with the
+// same byte (0x0b in this case). Only one 'extension status' should be declared.
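+// The present keys span two stems and the absent keys span four, but the two
+// absent stems sharing the 0x0b prefix contribute a single extension status,
+// which is why five statuses are expected in total.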
+func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) {
+	presentKeys := [][]byte{
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580800"),
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580801"),
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580802"),
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580803"),
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580804"),
+		common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd00"),
+		common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd01"),
+		common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd02"),
+		common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd03"),
+	}
+
+	absentKeys := [][]byte{
+		common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb00"),
+		common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb01"),
+		common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb02"),
+		common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb03"),
+		common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb04"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f00"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f01"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f02"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f03"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f04"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f80"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f81"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f82"),
+		common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f83"),
+		common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e700"),
+		common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e702"),
+		common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e703"),
+		common.Hex2Bytes("3aeba70b6afb762af4a507c8ec10747479d797c6ec11c14f92b5699634bd18d4"),
+	}
+
+	values := [][]byte{
+		common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("53bfa56cfcaddf191e0200000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("0700000000000000000000000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"),
+		common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("389a890a6ce3e618843300000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("0200000000000000000000000000000000000000000000000000000000000000"),
+		common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"),
+	}
+
+	root := verkle.New()
+	kv := make(map[string][]byte)
+
+	for i, key := range presentKeys {
+		root.Insert(key, values[i], nil)
+		kv[string(key)] = values[i]
+	}
+
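+	// Build the multiproof over both the present and the absent keys, so that
+	// verification exercises the proofs of absence as well.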
+	proof, Cs, zis, yis := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...), kv)
+	cfg, _ := verkle.GetConfig()
+	if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) {
+		t.Fatal("could not verify proof")
+	}
+
+	t.Log("commitments returned by proof:")
+	for i, c := range Cs {
+		t.Logf("%d %x", i, c.Bytes())
+	}
+
+	p, _, err := verkle.SerializeProof(proof)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("serialized: %x", p)
+	t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.ComputeCommitment().Bytes())
+
+	t.Logf("%d", len(proof.ExtStatus))
+	if len(proof.ExtStatus) != 5 {
+		t.Fatalf("invalid number of declared stems: %d != 5", len(proof.ExtStatus))
+	}
+}
+
+// Cover the case in which a stem is used both for a proof of absence and for a proof of presence.
+func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) {
+	presentKeys := [][]byte{
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580800"),
+	}
+
+	absentKeys := [][]byte{
+		common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73008ffa580800"),
+		// the key differs from the key present in a single byte (0xf5 -> 0x00)
+	}
+
+	values := [][]byte{
+		common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"),
+	}
+
+	root := verkle.New()
+	kv := make(map[string][]byte)
+
+	for i, key := range presentKeys {
+		root.Insert(key, values[i], nil)
+		kv[string(key)] = values[i]
+	}
+
+	proof, Cs, zis, yis := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...), kv)
+	cfg, _ := verkle.GetConfig()
+	if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) {
+		t.Fatal("could not verify proof")
+	}
+
+	t.Log("commitments returned by proof:")
+	for i, c := range Cs {
+		t.Logf("%d %x", i, c.Bytes())
+	}
+
+	p, _, err := verkle.SerializeProof(proof)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("serialized: %x", p)
+	t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.ComputeCommitment().Bytes())
+
+	t.Logf("%d", len(proof.ExtStatus))
+	if len(proof.PoaStems) != 0 {
+		t.Fatal("a proof-of-absence stem was declared, when there was no need")
+	}
+}
+
+func TestGetTreeKeys(t *testing.T) {
+	addr := common.Hex2Bytes("71562b71999873DB5b286dF957af199Ec94617f7")
+	target := common.Hex2Bytes("e00f70099661b0afefd868e5f49863abdd83550021c3b71907a598e86b311900")
+	key := utils.GetTreeKeyVersion(addr)
+	t.Logf("key=%x", key)
+	t.Logf("expected=%x", target)
+	if !bytes.Equal(key, target) {
+		t.Fatalf("differing output %x != %x", key, target)
+	}
+}