diff --git a/.dockerignore b/.dockerignore index bafdf59616b..738cc4a2783 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,5 @@ testing/ef_tests/consensus-spec-tests +testing/execution_engine_integration/execution_clients target/ *.data *.tar.gz diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml new file mode 100644 index 00000000000..df09502307d --- /dev/null +++ b/.github/custom/clippy.toml @@ -0,0 +1,21 @@ +disallowed-from-async-methods = [ + "tokio::runtime::Handle::block_on", + "tokio::runtime::Runtime::block_on", + "tokio::task::LocalSet::block_on", + "tokio::sync::Mutex::blocking_lock", + "tokio::sync::RwLock::blocking_read", + "tokio::sync::mpsc::Receiver::blocking_recv", + "tokio::sync::mpsc::UnboundedReceiver::blocking_recv", + "tokio::sync::oneshot::Receiver::blocking_recv", + "tokio::sync::mpsc::Sender::blocking_send", + "tokio::sync::RwLock::blocking_write", +] +async-wrapper-methods = [ + "tokio::runtime::Handle::spawn_blocking", + "task_executor::TaskExecutor::spawn_blocking", + "task_executor::TaskExecutor::spawn_blocking_handle", + "warp_utils::task::blocking_task", + "warp_utils::task::blocking_json_task", + "validator_client::http_api::blocking_signed_json_task", + "execution_layer::test_utils::MockServer::new", +] diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 57ccbdaa140..a58491d04fd 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -12,7 +12,7 @@ env: # Deny warnings in CI RUSTFLAGS: "-D warnings" # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2021-12-01 + PINNED_NIGHTLY: nightly-2022-05-20 jobs: target-branch-check: name: target-branch-check @@ -252,6 +252,23 @@ jobs: run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + disallowed-from-async-lint: + name: disallowed-from-async-lint + runs-on: ubuntu-latest + needs: cargo-fmt + continue-on-error: true + steps: + - uses: actions/checkout@v1 + - name: Install SigP Clippy fork + run: | + cd .. 
+ git clone https://github.com/michaelsproul/rust-clippy.git + cd rust-clippy + git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a + cargo build --release --bin cargo-clippy --bin clippy-driver + cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + - name: Run Clippy with the disallowed-from-async lint + run: make nightly-lint check-msrv: name: check-msrv runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 9376efc768e..9830ef39bea 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ perf.data* *.tar.gz /bin genesis.ssz +/clippy.toml diff --git a/Cargo.lock b/Cargo.lock index 05719eab2a2..3199d1425aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -202,15 +202,6 @@ dependencies = [ "pin-project-lite 0.2.8", ] -[[package]] -name = "atomic" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "attohttpc" version = "0.10.1" @@ -263,17 +254,29 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base64" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" + [[package]] name = "beacon_chain" version = "0.2.0" dependencies = [ - "bitvec", + "bitvec 0.20.4", "bls", "derivative", "environment", @@ -284,9 +287,11 @@ dependencies = [ "eth2_ssz_derive", "eth2_ssz_types", "execution_layer", + "exit-future", "fork_choice", "futures", "genesis", + "hex", "int_to_bytes", "itertools", "lazy_static", @@ -323,7 +328,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.2.1" +version = "2.3.1" dependencies = [ "beacon_chain", "clap", @@ -394,10 +399,22 @@ version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" dependencies = [ - "funty", - "radium", + "funty 1.1.0", + "radium 0.6.2", "tap", - "wyz", + "wyz 0.2.0", +] + +[[package]] +name = "bitvec" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", + "tap", + "wyz 0.5.0", ] [[package]] @@ -443,7 +460,7 @@ dependencies = [ "eth2_hashing", "eth2_serde_utils", "eth2_ssz", - "ethereum-types", + "ethereum-types 0.12.1", "hex", "milagro_bls", "rand 0.7.3", @@ -467,7 +484,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.2.1" +version = "2.3.1" dependencies = [ "beacon_node", "clap", @@ -542,6 +559,9 @@ name = "bytes" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +dependencies = [ + "serde", +] [[package]] name = "bzip2" @@ -572,7 +592,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", - "ethereum-types", + "ethereum-types 0.12.1", "quickcheck 0.9.2", "quickcheck_macros", "smallvec", 
@@ -690,7 +710,7 @@ dependencies = [ "dirs", "eth2_network_config", "eth2_ssz", - "ethereum-types", + "ethereum-types 0.12.1", "hex", "serde", "serde_json", @@ -776,6 +796,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "convert_case" version = "0.4.0" @@ -912,6 +938,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "rand_core 0.6.3", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.3" @@ -1099,7 +1137,7 @@ name = "deposit_contract" version = "0.2.0" dependencies = [ "eth2_ssz", - "ethabi", + "ethabi 16.0.0", "hex", "reqwest", "serde_json", @@ -1114,10 +1152,19 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" dependencies = [ - "const-oid", + "const-oid 0.5.2", "typenum", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", +] + [[package]] name = "derivative" version = "2.2.0" @@ -1268,12 +1315,24 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" dependencies = [ - "der", - "elliptic-curve", + "der 0.3.5", + "elliptic-curve 0.9.12", "hmac 0.11.0", "signature", ] +[[package]] +name = "ecdsa" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +dependencies = [ + "der 0.5.1", + "elliptic-curve 0.11.12", + "rfc6979", + "signature", +] + [[package]] name = "ed25519" version = "1.4.1" @@ -1309,7 +1368,7 @@ dependencies = [ "derivative", "eth2_ssz", "eth2_ssz_derive", - "ethereum-types", + "ethereum-types 0.12.1", "fork_choice", "fs2", "hex", @@ -1339,12 +1398,30 @@ version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ - "bitvec", - "ff", + "bitvec 0.20.4", + "ff 0.9.0", + "generic-array", + "group 0.9.0", + "pkcs8 0.6.1", + "rand_core 0.6.3", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +dependencies = [ + "base16ct", + "crypto-bigint", + "der 0.5.1", + "ff 0.11.0", "generic-array", - "group", - "pkcs8", + "group 0.11.0", "rand_core 0.6.3", + "sec1", "subtle", "zeroize", ] @@ -1369,20 +1446,20 @@ dependencies = [ "bytes", "ed25519-dalek", "hex", - "k256", + "k256 0.8.1", "log", "rand 0.8.5", "rlp", "serde", - "sha3", + "sha3 0.9.1", "zeroize", ] [[package]] name 
= "enum-as-inner" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" +checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", "proc-macro2", @@ -1500,6 +1577,7 @@ dependencies = [ "futures-util", "libsecp256k1", "lighthouse_network", + "mime", "procinfo", "proto_array", "psutil", @@ -1599,7 +1677,7 @@ dependencies = [ name = "eth2_serde_utils" version = "0.1.1" dependencies = [ - "ethereum-types", + "ethereum-types 0.12.1", "hex", "serde", "serde_derive", @@ -1611,7 +1689,7 @@ name = "eth2_ssz" version = "0.4.1" dependencies = [ "eth2_ssz_derive", - "ethereum-types", + "ethereum-types 0.12.1", "smallvec", ] @@ -1673,11 +1751,28 @@ version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5" dependencies = [ - "ethereum-types", + "ethereum-types 0.12.1", "hex", "serde", "serde_json", - "sha3", + "sha3 0.9.1", + "thiserror", + "uint", +] + +[[package]] +name = "ethabi" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b69517146dfab88e9238c00c724fd8e277951c3cc6f22b016d72f422a832213e" +dependencies = [ + "ethereum-types 0.13.1", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3 0.10.1", "thiserror", "uint", ] @@ -1695,20 +1790,68 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethbloom" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-rlp", + "impl-serde", + "tiny-keccak", +] + [[package]] name = "ethereum-types" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" dependencies = [ - "ethbloom", + "ethbloom 0.11.1", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types 0.10.1", + "uint", +] + +[[package]] +name = "ethereum-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +dependencies = [ + "ethbloom 0.12.1", "fixed-hash", "impl-rlp", "impl-serde", - "primitive-types", + "primitive-types 0.11.1", "uint", ] +[[package]] +name = "ethers-core" +version = "0.6.0" +source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +dependencies = [ + "arrayvec", + "bytes", + "elliptic-curve 0.11.12", + "ethabi 17.0.0", + "generic-array", + "hex", + "k256 0.10.4", + "rand 0.8.5", + "rlp", + "rlp-derive", + "serde", + "serde_json", + "thiserror", + "tiny-keccak", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -1736,6 +1879,7 @@ dependencies = [ "eth1", "eth2_serde_utils", "eth2_ssz_types", + "ethers-core", "exit-future", "futures", "hex", @@ -1804,7 +1948,17 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72a4d941a5b7c2a75222e2d44fcdf634a67133d9db31e177ae5ff6ecda852bfe" dependencies = [ - "bitvec", + "bitvec 0.20.4", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "ff" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +dependencies = [ "rand_core 0.6.3", "subtle", ] @@ -1924,6 +2078,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.3.21" @@ -2136,7 +2296,18 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ - "ff", + "ff 0.9.0", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "group" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +dependencies = [ + "ff 0.11.0", "rand_core 0.6.3", "subtle", ] @@ -2345,6 +2516,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "lighthouse_version", + "logging", "network", "parking_lot 0.12.0", "safe_arith", @@ -2354,6 +2526,7 @@ dependencies = [ "slot_clock", "state_processing", "store", + "task_executor", "tokio", "tokio-stream", "tree_hash", @@ -2418,7 +2591,7 @@ dependencies = [ "httpdate", "itoa 1.0.1", "pin-project-lite 0.2.8", - "socket2 0.4.4", + "socket2", "tokio", "tower-service", "tracing", @@ -2505,7 +2678,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.3.1", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec 3.1.2", ] [[package]] @@ -2576,14 +2758,14 @@ dependencies = [ [[package]] name = "ipconfig" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.3.19", - "widestring", + "socket2", + "widestring 0.5.1", "winapi", - "winreg 0.6.2", + "winreg 0.7.0", ] [[package]] @@ -2658,11 +2840,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.11.1", + "elliptic-curve 0.9.12", "sha2 0.9.9", ] +[[package]] +name = "k256" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +dependencies = [ + "cfg-if", + "ecdsa 0.13.4", + "elliptic-curve 0.11.12", + "sec1", + "sha3 0.9.1", +] + [[package]] name = "keccak" version = "0.1.0" @@ -2686,7 +2881,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.2.1" +version = "2.3.1" dependencies = [ "account_utils", "bls", @@ -2801,18 +2996,17 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.43.0" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4e8570e25fa03d4385405dbeaf540ba00e3ee50942f03d84e1a8928a029f35f9" +checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" dependencies = [ - "atomic", "bytes", "futures", "futures-timer", "getrandom 0.2.6", "instant", "lazy_static", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -2854,11 +3048,11 @@ dependencies = [ "multistream-select 0.10.4", "parking_lot 0.11.2", "pin-project 1.0.10", - "prost", - "prost-build", + "prost 0.9.0", + "prost-build 0.9.0", "rand 0.8.5", "ring", - "rw-stream-sink", + "rw-stream-sink 0.2.1", "sha2 0.9.9", "smallvec", "thiserror", @@ -2869,9 +3063,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9164ec41455856e8187addc870bb4fe1ea2ee28e1a9244831d449a2429b32c1a" +checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" dependencies = [ "asn1_der", "bs58", @@ -2889,11 +3083,11 @@ dependencies = [ "multistream-select 0.11.0", "parking_lot 0.12.0", "pin-project 1.0.10", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.8.5", "ring", - "rw-stream-sink", + "rw-stream-sink 0.3.0", "sha2 0.10.2", "smallvec", "thiserror", @@ -2904,22 +3098,23 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7838647d33978b77f943687412f4a39e74234c8342cbfdad14282b465b272cb4" +checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" dependencies = [ "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", + "parking_lot 0.12.0", "smallvec", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f62943fba0b0dae02b87868620c52a581c54ec9fb04b5e195cf20313fc510c3" +checksum = "c9be947d8cea8e6b469201314619395826896d2c051053c3723910ba98e68e04" dependencies = [ "asynchronous-codec", "base64", @@ -2929,12 +3124,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-swarm", "log", "prometheus-client", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -2945,28 +3140,32 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f219b4d4660fe3a04bf5fe6b5970902b7c1918e25b2536be8c70efc480f88f8" +checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" dependencies = [ + "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-swarm", "log", "lru", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", + "prost-codec", "smallvec", + "thiserror", + "void", ] [[package]] name = "libp2p-metrics" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29e4e5e4c5aa567fe1ee3133afe088dc2d2fd104e20c5c2c5c2649f75129677" +checksum = "adc4357140141ba9739eee71b20aa735351c0fc642635b2bffc7f57a6b5c1090" dependencies = [ - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -2975,14 +3174,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.32.0" +version = 
"0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442eb0c9fff0bf22a34f015724b4143ce01877e079ed0963c722d94c07c72160" +checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", "nohash-hasher", "parking_lot 0.12.0", @@ -2993,18 +3192,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd7e0c94051cda67123be68cf6b65211ba3dde7277be9068412de3e7ffd63ef" +checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3015,33 +3214,33 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "962c0fb0e7212fb96a69b87f2d09bcefd317935239bdc79cda900e7a8897a3fe" +checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53ab2d4eb8ef2966b10fdf859245cdd231026df76d3c6ed2cf9e418a8f688ec9" +checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", "pin-project 1.0.10", "rand 0.7.3", @@ -3062,34 +3261,35 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193447aa729c85aac2376828df76d171c1a589c9e6b58fcc7f9d9a020734122c" +checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" dependencies = [ "futures", "futures-timer", "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", - "socket2 0.4.4", + "socket2", "tokio", ] [[package]] name = "libp2p-websocket" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c932834c3754501c368d1bf3d0fb458487a642b90fc25df082a3a2f3d3b32e37" +checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", + "parking_lot 0.12.0", "quicksink", - "rw-stream-sink", + "rw-stream-sink 0.3.0", "soketto", "url", "webpki-roots", @@ -3097,12 +3297,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be902ebd89193cd020e89e89107726a38cfc0d16d18f613f4a37d046e92c7517" +checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" dependencies = [ "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "parking_lot 0.12.0", "thiserror", "yamux", @@ -3180,7 +3380,7 @@ dependencies = [ [[package]] name = 
"lighthouse" -version = "2.2.1" +version = "2.3.1" dependencies = [ "account_manager", "account_utils", @@ -3427,7 +3627,7 @@ name = "merkle_proof" version = "0.2.0" dependencies = [ "eth2_hashing", - "ethereum-types", + "ethereum-types 0.12.1", "lazy_static", "quickcheck 0.9.2", "quickcheck_macros", @@ -3686,6 +3886,7 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", + "derivative", "environment", "error-chain", "eth2_ssz", @@ -3966,10 +4167,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ "arrayvec", - "bitvec", + "bitvec 0.20.4", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive", + "parity-scale-codec-derive 2.3.1", + "serde", +] + +[[package]] +name = "parity-scale-codec" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" +dependencies = [ + "arrayvec", + "bitvec 1.0.0", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive 3.1.2", "serde", ] @@ -3985,6 +4200,18 @@ dependencies = [ "syn", ] +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "parking_lot" version = "0.11.2" @@ -4161,8 +4388,19 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" dependencies = [ - "der", - "spki", + "der 0.3.5", + "spki 0.3.0", +] + +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der 0.5.1", + "spki 0.5.4", + "zeroize", ] [[package]] @@ -4241,7 +4479,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" dependencies = [ "fixed-hash", - "impl-codec", + "impl-codec 0.5.1", + "impl-rlp", + "impl-serde", + "uint", +] + +[[package]] +name = "primitive-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +dependencies = [ + "fixed-hash", + "impl-codec 0.6.0", "impl-rlp", "impl-serde", "uint", @@ -4325,9 +4576,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a896938cc6018c64f279888b8c7559d3725210d5db9a3a1ee6bc7188d51d34" +checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" dependencies = [ "dtoa", "itoa 1.0.1", @@ -4353,7 +4604,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.9.0", +] + +[[package]] +name = "prost" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +dependencies = [ + "bytes", + "prost-derive 0.10.1", ] [[package]] @@ -4369,13 +4630,48 
@@ dependencies = [ "log", "multimap", "petgraph", - "prost", - "prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "regex", "tempfile", "which", ] +[[package]] +name = "prost-build" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +dependencies = [ + "bytes", + "cfg-if", + "cmake", + "heck 0.4.0", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost 0.10.4", + "prost-types 0.10.1", + "regex", + "tempfile", + "which", +] + +[[package]] +name = "prost-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +dependencies = [ + "asynchronous-codec", + "bytes", + "prost 0.10.4", + "thiserror", + "unsigned-varint 0.7.1", +] + [[package]] name = "prost-derive" version = "0.9.0" @@ -4389,6 +4685,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.9.0" @@ -4396,7 +4705,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes", - "prost", + "prost 0.9.0", +] + +[[package]] +name = "prost-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +dependencies = [ + "bytes", + "prost 0.10.4", ] [[package]] @@ -4521,6 +4840,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -4728,6 +5053,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +dependencies = [ + "crypto-bigint", + "hmac 0.11.0", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -4759,6 +5095,17 @@ dependencies = [ "rustc-hex", ] +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "rpassword" version = "5.0.1" @@ -4871,6 +5218,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "rw-stream-sink" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +dependencies = [ + "futures", + "pin-project 1.0.10", + "static_assertions", +] + [[package]] name = "ryu" version = "1.0.9" @@ -4968,6 +5326,19 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sec1" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +dependencies = [ + "der 0.5.1", + "generic-array", + "pkcs8 0.8.0", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1" version = "0.21.3" @@ -5202,6 +5573,16 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +dependencies = [ + "digest 0.10.3", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -5475,17 +5856,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if", - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.4.4" @@ -5524,7 +5894,17 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" dependencies = [ - "der", + "der 0.3.5", +] + +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der 0.5.1", ] [[package]] @@ -5641,14 +6021,15 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e623e69a04a6352677c1f892027e14e034dfc6c4aabed0a4a0be9c1a0a46cee" +checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" dependencies = [ "darling", "itertools", "proc-macro2", "quote", + "smallvec", "syn", ] @@ -5658,7 +6039,7 @@ version = "0.2.0" dependencies = [ "criterion", "eth2_hashing", - "ethereum-types", + "ethereum-types 0.12.1", ] [[package]] @@ -5718,6 +6099,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "slog", + "sloggers", "tokio", ] @@ -5926,7 +6308,7 @@ dependencies = [ "parking_lot 0.12.0", "pin-project-lite 0.2.8", "signal-hook-registry", - "socket2 0.4.4", + "socket2", "tokio-macros", "winapi", ] @@ -6133,7 +6515,7 @@ dependencies = [ "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", - "ethereum-types", + "ethereum-types 0.12.1", "rand 0.8.5", "smallvec", "tree_hash_derive", @@ -6151,9 +6533,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.4" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" +checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" dependencies = [ "async-trait", "cfg-if", @@ -6176,9 +6558,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.4" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" +checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" dependencies = [ "cfg-if", "futures-util", @@ -6186,7 +6568,7 @@ dependencies = [ "lazy_static", "log", "lru-cache", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "resolv-conf", "smallvec", "thiserror", @@ -6252,7 +6634,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", - "ethereum-types", + "ethereum-types 
0.12.1", "hex", "int_to_bytes", "itertools", @@ -6726,8 +7108,8 @@ dependencies = [ "base64", "bytes", "derive_more", - "ethabi", - "ethereum-types", + "ethabi 16.0.0", + "ethereum-types 0.12.1", "futures", "futures-timer", "headers", @@ -6834,6 +7216,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +[[package]] +name = "widestring" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" + [[package]] name = "winapi" version = "0.3.9" @@ -6873,7 +7261,7 @@ checksum = "177b1723986bcb4c606058e77f6e8614b51c7f9ad2face6f6fd63dd5c8b3cec3" dependencies = [ "field-offset", "libc", - "widestring", + "widestring 0.4.3", "winapi", ] @@ -6922,9 +7310,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "winreg" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ "winapi", ] @@ -6944,6 +7332,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +[[package]] +name = "wyz" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" +dependencies = [ + "tap", +] + [[package]] name = "x25519-dalek" version = "1.1.1" diff --git a/Dockerfile b/Dockerfile index 76347e9bfe4..aa2853ce4f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ARG FEATURES ENV FEATURES $FEATURES RUN cd lighthouse && make -FROM ubuntu:latest +FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Dockerfile.cross b/Dockerfile.cross index c8bd8688781..e210c5bdfcb 100644 --- a/Dockerfile.cross +++ b/Dockerfile.cross @@ -1,7 +1,7 @@ # This image is meant to enable cross-architecture builds. # It assumes the lighthouse binary has already been # compiled for `$TARGETPLATFORM` and moved to `./bin`. -FROM --platform=$TARGETPLATFORM ubuntu:latest +FROM --platform=$TARGETPLATFORM ubuntu:22.04 RUN apt-get update && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Makefile b/Makefile index 01fd45a4dd8..a97637bfd12 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,7 @@ AARCH64_TAG = "aarch64-unknown-linux-gnu" BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly +CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. @@ -145,6 +146,14 @@ lint: -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push +# FIXME: fails if --release is added due to broken HTTP API tests +nightly-lint: + cp .github/custom/clippy.toml . + cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests -- \ + -A clippy::all \ + -D clippy::disallowed_from_async + rm clippy.toml + # Runs the makefile in the `ef_tests` repo. 
# # May download and extract an archive of test vectors from the ethereum diff --git a/README.md b/README.md index acf5f5926de..aa3cc020e1a 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,8 @@ An open-source Ethereum consensus client, written in Rust and maintained by Sigma Prime. -[![Build Status]][Build Link] [![Book Status]][Book Link] [![Chat Badge]][Chat Link] +[![Book Status]][Book Link] [![Chat Badge]][Chat Link] -[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=stable -[Build Link]: https://github.com/sigp/lighthouse/actions [Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da [Chat Link]: https://discord.gg/cyAszAh [Book Status]:https://img.shields.io/badge/user--docs-unstable-informational @@ -43,7 +41,7 @@ as the canonical staking deposit contract address. The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information for users and developers. -The Lighthouse team maintains a blog at [lighthouse.sigmaprime.io][blog] which contains periodical +The Lighthouse team maintains a blog at [lighthouse-blog.sigmaprime.io][blog] which contains periodical progress updates, roadmap insights and interesting findings. ## Branches diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 986ff7a615b..081e91aba8a 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.2.1" +version = "2.3.1" authors = ["Paul Hauner ", "Age Manning { /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: RwLock>, + /// Transmitter used to indicate that slot-start fork choice has completed running. + pub fork_choice_signal_tx: Option, + /// Receiver used by block production to wait on slot-start fork choice. + pub fork_choice_signal_rx: Option, /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, @@ -587,7 +601,7 @@ impl BeaconChain { block_root: Hash256, ) -> Result> + '_, Error> { let block = self - .get_block(&block_root)? + .get_blinded_block(&block_root)? .ok_or(Error::MissingBeaconBlock(block_root))?; let state = self .get_state(&block.state_root(), Some(block.slot()))? @@ -752,11 +766,11 @@ impl BeaconChain { &self, request_slot: Slot, skips: WhenSlotSkipped, - ) -> Result>, Error> { + ) -> Result>, Error> { let root = self.block_root_at_slot(request_slot, skips)?; if let Some(block_root) = root { - Ok(self.store.get_block(&block_root)?) + Ok(self.store.get_blinded_block(&block_root)?) } else { Ok(None) } @@ -961,16 +975,14 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. - pub fn get_block_checking_early_attester_cache( + pub async fn get_block_checking_early_attester_cache( &self, block_root: &Hash256, ) -> Result>, Error> { - let block_opt = self - .store - .get_block(block_root)? - .or_else(|| self.early_attester_cache.get_block(*block_root)); - - Ok(block_opt) + if let Some(block) = self.early_attester_cache.get_block(*block_root) { + return Ok(Some(block)); + } + self.get_block(block_root).await } /// Returns the block at the given root, if any. @@ -978,11 +990,69 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. - pub fn get_block( + pub async fn get_block( &self, block_root: &Hash256, ) -> Result>, Error> { - Ok(self.store.get_block(block_root)?) 
+ // Load block from database, returning immediately if we have the full block w payload + // stored. + let blinded_block = match self.store.try_get_full_block(block_root)? { + Some(DatabaseBlock::Full(block)) => return Ok(Some(block)), + Some(DatabaseBlock::Blinded(block)) => block, + None => return Ok(None), + }; + + // If we only have a blinded block, load the execution payload from the EL. + let block_message = blinded_block.message(); + let execution_payload_header = &block_message + .execution_payload() + .map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))? + .execution_payload_header; + + let exec_block_hash = execution_payload_header.block_hash; + + let execution_payload = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)? + .get_payload_by_block_hash(exec_block_hash) + .await + .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? + .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; + + // Verify payload integrity. + let header_from_payload = ExecutionPayloadHeader::from(&execution_payload); + if header_from_payload != *execution_payload_header { + for txn in &execution_payload.transactions { + debug!( + self.log, + "Reconstructed txn"; + "bytes" => format!("0x{}", hex::encode(&**txn)), + ); + } + + return Err(Error::InconsistentPayloadReconstructed { + slot: blinded_block.slot(), + exec_block_hash, + canonical_payload_root: execution_payload_header.tree_hash_root(), + reconstructed_payload_root: header_from_payload.tree_hash_root(), + canonical_transactions_root: execution_payload_header.transactions_root, + reconstructed_transactions_root: header_from_payload.transactions_root, + }); + } + + // Add the payload to the block to form a full block. + blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or(Error::AddPayloadLogicError) + .map(Some) + } + + pub fn get_blinded_block( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + Ok(self.store.get_blinded_block(block_root)?) } /// Returns the state at the given root, if any. @@ -2208,7 +2278,7 @@ impl BeaconChain { /// This method is generally much more efficient than importing each block using /// `Self::process_block`. pub fn process_chain_segment( - &self, + self: &Arc, chain_segment: Vec>, ) -> ChainSegmentResult { let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); @@ -2402,7 +2472,7 @@ impl BeaconChain { /// Returns an `Err` if the given block was invalid, or an error was encountered during /// verification. pub fn process_block>( - &self, + self: &Arc, unverified_block: B, ) -> Result> { // Start the Prometheus timer. @@ -2893,12 +2963,64 @@ impl BeaconChain { Ok(block_root) } + /// If configured, wait for the fork choice run at the start of the slot to complete. 
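
The reconstruction logic in the new async `get_block` above is worth isolating: the database keeps only an `ExecutionPayloadHeader` commitment for blinded blocks, and a payload fetched back from the execution layer is accepted only if a header recomputed from it matches the stored one. Below is a minimal standalone sketch of that check; the types and the digest function are toy stand-ins, not Lighthouse's real SSZ types or tree hashing.

```rust
// Toy stand-ins for `ExecutionPayloadHeader` / `ExecutionPayload`.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Header {
    block_hash: [u8; 32],
    transactions_root: [u8; 32],
}

#[derive(Debug, Clone)]
struct Payload {
    block_hash: [u8; 32],
    transactions: Vec<Vec<u8>>,
}

// Placeholder for SSZ tree hashing: any deterministic digest works for the sketch.
fn transactions_root(txs: &[Vec<u8>]) -> [u8; 32] {
    let mut acc = [0u8; 32];
    for tx in txs {
        for (i, b) in tx.iter().enumerate() {
            acc[i % 32] ^= *b;
        }
    }
    acc
}

fn header_of(payload: &Payload) -> Header {
    Header {
        block_hash: payload.block_hash,
        transactions_root: transactions_root(&payload.transactions),
    }
}

/// Accept a fetched payload only if it recommits to the stored header,
/// mirroring the `header_from_payload != *execution_payload_header` check.
fn reconstruct(stored: &Header, fetched: Payload) -> Result<Payload, String> {
    let recomputed = header_of(&fetched);
    if recomputed != *stored {
        return Err(format!(
            "inconsistent payload reconstructed: {recomputed:?} != {stored:?}"
        ));
    }
    Ok(fetched)
}

fn main() {
    let payload = Payload { block_hash: [1; 32], transactions: vec![vec![0xde, 0xad]] };
    let stored = header_of(&payload);
    assert!(reconstruct(&stored, payload).is_ok());
}
```
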
+ fn wait_for_fork_choice_before_block_production( + self: &Arc, + slot: Slot, + ) -> Result<(), BlockProductionError> { + if let Some(rx) = &self.fork_choice_signal_rx { + let current_slot = self + .slot() + .map_err(|_| BlockProductionError::UnableToReadSlot)?; + + let timeout = Duration::from_millis(self.config.fork_choice_before_proposal_timeout_ms); + + if slot == current_slot || slot == current_slot + 1 { + match rx.wait_for_fork_choice(slot, timeout) { + ForkChoiceWaitResult::Success(fc_slot) => { + debug!( + self.log, + "Fork choice successfully updated before block production"; + "slot" => slot, + "fork_choice_slot" => fc_slot, + ); + } + ForkChoiceWaitResult::Behind(fc_slot) => { + warn!( + self.log, + "Fork choice notifier out of sync with block production"; + "fork_choice_slot" => fc_slot, + "slot" => slot, + "message" => "this block may be orphaned", + ); + } + ForkChoiceWaitResult::TimeOut => { + warn!( + self.log, + "Timed out waiting for fork choice before proposal"; + "message" => "this block may be orphaned", + ); + } + } + } else { + error!( + self.log, + "Producing block at incorrect slot"; + "block_slot" => slot, + "current_slot" => current_slot, + "message" => "check clock sync, this block may be orphaned", + ); + } + } + Ok(()) + } + /// Produce a new block at the given `slot`. /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. pub fn produce_block>( - &self, + self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, @@ -2913,7 +3035,7 @@ impl BeaconChain { /// Same as `produce_block` but allowing for configuration of RANDAO-verification. pub fn produce_block_with_verification>( - &self, + self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, @@ -2922,6 +3044,10 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); + let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); + self.wait_for_fork_choice_before_block_production(slot)?; + drop(fork_choice_timer); + // Producing a block requires the tree hash cache, so clone a full state corresponding to // the head from the snapshot cache. Unfortunately we can't move the snapshot out of the // cache (which would be fast), because we need to re-process the block after it has been @@ -3234,7 +3360,7 @@ impl BeaconChain { /// /// See the documentation of `InvalidationOperation` for information about defining `op`. pub fn process_invalid_execution_payload( - &self, + self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { debug!( @@ -3302,11 +3428,19 @@ impl BeaconChain { } /// Execute the fork choice algorithm and enthrone the result as the canonical head. - pub fn fork_choice(&self) -> Result<(), Error> { + pub fn fork_choice(self: &Arc) -> Result<(), Error> { + self.fork_choice_at_slot(self.slot()?) + } + + /// Execute fork choice at `slot`, processing queued attestations from `slot - 1` and earlier. + /// + /// The `slot` is not verified in any way, callers should ensure it corresponds to at most + /// one slot ahead of the current wall-clock slot. 
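
The `fork_choice_signal_tx`/`fork_choice_signal_rx` pair used by `wait_for_fork_choice_before_block_production` is defined elsewhere in the PR. Going only by the call sites visible here (`notify_fork_choice_complete` on the sender, `wait_for_fork_choice` returning `Success`/`Behind`/`TimeOut` on the receiver), its contract resembles a slot-stamped condition variable. The following is a rough reimplementation of those semantics under that assumption, with the `Behind`/`TimeOut` distinction approximated; it is not the PR's actual type.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;

enum WaitResult {
    Success(u64), // fork choice has reached the requested slot
    Behind(u64),  // woke up, but fork choice is still at an earlier slot
    TimeOut,      // no signal at all within the timeout
}

/// Shared "latest fork-choice slot" cell, used by one sender and one receiver.
#[derive(Clone, Default)]
struct ForkChoiceSignal(Arc<(Mutex<Option<u64>>, Condvar)>);

impl ForkChoiceSignal {
    /// Sender side: called from `per_slot_task` once fork choice completes.
    fn notify_fork_choice_complete(&self, slot: u64) {
        let (lock, cvar) = &*self.0;
        *lock.lock().unwrap() = Some(slot);
        cvar.notify_all();
    }

    /// Receiver side: block production waits (bounded) for fork choice at `slot`.
    fn wait_for_fork_choice(&self, slot: u64, timeout: Duration) -> WaitResult {
        let (lock, cvar) = &*self.0;
        let guard = lock.lock().unwrap();
        let (guard, timeout_result) = cvar
            .wait_timeout_while(guard, timeout, |fc| fc.map_or(true, |s| s < slot))
            .unwrap();
        match (*guard, timeout_result.timed_out()) {
            (Some(fc_slot), false) => WaitResult::Success(fc_slot),
            (Some(fc_slot), true) => WaitResult::Behind(fc_slot),
            (None, _) => WaitResult::TimeOut,
        }
    }
}

fn main() {
    let signal = ForkChoiceSignal::default();
    signal.notify_fork_choice_complete(42);
    match signal.wait_for_fork_choice(42, Duration::from_millis(250)) {
        WaitResult::Success(s) => println!("fork choice ready at slot {s}"),
        WaitResult::Behind(s) => println!("fork choice behind at slot {s}"),
        WaitResult::TimeOut => println!("timed out"),
    }
}
```
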
+ pub fn fork_choice_at_slot(self: &Arc, slot: Slot) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); - let result = self.fork_choice_internal(); + let result = self.fork_choice_internal(slot); if result.is_err() { metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); @@ -3315,13 +3449,13 @@ impl BeaconChain { result } - fn fork_choice_internal(&self) -> Result<(), Error> { + fn fork_choice_internal(self: &Arc, slot: Slot) -> Result<(), Error> { // Atomically obtain the head block root and the finalized block. let (beacon_block_root, finalized_block) = { let mut fork_choice = self.fork_choice.write(); // Determine the root of the block that is the head of the chain. - let beacon_block_root = fork_choice.get_head(self.slot()?, &self.spec)?; + let beacon_block_root = fork_choice.get_head(slot, &self.spec)?; (beacon_block_root, fork_choice.get_finalized_block()?) }; @@ -3373,7 +3507,8 @@ impl BeaconChain { .map::, _>(Ok) .unwrap_or_else(|| { let beacon_block = self - .get_block(&beacon_block_root)? + .store + .get_full_block(&beacon_block_root)? .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let beacon_state_root = beacon_block.state_root(); @@ -3692,6 +3827,8 @@ impl BeaconChain { } // Update the execution layer. + // Always use the wall-clock slot to update the execution engine rather than the `slot` + // passed in. if let Err(e) = self.update_execution_engine_forkchoice_blocking(self.slot()?) { crit!( self.log, @@ -3718,7 +3855,7 @@ impl BeaconChain { Ok(()) } - pub fn prepare_beacon_proposer_blocking(&self) -> Result<(), Error> { + pub fn prepare_beacon_proposer_blocking(self: &Arc) -> Result<(), Error> { let current_slot = self.slot()?; // Avoids raising an error before Bellatrix. @@ -3750,7 +3887,10 @@ impl BeaconChain { /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for /// the next slot and the block at the current slot is already known). - pub async fn prepare_beacon_proposer_async(&self, current_slot: Slot) -> Result<(), Error> { + pub async fn prepare_beacon_proposer_async( + self: &Arc, + current_slot: Slot, + ) -> Result<(), Error> { let prepare_slot = current_slot + 1; let prepare_epoch = prepare_slot.epoch(T::EthSpec::slots_per_epoch()); @@ -3942,8 +4082,6 @@ impl BeaconChain { "prepare_slot" => prepare_slot ); - // Use the blocking method here so that we don't form a queue of these functions when - // routinely calling them. self.update_execution_engine_forkchoice_async(current_slot) .await?; } @@ -3952,7 +4090,7 @@ impl BeaconChain { } pub fn update_execution_engine_forkchoice_blocking( - &self, + self: &Arc, current_slot: Slot, ) -> Result<(), Error> { // Avoids raising an error before Bellatrix. @@ -3973,7 +4111,7 @@ impl BeaconChain { } pub async fn update_execution_engine_forkchoice_async( - &self, + self: &Arc, current_slot: Slot, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4091,7 +4229,7 @@ impl BeaconChain { drop(forkchoice_lock); match forkchoice_updated_response { - Ok(status) => match &status { + Ok(status) => match status { PayloadStatus::Valid => { // Ensure that fork choice knows that the block is no longer optimistic. if let Err(e) = self @@ -4134,13 +4272,24 @@ impl BeaconChain { ); // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. 
- self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: *latest_valid_hash, - }, - )?; + let chain = self.clone(); + execution_layer + .executor() + .spawn_blocking_handle( + move || { + chain.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + }, + "process_invalid_execution_payload_many", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4156,11 +4305,22 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). - self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - )?; + let chain = self.clone(); + execution_layer + .executor() + .spawn_blocking_handle( + move || { + chain.process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }, + ) + }, + "process_invalid_execution_payload_single", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4252,12 +4412,46 @@ impl BeaconChain { /// Called by the timer on every slot. /// - /// Performs slot-based pruning. - pub fn per_slot_task(&self) { + /// Note: this function **MUST** be called from a non-async context since + /// it contains a call to `fork_choice` which may eventually call + /// `tokio::runtime::block_on` in certain cases. + pub fn per_slot_task(self: &Arc) { trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { + // Always run the light-weight pruning tasks (these structures should be empty during + // sync anyway). self.naive_aggregation_pool.write().prune(slot); self.block_times_cache.write().prune(slot); + + // Don't run heavy-weight tasks during sync. + if self.best_slot().map_or(true, |head_slot| { + head_slot + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot + }) { + return; + } + + // Run fork choice and signal to any waiting task that it has completed. + if let Err(e) = self.fork_choice() { + error!( + self.log, + "Fork choice error at slot start"; + "error" => ?e, + "slot" => slot, + ); + } + + // Send the notification regardless of fork choice success, this is a "best effort" + // notification and we don't want block production to hit the timeout in case of error. + if let Some(tx) = &self.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(slot) { + warn!( + self.log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => slot, + ); + } + } } } @@ -4500,11 +4694,14 @@ impl BeaconChain { /// /// This could be a very expensive operation and should only be done in testing/analysis /// activities. 
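
Both invalidation call sites above move the synchronous `process_invalid_execution_payload` call onto the blocking thread pool and then await it, which is exactly the discipline the new `disallowed-from-async` lint (see the `clippy.toml` and `nightly-lint` target earlier in this diff) enforces: no blocking calls inline on an async executor thread. A minimal sketch of the pattern using plain `tokio::task::spawn_blocking`; Lighthouse itself routes this through its `TaskExecutor::spawn_blocking_handle` wrapper, and `Chain` here is a toy stand-in.

```rust
use std::sync::Arc;

struct Chain; // toy stand-in for `BeaconChain`

impl Chain {
    // Synchronous, potentially lock-holding work: must not run inline on an
    // async executor thread.
    fn process_invalid_payload(&self) -> Result<(), String> {
        Ok(())
    }
}

async fn on_invalid_status(chain: Arc<Chain>) -> Result<(), String> {
    // Clone an owning handle, run the sync work on the blocking pool, then
    // await the join handle. Calling `block_on` here instead is what the
    // lint rejects.
    tokio::task::spawn_blocking(move || chain.process_invalid_payload())
        .await
        .map_err(|e| format!("blocking task failed: {e}"))?
}

#[tokio::main]
async fn main() {
    on_invalid_status(Arc::new(Chain)).await.unwrap();
}
```
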
- pub fn chain_dump(&self) -> Result>, Error> { + #[allow(clippy::type_complexity)] + pub fn chain_dump( + &self, + ) -> Result>>, Error> { let mut dump = vec![]; let mut last_slot = BeaconSnapshot { - beacon_block: self.head()?.beacon_block, + beacon_block: self.head()?.beacon_block.into(), beacon_block_root: self.head()?.beacon_block_root, beacon_state: self.head()?.beacon_state, }; @@ -4518,9 +4715,12 @@ impl BeaconChain { break; // Genesis has been reached. } - let beacon_block = self.store.get_block(&beacon_block_root)?.ok_or_else(|| { - Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) - })?; + let beacon_block = self + .store + .get_blinded_block(&beacon_block_root)? + .ok_or_else(|| { + Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) + })?; let beacon_state_root = beacon_block.state_root(); let beacon_state = self .store @@ -4605,7 +4805,7 @@ impl BeaconChain { visited.insert(block_hash); if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 { - let block = self.get_block(&block_hash).unwrap().unwrap(); + let block = self.get_blinded_block(&block_hash).unwrap().unwrap(); let state = self .get_state(&block.state_root(), Some(block.slot())) .unwrap() diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 2e90203f2b5..dc80fb7008f 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -13,7 +13,8 @@ use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot, + BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, Hash256, + Slot, }; #[derive(Debug)] @@ -254,9 +255,9 @@ where self.time = slot } - fn on_verified_block( + fn on_verified_block>( &mut self, - _block: &BeaconBlock, + _block: &BeaconBlock, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error> { @@ -300,7 +301,7 @@ where metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES); let justified_block = self .store - .get_block(&self.justified_checkpoint.root) + .get_blinded_block(&self.justified_checkpoint.root) .map_err(Error::FailedToReadBlock)? .ok_or(Error::MissingBlock(self.justified_checkpoint.root))? .deconstruct() diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index b9de6e9eba1..94adb479c84 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,19 +1,22 @@ use serde_derive::Serialize; -use types::{beacon_state::CloneConfig, BeaconState, EthSpec, Hash256, SignedBeaconBlock}; +use types::{ + beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, + SignedBeaconBlock, +}; /// Represents some block and its associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] -pub struct BeaconSnapshot { - pub beacon_block: SignedBeaconBlock, +pub struct BeaconSnapshot = FullPayload> { + pub beacon_block: SignedBeaconBlock, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } -impl BeaconSnapshot { +impl> BeaconSnapshot { /// Create a new checkpoint. 
 pub fn new(
- beacon_block: SignedBeaconBlock<E>,
+ beacon_block: SignedBeaconBlock<E, Payload>,
 beacon_block_root: Hash256,
 beacon_state: BeaconState<E>,
 ) -> Self {
@@ -36,7 +39,7 @@ impl<E: EthSpec> BeaconSnapshot<E> {
 /// Update all fields of the checkpoint.
 pub fn update(
 &mut self,
- beacon_block: SignedBeaconBlock<E>,
+ beacon_block: SignedBeaconBlock<E, Payload>,
 beacon_block_root: Hash256,
 beacon_state: BeaconState<E>,
 ) {
diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs
index 83b204113fe..74a27d5f751 100644
--- a/beacon_node/beacon_chain/src/block_reward.rs
+++ b/beacon_node/beacon_chain/src/block_reward.rs
@@ -2,12 +2,12 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta};
 use operation_pool::{AttMaxCover, MaxCover};
 use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards;
-use types::{BeaconBlockRef, BeaconState, EthSpec, Hash256, RelativeEpoch};
+use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256, RelativeEpoch};
 impl<T: BeaconChainTypes> BeaconChain<T> {
- pub fn compute_block_reward(
+ pub fn compute_block_reward<Payload: ExecPayload<T::EthSpec>>(
 &self,
- block: BeaconBlockRef<'_, T::EthSpec>,
+ block: BeaconBlockRef<'_, T::EthSpec, Payload>,
 block_root: Hash256,
 state: &BeaconState<T::EthSpec>,
 ) -> Result<BlockReward, BeaconChainError> {
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index d156b92c54c..afdbaf13ee2 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -53,6 +53,7 @@ use crate::{
 },
 metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
 };
+use derivative::Derivative;
 use eth2::types::EventKind;
 use execution_layer::PayloadStatus;
 use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus};
@@ -72,14 +73,15 @@ use state_processing::{
 use std::borrow::Cow;
 use std::fs;
 use std::io::Write;
+use std::sync::Arc;
 use std::time::Duration;
 use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp};
 use tree_hash::TreeHash;
 use types::ExecPayload;
 use types::{
- BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec,
- ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
- SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
+ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
+ EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
+ RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
 };
 const POS_PANDA_BANNER: &str = r#"
@@ -536,7 +538,8 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
 /// A wrapper around a `SignedBeaconBlock` that indicates it has been approved for re-gossiping on
 /// the p2p network.
-#[derive(Debug)]
+#[derive(Derivative)]
+#[derivative(Debug(bound = "T: BeaconChainTypes"))]
 pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
 pub block: SignedBeaconBlock<T::EthSpec>,
 pub block_root: Hash256,
@@ -566,7 +569,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> {
 pub block: SignedBeaconBlock<T::EthSpec>,
 pub block_root: Hash256,
 pub state: BeaconState<T::EthSpec>,
- pub parent_block: SignedBeaconBlock<T::EthSpec>,
+ pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
 pub confirmation_db_batch: Vec<StoreOp<'a, T::EthSpec>>,
 pub payload_verification_status: PayloadVerificationStatus,
 }
@@ -577,7 +580,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> {
 pub trait IntoFullyVerifiedBlock<T: BeaconChainTypes>: Sized {
 fn into_fully_verified_block(
 self,
- chain: &BeaconChain<T>,
+ chain: &Arc<BeaconChain<T>>,
 ) -> Result<FullyVerifiedBlock<T>, BlockError<T::EthSpec>> {
 self.into_fully_verified_block_slashable(chain)
 .map(|fully_verified| {
@@ -593,7 +596,7 @@ pub trait IntoFullyVerifiedBlock<T: BeaconChainTypes>: Sized {
 /// Convert the block to fully-verified form while producing data to aid checking slashability.
 fn into_fully_verified_block_slashable(
 self,
- chain: &BeaconChain<T>,
+ chain: &Arc<BeaconChain<T>>,
 ) -> Result<FullyVerifiedBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>>;
 fn block(&self) -> &SignedBeaconBlock<T::EthSpec>;
@@ -828,7 +831,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for GossipVerifiedBlock<T> {
 /// Completes verification of the wrapped `block`.
 fn into_fully_verified_block_slashable(
 self,
- chain: &BeaconChain<T>,
+ chain: &Arc<BeaconChain<T>>,
 ) -> Result<FullyVerifiedBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
 let fully_verified =
 SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?;
@@ -948,7 +951,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for SignatureVerifiedBlock<T> {
 fn into_fully_verified_block_slashable(
 self,
- chain: &BeaconChain<T>,
+ chain: &Arc<BeaconChain<T>>,
 ) -> Result<FullyVerifiedBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
 let header = self.block.signed_block_header();
 let (parent, block) = if let Some(parent) = self.parent {
@@ -977,7 +980,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for SignedBeaconBlock<T::EthSpec> {
 fn into_fully_verified_block_slashable(
 self,
- chain: &BeaconChain<T>,
+ chain: &Arc<BeaconChain<T>>,
 ) -> Result<FullyVerifiedBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
 // Perform an early check to prevent wasting time on irrelevant blocks.
 let block_root = check_block_relevancy(&self, None, chain)
@@ -1004,7 +1007,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
 block: SignedBeaconBlock<T::EthSpec>,
 block_root: Hash256,
 parent: PreProcessingSnapshot<T::EthSpec>,
- chain: &BeaconChain<T>,
+ chain: &Arc<BeaconChain<T>>,
 ) -> Result<Self, BlockError<T::EthSpec>> {
 if let Some(parent) = chain.fork_choice.read().get_block(&block.parent_root()) {
 // Reject any block where the parent has an invalid payload. It's impossible for a valid
@@ -1568,7 +1571,7 @@ fn load_parent<T: BeaconChainTypes>(
 // indicate that we don't yet know the parent.
 let root = block.parent_root();
 let parent_block = chain
- .get_block(&block.parent_root())
+ .get_blinded_block(&block.parent_root())
 .map_err(BlockError::BeaconChainError)?
.ok_or_else(|| { // Return a `MissingBeaconBlock` error instead of a `ParentUnknown` error since diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index e9860124c0c..361246b4d38 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,6 @@ use crate::beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; +use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; use crate::migrate::{BackgroundMigrator, MigratorConfig}; @@ -27,7 +28,7 @@ use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; -use task_executor::ShutdownReason; +use task_executor::{ShutdownReason, TaskExecutor}; use types::{ BeaconBlock, BeaconState, ChainSpec, Checkpoint, EthSpec, Graffiti, Hash256, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, @@ -91,6 +92,7 @@ pub struct BeaconChainBuilder { // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, + task_executor: Option, } impl @@ -129,6 +131,7 @@ where slasher: None, validator_monitor: None, pending_io_batch: vec![], + task_executor: None, } } @@ -182,6 +185,13 @@ where self.log = Some(log); self } + + /// Sets the task executor. + pub fn task_executor(mut self, task_executor: TaskExecutor) -> Self { + self.task_executor = Some(task_executor); + self + } + /// Attempt to load an existing eth1 cache from the builder's `Store`. pub fn get_persisted_eth1_backend(&self) -> Result, String> { let store = self @@ -240,7 +250,7 @@ where .ok_or("Fork choice not found in store")?; let genesis_block = store - .get_block(&chain.genesis_block_root) + .get_blinded_block(&chain.genesis_block_root) .map_err(|e| descriptive_db_error("genesis block", &e))? .ok_or("Genesis block not found in store")?; let genesis_state = store @@ -588,7 +598,7 @@ where // Try to decode the head block according to the current fork, if that fails, try // to backtrack to before the most recent fork. let (head_block_root, head_block, head_reverted) = - match store.get_block(&initial_head_block_root) { + match store.get_full_block(&initial_head_block_root) { Ok(Some(block)) => (initial_head_block_root, block, false), Ok(None) => return Err("Head block not found in store".into()), Err(StoreError::SszDecodeError(_)) => { @@ -685,6 +695,16 @@ where ); } + // If enabled, set up the fork choice signaller. + let (fork_choice_signal_tx, fork_choice_signal_rx) = + if self.chain_config.fork_choice_before_proposal_timeout_ms != 0 { + let tx = ForkChoiceSignalTx::new(); + let rx = tx.get_receiver(); + (Some(tx), Some(rx)) + } else { + (None, None) + }; + // Store the `PersistedBeaconChain` in the database atomically with the metadata so that on // restart we can correctly detect the presence of an initialized database. 
// @@ -743,6 +763,8 @@ where genesis_block_root, genesis_state_root, fork_choice: RwLock::new(fork_choice), + fork_choice_signal_tx, + fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( @@ -919,6 +941,7 @@ mod test { use std::time::Duration; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; + use task_executor::test_utils::TestRuntime; use types::{EthSpec, MinimalEthSpec, Slot}; type TestEthSpec = MinimalEthSpec; @@ -952,10 +975,12 @@ mod test { .expect("should create interop genesis state"); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let runtime = TestRuntime::default(); let chain = BeaconChainBuilder::new(MinimalEthSpec) .logger(log.clone()) .store(Arc::new(store)) + .task_executor(runtime.task_executor.clone()) .genesis_state(genesis_state) .expect("should build state using recent genesis") .dummy_eth1_backend() @@ -986,10 +1011,10 @@ mod test { assert_eq!( chain .store - .get_block(&Hash256::zero()) + .get_blinded_block(&Hash256::zero()) .expect("should read db") .expect("should find genesis block"), - block, + block.clone().into(), "should store genesis block under zero hash alias" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 4aee06d468c..36c2f41d9d6 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,6 +1,8 @@ use serde_derive::{Deserialize, Serialize}; use types::Checkpoint; +pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing a consensus message (e.g., block, @@ -18,6 +20,10 @@ pub struct ChainConfig { pub enable_lock_timeouts: bool, /// The max size of a message that can be sent over the network. pub max_network_size: usize, + /// Number of milliseconds to wait for fork choice before proposing a block. + /// + /// If set to 0 then block proposal will not wait for fork choice at all. + pub fork_choice_before_proposal_timeout_ms: u64, } impl Default for ChainConfig { @@ -28,6 +34,7 @@ impl Default for ChainConfig { reconstruct_historic_states: false, enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M + fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, } } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 56dced94e62..f589585f8a6 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -104,6 +104,10 @@ impl EarlyAttesterCache { return Ok(None); } + if request_slot < item.block.slot() { + return Ok(None); + } + let committee_count = item .committee_lengths .get_committee_count_per_slot::(spec)?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 8d275414176..834823992ac 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -26,6 +26,7 @@ use state_processing::{ }; use std::time::Duration; use task_executor::ShutdownReason; +use tokio::task::JoinError; use types::*; macro_rules! 
easy_from_to { @@ -90,7 +91,7 @@ pub enum BeaconChainError { BlockSignatureVerifierError(state_processing::block_signature_verifier::Error), BlockReplayError(BlockReplayError), DuplicateValidatorPublicKey, - ValidatorPubkeyCacheFileError(String), + ValidatorPubkeyCacheError(String), ValidatorIndexUnknown(usize), ValidatorPubkeyUnknown(PublicKeyBytes), OpPoolError(OpPoolError), @@ -138,6 +139,18 @@ pub enum BeaconChainError { }, AltairForkDisabled, ExecutionLayerMissing, + BlockVariantLacksExecutionPayload(Hash256), + ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), + BlockHashMissingFromExecutionLayer(ExecutionBlockHash), + InconsistentPayloadReconstructed { + slot: Slot, + exec_block_hash: ExecutionBlockHash, + canonical_payload_root: Hash256, + reconstructed_payload_root: Hash256, + canonical_transactions_root: Hash256, + reconstructed_transactions_root: Hash256, + }, + AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), PrepareProposerBlockingFailed(execution_layer::Error), ExecutionForkChoiceUpdateInvalid { @@ -170,6 +183,12 @@ pub enum BeaconChainError { CannotAttestToFinalizedBlock { beacon_block_root: Hash256, }, + RuntimeShutdown, + ProcessInvalidExecutionPayload(JoinError), + ForkChoiceSignalOutOfOrder { + current: Slot, + latest: Slot, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -219,6 +238,7 @@ pub enum BlockProductionError { FailedToReadFinalizedBlock(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), + ForkChoiceError(BeaconChainError), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 47446e55925..08e4cd41efd 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -20,6 +20,7 @@ use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, }; +use std::sync::Arc; use types::*; /// Verify that `execution_payload` contained by `block` is considered valid by an execution @@ -32,7 +33,7 @@ use types::*; /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload pub fn notify_new_payload( - chain: &BeaconChain, + chain: &Arc>, state: &BeaconState, block: BeaconBlockRef, ) -> Result> { @@ -332,7 +333,7 @@ pub async fn prepare_execution_payload, Condvar)>, +} + +/// Receiver, for use by the beacon chain waiting on fork choice to complete. +pub struct ForkChoiceSignalRx { + pair: Arc<(Mutex, Condvar)>, +} + +pub enum ForkChoiceWaitResult { + /// Successfully reached a slot greater than or equal to the awaited slot. + Success(Slot), + /// Fork choice was updated to a lower slot, indicative of lag or processing delays. + Behind(Slot), + /// Timed out waiting for the fork choice update from the sender. + TimeOut, +} + +impl ForkChoiceSignalTx { + pub fn new() -> Self { + let pair = Arc::new((Mutex::new(Slot::new(0)), Condvar::new())); + Self { pair } + } + + pub fn get_receiver(&self) -> ForkChoiceSignalRx { + ForkChoiceSignalRx { + pair: self.pair.clone(), + } + } + + /// Signal to the receiver that fork choice has been updated to `slot`. + /// + /// Return an error if the provided `slot` is strictly less than any previously provided slot. 
+ pub fn notify_fork_choice_complete(&self, slot: Slot) -> Result<(), BeaconChainError> {
+ let &(ref lock, ref condvar) = &*self.pair;
+
+ let mut current_slot = lock.lock();
+
+ if slot < *current_slot {
+ return Err(BeaconChainError::ForkChoiceSignalOutOfOrder {
+ current: *current_slot,
+ latest: slot,
+ });
+ } else {
+ *current_slot = slot;
+ }
+
+ // We use `notify_all` because there may be multiple block proposals waiting simultaneously.
+ // Usually there'll be 0-1.
+ condvar.notify_all();
+
+ Ok(())
+ }
+}
+
+impl Default for ForkChoiceSignalTx {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl ForkChoiceSignalRx {
+ pub fn wait_for_fork_choice(&self, slot: Slot, timeout: Duration) -> ForkChoiceWaitResult {
+ let &(ref lock, ref condvar) = &*self.pair;
+
+ let mut current_slot = lock.lock();
+
+ // Wait for `current_slot >= slot`.
+ //
+ // Do not loop and wait; if we receive an update for the wrong slot then something is
+ // quite out of whack and we shouldn't waste more time waiting.
+ if *current_slot < slot {
+ let timeout_result = condvar.wait_for(&mut current_slot, timeout);
+
+ if timeout_result.timed_out() {
+ return ForkChoiceWaitResult::TimeOut;
+ }
+ }
+
+ if *current_slot >= slot {
+ ForkChoiceWaitResult::Success(*current_slot)
+ } else {
+ ForkChoiceWaitResult::Behind(*current_slot)
+ }
+ }
+}
diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs
index 38cfc27fe86..7935fc6d961 100644
--- a/beacon_node/beacon_chain/src/fork_revert.rs
+++ b/beacon_node/beacon_chain/src/fork_revert.rs
@@ -48,7 +48,7 @@ pub fn revert_to_fork_boundary<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
 );
 let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root);
- process_results(block_iter, |mut iter| {
+ let (block_root, blinded_block) = process_results(block_iter, |mut iter| {
 iter.find_map(|(block_root, block)| {
 if block.slot() < fork_epoch.start_slot(E::slots_per_epoch()) {
 Some((block_root, block))
@@ -69,7 +69,13 @@ e,
 CORRUPT_DB_MESSAGE
 )
 })?
- .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))
+ .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))?;
+
+ let block = store
+ .make_full_block(&block_root, blinded_block)
+ .map_err(|e| format!("Unable to add payload to new head block: {:?}", e))?;
+
+ Ok((block_root, block))
 }
 /// Reset fork choice to the finalized checkpoint of the supplied head state.
@@ -97,7 +103,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
 let finalized_checkpoint = head_state.finalized_checkpoint();
 let finalized_block_root = finalized_checkpoint.root;
 let finalized_block = store
- .get_block(&finalized_block_root)
+ .get_full_block(&finalized_block_root)
 .map_err(|e| format!("Error loading finalized block: {:?}", e))?
 .ok_or_else(|| {
 format!(
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs
index 234e6c64e41..1891362ebbd 100644
--- a/beacon_node/beacon_chain/src/historical_blocks.rs
+++ b/beacon_node/beacon_chain/src/historical_blocks.rs
@@ -9,7 +9,7 @@ use std::borrow::Cow;
 use std::iter;
 use std::time::Duration;
 use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore};
-use types::{Hash256, SignedBeaconBlock, Slot};
+use types::{Hash256, SignedBlindedBeaconBlock, Slot};
 /// Use a longer timeout on the pubkey cache.
 ///
@@ -58,7 +58,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
 /// Return the number of blocks successfully imported.
 pub fn import_historical_block_batch(
 &self,
- blocks: &[SignedBeaconBlock<T::EthSpec>],
+ blocks: Vec<SignedBlindedBeaconBlock<T::EthSpec>>,
 ) -> Result<usize, Error> {
 let anchor_info = self
 .store
@@ -106,8 +106,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
 .into());
 }
- // Store block in the hot database.
- hot_batch.push(self.store.block_as_kv_store_op(&block_root, block));
+ // Store block in the hot database without payload.
+ self.store
+ .blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch);
 // Store block roots, including at all skip slots in the freezer DB.
 for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() {
diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs
index 65908547fff..579020b1d1e 100644
--- a/beacon_node/beacon_chain/src/lib.rs
+++ b/beacon_node/beacon_chain/src/lib.rs
@@ -15,6 +15,7 @@ mod errors;
 pub mod eth1_chain;
 pub mod events;
 mod execution_payload;
+pub mod fork_choice_signal;
 pub mod fork_revert;
 mod head_tracker;
 pub mod historical_blocks;
diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs
index 41b76045329..4d0f63674ae 100644
--- a/beacon_node/beacon_chain/src/metrics.rs
+++ b/beacon_node/beacon_chain/src/metrics.rs
@@ -90,6 +90,10 @@ lazy_static! {
 );
 pub static ref BLOCK_PRODUCTION_TIMES: Result<Histogram> =
 try_create_histogram("beacon_block_production_seconds", "Full runtime of block production");
+ pub static ref BLOCK_PRODUCTION_FORK_CHOICE_TIMES: Result<Histogram> = try_create_histogram(
+ "beacon_block_production_fork_choice_seconds",
+ "Time taken to run fork choice before block production"
+ );
 pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result<Histogram> = try_create_histogram(
 "beacon_block_production_state_load_seconds",
 "Time taken to load the base state for block production"
diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs
index 5ae76273211..1c0d9c4ed31 100644
--- a/beacon_node/beacon_chain/src/migrate.rs
+++ b/beacon_node/beacon_chain/src/migrate.rs
@@ -55,7 +55,13 @@ pub enum PruningOutcome {
 Successful {
 old_finalized_checkpoint: Checkpoint,
 },
- DeferredConcurrentMutation,
+ /// The run was aborted because the new finalized checkpoint is older than the previous one.
+ OutOfOrderFinalization {
+ old_finalized_checkpoint: Checkpoint,
+ new_finalized_checkpoint: Checkpoint,
+ },
+ /// The run was aborted due to a concurrent mutation of the head tracker.
+ DeferredConcurrentHeadTrackerMutation,
 }
 /// Logic errors that can occur during pruning, none of these should ever happen.
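Because `import_historical_block_batch` now takes blinded blocks by value, a backfill caller holding full blocks converts them first. A minimal, hedged sketch that relies only on the `From<SignedBeaconBlock<E>>` conversion this diff already uses elsewhere (`blind_blocks` is an illustrative name, not a Lighthouse API):

use types::{EthSpec, SignedBeaconBlock, SignedBlindedBeaconBlock};

fn blind_blocks<E: EthSpec>(
    blocks: Vec<SignedBeaconBlock<E>>,
) -> Vec<SignedBlindedBeaconBlock<E>> {
    // Each conversion replaces the full execution payload with its header,
    // which is all the hot database stores per block after this change.
    blocks.into_iter().map(Into::into).collect()
}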
@@ -68,6 +74,10 @@ pub enum PruningError { MissingInfoForCanonicalChain { slot: Slot, }, + FinalizedStateOutOfOrder { + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + }, UnexpectedEqualStateRoots, UnexpectedUnequalStateRoots, } @@ -223,7 +233,7 @@ impl, Cold: ItemStore> BackgroundMigrator old_finalized_checkpoint, - Ok(PruningOutcome::DeferredConcurrentMutation) => { + Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation) => { warn!( log, "Pruning deferred because of a concurrent mutation"; @@ -231,8 +241,21 @@ impl, Cold: ItemStore> BackgroundMigrator { + warn!( + log, + "Ignoring out of order finalization request"; + "old_finalized_epoch" => old_finalized_checkpoint.epoch, + "new_finalized_epoch" => new_finalized_checkpoint.epoch, + "message" => "this is expected occasionally due to a (harmless) race condition" + ); + return; + } Err(e) => { - warn!(log, "Block pruning failed"; "error" => format!("{:?}", e)); + warn!(log, "Block pruning failed"; "error" => ?e); return; } }; @@ -347,6 +370,16 @@ impl, Cold: ItemStore> BackgroundMigrator new_finalized_slot { + return Ok(PruningOutcome::OutOfOrderFinalization { + old_finalized_checkpoint, + new_finalized_checkpoint, + }); + } + debug!( log, "Starting database pruning"; @@ -391,7 +424,7 @@ impl, Cold: ItemStore> BackgroundMigrator block.state_root(), Ok(None) => { return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into()) @@ -523,7 +556,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator> = abandoned_blocks .into_iter() .map(Into::into) - .map(StoreOp::DeleteBlock) + .flat_map(|block_root: Hash256| { + [ + StoreOp::DeleteBlock(block_root), + StoreOp::DeleteExecutionPayload(block_root), + ] + }) .chain( abandoned_states .into_iter() @@ -543,7 +581,7 @@ impl, Cold: ItemStore> BackgroundMigrator BeaconChain { } // 2. Check on disk. - if self.store.get_block(&block_root)?.is_some() { + if self.store.get_blinded_block(&block_root)?.is_some() { cache.block_roots.put(block_root, ()); return Ok(true); } diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 778ac85066e..b29943bfb93 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,28 +1,21 @@ //! Utilities for managing database schema changes. +mod migration_schema_v10; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; mod migration_schema_v9; mod types; -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{ PersistedForkChoiceV1, PersistedForkChoiceV7, PersistedForkChoiceV8, }; -use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use operation_pool::{PersistedOperationPool, PersistedOperationPoolBase}; use slog::{warn, Logger}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::fs; use std::path::Path; use std::sync::Arc; -use store::config::OnDiskStoreConfig; use store::hot_cold_store::{HotColdDB, HotColdDBError}; -use store::metadata::{SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION}; -use store::{DBColumn, Error as StoreError, ItemStore, StoreItem}; - -const PUBKEY_CACHE_FILENAME: &str = "pubkey_cache.ssz"; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; +use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. 
pub fn migrate_schema<T: BeaconChainTypes>(
@@ -35,75 +28,17 @@
 match (from, to) {
 // Migrating from the current schema version to itself is always OK, a no-op.
 (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()),
- // Migrate across multiple versions by recursively migrating one step at a time.
+ // Upgrade across multiple versions by recursively migrating one step at a time.
 (_, _) if from.as_u64() + 1 < to.as_u64() => {
 let next = SchemaVersion(from.as_u64() + 1);
 migrate_schema::<T>(db.clone(), datadir, from, next, log.clone())?;
 migrate_schema::<T>(db, datadir, next, to, log)
 }
- // Migration from v0.3.0 to v0.3.x, adding the temporary states column.
- // Nothing actually needs to be done, but once a DB uses v2 it shouldn't go back.
- (SchemaVersion(1), SchemaVersion(2)) => {
- db.store_schema_version(to)?;
- Ok(())
- }
- // Migration for removing the pubkey cache.
- (SchemaVersion(2), SchemaVersion(3)) => {
- let pk_cache_path = datadir.join(PUBKEY_CACHE_FILENAME);
-
- // Load from file, store to DB.
- ValidatorPubkeyCache::<T>::load_from_file(&pk_cache_path)
- .and_then(|cache| ValidatorPubkeyCache::convert(cache, db.clone()))
- .map_err(|e| StoreError::SchemaMigrationError(format!("{:?}", e)))?;
-
- db.store_schema_version(to)?;
-
- // Delete cache file now that keys are stored in the DB.
- fs::remove_file(&pk_cache_path).map_err(|e| {
- StoreError::SchemaMigrationError(format!(
- "unable to delete {}: {:?}",
- pk_cache_path.display(),
- e
- ))
- })?;
-
- Ok(())
- }
- // Migration for adding sync committee contributions to the persisted op pool.
- (SchemaVersion(3), SchemaVersion(4)) => {
- // Deserialize from what exists in the database using the `PersistedOperationPoolBase`
- // variant and convert it to the Altair variant.
- let pool_opt = db
- .get_item::<PersistedOperationPoolBase<T::EthSpec>>(&OP_POOL_DB_KEY)?
- .map(PersistedOperationPool::Base)
- .map(PersistedOperationPool::base_to_altair);
-
- if let Some(pool) = pool_opt {
- // Store the converted pool under the same key.
- db.put_item::<PersistedOperationPool<T::EthSpec>>(&OP_POOL_DB_KEY, &pool)?;
- }
-
- db.store_schema_version(to)?;
-
- Ok(())
- }
- // Migration for weak subjectivity sync support and clean up of `OnDiskStoreConfig` (#1784).
- (SchemaVersion(4), SchemaVersion(5)) => {
- if let Some(OnDiskStoreConfigV4 {
- slots_per_restore_point,
- ..
- }) = db.hot_db.get(&CONFIG_KEY)?
- {
- let new_config = OnDiskStoreConfig {
- slots_per_restore_point,
- };
- db.hot_db.put(&CONFIG_KEY, &new_config)?;
- }
- db.store_schema_version(to)?;
+ //
+ // Migrations from before SchemaVersion(5) are deprecated.
+ //
- Ok(())
- }
 // Migration for adding `execution_status` field to the fork choice store.
 (SchemaVersion(5), SchemaVersion(6)) => {
 // Database operations to be done atomically
@@ -184,11 +119,22 @@ pub fn migrate_schema<T: BeaconChainTypes>(
 Ok(())
 }
+ // Upgrade from v8 to v9 to separate the execution payloads into their own column.
 (SchemaVersion(8), SchemaVersion(9)) => {
+ migration_schema_v9::upgrade_to_v9::<T>(db.clone(), log)?;
+ db.store_schema_version(to)
+ }
+ // Downgrade from v9 to v8 to ignore the separation of execution payloads
+ // NOTE: only works before the Bellatrix fork epoch.
+ (SchemaVersion(9), SchemaVersion(8)) => { + migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; + db.store_schema_version(to) + } + (SchemaVersion(9), SchemaVersion(10)) => { let mut ops = vec![]; let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v9::update_fork_choice(fork_choice)?; + let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); } @@ -205,24 +151,3 @@ pub fn migrate_schema( .into()), } } - -// Store config used in v4 schema and earlier. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] -pub struct OnDiskStoreConfigV4 { - pub slots_per_restore_point: u64, - pub _block_cache_size: usize, -} - -impl StoreItem for OnDiskStoreConfigV4 { - fn db_column() -> DBColumn { - DBColumn::BeaconMeta - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs new file mode 100644 index 00000000000..5da029cc9a6 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs @@ -0,0 +1,30 @@ +use crate::persisted_fork_choice::PersistedForkChoiceV8; +use crate::schema_change::{ + types::{SszContainerV7, SszContainerV9}, + StoreError, +}; +use proto_array::core::SszContainer; +use ssz::{Decode, Encode}; + +pub fn update_fork_choice( + mut fork_choice: PersistedForkChoiceV8, +) -> Result { + let ssz_container_v7 = SszContainerV7::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + // These transformations instantiate `node.unrealized_justified_checkpoint` and + // `node.unrealized_finalized_checkpoint` to `None`. + let ssz_container_v9: SszContainerV9 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v9.into(); + + fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); + + Ok(fork_choice) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index dd105f3006e..359c5e85cbe 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -31,7 +31,7 @@ pub(crate) fn update_with_reinitialized_fork_choice( .finalized_checkpoint .root; let anchor_block = db - .get_block(&anchor_block_root) + .get_full_block_prior_to_v9(&anchor_block_root) .map_err(|e| format!("{:?}", e))? .ok_or_else(|| "Missing anchor beacon block".to_string())?; let anchor_state = db diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs index 5998eaa1258..ef3f7857f9a 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs @@ -34,7 +34,7 @@ pub fn update_fork_choice( // before schema v8 the cache would always miss on skipped slots. for item in balances_cache.items { // Drop any blocks that aren't found, they're presumably too old and this is only a cache. 
- if let Some(block) = db.get_block(&item.block_root)? {
+ if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? {
 fork_choice_store.balances_cache.items.push(CacheItemV8 {
 block_root: item.block_root,
 epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()),
diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs
index 5da029cc9a6..e2c48d5c89d 100644
--- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs
+++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs
@@ -1,30 +1,176 @@
-use crate::persisted_fork_choice::PersistedForkChoiceV8;
-use crate::schema_change::{
- types::{SszContainerV7, SszContainerV9},
- StoreError,
-};
-use proto_array::core::SszContainer;
-use ssz::{Decode, Encode};
-
-pub fn update_fork_choice(
- mut fork_choice: PersistedForkChoiceV8,
-) -> Result<PersistedForkChoiceV8, StoreError> {
- let ssz_container_v7 = SszContainerV7::from_ssz_bytes(
- &fork_choice.fork_choice.proto_array_bytes,
- )
- .map_err(|e| {
- StoreError::SchemaMigrationError(format!(
- "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
- e
- ))
- })?;
-
- // These transformations instantiate `node.unrealized_justified_checkpoint` and
- // `node.unrealized_finalized_checkpoint` to `None`.
- let ssz_container_v9: SszContainerV9 = ssz_container_v7.into();
- let ssz_container: SszContainer = ssz_container_v9.into();
-
- fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes();
-
- Ok(fork_choice)
+use crate::beacon_chain::BeaconChainTypes;
+use slog::{debug, error, info, Logger};
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use std::time::Duration;
+use store::{DBColumn, Error, HotColdDB, KeyValueStore};
+use types::{EthSpec, Hash256, Slot};
+
+const OPS_PER_BLOCK_WRITE: usize = 2;
+
+/// The slot clock isn't usually available before the database is initialized, so we construct a
+/// temporary slot clock by reading the genesis state. It should always exist if the database is
+/// initialized at a prior schema version, however we still handle the lack of genesis state
+/// gracefully.
+fn get_slot_clock<T: BeaconChainTypes>(
+ db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
+ log: &Logger,
+) -> Result<Option<T::SlotClock>, Error> {
+ // At schema v8 the genesis block must be a *full* block (with payload). In all likelihood it
+ // actually has no payload.
+ let spec = db.get_chain_spec();
+ let genesis_block = if let Some(block) = db.get_full_block_prior_to_v9(&Hash256::zero())? {
+ block
+ } else {
+ error!(log, "Missing genesis block");
+ return Ok(None);
+ };
+ let genesis_state =
+ if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
+ state
+ } else {
+ error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
+ return Ok(None);
+ };
+ Ok(Some(T::SlotClock::new(
+ spec.genesis_slot,
+ Duration::from_secs(genesis_state.genesis_time()),
+ Duration::from_secs(spec.seconds_per_slot),
+ )))
+}
+
+pub fn upgrade_to_v9<T: BeaconChainTypes>(
+ db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+ log: Logger,
+) -> Result<(), Error> {
+ // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration
+ // was implemented before the activation of Bellatrix on all networks except Kiln, so the only
+ // users who will need to wait for the slow copying migration are Kiln users.
+ let slot_clock = if let Some(slot_clock) = get_slot_clock::<T>(&db, &log)?
{ + slot_clock + } else { + error!( + log, + "Unable to complete migration because genesis state or genesis block is missing" + ); + return Err(Error::SlotClockUnavailableForMigration); + }; + + let current_epoch = if let Some(slot) = slot_clock.now() { + slot.epoch(T::EthSpec::slots_per_epoch()) + } else { + return Ok(()); + }; + + let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { + fork_epoch + } else { + info!( + log, + "Upgrading database schema to v9 (no-op)"; + "info" => "To downgrade before the merge run `lighthouse db migrate`" + ); + return Ok(()); + }; + + if current_epoch >= bellatrix_fork_epoch { + info!( + log, + "Upgrading database schema to v9"; + "info" => "This will take several minutes. Each block will be read from and \ + re-written to the database. You may safely exit now (Ctrl-C) and resume \ + the migration later. Downgrading is no longer possible." + ); + + for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) { + let block_root = res?; + let block = match db.get_full_block_prior_to_v9(&block_root) { + // A pre-v9 block is present. + Ok(Some(block)) => block, + // A block is missing. + Ok(None) => return Err(Error::BlockNotFound(block_root)), + // There was an error reading a pre-v9 block. Try reading it as a post-v9 block. + Err(_) => { + if db.try_get_full_block(&block_root)?.is_some() { + // The block is present as a post-v9 block, assume that it was already + // correctly migrated. + continue; + } else { + // This scenario should not be encountered since a prior check has ensured + // that this block exists. + return Err(Error::V9MigrationFailure(block_root)); + } + } + }; + + if block.message().execution_payload().is_ok() { + // Overwrite block with blinded block and store execution payload separately. + debug!( + log, + "Rewriting Bellatrix block"; + "block_root" => ?block_root, + ); + + let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE); + db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?; + db.hot_db.do_atomically(kv_batch)?; + } + } + } else { + info!( + log, + "Upgrading database schema to v9 (no-op)"; + "info" => "To downgrade before the merge run `lighthouse db migrate`" + ); + } + + Ok(()) +} + +// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't been +// reached. +pub fn downgrade_from_v9( + db: Arc>, + log: Logger, +) -> Result<(), Error> { + let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { + slot_clock + } else { + error!( + log, + "Unable to complete migration because genesis state or genesis block is missing" + ); + return Err(Error::SlotClockUnavailableForMigration); + }; + + let current_epoch = if let Some(slot) = slot_clock.now() { + slot.epoch(T::EthSpec::slots_per_epoch()) + } else { + return Ok(()); + }; + + let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { + fork_epoch + } else { + info!( + log, + "Downgrading database schema from v9"; + "info" => "You need to upgrade to v9 again before the merge" + ); + return Ok(()); + }; + + if current_epoch >= bellatrix_fork_epoch { + error!( + log, + "Downgrading from schema v9 after the Bellatrix fork epoch is not supported"; + "current_epoch" => current_epoch, + "bellatrix_fork_epoch" => bellatrix_fork_epoch, + "reason" => "You need a v9 schema database to run on a merged version of Prater or \ + mainnet. 
On Kiln, you have to re-sync", + ); + Err(Error::ResyncRequiredForExecutionPayloadSeparation) + } else { + Ok(()) + } } diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index f4bbae8a32e..55855813629 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -3,8 +3,8 @@ use itertools::process_results; use std::cmp; use std::time::Duration; use types::{ - beacon_state::CloneConfig, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, - Slot, + beacon_state::CloneConfig, BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, + SignedBeaconBlock, Slot, }; /// The default size of the cache. @@ -23,7 +23,7 @@ pub struct PreProcessingSnapshot { pub pre_state: BeaconState, /// This value is only set to `Some` if the `pre_state` was *not* advanced forward. pub beacon_state_root: Option, - pub beacon_block: SignedBeaconBlock, + pub beacon_block: SignedBeaconBlock>, pub beacon_block_root: Hash256, } @@ -33,7 +33,7 @@ impl From> for PreProcessingSnapshot { Self { pre_state: snapshot.beacon_state, beacon_state_root, - beacon_block: snapshot.beacon_block, + beacon_block: snapshot.beacon_block.into(), beacon_block_root: snapshot.beacon_block_root, } } @@ -63,7 +63,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block, + beacon_block: self.beacon_block.into(), beacon_block_root: self.beacon_block_root, pre_state: self.pre_state.unwrap_or(self.beacon_state), beacon_state_root, @@ -76,7 +76,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.clone(), + beacon_block: self.beacon_block.clone().into(), beacon_block_root: self.beacon_block_root, pre_state: self .pre_state diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 6a3c3ea00e1..030507a83a0 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -27,7 +27,7 @@ use std::sync::{ Arc, }; use task_executor::TaskExecutor; -use tokio::time::sleep; +use tokio::time::{sleep, sleep_until, Instant}; use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform @@ -37,6 +37,9 @@ use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot}; /// for some period of time. const MAX_ADVANCE_DISTANCE: u64 = 4; +/// Similarly for fork choice: avoid the fork choice lookahead during sync. +const MAX_FORK_CHOICE_DISTANCE: u64 = 4; + #[derive(Debug)] enum Error { BeaconChain(BeaconChainError), @@ -105,8 +108,8 @@ async fn state_advance_timer( let slot_duration = slot_clock.slot_duration(); loop { - match beacon_chain.slot_clock.duration_to_next_slot() { - Some(duration) => sleep(duration + (slot_duration / 4) * 3).await, + let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) => duration, None => { error!(log, "Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. @@ -115,7 +118,45 @@ async fn state_advance_timer( } }; - // Only start spawn the state advance task if the lock was previously free. + // Run the state advance 3/4 of the way through the slot (9s on mainnet). 
+ let state_advance_offset = slot_duration / 4; + let state_advance_instant = if duration_to_next_slot > state_advance_offset { + Instant::now() + duration_to_next_slot - state_advance_offset + } else { + // Skip the state advance for the current slot and wait until the next one. + Instant::now() + duration_to_next_slot + slot_duration - state_advance_offset + }; + + // Run fork choice 23/24s of the way through the slot (11.5s on mainnet). + // We need to run after the state advance, so use the same condition as above. + let fork_choice_offset = slot_duration / 24; + let fork_choice_instant = if duration_to_next_slot > state_advance_offset { + Instant::now() + duration_to_next_slot - fork_choice_offset + } else { + Instant::now() + duration_to_next_slot + slot_duration - fork_choice_offset + }; + + // Wait for the state advance. + sleep_until(state_advance_instant).await; + + // Compute the current slot here at approx 3/4 through the slot. Even though this slot is + // only used by fork choice we need to calculate it here rather than after the state + // advance, in case the state advance flows over into the next slot. + let current_slot = match beacon_chain.slot() { + Ok(slot) => slot, + Err(e) => { + warn!( + log, + "Unable to determine slot in state advance timer"; + "error" => ?e + ); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + continue; + } + }; + + // Only spawn the state advance task if the lock was previously free. if !is_running.lock() { let log = log.clone(); let beacon_chain = beacon_chain.clone(); @@ -163,6 +204,47 @@ async fn state_advance_timer( "msg" => "system resources may be overloaded" ) } + + // Run fork choice pre-emptively for the next slot. This processes most of the attestations + // from this slot off the hot path of block verification and production. + // Wait for the fork choice instant (which may already be past). + sleep_until(fork_choice_instant).await; + + let log = log.clone(); + let beacon_chain = beacon_chain.clone(); + let next_slot = current_slot + 1; + executor.spawn_blocking( + move || { + // Don't run fork choice during sync. + if beacon_chain.best_slot().map_or(true, |head_slot| { + head_slot + MAX_FORK_CHOICE_DISTANCE < current_slot + }) { + return; + } + + if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { + warn!( + log, + "Error updating fork choice for next slot"; + "error" => ?e, + "slot" => next_slot, + ); + } + + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance", + ); } } @@ -172,7 +254,7 @@ async fn state_advance_timer( /// /// See the module-level documentation for rationale. fn advance_head( - beacon_chain: &BeaconChain, + beacon_chain: &Arc>, log: &Logger, ) -> Result<(), Error> { let current_slot = beacon_chain.slot()?; @@ -193,13 +275,6 @@ fn advance_head( } } - // Run fork choice so we get the latest view of the head. - // - // This is useful since it's quite likely that the last time we ran fork choice was shortly - // after receiving the latest gossip block, but not necessarily after we've received the - // majority of attestations. 
- beacon_chain.fork_choice()?; - let head_root = beacon_chain.head_info()?.block_root; let (head_slot, head_state_root, mut state) = match beacon_chain diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 0878a61211c..a47c41edccd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -12,15 +12,12 @@ use crate::{ }; use bls::get_withdrawal_credentials; use execution_layer::{ - test_utils::{ - ExecutionBlockGenerator, ExecutionLayerRuntime, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK, - }, + test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, ExecutionLayer, }; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; -use logging::test_logger; use merkle_proof::MerkleTree; use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; @@ -31,14 +28,18 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::Logger; use slot_clock::TestingSlotClock; -use state_processing::{BlockSignatureStrategy, per_block_processing, state_advance::{complete_state_advance, partial_state_advance}, StateRootStrategy, VerifyBlockRoot}; +use state_processing::{ + per_block_processing, + state_advance::{complete_state_advance, partial_state_advance}, + BlockSignatureStrategy, StateRootStrategy, VerifyBlockRoot, +}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; -use task_executor::ShutdownReason; +use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; use types::sync_selection_proof::SyncSelectionProof; pub use types::test_utils::generate_deterministic_keypairs; @@ -61,7 +62,7 @@ pub type BaseHarnessType = pub type DiskHarnessType = BaseHarnessType, LevelDB>; pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>; -type BoxedMutator = Box< +pub type BoxedMutator = Box< dyn FnOnce( BeaconChainBuilder>, ) -> BeaconChainBuilder>, @@ -148,8 +149,8 @@ pub struct Builder { initial_mutator: Option>, store_mutator: Option>, execution_layer: Option, - execution_layer_runtime: Option, mock_execution_layer: Option>, + runtime: TestRuntime, log: Logger, } @@ -252,6 +253,9 @@ where Cold: ItemStore, { pub fn new(eth_spec_instance: E) -> Self { + let runtime = TestRuntime::default(); + let log = runtime.log.clone(); + Self { eth_spec_instance, spec: None, @@ -263,8 +267,8 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, - execution_layer_runtime: None, - log: test_logger(), + runtime, + log, } } @@ -327,8 +331,6 @@ where "execution layer already defined" ); - let el_runtime = ExecutionLayerRuntime::default(); - let urls: Vec = urls .iter() .map(|s| SensitiveUrl::parse(*s)) @@ -343,19 +345,19 @@ where }; let execution_layer = ExecutionLayer::from_config( config, - el_runtime.task_executor.clone(), - el_runtime.log.clone(), + self.runtime.task_executor.clone(), + self.log.clone(), ) .unwrap(); self.execution_layer = Some(execution_layer); - self.execution_layer_runtime = Some(el_runtime); self } pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); let mock = MockExecutionLayer::new( + self.runtime.task_executor.clone(), spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, @@ -380,7 +382,7 
@@ where pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); - let log = test_logger(); + let log = self.log; let spec = self.spec.expect("cannot build without spec"); let seconds_per_slot = spec.seconds_per_slot; let validator_keypairs = self @@ -392,6 +394,7 @@ where .custom_spec(spec) .store(self.store.expect("cannot build without store")) .store_migrator_config(MigratorConfig::default().blocking()) + .task_executor(self.runtime.task_executor.clone()) .execution_layer(self.execution_layer) .dummy_eth1_backend() .expect("should build dummy backend") @@ -431,8 +434,8 @@ where chain: Arc::new(chain), validator_keypairs, shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), + runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, - execution_layer_runtime: self.execution_layer_runtime, rng: make_rng(), } } @@ -448,9 +451,9 @@ pub struct BeaconChainHarness { pub chain: Arc>, pub spec: ChainSpec, pub shutdown_receiver: Arc>>, + pub runtime: TestRuntime, pub mock_execution_layer: Option>, - pub execution_layer_runtime: Option, pub rng: Mutex, } @@ -525,8 +528,11 @@ where self.chain.slot().unwrap() } - pub fn get_block(&self, block_hash: SignedBeaconBlockHash) -> Option> { - self.chain.get_block(&block_hash.into()).unwrap() + pub fn get_block( + &self, + block_hash: SignedBeaconBlockHash, + ) -> Option>> { + self.chain.get_blinded_block(&block_hash.into()).unwrap() } pub fn block_exists(&self, block_hash: SignedBeaconBlockHash) -> bool { @@ -581,18 +587,7 @@ where // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); - let randao_reveal = { - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = self.spec.get_domain( - epoch, - Domain::Randao, - &state.fork(), - state.genesis_validators_root(), - ); - let message = epoch.signing_root(domain); - let sk = &self.validator_keypairs[proposer_index].sk; - sk.sign(message) - }; + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); let (block, state) = self .chain @@ -640,18 +635,7 @@ where // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); - let randao_reveal = { - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = self.spec.get_domain( - epoch, - Domain::Randao, - &state.fork(), - state.genesis_validators_root(), - ); - let message = epoch.signing_root(domain); - let sk = &self.validator_keypairs[proposer_index].sk; - sk.sign(message) - }; + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); let pre_state = state.clone(); @@ -677,6 +661,25 @@ where (signed_block, pre_state) } + /// Create a randao reveal for a block at `slot`. + pub fn sign_randao_reveal( + &self, + state: &BeaconState, + proposer_index: usize, + slot: Slot, + ) -> Signature { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = self.spec.get_domain( + epoch, + Domain::Randao, + &state.fork(), + state.genesis_validators_root(), + ); + let message = epoch.signing_root(domain); + let sk = &self.validator_keypairs[proposer_index].sk; + sk.sign(message) + } + /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. 
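The extracted `sign_randao_reveal` helper replaces the duplicated signing blocks in both production paths above. A hedged sketch of a call site; `example_reveal` is an illustrative name, while `get_beacon_proposer_index` and `harness.spec` appear verbatim elsewhere in this diff:

use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use types::{BeaconState, EthSpec, Signature, Slot};

fn example_reveal<E: EthSpec>(
    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
    state: &BeaconState<E>,
    slot: Slot,
) -> Signature {
    // Find the expected proposer, then sign the epoch's RANDAO message with their key.
    let proposer_index = state.get_beacon_proposer_index(slot, &harness.spec).unwrap();
    harness.sign_randao_reveal(state, proposer_index, slot)
}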
@@ -1262,10 +1265,11 @@ where BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, &self.spec, - ).unwrap(); + ) + .unwrap(); signed_block.message_altair_mut().unwrap().state_root = state.canonical_root(); - let (mut block, _) = signed_block.deconstruct(); + let (block, _) = signed_block.deconstruct(); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 769d66cd14b..beb8da8b647 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -1,11 +1,8 @@ use crate::errors::BeaconChainError; use crate::{BeaconChainTypes, BeaconStore}; -use ssz::{Decode, DecodeError, Encode}; +use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; -use std::fs::File; -use std::io::{self, Read, Write}; -use std::path::Path; use store::{DBColumn, Error as StoreError, StoreItem}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; @@ -24,15 +21,7 @@ pub struct ValidatorPubkeyCache { pubkeys: Vec, indices: HashMap, pubkey_bytes: Vec, - backing: PubkeyCacheBacking, -} - -/// Abstraction over on-disk backing. -/// -/// `File` backing is legacy, `Database` is current. -enum PubkeyCacheBacking { - File(ValidatorPubkeyCacheFile), - Database(BeaconStore), + store: BeaconStore, } impl ValidatorPubkeyCache { @@ -48,7 +37,7 @@ impl ValidatorPubkeyCache { pubkeys: vec![], indices: HashMap::new(), pubkey_bytes: vec![], - backing: PubkeyCacheBacking::Database(store), + store, }; cache.import_new_pubkeys(state)?; @@ -66,7 +55,9 @@ impl ValidatorPubkeyCache { if let Some(DatabasePubkey(pubkey)) = store.get_item(&DatabasePubkey::key_for_index(validator_index))? { - pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?); + pubkeys.push((&pubkey).try_into().map_err(|e| { + BeaconChainError::ValidatorPubkeyCacheError(format!("{:?}", e)) + })?); pubkey_bytes.push(pubkey); indices.insert(pubkey, validator_index); } else { @@ -78,31 +69,10 @@ impl ValidatorPubkeyCache { pubkeys, indices, pubkey_bytes, - backing: PubkeyCacheBacking::Database(store), + store, }) } - /// DEPRECATED: used only for migration - pub fn load_from_file>(path: P) -> Result { - ValidatorPubkeyCacheFile::open(&path) - .and_then(ValidatorPubkeyCacheFile::into_cache) - .map_err(Into::into) - } - - /// Convert a cache using `File` backing to one using `Database` backing. - /// - /// This will write all of the keys from `existing_cache` to `store`. - pub fn convert(existing_cache: Self, store: BeaconStore) -> Result { - let mut result = ValidatorPubkeyCache { - pubkeys: Vec::with_capacity(existing_cache.pubkeys.len()), - indices: HashMap::with_capacity(existing_cache.indices.len()), - pubkey_bytes: Vec::with_capacity(existing_cache.indices.len()), - backing: PubkeyCacheBacking::Database(store), - }; - result.import(existing_cache.pubkeys.iter().map(PublicKeyBytes::from))?; - Ok(result) - } - /// Scan the given `state` and add any new validator public keys. /// /// Does not delete any keys from `self` if they don't appear in `state`. @@ -146,14 +116,8 @@ impl ValidatorPubkeyCache { // The motivation behind this ordering is that we do not want to have states that // reference a pubkey that is not in our cache. However, it's fine to have pubkeys // that are never referenced in a state. 
- match &mut self.backing { - PubkeyCacheBacking::File(persistence_file) => { - persistence_file.append(i, &pubkey)?; - } - PubkeyCacheBacking::Database(store) => { - store.put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?; - } - } + self.store + .put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?; self.pubkeys.push( (&pubkey) @@ -219,105 +183,6 @@ impl DatabasePubkey { } } -/// Allows for maintaining an on-disk copy of the `ValidatorPubkeyCache`. The file is raw SSZ bytes -/// (not ASCII encoded). -/// -/// ## Writes -/// -/// Each entry is simply appended to the file. -/// -/// ## Reads -/// -/// The whole file is parsed as an SSZ "variable list" of objects. -/// -/// This parsing method is possible because the items in the list are fixed-length SSZ objects. -struct ValidatorPubkeyCacheFile(File); - -#[derive(Debug)] -enum Error { - Io(io::Error), - Ssz(DecodeError), - PubkeyDecode(bls::Error), - /// The file read from disk does not have a contiguous list of validator public keys. The file - /// has become corrupted. - InconsistentIndex { - _expected: Option, - _found: usize, - }, -} - -impl From for BeaconChainError { - fn from(e: Error) -> BeaconChainError { - BeaconChainError::ValidatorPubkeyCacheFileError(format!("{:?}", e)) - } -} - -impl ValidatorPubkeyCacheFile { - /// Opens an existing file for reading and writing. - pub fn open>(path: P) -> Result { - File::options() - .read(true) - .write(true) - .create(false) - .append(true) - .open(path) - .map(Self) - .map_err(Error::Io) - } - - /// Append a public key to file. - /// - /// The provided `index` should each be one greater than the previous and start at 0. - /// Otherwise, the file will become corrupted and unable to be converted into a cache . - pub fn append(&mut self, index: usize, pubkey: &PublicKeyBytes) -> Result<(), Error> { - append_to_file(&mut self.0, index, pubkey) - } - - /// Creates a `ValidatorPubkeyCache` by reading and parsing the underlying file. 
-    pub fn into_cache(mut self) -> Result<ValidatorPubkeyCache<T>, Error> {
-        let mut bytes = vec![];
-        self.0.read_to_end(&mut bytes).map_err(Error::Io)?;
-
-        let list: Vec<(usize, PublicKeyBytes)> = Vec::from_ssz_bytes(&bytes).map_err(Error::Ssz)?;
-
-        let mut last = None;
-        let mut pubkeys = Vec::with_capacity(list.len());
-        let mut indices = HashMap::with_capacity(list.len());
-        let mut pubkey_bytes = Vec::with_capacity(list.len());
-
-        for (index, pubkey) in list {
-            let expected = last.map(|n| n + 1);
-            if expected.map_or(true, |expected| index == expected) {
-                last = Some(index);
-                pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
-                pubkey_bytes.push(pubkey);
-                indices.insert(pubkey, index);
-            } else {
-                return Err(Error::InconsistentIndex {
-                    _expected: expected,
-                    _found: index,
-                });
-            }
-        }
-
-        Ok(ValidatorPubkeyCache {
-            pubkeys,
-            indices,
-            pubkey_bytes,
-            backing: PubkeyCacheBacking::File(self),
-        })
-    }
-}
-
-fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Result<(), Error> {
-    let mut line = Vec::with_capacity(index.ssz_bytes_len() + pubkey.ssz_bytes_len());
-
-    index.ssz_append(&mut line);
-    pubkey.ssz_append(&mut line);
-
-    file.write_all(&line).map_err(Error::Io)
-}
-
 #[cfg(test)]
 mod test {
     use super::*;
@@ -325,10 +190,7 @@ mod test {
     use logging::test_logger;
     use std::sync::Arc;
     use store::HotColdDB;
-    use tempfile::tempdir;
-    use types::{
-        test_utils::generate_deterministic_keypair, BeaconState, EthSpec, Keypair, MainnetEthSpec,
-    };
+    use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec};

     type E = MainnetEthSpec;
     type T = EphemeralHarnessType<E>;
@@ -422,7 +284,7 @@ mod test {
         check_cache_get(&cache, &keypairs[..]);
         drop(cache);

-        // Re-init the cache from the file.
+        // Re-init the cache from the store.
         let mut cache =
             ValidatorPubkeyCache::load_from_store(store.clone()).expect("should open cache");
         check_cache_get(&cache, &keypairs[..]);
@@ -435,36 +297,8 @@ mod test {
         check_cache_get(&cache, &keypairs[..]);
         drop(cache);

-        // Re-init the cache from the file.
+        // Re-init the cache from the store.
        let cache = ValidatorPubkeyCache::load_from_store(store).expect("should open cache");
         check_cache_get(&cache, &keypairs[..]);
     }
-
-    #[test]
-    fn invalid_persisted_file() {
-        let dir = tempdir().expect("should create tempdir");
-        let path = dir.path().join("cache.ssz");
-        let pubkey = generate_deterministic_keypair(0).pk.into();
-
-        let mut file = File::create(&path).expect("should create file");
-        append_to_file(&mut file, 0, &pubkey).expect("should write to file");
-        drop(file);
-
-        let cache = ValidatorPubkeyCache::<T>::load_from_file(&path).expect("should open cache");
-        drop(cache);
-
-        let mut file = File::options()
-            .write(true)
-            .append(true)
-            .open(&path)
-            .expect("should open file");
-
-        append_to_file(&mut file, 42, &pubkey).expect("should write bad data to file");
-        drop(file);
-
-        assert!(
-            ValidatorPubkeyCache::<T>::load_from_file(&path).is_err(),
-            "should not parse invalid file"
-        );
-    }
 }
diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs
index 189d3baded2..b1d1f71d6cd 100644
--- a/beacon_node/beacon_chain/tests/attestation_production.rs
+++ b/beacon_node/beacon_chain/tests/attestation_production.rs
@@ -55,11 +55,15 @@ fn produces_attestations() {
             Slot::from(num_blocks_produced)
         };

-        let block = chain
+        let blinded_block = chain
             .block_at_slot(block_slot, WhenSlotSkipped::Prev)
             .expect("should get block")
             .expect("block should not be skipped");
-        let block_root = block.message().tree_hash_root();
+        let block_root = blinded_block.message().tree_hash_root();
+        let block = chain
+            .store
+            .make_full_block(&block_root, blinded_block)
+            .unwrap();

         let epoch_boundary_slot = state
             .current_epoch()
@@ -144,3 +148,58 @@ fn produces_attestations() {
         }
     }
 }
+
+/// Ensures that the early attester cache won't create an attestation to a block in a later slot than
+/// the one requested.
+#[test] +fn early_attester_cache_old_request() { + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness.extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let head = harness.chain.head().unwrap(); + assert_eq!(head.beacon_block.slot(), 2); + let head_proto_block = harness + .chain + .fork_choice + .read() + .get_block(&head.beacon_block_root) + .unwrap(); + + harness + .chain + .early_attester_cache + .add_head_block( + head.beacon_block_root, + head.beacon_block.clone(), + head_proto_block, + &head.beacon_state, + &harness.chain.spec, + ) + .unwrap(); + + let attest_slot = head.beacon_block.slot() - 1; + let attestation = harness + .chain + .produce_unaggregated_attestation(attest_slot, 0) + .unwrap(); + + assert_eq!(attestation.data.slot, attest_slot); + let attested_block = harness + .chain + .get_blinded_block(&attestation.data.beacon_block_root) + .unwrap() + .unwrap(); + assert_eq!(attested_block.slot(), attest_slot); +} diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 00bf9fa9aab..2fe8818a9aa 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -975,7 +975,7 @@ fn attestation_that_skips_epochs() { let block_slot = harness .chain .store - .get_block(&block_root) + .get_blinded_block(&block_root) .expect("should not error getting block") .expect("should find attestation block") .message() diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 9acfba17b22..ca65b05fd8b 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -46,6 +46,18 @@ fn get_chain_segment() -> Vec> { .chain_dump() .expect("should dump chain") .into_iter() + .map(|snapshot| { + let full_block = harness + .chain + .store + .make_full_block(&snapshot.beacon_block_root, snapshot.beacon_block) + .unwrap(); + BeaconSnapshot { + beacon_block_root: snapshot.beacon_block_root, + beacon_block: full_block, + beacon_state: snapshot.beacon_state, + } + }) .skip(1) .collect() } diff --git a/beacon_node/beacon_chain/tests/fork_choice.rs b/beacon_node/beacon_chain/tests/fork_choice.rs index c69894a1ea2..533024528ab 100644 --- a/beacon_node/beacon_chain/tests/fork_choice.rs +++ b/beacon_node/beacon_chain/tests/fork_choice.rs @@ -147,15 +147,23 @@ fn chooses_highest_justified_checkpoint_n_plus_2() { for i in 0..15 { slashings.push(harness.make_proposer_slashing(i as u64)); } - let (block, pre_state) = harness.make_block_with_modifier(head.beacon_state, slot_a, |block| { - block.body_altair_mut().unwrap().proposer_slashings = VariableList::::new(slashings).unwrap(); - }); + let (block, _pre_state) = + harness.make_block_with_modifier(head.beacon_state, slot_a, |block| { + block.body_altair_mut().unwrap().proposer_slashings = + VariableList::::new(slashings).unwrap(); + }); // Process the block containing the slashings at the slot before the epoch transition and attest to it. 
harness.process_block(slot_a, block).unwrap(); let head = harness.chain.head().unwrap(); let vals = (15..VALIDATOR_COUNT).collect::>(); - harness.attest_block( &head.beacon_state, head.beacon_state.canonical_root(), SignedBeaconBlockHash::from(head.beacon_block_root), &head.beacon_block, vals.as_slice()); + harness.attest_block( + &head.beacon_state, + head.beacon_state.canonical_root(), + SignedBeaconBlockHash::from(head.beacon_block_root), + &head.beacon_block, + vals.as_slice(), + ); assert_eq!(head.beacon_block.slot(), slot_a); assert_eq!( diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 9410dce12b8..879f223e967 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -21,13 +21,15 @@ const VALIDATOR_COUNT: usize = 32; type E = MainnetEthSpec; -#[derive(PartialEq, Clone)] +#[derive(PartialEq, Clone, Copy)] enum Payload { Valid, Invalid { latest_valid_hash: Option, }, Syncing, + InvalidBlockHash, + InvalidTerminalBlock, } struct InvalidPayloadRig { @@ -69,7 +71,7 @@ impl InvalidPayloadRig { fn block_hash(&self, block_root: Hash256) -> ExecutionBlockHash { self.harness .chain - .get_block(&block_root) + .get_blinded_block(&block_root) .unwrap() .unwrap() .message() @@ -154,8 +156,9 @@ impl InvalidPayloadRig { assert_eq!(justified_checkpoint.epoch, 2); } + /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`. fn import_block(&mut self, is_valid: Payload) -> Hash256 { - self.import_block_parametric(is_valid, |error| { + self.import_block_parametric(is_valid, is_valid, |error| { matches!( error, BlockError::ExecutionPayloadError( @@ -183,7 +186,8 @@ impl InvalidPayloadRig { fn import_block_parametric) -> bool>( &mut self, - is_valid: Payload, + new_payload_response: Payload, + forkchoice_response: Payload, evaluate_error: F, ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); @@ -194,15 +198,54 @@ impl InvalidPayloadRig { let (block, post_state) = self.harness.make_block(state, slot); let block_root = block.canonical_root(); - match is_valid { - Payload::Valid | Payload::Syncing => { - if is_valid == Payload::Syncing { - // Importing a payload whilst returning `SYNCING` simulates an EE that obtains - // the block via it's own means (e.g., devp2p). 
- let should_import_payload = true; - mock_execution_layer - .server - .all_payloads_syncing(should_import_payload); + let set_new_payload = |payload: Payload| match payload { + Payload::Valid => mock_execution_layer + .server + .all_payloads_valid_on_new_payload(), + Payload::Syncing => mock_execution_layer + .server + .all_payloads_syncing_on_new_payload(true), + Payload::Invalid { latest_valid_hash } => { + let latest_valid_hash = latest_valid_hash + .unwrap_or_else(|| self.block_hash(block.message().parent_root())); + mock_execution_layer + .server + .all_payloads_invalid_on_new_payload(latest_valid_hash) + } + Payload::InvalidBlockHash => mock_execution_layer + .server + .all_payloads_invalid_block_hash_on_new_payload(), + Payload::InvalidTerminalBlock => mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_new_payload(), + }; + let set_forkchoice_updated = |payload: Payload| match payload { + Payload::Valid => mock_execution_layer + .server + .all_payloads_valid_on_forkchoice_updated(), + Payload::Syncing => mock_execution_layer + .server + .all_payloads_syncing_on_forkchoice_updated(), + Payload::Invalid { latest_valid_hash } => { + let latest_valid_hash = latest_valid_hash + .unwrap_or_else(|| self.block_hash(block.message().parent_root())); + mock_execution_layer + .server + .all_payloads_invalid_on_forkchoice_updated(latest_valid_hash) + } + Payload::InvalidBlockHash => mock_execution_layer + .server + .all_payloads_invalid_block_hash_on_forkchoice_updated(), + Payload::InvalidTerminalBlock => mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_forkchoice_updated(), + }; + + match (new_payload_response, forkchoice_response) { + (Payload::Valid | Payload::Syncing, Payload::Valid | Payload::Syncing) => { + if new_payload_response == Payload::Syncing { + set_new_payload(new_payload_response); + set_forkchoice_updated(forkchoice_response); } else { mock_execution_layer.server.full_payload_verification(); } @@ -221,47 +264,69 @@ impl InvalidPayloadRig { let execution_status = self.execution_status(root.into()); - match is_valid { + match forkchoice_response { Payload::Syncing => assert!(execution_status.is_optimistic()), Payload::Valid => assert!(execution_status.is_valid_and_post_bellatrix()), - Payload::Invalid { .. } => unreachable!(), + Payload::Invalid { .. } + | Payload::InvalidBlockHash + | Payload::InvalidTerminalBlock => unreachable!(), } assert_eq!( - self.harness.chain.get_block(&block_root).unwrap().unwrap(), + self.harness + .chain + .store + .get_full_block(&block_root) + .unwrap() + .unwrap(), block, "block from db must match block imported" ); } - Payload::Invalid { latest_valid_hash } => { - let latest_valid_hash = latest_valid_hash - .unwrap_or_else(|| self.block_hash(block.message().parent_root())); - - mock_execution_layer - .server - .all_payloads_invalid(latest_valid_hash); + ( + Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, + _, + ) + | ( + _, + Payload::Invalid { .. 
} | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock,
+            ) => {
+                set_new_payload(new_payload_response);
+                set_forkchoice_updated(forkchoice_response);

                 match self.harness.process_block(slot, block) {
                     Err(error) if evaluate_error(&error) => (),
                     Err(other) => {
                         panic!("evaluate_error returned false with {:?}", other)
                     }
-                    Ok(_) => panic!("block with invalid payload was imported"),
+                    Ok(_) => {
+                        // An invalid payload should only have been imported if the EE's
+                        // initial (newPayload) response for it was Valid or Syncing.
+                        assert!(matches!(
+                            new_payload_response,
+                            Payload::Valid | Payload::Syncing
+                        ));
+                    }
                 };

-                assert!(
-                    self.harness
-                        .chain
-                        .fork_choice
-                        .read()
-                        .get_block(&block_root)
-                        .is_none(),
-                    "invalid block must not exist in fork choice"
-                );
-                assert!(
-                    self.harness.chain.get_block(&block_root).unwrap().is_none(),
-                    "invalid block cannot be accessed via get_block"
-                );
+                let block_in_forkchoice =
+                    self.harness.chain.fork_choice.read().get_block(&block_root);
+                if let Payload::Invalid { .. } = new_payload_response {
+                    // A block found to be immediately invalid should not end up in fork choice.
+                    assert_eq!(block_in_forkchoice, None);
+
+                    assert!(
+                        self.harness
+                            .chain
+                            .get_blinded_block(&block_root)
+                            .unwrap()
+                            .is_none(),
+                        "invalid block cannot be accessed via get_block"
+                    );
+                } else {
+                    // A block imported and then found invalid should have an invalid status.
+                    assert!(block_in_forkchoice.unwrap().execution_status.is_invalid());
+                }
             }
         }

@@ -317,6 +382,48 @@ fn invalid_payload_invalidates_parent() {
     assert_eq!(rig.head_info().block_root, roots[0]);
 }

+/// Test invalidation of a payload via the fork choice updated message.
+///
+/// The `invalid_payload` argument determines the type of invalid payload: `Invalid`,
+/// `InvalidBlockHash`, etc., taking the `latest_valid_hash` as an argument.
+fn immediate_forkchoice_update_invalid_test(
+    invalid_payload: impl FnOnce(Option<ExecutionBlockHash>) -> Payload,
+) {
+    let mut rig = InvalidPayloadRig::new().enable_attestations();
+    rig.move_to_terminal_block();
+    rig.import_block(Payload::Valid); // Import a valid transition block.
+    rig.move_to_first_justification(Payload::Syncing);
+
+    let valid_head_root = rig.import_block(Payload::Valid);
+    let latest_valid_hash = Some(rig.block_hash(valid_head_root));
+
+    // Import a block which returns syncing when supplied via newPayload, and then
+    // invalid when the forkchoice update is sent.
+    rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| {
+        false
+    });
+
+    // The head should be the latest valid block.
+    assert_eq!(rig.head_info().block_root, valid_head_root);
+}
+
+#[test]
+fn immediate_forkchoice_update_payload_invalid() {
+    immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid {
+        latest_valid_hash,
+    })
+}
+
+#[test]
+fn immediate_forkchoice_update_payload_invalid_block_hash() {
+    immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash)
+}
+
+#[test]
+fn immediate_forkchoice_update_payload_invalid_terminal_block() {
+    immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock)
+}
+
 /// Ensure the client tries to exit when the justified checkpoint is invalidated.
#[test] fn justified_checkpoint_becomes_invalid() { @@ -329,7 +436,7 @@ fn justified_checkpoint_becomes_invalid() { let parent_root_of_justified = rig .harness .chain - .get_block(&justified_checkpoint.root) + .get_blinded_block(&justified_checkpoint.root) .unwrap() .unwrap() .parent_root(); @@ -339,19 +446,17 @@ fn justified_checkpoint_becomes_invalid() { assert!(rig.harness.shutdown_reasons().is_empty()); // Import a block that will invalidate the justified checkpoint. - rig.import_block_parametric( - Payload::Invalid { - latest_valid_hash: Some(parent_hash_of_justified), - }, - |error| { - matches!( - error, - // The block import should fail since the beacon chain knows the justified payload - // is invalid. - BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) - ) - }, - ); + let is_valid = Payload::Invalid { + latest_valid_hash: Some(parent_hash_of_justified), + }; + rig.import_block_parametric(is_valid, is_valid, |error| { + matches!( + error, + // The block import should fail since the beacon chain knows the justified payload + // is invalid. + BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) + ) + }); // The beacon chain should have triggered a shutdown. assert_eq!( @@ -547,7 +652,13 @@ fn invalidates_all_descendants() { assert!(rig.execution_status(fork_block_root).is_invalid()); for root in blocks { - let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + let slot = rig + .harness + .chain + .get_blinded_block(&root) + .unwrap() + .unwrap() + .slot(); // Fork choice doesn't have info about pre-finalization, nothing to check here. if slot < finalized_slot { @@ -611,7 +722,13 @@ fn switches_heads() { assert!(rig.execution_status(fork_block_root).is_optimistic()); for root in blocks { - let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + let slot = rig + .harness + .chain + .get_blinded_block(&root) + .unwrap() + .unwrap() + .slot(); // Fork choice doesn't have info about pre-finalization, nothing to check here. if slot < finalized_slot { @@ -643,9 +760,17 @@ fn invalid_during_processing() { ]; // 0 should be present in the chain. - assert!(rig.harness.chain.get_block(&roots[0]).unwrap().is_some()); + assert!(rig + .harness + .chain + .get_blinded_block(&roots[0]) + .unwrap() + .is_some()); // 1 should *not* be present in the chain. - assert_eq!(rig.harness.chain.get_block(&roots[1]).unwrap(), None); + assert_eq!( + rig.harness.chain.get_blinded_block(&roots[1]).unwrap(), + None + ); // 2 should be the head. let head = rig.harness.chain.head_info().unwrap(); assert_eq!(head.block_root, roots[2]); @@ -664,7 +789,7 @@ fn invalid_after_optimistic_sync() { ]; for root in &roots { - assert!(rig.harness.chain.get_block(root).unwrap().is_some()); + assert!(rig.harness.chain.get_blinded_block(root).unwrap().is_some()); } // 2 should be the head. diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d3038ac48d3..771295c415e 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -313,7 +313,10 @@ fn epoch_boundary_state_attestation_processing() { for (attestation, subnet_id) in late_attestations.into_iter().flatten() { // load_epoch_boundary_state is idempotent! 
let block_root = attestation.data.beacon_block_root; - let block = store.get_block(&block_root).unwrap().expect("block exists"); + let block = store + .get_blinded_block(&block_root) + .unwrap() + .expect("block exists"); let epoch_boundary_state = store .load_epoch_boundary_state(&block.state_root()) .expect("no error") @@ -603,7 +606,7 @@ fn delete_blocks_and_states() { ); let faulty_head_block = store - .get_block(&faulty_head.into()) + .get_blinded_block(&faulty_head.into()) .expect("no errors") .expect("faulty head block exists"); @@ -645,7 +648,7 @@ fn delete_blocks_and_states() { break; } store.delete_block(&block_root).unwrap(); - assert_eq!(store.get_block(&block_root).unwrap(), None); + assert_eq!(store.get_blinded_block(&block_root).unwrap(), None); } // Deleting frozen states should do nothing @@ -890,7 +893,12 @@ fn shuffling_compatible_short_fork() { } fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState { - let head_block = harness.chain.get_block(&block_root).unwrap().unwrap(); + let head_block = harness + .chain + .store + .get_blinded_block(&block_root) + .unwrap() + .unwrap(); harness .chain .get_state(&head_block.state_root(), Some(head_block.slot())) @@ -1695,7 +1703,7 @@ fn check_all_blocks_exist<'a>( blocks: impl Iterator, ) { for &block_hash in blocks { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); + let block = harness.chain.get_blinded_block(&block_hash.into()).unwrap(); assert!( block.is_some(), "expected block {:?} to be in DB", @@ -1742,7 +1750,7 @@ fn check_no_blocks_exist<'a>( blocks: impl Iterator, ) { for &block_hash in blocks { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); + let block = harness.chain.get_blinded_block(&block_hash.into()).unwrap(); assert!( block.is_none(), "did not expect block {:?} to be in the DB", @@ -1988,7 +1996,12 @@ fn weak_subjectivity_sync() { .unwrap() .unwrap(); let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint; - let wss_block = harness.get_block(wss_checkpoint.root.into()).unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_checkpoint.root) + .unwrap() + .unwrap(); let wss_state = full_store .get_state(&wss_block.state_root(), None) .unwrap() @@ -2011,26 +2024,28 @@ fn weak_subjectivity_sync() { let seconds_per_slot = spec.seconds_per_slot; // Initialise a new beacon chain from the finalized checkpoint - let beacon_chain = BeaconChainBuilder::new(MinimalEthSpec) - .store(store.clone()) - .custom_spec(test_spec::()) - .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) - .unwrap() - .logger(log.clone()) - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) - .expect("should configure testing slot clock") - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .monitor_validators(true, vec![], log) - .build() - .expect("should build"); + let beacon_chain = Arc::new( + BeaconChainBuilder::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) + .unwrap() + .logger(log.clone()) + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .testing_slot_clock(Duration::from_secs(seconds_per_slot)) 
+ .expect("should configure testing slot clock") + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .monitor_validators(true, vec![], log) + .build() + .expect("should build"), + ); // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); @@ -2040,8 +2055,14 @@ fn weak_subjectivity_sync() { for snapshot in new_blocks { let block = &snapshot.beacon_block; + let full_block = harness + .chain + .store + .make_full_block(&snapshot.beacon_block_root, block.clone()) + .unwrap(); + beacon_chain.slot_clock.set_slot(block.slot().as_u64()); - beacon_chain.process_block(block.clone()).unwrap(); + beacon_chain.process_block(full_block).unwrap(); beacon_chain.fork_choice().unwrap(); // Check that the new block's state can be loaded correctly. @@ -2083,13 +2104,13 @@ fn weak_subjectivity_sync() { .map(|s| s.beacon_block.clone()) .collect::>(); beacon_chain - .import_historical_block_batch(&historical_blocks) + .import_historical_block_batch(historical_blocks.clone()) .unwrap(); assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); // Resupplying the blocks should not fail, they can be safely ignored. beacon_chain - .import_historical_block_batch(&historical_blocks) + .import_historical_block_batch(historical_blocks) .unwrap(); // The forwards iterator should now match the original chain @@ -2112,7 +2133,7 @@ fn weak_subjectivity_sync() { .unwrap() .map(Result::unwrap) { - let block = store.get_block(&block_root).unwrap().unwrap(); + let block = store.get_blinded_block(&block_root).unwrap().unwrap(); assert_eq!(block.slot(), slot); } @@ -2572,7 +2593,7 @@ fn check_iterators(harness: &TestHarness) { } fn get_finalized_epoch_boundary_blocks( - dump: &[BeaconSnapshot], + dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() .cloned() @@ -2580,7 +2601,9 @@ fn get_finalized_epoch_boundary_blocks( .collect() } -fn get_blocks(dump: &[BeaconSnapshot]) -> HashSet { +fn get_blocks( + dump: &[BeaconSnapshot>], +) -> HashSet { dump.iter() .cloned() .map(|checkpoint| checkpoint.beacon_block_root.into()) diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 2a0aa35b1b1..7b17937a210 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -744,7 +744,11 @@ fn block_roots_skip_slot_behaviour() { "WhenSlotSkipped::Prev should accurately return the prior skipped block" ); - let expected_block = harness.chain.get_block(&skipped_root).unwrap().unwrap(); + let expected_block = harness + .chain + .get_blinded_block(&skipped_root) + .unwrap() + .unwrap(); assert_eq!( harness @@ -782,7 +786,11 @@ fn block_roots_skip_slot_behaviour() { "WhenSlotSkipped::None and WhenSlotSkipped::Prev should be equal on non-skipped slot" ); - let expected_block = harness.chain.get_block(&skips_prev).unwrap().unwrap(); + let expected_block = harness + .chain + .get_blinded_block(&skips_prev) + .unwrap() + .unwrap(); assert_eq!( harness diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 353b174a02b..1f02ec7b3c3 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -166,6 +166,7 @@ where let builder = BeaconChainBuilder::new(eth_spec_instance) .logger(context.log().clone()) .store(store) + .task_executor(context.executor.clone()) .custom_spec(spec.clone()) .chain_config(chain_config) .graffiti(graffiti) @@ -484,13 +485,8 @@ where 
            .beacon_chain
             .clone()
             .ok_or("node timer requires a beacon chain")?;
-        let seconds_per_slot = self
-            .chain_spec
-            .as_ref()
-            .ok_or("node timer requires a chain spec")?
-            .seconds_per_slot;

-        spawn_timer(context.executor, beacon_chain, seconds_per_slot)
+        spawn_timer(context.executor, beacon_chain)
             .map_err(|e| format!("Unable to start node timer: {}", e))?;

         Ok(self)
diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs
index bb9e196f7e5..13614af12ee 100644
--- a/beacon_node/client/src/config.rs
+++ b/beacon_node/client/src/config.rs
@@ -149,10 +149,8 @@ impl Config {
     pub fn get_existing_legacy_data_dir(&self) -> Option<PathBuf> {
         dirs::home_dir()
             .map(|home_dir| home_dir.join(&self.data_dir))
-            // Return `None` if the directory does not exist.
-            .filter(|dir| dir.exists())
-            // Return `None` if the legacy directory is identical to the modern.
-            .filter(|dir| *dir != self.get_modern_data_dir())
+            // Return `None` if the legacy directory does not exist or if it is identical to the modern.
+            .filter(|dir| dir.exists() && *dir != self.get_modern_data_dir())
     }

     /// Returns the core path for the client.
diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs
index 460f53e7326..a35d5740371 100644
--- a/beacon_node/eth1/src/service.rs
+++ b/beacon_node/eth1/src/service.rs
@@ -41,8 +41,16 @@ const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000;

 const WARNING_MSG: &str = "BLOCK PROPOSALS WILL FAIL WITHOUT VALID, SYNCED ETH1 CONNECTION";

-/// A factor used to reduce the eth1 follow distance to account for discrepancies in the block time.
-const ETH1_BLOCK_TIME_TOLERANCE_FACTOR: u64 = 4;
+/// Number of blocks to download if the node detects it is lagging behind due to an inaccurate
+/// relationship between block-number-based follow distance and time-based follow distance.
+const CATCHUP_BATCH_SIZE: u64 = 128;
+
+/// The absolute minimum follow distance to enforce when downloading catchup batches.
+const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64;
+
+/// To account for fast PoW blocks requiring more blocks in the cache than the block-based follow
+/// distance would imply, we store `CACHE_FACTOR` times more blocks in our cache.
+const CACHE_FACTOR: u64 = 2;

 #[derive(Debug, PartialEq, Clone)]
 pub enum EndpointError {
@@ -284,10 +292,18 @@ async fn get_remote_head_and_new_block_ranges(
         e
     };
     let new_deposit_block_numbers = service
-        .relevant_new_block_numbers(remote_head_block.number, HeadType::Deposit)
+        .relevant_new_block_numbers(
+            remote_head_block.number,
+            Some(remote_head_block.timestamp),
+            HeadType::Deposit,
+        )
         .map_err(handle_remote_not_synced)?;
     let new_block_cache_numbers = service
-        .relevant_new_block_numbers(remote_head_block.number, HeadType::BlockCache)
+        .relevant_new_block_numbers(
+            remote_head_block.number,
+            Some(remote_head_block.timestamp),
+            HeadType::BlockCache,
+        )
         .map_err(handle_remote_not_synced)?;
     Ok((
         remote_head_block,
@@ -307,7 +323,7 @@ async fn relevant_new_block_numbers_from_endpoint(
         get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS))
             .map_err(SingleEndpointError::GetBlockNumberFailed)
             .await?;
-    service.relevant_new_block_numbers(remote_highest_block, head_type)
+    service.relevant_new_block_numbers(remote_highest_block, None, head_type)
 }

 #[derive(Debug, PartialEq)]
 pub enum SingleEndpointError {
     RemoteNotSynced {
         next_required_block: u64,
         remote_highest_block: u64,
-        reduced_follow_distance: u64,
+        cache_follow_distance: u64,
     },
     /// Failed to download a block from the eth1 node.
    BlockDownloadFailed(String),
@@ -384,6 +400,11 @@ pub struct Config {
     ///
     /// Note: this should be less than or equal to the specification's `ETH1_FOLLOW_DISTANCE`.
     pub follow_distance: u64,
+    /// The follow distance to use for blocks in our cache.
+    ///
+    /// This can be set lower than the true follow distance in order to correct for poor timing
+    /// of eth1 blocks.
+    pub cache_follow_distance: Option<u64>,
     /// Specifies the seconds when we consider the head of a node far behind.
     /// This should be less than `ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK`.
     pub node_far_behind_seconds: u64,
@@ -410,20 +431,30 @@ impl Config {
             E::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot;
         let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block;

-        // Compute the number of extra blocks we store prior to the voting period start blocks.
-        let follow_distance_tolerance_blocks =
-            spec.eth1_follow_distance / ETH1_BLOCK_TIME_TOLERANCE_FACTOR;
-
         // Ensure we can store two full windows of voting blocks.
         let voting_windows = eth1_blocks_per_voting_period * 2;

-        // Extend the cache to account for varying eth1 block times and the follow distance
-        // tolerance blocks.
-        let length = voting_windows
-            + (voting_windows / ETH1_BLOCK_TIME_TOLERANCE_FACTOR)
-            + follow_distance_tolerance_blocks;
+        // Extend the cache to account for the cache follow distance.
+        let extra_follow_distance_blocks = self
+            .follow_distance
+            .saturating_sub(self.cache_follow_distance());
+
+        let length = voting_windows + extra_follow_distance_blocks;
+
+        // Allow for more blocks to account for blocks being generated faster than expected.
+        // The cache expiry should really be timestamp based, but that would require a more
+        // extensive refactor.
+        let cache_size = CACHE_FACTOR * length;

-        self.block_cache_truncation = Some(length as usize);
+        self.block_cache_truncation = Some(cache_size as usize);
+    }
+
+    /// The distance at which the cache should follow the head.
+    ///
+    /// Defaults to 3/4 of `follow_distance` unless set manually.
+    pub fn cache_follow_distance(&self) -> u64 {
+        self.cache_follow_distance
+            .unwrap_or(3 * self.follow_distance / 4)
+    }
 }

@@ -438,6 +469,7 @@ impl Default for Config {
             deposit_contract_deploy_block: 1,
             lowest_cached_block_number: 1,
             follow_distance: 128,
+            cache_follow_distance: None,
             node_far_behind_seconds: 128 * 14,
             block_cache_truncation: Some(4_096),
             auto_update_interval_millis: 60_000,
@@ -486,9 +518,8 @@ impl Service {
     ///
     /// This is useful since the spec declares `SECONDS_PER_ETH1_BLOCK` to be `14`, whilst it is
     /// actually `15` on Goerli.
-    pub fn reduced_follow_distance(&self) -> u64 {
-        let full = self.config().follow_distance;
-        full.saturating_sub(full / ETH1_BLOCK_TIME_TOLERANCE_FACTOR)
+    pub fn cache_follow_distance(&self) -> u64 {
+        self.config().cache_follow_distance()
     }

     /// Return byte representation of deposit and block caches.
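The defaulting rule for `cache_follow_distance` above is easy to sanity-check by hand. The sketch below restates it as a free function with the default `follow_distance` of 128; the standalone function, its name, and `main` are illustrative only, not part of this diff.

```rust
/// Mirrors `Config::cache_follow_distance` from the hunk above: an explicit
/// override wins, otherwise the cache follows at 3/4 of `follow_distance`.
fn cache_follow_distance(follow_distance: u64, override_distance: Option<u64>) -> u64 {
    override_distance.unwrap_or(3 * follow_distance / 4)
}

fn main() {
    // Default config: follow_distance = 128 and no override => 3 * 128 / 4 = 96.
    assert_eq!(cache_follow_distance(128, None), 96);
    // A manually configured distance is used verbatim.
    assert_eq!(cache_follow_distance(128, Some(64)), 64);
}
```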
@@ -834,9 +865,10 @@ impl Service {
     fn relevant_new_block_numbers(
         &self,
         remote_highest_block: u64,
+        remote_highest_block_timestamp: Option<u64>,
         head_type: HeadType,
     ) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
-        let follow_distance = self.reduced_follow_distance();
+        let follow_distance = self.cache_follow_distance();
         let next_required_block = match head_type {
             HeadType::Deposit => self
                 .deposits()
@@ -852,8 +884,16 @@ impl Service {
                 .map(|n| n + 1)
                 .unwrap_or_else(|| self.config().lowest_cached_block_number),
         };
+        let latest_cached_block = self.latest_cached_block();

-        relevant_block_range(remote_highest_block, next_required_block, follow_distance)
+        relevant_block_range(
+            remote_highest_block,
+            remote_highest_block_timestamp,
+            next_required_block,
+            follow_distance,
+            latest_cached_block.as_ref(),
+            &self.inner.spec,
+        )
     }

     /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured
@@ -1189,24 +1229,48 @@ impl Service {
 /// Returns an error if `next_required_block > remote_highest_block + 1` which means the remote went
 /// backwards.
 fn relevant_block_range(
-    remote_highest_block: u64,
+    remote_highest_block_number: u64,
+    remote_highest_block_timestamp: Option<u64>,
     next_required_block: u64,
-    reduced_follow_distance: u64,
+    cache_follow_distance: u64,
+    latest_cached_block: Option<&Eth1Block>,
+    spec: &ChainSpec,
 ) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
-    let remote_follow_block = remote_highest_block.saturating_sub(reduced_follow_distance);
+    // If the latest cached block is lagging the head block by more than `cache_follow_distance`
+    // times the expected block time then the eth1 block time is likely quite different from what we
+    // assumed.
+    //
+    // In order to catch up, load batches of `CATCHUP_BATCH_SIZE` until the situation rights itself.
+    // Note that we need to check this condition before the regular follow distance condition
+    // or we will keep downloading small numbers of blocks.
+    if let (Some(remote_highest_block_timestamp), Some(latest_cached_block)) =
+        (remote_highest_block_timestamp, latest_cached_block)
+    {
+        let lagging = latest_cached_block.timestamp
+            + cache_follow_distance * spec.seconds_per_eth1_block
+            < remote_highest_block_timestamp;
+        let end_block = std::cmp::min(
+            remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE),
+            next_required_block + CATCHUP_BATCH_SIZE,
+        );
+        if lagging && next_required_block <= end_block {
+            return Ok(Some(next_required_block..=end_block));
+        }
+    }
+
+    let remote_follow_block = remote_highest_block_number.saturating_sub(cache_follow_distance);
     if next_required_block <= remote_follow_block {
         Ok(Some(next_required_block..=remote_follow_block))
-    } else if next_required_block > remote_highest_block + 1 {
+    } else if next_required_block > remote_highest_block_number + 1 {
         // If this is the case, the node must have gone "backwards" in terms of it's sync
         // (i.e., it's head block is lower than it was before).
         //
-        // We assume that the `reduced_follow_distance` should be sufficient to ensure this never
+        // We assume that the `cache_follow_distance` should be sufficient to ensure this never
         // happens, otherwise it is an error.
         Err(SingleEndpointError::RemoteNotSynced {
             next_required_block,
-            remote_highest_block,
-            reduced_follow_distance,
+            remote_highest_block: remote_highest_block_number,
+            cache_follow_distance,
         })
     } else {
         // Return an empty range.
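To make the catch-up branch above concrete, here is a self-contained restatement with the constants copied from this diff. It is a sketch that assumes 15-second eth1 blocks; the standalone function, its name, and the numbers in `main` are illustrative only.

```rust
use std::ops::RangeInclusive;

const CATCHUP_BATCH_SIZE: u64 = 128;
const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64;

/// Sketch of the lagging check in `relevant_block_range`: the cache is lagging
/// when its newest block's timestamp trails the remote head by more than
/// `cache_follow_distance` expected block times, in which case up to
/// `CATCHUP_BATCH_SIZE` blocks are requested while still staying at least
/// `CATCHUP_MIN_FOLLOW_DISTANCE` blocks behind the head.
fn catchup_range(
    remote_head_number: u64,
    remote_head_timestamp: u64,
    latest_cached_timestamp: u64,
    next_required_block: u64,
    cache_follow_distance: u64,
    seconds_per_eth1_block: u64,
) -> Option<RangeInclusive<u64>> {
    let lagging = latest_cached_timestamp + cache_follow_distance * seconds_per_eth1_block
        < remote_head_timestamp;
    let end_block = std::cmp::min(
        remote_head_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE),
        next_required_block + CATCHUP_BATCH_SIZE,
    );
    (lagging && next_required_block <= end_block).then(|| next_required_block..=end_block)
}

fn main() {
    // The cache is ~2000s behind a head at t = 20_000 with a follow distance of
    // 96 blocks (96 * 15 = 1440s), so a full catch-up batch is requested.
    assert_eq!(catchup_range(10_000, 20_000, 18_000, 9_800, 96, 15), Some(9_800..=9_928));
}
```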
@@ -1292,10 +1356,9 @@ mod tests { let seconds_per_voting_period = ::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - let reduce_follow_distance_blocks = - config.follow_distance / ETH1_BLOCK_TIME_TOLERANCE_FACTOR; + let cache_follow_distance_blocks = config.follow_distance - config.cache_follow_distance(); - let minimum_len = eth1_blocks_per_voting_period * 2 + reduce_follow_distance_blocks; + let minimum_len = eth1_blocks_per_voting_period * 2 + cache_follow_distance_blocks; assert!(len > minimum_len as usize); } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index bb00ebaab1b..3fe3b3ca527 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -107,7 +107,7 @@ mod eth1_cache { async { let log = null_logger(); - for follow_distance in 0..2 { + for follow_distance in 0..3 { let eth1 = new_ganache_instance() .await .expect("should start eth1 environment"); @@ -116,17 +116,16 @@ mod eth1_cache { let initial_block_number = get_block_number(&web3).await; - let service = Service::new( - Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); + let config = Config { + endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: initial_block_number, + follow_distance, + ..Config::default() + }; + let cache_follow_distance = config.cache_follow_distance(); + + let service = Service::new(config, log.clone(), MainnetEthSpec::default_spec()); // Create some blocks and then consume them, performing the test `rounds` times. for round in 0..2 { @@ -139,7 +138,7 @@ mod eth1_cache { .blocks() .read() .highest_block_number() - .map(|n| n + follow_distance) + .map(|n| n + cache_follow_distance) .expect("should have a latest block after the first round") }; @@ -168,12 +167,13 @@ mod eth1_cache { .blocks() .read() .highest_block_number() - .map(|n| n + follow_distance), + .map(|n| n + cache_follow_distance), Some(initial + blocks), - "should update {} blocks in round {} (follow {})", + "should update {} blocks in round {} (follow {} i.e. 
{})", blocks, round, follow_distance, + cache_follow_distance ); } } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 6cd49e9f608..0351b5e433d 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -35,3 +35,4 @@ rand = "0.8.5" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" +ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ad14ceb519b..9eb98cecb97 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,13 +1,14 @@ use crate::engines::ForkChoiceState; use async_trait::async_trait; use eth1::http::RpcError; +pub use ethers_core::types::Transaction; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use slog::Logger; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, Hash256, - Uint256, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, + Hash256, Uint256, VariableList, }; pub mod auth; @@ -46,6 +47,8 @@ pub enum Error { prev_randao: Hash256, suggested_fee_recipient: Address, }, + DeserializeTransaction(ssz_types::Error), + DeserializeTransactions(ssz_types::Error), } impl From for Error { @@ -109,6 +112,9 @@ pub enum BlockByNumberQuery<'a> { Tag(&'a str), } +/// Representation of an exection block with enough detail to determine the terminal PoW block. +/// +/// See `get_pow_block_hash_at_total_difficulty`. #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ExecutionBlock { @@ -120,6 +126,35 @@ pub struct ExecutionBlock { pub total_difficulty: Uint256, } +/// Representation of an exection block with enough detail to reconstruct a payload. 
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionBlockWithTransactions { + pub parent_hash: ExecutionBlockHash, + #[serde(alias = "miner")] + pub fee_recipient: Address, + pub state_root: Hash256, + pub receipts_root: Hash256, + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub logs_bloom: FixedVector, + #[serde(alias = "mixHash")] + pub prev_randao: Hash256, + #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + pub block_number: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_used: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub extra_data: VariableList, + pub base_fee_per_gas: Uint256, + #[serde(rename = "hash")] + pub block_hash: ExecutionBlockHash, + pub transactions: Vec, +} + #[derive(Clone, Copy, Debug, PartialEq)] pub struct PayloadAttributes { pub timestamp: u64, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 718b0853435..179045ccf86 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -149,7 +149,7 @@ impl HttpJsonRpc { .await } - pub async fn get_block_by_hash<'a>( + pub async fn get_block_by_hash( &self, block_hash: ExecutionBlockHash, ) -> Result, Error> { @@ -159,6 +159,15 @@ impl HttpJsonRpc { .await } + pub async fn get_block_by_hash_with_txns( + &self, + block_hash: ExecutionBlockHash, + ) -> Result>, Error> { + let params = json!([block_hash, true]); + self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) + .await + } + pub async fn new_payload_v1( &self, execution_payload: ExecutionPayload, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 442c5b48d9d..d6acd5fe54c 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -17,7 +17,7 @@ use payload_status::process_multiple_payload_statuses; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; use std::convert::TryInto; @@ -268,7 +268,7 @@ impl ExecutionLayer { &self.inner.builders } - fn executor(&self) -> &TaskExecutor { + pub fn executor(&self) -> &TaskExecutor { &self.inner.executor } @@ -304,11 +304,7 @@ impl ExecutionLayer { T: Fn(&'a Self) -> U, U: Future>, { - let runtime = self - .executor() - .runtime() - .upgrade() - .ok_or(Error::ShuttingDown)?; + let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; // TODO(merge): respect the shutdown signal. runtime.block_on(generate_future(self)) } @@ -322,11 +318,7 @@ impl ExecutionLayer { T: Fn(&'a Self) -> U, U: Future, { - let runtime = self - .executor() - .runtime() - .upgrade() - .ok_or(Error::ShuttingDown)?; + let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; // TODO(merge): respect the shutdown signal. 
Ok(runtime.block_on(generate_future(self))) } @@ -539,6 +531,23 @@ impl ExecutionLayer { if let Some(preparation_data_entry) = self.proposer_preparation_data().await.get(&proposer_index) { + if let Some(suggested_fee_recipient) = self.inner.suggested_fee_recipient { + if preparation_data_entry.preparation_data.fee_recipient != suggested_fee_recipient + { + warn!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "proposer_index" => ?proposer_index, + "fee_recipient" => ?preparation_data_entry.preparation_data.fee_recipient, + "suggested_fee_recipient" => ?suggested_fee_recipient, + ) + } + } // The values provided via the API have first priority. preparation_data_entry.preparation_data.fee_recipient } else if let Some(address) = self.inner.suggested_fee_recipient { @@ -668,25 +677,28 @@ impl ExecutionLayer { suggested_fee_recipient, }; - engine + let response = engine .notify_forkchoice_updated( fork_choice_state, Some(payload_attributes), self.log(), ) - .await - .map(|response| response.payload_id)? - .ok_or_else(|| { + .await?; + + match response.payload_id { + Some(payload_id) => payload_id, + None => { error!( self.log(), "Exec engine unable to produce payload"; "msg" => "No payload ID, the engine is likely syncing. \ This has the potential to cause a missed block \ proposal.", + "status" => ?response.payload_status ); - - ApiError::PayloadIdUnavailable - })? + return Err(ApiError::PayloadIdUnavailable); + } + } }; engine @@ -1183,6 +1195,64 @@ impl ExecutionLayer { } } + pub async fn get_payload_by_block_hash( + &self, + hash: ExecutionBlockHash, + ) -> Result>, Error> { + self.engines() + .first_success(|engine| async move { + self.get_payload_by_block_hash_from_engine(engine, hash) + .await + }) + .await + .map_err(Error::EngineErrors) + } + + async fn get_payload_by_block_hash_from_engine( + &self, + engine: &Engine, + hash: ExecutionBlockHash, + ) -> Result>, ApiError> { + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); + + if hash == ExecutionBlockHash::zero() { + return Ok(Some(ExecutionPayload::default())); + } + + let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::(hash).await? 
{ + block + } else { + return Ok(None); + }; + + let transactions = VariableList::new( + block + .transactions + .into_iter() + .map(|transaction| VariableList::new(transaction.rlp().to_vec())) + .collect::>() + .map_err(ApiError::DeserializeTransaction)?, + ) + .map_err(ApiError::DeserializeTransactions)?; + + Ok(Some(ExecutionPayload { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions, + })) + } + pub async fn propose_blinded_beacon_block( &self, block: &SignedBeaconBlock>, @@ -1205,13 +1275,15 @@ impl ExecutionLayer { mod test { use super::*; use crate::test_utils::MockExecutionLayer as GenericMockExecutionLayer; + use task_executor::test_utils::TestRuntime; use types::MainnetEthSpec; type MockExecutionLayer = GenericMockExecutionLayer; #[tokio::test] async fn produce_three_valid_pos_execution_blocks() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .produce_valid_execution_payload_on_head() .await @@ -1223,7 +1295,8 @@ mod test { #[tokio::test] async fn finds_valid_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; @@ -1242,7 +1315,8 @@ mod test { #[tokio::test] async fn verifies_valid_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; @@ -1258,7 +1332,8 @@ mod test { #[tokio::test] async fn rejects_invalid_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; @@ -1276,7 +1351,8 @@ mod test { #[tokio::test] async fn rejects_unknown_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 4a761c8e46a..356c5a46dd9 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -31,4 +31,8 @@ lazy_static::lazy_static! 
{ "Indicates hits or misses for already having prepared a payload id before payload production", &["event"] ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH: Result = try_create_histogram( + "execution_layer_get_payload_by_block_hash_time", + "Time to reconstruct a payload from the EE using eth_getBlockByHash" + ); } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index f5a7313395b..5770a8a3821 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -2,61 +2,22 @@ use crate::{ test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, JWT_SECRET}, Config, *, }; -use environment::null_logger; use sensitive_url::SensitiveUrl; -use std::sync::Arc; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; -pub struct ExecutionLayerRuntime { - pub runtime: Option>, - pub _runtime_shutdown: exit_future::Signal, - pub task_executor: TaskExecutor, - pub log: Logger, -} - -impl Default for ExecutionLayerRuntime { - fn default() -> Self { - let runtime = Arc::new( - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(), - ); - let (runtime_shutdown, exit) = exit_future::signal(); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let log = null_logger().unwrap(); - let task_executor = - TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); - - Self { - runtime: Some(runtime), - _runtime_shutdown: runtime_shutdown, - task_executor, - log, - } - } -} - -impl Drop for ExecutionLayerRuntime { - fn drop(&mut self) { - if let Some(runtime) = self.runtime.take() { - Arc::try_unwrap(runtime).unwrap().shutdown_background() - } - } -} - pub struct MockExecutionLayer { pub server: MockServer, pub el: ExecutionLayer, - pub el_runtime: ExecutionLayerRuntime, + pub executor: TaskExecutor, pub spec: ChainSpec, } impl MockExecutionLayer { - pub fn default_params() -> Self { + pub fn default_params(executor: TaskExecutor) -> Self { Self::new( + executor, DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), @@ -65,13 +26,13 @@ impl MockExecutionLayer { } pub fn new( + executor: TaskExecutor, terminal_total_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, ) -> Self { - let el_runtime = ExecutionLayerRuntime::default(); - let handle = el_runtime.runtime.as_ref().unwrap().handle(); + let handle = executor.handle().unwrap(); let mut spec = T::default_spec(); spec.terminal_total_difficulty = terminal_total_difficulty; @@ -79,7 +40,7 @@ impl MockExecutionLayer { spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; let server = MockServer::new( - handle, + &handle, terminal_total_difficulty, terminal_block, terminal_block_hash, @@ -97,17 +58,13 @@ impl MockExecutionLayer { suggested_fee_recipient: Some(Address::repeat_byte(42)), ..Default::default() }; - let el = ExecutionLayer::from_config( - config, - el_runtime.task_executor.clone(), - el_runtime.log.clone(), - ) - .unwrap(); + let el = + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); Self { server, el, - el_runtime, + executor, spec, } } diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs 
b/beacon_node/execution_layer/src/test_utils/mod.rs index afa6afb28c0..805f6716fbf 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,7 +22,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; -pub use mock_execution_layer::{ExecutionLayerRuntime, MockExecutionLayer}; +pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; @@ -126,51 +126,156 @@ impl MockServer { self.ctx.previous_request.lock().take() } - pub fn all_payloads_valid(&self) { - let response = StaticNewPayloadResponse { - status: PayloadStatusV1 { - status: PayloadStatusV1Status::Valid, - latest_valid_hash: None, - validation_error: None, - }, - should_import: true, - }; - *self.ctx.static_forkchoice_updated_response.lock() = Some(response.status.clone()); + pub fn set_new_payload_response(&self, response: StaticNewPayloadResponse) { *self.ctx.static_new_payload_response.lock() = Some(response) } + pub fn set_forkchoice_updated_response(&self, status: PayloadStatusV1) { + *self.ctx.static_forkchoice_updated_response.lock() = Some(status); + } + + fn valid_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: None, + validation_error: None, + } + } + + fn valid_new_payload_response() -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::valid_status(), + should_import: true, + } + } + + fn syncing_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + } + } + + fn syncing_new_payload_response(should_import: bool) -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::syncing_status(), + should_import, + } + } + + fn invalid_status(latest_valid_hash: ExecutionBlockHash) -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: Some(latest_valid_hash), + validation_error: Some("static response".into()), + } + } + + fn invalid_new_payload_response( + latest_valid_hash: ExecutionBlockHash, + ) -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::invalid_status(latest_valid_hash), + should_import: true, + } + } + + fn invalid_block_hash_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::InvalidBlockHash, + latest_valid_hash: None, + validation_error: Some("static response".into()), + } + } + + fn invalid_block_hash_new_payload_response() -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::invalid_block_hash_status(), + should_import: true, + } + } + + fn invalid_terminal_block_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::InvalidTerminalBlock, + latest_valid_hash: None, + validation_error: Some("static response".into()), + } + } + + fn invalid_terminal_block_new_payload_response() -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::invalid_terminal_block_status(), + should_import: true, + } + } + + pub fn all_payloads_valid(&self) { + self.all_payloads_valid_on_new_payload(); + self.all_payloads_valid_on_forkchoice_updated(); + } + + pub fn all_payloads_valid_on_new_payload(&self) { + self.set_new_payload_response(Self::valid_new_payload_response()); + } + + pub fn 
all_payloads_valid_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::valid_status()); + } + /// Setting `should_import = true` simulates an EE that initially returns `SYNCING` but obtains - /// the block via it's own means (e.g., devp2p). + /// the block via its own means (e.g., devp2p). pub fn all_payloads_syncing(&self, should_import: bool) { - let response = StaticNewPayloadResponse { - status: PayloadStatusV1 { - status: PayloadStatusV1Status::Syncing, - latest_valid_hash: None, - validation_error: None, - }, - should_import, - }; - *self.ctx.static_forkchoice_updated_response.lock() = Some(response.status.clone()); - *self.ctx.static_new_payload_response.lock() = Some(response) + self.all_payloads_syncing_on_new_payload(should_import); + self.all_payloads_syncing_on_forkchoice_updated(); + } + + pub fn all_payloads_syncing_on_new_payload(&self, should_import: bool) { + self.set_new_payload_response(Self::syncing_new_payload_response(should_import)); + } + + pub fn all_payloads_syncing_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::syncing_status()); } pub fn all_payloads_invalid(&self, latest_valid_hash: ExecutionBlockHash) { - let response = StaticNewPayloadResponse { - status: PayloadStatusV1 { - status: PayloadStatusV1Status::Invalid, - latest_valid_hash: Some(latest_valid_hash), - validation_error: Some("static response".into()), - }, - should_import: true, - }; - *self.ctx.static_forkchoice_updated_response.lock() = Some(response.status.clone()); - *self.ctx.static_new_payload_response.lock() = Some(response) + self.all_payloads_invalid_on_new_payload(latest_valid_hash); + self.all_payloads_invalid_on_forkchoice_updated(latest_valid_hash); + } + + pub fn all_payloads_invalid_on_new_payload(&self, latest_valid_hash: ExecutionBlockHash) { + self.set_new_payload_response(Self::invalid_new_payload_response(latest_valid_hash)); + } + + pub fn all_payloads_invalid_on_forkchoice_updated( + &self, + latest_valid_hash: ExecutionBlockHash, + ) { + self.set_forkchoice_updated_response(Self::invalid_status(latest_valid_hash)); + } + + pub fn all_payloads_invalid_block_hash_on_new_payload(&self) { + self.set_new_payload_response(Self::invalid_block_hash_new_payload_response()); + } + + pub fn all_payloads_invalid_block_hash_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::invalid_block_hash_status()); + } + + pub fn all_payloads_invalid_terminal_block_on_new_payload(&self) { + self.set_new_payload_response(Self::invalid_terminal_block_new_payload_response()); + } + + pub fn all_payloads_invalid_terminal_block_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::invalid_terminal_block_status()); } - /// Disables any static payload response so the execution block generator will do its own + /// Disables any static payload responses so the execution block generator will do its own /// verification. 
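(Editorial note before `full_payload_verification` continues below.) These hunks split the old all-or-nothing `all_payloads_*` setters into per-endpoint helpers, so a test can, for example, return `SYNCING` from `engine_newPayload` while `engine_forkchoiceUpdated` keeps returning `VALID`. The underlying pattern is a canned response that shadows real verification until cleared; a minimal generic sketch (the `StaticResponder` type and string statuses are illustrative stand-ins, not Lighthouse APIs):

```rust
use std::sync::Mutex;

/// A responder that normally computes an answer, but can be pinned to a
/// canned response for tests (mirroring `static_new_payload_response`).
struct StaticResponder {
    static_response: Mutex<Option<String>>,
}

impl StaticResponder {
    fn new() -> Self {
        Self { static_response: Mutex::new(None) }
    }

    /// Pin all future responses to `response` (e.g. "VALID" or "SYNCING").
    fn set_static(&self, response: &str) {
        *self.static_response.lock().unwrap() = Some(response.to_string());
    }

    /// Clear the override so real verification logic runs again.
    fn clear_static(&self) {
        *self.static_response.lock().unwrap() = None;
    }

    /// Answer a request: the static response wins if one is set.
    fn respond(&self, compute: impl FnOnce() -> String) -> String {
        self.static_response
            .lock()
            .unwrap()
            .clone()
            .unwrap_or_else(compute)
    }
}

fn main() {
    let responder = StaticResponder::new();
    assert_eq!(responder.respond(|| "computed".into()), "computed");

    responder.set_static("SYNCING");
    assert_eq!(responder.respond(|| "computed".into()), "SYNCING");

    responder.clear_static();
    assert_eq!(responder.respond(|| "computed".into()), "computed");
}
```

Clearing both overrides is exactly what the updated `full_payload_verification`, continued below, now does for the `new_payload` and `forkchoice_updated` responses.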
pub fn full_payload_verification(&self) { - *self.ctx.static_new_payload_response.lock() = None + *self.ctx.static_new_payload_response.lock() = None; + *self.ctx.static_forkchoice_updated_response.lock() = None; } pub fn insert_pow_block( diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0e20f5c8b8d..a34618c2ef4 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -30,6 +30,7 @@ futures = "0.3.8" execution_layer = {path = "../execution_layer"} parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} +task_executor = { path = "../../common/task_executor" } [dev-dependencies] @@ -37,6 +38,7 @@ store = { path = "../store" } environment = { path = "../../lighthouse/environment" } tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } +logging = { path = "../../common/logging" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 4142d064cb1..4acca75c1b3 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -7,7 +7,7 @@ use state_processing::{ }; use std::sync::Arc; use types::beacon_state::participation_cache::Error as ParticipationCacheError; -use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BeaconState, BeaconStateError, EthSpec, Hash256}; use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; const MAX_REQUEST_RANGE_EPOCHS: usize = 100; @@ -112,7 +112,7 @@ pub fn get_attestation_performance( ) })?; let first_block = chain - .get_block(first_block_root) + .get_blinded_block(first_block_root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) @@ -120,7 +120,7 @@ pub fn get_attestation_performance( // Load the block of the prior slot which will be used to build the starting state. let prior_block = chain - .get_block(&first_block.parent_root()) + .get_blinded_block(&first_block.parent_root()) .and_then(|maybe_block| { maybe_block .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) @@ -197,13 +197,13 @@ pub fn get_attestation_performance( .iter() .map(|root| { chain - .get_block(root) + .get_blinded_block(root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) .map_err(beacon_chain_error) }) - .collect::>, _>>()?; + .collect::, _>>()?; replayer = replayer .apply_blocks(blocks, None) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index c21701f3a37..727215bfcad 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; use std::str::FromStr; -use types::{Hash256, SignedBeaconBlock, Slot}; +use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -52,7 +52,55 @@ impl BlockId { } /// Return the `SignedBeaconBlock` identified by `self`. 
- pub fn block( + pub fn blinded_block( + &self, + chain: &BeaconChain, + ) -> Result>, warp::Rejection> { + match &self.0 { + CoreBlockId::Head => chain + .head_beacon_block() + .map(Into::into) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Slot(slot) => { + let root = self.root(chain)?; + chain + .get_blinded_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|block_opt| match block_opt { + Some(block) => { + if block.slot() != *slot { + return Err(warp_utils::reject::custom_not_found(format!( + "slot {} was skipped", + slot + ))); + } + Ok(block) + } + None => Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))), + }) + } + _ => { + let root = self.root(chain)?; + chain + .get_blinded_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + }) + } + } + } + + /// Return the `SignedBeaconBlock` identified by `self`. + pub async fn full_block( &self, chain: &BeaconChain, ) -> Result, warp::Rejection> { @@ -64,6 +112,7 @@ impl BlockId { let root = self.root(chain)?; chain .get_block(&root) + .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| match block_opt { Some(block) => { @@ -85,6 +134,7 @@ impl BlockId { let root = self.root(chain)?; chain .get_block(&root) + .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|root_opt| { root_opt.ok_or_else(|| { diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index d948c0d7d81..1b924f38288 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -10,8 +10,8 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; use types::{ - BeaconCommittee, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, - OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, + BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, + Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, }; use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; @@ -104,7 +104,7 @@ impl PackingEfficiencyHandler { fn apply_block( &mut self, - block: &SignedBeaconBlock, + block: &SignedBeaconBlock>, ) -> Result { let block_body = block.message().body(); let attestations = block_body.attestations(); @@ -251,7 +251,7 @@ pub fn get_block_packing_efficiency( .ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?; let first_block = chain - .get_block(first_block_root) + .get_blinded_block(first_block_root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) @@ -309,7 +309,7 @@ pub fn get_block_packing_efficiency( }; let pre_block_hook = |_state: &mut BeaconState, - block: &SignedBeaconBlock| + block: &SignedBeaconBlock<_, BlindedPayload<_>>| -> Result<(), PackingEfficiencyError> { let slot = block.slot(); @@ -363,13 +363,13 @@ pub fn get_block_packing_efficiency( .iter() .map(|root| { chain - .get_block(root) + .get_blinded_block(root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) .map_err(beacon_chain_error) }) - .collect::>, _>>()?; + .collect::, _>>()?; replayer = replayer .apply_blocks(blocks, None) diff --git 
a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 3a7c81ad8af..014db8a6027 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -2,7 +2,7 @@ use beacon_chain::store::{metadata::CURRENT_SCHEMA_VERSION, AnchorInfo}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::DatabaseInfo; use std::sync::Arc; -use types::SignedBeaconBlock; +use types::SignedBlindedBeaconBlock; pub fn info( chain: Arc>, @@ -22,10 +22,10 @@ pub fn info( pub fn historical_blocks( chain: Arc>, - blocks: Vec>, + blocks: Vec>, ) -> Result { chain - .import_historical_block_batch(&blocks) + .import_historical_block_batch(blocks) .map_err(warp_utils::reject::beacon_chain_error)?; let anchor = chain.store.get_anchor_info().ok_or_else(|| { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 7b58ce68247..fa3b6a9d953 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -48,8 +48,9 @@ use types::{ Attestation, AttesterSlashing, BeaconBlockBodyMerge, BeaconBlockMerge, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SyncCommitteeMessage, SyncContributionData, + SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SyncCommitteeMessage, + SyncContributionData, }; use version::{ add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, @@ -826,10 +827,10 @@ pub fn serve( (None, None) => chain .head_beacon_block() .map_err(warp_utils::reject::beacon_chain_error) - .map(|block| (block.canonical_root(), block))?, + .map(|block| (block.canonical_root(), block.into()))?, // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { - let parent = BlockId::from_root(parent_root).block(&chain)?; + let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) .map_err(warp_utils::reject::beacon_chain_error)? @@ -847,14 +848,14 @@ pub fn serve( })?; BlockId::from_root(root) - .block(&chain) + .blinded_block(&chain) .map(|block| (root, block))? } // Slot is supplied, search by slot and optionally filter by // parent root. (Some(slot), parent_root_opt) => { let root = BlockId::from_slot(slot).root(&chain)?; - let block = BlockId::from_root(root).block(&chain)?; + let block = BlockId::from_root(root).blinded_block(&chain)?; // If the parent root was supplied, check that it matches the block // obtained via a slot lookup. 
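A theme running through these `http_api` hunks: handlers that only need consensus data now call `get_blinded_block` (payload header only, no execution-layer round trip), while the remaining full-block path, `full_block`, becomes `async` because a post-merge `SignedBeaconBlock` may have to be reconstructed by querying the execution engine for the payload (the `execution_layer_get_payload_by_block_hash_time` histogram added earlier times exactly that). A toy model of the split, with stand-in types and `u64` roots instead of `Hash256` (assumes the `tokio` crate with `macros`/`rt` features):

```rust
use std::collections::HashMap;

/// Stand-ins for the real types: a blinded block stores only a payload
/// header (here just a hash); a full block carries the payload body.
#[derive(Clone)]
struct BlindedBlock { slot: u64, payload_hash: u64 }
#[derive(Clone)]
struct FullBlock { slot: u64, payload: Vec<u8> }

/// A store that keeps blinded blocks; payload bodies live elsewhere
/// (in Lighthouse, the execution engine, queried via eth_getBlockByHash).
struct Store {
    blinded: HashMap<u64, BlindedBlock>,
    execution_payloads: HashMap<u64, Vec<u8>>,
}

impl Store {
    /// Cheap and synchronous: no payload reconstruction needed.
    fn get_blinded_block(&self, root: u64) -> Option<BlindedBlock> {
        self.blinded.get(&root).cloned()
    }

    /// Potentially expensive: must fetch the payload body. Async in the
    /// real code because it may go over JSON-RPC to the execution engine.
    async fn get_full_block(&self, root: u64) -> Option<FullBlock> {
        let blinded = self.blinded.get(&root)?;
        let payload = self.execution_payloads.get(&blinded.payload_hash)?.clone();
        Some(FullBlock { slot: blinded.slot, payload })
    }
}

#[tokio::main]
async fn main() {
    let mut store = Store { blinded: HashMap::new(), execution_payloads: HashMap::new() };
    store.blinded.insert(1, BlindedBlock { slot: 10, payload_hash: 99 });
    store.execution_payloads.insert(99, vec![0xde, 0xad]);

    assert_eq!(store.get_blinded_block(1).unwrap().slot, 10);
    assert_eq!(store.get_full_block(1).await.unwrap().slot, 10);
    assert_eq!(store.get_full_block(1).await.unwrap().payload.len(), 2);
}
```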
@@ -899,7 +900,7 @@ pub fn serve( .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { let root = block_id.root(&chain)?; - let block = BlockId::from_root(root).block(&chain)?; + let block = BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) @@ -1161,8 +1162,8 @@ pub fn serve( block_id: BlockId, chain: Arc>, accept_header: Option| { - blocking_task(move || { - let block = block_id.block(&chain)?; + async move { + let block = block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1181,7 +1182,7 @@ pub fn serve( .map(|res| warp::reply::json(&res).into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) - }) + } }, ); @@ -1207,7 +1208,7 @@ pub fn serve( .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { block_id - .block(&chain) + .blinded_block(&chain) .map(|block| block.message().body().attestations().clone()) .map(api_types::GenericResponse::from) }) @@ -1736,9 +1737,9 @@ pub fn serve( .head_info() .map(|info| info.slot) .map_err(warp_utils::reject::beacon_chain_error)?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to read slot clock".into()) + })?; // Taking advantage of saturating subtraction on slot. let sync_distance = current_slot - head_slot; @@ -2786,7 +2787,7 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( - |blocks: Vec>, + |blocks: Vec>, chain: Arc>, log: Logger| { info!( diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 64ce3b6566c..8b12aa4a5b2 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,7 +1,9 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` use crate::common::*; +use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use eth2::types::DepositContractData; -use types::{EthSpec, MainnetEthSpec}; +use tree_hash::TreeHash; +use types::{EthSpec, FullPayload, MainnetEthSpec, Slot}; type E = MainnetEthSpec; @@ -30,3 +32,96 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } + +// Test that running fork choice before proposing results in selection of the correct head. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn fork_choice_before_proposal() { + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 32; + let all_validators = (0..validator_count).collect::>(); + let num_initial: u64 = 31; + + let tester = InteractiveTester::::new(None, validator_count).await; + let harness = &tester.harness; + + // Create some chain depth. + harness.advance_slot(); + harness.extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + // We set up the following block graph, where B is a block that is temporarily orphaned by C, + // but is then reinstated and built upon by D. 
+ // + // A | B | - | D | + // ^ | - | C | + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + let slot_c = slot_a + 2; + let slot_d = slot_a + 3; + + let state_a = harness.get_current_state(); + let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b); + let block_root_b = harness.process_block(slot_b, block_b).unwrap(); + + // Create attestations to B but keep them in reserve until after C has been processed. + let attestations_b = harness.make_attestations( + &all_validators, + &state_b, + state_b.tree_hash_root(), + block_root_b, + slot_b, + ); + + let (block_c, state_c) = harness.make_block(state_a, slot_c); + let block_root_c = harness.process_block(slot_c, block_c.clone()).unwrap(); + + // Create attestations to C from a small number of validators and process them immediately. + let attestations_c = harness.make_attestations( + &all_validators[..validator_count / 2], + &state_c, + state_c.tree_hash_root(), + block_root_c, + slot_c, + ); + harness.process_attestations(attestations_c); + + // Apply the attestations to B, but don't re-run fork choice. + harness.process_attestations(attestations_b); + + // Due to proposer boost, the head should be C during slot C. + assert_eq!( + harness.chain.head_info().unwrap().block_root, + block_root_c.into() + ); + + // Ensure that building a block via the HTTP API re-runs fork choice and builds block D upon B. + // Manually prod the per-slot task, because the slot timer doesn't run in the background in + // these tests. + harness.advance_slot(); + harness.chain.per_slot_task(); + + let proposer_index = state_b + .get_beacon_proposer_index(slot_d, &harness.chain.spec) + .unwrap(); + let randao_reveal = harness + .sign_randao_reveal(&state_b, proposer_index, slot_d) + .into(); + let block_d = tester + .client + .get_validator_blocks::>(slot_d, &randao_reveal, None) + .await + .unwrap() + .data; + + // Head is now B. + assert_eq!( + harness.chain.head_info().unwrap().block_root, + block_root_b.into() + ); + // D's parent is B. 
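(The assertion this comment describes closes the test just below.) The `validator_count = 32` requirement at the top of this test is an integer-division trap worth spelling out: the proposer boost is derived from a per-slot committee weight of roughly `validator_count / 32`, which floors to zero below 32 validators, so block C could never temporarily out-weigh B's attestations. A simplified sketch of the arithmetic (a rough model only, not Lighthouse's exact fork choice code; the boost percentage is assumed):

```rust
const SLOTS_PER_EPOCH: u64 = 32;
const PROPOSER_SCORE_BOOST: u64 = 40; // percent; exact value irrelevant here

/// Simplified per-slot committee weight times the boost percentage.
/// The integer division floors to zero whenever validator_count < 32,
/// which is why the test above insists on at least 32 validators.
fn proposer_boost(validator_count: u64, effective_balance_gwei: u64) -> u64 {
    let committee_weight = (validator_count / SLOTS_PER_EPOCH) * effective_balance_gwei;
    committee_weight * PROPOSER_SCORE_BOOST / 100
}

fn main() {
    let balance = 32_000_000_000; // 32 ETH in Gwei
    // 31 validators: 31 / 32 == 0, so block C would get no boost at all
    // and the reorg scenario the test constructs could not be exercised.
    assert_eq!(proposer_boost(31, balance), 0);
    assert!(proposer_boost(32, balance) > 0);
}
```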
+ assert_eq!(block_d.parent_root(), block_root_b.into()); +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 98dd3d5a5f6..5f53a961560 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -20,6 +20,7 @@ use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; +use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; @@ -63,6 +64,7 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, + _runtime: TestRuntime, } impl ApiTester { @@ -121,8 +123,7 @@ impl ApiTester { harness.chain.slot().unwrap(), ) .into_iter() - .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) - .flatten() + .flat_map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) .collect::>(); assert!( @@ -186,7 +187,7 @@ impl ApiTester { external_peer_id, } = create_api_server(chain.clone(), log).await; - tokio::spawn(server); + harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( @@ -213,6 +214,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + _runtime: harness.runtime, } } @@ -244,8 +246,7 @@ impl ApiTester { harness.chain.slot().unwrap(), ) .into_iter() - .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) - .flatten() + .flat_map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) .collect::>(); let attester_slashing = harness.make_attester_slashing(vec![0, 1]); @@ -265,7 +266,7 @@ impl ApiTester { external_peer_id, } = create_api_server(chain.clone(), log).await; - tokio::spawn(server); + harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( @@ -292,6 +293,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + _runtime: harness.runtime, } } @@ -762,9 +764,9 @@ impl ApiTester { } } - fn get_block(&self, block_id: BlockId) -> Option> { - let root = self.get_block_root(block_id); - root.and_then(|root| self.chain.get_block(&root).unwrap()) + async fn get_block(&self, block_id: BlockId) -> Option> { + let root = self.get_block_root(block_id)?; + self.chain.get_block(&root).await.unwrap() } pub async fn test_beacon_headers_all_slots(self) -> Self { @@ -859,7 +861,11 @@ impl ApiTester { } } - let block_opt = block_root_opt.and_then(|root| self.chain.get_block(&root).unwrap()); + let block_opt = if let Some(root) = block_root_opt { + self.chain.get_block(&root).await.unwrap() + } else { + None + }; if block_opt.is_none() && result.is_none() { continue; @@ -945,7 +951,7 @@ impl ApiTester { pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { - let expected = self.get_block(block_id); + let expected = self.get_block(block_id).await; if let BlockId::Slot(slot) = block_id { if expected.is_none() { @@ -1030,6 +1036,7 @@ impl ApiTester { let expected = self .get_block(block_id) + .await .map(|block| block.message().body().attestations().clone().into()); if let BlockId::Slot(slot) = block_id { @@ -2385,8 +2392,7 @@ impl ApiTester { .unwrap(); let attesting_validators: Vec = committees .into_iter() - .map(|committee| committee.committee.iter().cloned()) - .flatten() + .flat_map(|committee| committee.committee.iter().cloned()) .collect(); // All attesters should now be considered live let 
expected = expected diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 95ca7dc27a6..e7c4781e21f 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -37,12 +37,12 @@ rand = "0.8.5" directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } -superstruct = "0.4.1" -prometheus-client = "0.15.0" +superstruct = "0.5.0" +prometheus-client = "0.16.0" unused_port = { path = "../../common/unused_port" } [dependencies.libp2p] -version = "0.43.0" +version = "0.45.1" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index e67bb29de3c..81de3f015ad 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1006,9 +1006,6 @@ where proto, error, } => { - if matches!(error, RPCError::HandlerRejected) { - // this peer's request got canceled - } // Inform the peer manager of the error. // An inbound error here means we sent an error to the peer, or the stream // timed out. diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 1d542a7f393..b513ede59fa 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -210,10 +210,8 @@ pub fn load_enr_from_disk(dir: &Path) -> Result { let mut enr_string = String::new(); match enr_file.read_to_string(&mut enr_string) { Err(_) => Err("Could not read ENR from file".to_string()), - Ok(_) => match Enr::from_str(&enr_string) { - Ok(disk_enr) => Ok(disk_enr), - Err(e) => Err(format!("ENR from file could not be decoded: {:?}", e)), - }, + Ok(_) => Enr::from_str(&enr_string) + .map_err(|e| format!("ENR from file could not be decoded: {:?}", e)), } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 11f59b0d218..3575d9d34d7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -457,10 +457,7 @@ impl PeerManager { debug!(self.log, "Internal RPC Error"; "error" => %e, "peer_id" => %peer_id); return; } - RPCError::HandlerRejected => { - // Our fault. Do nothing - return; - } + RPCError::HandlerRejected => PeerAction::Fatal, RPCError::InvalidData(_) => { // Peer is not complying with the protocol. This is considered a malicious action PeerAction::Fatal @@ -841,21 +838,14 @@ impl PeerManager { let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); let wanted_peers = if peer_count < self.target_peers.saturating_sub(dialing_peers) { // We need more peers in general. - // The maximum discovery query is for 16 peers, but we can search for less if - // needed. - std::cmp::min( - self.target_peers.saturating_sub(dialing_peers) - peer_count, - 16, - ) + // Note: The maximum discovery query is bounded by `Discovery`. 
+ self.target_peers.saturating_sub(dialing_peers) - peer_count } else if outbound_only_peer_count < self.min_outbound_only_peers() && peer_count < self.max_outbound_dialing_peers() { - std::cmp::min( - self.max_outbound_dialing_peers() - .saturating_sub(dialing_peers) - - peer_count, - 16, - ) + self.max_outbound_dialing_peers() + .saturating_sub(dialing_peers) + - peer_count } else { 0 }; @@ -1022,20 +1012,17 @@ impl PeerManager { let mut removed_peer_index = None; for (index, (candidate_peer, info)) in peers_on_subnet.iter().enumerate() { // Ensure we don't remove too many outbound peers - if info.is_outbound_only() { - if self.target_outbound_peers() - < connected_outbound_peer_count + if info.is_outbound_only() + && self.target_outbound_peers() + >= connected_outbound_peer_count .saturating_sub(outbound_peers_pruned) - { - outbound_peers_pruned += 1; - } else { - // Restart the main loop with the outbound peer removed from - // the list. This will lower the peers per subnet count and - // potentially a new subnet may be chosen to remove peers. This - // can occur recursively until we have no peers left to choose - // from. - continue; - } + { + // Restart the main loop with the outbound peer removed from + // the list. This will lower the peers per subnet count and + // potentially a new subnet may be chosen to remove peers. This + // can occur recursively until we have no peers left to choose + // from. + continue; } // Check the sync committee @@ -1058,6 +1045,9 @@ impl PeerManager { } } + if info.is_outbound_only() { + outbound_peers_pruned += 1; + } // This peer is suitable to be pruned removed_peer_index = Some(index); break; @@ -1892,4 +1882,170 @@ mod tests { assert!(!connected_peers.contains(&peers[1])); assert!(!connected_peers.contains(&peers[2])); } + + /// This test is for reproducing the issue: + /// https://github.com/sigp/lighthouse/pull/3236#issue-1256432659 + /// + /// Whether the issue happens depends on `subnet_to_peer` (HashMap), since HashMap doesn't + /// guarantee a particular order of iteration. So we repeat the test case to try to reproduce + /// the issue. + #[tokio::test] + async fn test_peer_manager_prune_based_on_subnet_count_repeat() { + for _ in 0..100 { + test_peer_manager_prune_based_on_subnet_count().await; + } + } + + /// Test the pruning logic to prioritize peers with the most subnets. This test specifies + /// the connection direction for the peers. + /// Either Peer 4 or 5 is expected to be removed in this test case. + /// + /// Create 8 peers. + /// Peer0 (out) : Subnet 1, Sync-committee-1 + /// Peer1 (out) : Subnet 1, Sync-committee-1 + /// Peer2 (out) : Subnet 2, Sync-committee-2 + /// Peer3 (out) : Subnet 2, Sync-committee-2 + /// Peer4 (out) : Subnet 3 + /// Peer5 (out) : Subnet 3 + /// Peer6 (in) : Subnet 4 + /// Peer7 (in) : Subnet 5 + async fn test_peer_manager_prune_based_on_subnet_count() { + let target = 7; + let mut peer_manager = build_peer_manager(target).await; + + // Create 8 peers to connect to. 
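(An aside on the heartbeat hunk above, before the pruning test's peer-creation loop continues below.) The heartbeat no longer clamps discovery queries to 16 peers; it simply asks for the full deficit and lets `Discovery` bound the query size itself. In simplified form (free function and parameter names invented for illustration):

```rust
/// Simplified version of the heartbeat's wanted-peers computation after
/// this change: no local cap at 16; Discovery bounds query size itself.
fn wanted_peers(
    peer_count: usize,
    dialing_peers: usize,
    outbound_only: usize,
    target_peers: usize,
    min_outbound_only: usize,
    max_outbound_dialing: usize,
) -> usize {
    if peer_count < target_peers.saturating_sub(dialing_peers) {
        // Need more peers in general.
        target_peers.saturating_sub(dialing_peers) - peer_count
    } else if outbound_only < min_outbound_only && peer_count < max_outbound_dialing {
        // Enough peers overall, but too few outbound-only connections.
        max_outbound_dialing.saturating_sub(dialing_peers) - peer_count
    } else {
        0
    }
}

fn main() {
    // Target 50, 10 connected, 2 dialing: ask discovery for 38 in one query
    // (previously this would have been clamped to 16).
    assert_eq!(wanted_peers(10, 2, 3, 50, 5, 55), 38);
}
```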
+ let mut peers = Vec::new(); + for x in 0..8 { + let peer = PeerId::random(); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + match x { + 0 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(1, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 1 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(1, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 2 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + 3 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + 4 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(3, true).unwrap(); + } + 5 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(3, true).unwrap(); + } + 6 => { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(4, true).unwrap(); + } + 7 => { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(5, true).unwrap(); + } + _ => unreachable!(), + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets, + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + let long_lived_subnets = peer_manager + .network_globals + .peers + .read() + .peer_info(&peer) + .unwrap() + .long_lived_subnets(); + println!("{},{}", x, peer); + for subnet in long_lived_subnets { + println!("Subnet: {:?}", subnet); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer, subnet); + } + peers.push(peer); + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. + assert_eq!( + peer_manager.network_globals.connected_or_dialing_peers(), + target + ); + + let connected_peers: std::collections::HashSet<_> = peer_manager + .network_globals + .peers + .read() + .connected_or_dialing_peers() + .cloned() + .collect(); + + // Either peer 4 or 5 should be removed. + // Check that we keep 6 and 7 peers, which we have few on a particular subnet. + assert!(connected_peers.contains(&peers[6])); + assert!(connected_peers.contains(&peers[7])); + } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index ac39e0cecc7..9ac062adc46 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -40,6 +40,9 @@ const IO_ERROR_RETRIES: u8 = 3; /// Maximum time given to the handler to perform shutdown operations. const SHUTDOWN_TIMEOUT_SECS: u8 = 15; +/// Maximum number of simultaneous inbound substreams we keep for this peer. +const MAX_INBOUND_SUBSTREAMS: usize = 32; + /// Identifier of inbound and outbound substreams from the handler's perspective. 
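(The `SubstreamId` struct this doc comment describes continues below.) The new `MAX_INBOUND_SUBSTREAMS` constant above puts a hard per-peer cap on concurrently open inbound substreams; the hunk that follows rejects the 33rd with `RPCError::HandlerRejected` and shuts the connection down. The bookkeeping reduces to something like this (types are stand-ins, not the real handler):

```rust
use std::collections::HashMap;

const MAX_INBOUND_SUBSTREAMS: usize = 32;

struct Handler {
    inbound_substreams: HashMap<usize, &'static str>, // id -> state (stand-in)
    next_id: usize,
}

#[derive(Debug, PartialEq)]
enum Outcome {
    Accepted(usize),
    Rejected, // maps to RPCError::HandlerRejected + shutdown in the real code
}

impl Handler {
    fn on_inbound_substream(&mut self) -> Outcome {
        if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
            let id = self.next_id;
            self.next_id += 1;
            self.inbound_substreams.insert(id, "idle");
            Outcome::Accepted(id)
        } else {
            Outcome::Rejected
        }
    }
}

fn main() {
    let mut h = Handler { inbound_substreams: HashMap::new(), next_id: 0 };
    for _ in 0..MAX_INBOUND_SUBSTREAMS {
        assert!(matches!(h.on_inbound_substream(), Outcome::Accepted(_)));
    }
    // The 33rd simultaneous inbound substream is refused.
    assert_eq!(h.on_inbound_substream(), Outcome::Rejected);
}
```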
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); @@ -241,7 +244,7 @@ where // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: req.protocol(), id, })); @@ -265,7 +268,7 @@ where self.dial_queue.push((id, req)); } _ => self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: req.protocol(), id, })), @@ -339,23 +342,32 @@ where // store requests that expect responses if expected_responses > 0 { - // Store the stream and tag the output. - let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = InboundState::Idle(substream); - self.inbound_substreams.insert( - self.current_inbound_substream_id, - InboundInfo { - state: awaiting_stream, - pending_items: VecDeque::with_capacity(expected_responses as usize), - delay_key: Some(delay_key), - protocol: req.protocol(), - request_start_time: Instant::now(), - remaining_chunks: expected_responses, - }, - ); + if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { + // Store the stream and tag the output. + let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = InboundState::Idle(substream); + self.inbound_substreams.insert( + self.current_inbound_substream_id, + InboundInfo { + state: awaiting_stream, + pending_items: VecDeque::with_capacity(expected_responses as usize), + delay_key: Some(delay_key), + protocol: req.protocol(), + request_start_time: Instant::now(), + remaining_chunks: expected_responses, + }, + ); + } else { + self.events_out.push(Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: req.protocol(), + error: RPCError::HandlerRejected, + })); + return self.shutdown(None); + } } // If we received a goodbye, shutdown the connection. @@ -382,7 +394,7 @@ where // accept outbound connections only if the handler is not deactivated if matches!(self.state, HandlerState::Deactivated) { self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto, id, })); @@ -671,7 +683,7 @@ where { // if the request was still active, report back to cancel it self.events_out.push(Err(HandlerErr::Inbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: info.protocol, id: *id, })); @@ -803,7 +815,7 @@ where // the handler is deactivated. 
Close the stream entry.get_mut().state = OutboundSubstreamState::Closing(substream); self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: entry.get().proto, id: entry.get().req_id, })) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 3688baf341b..5aae8652e75 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -43,3 +43,4 @@ lru_cache = { path = "../../common/lru_cache" } if-addrs = "0.6.4" strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } +derivative = "2.2.0" diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index eb40be960dd..4aa7c769244 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -42,6 +42,7 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::parking_lot::Mutex; use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; +use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; use lighthouse_network::{ @@ -51,7 +52,6 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; -use std::fmt; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -89,7 +89,7 @@ pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. -const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 16_384; +const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; /// The maximum number of queued `Attestation` objects that will be stored before we start dropping /// them. @@ -331,17 +331,13 @@ impl DuplicateCache { } /// An event to be processed by the manager task. +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct WorkEvent { drop_during_sync: bool, work: Work, } -impl fmt::Debug for WorkEvent { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - impl WorkEvent { /// Create a new `Work` event for some unaggregated attestation. pub fn unaggregated_attestation( @@ -615,7 +611,8 @@ impl std::convert::From> for WorkEvent { } /// A consensus message (or multiple) from the network that requires processing. -#[derive(Debug)] +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub enum Work { GossipAttestation { message_id: MessageId, @@ -872,6 +869,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); + let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); @@ -1113,6 +1111,9 @@ impl BeaconProcessor { // Check exits last since our validators don't get rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { self.spawn_worker(item, toolbox); + // Handle backfill sync chain segments. 
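(The `else if` arm this comment introduces continues directly below.) Backfill chain segments now get their own FIFO queue, drained after nearly everything else, so historical backfill cannot starve gossip or range-sync processing. The scheduler's shape, boiled down (queue names are illustrative):

```rust
use std::collections::VecDeque;

/// Drain order encodes priority: a freed worker always takes from the
/// first non-empty queue, so backfill segments (last) run only when no
/// higher-priority work is pending.
fn next_work(queues: &mut [VecDeque<&'static str>]) -> Option<&'static str> {
    queues.iter_mut().find_map(|q| q.pop_front())
}

fn main() {
    let mut queues = [
        VecDeque::from(vec!["gossip_block"]),
        VecDeque::from(vec!["range_sync_segment"]),
        VecDeque::from(vec!["backfill_segment"]),
    ];
    assert_eq!(next_work(&mut queues), Some("gossip_block"));
    assert_eq!(next_work(&mut queues), Some("range_sync_segment"));
    // Backfill is only reached once everything above it is drained.
    assert_eq!(next_work(&mut queues), Some("backfill_segment"));
    assert_eq!(next_work(&mut queues), None);
}
```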
+ } else if let Some(item) = backfill_chain_segment.pop() { + self.spawn_worker(item, toolbox); // This statement should always be the final else statement. } else { // Let the journal know that a worker is freed and there's nothing else @@ -1198,9 +1199,15 @@ impl BeaconProcessor { sync_contribution_queue.push(work) } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), - Work::ChainSegment { .. } => { - chain_segment_queue.push(work, work_id, &self.log) - } + Work::ChainSegment { ref process_id, .. } => match process_id { + ChainSegmentProcessId::RangeBatchId { .. } + | ChainSegmentProcessId::ParentLookup { .. } => { + chain_segment_queue.push(work, work_id, &self.log) + } + ChainSegmentProcessId::BackSyncBatchId { .. } => { + backfill_chain_segment.push(work, work_id, &self.log) + } + }, Work::Status { .. } => status_queue.push(work, work_id, &self.log), Work::BlocksByRangeRequest { .. } => { bbrange_queue.push(work, work_id, &self.log) @@ -1250,6 +1257,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL, + backfill_chain_segment.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_EXIT_QUEUE_TOTAL, gossip_voluntary_exit_queue.len() as i64, @@ -1344,6 +1355,7 @@ impl BeaconProcessor { "worker" => worker_id, ); + let sub_executor = executor.clone(); executor.spawn_blocking( move || { let _worker_timer = worker_timer; @@ -1520,7 +1532,15 @@ impl BeaconProcessor { peer_id, request_id, request, - } => worker.handle_blocks_by_range_request(peer_id, request_id, request), + } => { + return worker.handle_blocks_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + } /* * Processing of blocks by roots requests from other peers. */ @@ -1528,7 +1548,15 @@ impl BeaconProcessor { peer_id, request_id, request, - } => worker.handle_blocks_by_root_request(peer_id, request_id, request), + } => { + return worker.handle_blocks_by_root_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + } Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 0f97bc79443..1c9d323576d 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -20,7 +20,7 @@ use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; -use tokio::runtime::Runtime; +use tokio::runtime::Handle; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, @@ -324,20 +324,19 @@ impl TestRig { .unwrap(); } - fn runtime(&mut self) -> Arc { + fn handle(&mut self) -> Handle { self.environment .as_mut() .unwrap() .core_context() .executor - .runtime() - .upgrade() + .handle() .unwrap() } /// Assert that the `BeaconProcessor` doesn't produce any events in the given `duration`. pub fn assert_no_events_for(&mut self, duration: Duration) { - self.runtime().block_on(async { + self.handle().block_on(async { tokio::select! 
{ _ = tokio::time::sleep(duration) => (), event = self.work_journal_rx.recv() => panic!( @@ -360,7 +359,7 @@ impl TestRig { .iter() .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); - let (events, worker_freed_remaining) = self.runtime().block_on(async { + let (events, worker_freed_remaining) = self.handle().block_on(async { let mut events = Vec::with_capacity(expected.len()); let mut worker_freed_remaining = expected.len(); @@ -415,7 +414,7 @@ impl TestRig { /// We won't attempt to listen for any more than `expected.len()` events. As such, it makes sense /// to use the `NOTHING_TO_DO` event to ensure that execution has completed. pub fn assert_event_journal_with_timeout(&mut self, expected: &[&str], timeout: Duration) { - let events = self.runtime().block_on(async { + let events = self.handle().block_on(async { let mut events = Vec::with_capacity(expected.len()); let drain_future = async { diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index f79a655745f..2d2196b9e99 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -1,4 +1,4 @@ -use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; +use crate::beacon_processor::{worker::FUTURE_SLOT_TOLERANCE, SendOnDrop}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; @@ -9,6 +9,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; +use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -122,38 +123,71 @@ impl Worker { /// Handle a `BlocksByRoot` request from the peer. pub fn handle_blocks_by_root_request( - &self, + self, + executor: TaskExecutor, + send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, request: BlocksByRootRequest, ) { - let mut send_block_count = 0; - for root in request.block_roots.iter() { - if let Ok(Some(block)) = self.chain.get_block_checking_early_attester_cache(root) { - self.send_response( - peer_id, - Response::BlocksByRoot(Some(Box::new(block))), - request_id, - ); - send_block_count += 1; - } else { - debug!(self.log, "Peer requested unknown block"; + // Fetching blocks is async because it may have to hit the execution layer for payloads. 
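(The `executor.spawn` call this comment introduces follows directly below.) This is the pivotal change in `rpc_methods.rs`: the worker now consumes `self`, moves into a spawned async task because block loading can hit the execution layer, and carries the `send_idle_on_drop` guard with it, so the beacon processor only frees the worker slot once the whole response stream is done. The drop-guard idea in isolation (my own `SendOnDrop` stand-in, assuming `tokio` with `macros`/`rt`/`sync` features):

```rust
use tokio::sync::mpsc;

/// A guard that notifies the scheduler when dropped, even on early return
/// or panic unwind: the same role `SendOnDrop` plays for worker-idle events.
struct SendOnDrop {
    tx: mpsc::UnboundedSender<&'static str>,
}

impl Drop for SendOnDrop {
    fn drop(&mut self) {
        let _ = self.tx.send("worker_idle");
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let guard = SendOnDrop { tx };

    // The blocking worker hands the guard to an async task; the scheduler
    // won't see "worker_idle" until the whole response has been streamed.
    let handle = tokio::spawn(async move {
        // ... load blocks, send responses ...
        drop(guard); // explicit, mirroring `drop(send_on_drop)` in the diff
    });

    handle.await.unwrap();
    assert_eq!(rx.recv().await, Some("worker_idle"));
}
```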
+ executor.spawn( + async move { + let mut send_block_count = 0; + for root in request.block_roots.iter() { + match self + .chain + .get_block_checking_early_attester_cache(root) + .await + { + Ok(Some(block)) => { + self.send_response( + peer_id, + Response::BlocksByRoot(Some(Box::new(block))), + request_id, + ); + send_block_count += 1; + } + Ok(None) => { + debug!( + self.log, + "Peer requested unknown block"; + "peer" => %peer_id, + "request_root" => ?root + ); + } + Err(e) => { + debug!( + self.log, + "Error fetching block for peer"; + "peer" => %peer_id, + "request_root" => ?root, + "error" => ?e, + ); + } + } + } + debug!( + self.log, + "Received BlocksByRoot Request"; "peer" => %peer_id, - "request_root" => ?root); - } - } - debug!(self.log, "Received BlocksByRoot Request"; - "peer" => %peer_id, - "requested" => request.block_roots.len(), - "returned" => send_block_count); + "requested" => request.block_roots.len(), + "returned" => send_block_count + ); - // send stream termination - self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + // send stream termination + self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + drop(send_on_drop); + }, + "load_blocks_by_root_blocks", + ) } /// Handle a `BlocksByRange` request from the peer. pub fn handle_blocks_by_range_request( - &self, + self, + executor: TaskExecutor, + send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, mut req: BlocksByRangeRequest, @@ -228,54 +262,84 @@ impl Worker { // remove all skip slots let block_roots = block_roots.into_iter().flatten().collect::>(); - let mut blocks_sent = 0; - for root in block_roots { - if let Ok(Some(block)) = self.chain.store.get_block(&root) { - // Due to skip slots, blocks could be out of the range, we ensure they are in the - // range before sending - if block.slot() >= req.start_slot - && block.slot() < req.start_slot + req.count * req.step - { - blocks_sent += 1; - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(Some(Box::new(block))), - id: request_id, - }); + // Fetching blocks is async because it may have to hit the execution layer for payloads. 
+ executor.spawn( + async move { + let mut blocks_sent = 0; + + for root in block_roots { + match self.chain.get_block(&root).await { + Ok(Some(block)) => { + // Due to skip slots, blocks could be out of the range, we ensure they + // are in the range before sending + if block.slot() >= req.start_slot + && block.slot() < req.start_slot + req.count * req.step + { + blocks_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(Some(Box::new(block))), + id: request_id, + }); + } + } + Ok(None) => { + error!( + self.log, + "Block in the chain is not in the store"; + "request_root" => ?root + ); + break; + } + Err(e) => { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + break; + } + } } - } else { - error!(self.log, "Block in the chain is not in the store"; - "request_root" => ?root); - } - } - let current_slot = self - .chain - .slot() - .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - if blocks_sent < (req.count as usize) { - debug!(self.log, "BlocksByRange Response processed"; - "peer" => %peer_id, - "msg" => "Failed to return all requested blocks", - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blocks_sent); - } else { - debug!(self.log, "BlocksByRange Response processed"; - "peer" => %peer_id, - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blocks_sent); - } + if blocks_sent < (req.count as usize) { + debug!( + self.log, + "BlocksByRange Response processed"; + "peer" => %peer_id, + "msg" => "Failed to return all requested blocks", + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blocks_sent + ); + } else { + debug!( + self.log, + "BlocksByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blocks_sent + ); + } - // send the stream terminator - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(None), - id: request_id, - }); + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(None), + id: request_id, + }); + drop(send_on_drop); + }, + "load_blocks_by_range_blocks", + ); } } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 082808f88e5..943ee9cdaf7 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -138,7 +138,7 @@ impl Worker { let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match self.process_backfill_blocks(&downloaded_blocks) { + match self.process_backfill_blocks(downloaded_blocks) { (_, Ok(_)) => { debug!(self.log, "Backfill batch processed"; "batch_epoch" => epoch, @@ -223,9 +223,10 @@ impl Worker { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. 
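(A note on the `BlocksByRange` body above; the backfill helper this doc comment describes continues below.) One invariant preserved through the async rewrite: block roots are gathered over a slot range that may contain skip slots, so each block loaded by root is re-checked against the request bounds before being sent. The check, isolated with plain `u64` slots for brevity:

```rust
/// Because roots are looked up per slot and skip slots repeat the previous
/// root, a loaded block can fall outside the requested window; it is
/// filtered here (mirroring the condition in the hunk above).
fn in_requested_range(slot: u64, start_slot: u64, count: u64, step: u64) -> bool {
    slot >= start_slot && slot < start_slot + count * step
}

fn main() {
    // Request: 8 slots starting at 100, step 1.
    assert!(in_requested_range(100, 100, 8, 1));
    assert!(in_requested_range(107, 100, 8, 1));
    // A block from before the range (reached via a skip slot) is filtered,
    // as is anything at or past the exclusive end.
    assert!(!in_requested_range(99, 100, 8, 1));
    assert!(!in_requested_range(108, 100, 8, 1));
}
```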
fn process_backfill_blocks( &self, - blocks: &[SignedBeaconBlock], + blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { - match self.chain.import_historical_block_batch(blocks) { + let blinded_blocks = blocks.into_iter().map(Into::into).collect(); + match self.chain.import_historical_block_batch(blinded_blocks) { Ok(imported_blocks) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_SUCCESS_TOTAL, diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 02c491cb019..cc0165131c2 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -161,6 +161,10 @@ lazy_static! { "beacon_processor_chain_segment_queue_total", "Count of chain segments from the rpc waiting to be verified." ); + pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_backfill_chain_segment_queue_total", + "Count of backfill chain segments from the rpc waiting to be verified." + ); pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( "beacon_processor_chain_segment_success_total", "Total number of chain segments successfully processed." diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index e76c037dad4..be750e25f02 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -11,7 +11,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::network_context::SyncNetworkContext; -use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchState}; +use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchProcessingResult, BatchState}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -606,7 +606,7 @@ impl BackFillSync { } }; - if let Err(e) = batch.processing_completed(true) { + if let Err(e) = batch.processing_completed(BatchProcessingResult::Success) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } // If the processed batch was not empty, we can validate previous unvalidated @@ -664,7 +664,9 @@ impl BackFillSync { }; debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - match batch.processing_completed(false) { + match batch.processing_completed(BatchProcessingResult::Failed { + count_attempt: peer_action.is_some(), + }) { Err(e) => { // Batch was in the wrong state self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 53480db88ef..0003db6ab08 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -327,10 +327,17 @@ impl SyncManager { if let Some(was_updated) = update_sync_status { let is_connected = self.network_globals.peers.read().is_connected(peer_id); if was_updated { - debug!(self.log, "Peer transitioned sync state"; "peer_id" => %peer_id, "new_state" => rpr, - "our_head_slot" => local_sync_info.head_slot, "out_finalized_epoch" => local_sync_info.finalized_epoch, - "their_head_slot" => remote_sync_info.head_slot, "their_finalized_epoch" => 
remote_sync_info.finalized_epoch, - "is_connected" => is_connected); + debug!( + self.log, + "Peer transitioned sync state"; + "peer_id" => %peer_id, + "new_state" => rpr, + "our_head_slot" => local_sync_info.head_slot, + "our_finalized_epoch" => local_sync_info.finalized_epoch, + "their_head_slot" => remote_sync_info.head_slot, + "their_finalized_epoch" => remote_sync_info.finalized_epoch, + "is_connected" => is_connected + ); // A peer has transitioned its sync state. If the new state is "synced" we // inform the backfill sync that a new synced peer has joined us. diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 614bf57dd00..aaebe022c70 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -72,6 +72,11 @@ pub struct WrongState(pub(crate) String); /// Auxiliary type alias for readability. type IsFailed = bool; +pub enum BatchProcessingResult { + Success, + Failed { count_attempt: bool }, +} + /// A segment of a chain. pub struct BatchInfo { /// Start slot of the batch. @@ -80,6 +85,8 @@ pub struct BatchInfo { end_slot: Slot, /// The `Attempts` that have been made and failed to send us this batch. failed_processing_attempts: Vec, /// Number of processing attempts that have failed but we do not count. other_failed_processing_attempts: u8, /// The number of download retries this batch has undergone due to a failed request. failed_download_attempts: Vec, /// State of the batch. @@ -143,6 +150,7 @@ impl BatchInfo { end_slot, failed_processing_attempts: Vec::new(), failed_download_attempts: Vec::new(), + other_failed_processing_attempts: 0, state: BatchState::AwaitingDownload, marker: std::marker::PhantomData, } @@ -348,23 +356,33 @@ impl BatchInfo { } #[must_use = "Batch may have failed"] - pub fn processing_completed(&mut self, was_sucessful: bool) -> Result { + pub fn processing_completed( + &mut self, + processing_result: BatchProcessingResult, + ) -> Result { match self.state.poison() { BatchState::Processing(attempt) => { - self.state = if !was_sucessful { - // register the failed attempt - self.failed_processing_attempts.push(attempt); - - // check if the batch can be downloaded again - if self.failed_processing_attempts.len() - >= B::max_batch_processing_attempts() as usize - { - BatchState::Failed - } else { - BatchState::AwaitingDownload + self.state = match processing_result { + BatchProcessingResult::Success => BatchState::AwaitingValidation(attempt), + BatchProcessingResult::Failed { count_attempt } => { + if count_attempt { + // register the failed attempt + self.failed_processing_attempts.push(attempt); + + // check if the batch can be downloaded again + if self.failed_processing_attempts.len() + >= B::max_batch_processing_attempts() as usize + { + BatchState::Failed + } else { + BatchState::AwaitingDownload + } + } else { + self.other_failed_processing_attempts = + self.other_failed_processing_attempts.saturating_add(1); + BatchState::AwaitingDownload + } } - } else { - BatchState::AwaitingValidation(attempt) }; Ok(self.state.is_failed()) } @@ -451,6 +469,10 @@ impl slog::KV for BatchInfo { )?; serializer.emit_usize("downloaded", self.failed_download_attempts.len())?; serializer.emit_usize("processed", self.failed_processing_attempts.len())?; + serializer.emit_u8( + "processed_no_penalty", + self.other_failed_processing_attempts, + )?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } } diff --git
a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 3f816472178..88837d0e127 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,4 +1,4 @@ -use super::batch::{BatchInfo, BatchState}; +use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use crate::beacon_processor::ChainSegmentProcessId; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; @@ -26,8 +26,9 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// A return type for functions that act on a `Chain` which informs the caller whether the chain /// has been completed and should be removed or to be kept if further processing is /// required. -#[must_use = "Should be checked, since a failed chain must be removed. A chain that requested - being removed and continued is now in an inconsistent state"] +/// +/// Should be checked, since a failed chain must be removed. A chain that requested being removed +/// and continued is now in an inconsistent state. pub type ProcessingResult = Result; /// Reasons for removing a chain @@ -462,7 +463,7 @@ impl SyncingChain { )) })?; - batch.processing_completed(true)?; + batch.processing_completed(BatchProcessingResult::Success)?; // If the processed batch was not empty, we can validate previous unvalidated // blocks. if *was_non_empty { @@ -511,9 +512,12 @@ impl SyncingChain { batch.state(), )) })?; - debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, + debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, "peer_penalty" => ?peer_action, "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - if batch.processing_completed(false)? { + + if batch.processing_completed(BatchProcessingResult::Failed { + count_attempt: peer_action.is_some(), + })? 
{ // check that we have not exceeded the re-process retry counter // If a batch has exceeded the invalid batch lookup attempts limit, it means // that it is likely all peers in this chain are sending invalid batches diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 512f7a989a5..7ddfc3f70aa 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -407,7 +407,6 @@ impl ChainCollection { local_info: &SyncInfo, awaiting_head_peers: &mut HashMap, ) { - debug!(self.log, "Purging chains"); let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -416,10 +415,7 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - let is = - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root); - debug!(log_ref, "Chain is outdated {}", is); - is + target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) }; // Retain only head peers that remain relevant diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index b4a27c23c7a..31122d59a1a 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,7 +8,7 @@ mod chain_collection; mod range; mod sync_type; -pub use batch::{BatchConfig, BatchInfo, BatchState}; +pub use batch::{BatchConfig, BatchInfo, BatchProcessingResult, BatchState}; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 11537e6ec3b..2f7fba45401 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -90,6 +90,8 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let att_participation_flags = get_attestation_participation_flag_indices(state, &att.data, inclusion_delay, spec) .ok()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, spec).ok()?; let fresh_validators_rewards = attesting_indices .iter() @@ -98,7 +100,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let participation = participation_list.get(index)?; let base_reward = - altair::get_base_reward(state, index, total_active_balance, spec).ok()?; + altair::get_base_reward(state, index, base_reward_per_increment, spec).ok()?; for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { if att_participation_flags.contains(&flag_index)
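The hoist above is worth a note: `base_reward_per_increment` depends only on the total active balance, so it can be computed once per attestation rather than once per attesting index. A simplified, hypothetical sketch of the idea (the constants and the integer square root are stand-ins for the spec versions, which use checked arithmetic):

```rust
// Hypothetical, simplified picture of the change: the per-increment base
// reward is loop-invariant, so compute it once and reuse it per validator.

const BASE_REWARD_FACTOR: u64 = 64;
const EFFECTIVE_BALANCE_INCREMENT: u64 = 1_000_000_000; // 1 ETH in Gwei

fn base_reward_per_increment(total_active_balance: u64) -> u64 {
    EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR / integer_sqrt(total_active_balance)
}

fn base_reward(effective_balance: u64, per_increment: u64) -> u64 {
    (effective_balance / EFFECTIVE_BALANCE_INCREMENT) * per_increment
}

// Approximate integer square root, adequate for illustration.
fn integer_sqrt(n: u64) -> u64 {
    (n as f64).sqrt() as u64
}

fn main() {
    let total_active_balance = 10_000_000u64 * EFFECTIVE_BALANCE_INCREMENT;
    // Hoisted: computed once, not once per attesting index.
    let per_increment = base_reward_per_increment(total_active_balance);
    for effective_balance in [32_000_000_000u64, 31_000_000_000] {
        println!("base reward: {}", base_reward(effective_balance, per_increment));
    }
}
```

diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index eef09631ebb..70eb31cd0fb 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -8,9 +8,7 @@ mod sync_aggregate_id; pub use attestation::AttMaxCover; pub use max_cover::MaxCover; -pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase, -}; +pub use persistence::{PersistedOperationPool, PersistedOperationPoolAltair}; use crate::sync_aggregate_id::SyncAggregateId; use attestation_id::AttestationId; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index acab2db60e4..07697860971 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++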
b/beacon_node/operation_pool/src/persistence.rs @@ -17,7 +17,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { } impl PersistedOperationPool { - /// Convert an `OperationPool` into serializable form. Always converts to - /// `PersistedOperationPool::Altair` because the v3 to v4 database schema migration ensures - /// the op pool is always persisted as the Altair variant. + /// Convert an `OperationPool` into serializable form. pub fn from_operation_pool(operation_pool: &OperationPool) -> Self { let attestations = operation_pool .attestations @@ -114,14 +112,6 @@ impl PersistedOperationPool { .collect(), ); let op_pool = match self { - PersistedOperationPool::Base(_) => OperationPool { - attestations, - sync_contributions: <_>::default(), - attester_slashings, - proposer_slashings, - voluntary_exits, - _phantom: Default::default(), - }, PersistedOperationPool::Altair(_) => { let sync_contributions = RwLock::new(self.sync_contributions()?.iter().cloned().collect()); @@ -138,44 +128,9 @@ impl PersistedOperationPool { }; Ok(op_pool) } - - /// Convert the `PersistedOperationPool::Base` variant to `PersistedOperationPool::Altair` by - /// setting `sync_contributions` to its default. - pub fn base_to_altair(self) -> Self { - match self { - PersistedOperationPool::Base(_) => { - PersistedOperationPool::Altair(PersistedOperationPoolAltair { - attestations: self.attestations().to_vec(), - sync_contributions: <_>::default(), - attester_slashings: self.attester_slashings().to_vec(), - proposer_slashings: self.proposer_slashings().to_vec(), - voluntary_exits: self.voluntary_exits().to_vec(), - }) - } - PersistedOperationPool::Altair(_) => self, - } - } -} - -/// This `StoreItem` implementation is necessary for migrating the `PersistedOperationPool` -/// in the v3 to v4 database schema migration. -impl StoreItem for PersistedOperationPoolBase { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } } -/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair` -/// because the v3 to v4 database schema migration ensures the persisted op pool is always stored -/// in the Altair format. +/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { DBColumn::OpPool diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 6e4c2996a62..3102018e3e2 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -377,6 +377,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("1000") .takes_value(true) ) + .arg( + Arg::with_name("eth1-cache-follow-distance") + .long("eth1-cache-follow-distance") + .value_name("BLOCKS") + .help("Specifies the distance between the Eth1 chain head and the last block which \ + should be imported into the cache. 
Setting this value lower can help \ + compensate for irregular Proof-of-Work block times, but setting it too low \ + can make the node vulnerable to re-orgs.") + .takes_value(true) + ) .arg( Arg::with_name("slots-per-restore-point") .long("slots-per-restore-point") @@ -686,4 +696,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { experimental as it may obscure performance issues.") .takes_value(false) ) + .arg( + Arg::with_name("fork-choice-before-proposal-timeout") + .long("fork-choice-before-proposal-timeout") + .help("Set the maximum number of milliseconds to wait for fork choice before \ + proposing a block. You can prevent waiting at all by setting the timeout \ + to 0, however you risk proposing atop the wrong parent block.") + .default_value("250") + .takes_value(true) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 461f230d280..db765100c3a 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -236,6 +236,12 @@ pub fn get_config( client_config.eth1.purge_cache = true; } + if let Some(follow_distance) = + clap_utils::parse_optional(cli_args, "eth1-cache-follow-distance")? + { + client_config.eth1.cache_follow_distance = Some(follow_distance); + } + if cli_args.is_present("merge") || cli_args.is_present("execution-endpoints") { let mut el_config = execution_layer::Config::default(); @@ -581,6 +587,12 @@ pub fn get_config( client_config.chain.enable_lock_timeouts = false; } + if let Some(timeout) = + clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? + { + client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; + } + Ok(client_config) } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 1147d52c436..30ee66074f8 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -41,6 +41,10 @@ pub enum Error { computed: Hash256, }, BlockReplayError(BlockReplayError), + AddPayloadLogicError, + ResyncRequiredForExecutionPayloadSeparation, + SlotClockUnavailableForMigration, + V9MigrationFailure(Hash256), } pub trait HandleUnavailable { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 2c31f7cf25d..fe66a176b67 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -18,8 +18,8 @@ use crate::metadata::{ }; use crate::metrics; use crate::{ - get_key_for_col, DBColumn, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, - StoreOp, + get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, + PartialBeaconState, StoreItem, StoreOp, }; use leveldb::iterator::LevelDBIterator; use lru::LruCache; @@ -89,6 +89,8 @@ pub enum HotColdDBError { MissingHotStateSummary(Hash256), MissingEpochBoundaryState(Hash256), MissingSplitState(Hash256, Slot), + MissingExecutionPayload(Hash256), + MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, HotStateSummaryError(BeaconStateError), RestorePointDecodeError(ssz::DecodeError), @@ -185,6 +187,21 @@ impl HotColdDB, LevelDB> { } } + // Load the previous split slot from the database (if any). This ensures we can + // stop and restart correctly. This needs to occur *before* running any migrations + // because some migrations load states and depend on the split. + if let Some(split) = db.load_split()? 
{ + *db.split.write() = split; + *db.anchor_info.write() = db.load_anchor_info()?; + + info!( + db.log, + "Hot-Cold DB initialized"; + "split_slot" => split.slot, + "split_state" => ?split.state_root + ); + } + // Ensure that the schema version of the on-disk database matches the software. // If the version is mismatched, an automatic migration will be attempted. let db = Arc::new(db); @@ -206,20 +223,6 @@ impl HotColdDB, LevelDB> { } db.store_config()?; - // Load the previous split slot from the database (if any). This ensures we can - // stop and restart correctly. - if let Some(split) = db.load_split()? { - *db.split.write() = split; - *db.anchor_info.write() = db.load_anchor_info()?; - - info!( - db.log, - "Hot-Cold DB initialized"; - "split_slot" => split.slot, - "split_state" => ?split.state_root - ); - } - // Run a garbage collection pass. db.remove_garbage()?; @@ -263,53 +266,150 @@ impl, Cold: ItemStore> HotColdDB block: SignedBeaconBlock, ) -> Result<(), Error> { // Store on disk. - let op = self.block_as_kv_store_op(block_root, &block); - self.hot_db.do_atomically(vec![op])?; - + let mut ops = Vec::with_capacity(2); + let block = self.block_as_kv_store_ops(block_root, block, &mut ops)?; + self.hot_db.do_atomically(ops)?; // Update cache. self.block_cache.lock().put(*block_root, block); - Ok(()) } /// Prepare a signed beacon block for storage in the database. - pub fn block_as_kv_store_op( + /// + /// Return the original block for re-use after storage. It's passed by value so it can be + /// cracked open and have its payload extracted. + pub fn block_as_kv_store_ops( + &self, + key: &Hash256, + block: SignedBeaconBlock, + ops: &mut Vec, + ) -> Result, Error> { + // Split block into blinded block and execution payload. + let (blinded_block, payload) = block.into(); + + // Store blinded block. + self.blinded_block_as_kv_store_ops(key, &blinded_block, ops); + + // Store execution payload if present. + if let Some(ref execution_payload) = payload { + ops.push(execution_payload.as_kv_store_op(*key)); + } + + // Re-construct block. This should always succeed. + blinded_block + .try_into_full_block(payload) + .ok_or(Error::AddPayloadLogicError) + } + + /// Prepare a signed beacon block for storage in the database *without* its payload. + pub fn blinded_block_as_kv_store_ops( &self, key: &Hash256, - block: &SignedBeaconBlock, - ) -> KeyValueStoreOp { - // FIXME(altair): re-add block write/overhead metrics, or remove them + blinded_block: &SignedBeaconBlock>, + ops: &mut Vec, + ) { let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes()); - KeyValueStoreOp::PutKeyValue(db_key, block.as_ssz_bytes()) + ops.push(KeyValueStoreOp::PutKeyValue( + db_key, + blinded_block.as_ssz_bytes(), + )); + }
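Taken together, the write path above and the read path that follows implement a blinded-block storage scheme: each full block is split into a payload-less ("blinded") block plus an optional `ExecutionPayload`, stored under separate columns keyed by block root, and re-joined on read. A stripped-down, hypothetical model of the round trip (real types are far richer, and reconstruction is additionally gated on the finalization split):

```rust
// Minimal model of the split/re-join. `join` mirrors `try_into_full_block`,
// which fails if a payload is required but missing, or supplied but not
// required.

#[derive(Clone, Debug, PartialEq)]
struct ExecutionPayload(Vec<u8>);

#[derive(Clone, Debug, PartialEq)]
struct FullBlock {
    slot: u64,
    payload: Option<ExecutionPayload>, // None for pre-Bellatrix blocks
}

#[derive(Clone, Debug, PartialEq)]
struct BlindedBlock {
    slot: u64,
    expects_payload: bool,
}

fn split(block: FullBlock) -> (BlindedBlock, Option<ExecutionPayload>) {
    let blinded = BlindedBlock {
        slot: block.slot,
        expects_payload: block.payload.is_some(),
    };
    (blinded, block.payload)
}

fn join(blinded: BlindedBlock, payload: Option<ExecutionPayload>) -> Option<FullBlock> {
    (blinded.expects_payload == payload.is_some()).then(|| FullBlock {
        slot: blinded.slot,
        payload,
    })
}

fn main() {
    let block = FullBlock {
        slot: 100,
        payload: Some(ExecutionPayload(vec![0xde, 0xad])),
    };
    // Store: two separate writes (block column + payload column).
    let (blinded, payload) = split(block.clone());
    // Load: re-join the halves.
    assert_eq!(join(blinded.clone(), payload), Some(block));
    // A pruned payload makes full-block reads fail, as in
    // `MissingFullBlockExecutionPayloadPruned`.
    assert_eq!(join(blinded, None), None);
}
```

- /// Fetch a block from the store. - pub fn get_block(&self, block_root: &Hash256) -> Result>, Error> { + pub fn try_get_full_block( + &self, + block_root: &Hash256, + ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT); // Check the cache. if let Some(block) = self.block_cache.lock().get(block_root) { metrics::inc_counter(&metrics::BEACON_BLOCK_CACHE_HIT_COUNT); - return Ok(Some(block.clone())); + return Ok(Some(DatabaseBlock::Full(block.clone()))); } - let block = self.get_block_with(block_root, |bytes| { + // Load the blinded block. + let blinded_block = match self.get_blinded_block(block_root)?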
{ + Some(block) => block, + None => return Ok(None), + }; + + // If the block is after the split point then we should have the full execution payload + // stored in the database. Otherwise, just return the blinded block. + // Hold the split lock so that it can't change. + let split = self.split.read_recursive(); + + let block = if blinded_block.message().execution_payload().is_err() + || blinded_block.slot() >= split.slot + { + // Re-constructing the full block should always succeed here. + let full_block = self.make_full_block(block_root, blinded_block)?; + + // Add to cache. + self.block_cache.lock().put(*block_root, full_block.clone()); + + DatabaseBlock::Full(full_block) + } else { + DatabaseBlock::Blinded(blinded_block) + }; + drop(split); + + Ok(Some(block)) + } + + /// Fetch a full block with execution payload from the store. + pub fn get_full_block( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + match self.try_get_full_block(block_root)? { + Some(DatabaseBlock::Full(block)) => Ok(Some(block)), + Some(DatabaseBlock::Blinded(block)) => Err( + HotColdDBError::MissingFullBlockExecutionPayloadPruned(*block_root, block.slot()) + .into(), + ), + None => Ok(None), + } + } + + /// Get a schema V8 or earlier full block by reading it and its payload from disk. + pub fn get_full_block_prior_to_v9( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + self.get_block_with(block_root, |bytes| { SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) - })?; + }) + } - // Add to cache. - if let Some(ref block) = block { - self.block_cache.lock().put(*block_root, block.clone()); + /// Convert a blinded block into a full block by loading its execution payload if necessary. + pub fn make_full_block( + &self, + block_root: &Hash256, + blinded_block: SignedBeaconBlock>, + ) -> Result, Error> { + if blinded_block.message().execution_payload().is_ok() { + let execution_payload = self.get_execution_payload(block_root)?; + blinded_block.try_into_full_block(Some(execution_payload)) + } else { + blinded_block.try_into_full_block(None) } + .ok_or(Error::AddPayloadLogicError) + } - Ok(block) + pub fn get_blinded_block( + &self, + block_root: &Hash256, + ) -> Result>>, Error> { + self.get_block_with(block_root, |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) + }) } /// Fetch a block from the store, ignoring which fork variant it *should* be for. - pub fn get_block_any_variant( + pub fn get_block_any_variant>( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_block_with(block_root, SignedBeaconBlock::any_from_ssz_bytes) } @@ -317,11 +417,11 @@ impl, Cold: ItemStore> HotColdDB /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it /// were for a different fork. - pub fn get_block_with( + pub fn get_block_with>( &self, block_root: &Hash256, - decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, - ) -> Result>, Error> { + decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, + ) -> Result>, Error> { self.hot_db .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_bytes())? .map(|block_bytes| decoder(&block_bytes)) @@ -329,6 +429,15 @@ impl, Cold: ItemStore> HotColdDB .map_err(|e| e.into()) } + /// Load the execution payload for a block from disk. + pub fn get_execution_payload( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.get_item(block_root)? 
+ .ok_or_else(|| HotColdDBError::MissingExecutionPayload(*block_root).into()) + } + /// Determine whether a block exists in the database. pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db @@ -339,7 +448,9 @@ impl, Cold: ItemStore> HotColdDB pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().pop(block_root); self.hot_db - .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes()) + .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?; + self.hot_db + .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) } pub fn put_state_summary( @@ -550,24 +661,27 @@ impl, Cold: ItemStore> HotColdDB } /// Convert a batch of `StoreOp` to a batch of `KeyValueStoreOp`. - pub fn convert_to_kv_batch(&self, batch: &[StoreOp]) -> Result, Error> { + pub fn convert_to_kv_batch( + &self, + batch: Vec>, + ) -> Result, Error> { let mut key_value_batch = Vec::with_capacity(batch.len()); for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - key_value_batch.push(self.block_as_kv_store_op(block_root, block)); + self.block_as_kv_store_ops(&block_root, *block, &mut key_value_batch)?; } StoreOp::PutState(state_root, state) => { - self.store_hot_state(state_root, state, &mut key_value_batch)?; + self.store_hot_state(&state_root, state, &mut key_value_batch)?; } StoreOp::PutStateSummary(state_root, summary) => { - key_value_batch.push(summary.as_kv_store_op(*state_root)); + key_value_batch.push(summary.as_kv_store_op(state_root)); } StoreOp::PutStateTemporaryFlag(state_root) => { - key_value_batch.push(TemporaryFlag.as_kv_store_op(*state_root)); + key_value_batch.push(TemporaryFlag.as_kv_store_op(state_root)); } StoreOp::DeleteStateTemporaryFlag(state_root) => { @@ -592,17 +706,21 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(KeyValueStoreOp::DeleteKey(state_key)); } } + + StoreOp::DeleteExecutionPayload(block_root) => { + let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes()); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } } } Ok(key_value_batch) } pub fn do_atomically(&self, batch: Vec>) -> Result<(), Error> { + // Update the block cache whilst holding a lock, to ensure that the cache updates atomically + // with the database. let mut guard = self.block_cache.lock(); - self.hot_db - .do_atomically(self.convert_to_kv_batch(&batch)?)?; - for op in &batch { match op { StoreOp::PutBlock(block_root, block) => { @@ -622,8 +740,15 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteState(_, _) => (), + + StoreOp::DeleteExecutionPayload(_) => (), } } + + self.hot_db + .do_atomically(self.convert_to_kv_batch(batch)?)?; + drop(guard); + Ok(()) } @@ -887,34 +1012,33 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, end_block_hash: Hash256, - ) -> Result>, Error> { - let mut blocks: Vec> = - ParentRootBlockIterator::new(self, end_block_hash) - .map(|result| result.map(|(_, block)| block)) - // Include the block at the end slot (if any), it needs to be - // replayed in order to construct the canonical state at `end_slot`. - .filter(|result| { - result - .as_ref() - .map_or(true, |block| block.slot() <= end_slot) - }) - // Include the block at the start slot (if any). Whilst it doesn't need to be - // applied to the state, it contains a potentially useful state root. - // - // Return `true` on an `Err` so that the `collect` fails, unless the error is a - // `BlockNotFound` error and some blocks are intentionally missing from the DB. 
- // This complexity is unfortunately necessary to avoid loading the parent of the - // oldest known block -- we can't know that we have all the required blocks until we - // load a block with slot less than the start slot, which is impossible if there are - // no blocks with slot less than the start slot. - .take_while(|result| match result { - Ok(block) => block.slot() >= start_slot, - Err(Error::BlockNotFound(_)) => { - self.get_oldest_block_slot() == self.spec.genesis_slot - } - Err(_) => true, - }) - .collect::>()?; + ) -> Result>>, Error> { + let mut blocks = ParentRootBlockIterator::new(self, end_block_hash) + .map(|result| result.map(|(_, block)| block)) + // Include the block at the end slot (if any), it needs to be + // replayed in order to construct the canonical state at `end_slot`. + .filter(|result| { + result + .as_ref() + .map_or(true, |block| block.slot() <= end_slot) + }) + // Include the block at the start slot (if any). Whilst it doesn't need to be + // applied to the state, it contains a potentially useful state root. + // + // Return `true` on an `Err` so that the `collect` fails, unless the error is a + // `BlockNotFound` error and some blocks are intentionally missing from the DB. + // This complexity is unfortunately necessary to avoid loading the parent of the + // oldest known block -- we can't know that we have all the required blocks until we + // load a block with slot less than the start slot, which is impossible if there are + // no blocks with slot less than the start slot. + .take_while(|result| match result { + Ok(block) => block.slot() >= start_slot, + Err(Error::BlockNotFound(_)) => { + self.get_oldest_block_slot() == self.spec.genesis_slot + } + Err(_) => true, + }) + .collect::, _>>()?; blocks.reverse(); Ok(blocks) } @@ -926,7 +1050,7 @@ impl, Cold: ItemStore> HotColdDB fn replay_blocks( &self, state: BeaconState, - blocks: Vec>, + blocks: Vec>>, target_slot: Slot, state_root_iter: Option>>, state_root_strategy: StateRootStrategy, @@ -956,6 +1080,11 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Get a reference to the `ChainSpec` used by the database. + pub fn get_chain_spec(&self) -> &ChainSpec { + &self.spec + } + /// Fetch a copy of the current split slot from memory. pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 1b442cbc553..736585a72aa 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -1 +1,2 @@ pub mod beacon_state; +pub mod execution_payload; diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs new file mode 100644 index 00000000000..ddb9a446280 --- /dev/null +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -0,0 +1,17 @@ +use crate::{DBColumn, Error, StoreItem}; +use ssz::{Decode, Encode}; +use types::{EthSpec, ExecutionPayload}; + +impl StoreItem for ExecutionPayload { + fn db_column() -> DBColumn { + DBColumn::ExecPayload + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) 
+ } +} diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index d5448de9832..91097924785 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -3,7 +3,8 @@ use crate::{Error, HotColdDB, ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; use types::{ - typenum::Unsigned, BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock, Slot, + typenum::Unsigned, BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, + SignedBeaconBlock, Slot, }; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. @@ -188,7 +189,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, block_hash: Hash256, ) -> Result { let block = store - .get_block(&block_hash)? + .get_blinded_block(&block_hash)? .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; let state = store .get_state(&block.state_root(), Some(block.slot()))? @@ -272,7 +273,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } } - fn do_next(&mut self) -> Result)>, Error> { + #[allow(clippy::type_complexity)] + fn do_next( + &mut self, + ) -> Result>)>, Error> { // Stop once we reach the zero parent, otherwise we'll keep returning the genesis // block forever. if self.next_block_root.is_zero() { @@ -282,7 +286,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> let block = if self.decode_any_variant { self.store.get_block_any_variant(&block_root) } else { - self.store.get_block(&block_root) + self.store.get_blinded_block(&block_root) }? .ok_or(Error::BlockNotFound(block_root))?; self.next_block_root = block.message().parent_root(); @@ -294,7 +298,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator for ParentRootBlockIterator<'a, E, Hot, Cold> { - type Item = Result<(Hash256, SignedBeaconBlock), Error>; + type Item = Result<(Hash256, SignedBeaconBlock>), Error>; fn next(&mut self) -> Option { self.do_next().transpose() @@ -322,10 +326,10 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, T, } } - fn do_next(&mut self) -> Result>, Error> { + fn do_next(&mut self) -> Result>>, Error> { if let Some(result) = self.roots.next() { let (root, _slot) = result?; - self.roots.inner.store.get_block(&root) + self.roots.inner.store.get_blinded_block(&root) } else { Ok(None) } @@ -335,7 +339,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, T, impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator for BlockIterator<'a, T, Hot, Cold> { - type Item = Result, Error>; + type Item = Result>, Error>; fn next(&mut self) -> Option { self.do_next().transpose() diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 4a47353cba1..86bd4ffaccd 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -197,6 +197,28 @@ impl KeyValueStore for LevelDB { }), ) } + + /// Iterate through all keys in a particular column.
+ fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + let start_key = + BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + + let iter = self.db.keys_iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |key| key.matches_column(column)) + .map(move |bytes_key| { + let key = + bytes_key + .remove_column(column) + .ok_or(HotColdDBError::IterationError { + unexpected_key: bytes_key, + })?; + Ok(key) + }), + ) + } } impl ItemStore for LevelDB {} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index bc8f62dd250..613c2e416ca 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -43,6 +43,7 @@ use strum::{EnumString, IntoStaticStr}; pub use types::*; pub type ColumnIter<'a> = Box), Error>> + 'a>; +pub type ColumnKeyIter<'a> = Box> + 'a>; pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. @@ -77,11 +78,17 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Compact the database, freeing space used by deleted items. fn compact(&self) -> Result<(), Error>; - /// Iterate through all values in a particular column. + /// Iterate through all keys and values in a particular column. fn iter_column(&self, _column: DBColumn) -> ColumnIter { // Default impl for non LevelDB databases Box::new(std::iter::empty()) } + + /// Iterate through all keys in a particular column. + fn iter_column_keys(&self, _column: DBColumn) -> ColumnKeyIter { + // Default impl for non LevelDB databases + Box::new(std::iter::empty()) + } } pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { @@ -152,6 +159,7 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), DeleteState(Hash256, Option), + DeleteExecutionPayload(Hash256), } /// A unique column identifier. @@ -172,6 +180,9 @@ pub enum DBColumn { /// and then made non-temporary by the deletion of their state root from this column. #[strum(serialize = "bst")] BeaconStateTemporary, + /// Execution payloads for blocks more recent than the finalized checkpoint. + #[strum(serialize = "exp")] + ExecPayload, /// For persisting in-memory state to the database. #[strum(serialize = "bch")] BeaconChain, @@ -198,6 +209,12 @@ pub enum DBColumn { DhtEnrs, } +/// A block from the database, which might have an execution payload or not. +pub enum DatabaseBlock { + Full(SignedBeaconBlock), + Blinded(SignedBeaconBlock>), +} + impl DBColumn { pub fn as_str(self) -> &'static str { self.into() diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 6b808974e71..7db2652f2cd 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -76,7 +76,7 @@ where None } else { Some( - self.get_block(&block_root)? + self.get_blinded_block(&block_root)? .ok_or(Error::BlockNotFound(block_root))?, ) }; diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index d0f449bab83..bf2acaf5bb5 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,31 +3,51 @@ //! This service allows task execution on the beacon node for various functionality. 
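The timer hunk below replaces a fixed-period `interval_at` ticker with a loop that recomputes the duration to the next slot boundary on every iteration and sleeps exactly that long. As a rough, hypothetical sketch of the pattern (12-second slots and a wall-clock slot origin are assumed; the real code asks the beacon chain's slot clock):

```rust
// Sleep-until-next-slot loop: unlike a fixed-period ticker, this
// self-corrects for drift and has no period-of-zero panic.

use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::time::sleep;

const MILLIS_PER_SLOT: u128 = 12_000; // assumed; Lighthouse reads this from the spec

fn duration_to_next_slot() -> Duration {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("clock before epoch")
        .as_millis();
    let into_slot = now_ms % MILLIS_PER_SLOT;
    Duration::from_millis((MILLIS_PER_SLOT - into_slot) as u64)
}

#[tokio::main]
async fn main() {
    for _ in 0..3 {
        sleep(duration_to_next_slot()).await;
        // In the real service this is where `per_slot_task` runs, moved onto
        // a blocking thread via `spawn_blocking_handle`.
        println!("slot boundary");
    }
}
```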
use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::info; +use slog::{debug, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; -use std::time::Duration; -use tokio::time::{interval_at, Instant}; +use tokio::time::sleep; /// Spawns a timer service which periodically executes tasks for the beacon chain pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, - seconds_per_slot: u64, ) -> Result<(), &'static str> { let log = executor.log(); - let start_instant = Instant::now() - + beacon_chain - .slot_clock - .duration_to_next_slot() - .ok_or("slot_notifier unable to determine time to next slot")?; + let per_slot_executor = executor.clone(); - // Warning: `interval_at` panics if `seconds_per_slot` = 0. - let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot)); let timer_future = async move { + let log = per_slot_executor.log().clone(); loop { - interval.tick().await; - beacon_chain.per_slot_task(); + let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) => duration, + None => { + warn!(log, "Unable to determine duration to next slot"); + return; + } + }; + + sleep(duration_to_next_slot).await; + + let chain = beacon_chain.clone(); + if let Some(handle) = per_slot_executor + .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") + { + if let Err(e) = handle.await { + warn!( + log, + "Per slot task failed"; + "info" => ?e + ); + } + } else { + debug!( + log, + "Per slot task timer stopped"; + "info" => "shutting down" + ); + break; + } } }; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 22d279d8b77..e2a2eb37ebf 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -38,10 +38,12 @@ * [Validator Graffiti](./graffiti.md) * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) + * [Database Migrations](./database-migrations.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) * [Pre-Releases](./advanced-pre-releases.md) + * [Release Candidates](./advanced-release-candidates.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md index b90bd631d41..f3f4a523041 100644 --- a/book/src/advanced-pre-releases.md +++ b/book/src/advanced-pre-releases.md @@ -1,4 +1,4 @@ # Pre-Releases -Pre-releases are now referred to as [Release Candidates][./advanced-pre-releases.md]. The terms may +Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may be used interchangeably. diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md new file mode 100644 index 00000000000..ce7ff213280 --- /dev/null +++ b/book/src/database-migrations.md @@ -0,0 +1,121 @@ +# Database Migrations + +Lighthouse uses a versioned database schema to allow its database design to evolve over time. + +Since beacon chain genesis in December 2020 there have been several database upgrades that have +been applied automatically and in a _backwards compatible_ way. + +However, backwards compatibility does not imply the ability to _downgrade_ to a prior version of +Lighthouse after upgrading. To facilitate smooth downgrades, Lighthouse v2.3.0 and above includes a +command for applying database downgrades. 
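Conceptually, the migration machinery steps the on-disk schema one version at a time, in either direction. A rough sketch of that dispatch (hypothetical Rust, not the actual `lighthouse db` internals):

```rust
// The store records its schema version; `lighthouse db migrate --to N` walks
// one version at a time, refusing any step without a registered reverse
// migration.

fn migrate(mut current: u64, target: u64) -> Result<(), String> {
    while current < target {
        upgrade_step(current, current + 1)?; // e.g. v8 -> v9
        current += 1;
    }
    while current > target {
        downgrade_step(current, current - 1)?; // e.g. v9 -> v8, if supported
        current -= 1;
    }
    Ok(())
}

// Stand-ins for the real per-version migrations.
fn upgrade_step(from: u64, to: u64) -> Result<(), String> {
    println!("upgrading schema v{from} -> v{to}");
    Ok(())
}

fn downgrade_step(from: u64, to: u64) -> Result<(), String> {
    println!("downgrading schema v{from} -> v{to}");
    Ok(())
}

fn main() {
    migrate(9, 8).expect("downgrade v9 -> v8");
}
```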
+ +**Everything on this page applies to the Lighthouse _beacon node_, not to the +validator client or the slasher**. + +## How to apply a database downgrade + +To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. + +1. Make sure you have a copy of the latest version of Lighthouse. This will be the version that + knows about the latest schema change, and has the ability to revert it. +2. Work out the schema version you would like to downgrade to by checking the Lighthouse release + notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version from v8 to v9, then + you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. +3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of + them are time-sensitive. The release notes will state whether a downgrade is available and + whether any caveats apply to it. +4. Work out the parameters for [Running `lighthouse db` correctly][run-correctly], including your + Lighthouse user, your datadir and your network flag. +5. After stopping the beacon node, run the migrate command with the `--to` parameter set to the + schema version you would like to downgrade to. + +``` +sudo -u "$LH_USER" lighthouse db migrate --to "$VERSION" --datadir "$LH_DATADIR" --network "$NET" +``` + +For example if you want to downgrade to Lighthouse v2.1 or v2.2 from v2.3 and you followed Somer +Esat's guide, you would run: + +``` +sudo -u lighthousebeacon lighthouse db migrate --to 8 --datadir /var/lib/lighthouse --network mainnet +``` + +Where `lighthouse` is Lighthouse v2.3.0+. After the downgrade succeeds you can then replace your +global `lighthouse` binary with the older version and start your node again. + +## How to apply a database upgrade + +Database _upgrades_ happen automatically upon installing a new version of Lighthouse. We will +highlight in the release notes when a database upgrade is included, and make note of the schema +versions involved (e.g. v2.3.0 includes an upgrade from v8 to v9). + +They can also be applied using the `--to` parameter to `lighthouse db migrate`. See the section +on downgrades above. + +## How to check the schema version + +To check the schema version of a running Lighthouse instance you can use the HTTP API: + +```bash +curl "http://localhost:5052/lighthouse/database/info" +``` + +```json +{ + "schema_version": 8, + "config": { + "slots_per_restore_point": 8192, + "slots_per_restore_point_set_explicitly": true, + "block_cache_size": 5, + "compact_on_init": false, + "compact_on_prune": true + } +} +``` + +The `schema_version` key indicates that this database is using schema version 8. + +Alternatively, you can check the schema version with the `lighthouse db` command. + +``` +sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet +``` + +See the section on [Running `lighthouse db` correctly][run-correctly] for details. + +## How to run `lighthouse db` correctly + +Several conditions need to be met in order to run `lighthouse db`: + +1. The beacon node must be **stopped** (not running). If you are using systemd a command like + `sudo systemctl stop lighthousebeacon` will accomplish this. +2. The command must run as the user that owns the beacon node database. If you are using systemd then + your beacon node might run as a user called `lighthousebeacon`. +3. The `--datadir` flag must be set to the location of the Lighthouse data directory. +4. 
The `--network` flag must be set to the correct network, e.g. `mainnet`, `prater` or `ropsten`. + +The general form for a `lighthouse db` command is: + +``` +sudo -u "$LH_USER" lighthouse db version --datadir "$LH_DATADIR" --network "$NET" +``` + +If you followed Somer Esat's guide for mainnet: + +``` +sudo systemctl stop lighthousebeacon +``` +``` +sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet +``` + +If you followed the CoinCashew guide for mainnet: + +``` +sudo systemctl stop beacon-chain +``` +``` +lighthouse db version --network mainnet +``` + +[run-correctly]: #how-to-run-lighthouse-db-correctly diff --git a/book/src/validator-web3signer.md b/book/src/validator-web3signer.md index 2de641d48be..103f1ccb3cf 100644 --- a/book/src/validator-web3signer.md +++ b/book/src/validator-web3signer.md @@ -43,12 +43,15 @@ remote signer: type: web3signer url: "https://my-remote-signer.com:1234" root_certificate_path: /home/paul/my-certificates/my-remote-signer.pem + client_identity_path: /home/paul/my-keys/my-identity-certificate.p12 + client_identity_password: "password" ``` When using this file, the Lighthouse VC will perform duties for the `0xa5566..` validator and defer to the `https://my-remote-signer.com:1234` server to obtain any signatures. It will load a "self-signed" SSL certificate from `/home/paul/my-certificates/my-remote-signer.pem` (on the -filesystem of the VC) to encrypt the communications between the VC and Web3Signer. +filesystem of the VC) to encrypt the communications between the VC and Web3Signer. It will use +SSL client authentication with the "self-signed" certificate in `/home/paul/my-keys/my-identity-certificate.p12`. > The `request_timeout_ms` key can also be specified. Use this key to override the default timeout > with a new timeout in milliseconds. This is the timeout before requests to Web3Signer are diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index d3a28102f68..2069c43ec4b 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.2.1" +version = "2.3.1" authors = ["Sigma Prime "] edition = "2021" diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 4652370c38d..3f4831ae174 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -72,6 +72,16 @@ pub enum SigningDefinition { /// The timeout is applied from when the request starts connecting until the response body has finished. #[serde(skip_serializing_if = "Option::is_none")] request_timeout_ms: Option, + + /// Path to a PKCS12 file. + #[serde(skip_serializing_if = "Option::is_none")] + client_identity_path: Option, + + /// Password for the PKCS12 file. + /// + /// An empty password will be used if this is omitted. 
+ #[serde(skip_serializing_if = "Option::is_none")] + client_identity_password: Option, }, } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 674672326c4..294f8ec8a3d 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -26,6 +26,7 @@ futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } slashing_protection = { path = "../../validator_client/slashing_protection", optional = true } +mime = "0.3.16" [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.2.2", optional = true } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index e7c74668e82..5e02ec0bb28 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -476,6 +476,16 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_remotekeys_url(&self) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("remotekeys"); + Ok(url) + } + /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { let mut url = self.server.full.clone(); @@ -509,6 +519,30 @@ impl ValidatorClientHttpClient { let url = self.make_keystores_url()?; self.delete_with_unsigned_response(url, req).await } + + /// `GET eth/v1/remotekeys` + pub async fn get_remotekeys(&self) -> Result { + let url = self.make_remotekeys_url()?; + self.get_unsigned(url).await + } + + /// `POST eth/v1/remotekeys` + pub async fn post_remotekeys( + &self, + req: &ImportRemotekeysRequest, + ) -> Result { + let url = self.make_remotekeys_url()?; + self.post_with_unsigned_response(url, req).await + } + + /// `DELETE eth/v1/remotekeys` + pub async fn delete_remotekeys( + &self, + req: &DeleteRemotekeysRequest, + ) -> Result { + let url = self.make_remotekeys_url()?; + self.delete_with_unsigned_response(url, req).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response. 
Otherwise, creates an diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index ebcce3fab01..d9fe9691384 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -102,3 +102,59 @@ pub enum DeleteKeystoreStatus { NotFound, Error, } + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct ListRemotekeysResponse { + pub data: Vec, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct SingleListRemotekeysResponse { + pub pubkey: PublicKeyBytes, + pub url: String, + pub readonly: bool, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ImportRemotekeysRequest { + pub remote_keys: Vec, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] +pub struct SingleImportRemotekeysRequest { + pub pubkey: PublicKeyBytes, + pub url: String, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum ImportRemotekeyStatus { + Imported, + Duplicate, + Error, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ImportRemotekeysResponse { + pub data: Vec>, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct DeleteRemotekeysRequest { + pub pubkeys: Vec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum DeleteRemotekeyStatus { + Deleted, + NotFound, + Error, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct DeleteRemotekeysResponse { + pub data: Vec>, +} diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 9bf7546749f..fe9b6a48c09 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -92,4 +92,8 @@ pub struct Web3SignerValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub request_timeout_ms: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_password: Option, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8cd3a1d67f1..8ef3582268e 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -3,7 +3,9 @@ use crate::Error as ServerError; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; +use mime::{Mime, APPLICATION, JSON, OCTET_STREAM, STAR}; use serde::{Deserialize, Serialize}; +use std::cmp::Reverse; use std::convert::TryFrom; use std::fmt; use std::str::{from_utf8, FromStr}; @@ -1008,15 +1010,37 @@ impl FromStr for Accept { type Err = String; fn from_str(s: &str) -> Result { - match s { - "application/octet-stream" => Ok(Accept::Ssz), - "application/json" => Ok(Accept::Json), - "*/*" => Ok(Accept::Any), - _ => Err("accept header cannot be parsed.".to_string()), - } + let mut mimes = parse_accept(s)?; + + // [q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2 + // find the highest q-factor supported accept type + mimes.sort_by_key(|m| { + Reverse(m.get_param("q").map_or(1000_u16, |n| { + (n.as_ref().parse::().unwrap_or(0_f32) * 1000_f32) as u16 + })) + }); + mimes + .into_iter() + .find_map(|m| match (m.type_(), m.subtype()) { + (APPLICATION, OCTET_STREAM) => Some(Accept::Ssz), + (APPLICATION, JSON) => Some(Accept::Json), + (STAR, STAR) => Some(Accept::Any), + _ => None, + }) + .ok_or_else(|| "accept header 
is not supported".to_string()) } } +fn parse_accept(accept: &str) -> Result, String> { + accept + .split(',') + .map(|part| { + part.parse() + .map_err(|e| format!("error parsing Accept header: {}", e)) + }) + .collect() +} + #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, @@ -1045,4 +1069,23 @@ mod tests { } ); } + + #[test] + fn parse_accept_header_content() { + assert_eq!( + Accept::from_str("application/json; charset=utf-8").unwrap(), + Accept::Json + ); + + assert_eq!( + Accept::from_str("text/plain,application/octet-stream;q=0.3,application/json;q=0.9") + .unwrap(), + Accept::Json + ); + + assert_eq!( + Accept::from_str("text/plain"), + Err("accept header is not supported".to_string()) + ) + } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7992955dc41..ec8522ac983 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -237,5 +237,6 @@ define_hardcoded_nets!( (mainnet, "mainnet", GENESIS_STATE_IS_KNOWN), (prater, "prater", GENESIS_STATE_IS_KNOWN), (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN), - (kiln, "kiln", GENESIS_STATE_IS_KNOWN) + (kiln, "kiln", GENESIS_STATE_IS_KNOWN), + (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN) ); diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index d762ecfbdaa..7987899c3df 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -74,8 +74,8 @@ CHURN_LIMIT_QUOTIENT: 4096 # Fork choice # --------------------------------------------------------------- -# 70% -PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 16aa507eed0..cc4e7dcab4f 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -74,8 +74,8 @@ CHURN_LIMIT_QUOTIENT: 65536 # Fork choice # --------------------------------------------------------------- -# 70% -PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 106c95595e1..d337c4120ac 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -74,8 +74,8 @@ CHURN_LIMIT_QUOTIENT: 65536 # Fork choice # --------------------------------------------------------------- -# 70% -PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml new file mode 100644 index 00000000000..27e6e53fc42 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml @@ -0,0 +1,4 @@ +# Pari +- 
enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk +# Teku +- enr:-KG4QMJSJ7DHk6v2p-W8zQ3Xv7FfssZ_1E3p2eY6kN13staMObUonAurqyWhODoeY6edXtV8e9eL9RnhgZ9va2SMDRQMhGV0aDKQS-iVMYAAAHD0AQAAAAAAAIJpZIJ2NIJpcIQDhAAhiXNlY3AyNTZrMaEDXBVUZhhmdy1MYor1eGdRJ4vHYghFKDgjyHgt6sJ-IlCDdGNwgiMog3VkcIIjKA diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml new file mode 100644 index 00000000000..5dad3ff7599 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml @@ -0,0 +1,71 @@ +# Extends the mainnet preset +PRESET_BASE: 'mainnet' +CONFIG_NAME: 'ropsten' + +# Genesis +# --------------------------------------------------------------- +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 100000 +# Monday, May 30th, 2022 3:00:00 PM +UTC +MIN_GENESIS_TIME: 1653318000 +GENESIS_FORK_VERSION: 0x80000069 +GENESIS_DELAY: 604800 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x80000070 +ALTAIR_FORK_EPOCH: 500 +# Merge +BELLATRIX_FORK_VERSION: 0x80000071 +BELLATRIX_FORK_EPOCH: 750 +TERMINAL_TOTAL_DIFFICULTY: 50000000000000000 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Sharding +SHARDING_FORK_VERSION: 0x03001020 +SHARDING_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 3 +DEPOSIT_NETWORK_ID: 3 +DEPOSIT_CONTRACT_ADDRESS: 0x6f22fFbC56eFF051aECF839396DD1eD9aD6BBA9D diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt new file mode 100644 index 00000000000..dd46f23b620 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt @@ -0,0 +1 @@ +12269949 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip new file mode 100644 index 00000000000..5f83ed3b65f Binary files /dev/null and b/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip differ diff --git a/common/lighthouse_version/src/lib.rs 
b/common/lighthouse_version/src/lib.rs index 5162ddffbcc..c5a5bc57e8e 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -4,6 +4,7 @@ use target_info::Target; /// Returns the current version of this build of Lighthouse. /// /// A plus-sign (`+`) is appended to the git commit if the tree is dirty. +/// Commit hash is omitted if the sources don't include git information. /// /// ## Example /// @@ -16,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.2.1-", - fallback = "unknown" + prefix = "Lighthouse/v2.3.1-", + fallback = "Lighthouse/v2.3.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 660cc1ca011..f344dc47354 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,9 +5,10 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -tokio = { version = "1.14.0", features = ["rt"] } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } +sloggers = { version = "2.1.1", features = ["json"] } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 2d3e941a3eb..dd525bea504 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,10 +1,11 @@ mod metrics; +pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; use slog::{crit, debug, o, trace}; use std::sync::Weak; -use tokio::runtime::Runtime; +use tokio::runtime::{Handle, Runtime}; /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] @@ -24,11 +25,51 @@ impl ShutdownReason { } } +/// Provides a `Handle` by either: +/// +/// 1. Holding a `Weak` and calling `Runtime::handle`. +/// 2. Directly holding a `Handle` and cloning it. +/// +/// This enum allows the `TaskExecutor` to work in production where a `Weak` is directly +/// accessible and in testing where the `Runtime` is hidden outside our scope. +#[derive(Clone)] +pub enum HandleProvider { + Runtime(Weak), + Handle(Handle), +} + +impl From for HandleProvider { + fn from(handle: Handle) -> Self { + HandleProvider::Handle(handle) + } +} + +impl From> for HandleProvider { + fn from(weak_runtime: Weak) -> Self { + HandleProvider::Runtime(weak_runtime) + } +} + +impl HandleProvider { + /// Returns a `Handle` to a `Runtime`. + /// + /// May return `None` if the weak reference to the `Runtime` has been dropped (this generally + /// means Lighthouse is shutting down). + pub fn handle(&self) -> Option { + match self { + HandleProvider::Runtime(weak_runtime) => weak_runtime + .upgrade() + .map(|runtime| runtime.handle().clone()), + HandleProvider::Handle(handle) => Some(handle.clone()), + } + } +} + /// A wrapper over a runtime handle which can spawn async and blocking tasks. #[derive(Clone)] pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned - runtime: Weak, + handle_provider: HandleProvider, /// The receiver exit future which on receiving shuts down the task exit: exit_future::Exit, /// Sender given to tasks, so that if they encounter a state in which execution cannot @@ -43,16 +84,19 @@ pub struct TaskExecutor { impl TaskExecutor { /// Create a new task executor. 
/// - /// Note: this function is mainly useful in tests. A `TaskExecutor` should be normally obtained from - /// a [`RuntimeContext`](struct.RuntimeContext.html) - pub fn new( - runtime: Weak, + /// ## Note + /// + /// This function should only be used during testing. In production, prefer to obtain an + /// instance of `Self` via an `environment::RuntimeContext` (see the `lighthouse/environment` + /// crate). + pub fn new>( + handle: T, exit: exit_future::Exit, log: slog::Logger, signal_tx: Sender, ) -> Self { Self { - runtime, + handle_provider: handle.into(), exit, signal_tx, log, } } /// Clones the task executor adding a service name. pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { - runtime: self.runtime.clone(), + handle_provider: self.handle_provider.clone(), exit: self.exit.clone(), signal_tx: self.signal_tx.clone(), log: self.log.new(o!("service" => service_name)), @@ -94,8 +138,8 @@ impl TaskExecutor { let mut shutdown_sender = self.shutdown_sender(); let log = self.log.clone(); - if let Some(runtime) = self.runtime.upgrade() { - runtime.spawn(async move { + if let Some(handle) = self.handle() { + handle.spawn(async move { let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]); if let Err(join_error) = task_handle.await { if let Ok(panic) = join_error.try_into_panic() { @@ -160,8 +204,8 @@ impl TaskExecutor { }); int_gauge.inc(); - if let Some(runtime) = self.runtime.upgrade() { - runtime.spawn(future); + if let Some(handle) = self.handle() { + handle.spawn(future); } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); } @@ -211,8 +255,8 @@ impl TaskExecutor { }); int_gauge.inc(); - if let Some(runtime) = self.runtime.upgrade() { - Some(runtime.spawn(future)) + if let Some(handle) = self.handle() { + Some(handle.spawn(future)) } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); None @@ -242,8 +286,8 @@ impl TaskExecutor { let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]); - let join_handle = if let Some(runtime) = self.runtime.upgrade() { - runtime.spawn_blocking(task) + let join_handle = if let Some(handle) = self.handle() { + handle.spawn_blocking(task) } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); return None; @@ -268,8 +312,9 @@ impl TaskExecutor { Some(future) } - pub fn runtime(&self) -> Weak { - self.runtime.clone() + /// Returns a `Handle` to the current runtime. + pub fn handle(&self) -> Option { + self.handle_provider.handle() } /// Returns a copy of the `exit_future::Exit`. diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs new file mode 100644 index 00000000000..7d59cdf022c --- /dev/null +++ b/common/task_executor/src/test_utils.rs @@ -0,0 +1,68 @@ +use crate::TaskExecutor; +use slog::Logger; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::sync::Arc; +use tokio::runtime; + +/// Whilst the `TestRuntime` is not necessarily useful in itself, it provides the necessary +/// components for creating a `TaskExecutor` during tests. +/// +/// May create its own runtime or use an existing one. +/// +/// ## Warning +/// +/// This struct should never be used in production, only testing.
+pub struct TestRuntime {
+    runtime: Option<Arc<tokio::runtime::Runtime>>,
+    _runtime_shutdown: exit_future::Signal,
+    pub task_executor: TaskExecutor,
+    pub log: Logger,
+}
+
+impl Default for TestRuntime {
+    /// If called *inside* an existing runtime, instantiates `Self` using a handle to that
+    /// runtime. If called *outside* any existing runtime, creates a new `Runtime` and keeps it
+    /// alive until `Self` is dropped.
+    fn default() -> Self {
+        let (runtime_shutdown, exit) = exit_future::signal();
+        let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
+        let log = null_logger().unwrap();
+
+        let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() {
+            (None, handle)
+        } else {
+            let runtime = Arc::new(
+                runtime::Builder::new_multi_thread()
+                    .enable_all()
+                    .build()
+                    .unwrap(),
+            );
+            let handle = runtime.handle().clone();
+            (Some(runtime), handle)
+        };
+
+        let task_executor = TaskExecutor::new(handle, exit, log.clone(), shutdown_tx);
+
+        Self {
+            runtime,
+            _runtime_shutdown: runtime_shutdown,
+            task_executor,
+            log,
+        }
+    }
+}
+
+impl Drop for TestRuntime {
+    fn drop(&mut self) {
+        if let Some(runtime) = self.runtime.take() {
+            Arc::try_unwrap(runtime).unwrap().shutdown_background()
+        }
+    }
+}
+
+pub fn null_logger() -> Result<Logger, String> {
+    let log_builder = NullLoggerBuilder;
+    log_builder
+        .build()
+        .map_err(|e| format!("Failed to start null logger: {:?}", e))
+}
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs
index 746f0769978..ed89482b6a4 100644
--- a/consensus/fork_choice/src/fork_choice.rs
+++ b/consensus/fork_choice/src/fork_choice.rs
@@ -570,10 +570,10 @@ where
     /// The supplied block **must** pass the `state_transition` function as it will not be run
     /// here.
     #[allow(clippy::too_many_arguments)]
-    pub fn on_block(
+    pub fn on_block<Payload: ExecPayload<E>>(
         &mut self,
         current_slot: Slot,
-        block: &BeaconBlock<E>,
+        block: &BeaconBlock<E, Payload>,
         block_root: Hash256,
         block_delay: Duration,
         state: &mut BeaconState<E>,
diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs
index 9b85708f34c..78260075160 100644
--- a/consensus/fork_choice/src/fork_choice_store.rs
+++ b/consensus/fork_choice/src/fork_choice_store.rs
@@ -1,4 +1,4 @@
-use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot};
+use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot};
 
 /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice":
 ///
@@ -31,9 +31,9 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized {
 
     /// Called whenever `ForkChoice::on_block` has verified a block, but not yet added it to fork
     /// choice. Allows the implementer to perform caching or other housekeeping duties.
-    fn on_verified_block(
+    fn on_verified_block<Payload: ExecPayload<T>>(
         &mut self,
-        block: &BeaconBlock<T>,
+        block: &BeaconBlock<T, Payload>,
         block_root: Hash256,
         state: &BeaconState<T>,
     ) -> Result<(), Self::Error>;
diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs
index 160800ca508..3f8a2ac6b6b 100644
--- a/consensus/fork_choice/tests/tests.rs
+++ b/consensus/fork_choice/tests/tests.rs
@@ -344,7 +344,7 @@ impl ForkChoiceTest {
         let state_root = harness
             .chain
             .store
-            .get_block(&fc.fc_store().justified_checkpoint().root)
+            .get_blinded_block(&fc.fc_store().justified_checkpoint().root)
             .unwrap()
             .unwrap()
             .message()
diff --git a/consensus/safe_arith/src/lib.rs b/consensus/safe_arith/src/lib.rs
index ab5985a6e11..c1dbff4c7c8 100644
--- a/consensus/safe_arith/src/lib.rs
+++ b/consensus/safe_arith/src/lib.rs
@@ -20,6 +20,7 @@ macro_rules! assign_method {
         #[doc = "Safe variant of `"]
         #[doc = $doc_op]
        #[doc = "`."]
+        #[inline]
         fn $name(&mut self, other: $rhs_ty) -> Result<()> {
             *self = self.$op(other)?;
             Ok(())
@@ -68,30 +69,37 @@ macro_rules! impl_safe_arith {
             const ZERO: Self = 0;
             const ONE: Self = 1;
 
+            #[inline]
             fn safe_add(&self, other: Self) -> Result<Self> {
                 self.checked_add(other).ok_or(ArithError::Overflow)
             }
 
+            #[inline]
             fn safe_sub(&self, other: Self) -> Result<Self> {
                 self.checked_sub(other).ok_or(ArithError::Overflow)
             }
 
+            #[inline]
             fn safe_mul(&self, other: Self) -> Result<Self> {
                 self.checked_mul(other).ok_or(ArithError::Overflow)
             }
 
+            #[inline]
             fn safe_div(&self, other: Self) -> Result<Self> {
                 self.checked_div(other).ok_or(ArithError::DivisionByZero)
             }
 
+            #[inline]
             fn safe_rem(&self, other: Self) -> Result<Self> {
                 self.checked_rem(other).ok_or(ArithError::DivisionByZero)
             }
 
+            #[inline]
             fn safe_shl(&self, other: u32) -> Result<Self> {
                 self.checked_shl(other).ok_or(ArithError::Overflow)
             }
 
+            #[inline]
             fn safe_shr(&self, other: u32) -> Result<Self> {
                 self.checked_shr(other).ok_or(ArithError::Overflow)
             }
diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs
index 937348263b4..d4b4b067e37 100644
--- a/consensus/state_processing/src/block_replayer.rs
+++ b/consensus/state_processing/src/block_replayer.rs
@@ -3,10 +3,12 @@ use crate::{
     BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot,
 };
 use std::marker::PhantomData;
-use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot};
 
-type PreBlockHook<'a, E, Error> =
-    Box<dyn FnMut(&mut BeaconState<E>, &SignedBeaconBlock<E>) -> Result<(), Error> + 'a>;
+type PreBlockHook<'a, E, Error> = Box<
+    dyn FnMut(&mut BeaconState<E>, &SignedBeaconBlock<E, BlindedPayload<E>>) -> Result<(), Error>
+        + 'a,
+>;
 type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>;
 type PreSlotHook<'a, E, Error> = Box<dyn FnMut(&mut BeaconState<E>) -> Result<(), Error> + 'a>;
 type PostSlotHook<'a, E, Error> = Box<
@@ -175,7 +177,7 @@ where
     fn get_state_root(
         &mut self,
         slot: Slot,
-        blocks: &[SignedBeaconBlock<E>],
+        blocks: &[SignedBeaconBlock<E, BlindedPayload<E>>],
         i: usize,
     ) -> Result<Option<Hash256>, Error> {
         // If we don't care about state roots then return immediately.
@@ -214,7 +216,7 @@ where
     /// after the blocks have been applied.
     pub fn apply_blocks(
         mut self,
-        blocks: Vec<SignedBeaconBlock<E>>,
+        blocks: Vec<SignedBeaconBlock<E, BlindedPayload<E>>>,
         target_slot: Option<Slot>,
     ) -> Result<Self, Error> {
         for (i, block) in blocks.iter().enumerate() {
diff --git a/consensus/state_processing/src/common/altair.rs b/consensus/state_processing/src/common/altair.rs
index 6cf80bdd9ed..8943ef2f40b 100644
--- a/consensus/state_processing/src/common/altair.rs
+++ b/consensus/state_processing/src/common/altair.rs
@@ -2,27 +2,45 @@ use integer_sqrt::IntegerSquareRoot;
 use safe_arith::{ArithError, SafeArith};
 use types::*;
 
+/// This type exists to avoid confusing `total_active_balance` with `base_reward_per_increment`,
+/// since they are used in close proximity and share the same type (`u64`).
+#[derive(Copy, Clone)]
+pub struct BaseRewardPerIncrement(u64);
+
+impl BaseRewardPerIncrement {
+    pub fn new(total_active_balance: u64, spec: &ChainSpec) -> Result<Self, ArithError> {
+        get_base_reward_per_increment(total_active_balance, spec).map(Self)
+    }
+
+    pub fn as_u64(&self) -> u64 {
+        self.0
+    }
+}
+
 /// Returns the base reward for some validator.
 ///
+/// The function has a different interface to the spec since it accepts the
+/// `base_reward_per_increment` without computing it each time. Avoiding the recomputation has
+/// been shown to be a significant optimisation.
+///
 /// Spec v1.1.0
 pub fn get_base_reward<T: EthSpec>(
     state: &BeaconState<T>,
     index: usize,
-    // Should be == get_total_active_balance(state, spec)
-    total_active_balance: u64,
+    base_reward_per_increment: BaseRewardPerIncrement,
     spec: &ChainSpec,
 ) -> Result<u64, BeaconStateError> {
     state
         .get_effective_balance(index)?
         .safe_div(spec.effective_balance_increment)?
-        .safe_mul(get_base_reward_per_increment(total_active_balance, spec)?)
+        .safe_mul(base_reward_per_increment.as_u64())
         .map_err(Into::into)
 }
 
 /// Returns the base reward for some validator.
 ///
 /// Spec v1.1.0
-pub fn get_base_reward_per_increment(
+fn get_base_reward_per_increment(
     total_active_balance: u64,
     spec: &ChainSpec,
 ) -> Result<u64, ArithError> {
diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
index 8358003e4b4..306e86714c6 100644
--- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
+++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
@@ -1,4 +1,4 @@
-use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance};
+use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance};
 use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid};
 use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures};
 use safe_arith::SafeArith;
@@ -72,7 +72,8 @@ pub fn compute_sync_aggregate_rewards<T: EthSpec>(
     let total_active_balance = state.get_total_active_balance()?;
     let total_active_increments =
         total_active_balance.safe_div(spec.effective_balance_increment)?;
-    let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)?
+    let total_base_rewards = BaseRewardPerIncrement::new(total_active_balance, spec)?
+        .as_u64()
         .safe_mul(total_active_increments)?;
     let max_participant_rewards = total_base_rewards
         .safe_mul(SYNC_REWARD_WEIGHT)?
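The `BaseRewardPerIncrement` newtype above is designed to be computed once per block (or per epoch transition) and then passed into `get_base_reward` for every validator, so the expensive square-root-based value is not re-derived inside hot loops. The standalone sketch below mirrors that call pattern; every type, name and number in it is a simplified stand-in invented for illustration, not the actual `BeaconState`/`ChainSpec` API:

    // Sketch of the intended call pattern: compute the per-increment reward once,
    // then reuse it for every validator. The sqrt here is floating-point for
    // brevity; the real code uses `integer_sqrt` and `safe_arith`.
    #[derive(Copy, Clone)]
    struct BaseRewardPerIncrement(u64);

    impl BaseRewardPerIncrement {
        fn new(total_active_balance: u64, increment: u64, base_reward_factor: u64) -> Option<Self> {
            let sqrt = (total_active_balance as f64).sqrt() as u64;
            increment
                .checked_mul(base_reward_factor)?
                .checked_div(sqrt)
                .map(Self)
        }
    }

    fn base_reward(effective_balance: u64, increment: u64, per_inc: BaseRewardPerIncrement) -> Option<u64> {
        effective_balance.checked_div(increment)?.checked_mul(per_inc.0)
    }

    fn main() {
        let increment = 1_000_000_000; // 1 ETH in Gwei (illustrative)
        let per_inc = BaseRewardPerIncrement::new(10_000 * 32 * increment, increment, 64).unwrap();
        // The expensive part ran once; the per-validator loop is now cheap.
        for effective_balance in [32 * increment, 31 * increment] {
            println!("{:?}", base_reward(effective_balance, increment, per_inc));
        }
    }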
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs
index 3bf22d004a4..31a4ac1fb42 100644
--- a/consensus/state_processing/src/per_block_processing/process_operations.rs
+++ b/consensus/state_processing/src/per_block_processing/process_operations.rs
@@ -1,7 +1,8 @@
 use super::*;
 use crate::common::{
-    altair::get_base_reward, get_attestation_participation_flag_indices, increase_balance,
-    initiate_validator_exit, slash_validator,
+    altair::{get_base_reward, BaseRewardPerIncrement},
+    get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit,
+    slash_validator,
 };
 use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex};
 use crate::VerifySignatures;
@@ -128,6 +129,7 @@ pub mod altair {
 
         // Update epoch participation flags.
         let total_active_balance = state.get_total_active_balance()?;
+        let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?;
         let mut proposer_reward_numerator = 0;
         for index in &indexed_attestation.attesting_indices {
             let index = *index as usize;
@@ -143,7 +145,7 @@ pub mod altair {
                 {
                     validator_participation.add_flag(flag_index)?;
                     proposer_reward_numerator.safe_add_assign(
-                        get_base_reward(state, index, total_active_balance, spec)?
+                        get_base_reward(state, index, base_reward_per_increment, spec)?
                             .safe_mul(weight)?,
                     )?;
                 }
diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs
index 038fe770440..967f642e85d 100644
--- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs
+++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs
@@ -14,6 +14,7 @@ pub fn process_inactivity_updates<T: EthSpec>(
     participation_cache: &ParticipationCache,
     spec: &ChainSpec,
 ) -> Result<(), EpochProcessingError> {
+    let previous_epoch = state.previous_epoch();
     // Score updates based on previous epoch participation, skip genesis epoch
     if state.current_epoch() == T::genesis_epoch() {
         return Ok(());
@@ -33,7 +34,7 @@ pub fn process_inactivity_updates<T: EthSpec>(
                 .safe_add_assign(spec.inactivity_score_bias)?;
         }
         // Decrease the score of all validators for forgiveness when not during a leak
-        if !state.is_in_inactivity_leak(spec) {
+        if !state.is_in_inactivity_leak(previous_epoch, spec) {
             let inactivity_score = state.get_inactivity_score_mut(index)?;
             inactivity_score
                 .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?;
diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs
index ce102694f58..ccebbcb3a29 100644
--- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs
+++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs
@@ -6,7 +6,10 @@ use types::consts::altair::{
 };
 use types::{BeaconState, ChainSpec, EthSpec};
 
-use crate::common::{altair::get_base_reward, decrease_balance, increase_balance};
+use crate::common::{
+    altair::{get_base_reward, BaseRewardPerIncrement},
+    decrease_balance, increase_balance,
+};
 use crate::per_epoch_processing::{Delta, Error};
 
 /// Apply attester and proposer rewards.
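The same hoisting idea recurs throughout this diff: `is_in_inactivity_leak` and `is_eligible_validator` now take `previous_epoch` as an argument so callers compute it once, outside their per-validator loops (see the hunks below). A toy sketch of the pattern, with all names and values invented for illustration rather than taken from the real `BeaconState` API:

    // The epoch is computed once per epoch transition and threaded through,
    // instead of being re-derived from the state for every validator.
    #[derive(Copy, Clone)]
    struct Epoch(u64);

    fn is_in_inactivity_leak(previous_epoch: Epoch, finalized_epoch: Epoch, min_epochs: u64) -> bool {
        previous_epoch.0.saturating_sub(finalized_epoch.0) > min_epochs
    }

    fn main() {
        let previous_epoch = Epoch(100); // hoisted out of the loop below
        let finalized_epoch = Epoch(90);
        for _val_index in 0..4 {
            // Per-validator work reuses the precomputed epoch.
            assert!(is_in_inactivity_leak(previous_epoch, finalized_epoch, 4));
        }
        println!("leak check ran without recomputing the epoch");
    }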
@@ -67,13 +70,14 @@ pub fn get_flag_index_deltas<T: EthSpec>(
         let unslashed_participating_increments =
             unslashed_participating_balance.safe_div(spec.effective_balance_increment)?;
         let active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?;
+        let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?;
 
         for &index in participation_cache.eligible_validator_indices() {
-            let base_reward = get_base_reward(state, index, total_active_balance, spec)?;
+            let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?;
             let mut delta = Delta::default();
 
             if unslashed_participating_indices.contains(index as usize)? {
-                if !state.is_in_inactivity_leak(spec) {
+                if !state.is_in_inactivity_leak(previous_epoch, spec) {
                     let reward_numerator = base_reward
                         .safe_mul(weight)?
                         .safe_mul(unslashed_participating_increments)?;
diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs
index 99d08a6db33..87e4261e0a1 100644
--- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs
+++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs
@@ -78,6 +78,7 @@ pub fn get_attestation_deltas<T: EthSpec>(
     validator_statuses: &ValidatorStatuses,
     spec: &ChainSpec,
 ) -> Result<Vec<AttestationDelta>, Error> {
+    let previous_epoch = state.previous_epoch();
     let finality_delay = state
         .previous_epoch()
         .safe_sub(state.finalized_checkpoint().epoch)?
@@ -92,7 +93,7 @@ pub fn get_attestation_deltas<T: EthSpec>(
         // `get_inclusion_delay_deltas`. It's safe to do so here because any validator that is in
         // the unslashed indices of the matching source attestations is active, and therefore
         // eligible.
-        if !state.is_eligible_validator(index)? {
+        if !state.is_eligible_validator(previous_epoch, index)? {
             continue;
         }
diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs
index 7ea83c4b15e..e730e97cd33 100644
--- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs
+++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs
@@ -102,7 +102,9 @@ impl<T: EthSpec> EpochProcessingSummary<T> {
             EpochProcessingSummary::Altair {
                 participation_cache,
                 ..
-            } => participation_cache.is_active_unslashed_in_current_epoch(val_index),
+            } => participation_cache
+                .is_active_unslashed_in_current_epoch(val_index)
+                .unwrap_or(false),
         }
     }
 
@@ -198,7 +200,9 @@ impl<T: EthSpec> EpochProcessingSummary<T> {
             EpochProcessingSummary::Altair {
                 participation_cache,
                 ..
-            } => participation_cache.is_active_unslashed_in_previous_epoch(val_index),
+            } => participation_cache
+                .is_active_unslashed_in_previous_epoch(val_index)
+                .unwrap_or(false),
         }
     }
 
diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml
index 6350d0775cf..881d17a3309 100644
--- a/consensus/types/Cargo.toml
+++ b/consensus/types/Cargo.toml
@@ -43,7 +43,7 @@ regex = "1.5.5"
 lazy_static = "1.4.0"
 parking_lot = "0.12.0"
 itertools = "0.10.0"
-superstruct = "0.4.1"
+superstruct = "0.5.0"
 serde_json = "1.0.74"
 smallvec = "1.8.0"
 
diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs
index 7f83d46dd4d..6eb12ddf05e 100644
--- a/consensus/types/src/beacon_block.rs
+++ b/consensus/types/src/beacon_block.rs
@@ -37,7 +37,9 @@ use tree_hash_derive::TreeHash;
     ref_attributes(
         derive(Debug, PartialEq, TreeHash),
         tree_hash(enum_behaviour = "transparent")
-    )
+    ),
+    map_ref_into(BeaconBlockBodyRef),
+    map_ref_mut_into(BeaconBlockBodyRefMut)
 )]
 #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)]
 #[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
@@ -199,20 +201,17 @@ impl<'a, T: EthSpec, Payload: ExecPayload<T>> BeaconBlockRef<'a, T, Payload> {
 
     /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`.
     pub fn body(&self) -> BeaconBlockBodyRef<'a, T, Payload> {
-        match self {
-            BeaconBlockRef::Base(block) => BeaconBlockBodyRef::Base(&block.body),
-            BeaconBlockRef::Altair(block) => BeaconBlockBodyRef::Altair(&block.body),
-            BeaconBlockRef::Merge(block) => BeaconBlockBodyRef::Merge(&block.body),
-        }
+        map_beacon_block_ref_into_beacon_block_body_ref!(&'a _, *self, |block, cons| cons(
+            &block.body
+        ))
     }
 
     /// Return the tree hash root of the block's body.
     pub fn body_root(&self) -> Hash256 {
-        match self {
-            BeaconBlockRef::Base(block) => block.body.tree_hash_root(),
-            BeaconBlockRef::Altair(block) => block.body.tree_hash_root(),
-            BeaconBlockRef::Merge(block) => block.body.tree_hash_root(),
-        }
+        map_beacon_block_ref!(&'a _, *self, |block, cons| {
+            let _: Self = cons(block);
+            block.body.tree_hash_root()
+        })
     }
 
     /// Returns the epoch corresponding to `self.slot()`.
@@ -249,11 +248,9 @@ impl<'a, T: EthSpec, Payload: ExecPayload<T>> BeaconBlockRefMut<'a, T, Payload> {
     /// Convert a mutable reference to a beacon block to a mutable ref to its body.
     pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T, Payload> {
-        match self {
-            BeaconBlockRefMut::Base(block) => BeaconBlockBodyRefMut::Base(&mut block.body),
-            BeaconBlockRefMut::Altair(block) => BeaconBlockBodyRefMut::Altair(&mut block.body),
-            BeaconBlockRefMut::Merge(block) => BeaconBlockBodyRefMut::Merge(&mut block.body),
-        }
+        map_beacon_block_ref_mut_into_beacon_block_body_ref_mut!(&'a _, self, |block, cons| cons(
+            &mut block.body
+        ))
     }
 }
 
@@ -465,6 +462,99 @@ impl<T: EthSpec, Payload: ExecPayload<T>> BeaconBlockMerge<T, Payload> {
     }
 }
 
+// We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads.
+impl<E: EthSpec> From<BeaconBlockBase<E, BlindedPayload<E>>>
+    for BeaconBlockBase<E, FullPayload<E>>
+{
+    fn from(block: BeaconBlockBase<E, BlindedPayload<E>>) -> Self {
+        let BeaconBlockBase {
+            slot,
+            proposer_index,
+            parent_root,
+            state_root,
+            body,
+        } = block;
+
+        BeaconBlockBase {
+            slot,
+            proposer_index,
+            parent_root,
+            state_root,
+            body: body.into(),
+        }
+    }
+}
+
+impl<E: EthSpec> From<BeaconBlockAltair<E, BlindedPayload<E>>>
+    for BeaconBlockAltair<E, FullPayload<E>>
+{
+    fn from(block: BeaconBlockAltair<E, BlindedPayload<E>>) -> Self {
+        let BeaconBlockAltair {
+            slot,
+            proposer_index,
+            parent_root,
+            state_root,
+            body,
+        } = block;
+
+        BeaconBlockAltair {
+            slot,
+            proposer_index,
+            parent_root,
+            state_root,
+            body: body.into(),
+        }
+    }
+}
+
+// We can convert blocks with payloads to blocks without payloads, and an optional payload.
+macro_rules! impl_from {
+    ($ty_name:ident, <$($from_params:ty),*>, <$($to_params:ty),*>, $body_expr:expr) => {
+        impl<E: EthSpec> From<$ty_name<$($from_params),*>>
+            for ($ty_name<$($to_params),*>, Option<ExecutionPayload<E>>)
+        {
+            #[allow(clippy::redundant_closure_call)]
+            fn from(block: $ty_name<$($from_params),*>) -> Self {
+                let $ty_name {
+                    slot,
+                    proposer_index,
+                    parent_root,
+                    state_root,
+                    body,
+                } = block;
+
+                let (body, payload) = ($body_expr)(body);
+
+                ($ty_name {
+                    slot,
+                    proposer_index,
+                    parent_root,
+                    state_root,
+                    body,
+                }, payload)
+            }
+        }
+    }
+}
+
+impl_from!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyBase<_, _>| body.into());
+impl_from!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyAltair<_, _>| body.into());
+impl_from!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyMerge<_, _>| body.into());
+
+impl<E: EthSpec> From<BeaconBlock<E, FullPayload<E>>>
+    for (
+        BeaconBlock<E, BlindedPayload<E>>,
+        Option<ExecutionPayload<E>>,
+    )
+{
+    fn from(block: BeaconBlock<E, FullPayload<E>>) -> Self {
+        map_beacon_block!(block, |inner, cons| {
+            let (block, payload) = inner.into();
+            (cons(block), payload)
+        })
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs
index c1db64ae00b..34761ea9a7f 100644
--- a/consensus/types/src/beacon_block_body.rs
+++ b/consensus/types/src/beacon_block_body.rs
@@ -73,6 +73,198 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> {
     }
 }
 
+// We can convert pre-Bellatrix block bodies without payloads into block bodies "with" payloads.
+impl<E: EthSpec> From<BeaconBlockBodyBase<E, BlindedPayload<E>>>
+    for BeaconBlockBodyBase<E, FullPayload<E>>
+{
+    fn from(body: BeaconBlockBodyBase<E, BlindedPayload<E>>) -> Self {
+        let BeaconBlockBodyBase {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            _phantom,
+        } = body;
+
+        BeaconBlockBodyBase {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<E: EthSpec> From<BeaconBlockBodyAltair<E, BlindedPayload<E>>>
+    for BeaconBlockBodyAltair<E, FullPayload<E>>
+{
+    fn from(body: BeaconBlockBodyAltair<E, BlindedPayload<E>>) -> Self {
+        let BeaconBlockBodyAltair {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            sync_aggregate,
+            _phantom,
+        } = body;
+
+        BeaconBlockBodyAltair {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            sync_aggregate,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+// Likewise bodies with payloads can be transformed into bodies without.
+impl<E: EthSpec> From<BeaconBlockBodyBase<E, FullPayload<E>>>
+    for (
+        BeaconBlockBodyBase<E, BlindedPayload<E>>,
+        Option<ExecutionPayload<E>>,
+    )
+{
+    fn from(body: BeaconBlockBodyBase<E, FullPayload<E>>) -> Self {
+        let BeaconBlockBodyBase {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            _phantom,
+        } = body;
+
+        (
+            BeaconBlockBodyBase {
+                randao_reveal,
+                eth1_data,
+                graffiti,
+                proposer_slashings,
+                attester_slashings,
+                attestations,
+                deposits,
+                voluntary_exits,
+                _phantom: PhantomData,
+            },
+            None,
+        )
+    }
+}
+
+impl<E: EthSpec> From<BeaconBlockBodyAltair<E, FullPayload<E>>>
+    for (
+        BeaconBlockBodyAltair<E, BlindedPayload<E>>,
+        Option<ExecutionPayload<E>>,
+    )
+{
+    fn from(body: BeaconBlockBodyAltair<E, FullPayload<E>>) -> Self {
+        let BeaconBlockBodyAltair {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            sync_aggregate,
+            _phantom,
+        } = body;
+
+        (
+            BeaconBlockBodyAltair {
+                randao_reveal,
+                eth1_data,
+                graffiti,
+                proposer_slashings,
+                attester_slashings,
+                attestations,
+                deposits,
+                voluntary_exits,
+                sync_aggregate,
+                _phantom: PhantomData,
+            },
+            None,
+        )
+    }
+}
+
+impl<E: EthSpec> From<BeaconBlockBodyMerge<E, FullPayload<E>>>
+    for (
+        BeaconBlockBodyMerge<E, BlindedPayload<E>>,
+        Option<ExecutionPayload<E>>,
+    )
+{
+    fn from(body: BeaconBlockBodyMerge<E, FullPayload<E>>) -> Self {
+        let BeaconBlockBodyMerge {
+            randao_reveal,
+            eth1_data,
+            graffiti,
+            proposer_slashings,
+            attester_slashings,
+            attestations,
+            deposits,
+            voluntary_exits,
+            sync_aggregate,
+            execution_payload: FullPayload { execution_payload },
+        } = body;
+
+        (
+            BeaconBlockBodyMerge {
+                randao_reveal,
+                eth1_data,
+                graffiti,
+                proposer_slashings,
+                attester_slashings,
+                attestations,
+                deposits,
+                voluntary_exits,
+                sync_aggregate,
+                execution_payload: BlindedPayload {
+                    execution_payload_header: From::from(&execution_payload),
+                },
+            },
+            Some(execution_payload),
+        )
+    }
+}
+
+impl<E: EthSpec> From<BeaconBlockBody<E, FullPayload<E>>>
+    for (
+        BeaconBlockBody<E, BlindedPayload<E>>,
+        Option<ExecutionPayload<E>>,
+    )
+{
+    fn from(body: BeaconBlockBody<E, FullPayload<E>>) -> Self {
+        map_beacon_block_body!(body, |inner, cons| {
+            let (block, payload) = inner.into();
+            (cons(block), payload)
+        })
+    }
+}
+
 #[cfg(test)]
 mod tests {
     mod base {
diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index f06cc1c96b4..11cabb29e09 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -1635,17 +1635,23 @@ impl<T: EthSpec> BeaconState<T> {
         self.clone_with(CloneConfig::committee_caches_only())
     }
 
-    pub fn is_eligible_validator(&self, val_index: usize) -> Result<bool, Error> {
-        let previous_epoch = self.previous_epoch();
+    /// Passing `previous_epoch` to this function rather than computing it internally provides
+    /// a tangible speed improvement in state processing.
+    pub fn is_eligible_validator(
+        &self,
+        previous_epoch: Epoch,
+        val_index: usize,
+    ) -> Result<bool, Error> {
         self.get_validator(val_index).map(|val| {
             val.is_active_at(previous_epoch)
                 || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch)
         })
     }
 
-    pub fn is_in_inactivity_leak(&self, spec: &ChainSpec) -> bool {
-        (self.previous_epoch() - self.finalized_checkpoint().epoch)
-            > spec.min_epochs_to_inactivity_penalty
+    /// Passing `previous_epoch` to this function rather than computing it internally provides
+    /// a tangible speed improvement in state processing.
+    pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool {
+        (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty
     }
 
     /// Get the `SyncCommittee` associated with the next slot. Useful because sync committees
diff --git a/consensus/types/src/beacon_state/participation_cache.rs b/consensus/types/src/beacon_state/participation_cache.rs
index 1c232c1af25..3232cd4b7de 100644
--- a/consensus/types/src/beacon_state/participation_cache.rs
+++ b/consensus/types/src/beacon_state/participation_cache.rs
@@ -19,12 +19,12 @@ use crate::{
     BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch,
 };
 use safe_arith::{ArithError, SafeArith};
-use std::collections::HashMap;
 use std::sync::Arc;
 
 #[derive(Debug, PartialEq)]
 pub enum Error {
     InvalidFlagIndex(usize),
+    InvalidValidatorIndex(usize),
 }
 
 /// A balance which will never be below the specified `minimum`.
@@ -65,7 +65,7 @@ struct SingleEpochParticipationCache {
     /// It would be ideal to maintain a reference to the `BeaconState` here rather than copying the
     /// `ParticipationFlags`, however that would cause us to run into mutable reference limitations
     /// upstream.
-    unslashed_participating_indices: HashMap<usize, ParticipationFlags>,
+    unslashed_participating_indices: Vec<Option<ParticipationFlags>>,
     /// Stores the sum of the balances for all validators in `self.unslashed_participating_indices`
     /// for all flags in `NUM_FLAG_INDICES`.
     ///
@@ -77,11 +77,12 @@ struct SingleEpochParticipationCache {
 }
 
 impl SingleEpochParticipationCache {
-    fn new(hashmap_len: usize, spec: &ChainSpec) -> Self {
+    fn new<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> Self {
+        let num_validators = state.validators().len();
         let zero_balance = Balance::zero(spec.effective_balance_increment);
 
         Self {
-            unslashed_participating_indices: HashMap::with_capacity(hashmap_len),
+            unslashed_participating_indices: vec![None; num_validators],
             total_flag_balances: [zero_balance; NUM_FLAG_INDICES],
             total_active_balance: zero_balance,
         }
@@ -101,7 +102,11 @@ impl SingleEpochParticipationCache {
     ///
     /// May return an error if `flag_index` is out-of-bounds.
     fn has_flag(&self, val_index: usize, flag_index: usize) -> Result<bool, Error> {
-        if let Some(participation_flags) = self.unslashed_participating_indices.get(&val_index) {
+        let participation_flags = self
+            .unslashed_participating_indices
+            .get(val_index)
+            .ok_or(Error::InvalidValidatorIndex(val_index))?;
+        if let Some(participation_flags) = participation_flags {
             participation_flags
                 .has_flag(flag_index)
                 .map_err(|_| Error::InvalidFlagIndex(flag_index))
@@ -122,13 +127,14 @@ impl SingleEpochParticipationCache {
         &mut self,
         val_index: usize,
         state: &BeaconState<T>,
+        current_epoch: Epoch,
         relative_epoch: RelativeEpoch,
     ) -> Result<(), BeaconStateError> {
         let val_balance = state.get_effective_balance(val_index)?;
         let validator = state.get_validator(val_index)?;
 
         // Sanity check to ensure the validator is active.
-        let epoch = relative_epoch.into_epoch(state.current_epoch());
+        let epoch = relative_epoch.into_epoch(current_epoch);
         if !validator.is_active_at(epoch) {
             return Err(BeaconStateError::ValidatorIsInactive { val_index });
         }
@@ -150,8 +156,10 @@ impl SingleEpochParticipationCache {
         }
 
         // Add their `ParticipationFlags` to the map.
-        self.unslashed_participating_indices
-            .insert(val_index, *epoch_participation);
+        *self
+            .unslashed_participating_indices
+            .get_mut(val_index)
+            .ok_or(BeaconStateError::UnknownValidator(val_index))? = Some(*epoch_participation);
 
         // Iterate through all the flags and increment the total flag balances for whichever flags
         // are set for `val_index`.
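The dense `Vec<Option<ParticipationFlags>>` introduced above is indexed directly by validator index: `None` distinguishes a known-but-not-participating validator from an out-of-range index, which the old `HashMap` lookup could not do, and avoids hashing on every access. A simplified model of that lookup behaviour, using an invented `Flags` bitfield as a stand-in for the real `ParticipationFlags`:

    // `None` means "known validator, not participating"; an out-of-range index
    // is an error, mirroring `Error::InvalidValidatorIndex` in the diff.
    #[derive(Copy, Clone)]
    struct Flags(u8);

    struct Cache {
        unslashed_participating_indices: Vec<Option<Flags>>,
    }

    impl Cache {
        fn has_flag(&self, val_index: usize, flag_index: usize) -> Result<bool, String> {
            let entry = self
                .unslashed_participating_indices
                .get(val_index)
                .ok_or_else(|| format!("invalid validator index {}", val_index))?;
            Ok(entry.map_or(false, |flags| (flags.0 & (1u8 << flag_index)) != 0))
        }
    }

    fn main() {
        let cache = Cache {
            unslashed_participating_indices: vec![Some(Flags(0b001)), None],
        };
        assert_eq!(cache.has_flag(0, 0), Ok(true)); // participating, flag set
        assert_eq!(cache.has_flag(1, 0), Ok(false)); // known but not participating
        assert!(cache.has_flag(2, 0).is_err()); // unknown validator index
    }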
@@ -186,16 +194,12 @@ impl PreviousParticipationCache {
         state: &BeaconState<T>,
         spec: &ChainSpec,
     ) -> Result<Self, BeaconStateError> {
+        let current_epoch = state.current_epoch();
         let previous_epoch = state.previous_epoch();
 
-        let num_previous_epoch_active_vals = state
-            .get_cached_active_validator_indices(RelativeEpoch::Previous)?
-            .len();
-
         // Both the current/previous epoch participations are set to a capacity that is slightly
         // larger than required. The difference will be due slashed-but-active validators.
-        let mut previous_epoch_participation =
-            SingleEpochParticipationCache::new(num_previous_epoch_active_vals, spec);
+        let mut previous_epoch_participation = SingleEpochParticipationCache::new(state, spec);
         // Contains the set of validators which are either:
         //
         // - Active in the previous epoch.
@@ -217,19 +221,20 @@
             previous_epoch_participation.process_active_validator(
                 val_index,
                 state,
+                current_epoch,
                 RelativeEpoch::Previous,
             )?;
         }
 
             // Note: a validator might still be "eligible" whilst returning `false` to
             // `Validator::is_active_at`.
-            if state.is_eligible_validator(val_index)? {
+            if state.is_eligible_validator(previous_epoch, val_index)? {
                 eligible_indices.push(val_index)
             }
         }
 
         Ok(Self {
-            initialized_epoch: state.current_epoch(),
+            initialized_epoch: current_epoch,
             previous_epoch,
             previous_epoch_participation: Arc::new(previous_epoch_participation),
             eligible_indices: Arc::new(eligible_indices),
@@ -253,16 +258,13 @@ impl CurrentEpochParticipationCache {
         spec: &ChainSpec,
     ) -> Result<Self, BeaconStateError> {
         let current_epoch = state.current_epoch();
-        let num_current_epoch_active_vals = state
-            .get_cached_active_validator_indices(RelativeEpoch::Current)?
-            .len();
-        let mut current_epoch_participation =
-            SingleEpochParticipationCache::new(num_current_epoch_active_vals, spec);
+        let mut current_epoch_participation = SingleEpochParticipationCache::new(state, spec);
         for (val_index, val) in state.validators().iter().enumerate() {
             if val.is_active_at(current_epoch) {
                 current_epoch_participation.process_active_validator(
                     val_index,
                     state,
+                    current_epoch,
                     RelativeEpoch::Current,
                 )?;
             }
@@ -282,10 +284,12 @@ impl CurrentEpochParticipationCache {
             .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
     }
 
-    pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> bool {
+    /// Returns `None` for an unknown `val_index`.
+    pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> Option<bool> {
         self.current_epoch_participation
             .unslashed_participating_indices
-            .contains_key(&val_index)
+            .get(val_index)
+            .map(|flags| flags.is_some())
     }
 
     /// Always returns false for a slashed validator.
@@ -396,10 +400,12 @@ impl ParticipationCache {
      * Active/Unslashed
      */
 
-    pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> bool {
+    /// Returns `None` for an unknown `val_index`.
+    pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> Option<bool> {
         self.previous_epoch_participation
             .unslashed_participating_indices
-            .contains_key(&val_index)
+            .get(val_index)
+            .map(|flags| flags.is_some())
     }
 
     /*
@@ -439,10 +445,12 @@ impl ParticipationCache {
             .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
     }
 
-    pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> bool {
+    /// Returns `None` for an unknown `val_index`.
+    pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> Option<bool> {
         self.current_epoch_participation
             .unslashed_participating_indices
-            .contains_key(&val_index)
+            .get(val_index)
+            .map(|flags| flags.is_some())
     }
 
     /// Always returns false for a slashed validator.
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index eb081f3ec70..c283d4cb48c 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -500,7 +500,7 @@ impl ChainSpec {
              * Fork choice
              */
             safe_slots_to_update_justified: 8,
-            proposer_score_boost: Some(70),
+            proposer_score_boost: Some(40),
 
             /*
              * Eth1
@@ -698,7 +698,7 @@ impl ChainSpec {
              * Fork choice
              */
             safe_slots_to_update_justified: 8,
-            proposer_score_boost: Some(70),
+            proposer_score_boost: Some(40),
 
             /*
              * Eth1
@@ -1261,7 +1261,7 @@ mod yaml_tests {
         EJECTION_BALANCE: 16000000000
         MIN_PER_EPOCH_CHURN_LIMIT: 4
         CHURN_LIMIT_QUOTIENT: 65536
-        PROPOSER_SCORE_BOOST: 70
+        PROPOSER_SCORE_BOOST: 40
         DEPOSIT_CHAIN_ID: 1
         DEPOSIT_NETWORK_ID: 1
         DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 41ea60e9a58..a9683a175d1 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -140,7 +140,7 @@ pub use crate::shuffling_id::AttestationShufflingId;
 pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof;
 pub use crate::signed_beacon_block::{
     SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash,
-    SignedBeaconBlockMerge,
+    SignedBeaconBlockMerge, SignedBlindedBeaconBlock,
 };
 pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader;
 pub use crate::signed_contribution_and_proof::SignedContributionAndProof;
diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs
index 5be603c196f..d736f0be193 100644
--- a/consensus/types/src/payload.rs
+++ b/consensus/types/src/payload.rs
@@ -15,7 +15,9 @@ pub enum BlockType {
 }
 
 pub trait ExecPayload<T: EthSpec>:
-    Encode
+    Debug
+    + Clone
+    + Encode
     + Decode
     + TestRandom
     + TreeHash
@@ -37,6 +39,7 @@ pub trait ExecPayload<T: EthSpec>:
     // More fields can be added here if you wish.
     fn parent_hash(&self) -> ExecutionBlockHash;
     fn prev_randao(&self) -> Hash256;
+    fn block_number(&self) -> u64;
     fn timestamp(&self) -> u64;
     fn block_hash(&self) -> ExecutionBlockHash;
 }
@@ -58,6 +61,10 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> {
         self.execution_payload.prev_randao
     }
 
+    fn block_number(&self) -> u64 {
+        self.execution_payload.block_number
+    }
+
     fn timestamp(&self) -> u64 {
         self.execution_payload.timestamp
     }
@@ -84,6 +91,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> {
         self.execution_payload_header.prev_randao
     }
 
+    fn block_number(&self) -> u64 {
+        self.execution_payload_header.block_number
+    }
+
     fn timestamp(&self) -> u64 {
         self.execution_payload_header.timestamp
     }
@@ -93,13 +104,28 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> {
     }
 }
 
-#[derive(Default, Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)]
+#[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)]
 #[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
 #[serde(bound = "T: EthSpec")]
 pub struct BlindedPayload<T: EthSpec> {
     pub execution_payload_header: ExecutionPayloadHeader<T>,
 }
 
+// NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default`
+// implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the
+// default payload in `is_merge_transition_block` to determine whether the merge has occurred.
+//
+// The default `BlindedPayload` is therefore the payload header that results from blinding the
+// default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that
+// its `transactions_root` is the hash of the empty list rather than 0x0.
+impl<T: EthSpec> Default for BlindedPayload<T> {
+    fn default() -> Self {
+        Self {
+            execution_payload_header: ExecutionPayloadHeader::from(&ExecutionPayload::default()),
+        }
+    }
+}
+
 impl<T: EthSpec> From<ExecutionPayloadHeader<T>> for BlindedPayload<T> {
     fn from(execution_payload_header: ExecutionPayloadHeader<T>) -> Self {
         Self {
diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs
index da191dbff2e..54880706882 100644
--- a/consensus/types/src/signed_beacon_block.rs
+++ b/consensus/types/src/signed_beacon_block.rs
@@ -53,7 +53,10 @@ impl From<SignedBeaconBlockHash> for Hash256 {
         derivative(PartialEq, Hash(bound = "E: EthSpec")),
         cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)),
         serde(bound = "E: EthSpec, Payload: ExecPayload<E>"),
-    )
+    ),
+    map_into(BeaconBlock),
+    map_ref_into(BeaconBlockRef),
+    map_ref_mut_into(BeaconBlockRefMut)
 )]
 #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)]
 #[derivative(PartialEq, Hash(bound = "E: EthSpec"))]
@@ -72,6 +75,8 @@ pub struct SignedBeaconBlock<E: EthSpec, Payload: ExecPayload<E> = FullPayload<E>>
 }
 
+pub type SignedBlindedBeaconBlock<E> = SignedBeaconBlock<E, BlindedPayload<E>>;
+
 impl<E: EthSpec, Payload: ExecPayload<E>> SignedBeaconBlock<E, Payload> {
     /// Returns the name of the fork pertaining to `self`.
     ///
@@ -132,31 +137,27 @@ impl<E: EthSpec, Payload: ExecPayload<E>> SignedBeaconBlock<E, Payload> {
     /// This is necessary to get a `&BeaconBlock` from a `SignedBeaconBlock` because
     /// `SignedBeaconBlock` only contains a `BeaconBlock` _variant_.
     pub fn deconstruct(self) -> (BeaconBlock<E, Payload>, Signature) {
-        match self {
-            SignedBeaconBlock::Base(block) => (BeaconBlock::Base(block.message), block.signature),
-            SignedBeaconBlock::Altair(block) => {
-                (BeaconBlock::Altair(block.message), block.signature)
-            }
-            SignedBeaconBlock::Merge(block) => (BeaconBlock::Merge(block.message), block.signature),
-        }
+        map_signed_beacon_block_into_beacon_block!(self, |block, beacon_block_cons| {
+            (beacon_block_cons(block.message), block.signature)
+        })
     }
 
     /// Accessor for the block's `message` field as a ref.
-    pub fn message(&self) -> BeaconBlockRef<'_, E, Payload> {
-        match self {
-            SignedBeaconBlock::Base(inner) => BeaconBlockRef::Base(&inner.message),
-            SignedBeaconBlock::Altair(inner) => BeaconBlockRef::Altair(&inner.message),
-            SignedBeaconBlock::Merge(inner) => BeaconBlockRef::Merge(&inner.message),
-        }
+    pub fn message<'a>(&'a self) -> BeaconBlockRef<'a, E, Payload> {
+        map_signed_beacon_block_ref_into_beacon_block_ref!(
+            &'a _,
+            self.to_ref(),
+            |inner, cons| cons(&inner.message)
+        )
     }
 
     /// Accessor for the block's `message` as a mutable reference (for testing only).
-    pub fn message_mut(&mut self) -> BeaconBlockRefMut<'_, E, Payload> {
-        match self {
-            SignedBeaconBlock::Base(inner) => BeaconBlockRefMut::Base(&mut inner.message),
-            SignedBeaconBlock::Altair(inner) => BeaconBlockRefMut::Altair(&mut inner.message),
-            SignedBeaconBlock::Merge(inner) => BeaconBlockRefMut::Merge(&mut inner.message),
-        }
+    pub fn message_mut<'a>(&'a mut self) -> BeaconBlockRefMut<'a, E, Payload> {
+        map_signed_beacon_block_ref_mut_into_beacon_block_ref_mut!(
+            &'a _,
+            self.to_mut(),
+            |inner, cons| cons(&mut inner.message)
+        )
     }
 
     /// Verify `self.signature`.
@@ -225,3 +226,165 @@ impl<E: EthSpec, Payload: ExecPayload<E>> SignedBeaconBlock<E, Payload> {
         self.message().tree_hash_root()
     }
 }
+
+// We can convert pre-Bellatrix blocks without payloads into blocks with payloads.
+impl<E: EthSpec> From<SignedBeaconBlockBase<E, BlindedPayload<E>>>
+    for SignedBeaconBlockBase<E, FullPayload<E>>
+{
+    fn from(signed_block: SignedBeaconBlockBase<E, BlindedPayload<E>>) -> Self {
+        let SignedBeaconBlockBase { message, signature } = signed_block;
+        SignedBeaconBlockBase {
+            message: message.into(),
+            signature,
+        }
+    }
+}
+
+impl<E: EthSpec> From<SignedBeaconBlockAltair<E, BlindedPayload<E>>>
+    for SignedBeaconBlockAltair<E, FullPayload<E>>
+{
+    fn from(signed_block: SignedBeaconBlockAltair<E, BlindedPayload<E>>) -> Self {
+        let SignedBeaconBlockAltair { message, signature } = signed_block;
+        SignedBeaconBlockAltair {
+            message: message.into(),
+            signature,
+        }
+    }
+}
+
+// Post-Bellatrix blocks can be "unblinded" by adding the full payload.
+// NOTE: It might be nice to come up with a `superstruct` pattern to abstract over this before
+// the first fork after Bellatrix.
+impl<E: EthSpec> SignedBeaconBlockMerge<E, BlindedPayload<E>> {
+    pub fn into_full_block(
+        self,
+        execution_payload: ExecutionPayload<E>,
+    ) -> SignedBeaconBlockMerge<E, FullPayload<E>> {
+        let SignedBeaconBlockMerge {
+            message:
+                BeaconBlockMerge {
+                    slot,
+                    proposer_index,
+                    parent_root,
+                    state_root,
+                    body:
+                        BeaconBlockBodyMerge {
+                            randao_reveal,
+                            eth1_data,
+                            graffiti,
+                            proposer_slashings,
+                            attester_slashings,
+                            attestations,
+                            deposits,
+                            voluntary_exits,
+                            sync_aggregate,
+                            execution_payload: BlindedPayload { .. },
+                        },
+                },
+            signature,
+        } = self;
+        SignedBeaconBlockMerge {
+            message: BeaconBlockMerge {
+                slot,
+                proposer_index,
+                parent_root,
+                state_root,
+                body: BeaconBlockBodyMerge {
+                    randao_reveal,
+                    eth1_data,
+                    graffiti,
+                    proposer_slashings,
+                    attester_slashings,
+                    attestations,
+                    deposits,
+                    voluntary_exits,
+                    sync_aggregate,
+                    execution_payload: FullPayload { execution_payload },
+                },
+            },
+            signature,
+        }
+    }
+}
+
+impl<E: EthSpec> SignedBeaconBlock<E, BlindedPayload<E>> {
+    pub fn try_into_full_block(
+        self,
+        execution_payload: Option<ExecutionPayload<E>>,
+    ) -> Option<SignedBeaconBlock<E, FullPayload<E>>> {
+        let full_block = match self {
+            SignedBeaconBlock::Base(block) => SignedBeaconBlock::Base(block.into()),
+            SignedBeaconBlock::Altair(block) => SignedBeaconBlock::Altair(block.into()),
+            SignedBeaconBlock::Merge(block) => {
+                SignedBeaconBlock::Merge(block.into_full_block(execution_payload?))
+            }
+        };
+        Some(full_block)
+    }
+}
+
+// We can blind blocks with payloads by converting the payload into a header.
+//
+// We can optionally keep the header, or discard it.
+impl<E: EthSpec> From<SignedBeaconBlock<E>>
+    for (SignedBlindedBeaconBlock<E>, Option<ExecutionPayload<E>>)
+{
+    fn from(signed_block: SignedBeaconBlock<E>) -> Self {
+        let (block, signature) = signed_block.deconstruct();
+        let (blinded_block, payload) = block.into();
+        (
+            SignedBeaconBlock::from_block(blinded_block, signature),
+            payload,
+        )
+    }
+}
+
+impl<E: EthSpec> From<SignedBeaconBlock<E>> for SignedBlindedBeaconBlock<E> {
+    fn from(signed_block: SignedBeaconBlock<E>) -> Self {
+        let (blinded_block, _) = signed_block.into();
+        blinded_block
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn add_remove_payload_roundtrip() {
+        type E = MainnetEthSpec;
+
+        let spec = &E::default_spec();
+        let sig = Signature::empty();
+        let blocks = vec![
+            SignedBeaconBlock::<E>::from_block(
+                BeaconBlock::Base(BeaconBlockBase::empty(spec)),
+                sig.clone(),
+            ),
+            SignedBeaconBlock::from_block(
+                BeaconBlock::Altair(BeaconBlockAltair::empty(spec)),
+                sig.clone(),
+            ),
+            SignedBeaconBlock::from_block(BeaconBlock::Merge(BeaconBlockMerge::empty(spec)), sig),
+        ];
+
+        for block in blocks {
+            let (blinded_block, payload): (SignedBlindedBeaconBlock<E>, _) = block.clone().into();
+            assert_eq!(blinded_block.tree_hash_root(), block.tree_hash_root());
+
+            if let Some(payload) = &payload {
+                assert_eq!(
+                    payload.tree_hash_root(),
+                    block
+                        .message()
+                        .execution_payload()
+                        .unwrap()
+                        .tree_hash_root()
+                );
+            }
+
+            let reconstructed = blinded_block.try_into_full_block(payload).unwrap();
+            assert_eq!(reconstructed, block);
+        }
+    }
+}
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml
index c80b936686b..f81b3ac39b5 100644
--- a/lcli/Cargo.toml
+++ b/lcli/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "lcli"
 description = "Lighthouse CLI (modeled after zcli)"
-version = "2.2.1"
+version = "2.3.1"
 authors = ["Paul Hauner "]
 edition = "2021"
 
diff --git a/lcli/Dockerfile b/lcli/Dockerfile
index 27ec8cc86ca..255f96eec16 100644
--- a/lcli/Dockerfile
+++ b/lcli/Dockerfile
@@ -8,6 +8,6 @@ ARG PORTABLE
 ENV PORTABLE $PORTABLE
 RUN cd lighthouse && make install-lcli
 
-FROM ubuntu:latest
+FROM ubuntu:22.04
 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/*
 COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli
diff --git a/lcli/src/etl/block_efficiency.rs b/lcli/src/etl/block_efficiency.rs
deleted file mode 100644
index 1c7ba1fe61a..00000000000
--- a/lcli/src/etl/block_efficiency.rs
+++ /dev/null
@@ -1,379 +0,0 @@
-use clap::ArgMatches;
-use eth2::types::*;
-use eth2::{BeaconNodeHttpClient, Timeouts};
-use log::{error, info};
-use sensitive_url::SensitiveUrl;
-use std::collections::{HashMap, HashSet};
-use std::fs::File;
-use std::io::Write;
-use std::path::PathBuf;
-use std::time::Duration;
-
-type CommitteePosition = usize;
-type Committee = u64;
-type InclusionDistance = u64;
-type ValidatorIndex = u64;
-
-#[derive(Clone, Debug, Eq, Hash, PartialEq)]
-struct UniqueAttestation {
-    slot: Slot,
-    committee_index: Committee,
-    committee_position: CommitteePosition,
-}
-
-#[derive(Clone, Debug, Eq, Hash, PartialEq)]
-struct ProposerInfo {
-    proposer_index: ValidatorIndex,
-    graffiti: String,
-}
-
-#[derive(Debug)]
-struct CommitteeInfo {
-    number_of_committees: usize,
-    validators_per_committee: usize,
-}
-
-async fn get_validator_set_len<T: EthSpec>(
-    node: &BeaconNodeHttpClient,
-    slot: Slot,
-) -> Result<usize, String> {
-    let active_validator_set = node
-        .get_beacon_states_validators(StateId::Slot(slot), None, None)
-        .await
-        .map_err(|e| format!("{:?}", e))?
-        .ok_or_else(|| "No validators found".to_string())?
-        .data;
-    Ok(active_validator_set
-        .iter()
-        .filter(|x| x.status.superstatus() == ValidatorStatus::Active)
-        .count())
-}
-
-async fn get_block_attestations_set<'a, T: EthSpec>(
-    node: &BeaconNodeHttpClient,
-    slot: Slot,
-) -> Result<Option<(HashMap<UniqueAttestation, InclusionDistance>, ProposerInfo)>, String> {
-    let mut unique_attestations_set: HashMap<UniqueAttestation, InclusionDistance> =
-        HashMap::new();
-
-    let option_block: Option<ForkVersionedResponse<SignedBeaconBlock<T>>> = node
-        .get_beacon_blocks(BlockId::Slot(slot))
-        .await
-        .map_err(|e| format!("{:?}", e))?;
-
-    let block = match option_block {
-        Some(block) => block.data,
-        // No block was proposed for this slot.
-        None => return Ok(None),
-    };
-
-    let proposer = ProposerInfo {
-        proposer_index: block.message().proposer_index(),
-        graffiti: block
-            .message()
-            .body()
-            .graffiti()
-            .as_utf8_lossy()
-            // Remove commas and apostropes from graffiti to ensure correct CSV format.
-            .replace(',', "")
-            .replace('"', "")
-            .replace('\'', ""),
-    };
-
-    let attestations = block.message().body().attestations();
-
-    for attestation in attestations.iter() {
-        for (position, voted) in attestation.aggregation_bits.iter().enumerate() {
-            if voted {
-                let unique_attestation = UniqueAttestation {
-                    slot: attestation.data.slot,
-                    committee_index: attestation.data.index,
-                    committee_position: position,
-                };
-                let inclusion_distance: u64 = slot
-                    .as_u64()
-                    .checked_sub(attestation.data.slot.as_u64())
-                    .ok_or_else(|| "Attestation slot is larger than the block slot".to_string())?;
-                unique_attestations_set.insert(unique_attestation, inclusion_distance);
-            }
-        }
-    }
-
-    Ok(Some((unique_attestations_set, proposer)))
-}
-
-async fn get_epoch_committee_data<T: EthSpec>(
-    node: &BeaconNodeHttpClient,
-    epoch: Epoch,
-) -> Result<(Vec<CommitteeData>, CommitteeInfo), String> {
-    let committee_data = node
-        .get_beacon_states_committees(
-            StateId::Slot(Epoch::start_slot(epoch, T::slots_per_epoch())),
-            None,
-            None,
-            Some(epoch),
-        )
-        .await
-        .map_err(|e| format!("{:?}", e))?
-        .ok_or_else(|| "No committees found".to_string())?
-        .data;
-
-    let committee_info = CommitteeInfo {
-        number_of_committees: committee_data.len(),
-        // FIXME: validators.len() isn't consistent between different committees in the
-        // same epoch.
-        validators_per_committee: committee_data[0].validators.len(),
-    };
-
-    Ok((committee_data, committee_info))
-}
-
-pub async fn run<T: EthSpec>(matches: &ArgMatches<'_>) -> Result<(), String> {
-    const SECONDS_PER_SLOT: Duration = Duration::from_secs(12);
-    let output_path: PathBuf = clap_utils::parse_required(matches, "output")?;
-    let start_epoch: Epoch = clap_utils::parse_required(matches, "start-epoch")?;
-    let offline_window: u64 = matches
-        .value_of("offline-window")
-        .unwrap_or("3")
-        .parse()
-        .map_err(|e| format!("{:?}", e))?;
-    let calculate_offline_vals = offline_window != 0;
-
-    if start_epoch == 0 {
-        return Err("start_epoch cannot be 0.".to_string());
-    }
-    let initialization_epoch: Epoch = start_epoch - 1;
-    let end_epoch: Epoch = clap_utils::parse_required(matches, "end-epoch")?;
-
-    if end_epoch < start_epoch {
-        return Err("start_epoch must be smaller than end_epoch".to_string());
-    }
-
-    let mut available_attestations_set: HashSet<UniqueAttestation> = HashSet::new();
-    let mut included_attestations_set: HashMap<UniqueAttestation, InclusionDistance> =
-        HashMap::new();
-
-    // Build validator set HashMap
-    // This allows a 'best effort' attempt to normalize block efficiencies.
-    let mut online_validator_set: HashMap<ValidatorIndex, Epoch> = HashMap::new();
-
-    let mut proposer_map: HashMap<Slot, ProposerInfo> = HashMap::new();
-
-    let mut file = File::options()
-        .read(true)
-        .write(true)
-        .create(true)
-        .open(output_path)
-        .map_err(|e| format!("Unable to open file: {}", e))?;
-
-    write!(file, "slot,proposer,available,included,offline,graffiti").unwrap();
-
-    // Initialize API.
-    let endpoint = matches
-        .value_of("endpoint")
-        .unwrap_or("http://localhost:5052/");
-    let node = BeaconNodeHttpClient::new(
-        SensitiveUrl::parse(endpoint).map_err(|_| "Unable to parse endpoint.".to_string())?,
-        Timeouts::set_all(SECONDS_PER_SLOT),
-    );
-
-    // Check we can connect to the API.
-    let version =
-        match node.get_node_version().await {
-            Ok(version_response) => version_response.data.version,
-            Err(_) => return Err(
-                "Error: A working HTTP API server is required. Ensure one is synced and available."
-                    .to_string(),
-            ),
-        };
-
-    // Check we are synced past the required epoch range.
-    let head_slot_synced =
-        match node.get_node_syncing().await {
-            Ok(synced_response) => synced_response.data.head_slot,
-            Err(_) => return Err(
-                "Error: A working HTTP API server is required. Ensure one is synced and available."
-                    .to_string(),
-            ),
-        };
-
-    if head_slot_synced < end_epoch.end_slot(T::slots_per_epoch()) {
-        return Err(
-            "Error: The beacon node is not sufficiently synced. Make sure your node is synced \
-            past the desired `end-epoch` and that you aren't requesting future epochs."
-                .to_string(),
-        );
-    }
-
-    // Whether the beacon node is responding to requests. This is helpful for logging.
-    let mut connected: bool = true;
-    info!("Connected to endpoint at: {:?} - {:?}", endpoint, version);
-
-    // Loop over epochs.
-    for epoch in (initialization_epoch.as_u64()..=end_epoch.as_u64()).map(Epoch::new) {
-        if epoch != initialization_epoch {
-            info!("Analysing epoch {}...", epoch);
-        } else {
-            info!("Initializing...");
-        }
-        let mut epoch_data: Vec<(Slot, Option<ProposerInfo>, usize, usize)> = Vec::new();
-
-        // Current epochs available attestations set
-        let (committee_data, committee_info) = loop {
-            if let Ok(committee_result) = get_epoch_committee_data::<T>(&node, epoch).await {
-                if !connected {
-                    info!("Connected to endpoint at: {:?} - {:?}", endpoint, version);
-                    connected = true;
-                }
-                break committee_result;
-            }
-
-            if connected {
-                connected = false;
-                error!("A request to the Beacon Node API failed. Check connectivity.");
-            }
-        };
-
-        // Ensure available attestations don't exceed the possible amount of attestations
-        // as determined by the committee size/number.
-        // This is unlikely to happen, but not impossible.
-        let max_possible_attesations =
-            committee_info.validators_per_committee * committee_info.number_of_committees;
-
-        // Get number of active validators.
-        let active_validators =
-            get_validator_set_len::<T>(&node, epoch.start_slot(T::slots_per_epoch())).await?;
-
-        for slot in epoch.slot_iter(T::slots_per_epoch()) {
-            // Get all included attestations.
-            let block_result = loop {
-                if let Ok(block_result) = get_block_attestations_set::<T>(&node, slot).await {
-                    if !connected {
-                        info!("Connected to endpoint at: {:?} - {:?}", endpoint, version);
-                        connected = true;
-                    }
-                    break block_result;
-                };
-                if connected {
-                    connected = false;
-                    error!("A request to the Beacon Node API failed. Check connectivity.");
-                }
-            };
-            let (mut attestations_in_block, proposer) = match block_result {
-                Some(output) => (output.0, Some(output.1)),
-                None => (HashMap::new(), None),
-            };
-
-            // Insert block proposer into proposer_map.
-            if let Some(proposer_info) = proposer {
-                proposer_map.insert(slot, proposer_info.clone());
-            }
-
-            // Remove duplicate attestations.
-            attestations_in_block.retain(|x, _| included_attestations_set.get(x).is_none());
-
-            // Add them to the set.
-            included_attestations_set.extend(attestations_in_block.clone());
-
-            // Remove expired available attestations.
-            available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32)));
-
-            // Don't write data from the initialization epoch.
-            if epoch != initialization_epoch {
-                let included = attestations_in_block.len();
-
-                let available = if max_possible_attesations < available_attestations_set.len() {
-                    max_possible_attesations
-                } else {
-                    available_attestations_set.len()
-                };
-
-                // Get proposer information.
-                let proposer = proposer_map.get(&slot).cloned();
-
-                // Store slot data.
-                epoch_data.push((slot, proposer, available, included));
-            }
-
-            // Included attestations are no longer available.
-            for new_attestation in &attestations_in_block {
-                available_attestations_set.remove(new_attestation.0);
-            }
-
-            // Get all available attestations.
-            for committee in &committee_data {
-                if committee.slot == slot {
-                    for position in 0..committee.validators.len() {
-                        let unique_attestation = UniqueAttestation {
-                            slot: committee.slot,
-                            committee_index: committee.index,
-                            committee_position: position,
-                        };
-                        available_attestations_set.insert(unique_attestation.clone());
-                    }
-                }
-            }
-        }
-
-        let mut offline = "None".to_string();
-        if calculate_offline_vals {
-            // Get all online validators for the epoch.
-            for committee in &committee_data {
-                for position in 0..committee.validators.len() {
-                    let unique_attestation = UniqueAttestation {
-                        slot: committee.slot,
-                        committee_index: committee.index,
-                        committee_position: position,
-                    };
-                    let index = committee.validators.get(position).ok_or_else(|| {
-                        "Error parsing validator indices from committee data".to_string()
-                    })?;
-
-                    if included_attestations_set.get(&unique_attestation).is_some() {
-                        online_validator_set.insert(*index, epoch);
-                    }
-                }
-            }
-
-            // Calculate offline validators.
-            offline = if epoch >= start_epoch + offline_window {
-                active_validators
-                    .checked_sub(online_validator_set.len())
-                    .ok_or_else(|| "Online set is greater than active set".to_string())?
-                    .to_string()
-            } else {
-                "None".to_string()
-            };
-        }
-
-        // Write epoch data.
-        for (slot, proposer, available, included) in epoch_data {
-            let proposer_index = proposer
-                .clone()
-                .map_or("None".to_string(), |x| x.proposer_index.to_string());
-            let graffiti = proposer.map_or("None".to_string(), |x| x.graffiti);
-            write!(
-                file,
-                "\n{},{},{},{},{},{}",
-                slot, proposer_index, available, included, offline, graffiti
-            )
-            .unwrap();
-        }
-
-        // Free some memory by removing included attestations older than 1 epoch.
-        included_attestations_set.retain(|x, _| {
-            x.slot >= Epoch::new(epoch.as_u64().saturating_sub(1)).start_slot(T::slots_per_epoch())
-        });
-
-        if calculate_offline_vals {
-            // Remove old validators from the validator set which are outside the offline window.
-            online_validator_set.retain(|_, x| {
-                *x >= Epoch::new(
-                    epoch
-                        .as_u64()
-                        .saturating_sub(offline_window.saturating_sub(1)),
-                )
-            });
-        }
-    }
-    Ok(())
-}
diff --git a/lcli/src/etl/mod.rs b/lcli/src/etl/mod.rs
deleted file mode 100644
index 1137fbb2ef6..00000000000
--- a/lcli/src/etl/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod block_efficiency;
diff --git a/lcli/src/indexed_attestations.rs b/lcli/src/indexed_attestations.rs
new file mode 100644
index 00000000000..6e3bfa51d32
--- /dev/null
+++ b/lcli/src/indexed_attestations.rs
@@ -0,0 +1,48 @@
+use clap::ArgMatches;
+use clap_utils::parse_required;
+use state_processing::common::get_indexed_attestation;
+use std::fs::File;
+use std::io::Read;
+use std::path::{Path, PathBuf};
+use types::*;
+
+fn read_file_bytes(filename: &Path) -> Result<Vec<u8>, String> {
+    let mut bytes = vec![];
+    let mut file = File::open(filename)
+        .map_err(|e| format!("Unable to open {}: {}", filename.display(), e))?;
+    file.read_to_end(&mut bytes)
+        .map_err(|e| format!("Unable to read {}: {}", filename.display(), e))?;
+    Ok(bytes)
+}
+
+pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
+    let spec = &T::default_spec();
+
+    let state_file: PathBuf = parse_required(matches, "state")?;
+    let attestations_file: PathBuf = parse_required(matches, "attestations")?;
+
+    let mut state = BeaconState::<T>::from_ssz_bytes(&read_file_bytes(&state_file)?, spec)
+        .map_err(|e| format!("Invalid state: {:?}", e))?;
+    state
+        .build_all_committee_caches(spec)
+        .map_err(|e| format!("{:?}", e))?;
+
+    let attestations: Vec<Attestation<T>> =
+        serde_json::from_slice(&read_file_bytes(&attestations_file)?)
+            .map_err(|e| format!("Invalid attestation list: {:?}", e))?;
+
+    let indexed_attestations = attestations
+        .into_iter()
+        .map(|att| {
+            let committee = state.get_beacon_committee(att.data.slot, att.data.index)?;
+            get_indexed_attestation(committee.committee, &att)
+        })
+        .collect::<Result<Vec<_>, _>>()
+        .map_err(|e| format!("Error constructing indexed attestation: {:?}", e))?;
+
+    let string_output = serde_json::to_string_pretty(&indexed_attestations)
+        .map_err(|e| format!("Unable to convert to JSON: {:?}", e))?;
+    println!("{}", string_output);
+
+    Ok(())
+}
diff --git a/lcli/src/main.rs b/lcli/src/main.rs
index 9af4b255488..c440f500086 100644
--- a/lcli/src/main.rs
+++ b/lcli/src/main.rs
@@ -5,8 +5,8 @@ mod check_deposit_data;
 mod create_payload_header;
 mod deploy_deposit_contract;
 mod eth1_genesis;
-mod etl;
 mod generate_bootnode_enr;
+mod indexed_attestations;
 mod insecure_validators;
 mod interop_genesis;
 mod new_testnet;
@@ -103,7 +103,13 @@ fn main() {
                         .required(true)
                         .default_value("./output.ssz")
                         .help("Path to output a SSZ file."),
-                ),
+                )
+                .arg(
+                    Arg::with_name("no-signature-verification")
+                        .long("no-signature-verification")
+                        .takes_value(false)
+                        .help("Disable signature verification.")
+                )
         )
         .subcommand(
             SubCommand::with_name("pretty-ssz")
@@ -600,60 +606,23 @@ fn main() {
             )
         )
         .subcommand(
-            SubCommand::with_name("etl-block-efficiency")
-                .about(
-                    "Performs ETL analysis of block efficiency. Requires a Beacon Node API to \
-                    extract data from.",
-                )
+            SubCommand::with_name("indexed-attestations")
+                .about("Convert attestations to indexed form, using the committees from a state.")
                 .arg(
-                    Arg::with_name("endpoint")
-                        .long("endpoint")
-                        .short("e")
+                    Arg::with_name("state")
+                        .long("state")
+                        .value_name("SSZ_STATE")
                         .takes_value(true)
-                        .default_value("http://localhost:5052")
-                        .help(
-                            "The endpoint of the Beacon Node API."
-                        ),
-                )
-                .arg(
-                    Arg::with_name("output")
-                        .long("output")
-                        .short("o")
-                        .takes_value(true)
-                        .help("The path of the output data in CSV file.")
-                        .required(true),
-                )
-                .arg(
-                    Arg::with_name("start-epoch")
-                        .long("start-epoch")
-                        .takes_value(true)
-                        .help(
-                            "The first epoch in the range of epochs to be evaluated. Use with \
-                            --end-epoch.",
-                        )
-                        .required(true),
-                )
-                .arg(
-                    Arg::with_name("end-epoch")
-                        .long("end-epoch")
-                        .takes_value(true)
-                        .help(
-                            "The last epoch in the range of epochs to be evaluated. Use with \
-                            --start-epoch.",
-                        )
-                        .required(true),
+                        .required(true)
+                        .help("BeaconState to generate committees from (SSZ)"),
                 )
                 .arg(
-                    Arg::with_name("offline-window")
-                        .long("offline-window")
+                    Arg::with_name("attestations")
+                        .long("attestations")
+                        .value_name("JSON_ATTESTATIONS")
                         .takes_value(true)
-                        .default_value("3")
-                        .help(
-                            "If a validator does not submit an attestion within this many epochs, \
-                            they are deemed offline. For example, for a offline window of 3, if a \
-                            validator does not attest in epochs 4, 5 or 6, it is deemed offline \
-                            during epoch 6. A value of 0 will skip these checks."
-                        )
+                        .required(true)
+                        .help("List of Attestations to convert to indexed form (JSON)"),
                 )
         )
         .get_matches();
@@ -737,10 +706,8 @@ fn run(
            .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)),
        ("insecure-validators", Some(matches)) => insecure_validators::run(matches)
            .map_err(|e| format!("Failed to run insecure-validators command: {}", e)),
-        ("etl-block-efficiency", Some(matches)) => env
-            .runtime()
-            .block_on(etl::block_efficiency::run::<T>(matches))
-            .map_err(|e| format!("Failed to run etl-block_efficiency: {}", e)),
+        ("indexed-attestations", Some(matches)) => indexed_attestations::run::<T>(matches)
+            .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)),
        (other, _) => Err(format!("Unknown subcommand {}.
See --help.", other)),
    }
}
diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs
index f78c6b005e7..74be1e62846 100644
--- a/lcli/src/transition_blocks.rs
+++ b/lcli/src/transition_blocks.rs
@@ -7,6 +7,7 @@ use state_processing::{
 use std::fs::File;
 use std::io::prelude::*;
 use std::path::{Path, PathBuf};
+use std::time::Instant;
 use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock};

 pub fn run_transition_blocks<T: EthSpec>(
@@ -31,6 +32,13 @@ pub fn run_transition_blocks<T: EthSpec>(
         .parse::<PathBuf>()
         .map_err(|e| format!("Failed to parse output path: {}", e))?;

+    let no_signature_verification = matches.is_present("no-signature-verification");
+    let signature_strategy = if no_signature_verification {
+        BlockSignatureStrategy::NoVerification
+    } else {
+        BlockSignatureStrategy::VerifyIndividual
+    };
+
     info!("Using {} spec", T::spec_name());
     info!("Pre-state path: {:?}", pre_state_path);
     info!("Block path: {:?}", block_path);
@@ -43,7 +51,9 @@ pub fn run_transition_blocks<T: EthSpec>(
     let block: SignedBeaconBlock<T> =
         load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?;

-    let post_state = do_transition(pre_state, block, spec)?;
+    let t = Instant::now();
+    let post_state = do_transition(pre_state, block, signature_strategy, spec)?;
+    println!("Total transition time: {}ms", t.elapsed().as_millis());

     let mut output_file =
         File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?;
@@ -58,31 +68,58 @@ pub fn run_transition_blocks<T: EthSpec>(
 fn do_transition<T: EthSpec>(
     mut pre_state: BeaconState<T>,
     block: SignedBeaconBlock<T>,
+    signature_strategy: BlockSignatureStrategy,
     spec: &ChainSpec,
 ) -> Result<BeaconState<T>, String> {
+    let t = Instant::now();
     pre_state
         .build_all_caches(spec)
         .map_err(|e| format!("Unable to build caches: {:?}", e))?;
+    println!("Build caches: {}ms", t.elapsed().as_millis());
+
+    let t = Instant::now();
+    pre_state
+        .update_tree_hash_cache()
+        .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?;
+    println!("Initial tree hash: {}ms", t.elapsed().as_millis());

     // Transition the parent state to the block slot.
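+    // Note: `per_slot_processing` runs once for every slot between the pre-state's slot and
+    // the block's slot, so any skipped slots are replayed (and timed) as part of this loop.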
+    let t = Instant::now();
     for i in pre_state.slot().as_u64()..block.slot().as_u64() {
         per_slot_processing(&mut pre_state, None, spec)
             .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?;
     }
+    println!("Slot processing: {}ms", t.elapsed().as_millis());

+    let t = Instant::now();
+    pre_state
+        .update_tree_hash_cache()
+        .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?;
+    println!("Pre-block tree hash: {}ms", t.elapsed().as_millis());
+
+    let t = Instant::now();
     pre_state
         .build_all_caches(spec)
         .map_err(|e| format!("Unable to build caches: {:?}", e))?;
+    println!("Build all caches (again): {}ms", t.elapsed().as_millis());

+    let t = Instant::now();
     per_block_processing(
         &mut pre_state,
         &block,
         None,
-        BlockSignatureStrategy::VerifyIndividual,
+        signature_strategy,
         VerifyBlockRoot::True,
         spec,
     )
     .map_err(|e| format!("State transition failed: {:?}", e))?;
+    println!("Process block: {}ms", t.elapsed().as_millis());
+
+    let t = Instant::now();
+    pre_state
+        .update_tree_hash_cache()
+        .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?;
+    println!("Post-block tree hash: {}ms", t.elapsed().as_millis());

     Ok(pre_state)
 }
@@ -97,5 +134,12 @@ pub fn load_from_ssz_with(
     let mut bytes = vec![];
     file.read_to_end(&mut bytes)
         .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?;
-    decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e))
+    let t = Instant::now();
+    let result = decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e));
+    println!(
+        "SSZ decoding {}: {}ms",
+        path.display(),
+        t.elapsed().as_millis()
+    );
+    result
 }
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml
index 83fd19c2bcb..35fee803157 100644
--- a/lighthouse/Cargo.toml
+++ b/lighthouse/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "lighthouse"
-version = "2.2.1"
+version = "2.3.1"
 authors = ["Sigma Prime <info@sigmaprime.io>"]
 edition = "2021"
 autotests = false
diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs
index 91feef5b058..160f696542d 100644
--- a/lighthouse/environment/src/lib.rs
+++ b/lighthouse/environment/src/lib.rs
@@ -13,9 +13,7 @@ use futures::channel::mpsc::{channel, Receiver, Sender};
 use futures::{future, StreamExt};
 use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger};
-use sloggers::{
-    file::FileLoggerBuilder, null::NullLoggerBuilder, types::Format, types::Severity, Build,
-};
+use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build};
 use std::fs::create_dir_all;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -33,6 +31,8 @@ use {
 #[cfg(not(target_family = "unix"))]
 use {futures::channel::oneshot, std::cell::RefCell};

+pub use task_executor::test_utils::null_logger;
+
 const LOG_CHANNEL_SIZE: usize = 2048;
 /// The maximum time in seconds the client will wait for all internal tasks to shutdown.
 const MAXIMUM_SHUTDOWN_TIME: u64 = 15;
@@ -506,13 +506,6 @@ impl Environment {
     }
 }

-pub fn null_logger() -> Result<Logger, String> {
-    let log_builder = NullLoggerBuilder;
-    log_builder
-        .build()
-        .map_err(|e| format!("Failed to start null logger: {:?}", e))
-}
-
 #[cfg(target_family = "unix")]
 struct SignalFuture {
     signal: Signal,
diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml
index 8424a2fdc34..33aa8ad165d 100644
--- a/lighthouse/environment/tests/testnet_dir/config.yaml
+++ b/lighthouse/environment/tests/testnet_dir/config.yaml
@@ -72,8 +72,8 @@ CHURN_LIMIT_QUOTIENT: 65536

 # Fork choice
 # ---------------------------------------------------------------
-# 70%
-PROPOSER_SCORE_BOOST: 70
+# 40%
+PROPOSER_SCORE_BOOST: 40

 # Deposit contract
 # ---------------------------------------------------------------
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 3088fa423df..effccbbd662 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -108,6 +108,26 @@ fn disable_lock_timeouts_flag() {
         .with_config(|config| assert!(!config.chain.enable_lock_timeouts));
 }

+#[test]
+fn fork_choice_before_proposal_timeout_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.fork_choice_before_proposal_timeout_ms,
+                beacon_node::beacon_chain::chain_config::DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT
+            )
+        });
+}
+
+#[test]
+fn fork_choice_before_proposal_timeout_zero() {
+    CommandLineTest::new()
+        .flag("fork-choice-before-proposal-timeout", Some("0"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0));
+}
+
 #[test]
 fn freezer_dir_flag() {
     let dir = TempDir::new().expect("Unable to create temporary directory");
@@ -205,6 +225,25 @@ fn eth1_purge_cache_flag() {
         .run_with_zero_port()
         .with_config(|config| assert!(config.eth1.purge_cache));
 }
+#[test]
+fn eth1_cache_follow_distance_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(config.eth1.cache_follow_distance, None);
+            assert_eq!(config.eth1.cache_follow_distance(), 3 * 2048 / 4);
+        });
+}
+#[test]
+fn eth1_cache_follow_distance_manual() {
+    CommandLineTest::new()
+        .flag("eth1-cache-follow-distance", Some("128"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(config.eth1.cache_follow_distance, Some(128));
+            assert_eq!(config.eth1.cache_follow_distance(), 128);
+        });
+}

 // Tests for Bellatrix flags.
 #[test]
diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml
index 22b3408ab39..368350f11b9 100644
--- a/slasher/Cargo.toml
+++ b/slasher/Cargo.toml
@@ -13,7 +13,8 @@ flate2 = { version = "1.0.14", features = ["zlib"], default-features = false }
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../common/lighthouse_metrics" }
 filesystem = { path = "../common/filesystem" }
-mdbx = { package = "libmdbx", version = "0.1.0" }
+# MDBX is pinned to the last version that supports Windows and macOS. This is only viable short-term.
+mdbx = { package = "libmdbx", version = "=0.1.4" }
 lru = "0.7.1"
 parking_lot = "0.12.0"
 rand = "0.8.5"
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs
index cdf865b2df5..5a92961ba74 100644
--- a/testing/ef_tests/src/cases/fork_choice.rs
+++ b/testing/ef_tests/src/cases/fork_choice.rs
@@ -338,7 +338,11 @@ impl Tester {
         // function.
if !valid { // A missing parent block whilst `valid == false` means the test should pass. - if let Some(parent_block) = self.harness.chain.get_block(&block.parent_root()).unwrap() + if let Some(parent_block) = self + .harness + .chain + .get_blinded_block(&block.parent_root()) + .unwrap() { let parent_state_root = parent_block.state_root(); let mut state = self diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index dd5d03be89b..7df88aa0d7c 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -22,7 +22,6 @@ pub struct ExecutionEngine { engine: E, #[allow(dead_code)] datadir: TempDir, - http_port: u16, http_auth_port: u16, child: Child, } @@ -46,16 +45,11 @@ impl ExecutionEngine { Self { engine, datadir, - http_port, http_auth_port, child, } } - pub fn http_url(&self) -> SensitiveUrl { - SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() - } - pub fn http_auth_url(&self) -> SensitiveUrl { SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap() } diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 0de56fba3cc..87fdaec14a4 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -40,77 +40,3 @@ pub fn geth_genesis_json() -> Value { "baseFeePerGas":"0x7" }) } - -/// Sourced from: -/// -/// https://github.com/NethermindEth/nethermind/blob/themerge_kintsugi/src/Nethermind/Chains/themerge_kintsugi_m2.json -pub fn nethermind_genesis_json() -> Value { - json!({ - "name": "TheMerge_Devnet", - "engine": { - "clique": { - "params": { - "period": 5, - "epoch": 30000 - } - } - }, - "params": { - "gasLimitBoundDivisor": "0x400", - "accountStartNonce": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID": 1, - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip158Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip140Transition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip145Transition": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip1283Transition": "0x0", - "eip1283DisableTransition": "0x0", - "eip152Transition": "0x0", - "eip1108Transition": "0x0", - "eip1344Transition": "0x0", - "eip1884Transition": "0x0", - "eip2028Transition": "0x0", - "eip2200Transition": "0x0", - "eip2565Transition": "0x0", - "eip2929Transition": "0x0", - "eip2930Transition": "0x0", - "eip1559Transition": "0x0", - "eip3198Transition": "0x0", - "eip3529Transition": "0x0", - "eip3541Transition": "0x0" - }, - "genesis": { - "seal": { - "ethereum": { - "nonce": "0x42", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x000000000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - "author": 
"0x0000000000000000000000000000000000000000", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" - }, - "accounts": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance":"0x6d6172697573766477000000" - } - } - }) -} diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index 30c8132b7c0..a4ec0f92153 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -15,7 +15,7 @@ use nethermind::NethermindEngine; use test_rig::TestRig; /// Set to `false` to send logs to the console during tests. Logs are useful when debugging. -const SUPPRESS_LOGS: bool = true; +const SUPPRESS_LOGS: bool = false; fn main() { if cfg!(windows) { diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 833409c69e9..be638fe0424 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -1,13 +1,12 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; -use crate::genesis_json::nethermind_genesis_json; +use std::env; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; -use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const NETHERMIND_BRANCH: &str = "kiln"; +const NETHERMIND_BRANCH: &str = "master"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -71,38 +70,33 @@ impl NethermindEngine { impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { - let datadir = TempDir::new().unwrap(); - - let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); - let json = nethermind_genesis_json(); - serde_json::to_writer(&mut file, &json).unwrap(); - - datadir + TempDir::new().unwrap() } fn start_client( datadir: &TempDir, - http_port: u16, + _http_port: u16, http_auth_port: u16, jwt_secret_path: PathBuf, ) -> Child { let network_port = unused_tcp_port().unwrap(); - let genesis_json_path = datadir.path().join("genesis.json"); Command::new(Self::binary_path()) .arg("--datadir") .arg(datadir.path().to_str().unwrap()) .arg("--config") - .arg("themerge_kiln_testvectors") - .arg("--Init.ChainSpecPath") - .arg(genesis_json_path.to_str().unwrap()) + .arg("kiln") + .arg("--Merge.TerminalTotalDifficulty") + .arg("0") .arg("--JsonRpc.AdditionalRpcUrls") - .arg(format!("http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client|no-auth,http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", http_port, http_auth_port)) + .arg(format!( + "http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", + http_auth_port + )) .arg("--JsonRpc.EnabledModules") .arg("net,eth,subscribe,web3,admin,engine") .arg("--JsonRpc.Port") - .arg(http_port.to_string()) + .arg(http_auth_port.to_string()) .arg("--Network.DiscoveryPort") .arg(network_port.to_string()) .arg("--Network.P2PPort") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 95751d1a8d5..21162fea56d 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -5,11 +5,11 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; 
 use tokio::time::sleep;
 use types::{
-    Address, ChainSpec, EthSpec, ExecutionBlockHash, FullPayload, Hash256, MainnetEthSpec, Slot,
-    Uint256,
+    Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256,
+    MainnetEthSpec, Slot, Uint256,
 };

-const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10);
+const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20);

 struct ExecutionPair<E> {
     /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP.
@@ -68,7 +68,7 @@ impl TestRig {
         let ee_b = {
             let execution_engine = ExecutionEngine::new(generic_engine);
-            let urls = vec![execution_engine.http_url()];
+            let urls = vec![execution_engine.http_auth_url()];

             let config = execution_layer::Config {
                 execution_endpoints: urls,
@@ -214,6 +214,7 @@ impl TestRig {
             .await
             .unwrap();
         assert_eq!(status, PayloadStatus::Valid);
+        check_payload_reconstruction(&self.ee_a, &valid_payload).await;

         /*
          * Execution Engine A:
@@ -288,6 +289,7 @@ impl TestRig {
             .await
             .unwrap();
         assert_eq!(status, PayloadStatus::Valid);
+        check_payload_reconstruction(&self.ee_a, &second_payload).await;

         /*
          * Execution Engine A:
@@ -359,6 +361,7 @@ impl TestRig {
             .await
             .unwrap();
         assert_eq!(status, PayloadStatus::Valid);
+        check_payload_reconstruction(&self.ee_b, &valid_payload).await;

         /*
          * Execution Engine B:
@@ -372,6 +375,7 @@ impl TestRig {
             .await
             .unwrap();
         assert_eq!(status, PayloadStatus::Valid);
+        check_payload_reconstruction(&self.ee_b, &second_payload).await;

         /*
          * Execution Engine B:
@@ -392,6 +396,22 @@ impl TestRig {
     }
 }

+/// Check that the given payload can be reconstructed by fetching it from the EE.
+///
+/// Panics if payload reconstruction fails.
+async fn check_payload_reconstruction<E: GenericExecutionEngine>(
+    ee: &ExecutionPair<E>,
+    payload: &ExecutionPayload<MainnetEthSpec>,
+) {
+    let reconstructed = ee
+        .execution_layer
+        .get_payload_by_block_hash(payload.block_hash)
+        .await
+        .unwrap()
+        .unwrap();
+    assert_eq!(reconstructed, *payload);
+}
+
 /// Returns the duration since the unix epoch.
 pub fn timestamp_now() -> u64 {
     SystemTime::now()
diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs
index 3668cf00644..6cfc3e6db79 100644
--- a/testing/simulator/src/local_network.rs
+++ b/testing/simulator/src/local_network.rs
@@ -107,15 +107,16 @@ impl LocalNetwork {
             beacon_config.network.discv5_config.table_filter = |_| true;
         }

-        let mut write_lock = self_1.beacon_nodes.write();
-        let index = write_lock.len();
-
+        // We create the beacon node without holding the lock, so that the lock isn't held
+        // across the await. This is only correct if this function never runs in parallel
+        // with itself (which at the time of writing, it does not).
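+        // If that assumption is ever broken, two concurrent calls could read the same
+        // `index` here before either of them pushes its new node below.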
+ let index = self_1.beacon_nodes.read().len(); let beacon_node = LocalBeaconNode::production( self.context.service_context(format!("node_{}", index)), beacon_config, ) .await?; - write_lock.push(beacon_node); + self_1.beacon_nodes.write().push(beacon_node); Ok(()) } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 128c4a6fe92..800f988654b 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -36,7 +36,9 @@ mod tests { use types::*; use url::Url; use validator_client::{ - initialized_validators::{load_pem_certificate, InitializedValidators}, + initialized_validators::{ + load_pem_certificate, load_pkcs12_identity, InitializedValidators, + }, validator_store::ValidatorStore, SlashingDatabase, SLASHING_PROTECTION_FILENAME, }; @@ -108,7 +110,18 @@ mod tests { } fn root_certificate_path() -> PathBuf { - tls_dir().join("cert.pem") + tls_dir().join("lighthouse").join("web3signer.pem") + } + + fn client_identity_path() -> PathBuf { + tls_dir().join("lighthouse").join("key.p12") + } + + fn client_identity_password() -> String { + fs::read_to_string(tls_dir().join("lighthouse").join("password.txt")) + .unwrap() + .trim() + .to_string() } /// A testing rig which holds a live Web3Signer process. @@ -155,8 +168,9 @@ mod tests { File::create(&keystore_dir.path().join("key-config.yaml")).unwrap(); serde_yaml::to_writer(key_config_file, &key_config).unwrap(); - let tls_keystore_file = tls_dir().join("key.p12"); - let tls_keystore_password_file = tls_dir().join("password.txt"); + let tls_keystore_file = tls_dir().join("web3signer").join("key.p12"); + let tls_keystore_password_file = tls_dir().join("web3signer").join("password.txt"); + let tls_known_clients_file = tls_dir().join("web3signer").join("known_clients.txt"); let stdio = || { if SUPPRESS_WEB3SIGNER_LOGS { @@ -173,7 +187,10 @@ mod tests { )) .arg(format!("--http-listen-host={}", listen_address)) .arg(format!("--http-listen-port={}", listen_port)) - .arg("--tls-allow-any-client=true") + .arg(format!( + "--tls-known-clients-file={}", + tls_known_clients_file.to_str().unwrap() + )) .arg(format!( "--tls-keystore-file={}", tls_keystore_file.to_str().unwrap() @@ -193,8 +210,11 @@ mod tests { let url = Url::parse(&format!("https://{}:{}", listen_address, listen_port)).unwrap(); let certificate = load_pem_certificate(root_certificate_path()).unwrap(); + let identity = + load_pkcs12_identity(client_identity_path(), &client_identity_password()).unwrap(); let http_client = Client::builder() .add_root_certificate(certificate) + .identity(identity) .build() .unwrap(); @@ -358,6 +378,8 @@ mod tests { url: signer_rig.url.to_string(), root_certificate_path: Some(root_certificate_path()), request_timeout_ms: None, + client_identity_path: Some(client_identity_path()), + client_identity_password: Some(client_identity_password()), }, }; ValidatorStoreRig::new(vec![validator_definition], spec).await diff --git a/testing/web3signer_tests/tls/cert.pem b/testing/web3signer_tests/tls/cert.pem deleted file mode 100644 index 7f2d5f1f2c0..00000000000 --- a/testing/web3signer_tests/tls/cert.pem +++ /dev/null @@ -1,32 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFmTCCA4GgAwIBAgIUd6yn4o1bKr2YpzTxcBmoiM4PorkwDQYJKoZIhvcNAQEL -BQAwajELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 -eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRIwEAYD -VQQDDAkxMjcuMC4wLjEwIBcNMjEwOTA2MDgxMDU2WhgPMjEyMTA4MTMwODEwNTZa -MGoxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTERMA8GA1UEBwwIU29tZUNpdHkx 
-EjAQBgNVBAoMCU15Q29tcGFueTETMBEGA1UECwwKTXlEaXZpc2lvbjESMBAGA1UE -AwwJMTI3LjAuMC4xMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAx/a1 -SRqehj/D18166GcJh/zOyDtZCbeoLWcVfS1aBq+J1FFy4LYKWgwNhOYsrxHLhsIr -/LpHpRm/FFqLPxGNoEPMcJi1dLcELPcJAG1l+B0Ur52V/nxOmzn71Mi0WQv0oOFx -hOtUOToY3heVW0JXgrILhdD834mWdsxBWPhq1LeLZcMth4woMgD9AH4KzxUNtFvo -8i8IneEYvoDIQ8dGZ5lHnFV5kaC8Is0hevMljTw83E9BD0B/bpp+o2rByccVulsy -/WK763tFteDxK5eZZ3/5rRId+uoN5+D4oRnG6zuki0t7+eTZo1cUPi28IIDTNjPR -Xvw35dt+SdTDjtI/FUf8VWhLIHZZXaevFliuBbcuOMpWCdjAdwb7Uf9WpMnxzZtK -fatAC9dk3VPsehFcf6w/H+ah3tu/szAaDJ5zZb0m05cAxDZekZ9SccBIPglccM3f -vzNjrDIoi4z7uCiTJc2FW0qb2MzusQsGjtLW53n7IGoSIFDvOhiZa9D+vOE2wG6o -VNf2K9/QvwNDCzRvW81mcUCRr/BhcAmX5drwYPwUEcdBXQeFPt6nZ33fmIgl2Cbv -io9kUJzjlQWOZ6BX5FmC69dWAedcfHGY693tG6LQKk9a5B+NiuIB4m1bHcvjYhsh -GqVrw980YIN52RmIoskGRdt34/gKHWcqjIEK0+kCAwEAAaM1MDMwCwYDVR0PBAQD -AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZI -hvcNAQELBQADggIBAILVu5ppYnumyxvchgSLAi/ahBZV/wmtI3X8vxOHuQwYF8rZ -7b2gd+PClJBuhxeOEJZTtCSDMMUdlBXsxnoftp0TcDhFXeAlSp0JQe38qGAlX94l -4ZH39g+Ut5kVpImb/nI/iQhdOSDzQHaivTMjhNlBW+0EqvVJ1YsjjovtcxXh8gbv -4lKpGkuT6xVRrSGsZh0LQiVtngKNqte8vBvFWBQfj9JFyoYmpSvYl/LaYjYkmCya -V2FbfrhDXDI0IereknqMKDs8rF4Ik6i22b+uG91yyJsRFh63x7agEngpoxYKYV6V -5YXIzH5kLX8hklHnLgVhES2ZjhheDgC8pCRUCPqR4+KVnQcFRHP9MJCqcEIFAppD -oHITdiFDs/qE0EDV9WW1iOWgBmdgxUZ8dh1CfW+7B72+Uy0/eXWdnlrRDe5cN/hs -xXpnLCMfzSDEMA4WmImabpU/fRXL7pazZENJj7iyIAr/pEL34+QjqVfWaXkWrHoN -KsrkxTdoZNVdarBDSw9JtMUECmnWYOjMaOm1O8waib9H1SlPSSPrK5pGT/6h1g0d -LM982X36Ej8XyW33E5l6qWiLVRye7SaAvZbVLsyd+cfemi6BPsK+y09eCs4a+Qp7 -9YWZOPT6s/ahJYdTGF961JZ62ypIioimW6wx8hAMCkKKfhn1WI0+0RlOrjbw ------END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index 1e45bb61b54..f00e7b7e37a 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,4 +1,7 @@ #!/bin/bash -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout key.key -out cert.pem -config config && -openssl pkcs12 -export -out key.p12 -inkey key.key -in cert.pem -password pass:$(cat password.txt) - +openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && +openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && +cp web3signer/cert.pem lighthouse/web3signer.pem && +openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && +openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/key.key b/testing/web3signer_tests/tls/key.key deleted file mode 100644 index 6f1331db1aa..00000000000 --- a/testing/web3signer_tests/tls/key.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDH9rVJGp6GP8PX -zXroZwmH/M7IO1kJt6gtZxV9LVoGr4nUUXLgtgpaDA2E5iyvEcuGwiv8ukelGb8U -Wos/EY2gQ8xwmLV0twQs9wkAbWX4HRSvnZX+fE6bOfvUyLRZC/Sg4XGE61Q5Ohje -F5VbQleCsguF0PzfiZZ2zEFY+GrUt4tlwy2HjCgyAP0AfgrPFQ20W+jyLwid4Ri+ -gMhDx0ZnmUecVXmRoLwizSF68yWNPDzcT0EPQH9umn6jasHJxxW6WzL9Yrvre0W1 
-4PErl5lnf/mtEh366g3n4PihGcbrO6SLS3v55NmjVxQ+LbwggNM2M9Fe/Dfl235J -1MOO0j8VR/xVaEsgdlldp68WWK4Fty44ylYJ2MB3BvtR/1akyfHNm0p9q0AL12Td -U+x6EVx/rD8f5qHe27+zMBoMnnNlvSbTlwDENl6Rn1JxwEg+CVxwzd+/M2OsMiiL -jPu4KJMlzYVbSpvYzO6xCwaO0tbnefsgahIgUO86GJlr0P684TbAbqhU1/Yr39C/ -A0MLNG9bzWZxQJGv8GFwCZfl2vBg/BQRx0FdB4U+3qdnfd+YiCXYJu+Kj2RQnOOV -BY5noFfkWYLr11YB51x8cZjr3e0botAqT1rkH42K4gHibVsdy+NiGyEapWvD3zRg -g3nZGYiiyQZF23fj+AodZyqMgQrT6QIDAQABAoICAGMICuZGmaXxJIPXDvzUMsM3 -cA14XvNSEqdRuzHAaSqQexk8sUEaxuurtnJQMGcP0BVQSsqiUuMwahKheP7mKZbq -nPBSoONJ1HaUbc/ZXjvP4zPKPsPHOoLj55WNRMwpAKFApaDnj1G8NR6g3WZR59ch -aFWAmAv5LxxsshxnAzmQIShnzj+oKSwCk0pQIfhG+/+L2UVAB+tw1HlcfFIc+gBK -yE1jg46c5S/zGZaznrBg2d9eHOF51uKm/vrd31WYFGmzyv/0iw7ngTG/UpF9Rgsd -NUECjPh8PCDPqTLX+kz7v9UAsEiljye2856LtfT++BuK9DEvhlt/Jf9YsPUlqPl3 -3wUG8yiqBQrlGTUY1KUdHsulmbTiq4Q9ch5QLcvazk+9c7hlB6WP+/ofqgIPSlDt -fOHkROmO7GURz78lVM8+E/pRgy6qDq+yM1uVMeWWme4hKfOAL2lnJDTO4PKNQA4b -03YXsdVSz4mm9ppnyHIPXei6/qHpU/cRRf261HNEI16eC0ZnoIAxhORJtxo6kMns -am4yuhHm9qLjbOI1uJPAgpR/o0O5NaBgkdEzJ102pmv2grf2U743n9bqu+y/vJF9 -HRmMDdJgZSmcYxQuLe0INzLDnTzOdmjbqjB6lDsSwtrEo/KLtXIStrFMKSHIE/QV -96u8nWPomN83HqkVvQmBAoIBAQDrs8eKAQ3meWtmsSqlzCNVAsJA1xV4DtNaWBTz -MJXwRWywem/sHCoPsJ7c5UTUjQDOfNEUu8iW/m60dt0U+81/O9TLBP1Td6jxLg8X -92atLs8wHQDUqrgouce0lyS7to+R3K+N8YtWL2y9w9jbf/XT9iTL5TXGc8RFrmMg -nDQ1EShojU0U0I1lKpDJTx2R1FANfyd3iHSsENRwYj5MF8iQSag79Ek06BKLWHHt -OJj2oiO3VIAKQYVA9aKxfiiOWXWumPHq7r6UoNJK3UNzfBvguhEzl8k6VjZBCR9q -WwvSTba4mOgHMIXdV/9Wr3y8Cus2lX5YGOK4OUx/ZaCdaBtZAoIBAQDZLwwZDHen -Iw1412m/D/6HBS38bX78t+0hL7LNqgVpiZdNbLq57SGRbUnZZ/jlmtyLw3be6BV3 -IcLyflYW+4Wi8AAqVADlXjMC+GIuDNCCicwWxJeIFaAGM7Jt6Fa08H/loIAMM7NC -y1CmQnCR9OnHRdcBaU1y4ForP4f8B/hwh3hSQEFPKgF/MQwDnR7UzPgRrUOTovN/ -4D7j1Wx6FpYX9hGZL0i2K1ygRZE03t6VV7xhCkne96VvDEj1Zo/S4HFaEmDD+EjR -pvXVhPRed7GZ6AMs2JxOPhRiu3G+AQL1HPMDlA8QiPtTh0Zf99j/5NXKBEyH/fp1 -V04L1s7wf7sRAoIBAQCb3/ftJ0dXDSNe9Xl7ziXrmXh3wwYasMtLawbn0VDHZlI7 -36zW28VhPO/CrAi5/En1RIxNBubgHIF/7T/GGcRMCXhvjuwtX+wlG821jtKjY1p3 -uiaLfh9uJ3aP0ojjbxdBYk3jNENuisyCLtviRZyAQb8R7JKEnJjHcE10CnloQuGT -SycXxdhMeDrqNt0aTOtoEZg7L83g4PxtGjuSvQPRkDSm+aXUTEm/R42IUS6vpIi0 -PDi1D6GdVRT0BrexdC4kelc6hAsbZcPM6MkrvX7+Pm8TzKSyZMNafTr+bhnCScy2 -BcEkyA0vVXuyizmVbi8hmPnGLyb4qEQT2FTA5FF5AoIBAQCEj0vCCjMKB8IUTN7V -aGzBeq7b0PVeSODqjZOEJk9RYFLCRigejZccjWky0lw/wGr2v6JRYbSgVzIHEod3 -VaP2lKh1LXqyhPF70aETXGz0EClKiEm5HQHkZy90GAi8PcLCpFkjmXbDwRcDs6/D -1onOQFmAGgbUpA1FMmzMrwy7mmQdR+zU5d2uBYDAv+jumACdwXRqq14WYgfgxgaE -6j5Id7+8EPk/f230wSFk9NdErh1j2YTHG76U7hml9yi33JgzEt6PHn9Lv61y2sjQ -1BvJxawSdk/JDekhbil5gGKOu1G0kG01eXZ1QC77Kmr/nWvD9yXDJ4j0kAop/b2n -Wz8RAoIBAQDn1ZZGOJuVRUoql2A65zwtu34IrYD+2zQQCBf2hGHtwXT6ovqRFqPV -vcQ7KJP+zVT4GimFlZy7lUx8H4j7+/Bxn+PpUHHoDYjVURr12wk2w8pxwcKnbiIw -qaMkF5KG2IUVb7F8STEuKv4KKeuRlB4K2HC2J8GZOLXO21iOqNMhMRO11wp9jkKI -n83wtLH34lLRz4VzIW3rfvPeVoP1zoDkLvD8k/Oyjrf4Bishg9vCHyhQkB1JDtMU -1bfH8mxwKozakpJa23a8lE5NLoc9NOZrKM4+cefY1MZ3FjlaZfkS5jlhY4Qhx+fl -+9j5xRPaH+mkJHaJIqzQad+b1A2eIa+L ------END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/key.p12 b/testing/web3signer_tests/tls/key.p12 deleted file mode 100644 index 2f19e57f026..00000000000 Binary files a/testing/web3signer_tests/tls/key.p12 and /dev/null differ diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem new file mode 100644 index 00000000000..061b0e3cd7b --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFmzCCA4OgAwIBAgIUXpTV/0rd/GAoCfCyzPOtwcb4t7YwDQYJKoZIhvcNAQEL 
+BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD +VQQDDApsaWdodGhvdXNlMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 +MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV +BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC0 +HrD6fJGcqm8zwEs+Y+FGIpRYPyjdlugj3qqwvMSI9jeDW2fr1zUl/wIuf4o+O16P +XZitHgAyg3lph1x/kKL59c4rwWxUabSudAQZ6YCJHo4jWf3hR+UmMQEdNPgNrofv +vGCA7CjLPKZfW6pzZo9kvMwbgeRNuJCuKZ0v/p9Y/lOplj+TTBq16HMtsSarib3b +nKEaRdLCQgTJS3vwbtEiCC9BcZAkvs0fmVUIENRVeKGZIqcAdiOTUPvs4zctchzJ +MGG+TA2ckKIpGT0F4be8gy1uHyP0fncJAtNvkGRPmVQcNew/HIIkJjiJvmrwewn4 +dYqYAe+aEL5AB4dZhlKjIPENfq38t7iY/aXV8COTQZGMEZ7Diext1JmEb34vEXgS +7Gk9ZSCp/1X+fk/wW4uQeRlGwblaRtRxBrfJWmEoQHohzyP4jog8dajSZTjUbsA+ +HGaeZo1k3M0i3lxRBbLGamPODIO9CVGwKaiEJTy4bEpreM2tLR1rk5JECf46WPUR +SN6OdHrO5x38wzQlUv+Hb4vN4p0ZkiGJO62Duuw6hbGA6UIBffM20QuJUtz3Pa8D +un/NunIagmIL5KCsrDtZkt5wBsX3XU6OPdfZrfgOIXNfQmpbbeAUOok1NOgszXjP +DKCsnxZZBtPhXC1VnRkiWK50GNmWe8MLqXR/G12TXwIDAQABozUwMzALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq +hkiG9w0BAQsFAAOCAgEAcCGqC1nhjDiuF87LgzwuKdMV4NEpTGHa8aHrta/UrzO3 +Lf7fcQvB83tR3ZFk9ndlnDbTVr0seAqDDdJxUHyaA3lX6F5g8G6W8bm76w8b5vot +Vl4ohfcA0CIxbCpp773V0qjyZNj9wDIZg8cX8mXcRi4XoUDltD5/yUwRLVjjvJba +tF+vD3NWWuCGRu65qdR3JYJGr4MtbVo06uoeBXcgZrcDsb93chlsuyH337twq2fn +QbqHbuyxAjFxtv125Jmu6li3pu9FUQrnQWQVHzvt2zvR44vOx+yDQHtil9U7H0aU +Nrzqr9OPOApCr7oQ8GoHYn4C7TAs12U/xiPsvuM1puTzbw8ofuKczFRIA8nuyUHU +XTP/9oYyZ/Vs9qyAtIVCCyEfhSobfwZLLFAT4RWzQZ4H0JmtXfNdt+PFPSWg5MZA +W321uulq/JSa4MQUJbNUEeNYeG+NqjhviM00irpt2Baz2EbVAJMT4ClndRQOwrKT +15+icdyvgx5uZbEuvXK6kyU0AHESHxhzN6C5eHPEYkMjVYgftbE7R3cp9TEj3VvK +Ecd1SXTtKOq2J91te10UrceURqquGuGXVUO7PYGVYBNugjlH47qRIwtI0njPg3ep +10XBwkOm1CgvZxHaj4P0NJf+wih+K8Z5Dg1+90nnJ4mxGFFIW8m7Cfn1tPFmEPo= +-----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/config b/testing/web3signer_tests/tls/lighthouse/config similarity index 95% rename from testing/web3signer_tests/tls/config rename to testing/web3signer_tests/tls/lighthouse/config index d19a89b02f6..6295f7fa011 100644 --- a/testing/web3signer_tests/tls/config +++ b/testing/web3signer_tests/tls/lighthouse/config @@ -10,7 +10,7 @@ ST = VA L = SomeCity O = MyCompany OU = MyDivision -CN = 127.0.0.1 +CN = lighthouse [v3_req] keyUsage = keyEncipherment, dataEncipherment extendedKeyUsage = serverAuth diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key new file mode 100644 index 00000000000..bbc69ca38bc --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC0HrD6fJGcqm8z +wEs+Y+FGIpRYPyjdlugj3qqwvMSI9jeDW2fr1zUl/wIuf4o+O16PXZitHgAyg3lp +h1x/kKL59c4rwWxUabSudAQZ6YCJHo4jWf3hR+UmMQEdNPgNrofvvGCA7CjLPKZf +W6pzZo9kvMwbgeRNuJCuKZ0v/p9Y/lOplj+TTBq16HMtsSarib3bnKEaRdLCQgTJ +S3vwbtEiCC9BcZAkvs0fmVUIENRVeKGZIqcAdiOTUPvs4zctchzJMGG+TA2ckKIp +GT0F4be8gy1uHyP0fncJAtNvkGRPmVQcNew/HIIkJjiJvmrwewn4dYqYAe+aEL5A +B4dZhlKjIPENfq38t7iY/aXV8COTQZGMEZ7Diext1JmEb34vEXgS7Gk9ZSCp/1X+ +fk/wW4uQeRlGwblaRtRxBrfJWmEoQHohzyP4jog8dajSZTjUbsA+HGaeZo1k3M0i +3lxRBbLGamPODIO9CVGwKaiEJTy4bEpreM2tLR1rk5JECf46WPURSN6OdHrO5x38 +wzQlUv+Hb4vN4p0ZkiGJO62Duuw6hbGA6UIBffM20QuJUtz3Pa8Dun/NunIagmIL +5KCsrDtZkt5wBsX3XU6OPdfZrfgOIXNfQmpbbeAUOok1NOgszXjPDKCsnxZZBtPh +XC1VnRkiWK50GNmWe8MLqXR/G12TXwIDAQABAoICAQCXUo2W856Vwy5HiQ7t7JWv 
+CZAdj3pyp7yBnilC8GQhONGsntdw8M2rDVG05Nusqs4nnheNoX3C8mfHO7x/Q3FY +lKTQZ+DuDhyIz9k+N8kP6ca6dnlvkao3asYn1n9rZyy3QUjGJyGilWKlDGroJsrj +dCX6GidHEH8kgruXPdB7wLdi62KgCjkKiK5zPbhiNwd1gGJsoyqMn1BMGQmYFlHG +yJ+C2Lij1lSYboZcj18EK6N/9vfc0GPU+R2dh8qseIkskWQcruJknbJO2vBEh7yI +OKCrOqhHWRQCUwh1WxabNRLP3JGM+BNx8VZgisRnIsdeoMl+KWo1wklDm8+fa9Tx +4xquIy+4PzmobWXiWBpirF7bTNhyZ4vIaMSTOP5TYiliom/hJtcpAwLf9eXxMfti +vRAogZEtr0eKTieH72dwsBVx6wNlxhazvD+ZKIq7OIzJRA6Do2H+BAmz/l4mgVR/ +geL3u0fn0j/Y+8OyFE3P+8D/PqgPzLgTYa5QSp6JtHxNlVcmWefJiLtZDAJvPpeo +UVsA+E2BHsrGveLk15GF9F+vJ867qKT7luQac3zF7V0hE9pktUKM2gY+Jy455w5i +cMxyjt4RAKY8AHAmFvCRQHNdjU2o1UjVFgYsQTYsOdvAiyq0xEJFkbeR2Zxz2sJW +JWK+YlT+UEGDL5SCaXzP4QKCAQEA7gRAy/Xq0Fjq7UZvc7oJ62h6BmseFL9BuKlW +QmvVFAilYeQVejl/ubafyL4Z9ntEeCGTkv8H4DeALs9A/isFOcDxZDoelCETrSxI +CfXllob24276eTc5dBdHmofBjRgIbovnyuFRYzK5uDalVAxYsZPFOp9/qtGa25ex +uIcyJwX+ivqqtA9B5CHu7p/znNrp155xLwGpVczx4xGqjPPr5N2rwZFOXufGFULH +AKbJBSUxiMMJnb1rN8aIuTo/Utr3/i7hc7AUO3//qieyjLdXe8tESqgxzTNvfZk3 +qYtPk4GSHql7Eesxg19fzVdG+LTnzfRKOfOtcZJPRFGGW29fjwKCAQEAwbqXsZvC +7AmmmeVVAPL7q5pXAxSEMK7VsJzPJ7G6MRQ37YjkNRcCf7SRQqNBGQubVkv3Qzvc +rmMhT9I5QfCR2JXQtrH1y09eS45T6NYbRkT6NA3E3XNmRIPO+wIeDV32v5jJwhIk +7ayuG2zBsAryxNvg3us3pWHeIQ45sX0JqNil6BTemYRBrCZmCRWHndl72zDbtR23 +kVt9GKaycSPyCZQ7yE4ZWD2VsrbgEidVJEQagknsjQrldMO68GLbHCP2ZyrIUhKN +2eeuHJpZPz+pahQ55MAEvjIsJKPWsg8cut2Vo4sqgez+xiz0v/nWiPLtvxdN+DHP +tAVbrw+0NeqnMQKCAQB3GsO+DLpLNiOhRpzhAViTZ32glpu/8BEYMgzLQiCnXMg9 +myAwQHOs4DlG//IICJkzsEGjzmEHj15iji3MwoRj6SwiZn8EyySIhN8rtNQFplYH +a3KFk9/5OukG6CYvz7Xwc6wzNts+U5TiHN5Ql7kOa47HjicZuLfQaTFy0JyFMJe2 +vkcLwZLMcTqaSIpklJtt3Yhv6FnvaJYmdaGt1SXXKiIXw/m+via+XuMsbUmsfHc0 +I709JRtxFrU2U3J6qL5ugNEqzhLhz2SFpkXP6rMpbIcpAM+jCrkg1bon6mGQw8b1 +9wNx7Qqi3egX3jPSotxYkIVQSKMjcP6fhlhAixP7AoIBAH1ynKQwHurF3RIuxPqW +XY3jpZCjCm6T6GAzSpmDpvP9CbJRQKV4Pu//N0kVeiQDthUNoBHzg5WRL5MGqHkg +lPDRIpQLbQS4YnE+uus9KfA43mQyvlZAUerwB2nXFyrEu/GZuJxpL2yQszWjGVEr +5cTANT9kxWXcmACDu6xJMaYalGRSj0qNsBEP1GbxgB4hJOjtHHiNw77mpXz/BPHq +uuKlEIlGuXbAel19ul9HBQU07I2N3RYABlG0JStgeE4io35u38T1qtF+CusOr9gb +G1NLwal1Bh07VAZt6arnykzfC/UZOu9jTh96IQrnd5q65GUnbB/Z8Yu7JIGaA7Ie +PyECggEAPZlzqPCdNcmdoCSNIDCDYZBVf2xZX8591xdphMG59Jrckp5kl5LM5bjQ +tysj1LJpMK+l60b3r8BI8a4lvj+eBqwBUck82/IImTedE9/oLF3Z64kLd1tr3aGa +W5jLXjThFF20BqfD+YbmFVEdHTwN2L+4kN0VvP/6oLadxogTLwQruMFoPlsD4B19 +HDcAKe6OnyWMer/X9nq9OY6GFGc4X6wHjJ8pj4aa4HE8VNNq40GMkRZOZaJvaPqh +orK9SC50qdJtrVQeD4fhfZMVzmRyE4RSSQBPfc9zq/sO/pjUfV9uK4c99FDbviIf +JAkxGuYLZeyrHEyeKLm7S77SLipKWg== +-----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 new file mode 100644 index 00000000000..22b7d7f4254 Binary files /dev/null and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/password.txt b/testing/web3signer_tests/tls/lighthouse/password.txt new file mode 100644 index 00000000000..16da1460ff2 --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/password.txt @@ -0,0 +1 @@ +bark diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem new file mode 100644 index 00000000000..460cb8b4003 --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFmzCCA4OgAwIBAgIUSHwf3lJKpa1BNR9rFOmxhoKTD1MwDQYJKoZIhvcNAQEL +BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD 
+VQQDDAp3ZWIzc2lnbmVyMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 +MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDr +aQUU4O7K/aBAiH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49 +nki1q0rLEU/xJ0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85 +xfO9th/RkJkgpzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07k +toF0N+yLGW76yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPW +yAVTDi3bWmvh3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLo +SMKkyPsm6IN60GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU +2DDGdHpxut6h4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/kt +fhE+8zavQzjsxm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06g +jjvN9WdsCXmTEShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF +/Pqu0WQd82CgG1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn3 +8HEnyLEyA3e8a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABozUwMzALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq +hkiG9w0BAQsFAAOCAgEA1Bn7mpa2eJUo4+1X5lVLWWwtXLAfKiBf6OWNfacLV6FL +gyKpvvESTGuA5VAS0O97TPd7uyzEbUMS75TdmfAT8zecO2aXMb7aTyX+QbMj2gmk +zou72Fl4o6V1IvYpjKaNBZCS3Hk67ivRYbQCamEOk5UX9/wCdLvC9PH5Y+WqcPaz +7RLXe3OXhRbfFax4+pWzZxsgSKrEi8ZZ5gRa/bdJVVsTqk9LwS/CbMjEAkdzIBLt +cQb9BcnTJcQvp6ehNIVMdEC7GLXcDkefw7CL1ZfEh3DoJD3hiR6QwdWtdG0etoUf +w8LHZhCJD0IZxLMHiE+qiN4xkx+cznol+gAc9sfmtVK1CAW9l1Aa8zw5AfAyCg3h +jr6ymfwY8zlO21yBmCTg2+yTbU/0CqkgimQeztoYCh7+67QgnSCJMk2ffR6GPj1q +pfLI/5QNoxdFvR/lkwj5h/HRp9JZKTV/R/g0Va4Arg3Y7RTezjCYkJnX37ScnQhg +JLIeXmksFkc+Oz3yA+r60rR72+lsVzE87BCs+L0y16zcQnU5NqJXrSMMqCkjbs9l +b682+tnJKLFGQrYia/FL/Sc2L2Tn5hba5wWQTMjGujg76fkMc6VIv1qG3VGR/V1G +r11UJ+WjEcdrwZUm7E76p9DfTce52kGqGXwfrv6kQjvLhipwjzgv429txzDy82k= +-----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem new file mode 100644 index 00000000000..460cb8b4003 --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFmzCCA4OgAwIBAgIUSHwf3lJKpa1BNR9rFOmxhoKTD1MwDQYJKoZIhvcNAQEL +BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD +VQQDDAp3ZWIzc2lnbmVyMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 +MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDr +aQUU4O7K/aBAiH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49 +nki1q0rLEU/xJ0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85 +xfO9th/RkJkgpzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07k +toF0N+yLGW76yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPW +yAVTDi3bWmvh3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLo +SMKkyPsm6IN60GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU +2DDGdHpxut6h4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/kt +fhE+8zavQzjsxm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06g +jjvN9WdsCXmTEShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF +/Pqu0WQd82CgG1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn3 +8HEnyLEyA3e8a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABozUwMzALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq +hkiG9w0BAQsFAAOCAgEA1Bn7mpa2eJUo4+1X5lVLWWwtXLAfKiBf6OWNfacLV6FL +gyKpvvESTGuA5VAS0O97TPd7uyzEbUMS75TdmfAT8zecO2aXMb7aTyX+QbMj2gmk +zou72Fl4o6V1IvYpjKaNBZCS3Hk67ivRYbQCamEOk5UX9/wCdLvC9PH5Y+WqcPaz 
+7RLXe3OXhRbfFax4+pWzZxsgSKrEi8ZZ5gRa/bdJVVsTqk9LwS/CbMjEAkdzIBLt +cQb9BcnTJcQvp6ehNIVMdEC7GLXcDkefw7CL1ZfEh3DoJD3hiR6QwdWtdG0etoUf +w8LHZhCJD0IZxLMHiE+qiN4xkx+cznol+gAc9sfmtVK1CAW9l1Aa8zw5AfAyCg3h +jr6ymfwY8zlO21yBmCTg2+yTbU/0CqkgimQeztoYCh7+67QgnSCJMk2ffR6GPj1q +pfLI/5QNoxdFvR/lkwj5h/HRp9JZKTV/R/g0Va4Arg3Y7RTezjCYkJnX37ScnQhg +JLIeXmksFkc+Oz3yA+r60rR72+lsVzE87BCs+L0y16zcQnU5NqJXrSMMqCkjbs9l +b682+tnJKLFGQrYia/FL/Sc2L2Tn5hba5wWQTMjGujg76fkMc6VIv1qG3VGR/V1G +r11UJ+WjEcdrwZUm7E76p9DfTce52kGqGXwfrv6kQjvLhipwjzgv429txzDy82k= +-----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/config b/testing/web3signer_tests/tls/web3signer/config new file mode 100644 index 00000000000..4b7e40618ce --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/config @@ -0,0 +1,19 @@ +[req] +default_bits = 4096 +default_md = sha256 +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no +[req_distinguished_name] +C = US +ST = VA +L = SomeCity +O = MyCompany +OU = MyDivision +CN = web3signer +[v3_req] +keyUsage = keyEncipherment, dataEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key new file mode 100644 index 00000000000..6e5171f3742 --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRQIBADANBgkqhkiG9w0BAQEFAASCCS8wggkrAgEAAoICAQDraQUU4O7K/aBA +iH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49nki1q0rLEU/x +J0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85xfO9th/RkJkg +pzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07ktoF0N+yLGW76 +yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPWyAVTDi3bWmvh +3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLoSMKkyPsm6IN6 +0GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU2DDGdHpxut6h +4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/ktfhE+8zavQzjs +xm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06gjjvN9WdsCXmT +EShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF/Pqu0WQd82Cg +G1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn38HEnyLEyA3e8 +a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABAoICAQDihR2kp4Rfw4luT2nNUm5C +JFAxJH/vLT5uX1Gm8XWPI9oC21dnu6Asd5RskrGfSouWszZXyUmg+TmpXRSa796t +hjHS0KW59HBxvYDx18mEXJXHWbcK/L5D5iFmpMYHH6xiFT6i8BrR9ofCSeCU52SF +CkEzGZJ0pfR/w4dIvjGWNNcsoI2mp2hl9/84fco8ol7x6UPL5vwwJPsLS0hqwmAz +v+994IKCT1EQllEGhv0pY7fPscXF9pOXDbnmYjwqpEhzJekpsF0j03A32R/4dOx2 +x8eOpngLv2Hczg5RSpbzRF4X0yJVANg/AlJJZmkYGOZ5qXnSQqqZF+dcSCvVVwhO +GS7uci6Mcy7Ov0Gj9HWX8As0SofPtUMuO7k/nJYOzcgY+4agyIDrylIeG86gdCDQ +hGVz+T5reJZIBMp66GPT6M8r36q50cx2x9nJjxLlIjvly1EruVjQoSMUfjewHG91 +xJI0iFhlbBrCpyLx3X0smMEr0vJzM5J0GtdxQdcSocDy5244+4zuslAXgsEYwHYx +WYFMsotRif8aB2b3OSt0yH+Heh06dZehvwWa4F4/3qlP48e0/CWIL7Y/tBgZv8Gh +n3F7HsHvMx6qQqMY5AxudbkpKdM9W84gXriHPIsO2oZEU6N65J/Bpq5Ve4JBlih1 +Ji0CtvHlAR2dhGkj6Q36MQKCAQEA9z/HTd8hd4FNEfn595OVWr9CeZc1zAlNa94I +lvTLhLEFcMkGsZd9KyV87MOV3p9m+XI7UJmqllIHOkwrECF2wzFssguPk+RAJ5hW +LZJgsF0fPnhX0qJFXzSNzzqAICES6+s9jvHMO9PhtF59uv4zsRFEBmKAr0AN8Zsk +rEk+2Tl2RgC+sxzRS767De9CrbSjxm+qAHuFFh8QX/N/mPoLUa+V5Oh2srA5bTHn +t0vyfQQ9+gqTBJDy51VGYlYw5OQBAiOPTgzbSmm2gqdWYgGn2Sp5IBQLF5nGGGsV +70DvnsoxViqpsv+yObAF9PqXnu6UGoB023Jr8x683bU9/jQFLQKCAQEA8735Vbbc +kncVJIpIlG7SDPmlLCFnxokvWWmyJS2J4SrIJJykn30qknGGZFFn67NB5PulAEaw +mdG58FIxxkm8bEKwebEhdnB9sP8k3TvddPKlBXYb1WuGxzyF/xlHniEJ7jN0YAAz +D1BLxTP1OM47iX5ocyVpOPbAdq/yZK0bffvIUy/QKLeJNx0d59PKpJRb4y6T/LvS 
+tp3UHrBqCNYYoKsZluS9Kg6WJF4g269yn2LSdtzQlAW1IT3DgO7h+2UBYI4FwMao +BZVew44CjljGCTA2KL4jdsqnTyt0qzzAiJZ0CGkJY9gal16ODHcBUKfNGYvjU8pf +2qDEiCn0HayXNwKCAQEAlOscLuHy9Viyw94NWbnRXlwOPM//fgooUIzmHY4Jur0o +arsZxgNZR5CHws82yGS4EAxmf3Bel7WGVu2jjk6pin2NW1utOcVjgrW1SjN8+xzL +gcPYGazVHbe4phU1MKTbEa+ZXyxx96LxscKr9eG/3qlokHPp0CRDgb8RApgHO6zp +eNZgBd+YjAewAH+YaKmBbza4bRv4l89T/Ibb1pbcFHIuVTZSr+OGYyeIyhT7U6Mn +dR/DVx+6vezVvMrvHh3aIaCrYrZJqnMrk1wYomUe5KU5WUHZQHjFINX22ykAamKb +/qsplP9/KFHF9Lyub/KAz8mJGNe8/y0HUn4kfaR1bQKCAQEAhZHTsx8UXMcZNP76 +qyzXuviLhVWBExFWez8quqjr6BKTv0yAAk6LJ9lCdnMN6eI/+AXW9AHJAWIm7QV9 +9VWvBfy9zNI+rjMTDg2j3ADUaSQXPpjsw9W69C+8loD5+DPOx1Q3L+ysDnZIL3c7 +qLeLdNtqzb7wnKDL876TrIwYhr+VldCb19RMQ4GXQ9WSNQKAIE0EF/mtjRmMhozS +bqk0scdRrJkI+KUpriBPDVRmEeYLw8taGePO0LqSCnPeLu+5A3qQuIWkyfqDBdMq +n2sSizJ6W3Vm5dBEQ2Ri+Pu/3pnkWD+HP8nLOKw+V6JXfCWYhaldGCvMv3heeufS +uPg9nQKCAQEAp/boT63JB+ahU3VQGtqwlDXkRS/Ge8a7FRp4kjdK7d1mtUDqOJ9U +l2RHgOkqhNuAPy64/07caDK3R7vKeOFmSXCV/WHIcgt46SRwFQECZeyA1R+EkTes +tseTngdFrQ10Xf+DmLNqCyX5KpgQf+ccluyyH6uK6FRI/VfU4sLrUGyOblqHq/c4 +bRR4nMwiw5yga45YhQH8uJF54MI7XaD2/hPCAIJBkx88taRzMUlWl1u1VQosIvtZ +5hCRepq9A44P61c+HI/5fzXAn2xvwR2EiV0hAYLn+rmYgBId/RfcstWUR78A9wpT +/OsV3MTX1gCaTE9Q2GlZVybDh20ZvdBC/g== +-----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 new file mode 100644 index 00000000000..459f4fb62e8 Binary files /dev/null and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt new file mode 100644 index 00000000000..de80bb7cebe --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -0,0 +1 @@ +lighthouse 1B:43:E1:58:26:7D:3F:70:BD:DA:32:E9:29:A5:A9:50:EA:B2:A8:C3:0C:82:BF:90:13:ED:5B:E0:7D:5B:0A:C0 diff --git a/testing/web3signer_tests/tls/password.txt b/testing/web3signer_tests/tls/web3signer/password.txt similarity index 100% rename from testing/web3signer_tests/tls/password.txt rename to testing/web3signer_tests/tls/web3signer/password.txt diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 32e63b0705e..2ba81eac7a8 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -328,7 +328,8 @@ impl BlockService { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; - let signed_block = self + // Request block from first responsive beacon node. + let block = self .beacon_nodes .first_success(RequireSynced::No, |beacon_node| async move { let get_timer = metrics::start_timer_vec( @@ -378,14 +379,19 @@ impl BlockService { )); } - let signed_block = self_ref - .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) - .await - .map_err(|e| { - BlockError::Recoverable(format!("Unable to sign block: {:?}", e)) - })?; + Ok::<_, BlockError>(block) + }) + .await?; + + let signed_block = self_ref + .validator_store + .sign_block::(*validator_pubkey_ref, block, current_slot) + .await + .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + // Publish block with first available beacon node. 
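+        // Note: because signing happens between the two `first_success` calls, the node
+        // that receives the signed block is not necessarily the one that produced it.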
+ self.beacon_nodes + .first_success(RequireSynced::No, |beacon_node| async { let _post_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK_HTTP_POST], @@ -412,19 +418,17 @@ impl BlockService { })?, } - Ok::<_, BlockError>(signed_block) + info!( + log, + "Successfully published block"; + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); + Ok::<_, BlockError>(()) }) .await?; - - info!( - log, - "Successfully published block"; - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); - Ok(()) } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 6428034d8ba..f8ca5a3d44a 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -646,17 +646,18 @@ async fn poll_beacon_attesters_for_epoch( response .data .into_iter() - .filter(|duty| local_pubkeys.contains(&duty.pubkey)) .filter(|duty| { - // Only update the duties if either is true: - // - // - There were no known duties for this epoch. - // - The dependent root has changed, signalling a re-org. - attesters.get(&duty.pubkey).map_or(true, |duties| { - duties - .get(&epoch) - .map_or(true, |(prior, _)| *prior != dependent_root) - }) + local_pubkeys.contains(&duty.pubkey) && { + // Only update the duties if either is true: + // + // - There were no known duties for this epoch. + // - The dependent root has changed, signalling a re-org. 
+                        attesters.get(&duty.pubkey).map_or(true, |duties| {
+                            duties
+                                .get(&epoch)
+                                .map_or(true, |(prior, _)| *prior != dependent_root)
+                        })
+                    }
                 })
                 .collect::<Vec<_>>()
         };
diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs
index a8e4fd26290..db59c25f758 100644
--- a/validator_client/src/http_api/create_validator.rs
+++ b/validator_client/src/http_api/create_validator.rs
@@ -1,5 +1,5 @@
 use crate::ValidatorStore;
-use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition};
+use account_utils::validator_definitions::ValidatorDefinition;
 use account_utils::{
     eth2_wallet::{bip39::Mnemonic, WalletBuilder},
     random_mnemonic, random_password, ZeroizeString,
@@ -164,24 +164,12 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock,
 }

 pub async fn create_validators_web3signer<T: 'static + SlotClock, E: EthSpec>(
-    validator_requests: &[api_types::Web3SignerValidatorRequest],
+    validators: Vec<ValidatorDefinition>,
     validator_store: &ValidatorStore<T, E>,
 ) -> Result<(), warp::Rejection> {
-    for request in validator_requests {
-        let validator_definition = ValidatorDefinition {
-            enabled: request.enable,
-            voting_public_key: request.voting_public_key.clone(),
-            graffiti: request.graffiti.clone(),
-            suggested_fee_recipient: request.suggested_fee_recipient,
-            description: request.description.clone(),
-            signing_definition: SigningDefinition::Web3Signer {
-                url: request.url.clone(),
-                root_certificate_path: request.root_certificate_path.clone(),
-                request_timeout_ms: request.request_timeout_ms,
-            },
-        };
+    for validator in validators {
         validator_store
-            .add_validator(validator_definition)
+            .add_validator(validator)
             .await
             .map_err(|e| {
                 warp_utils::reject::custom_server_error(format!(
diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs
index ce6089c5b63..f88aacfca8d 100644
--- a/validator_client/src/http_api/keystores.rs
+++ b/validator_client/src/http_api/keystores.rs
@@ -1,5 +1,8 @@
 //! Implementation of the standard keystore management API.
-use crate::{signing_method::SigningMethod, InitializedValidators, ValidatorStore};
+use crate::{
+    initialized_validators::Error, signing_method::SigningMethod, InitializedValidators,
+    ValidatorStore,
+};
 use account_utils::ZeroizeString;
 use eth2::lighthouse_vc::std_types::{
     DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus,
@@ -11,8 +14,8 @@ use slog::{info, warn, Logger};
 use slot_clock::SlotClock;
 use std::path::PathBuf;
 use std::sync::Arc;
-use std::sync::Weak;
-use tokio::runtime::Runtime;
+use task_executor::TaskExecutor;
+use tokio::runtime::Handle;
 use types::{EthSpec, PublicKeyBytes};
 use validator_dir::Builder as ValidatorDirBuilder;
 use warp::Rejection;
@@ -56,7 +59,7 @@ pub fn import(
     request: ImportKeystoresRequest,
     validator_dir: PathBuf,
     validator_store: Arc<ValidatorStore<T, E>>,
-    runtime: Weak<Runtime>,
+    task_executor: TaskExecutor,
     log: Logger,
 ) -> Result<ImportKeystoresResponse, Rejection> {
     // Check request validity. This is the only cases in which we should return a 4xx code.
@@ -119,14 +122,14 @@ pub fn import(
                 ImportKeystoreStatus::Error,
                 format!("slashing protection import failed: {:?}", e),
             )
-        } else if let Some(runtime) = runtime.upgrade() {
+        } else if let Some(handle) = task_executor.handle() {
             // Import the keystore.
             match import_single_keystore(
                 keystore,
                 password,
                 validator_dir.clone(),
                 &validator_store,
-                runtime,
+                handle,
             ) {
                 Ok(status) => Status::ok(status),
                 Err(e) => {
@@ -156,7 +159,7 @@ fn import_single_keystore(
     password: ZeroizeString,
     validator_dir_path: PathBuf,
     validator_store: &ValidatorStore<T, E>,
-    runtime: Arc<Runtime>,
+    handle: Handle,
 ) -> Result<ImportKeystoreStatus, String> {
     // Check if the validator key already exists, erroring if it is a remote signer validator.
     let pubkey = keystore
@@ -195,7 +198,7 @@ fn import_single_keystore(
     let voting_keystore_path = validator_dir.voting_keystore_path();
     drop(validator_dir);

-    runtime
+    handle
         .block_on(validator_store.add_validator_keystore(
             voting_keystore_path,
             password,
@@ -211,7 +214,7 @@
 pub fn delete(
     request: DeleteKeystoresRequest,
     validator_store: Arc<ValidatorStore<T, E>>,
-    runtime: Weak<Runtime>,
+    task_executor: TaskExecutor,
     log: Logger,
 ) -> Result<DeleteKeystoresResponse, Rejection> {
     // Remove from initialized validators.
@@ -222,8 +225,11 @@ pub fn delete(
         .pubkeys
         .iter()
         .map(|pubkey_bytes| {
-            match delete_single_keystore(pubkey_bytes, &mut initialized_validators, runtime.clone())
-            {
+            match delete_single_keystore(
+                pubkey_bytes,
+                &mut initialized_validators,
+                task_executor.clone(),
+            ) {
                 Ok(status) => Status::ok(status),
                 Err(error) => {
                     warn!(
@@ -241,8 +247,8 @@ pub fn delete(
     // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out
     // of date as it resets when it can't be decrypted. We update it just a single time to avoid
     // continually resetting it after each key deletion.
-    if let Some(runtime) = runtime.upgrade() {
-        runtime
+    if let Some(handle) = task_executor.handle() {
+        handle
             .block_on(initialized_validators.update_validators())
             .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?;
     }
@@ -275,16 +281,21 @@
 fn delete_single_keystore(
     pubkey_bytes: &PublicKeyBytes,
     initialized_validators: &mut InitializedValidators,
-    runtime: Weak<Runtime>,
+    task_executor: TaskExecutor,
 ) -> Result<DeleteKeystoreStatus, String> {
-    if let Some(runtime) = runtime.upgrade() {
+    if let Some(handle) = task_executor.handle() {
         let pubkey = pubkey_bytes
             .decompress()
             .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?;
-        runtime
-            .block_on(initialized_validators.delete_definition_and_keystore(&pubkey))
-            .map_err(|e| format!("unable to disable and delete: {:?}", e))
+        match handle.block_on(initialized_validators.delete_definition_and_keystore(&pubkey, true))
+        {
+            Ok(_) => Ok(DeleteKeystoreStatus::Deleted),
+            Err(e) => match e {
+                Error::ValidatorNotInitialized(_) => Ok(DeleteKeystoreStatus::NotFound),
+                _ => Err(format!("unable to disable and delete: {:?}", e)),
+            },
+        }
     } else {
         Err("validator client shutdown".into())
     }
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs
index 8e1f5a7390d..9ee983a35ab 100644
--- a/validator_client/src/http_api/mod.rs
+++ b/validator_client/src/http_api/mod.rs
@@ -1,10 +1,14 @@
 mod api_secret;
 mod create_validator;
 mod keystores;
+mod remotekeys;
 mod tests;

 use crate::ValidatorStore;
-use account_utils::mnemonic_from_phrase;
+use account_utils::{
+    mnemonic_from_phrase,
+    validator_definitions::{SigningDefinition, ValidatorDefinition},
+};
 use create_validator::{create_validators_mnemonic, create_validators_web3signer};
 use eth2::lighthouse_vc::{
     std_types::AuthResponse,
@@ -18,8 +22,8 @@ use std::future::Future;
 use std::marker::PhantomData;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::path::PathBuf;
-use std::sync::{Arc, Weak};
-use tokio::runtime::Runtime;
+use std::sync::Arc;
+use task_executor::TaskExecutor;
 use types::{ChainSpec, ConfigAndPreset, EthSpec};
 use validator_dir::Builder as ValidatorDirBuilder;
 use warp::{
@@ -55,7 +59,7 @@ impl From for Error {
 ///
 /// The server will gracefully handle the case where any fields are `None`.
 pub struct Context<T: SlotClock, E: EthSpec> {
-    pub runtime: Weak<Runtime>,
+    pub task_executor: TaskExecutor,
     pub api_secret: ApiSecret,
     pub validator_store: Option<Arc<ValidatorStore<T, E>>>,
     pub validator_dir: Option<PathBuf>,
@@ -157,8 +161,8 @@ pub fn serve(
         })
     });

-    let inner_runtime = ctx.runtime.clone();
-    let runtime_filter = warp::any().map(move || inner_runtime.clone());
+    let inner_task_executor = ctx.task_executor.clone();
+    let task_executor_filter = warp::any().map(move || inner_task_executor.clone());

     let inner_validator_dir = ctx.validator_dir.clone();
     let validator_dir_filter = warp::any()
@@ -286,18 +290,18 @@ pub fn serve(
         .and(validator_store_filter.clone())
         .and(spec_filter.clone())
         .and(signer.clone())
-        .and(runtime_filter.clone())
+        .and(task_executor_filter.clone())
         .and_then(
             |body: Vec<api_types::ValidatorRequest>,
              validator_dir: PathBuf,
              validator_store: Arc<ValidatorStore<T, E>>,
              spec: Arc<ChainSpec>,
              signer,
-             runtime: Weak<Runtime>| {
+             task_executor: TaskExecutor| {
                 blocking_signed_json_task(signer, move || {
-                    if let Some(runtime) = runtime.upgrade() {
+                    if let Some(handle) = task_executor.handle() {
                         let (validators, mnemonic) =
-                            runtime.block_on(create_validators_mnemonic(
+                            handle.block_on(create_validators_mnemonic(
                                 None,
                                 None,
                                 &body,
@@ -312,7 +316,7 @@ pub fn serve(
                         Ok(api_types::GenericResponse::from(response))
                     } else {
                         Err(warp_utils::reject::custom_server_error(
-                            "Runtime shutdown".into(),
+                            "Lighthouse shutting down".into(),
                         ))
                     }
                 })
@@ -329,16 +333,16 @@ pub fn serve(
         .and(validator_store_filter.clone())
         .and(spec_filter)
         .and(signer.clone())
-        .and(runtime_filter.clone())
+        .and(task_executor_filter.clone())
         .and_then(
             |body: api_types::CreateValidatorsMnemonicRequest,
              validator_dir: PathBuf,
              validator_store: Arc<ValidatorStore<T, E>>,
              spec: Arc<ChainSpec>,
              signer,
-             runtime: Weak<Runtime>| {
+             task_executor: TaskExecutor| {
                 blocking_signed_json_task(signer, move || {
-                    if let Some(runtime) = runtime.upgrade() {
+                    if let Some(handle) = task_executor.handle() {
                         let mnemonic =
                             mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
                                 warp_utils::reject::custom_bad_request(format!(
@@ -347,7 +351,7 @@ pub fn serve(
                                 ))
                             })?;
                         let (validators, _mnemonic) =
-                            runtime.block_on(create_validators_mnemonic(
+                            handle.block_on(create_validators_mnemonic(
                                 Some(mnemonic),
                                 Some(body.key_derivation_path_offset),
                                 &body.validators,
@@ -358,7 +362,7 @@ pub fn serve(
                         Ok(api_types::GenericResponse::from(validators))
                     } else {
                         Err(warp_utils::reject::custom_server_error(
-                            "Runtime shutdown".into(),
+                            "Lighthouse shutting down".into(),
                         ))
                     }
                 })
@@ -374,13 +378,13 @@ pub fn serve(
         .and(validator_dir_filter.clone())
         .and(validator_store_filter.clone())
         .and(signer.clone())
-        .and(runtime_filter.clone())
+        .and(task_executor_filter.clone())
         .and_then(
             |body: api_types::KeystoreValidatorsPostRequest,
              validator_dir: PathBuf,
              validator_store: Arc<ValidatorStore<T, E>>,
              signer,
-             runtime: Weak<Runtime>| {
+             task_executor: TaskExecutor| {
                 blocking_signed_json_task(signer, move || {
                     // Check to ensure the password is correct.
                     let keypair = body
@@ -412,8 +416,8 @@ pub fn serve(
                     let suggested_fee_recipient = body.suggested_fee_recipient;

                     let validator_def = {
-                        if let Some(runtime) = runtime.upgrade() {
-                            runtime
+                        if let Some(handle) = task_executor.handle() {
+                            handle
                                 .block_on(validator_store.add_validator_keystore(
                                     voting_keystore_path,
                                     voting_password,
@@ -429,7 +433,7 @@ pub fn serve(
                                 })?
} else { return Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )); } }; @@ -451,19 +455,39 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |body: Vec, validator_store: Arc>, signer, - runtime: Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { - if let Some(runtime) = runtime.upgrade() { - runtime.block_on(create_validators_web3signer(&body, &validator_store))?; + if let Some(handle) = task_executor.handle() { + let web3signers: Vec = body + .into_iter() + .map(|web3signer| ValidatorDefinition { + enabled: web3signer.enable, + voting_public_key: web3signer.voting_public_key, + graffiti: web3signer.graffiti, + suggested_fee_recipient: web3signer.suggested_fee_recipient, + description: web3signer.description, + signing_definition: SigningDefinition::Web3Signer { + url: web3signer.url, + root_certificate_path: web3signer.root_certificate_path, + request_timeout_ms: web3signer.request_timeout_ms, + client_identity_path: web3signer.client_identity_path, + client_identity_password: web3signer.client_identity_password, + }, + }) + .collect(); + handle.block_on(create_validators_web3signer( + web3signers, + &validator_store, + ))?; Ok(()) } else { Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )) } }) @@ -478,13 +502,13 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, validator_store: Arc>, signer, - runtime: Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); @@ -496,8 +520,8 @@ pub fn serve( ))), Some(enabled) if enabled == body.enabled => Ok(()), Some(_) => { - if let Some(runtime) = runtime.upgrade() { - runtime + if let Some(handle) = task_executor.handle() { + handle .block_on( initialized_validators .set_validator_status(&validator_pubkey, body.enabled), @@ -511,7 +535,7 @@ pub fn serve( Ok(()) } else { Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )) } } @@ -536,6 +560,7 @@ pub fn serve( // Standard key-manager endpoints. 
let eth_v1 = warp::path("eth").and(warp::path("v1")); let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); + let std_remotekeys = eth_v1.and(warp::path("remotekeys")).and(warp::path::end()); // GET /eth/v1/keystores let get_std_keystores = std_keystores @@ -551,26 +576,60 @@ pub fn serve( .and(signer.clone()) .and(validator_dir_filter) .and(validator_store_filter.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and(log_filter.clone()) .and_then( - |request, signer, validator_dir, validator_store, runtime, log| { + |request, signer, validator_dir, validator_store, task_executor, log| { blocking_signed_json_task(signer, move || { - keystores::import(request, validator_dir, validator_store, runtime, log) + keystores::import(request, validator_dir, validator_store, task_executor, log) }) }, ); // DELETE /eth/v1/keystores let delete_std_keystores = std_keystores + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and(task_executor_filter.clone()) + .and(log_filter.clone()) + .and_then(|request, signer, validator_store, task_executor, log| { + blocking_signed_json_task(signer, move || { + keystores::delete(request, validator_store, task_executor, log) + }) + }); + + // GET /eth/v1/remotekeys + let get_std_remotekeys = std_remotekeys + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and_then(|signer, validator_store: Arc>| { + blocking_signed_json_task(signer, move || Ok(remotekeys::list(validator_store))) + }); + + // POST /eth/v1/remotekeys + let post_std_remotekeys = std_remotekeys + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and(task_executor_filter.clone()) + .and(log_filter.clone()) + .and_then(|request, signer, validator_store, task_executor, log| { + blocking_signed_json_task(signer, move || { + remotekeys::import(request, validator_store, task_executor, log) + }) + }); + + // DELETE /eth/v1/remotekeys + let delete_std_remotekeys = std_remotekeys .and(warp::body::json()) .and(signer) .and(validator_store_filter) - .and(runtime_filter) - .and(log_filter) - .and_then(|request, signer, validator_store, runtime, log| { + .and(task_executor_filter) + .and(log_filter.clone()) + .and_then(|request, signer, validator_store, task_executor, log| { blocking_signed_json_task(signer, move || { - keystores::delete(request, validator_store, runtime, log) + remotekeys::delete(request, validator_store, task_executor, log) }) }); @@ -588,17 +647,19 @@ pub fn serve( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) - .or(get_std_keystores), + .or(get_std_keystores) + .or(get_std_remotekeys), ) .or(warp::post().and( post_validators .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) - .or(post_std_keystores), + .or(post_std_keystores) + .or(post_std_remotekeys), )) .or(warp::patch().and(patch_validators)) - .or(warp::delete().and(delete_std_keystores)), + .or(warp::delete().and(delete_std_keystores.or(delete_std_remotekeys))), ) // The auth route is the only route that is allowed to be accessed without the API token. .or(warp::get().and(get_auth)) diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs new file mode 100644 index 00000000000..402396d4b4e --- /dev/null +++ b/validator_client/src/http_api/remotekeys.rs @@ -0,0 +1,209 @@ +//! Implementation of the standard remotekey management API. 
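[Editor's note] Before the module body (next hunk), it may help to see the wire format these routes speak. A sketch of the request bodies per the standard key-manager API; the pubkey and signer URL below are placeholders.

```rust
use serde_json::json;

fn example_remotekeys_bodies() -> (serde_json::Value, serde_json::Value) {
    // POST /eth/v1/remotekeys
    let import = json!({
        "remote_keys": [{
            "pubkey": "0xa99a...e7db", // truncated 48-byte BLS pubkey
            "url": "https://remote.signer.example"
        }]
    });
    // DELETE /eth/v1/remotekeys; each key in the response carries a
    // status of "imported"/"duplicate"/"error" (import) or
    // "deleted"/"not_found"/"error" (delete).
    let delete = json!({ "pubkeys": ["0xa99a...e7db"] });
    (import, delete)
}
```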
+use crate::{initialized_validators::Error, InitializedValidators, ValidatorStore};
+use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition};
+use eth2::lighthouse_vc::std_types::{
+    DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse,
+    ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse,
+    ListRemotekeysResponse, SingleListRemotekeysResponse, Status,
+};
+use slog::{info, warn, Logger};
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use task_executor::TaskExecutor;
+use tokio::runtime::Handle;
+use types::{EthSpec, PublicKeyBytes};
+use url::Url;
+use warp::Rejection;
+use warp_utils::reject::custom_server_error;
+
+pub fn list<T: SlotClock + 'static, E: EthSpec>(
+    validator_store: Arc<ValidatorStore<T, E>>,
+) -> ListRemotekeysResponse {
+    let initialized_validators_rwlock = validator_store.initialized_validators();
+    let initialized_validators = initialized_validators_rwlock.read();
+
+    let keystores = initialized_validators
+        .validator_definitions()
+        .iter()
+        .filter(|def| def.enabled)
+        .filter_map(|def| {
+            let validating_pubkey = def.voting_public_key.compress();
+
+            match &def.signing_definition {
+                SigningDefinition::LocalKeystore { .. } => None,
+                SigningDefinition::Web3Signer { url, .. } => Some(SingleListRemotekeysResponse {
+                    pubkey: validating_pubkey,
+                    url: url.clone(),
+                    readonly: false,
+                }),
+            }
+        })
+        .collect::<Vec<_>>();
+
+    ListRemotekeysResponse { data: keystores }
+}
+
+pub fn import<T: SlotClock + 'static, E: EthSpec>(
+    request: ImportRemotekeysRequest,
+    validator_store: Arc<ValidatorStore<T, E>>,
+    task_executor: TaskExecutor,
+    log: Logger,
+) -> Result<ImportRemotekeysResponse, Rejection> {
+    info!(
+        log,
+        "Importing remotekeys via standard HTTP API";
+        "count" => request.remote_keys.len(),
+    );
+    // Import each remotekey. Some remotekeys may fail to be imported, so we record a status for each.
+    let mut statuses = Vec::with_capacity(request.remote_keys.len());
+
+    for remotekey in request.remote_keys {
+        let status = if let Some(handle) = task_executor.handle() {
+            // Import the remotekey.
+            match import_single_remotekey(remotekey.pubkey, remotekey.url, &validator_store, handle)
+            {
+                Ok(status) => Status::ok(status),
+                Err(e) => {
+                    warn!(
+                        log,
+                        "Error importing remotekey, skipped";
+                        "pubkey" => remotekey.pubkey.to_string(),
+                        "error" => ?e,
+                    );
+                    Status::error(ImportRemotekeyStatus::Error, e)
+                }
+            }
+        } else {
+            Status::error(
+                ImportRemotekeyStatus::Error,
+                "validator client shutdown".into(),
+            )
+        };
+        statuses.push(status);
+    }
+    Ok(ImportRemotekeysResponse { data: statuses })
+}
+
+fn import_single_remotekey<T: SlotClock + 'static, E: EthSpec>(
+    pubkey: PublicKeyBytes,
+    url: String,
+    validator_store: &ValidatorStore<T, E>,
+    handle: Handle,
+) -> Result<ImportRemotekeyStatus, String> {
+    if let Err(url_err) = Url::parse(&url) {
+        return Err(format!("failed to parse remotekey URL: {}", url_err));
+    }
+
+    let pubkey = pubkey
+        .decompress()
+        .map_err(|_| format!("invalid pubkey: {}", pubkey))?;
+
+    if let Some(def) = validator_store
+        .initialized_validators()
+        .read()
+        .validator_definitions()
+        .iter()
+        .find(|def| def.voting_public_key == pubkey)
+    {
+        if def.signing_definition.is_local_keystore() {
+            return Err("Pubkey already present in local keystore.".into());
+        } else if def.enabled {
+            return Ok(ImportRemotekeyStatus::Duplicate);
+        }
+    }
+
+    // Remotekeys are stored as web3signers.
+    // The remotekey API provides fewer configuration options than the web3signer API.
+    let web3signer_validator = ValidatorDefinition {
+        enabled: true,
+        voting_public_key: pubkey,
+        graffiti: None,
+        suggested_fee_recipient: None,
+        description: String::from("Added by remotekey API"),
+        signing_definition: SigningDefinition::Web3Signer {
+            url,
+            root_certificate_path: None,
+            request_timeout_ms: None,
+            client_identity_path: None,
+            client_identity_password: None,
+        },
+    };
+    handle
+        .block_on(validator_store.add_validator(web3signer_validator))
+        .map_err(|e| format!("failed to initialize validator: {:?}", e))?;
+
+    Ok(ImportRemotekeyStatus::Imported)
+}
+
+pub fn delete<T: SlotClock + 'static, E: EthSpec>(
+    request: DeleteRemotekeysRequest,
+    validator_store: Arc<ValidatorStore<T, E>>,
+    task_executor: TaskExecutor,
+    log: Logger,
+) -> Result<DeleteRemotekeysResponse, Rejection> {
+    info!(
+        log,
+        "Deleting remotekeys via standard HTTP API";
+        "count" => request.pubkeys.len(),
+    );
+    // Remove from initialized validators.
+    let initialized_validators_rwlock = validator_store.initialized_validators();
+    let mut initialized_validators = initialized_validators_rwlock.write();
+
+    let statuses = request
+        .pubkeys
+        .iter()
+        .map(|pubkey_bytes| {
+            match delete_single_remotekey(
+                pubkey_bytes,
+                &mut initialized_validators,
+                task_executor.clone(),
+            ) {
+                Ok(status) => Status::ok(status),
+                Err(error) => {
+                    warn!(
+                        log,
+                        "Error deleting remotekey";
+                        "pubkey" => ?pubkey_bytes,
+                        "error" => ?error,
+                    );
+                    Status::error(DeleteRemotekeyStatus::Error, error)
+                }
+            }
+        })
+        .collect::<Vec<_>>();
+
+    // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out
+    // of date as it resets when it can't be decrypted. We update it just a single time to avoid
+    // continually resetting it after each key deletion.
+    if let Some(handle) = task_executor.handle() {
+        handle
+            .block_on(initialized_validators.update_validators())
+            .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?;
+    }
+
+    Ok(DeleteRemotekeysResponse { data: statuses })
+}
+
+fn delete_single_remotekey(
+    pubkey_bytes: &PublicKeyBytes,
+    initialized_validators: &mut InitializedValidators,
+    task_executor: TaskExecutor,
+) -> Result<DeleteRemotekeyStatus, String> {
+    if let Some(handle) = task_executor.handle() {
+        let pubkey = pubkey_bytes
+            .decompress()
+            .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?;
+
+        match handle.block_on(initialized_validators.delete_definition_and_keystore(&pubkey, false))
+        {
+            Ok(_) => Ok(DeleteRemotekeyStatus::Deleted),
+            Err(e) => match e {
+                Error::ValidatorNotInitialized(_) => Ok(DeleteRemotekeyStatus::NotFound),
+                _ => Err(format!("unable to disable and delete: {:?}", e)),
+            },
+        }
+    } else {
+        Err("validator client shutdown".into())
+    }
+}
diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs
index eef76eb3630..210555d9c00 100644
--- a/validator_client/src/http_api/tests.rs
+++ b/validator_client/src/http_api/tests.rs
@@ -102,7 +102,7 @@ impl ApiTester {
             spec,
             Some(Arc::new(DoppelgangerService::new(log.clone()))),
             slot_clock,
-            executor,
+            executor.clone(),
             log.clone(),
         ));
@@ -113,7 +113,7 @@ impl ApiTester {
         let initialized_validators = validator_store.initialized_validators();
 
         let context = Arc::new(Context {
-            runtime,
+            task_executor: executor,
            api_secret,
            validator_dir: Some(validator_dir.path().into()),
            validator_store: Some(validator_store.clone()),
@@ -457,6 +457,8 @@ impl ApiTester {
                url: format!("http://signer_{}.com/", i),
                root_certificate_path: None,
                request_timeout_ms: None,
+                client_identity_path: None,
+                client_identity_password: None,
            }
        })
        .collect();
diff --git
a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 427f22adc3e..a381378ffe9 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -8,8 +8,7 @@ use eth2::lighthouse_vc::{ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; -use std::collections::HashMap; -use std::path::Path; +use std::{collections::HashMap, path::Path}; fn new_keystore(password: ZeroizeString) -> Keystore { let keypair = Keypair::random(); @@ -41,6 +40,21 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorReq url: web3_signer_url(), root_certificate_path: None, request_timeout_ms: None, + client_identity_path: None, + client_identity_password: None, + } +} + +fn new_remotekey_validator() -> (Keypair, SingleImportRemotekeysRequest) { + let keypair = Keypair::random(); + let pk = keypair.pk.clone(); + (keypair, remotekey_validator_with_pubkey(pk)) +} + +fn remotekey_validator_with_pubkey(pubkey: PublicKey) -> SingleImportRemotekeysRequest { + SingleImportRemotekeysRequest { + pubkey: pubkey.compress(), + url: web3_signer_url(), } } @@ -107,7 +121,7 @@ fn all_delete_error(count: usize) -> impl Iterator all_with_status(count, DeleteKeystoreStatus::Error) } -fn check_get_response<'a>( +fn check_keystore_get_response<'a>( response: &ListKeystoresResponse, expected_keystores: impl IntoIterator, ) { @@ -118,7 +132,7 @@ fn check_get_response<'a>( } } -fn check_import_response( +fn check_keystore_import_response( response: &ImportKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -131,7 +145,7 @@ fn check_import_response( } } -fn check_delete_response<'a>( +fn check_keystore_delete_response<'a>( response: &DeleteKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -144,6 +158,41 @@ fn check_delete_response<'a>( } } +fn check_remotekey_get_response( + response: &ListRemotekeysResponse, + expected_keystores: impl IntoIterator, +) { + for expected in expected_keystores { + assert!(response.data.contains(&expected)); + } +} + +fn check_remotekey_import_response( + response: &ImportRemotekeysResponse, + expected_statuses: impl IntoIterator, +) { + for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) { + assert_eq!( + expected_status, status.status, + "message: {:?}", + status.message + ); + } +} + +fn check_remotekey_delete_response( + response: &DeleteRemotekeysResponse, + expected_statuses: impl IntoIterator, +) { + for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) { + assert_eq!( + status.status, expected_status, + "message: {:?}", + status.message + ); + } +} + #[test] fn get_auth_no_token() { run_test(|mut tester| async move { @@ -189,11 +238,11 @@ fn import_new_keystores() { .unwrap(); // All keystores should be imported. - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Check that GET lists all the imported keystores. let get_res = tester.client.get_keystores().await.unwrap(); - check_get_response(&get_res, &keystores); + check_keystore_get_response(&get_res, &keystores); }) } @@ -214,15 +263,15 @@ fn import_only_duplicate_keystores() { // All keystores should be imported on first import. 
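[Editor's note] A note on the renamed `check_*` helpers above and their new remotekey variants: they compare with `itertools::zip_eq`, which panics when the two iterators differ in length, so a response with a missing or extra status fails the test rather than being silently truncated. A minimal illustration of the pattern (hypothetical helper); the duplicate-import test continues below.

```rust
use itertools::Itertools;

fn check_statuses<S: PartialEq + std::fmt::Debug>(actual: &[S], expected: &[S]) {
    // Unlike `zip`, `zip_eq` panics on a length mismatch, so a short or
    // long response cannot slip past the per-element assertions.
    for (a, e) in actual.iter().zip_eq(expected.iter()) {
        assert_eq!(a, e);
    }
}
```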
let import_res = tester.client.post_keystores(&req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // No keystores should be imported on repeat import. let import_res = tester.client.post_keystores(&req).await.unwrap(); - check_import_response(&import_res, all_duplicate(keystores.len())); + check_keystore_import_response(&import_res, all_duplicate(keystores.len())); // Check that GET lists all the imported keystores. let get_res = tester.client.get_keystores().await.unwrap(); - check_get_response(&get_res, &keystores); + check_keystore_get_response(&get_res, &keystores); }) } @@ -262,7 +311,7 @@ fn import_some_duplicate_keystores() { }; let import_res = tester.client.post_keystores(&req1).await.unwrap(); - check_import_response(&import_res, all_imported(keystores1.len())); + check_keystore_import_response(&import_res, all_imported(keystores1.len())); // Check partial import. let expected = (0..num_keystores).map(|i| { @@ -273,7 +322,7 @@ fn import_some_duplicate_keystores() { } }); let import_res = tester.client.post_keystores(&req2).await.unwrap(); - check_import_response(&import_res, expected); + check_keystore_import_response(&import_res, expected); }) } @@ -323,7 +372,7 @@ fn get_web3_signer_keystores() { .unwrap(); // All keystores should be imported. - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Add some web3signer validators. let remote_vals = (0..num_remote) @@ -391,14 +440,14 @@ fn import_and_delete_conflicting_web3_signer_keystores() { slashing_protection: None, }; let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_import_error(keystores.len())); + check_keystore_import_response(&import_res, all_import_error(keystores.len())); // Attempt to delete the web3signer validators, which should fail. let delete_req = DeleteKeystoresRequest { pubkeys: pubkeys.clone(), }; let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_delete_error(keystores.len())); + check_keystore_delete_response(&delete_res, all_delete_error(keystores.len())); // Get should still list all the validators as `readonly`. let get_res = tester.client.get_keystores().await.unwrap(); @@ -418,9 +467,9 @@ fn import_and_delete_conflicting_web3_signer_keystores() { .unwrap(); } let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_import_error(keystores.len())); + check_keystore_import_response(&import_res, all_import_error(keystores.len())); let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_delete_error(keystores.len())); + check_keystore_delete_response(&delete_res, all_delete_error(keystores.len())); }) } @@ -464,7 +513,7 @@ fn import_keystores_wrong_password() { ImportKeystoreStatus::Imported } }); - check_import_response(&import_res, expected_statuses); + check_keystore_import_response(&import_res, expected_statuses); // Import again with the correct passwords and check that the statuses are as expected. 
let correct_import_req = ImportKeystoresRequest { @@ -484,7 +533,7 @@ fn import_keystores_wrong_password() { ImportKeystoreStatus::Duplicate } }); - check_import_response(&import_res, expected_statuses); + check_keystore_import_response(&import_res, expected_statuses); // Import one final time, at which point all keys should be duplicates. let import_res = tester @@ -492,7 +541,7 @@ fn import_keystores_wrong_password() { .post_keystores(&correct_import_req) .await .unwrap(); - check_import_response( + check_keystore_import_response( &import_res, (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate), ); @@ -528,11 +577,11 @@ fn import_invalid_slashing_protection() { .unwrap(); // All keystores should be imported. - check_import_response(&import_res, all_import_error(keystores.len())); + check_keystore_import_response(&import_res, all_import_error(keystores.len())); // Check that GET lists none of the failed keystores. let get_res = tester.client.get_keystores().await.unwrap(); - check_get_response(&get_res, &[]); + check_keystore_get_response(&get_res, &[]); }) } @@ -669,7 +718,7 @@ fn generic_migration_test( }) .await .unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Sign attestations on VC1. for (validator_index, mut attestation) in first_vc_attestations { @@ -694,7 +743,7 @@ fn generic_migration_test( }) .await .unwrap(); - check_delete_response(&delete_res, all_deleted(delete_indices.len())); + check_keystore_delete_response(&delete_res, all_deleted(delete_indices.len())); // Check that slashing protection data was returned for all selected validators. assert_eq!( @@ -745,7 +794,7 @@ fn generic_migration_test( }) .await .unwrap(); - check_import_response(&import_res, all_imported(import_indices.len())); + check_keystore_import_response(&import_res, all_imported(import_indices.len())); // Sign attestations on the second VC. for (validator_index, mut attestation, should_succeed) in second_vc_attestations { @@ -779,18 +828,18 @@ fn delete_keystores_twice() { slashing_protection: None, }; let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // 2. Delete all. let delete_req = DeleteKeystoresRequest { pubkeys: keystores.iter().map(keystore_pubkey).collect(), }; let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_deleted(keystores.len())); + check_keystore_delete_response(&delete_res, all_deleted(keystores.len())); // 3. Delete again. 
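[Editor's note] The delete-twice tests encode different second-delete results for the two key types: local keystores report `not_active` (the standard keystore API keeps that status for keys whose slashing-protection data is still held), while the remotekey API, which defines no such status, reports `not_found`. A hedged summary sketch (the helper is hypothetical); the keystore re-delete follows.

```rust
/// Expected status when deleting a key that was already deleted,
/// per the tests in this file.
fn second_delete_status(is_local_keystore: bool) -> &'static str {
    if is_local_keystore {
        // Slashing-protection data for the key is retained.
        "not_active"
    } else {
        // The remotekey definition is simply gone.
        "not_found"
    }
}
```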
let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_not_active(keystores.len())); + check_keystore_delete_response(&delete_res, all_not_active(keystores.len())); }) } @@ -808,7 +857,7 @@ fn delete_nonexistent_keystores() { pubkeys: keystores.iter().map(keystore_pubkey).collect(), }; let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_not_found(keystores.len())); + check_keystore_delete_response(&delete_res, all_not_found(keystores.len())); }) } @@ -868,7 +917,7 @@ fn delete_concurrent_with_signing() { }) .await .unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Start several threads signing attestations at sequential epochs. let mut join_handles = vec![]; @@ -936,8 +985,7 @@ fn delete_concurrent_with_signing() { for interchange in collected_slashing_protection .into_iter() - .map(Result::unwrap) - .flatten() + .flat_map(Result::unwrap) { for validator_data in interchange.data { slashing_protection_map @@ -972,7 +1020,7 @@ fn delete_then_reimport() { slashing_protection: None, }; let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // 2. Delete all. let delete_res = tester @@ -982,10 +1030,770 @@ fn delete_then_reimport() { }) .await .unwrap(); - check_delete_response(&delete_res, all_deleted(keystores.len())); + check_keystore_delete_response(&delete_res, all_deleted(keystores.len())); // 3. Re-import let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); + }) +} + +#[test] +fn get_empty_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + let res = tester.client.get_remotekeys().await.unwrap(); + assert_eq!(res, ListRemotekeysResponse { data: vec![] }); + }) +} + +#[test] +fn import_new_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let remotekeys = (0..3) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All keystores should be imported. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Check list response. + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_same_remotekey_different_url() { + run_test(|tester| async move { + let _ = &tester; + + // Create two remotekeys with different urls. 
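[Editor's note] The remotekey tests that follow pin down import precedence when a definition with the same pubkey already exists. Summarised as a sketch (the helper is hypothetical; the statuses are the `std_types` used above); the same-pubkey tests continue below.

```rust
use eth2::lighthouse_vc::std_types::ImportRemotekeyStatus;

/// Hedged summary of `import_single_remotekey`'s precedence rules.
fn expected_import_status(
    existing_local_keystore: bool,
    existing_remote_enabled: Option<bool>,
) -> Result<ImportRemotekeyStatus, String> {
    if existing_local_keystore {
        // A local keystore with the same pubkey always wins.
        return Err("Pubkey already present in local keystore.".into());
    }
    match existing_remote_enabled {
        // An enabled web3signer/remotekey definition is kept as-is.
        Some(true) => Ok(ImportRemotekeyStatus::Duplicate),
        // A disabled definition is overwritten by the fresh import.
        Some(false) | None => Ok(ImportRemotekeyStatus::Imported),
    }
}
```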
+        let remotekey1 = new_remotekey_validator().1;
+        let mut remotekey2 = remotekey1.clone();
+        remotekey2.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into();
+        let remotekeys = vec![remotekey1, remotekey2];
+
+        let import_res = tester
+            .client
+            .post_remotekeys(&ImportRemotekeysRequest {
+                remote_keys: remotekeys.clone(),
+            })
+            .await
+            .unwrap();
+
+        // Both remotekeys have the same public key and therefore only the first one should be imported.
+        check_remotekey_import_response(
+            &import_res,
+            vec![
+                ImportRemotekeyStatus::Imported,
+                ImportRemotekeyStatus::Duplicate,
+            ]
+            .into_iter(),
+        );
+
+        // Only first key is imported and should be returned.
+        let get_res = tester.client.get_remotekeys().await.unwrap();
+        check_remotekey_get_response(
+            &get_res,
+            vec![SingleListRemotekeysResponse {
+                pubkey: remotekeys[0].pubkey,
+                url: remotekeys[0].url.clone(),
+                readonly: false,
+            }],
+        );
+    })
+}
+
+#[test]
+fn delete_remotekey_then_reimport_different_url() {
+    run_test(|tester| async move {
+        let _ = &tester;
+
+        // Create a remotekey.
+        let mut remotekey = new_remotekey_validator().1;
+        let remotekeys = vec![remotekey.clone()];
+
+        // Import and delete the remotekey.
+        let import_res = tester
+            .client
+            .post_remotekeys(&ImportRemotekeysRequest {
+                remote_keys: remotekeys.clone(),
+            })
+            .await
+            .unwrap();
+        check_remotekey_import_response(
+            &import_res,
+            vec![ImportRemotekeyStatus::Imported].into_iter(),
+        );
+        let delete_req = DeleteRemotekeysRequest {
+            pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(),
+        };
+        let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap();
+        check_remotekey_delete_response(
+            &delete_res,
+            all_with_status(remotekeys.len(), DeleteRemotekeyStatus::Deleted),
+        );
+
+        // Change remotekey url.
+        remotekey.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into();
+        let remotekeys = vec![remotekey.clone()];
+
+        // Reimport remotekey.
+        let import_res = tester
+            .client
+            .post_remotekeys(&ImportRemotekeysRequest {
+                remote_keys: remotekeys.clone(),
+            })
+            .await
+            .unwrap();
+        check_remotekey_import_response(
+            &import_res,
+            vec![ImportRemotekeyStatus::Imported].into_iter(),
+        );
+    })
+}
+
+#[test]
+fn import_only_duplicate_remotekeys() {
+    run_test(|tester| async move {
+        let _ = &tester;
+        let remotekeys = (0..3)
+            .map(|_| new_remotekey_validator().1)
+            .collect::<Vec<_>>();
+
+        // All remotekeys should be imported on first import.
+        let import_res = tester
+            .client
+            .post_remotekeys(&ImportRemotekeysRequest {
+                remote_keys: remotekeys.clone(),
+            })
+            .await
+            .unwrap();
+        check_remotekey_import_response(
+            &import_res,
+            all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported),
+        );
+
+        // No remotekeys should be imported on repeat import.
+        let import_res = tester
+            .client
+            .post_remotekeys(&ImportRemotekeysRequest {
+                remote_keys: remotekeys.clone(),
+            })
+            .await
+            .unwrap();
+        check_remotekey_import_response(
+            &import_res,
+            all_with_status(remotekeys.len(), ImportRemotekeyStatus::Duplicate),
+        );
+
+        // Check list response.
+ let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_some_duplicate_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + let num_remotekeys = 5; + let remotekeys_all = (0..num_remotekeys) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Select even numbered keystores. + let remotekeys_even = remotekeys_all + .iter() + .enumerate() + .filter_map(|(i, remotekey)| { + if i % 2 == 0 { + Some(remotekey.clone()) + } else { + None + } + }) + .collect::>(); + + // Only import every second remotekey. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys_even.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys_even.len(), ImportRemotekeyStatus::Imported), + ); + + let expected = (0..num_remotekeys).map(|i| { + if i % 2 == 0 { + ImportRemotekeyStatus::Duplicate + } else { + ImportRemotekeyStatus::Imported + } + }); + + // Try to import all keys. Every second import should be a duplicate. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys_all.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response(&import_res, expected); + + // Check list response. + let expected_responses = remotekeys_all + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remote_and_local_keys() { + run_test(|tester| async move { + let _ = &tester; + let num_local = 3; + let num_remote = 2; + + // Generate local keystores. + let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Import keystores. + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response( + &import_res, + all_with_status(keystores.len(), ImportKeystoreStatus::Imported), + ); + + // Add some remotekey validators. + let remotekeys = (0..num_remote) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All remotekeys should be imported. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Check that only remote validators are returned. 
+ let get_res = tester.client.get_keystores().await.unwrap(); + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleKeystoreResponse { + validating_pubkey: remotekey.pubkey, + derivation_path: None, + readonly: Some(true), + }) + .collect::>(); + for response in expected_responses { + assert!(get_res.data.contains(&response), "{:?}", response); + } + }) +} + +#[test] +fn import_same_local_and_remote_keys() { + run_test(|tester| async move { + let _ = &tester; + let num_local = 3; + + // Generate local keystores. + let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Generate remotekeys with same pubkey as local keystores. + let mut remotekeys = Vec::new(); + for keystore in keystores.iter() { + remotekeys.push(remotekey_validator_with_pubkey( + keystore.public_key().unwrap(), + )); + } + + // Import keystores. + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response( + &import_res, + all_with_status(keystores.len(), ImportKeystoreStatus::Imported), + ); + + // Try to import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All remotekey import should fail. Already imported as local keystore. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Error), + ); + + // Check that only local keystores are returned. + let get_res = tester.client.get_keystores().await.unwrap(); + let expected_responses = keystores + .iter() + .map(|local_keystore| SingleKeystoreResponse { + validating_pubkey: keystore_pubkey(local_keystore), + derivation_path: local_keystore.path(), + readonly: None, + }) + .collect::>(); + for response in expected_responses { + assert!(get_res.data.contains(&response), "{:?}", response); + } + }) +} +#[test] +fn import_same_remote_and_local_keys() { + run_test(|tester| async move { + let _ = &tester; + let num_local = 3; + + // Generate local keystores. + let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Generate remotekeys with same pubkey as local keystores. + let mut remotekeys = Vec::new(); + for keystore in keystores.iter() { + remotekeys.push(remotekey_validator_with_pubkey( + keystore.public_key().unwrap(), + )); + } + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All remotekeys should be imported. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Try to import local keystores. + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All local keystore imports should fail. Already imported as remotekeys. + check_keystore_import_response( + &import_res, + all_with_status(keystores.len(), ImportKeystoreStatus::Error), + ); + + // Check that only remotekeys are returned. 
+ let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn delete_remotekeys_twice() { + run_test(|tester| async move { + let _ = &tester; + + // Generate some remotekeys. + let remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Import all remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Delete all. + let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::Deleted), + ); + + // Try to delete again. + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::NotFound), + ); + + // Check list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, Vec::new()); + }) +} + +#[test] +fn delete_nonexistent_remotekey() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Try to delete remotekeys. + let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::NotFound), + ); + + // Check list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, Vec::new()); + }) +} + +#[test] +fn delete_then_reimport_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let mut remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Import all remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Delete all. 
+ let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::Deleted), + ); + + // Change remote key url + for rk in remotekeys.iter_mut() { + rk.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into(); + } + + // Re-import + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Check list response. + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remotekey_web3signer() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Generate web3signers. + let web3signers = (0..2) + .map(|_| new_web3signer_validator().1) + .collect::>(); + + // Import web3signers. + tester + .client + .post_lighthouse_validators_web3signer(&web3signers) + .await + .unwrap(); + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .chain( + web3signers + .iter() + .map(|websigner| SingleListRemotekeysResponse { + pubkey: websigner.voting_public_key.compress(), + url: websigner.url.clone(), + readonly: false, + }), + ) + .collect::>(); + + // Check remotekey list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remotekey_web3signer_disabled() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekey. + let (kp, remotekey_req) = new_remotekey_validator(); + + // Generate web3signer with same PK. + let mut web3signer_req = web3signer_validator_with_pubkey(kp.pk); + web3signer_req.enable = false; + + // Import web3signers. + let _ = tester + .client + .post_lighthouse_validators_web3signer(&vec![web3signer_req]) + .await + .unwrap(); + + // 1 validator imported. + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 0); + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: vec![remotekey_req.clone()].clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(1, ImportRemotekeyStatus::Imported), + ); + + // Still only one validator. Web3signer is overwritten by remotekey. + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 1); + + // Remotekey overwrites web3signer. 
+ let expected_responses = vec![SingleListRemotekeysResponse { + pubkey: remotekey_req.pubkey, + url: remotekey_req.url.clone(), + readonly: false, + }]; + + // Check remotekey list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remotekey_web3signer_enabled() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekey. + let (kp, remotekey_req) = new_remotekey_validator(); + + // Generate web3signer with same PK. + let mut web3signer_req = web3signer_validator_with_pubkey(kp.pk); + web3signer_req.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into(); + web3signer_req.enable = true; + + // Import web3signers. + tester + .client + .post_lighthouse_validators_web3signer(&vec![web3signer_req.clone()]) + .await + .unwrap(); + + // 1 validator imported. + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 1); + let vals = tester.initialized_validators.read(); + let web3_vals = vals.validator_definitions().clone(); + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: vec![remotekey_req.clone()].clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(1, ImportRemotekeyStatus::Duplicate), + ); + + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 1); + let vals = tester.initialized_validators.read(); + let remote_vals = vals.validator_definitions().clone(); + + // Web3signer should not be overwritten since it is enabled. + assert!(web3_vals == remote_vals); + + // Remotekey should not be imported. + let expected_responses = vec![SingleListRemotekeysResponse { + pubkey: web3signer_req.voting_public_key.compress(), + url: web3signer_req.url.clone(), + readonly: false, + }]; + + // Check remotekey list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); }) } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 29e52c38701..56c1299b3f6 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -131,6 +131,22 @@ lazy_static::lazy_static! 
{ &["endpoint"] ); + /* + * Beacon node availability metrics + */ + pub static ref AVAILABLE_BEACON_NODES_COUNT: Result = try_create_int_gauge( + "vc_beacon_nodes_available_count", + "Number of available beacon nodes", + ); + pub static ref SYNCED_BEACON_NODES_COUNT: Result = try_create_int_gauge( + "vc_beacon_nodes_synced_count", + "Number of synced beacon nodes", + ); + pub static ref TOTAL_BEACON_NODES_COUNT: Result = try_create_int_gauge( + "vc_beacon_nodes_total_count", + "Total number of beacon nodes", + ); + pub static ref ETH2_FALLBACK_CONFIGURED: Result = try_create_int_gauge( "sync_eth2_fallback_configured", "The number of configured eth2 fallbacks", diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index a4dedf16b2c..0d5d4ad76e9 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -14,12 +14,11 @@ use account_utils::{ }, ZeroizeString, }; -use eth2::lighthouse_vc::std_types::DeleteKeystoreStatus; use eth2_keystore::Keystore; use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; -use reqwest::{Certificate, Client, Error as ReqwestError}; +use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::fs::{self, File}; @@ -89,9 +88,14 @@ pub enum Error { /// Unable to read the root certificate file for the remote signer. InvalidWeb3SignerRootCertificateFile(io::Error), InvalidWeb3SignerRootCertificate(ReqwestError), + /// Unable to read the client certificate for the remote signer. + MissingWeb3SignerClientIdentityCertificateFile, + MissingWeb3SignerClientIdentityPassword, + InvalidWeb3SignerClientIdentityCertificateFile(io::Error), + InvalidWeb3SignerClientIdentityCertificate(ReqwestError), UnableToBuildWeb3SignerClient(ReqwestError), - /// Unable to apply an action to a validator because it is using a remote signer. - InvalidActionOnRemoteValidator, + /// Unable to apply an action to a validator. + InvalidActionOnValidator, } impl From for Error { @@ -239,6 +243,8 @@ impl InitializedValidator { url, root_certificate_path, request_timeout_ms, + client_identity_path, + client_identity_password, } => { let signing_url = build_web3_signer_url(&url, &def.voting_public_key) .map_err(|e| Error::InvalidWeb3SignerUrl(e.to_string()))?; @@ -255,6 +261,20 @@ impl InitializedValidator { builder }; + let builder = if let Some(path) = client_identity_path { + let identity = load_pkcs12_identity( + path, + &client_identity_password + .ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, + )?; + builder.identity(identity) + } else { + if client_identity_password.is_some() { + return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + } + builder + }; + let http_client = builder .build() .map_err(Error::UnableToBuildWeb3SignerClient)?; @@ -295,6 +315,19 @@ pub fn load_pem_certificate>(pem_path: P) -> Result>( + pkcs12_path: P, + password: &str, +) -> Result { + let mut buf = Vec::new(); + File::open(&pkcs12_path) + .map_err(Error::InvalidWeb3SignerClientIdentityCertificateFile)? 
+        .read_to_end(&mut buf)
+        .map_err(Error::InvalidWeb3SignerClientIdentityCertificateFile)?;
+    Identity::from_pkcs12_der(&buf, password)
+        .map_err(Error::InvalidWeb3SignerClientIdentityCertificate)
+}
+
 fn build_web3_signer_url(base_url: &str, voting_public_key: &PublicKey) -> Result<Url, url::ParseError> {
     Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key))
 }
@@ -443,7 +476,8 @@ impl InitializedValidators {
     pub async fn delete_definition_and_keystore(
         &mut self,
         pubkey: &PublicKey,
-    ) -> Result<DeleteKeystoreStatus, Error> {
+        is_local_keystore: bool,
+    ) -> Result<(), Error> {
         // 1. Disable the validator definition.
         //
         // We disable before removing so that in case of a crash the auto-discovery mechanism
@@ -454,16 +488,19 @@
             .iter_mut()
             .find(|def| &def.voting_public_key == pubkey)
         {
-            if def.signing_definition.is_local_keystore() {
+            // Update definition for local keystore
+            if def.signing_definition.is_local_keystore() && is_local_keystore {
                 def.enabled = false;
                 self.definitions
                     .save(&self.validators_dir)
                     .map_err(Error::UnableToSaveDefinitions)?;
+            } else if !def.signing_definition.is_local_keystore() && !is_local_keystore {
+                def.enabled = false;
             } else {
-                return Err(Error::InvalidActionOnRemoteValidator);
+                return Err(Error::InvalidActionOnValidator);
             }
         } else {
-            return Ok(DeleteKeystoreStatus::NotFound);
+            return Err(Error::ValidatorNotInitialized(pubkey.clone()));
         }
 
         // 2. Delete from `self.validators`, which holds the signing method.
@@ -491,7 +528,7 @@
             .save(&self.validators_dir)
             .map_err(Error::UnableToSaveDefinitions)?;
 
-        Ok(DeleteKeystoreStatus::Deleted)
+        Ok(())
     }
 
     /// Attempt to delete the voting keystore file, or its entire validator directory.
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index 039b54496cf..ce35a00351f 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -306,8 +306,18 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED,
             num_nodes.saturating_sub(1) as i64,
         );
-        // Initialize the number of connected, synced fallbacks to 0.
+        // Set the total beacon node count.
+        set_gauge(
+            &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT,
+            num_nodes as i64,
+        );
+
+        // Initialize the number of connected, synced beacon nodes to 0.
         set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0);
+        set_gauge(&http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, 0);
+        // Initialize the number of connected, available beacon nodes to 0.
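[Editor's note] Returning to the client-identity support added in `initialized_validators.rs` above: a standalone sketch of wiring a PKCS#12 bundle into `reqwest` for mutual TLS. The path and password are placeholders, and `reqwest` must be built with its `native-tls` feature for `Identity::from_pkcs12_der`. The metrics initialisation then continues below.

```rust
use std::fs::File;
use std::io::Read;

fn mtls_client(
    pkcs12_path: &str,
    password: &str,
) -> Result<reqwest::Client, Box<dyn std::error::Error>> {
    let mut der = Vec::new();
    File::open(pkcs12_path)?.read_to_end(&mut der)?;
    // The PKCS#12 archive bundles the client certificate and private key.
    let identity = reqwest::Identity::from_pkcs12_der(&der, password)?;
    Ok(reqwest::Client::builder().identity(identity).build()?)
}
```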
+ set_gauge(&http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, 0); + let mut beacon_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new(candidates, context.eth2_config.spec.clone(), log.clone()); @@ -498,7 +508,7 @@ impl ProductionValidatorClient { self.http_api_listen_addr = if self.config.http_api.enabled { let ctx = Arc::new(http_api::Context { - runtime: self.context.executor.runtime(), + task_executor: self.context.executor.clone(), api_secret, validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 6157027cb1f..732ae68ff86 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -40,8 +40,20 @@ async fn notify( log: &Logger, ) { let num_available = duties_service.beacon_nodes.num_available().await; + set_gauge( + &http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, + num_available as i64, + ); let num_synced = duties_service.beacon_nodes.num_synced().await; + set_gauge( + &http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, + num_synced as i64, + ); let num_total = duties_service.beacon_nodes.num_total(); + set_gauge( + &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + num_total as i64, + ); if num_synced > 0 { info!( log, diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index ad04717cc2e..b4b6caa05db 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -199,7 +199,8 @@ impl PreparationService { .map_err(|e| { error!( log, - "{}", format!("Error loading fee-recipient file: {:?}", e); + "Error loading fee-recipient file"; + "error" => ?e ); }) .unwrap_or(()); @@ -213,44 +214,39 @@ impl PreparationService { all_pubkeys .into_iter() .filter_map(|pubkey| { - let validator_index = self.validator_store.validator_index(&pubkey); - if let Some(validator_index) = validator_index { - let fee_recipient = if let Some(from_validator_defs) = - self.validator_store.suggested_fee_recipient(&pubkey) - { - // If there is a `suggested_fee_recipient` in the validator definitions yaml - // file, use that value. - Some(from_validator_defs) - } else { - // If there's nothing in the validator defs file, check the fee recipient - // file. + // Ignore fee recipients for keys without indices, they are inactive. + let validator_index = self.validator_store.validator_index(&pubkey)?; + + // If there is a `suggested_fee_recipient` in the validator definitions yaml + // file, use that value. + let fee_recipient = self + .validator_store + .suggested_fee_recipient(&pubkey) + .or_else(|| { + // If there's nothing in the validator defs file, check the fee + // recipient file. fee_recipient_file - .as_ref() - .and_then(|f| match f.get_fee_recipient(&pubkey) { - Ok(f) => f, - Err(_e) => None, - }) - // If there's nothing in the file, try the process-level default value. - .or(self.fee_recipient) - }; - - if let Some(fee_recipient) = fee_recipient { - Some(ProposerPreparationData { - validator_index, - fee_recipient, - }) - } else { - if spec.bellatrix_fork_epoch.is_some() { - error!( - log, - "Validator is missing fee recipient"; - "msg" => "update validator_definitions.yml", - "pubkey" => ?pubkey - ); - } - None - } + .as_ref()? + .get_fee_recipient(&pubkey) + .ok()? + }) + // If there's nothing in the file, try the process-level default value. 
+ .or(self.fee_recipient); + + if let Some(fee_recipient) = fee_recipient { + Some(ProposerPreparationData { + validator_index, + fee_recipient, + }) } else { + if spec.bellatrix_fork_epoch.is_some() { + error!( + log, + "Validator is missing fee recipient"; + "msg" => "update validator_definitions.yml", + "pubkey" => ?pubkey + ); + } None } }) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 5abe37f4392..b39ef9ef830 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -171,6 +171,8 @@ impl ValidatorStore { /// - Adding the validator definition to the YAML file, saving it to the filesystem. /// - Enabling the validator with the slashing protection database. /// - If `enable == true`, starting to perform duties for the validator. + // FIXME: ignore this clippy lint until the validator store is refactored to use async locks + #[allow(clippy::await_holding_lock)] pub async fn add_validator( &self, validator_def: ValidatorDefinition,