diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 00000000000..dac01630032
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,4 @@
+[env]
+# Set the number of arenas to 16 when using jemalloc.
+JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16"
+
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 13b84116955..f2ccaf438ac 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -5,6 +5,7 @@ on:
     branches:
       - unstable
       - stable
+      - capella
     tags:
       - v*
 
@@ -34,6 +35,11 @@ jobs:
         run: |
           echo "VERSION=latest" >> $GITHUB_ENV
           echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
+      - name: Extract version (if capella)
+        if: github.event.ref == 'refs/heads/capella'
+        run: |
+          echo "VERSION=capella" >> $GITHUB_ENV
+          echo "VERSION_SUFFIX=" >> $GITHUB_ENV
       - name: Extract version (if tagged release)
         if: startsWith(github.event.ref, 'refs/tags')
         run: |
@@ -43,7 +49,7 @@ jobs:
       VERSION: ${{ env.VERSION }}
       VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
   build-docker-single-arch:
-    name: build-docker-${{ matrix.binary }}
+    name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }}
     runs-on: ubuntu-22.04
     strategy:
       matrix:
@@ -51,6 +57,10 @@ jobs:
                  aarch64-portable,
                  x86_64,
                  x86_64-portable]
+        features: [
+          {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"},
+          {version_suffix: "-dev", env: "jemalloc,spec-minimal"}
+        ]
         include:
           - profile: maxperf
 
@@ -60,6 +70,7 @@ jobs:
     env:
       DOCKER_CLI_EXPERIMENTAL: enabled
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
+      FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
    steps:
      - uses: actions/checkout@v3
      - name: Update Rust
@@ -70,7 +81,7 @@ jobs:
      - name: Cross build Lighthouse binary
        run: |
          cargo install cross
-         env CROSS_PROFILE=${{ matrix.profile }} make build-${{ matrix.binary }}
+         env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }}
      - name: Move cross-built binary into Docker scope (if ARM)
        if: startsWith(matrix.binary, 'aarch64')
        run: |
@@ -98,7 +109,8 @@ jobs:
          docker buildx build \
            --platform=linux/${SHORT_ARCH} \
            --file ./Dockerfile.cross . \
-           --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \
+           --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \
+           --provenance=false \
            --push
  build-docker-multiarch:
    name: build-docker-multiarch${{ matrix.modernity }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8ca6ab0f923..2e63b4d6c24 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -134,11 +134,17 @@ jobs:
 
        - name: Build Lighthouse for Windows portable
          if: matrix.arch == 'x86_64-windows-portable'
-         run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}
+         # NOTE: profile set to release until this rustc issue is fixed:
+         #
+         # https://github.com/rust-lang/rust/issues/107781
+         #
+         # tracked at: https://github.com/sigp/lighthouse/issues/3964
+         run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release
 
        - name: Build Lighthouse for Windows modern
          if: matrix.arch == 'x86_64-windows'
-         run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}
+         # NOTE: profile set to release (see above)
+         run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release
 
        - name: Configure GPG and create artifacts
          if: startsWith(matrix.arch, 'x86_64-windows') != true
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index 8d52f7fa7e2..b7321df7848 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -10,9 +10,10 @@ on:
   pull_request:
 env:
   # Deny warnings in CI
-  RUSTFLAGS: "-D warnings"
+  # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
+  RUSTFLAGS: "-D warnings -C debuginfo=0"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
-  PINNED_NIGHTLY: nightly-2022-12-15
+  PINNED_NIGHTLY: nightly-2023-04-16
   # Prevent Github API rate limiting.
   LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 jobs:
@@ -280,7 +281,7 @@ jobs:
     - uses: actions/checkout@v3
     - uses: actions/setup-go@v3
       with:
-        go-version: '1.17'
+        go-version: '1.20'
     - uses: actions/setup-dotnet@v3
       with:
         dotnet-version: '6.0.201'
@@ -306,16 +307,6 @@ jobs:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Typecheck benchmark code without running it
       run: make check-benches
-  check-consensus:
-    name: check-consensus
-    runs-on: ubuntu-latest
-    needs: cargo-fmt
-    steps:
-    - uses: actions/checkout@v3
-    - name: Get latest version of stable Rust
-      run: rustup update stable
-    - name: Typecheck consensus code in strict mode
-      run: make check-consensus
   clippy:
     name: clippy
     runs-on: ubuntu-latest
@@ -382,14 +373,12 @@ jobs:
     - uses: actions/checkout@v3
     - name: Install Rust (${{ env.PINNED_NIGHTLY }})
       run: rustup toolchain install $PINNED_NIGHTLY
-    # NOTE: cargo-udeps version is pinned until this issue is resolved:
-    # https://github.com/est31/cargo-udeps/issues/135
    - name: Install Protoc
      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Install cargo-udeps
-      run: cargo install cargo-udeps --locked --force --version 0.1.30
+      run: cargo install cargo-udeps --locked --force
    - name: Create Cargo config dir
      run: mkdir -p .cargo
    - name: Install custom Cargo config
diff --git a/.gitignore b/.gitignore
index ae9f83c46dd..1b7e5dbb88b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@ genesis.ssz
 
 # IntelliJ
 /*.iml
+.idea
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index f1daf4dbdfb..a0f9fc7491f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -88,6 +88,16 @@ dependencies = [
  "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "aead"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
+dependencies = [
+ "crypto-common",
+ "generic-array",
+]
+
 [[package]]
 name = "aes"
 version = "0.6.0"
@@ -113,17 +123,14 @@ dependencies = [
 ]
 
 [[package]]
-name = "aes-gcm"
-version = "0.8.0"
+name = "aes"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da"
+checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241"
 dependencies = [
- "aead 0.3.2",
- "aes 0.6.0",
- "cipher 0.2.5",
- "ctr 0.6.0",
- "ghash 0.3.1",
- "subtle",
+ "cfg-if",
+ "cipher 0.4.4",
+ "cpufeatures",
 ]
 
 [[package]]
@@ -140,6 +147,20 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "aes-gcm"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c"
+dependencies = [
+ "aead 0.5.2",
+ "aes 0.8.2",
+ "cipher 0.4.4",
+ "ctr 0.9.2",
+ "ghash 0.5.0",
+ "subtle",
+]
+
 [[package]]
 name = "aes-soft"
 version = "0.6.4"
@@ -205,15 +226,14 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.68"
+version = "1.0.70"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61"
+checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
 
 [[package]]
 name = "arbitrary"
-version = "1.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0224938f92e7aef515fac2ff2d18bd1115c1394ddf4a092e0c87e8be9499ee5"
+version = "1.3.0"
+source =
"git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" dependencies = [ "derive_arbitrary", ] @@ -226,9 +246,9 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -245,27 +265,27 @@ dependencies = [ "asn1-rs-derive 0.1.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "asn1-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -276,7 +296,7 @@ checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -288,7 +308,7 @@ checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -300,7 +320,7 @@ checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -311,64 +331,64 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg 1.1.0", + "cfg-if", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", - "windows-sys", ] [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite 0.2.9", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name 
= "async-trait" -version = "0.1.61" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -397,9 +417,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "attohttpc" @@ -432,7 +452,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -464,7 +484,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.5", + "itoa", "matchit", "memchr", "mime", @@ -530,16 +550,22 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" +source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" dependencies = [ "ethereum-consensus", "http", @@ -601,10 +627,11 @@ dependencies = [ "state_processing", "store", "strum", - "superstruct", + "superstruct 0.5.0", "task_executor", "tempfile", "tokio", + "tokio-stream", "tree_hash", "types", "unused_port", @@ -612,7 +639,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.4.0" +version = "4.1.0" dependencies = [ "beacon_chain", "clap", @@ -723,9 +750,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -778,9 +805,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bollard-stubs" +version = "1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +dependencies = [ + "chrono", + "serde", + "serde_with", +] + [[package]] name = "boot_node" -version = "3.4.0" +version = "4.1.0" dependencies = [ "beacon_node", "clap", @@ -810,18 +848,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - 
"lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "buf_redux" version = "0.8.4" @@ -837,6 +863,7 @@ name = "builder_client" version = "0.1.0" dependencies = [ "eth2", + "lighthouse_version", "reqwest", "sensitive_url", "serde", @@ -845,9 +872,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-slice-cast" @@ -863,9 +890,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" dependencies = [ "serde", ] @@ -914,9 +941,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "ccm" @@ -935,7 +962,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", ] [[package]] @@ -971,14 +998,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", + "serde", "time 0.1.45", "wasm-bindgen", "winapi", @@ -1002,11 +1030,21 @@ dependencies = [ "generic-array", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1063,8 +1101,10 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", + "logging", "monitoring_api", "network", + "operation_pool", "parking_lot 0.12.1", "sensitive_url", "serde", @@ -1074,9 +1114,10 @@ dependencies = [ "slasher_service", "slog", "slot_clock", + "state_processing", "store", "task_executor", - "time 0.3.17", + "time 0.3.20", "timer", "tokio", "types", @@ -1084,9 +1125,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -1113,14 +1154,14 
@@ name = "compare_fields_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ "crossbeam-utils", ] @@ -1137,9 +1178,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "convert_case" @@ -1159,9 +1200,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -1174,33 +1215,27 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crc32fast" @@ -1249,9 +1284,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1259,9 +1294,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1270,22 +1305,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", 
- "memoffset 0.7.1", + "memoffset 0.8.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -1315,6 +1350,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] @@ -1328,16 +1364,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "crypto-mac" version = "0.11.1" @@ -1350,13 +1376,12 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.6" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -1372,30 +1397,30 @@ dependencies = [ [[package]] name = "ctr" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" +checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher 0.2.5", + "cipher 0.3.0", ] [[package]] name = "ctr" -version = "0.8.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.3.0", + "cipher 0.4.4", ] [[package]] name = "ctrlc" -version = "3.2.4" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" +checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" dependencies = [ - "nix 0.26.1", - "windows-sys", + "nix 0.26.2", + "windows-sys 0.45.0", ] [[package]] @@ -1413,9 +1438,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-pre.5" +version = "4.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67bc65846be335cb20f4e52d49a437b773a2c1fdb42b19fc84e79e6f6771536f" +checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ "cfg-if", "fiat-crypto", @@ -1427,9 +1452,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.86" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d1075c37807dcf850c379432f0df05ba52cc30f279c5cfc43cc221ce7f8579" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -1439,9 +1464,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.86" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5044281f61b27bc598f2f6647d480aed48d2bf52d6eb0b627d84c0361b17aa70" +checksum = 
"12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -1449,24 +1474,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 2.0.13", ] [[package]] name = "cxxbridge-flags" -version = "1.0.86" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b50bc93ba22c27b0d31128d2d130a0a6b3d267ae27ef7e4fae2167dfe8781c" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.86" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e61fda7e62115119469c7b3591fd913ecca96fb766cfd3f2e2502ab7bc87a5" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -1481,12 +1506,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "darling_core 0.14.2", - "darling_macro 0.14.2", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] @@ -1500,21 +1525,21 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1525,18 +1550,18 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "darling_core 0.14.2", + "darling_core 0.14.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1582,7 +1607,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" dependencies = [ "data-encoding", - "syn", + "syn 1.0.109", ] [[package]] @@ -1611,12 +1636,12 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.1.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c4d75d3abfe4830dcbf9bcb1b926954e121669f74dd1ca7aa0183b1755d83f6" +checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.6.10", + "tokio-util 0.7.7", ] [[package]] @@ -1652,7 +1677,7 @@ checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" dependencies = [ "asn1-rs 0.3.1", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1660,13 +1685,13 @@ dependencies = [ [[package]] name = "der-parser" -version = "8.1.0" +version = "8.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1680,18 +1705,17 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "derive_arbitrary" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf460bbff5f571bfc762da5102729f59f338be7db17a21fade44c5c4f5005350" +version = "1.3.0" +source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1709,10 +1733,10 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ - "darling 0.14.2", + "darling 0.14.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1722,7 +1746,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.109", ] [[package]] @@ -1735,7 +1759,44 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "diesel" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" +dependencies = [ + "bitflags", + "byteorder", + "diesel_derives", + "itoa", + "pq-sys", + "r2d2", +] + +[[package]] +name = "diesel_derives" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diesel_migrations" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" +dependencies = [ + "diesel", + "migrations_internals", + "migrations_macros", ] [[package]] @@ -1753,7 +1814,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1810,18 +1871,18 @@ dependencies = [ [[package]] name = "discv5" -version = "0.1.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767c0e59b3e8d65222d95df723cc2ea1da92bb0f27c563607e6f0bde064f255" +checksum = "b009a99b85b58900df46435307fc5c4c845af7e182582b1fbf869572fa9fce69" dependencies = [ "aes 0.7.5", "aes-gcm 0.9.4", "arrayvec", "delay_map", - "enr", + "enr 0.7.0", "fnv", "futures", - "hashlink", + "hashlink 0.7.0", "hex", "hkdf", "lazy_static", @@ -1832,7 +1893,7 @@ dependencies = [ "rand 0.8.5", "rlp", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -1850,14 +1911,14 @@ checksum = 
"3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "dtoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313" +checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" [[package]] name = "ecdsa" @@ -1873,9 +1934,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -1927,9 +1988,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -1955,9 +2016,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] @@ -1968,7 +2029,26 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ - "base64", + "base64 0.13.1", + "bs58", + "bytes", + "hex", + "k256", + "log", + "rand 0.8.5", + "rlp", + "serde", + "sha3 0.10.6", + "zeroize", +] + +[[package]] +name = "enr" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad" +dependencies = [ + "base64 0.13.1", "bs58", "bytes", "ed25519-dalek", @@ -1991,7 +2071,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2039,6 +2119,27 @@ dependencies = [ "types", ] +[[package]] +name = "errno" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.45.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "error-chain" version = "0.12.4" @@ -2073,7 +2174,7 @@ dependencies = [ "slog", "sloggers", "state_processing", - "superstruct", + "superstruct 0.5.0", "task_executor", "tokio", "tree_hash", @@ -2145,7 +2246,7 @@ dependencies = [ name = "eth2_interop_keypairs" version = "0.2.0" dependencies = [ - "base64", + "base64 0.13.1", "bls", "eth2_hashing", "hex", @@ -2194,7 +2295,7 @@ dependencies = [ name = "eth2_network_config" version = "0.2.0" dependencies = [ - "enr", + "discv5", "eth2_config", "eth2_ssz", "serde_yaml", @@ -2226,12 +2327,13 @@ dependencies = [ [[package]] name = "eth2_ssz_derive" -version = "0.3.0" +version = "0.3.1" dependencies = [ "darling 0.13.4", + "eth2_ssz", "proc-macro2", 
"quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2339,15 +2441,16 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" +source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" dependencies = [ "async-stream", "blst", "bs58", - "enr", + "enr 0.6.2", "hex", "integer-sqrt", "multiaddr 0.14.0", + "multihash 0.16.3", "rand 0.8.5", "serde", "serde_json", @@ -2422,7 +2525,7 @@ checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", "auto_impl", - "base64", + "base64 0.13.1", "ethers-core", "futures-channel", "futures-core", @@ -2470,6 +2573,7 @@ dependencies = [ "fork_choice", "futures", "hex", + "logging", "reqwest", "sensitive_url", "serde_json", @@ -2505,7 +2609,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-build-rs", + "mev-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -2517,6 +2621,7 @@ dependencies = [ "ssz-rs", "state_processing", "strum", + "superstruct 0.6.0", "task_executor", "tempfile", "tokio", @@ -2552,9 +2657,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -2577,18 +2682,18 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "field-offset" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e1c54951450cbd39f3dbcf1005ac413b49487dabf18a720ad2383eccfeffb92" +checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535" dependencies = [ - "memoffset 0.6.5", - "rustc_version 0.3.3", + "memoffset 0.8.0", + "rustc_version 0.4.0", ] [[package]] @@ -2602,7 +2707,8 @@ dependencies = [ [[package]] name = "fixed-hash" version = "0.7.0" -source = "git+https://github.com/paritytech/parity-common?rev=df638ab0885293d21d656dc300d39236b69ce57d#df638ab0885293d21d656dc300d39236b69ce57d" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", "rand 0.8.5", @@ -2709,9 +2815,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2724,9 +2830,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2734,15 +2840,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2752,9 +2858,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" @@ -2773,13 +2879,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -2789,21 +2895,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.7", + "rustls 0.20.8", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -2813,9 +2919,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -2840,9 +2946,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -2897,29 +3003,29 @@ dependencies = [ [[package]] name = "ghash" -version = "0.3.1" +version = 
"0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval 0.4.5", + "polyval 0.5.3", ] [[package]] name = "ghash" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug", - "polyval 0.5.3", + "polyval 0.6.0", ] [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "git-version" @@ -2940,7 +3046,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2962,9 +3068,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -2975,7 +3081,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tracing", ] @@ -3036,13 +3142,22 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "hashlink" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "headers" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", @@ -3063,9 +3178,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -3085,6 +3200,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -3116,16 +3237,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.11.0" @@ -3169,13 +3280,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", - "itoa 
1.0.5", + "itoa", ] [[package]] @@ -3205,9 +3316,11 @@ dependencies = [ "environment", "eth1", "eth2", + "eth2_serde_utils", "eth2_ssz", "execution_layer", "futures", + "genesis", "hex", "lazy_static", "lighthouse_metrics", @@ -3216,6 +3329,7 @@ dependencies = [ "logging", "lru 0.7.8", "network", + "operation_pool", "parking_lot 0.12.1", "proto_array", "safe_arith", @@ -3279,9 +3393,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -3292,9 +3406,9 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa", "pin-project-lite 0.2.9", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -3309,7 +3423,7 @@ checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "tokio-rustls 0.23.4", ] @@ -3329,16 +3443,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows 0.46.0", ] [[package]] @@ -3411,9 +3525,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7abdbb86e485125dad06c2691e1e393bf3b08c7b743b43aa162a00fd39062e" +checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" dependencies = [ "async-io", "core-foundation", @@ -3425,7 +3539,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.34.0", ] [[package]] @@ -3456,7 +3570,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.2.1", + "parity-scale-codec 3.4.0", ] [[package]] @@ -3494,27 +3608,36 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", ] [[package]] -name = "instant" -version = "0.1.12" +name = "inout" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "cfg-if", - "js-sys", + "generic-array", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", + "js-sys", "wasm-bindgen", "web-sys", ] @@ -3556,13 +3679,24 @@ dependencies = [ "webrtc-util", ] +[[package]] +name = "io-lifetimes" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" +dependencies = [ + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "ipconfig" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2", + "socket2 0.4.9", "widestring 0.5.1", "winapi", "winreg", @@ -3570,9 +3704,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -3585,21 +3719,46 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.8" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] -name = "itoa" -version = "1.0.5" +name = "jemalloc-ctl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1891c671f3db85d8ea8525dd43ab147f9977041911d24a03e5a36187a7bfde9" +dependencies = [ + "jemalloc-sys", + "libc", + "paste", +] + +[[package]] +name = "jemalloc-sys" +version = "0.5.3+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "16c2514137880c52b0b4822b563fadd38257c1f380858addb74a400889696ea6" +dependencies = [ + "jemalloc-sys", + "libc", +] [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] @@ -3621,11 +3780,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64", + "base64 0.21.0", "pem", "ring", "serde", @@ -3682,7 +3841,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.4.0" +version = "4.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -3703,6 +3862,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "log", + "malloc_utils", "sensitive_url", "serde", "serde_json", @@ -3741,15 +3901,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libflate" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05605ab2bce11bcfc0e9c635ff29ef8b2ea83f29be257ee7d730cac3ee373093" +checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" dependencies = [ "adler32", "crc32fast", @@ -3758,9 +3918,9 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf" dependencies = [ "rle-decode-fast", ] @@ -3804,9 +3964,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.50.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e0a0d2f693675f49ded13c5d510c48b78069e23cbd9108d7ccd59f6dc568819" +checksum = "9c7b0104790be871edcf97db9bd2356604984e623a08d825c3f27852290266b8" dependencies = [ "bytes", "futures", @@ -3852,7 +4012,7 @@ dependencies = [ "libsecp256k1", "log", "multiaddr 0.14.0", - "multihash", + "multihash 0.16.3", "multistream-select 0.11.0", "p256", "parking_lot 0.12.1", @@ -3886,7 +4046,7 @@ dependencies = [ "libsecp256k1", "log", "multiaddr 0.16.0", - "multihash", + "multihash 0.16.3", "multistream-select 0.12.1", "once_cell", "p256", @@ -3905,6 +4065,34 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-core" +version = "0.39.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7f8b7d65c070a5a1b5f8f0510648189da08f787b8963f8e21219e0710733af" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-identity", + "log", + "multiaddr 0.17.1", + "multihash 0.17.0", + "multistream-select 0.12.1", + "once_cell", + "parking_lot 0.12.1", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "smallvec", + "thiserror", + "unsigned-varint 0.7.1", + "void", +] + [[package]] name = "libp2p-dns" version = "0.38.0" @@ -3926,7 +4114,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a173171c71c29bb156f98886c7c4824596de3903dadf01e2e79d2ccdcf38cd9f" dependencies = [ "asynchronous-codec", - "base64", + "base64 0.13.1", "byteorder", "bytes", "fnv", @@ -3970,6 +4158,24 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-identity" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" +dependencies = [ + "bs58", + "ed25519-dalek", + "log", + "multiaddr 0.17.1", + "multihash 0.17.0", + "prost", + "quick-protobuf", + "rand 0.8.5", + "thiserror", + "zeroize", +] + [[package]] name = "libp2p-mdns" version = "0.42.0" @@ -3984,7 +4190,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "trust-dns-proto", "void", @@ -4077,7 +4283,7 @@ dependencies = [ "parking_lot 0.12.1", "quinn-proto", "rand 0.8.5", - "rustls 0.20.7", + "rustls 0.20.8", "thiserror", "tokio", ] @@ -4112,7 +4318,7 @@ checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" dependencies = [ "heck", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4127,22 +4333,23 @@ 
dependencies = [ "libc", "libp2p-core 0.38.0", "log", - "socket2", + "socket2 0.4.9", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.1.0-alpha" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7905ce0d040576634e8a3229a7587cc8beab83f79db6023800f1792895defa8" +checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.38.0", + "libp2p-core 0.39.1", + "libp2p-identity", "rcgen 0.10.0", "ring", - "rustls 0.20.7", + "rustls 0.20.8", "thiserror", "webpki 0.22.0", "x509-parser 0.14.0", @@ -4165,7 +4372,7 @@ dependencies = [ "libp2p-core 0.38.0", "libp2p-noise", "log", - "multihash", + "multihash 0.16.3", "prost", "prost-build", "prost-codec", @@ -4176,7 +4383,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "webrtc", ] @@ -4220,7 +4427,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", - "base64", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -4263,9 +4470,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.22.2" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -4285,7 +4492,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.4.0" +version = "4.1.0" dependencies = [ "account_manager", "account_utils", @@ -4352,6 +4559,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", + "lru_cache", "parking_lot 0.12.1", "prometheus-client", "quickcheck", @@ -4367,13 +4575,15 @@ dependencies = [ "smallvec", "snap", "strum", - "superstruct", + "superstruct 0.5.0", "task_executor", "tempfile", "tiny-keccak", "tokio", "tokio-io-timeout", "tokio-util 0.6.10", + "tree_hash", + "tree_hash_derive", "types", "unsigned-varint 0.6.0", "unused_port", @@ -4404,6 +4614,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" + [[package]] name = "lmdb-rkv" version = "0.14.0" @@ -4510,6 +4726,8 @@ dependencies = [ name = "malloc_utils" version = "0.1.0" dependencies = [ + "jemalloc-ctl", + "jemallocator", "lazy_static", "libc", "lighthouse_metrics", @@ -4539,9 +4757,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" @@ -4586,9 +4804,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -4625,25 +4843,47 @@ 
dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] -name = "mev-build-rs" +name = "mev-rs" version = "0.2.1" -source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" +source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" dependencies = [ "async-trait", "axum", "beacon-api-client", "ethereum-consensus", + "hyper", "serde", - "serde_json", "ssz-rs", "thiserror", + "tokio", "tracing", ] +[[package]] +name = "migrations_internals" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" +dependencies = [ + "serde", + "toml", +] + +[[package]] +name = "migrations_macros" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" +dependencies = [ + "migrations_internals", + "proc-macro2", + "quote", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -4658,9 +4898,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -4689,14 +4929,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] @@ -4735,7 +4975,7 @@ dependencies = [ "bs58", "byteorder", "data-encoding", - "multihash", + "multihash 0.16.3", "percent-encoding", "serde", "static_assertions", @@ -4753,7 +4993,26 @@ dependencies = [ "byteorder", "data-encoding", "multibase", - "multihash", + "multihash 0.16.3", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.1", + "url", +] + +[[package]] +name = "multiaddr" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "log", + "multibase", + "multihash 0.17.0", "percent-encoding", "serde", "static_assertions", @@ -4785,6 +5044,19 @@ dependencies = [ "unsigned-varint 0.7.1", ] +[[package]] +name = "multihash" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +dependencies = [ + "core2", + "digest 0.10.6", + "multihash-derive", + "sha2 0.10.6", + "unsigned-varint 0.7.1", +] + [[package]] name = "multihash-derive" version = "0.8.1" @@ -4795,7 +5067,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -4897,9 +5169,9 @@ dependencies = [ [[package]] name = "netlink-packet-utils" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" dependencies = [ "anyhow", "byteorder", @@ -4924,9 +5196,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" +checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ "bytes", "futures", @@ -4947,6 +5219,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_types", "ethereum-types 0.14.1", + "execution_layer", "exit-future", "fnv", "futures", @@ -4962,6 +5235,7 @@ dependencies = [ "lru_cache", "matches", "num_cpus", + "operation_pool", "rand 0.8.5", "rlp", "slog", @@ -5006,9 +5280,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a58d1d356c6597d08cde02c2f09d785b09e28711837b1ed667dc652c08a694" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags", "cfg-if", @@ -5045,9 +5319,9 @@ checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] name = "nom" -version = "7.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -5153,9 +5427,9 @@ dependencies = [ [[package]] name = "object" -version = "0.30.1" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d864c91689fdc196779b98dba0aceac6118594c2df6ee5d943eb6a8df4d107a" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] @@ -5175,14 +5449,14 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", ] [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oneshot_broadcast" @@ -5225,14 +5499,14 @@ dependencies = [ "bytes", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "4d2f106ab837a24e03672c59b1239669a0596406ff657c3c0835b6b7f0f35a33" dependencies = [ "bitflags", "cfg-if", @@ -5245,13 +5519,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -5262,20 +5536,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" 
-version = "111.24.0+1.1.1s" +version = "111.25.2+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3498f259dab01178c6228c6b00dcef0ed2a2d5e20d648c017861227773ea4abd" +checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "3a20eace9dc2d82904039cb76dcf50fb1a0bba071cfd1629720b5d6f1ddba0fa" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "openssl-src", @@ -5297,6 +5570,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", + "rand 0.8.5", "rayon", "serde", "serde_derive", @@ -5360,15 +5634,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.2.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.3", + "parity-scale-codec-derive 3.1.4", "serde", ] @@ -5381,19 +5655,19 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "parity-scale-codec-derive" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5420,7 +5694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.7", ] [[package]] @@ -5432,29 +5706,29 @@ dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ -5482,11 +5756,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] @@ -5504,21 +5778,11 @@ version = "2.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" -[[package]] -name = "pest" -version = "2.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" -dependencies = [ - "thiserror", - "ucd-trie", -] - [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -5534,6 +5798,24 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -5551,7 +5833,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5630,16 +5912,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys", + "pin-project-lite 0.2.9", + "windows-sys 0.45.0", ] [[package]] @@ -5650,30 +5934,60 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] name = "polyval" -version = "0.4.5" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cpuid-bool", + "cfg-if", + "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] name = "polyval" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.5.0", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +dependencies = [ + "base64 0.21.0", + "byteorder", + "bytes", + "fallible-iterator", + "hmac 0.12.1", + "md-5", + "memchr", + "rand 0.8.5", + "sha2 0.10.6", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", ] [[package]] @@ -5682,14 +5996,23 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pq-sys" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b845d6d8ec554f972a2c5298aad68953fd64e7441e846075450b44656a016d1" +dependencies = [ + "vcpkg", +] + [[package]] name = "prettyplease" -version = "0.1.23" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -5738,7 +6061,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -5761,9 +6084,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "1d0dd4be24fcdcfeaa12a432d588dc59bbad6cad3510c67e74a2b6b2fc950564" dependencies = [ "unicode-ident", ] @@ -5802,7 +6125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.5", + "itoa", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -5815,14 +6138,14 @@ checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost" -version = "0.11.5" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" +checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" dependencies = [ "bytes", "prost-derive", @@ -5830,9 +6153,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.5" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" +checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" dependencies = [ "bytes", "heck", @@ -5845,7 +6168,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.109", "tempfile", "which", ] @@ -5865,24 +6188,23 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.5" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" +checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.11.5" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" +checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" dependencies = [ - "bytes", "prost", ] @@ -5930,6 
+6252,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -5950,7 +6281,7 @@ checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5966,15 +6297,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4ced82a24bb281af338b9e8f94429b6eca01b4e66d899f40031f074e74c9" +checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.20.7", + "rustls 0.20.8", "slab", "thiserror", "tinyvec", @@ -5984,9 +6315,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -6004,9 +6335,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d24607049214c5e42d3df53ac1d8a23c34cc6a5eefe3122acb2c72174719959" +checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" dependencies = [ "r2d2", "rusqlite", @@ -6106,9 +6437,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -6116,9 +6447,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -6134,7 +6465,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "x509-parser 0.13.2", "yasna", ] @@ -6147,7 +6478,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "yasna", ] @@ -6160,6 +6491,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6167,15 +6507,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.8", - "redox_syscall", + 
"redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -6193,26 +6533,17 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ - "base64", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -6231,7 +6562,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.7", + "rustls 0.20.8", "rustls-pemfile", "serde", "serde_json", @@ -6239,11 +6570,12 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "webpki-roots", "winreg", @@ -6309,7 +6641,7 @@ checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6364,24 +6696,23 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.4" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.8.1", "libsqlite3-sys", - "memchr", "smallvec", ] [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = "rustc-hash" @@ -6401,16 +6732,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver 0.9.0", -] - -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", + "semver 0.9.0", ] [[package]] @@ -6419,7 +6741,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.17", ] [[package]] @@ -6428,7 +6750,21 @@ version = "4.1.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", +] + +[[package]] +name = "rustix" +version = "0.37.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d097081ed288dfe45699b72f5b5d648e5f15d64d900c7080273baa20c16a6849" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", ] [[package]] @@ -6437,7 +6773,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64", + "base64 0.13.1", "log", "ring", "sct 0.6.1", @@ -6446,9 +6782,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -6458,18 +6794,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64", + "base64 0.21.0", ] [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rw-stream-sink" @@ -6484,9 +6820,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe_arith" @@ -6518,26 +6854,26 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.2.1", + "parity-scale-codec 3.4.0", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6546,14 +6882,14 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "windows-sys", + "windows-sys 0.42.0", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" 
+checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot 0.12.1", ] @@ -6572,9 +6908,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -6654,9 +6990,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -6667,9 +7003,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -6681,23 +7017,14 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0", -] - -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser 0.10.2", + "semver-parser", ] [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "semver-parser" @@ -6705,20 +7032,11 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "send_wrapper" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "sensitive_url" @@ -6730,9 +7048,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" dependencies = [ "serde_derive", ] @@ -6759,35 +7077,35 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = 
"4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" dependencies = [ - "itoa 1.0.5", + "itoa", "ryu", "serde", ] [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -6797,7 +7115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -6821,7 +7139,7 @@ dependencies = [ "darling 0.13.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6934,9 +7252,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -6960,7 +7278,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -6981,11 +7299,17 @@ dependencies = [ "types", ] +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -7086,7 +7410,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7131,7 +7455,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7189,7 +7513,7 @@ dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-pre.5", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7199,21 +7523,31 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] +[[package]] +name = "socket2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "soketto" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64", + "base64 0.13.1", "bytes", "flate2", "futures", @@ -7242,11 +7576,10 @@ dependencies = [ [[package]] name = "ssz-rs" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "bitvec 1.0.1", "hex", - "lazy_static", "num-bigint", "serde", "sha2 0.9.9", @@ -7257,11 +7590,11 @@ dependencies = [ [[package]] name = "ssz-rs-derive" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7334,6 +7667,16 @@ dependencies = [ "types", ] +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.8.0" @@ -7365,7 +7708,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 1.0.109", ] [[package]] @@ -7374,7 +7717,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" dependencies = [ - "base64", + "base64 0.13.1", "crc", "lazy_static", "md-5", @@ -7413,7 +7756,21 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "superstruct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" +dependencies = [ + "darling 0.13.4", + "itertools", + "proc-macro2", + "quote", + "smallvec", + "syn 1.0.109", ] [[package]] @@ -7427,9 +7784,20 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" dependencies = [ "proc-macro2", "quote", @@ -7438,9 +7806,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synstructure" @@ -7450,15 +7818,15 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] [[package]] name = "sysinfo" -version = "0.26.8" +version = "0.26.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"29ddf41e393a9133c81d5f0974195366bd57082deac6e0eb02ed39b8341c2bb6" +checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" dependencies = [ "cfg-if", "core-foundation-sys", @@ -7543,16 +7911,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.45.0", ] [[package]] @@ -7568,9 +7935,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -7588,7 +7955,24 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "testcontainers" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +dependencies = [ + "bollard-stubs", + "futures", + "hex", + "hmac 0.12.1", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.10.6", ] [[package]] @@ -7602,30 +7986,31 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] @@ -7651,11 +8036,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ - "itoa 1.0.5", + "itoa", "libc", "num_threads", "serde", @@ -7671,9 +8056,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -7738,28 +8123,27 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = 
"0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.24.1" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg 1.1.0", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] @@ -7774,25 +8158,49 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot 0.12.1", + "percent-encoding", + "phf", + "pin-project-lite 0.2.9", + "postgres-protocol", + "postgres-types", + "socket2 0.5.1", + "tokio", + "tokio-util 0.7.7", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -7810,21 +8218,21 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "webpki 0.22.0", ] [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", ] [[package]] @@ -7848,7 +8256,7 @@ checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "tokio-rustls 0.23.4", "tungstenite 0.17.3", @@ -7874,24 +8282,25 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "pin-project-lite 0.2.9", + "slab", "tokio", "tracing", ] [[package]] name = "toml" -version = "0.5.10" +version 
= "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] @@ -7964,7 +8373,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8032,7 +8441,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8056,7 +8465,7 @@ version = "0.4.0" dependencies = [ "darling 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8087,7 +8496,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -8127,7 +8536,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -8146,14 +8555,14 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", "log", "rand 0.8.5", - "rustls 0.20.7", + "rustls 0.20.8", "sha-1 0.10.1", "thiserror", "url", @@ -8168,7 +8577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" dependencies = [ "async-trait", - "base64", + "base64 0.13.1", "futures", "log", "md-5", @@ -8237,7 +8646,7 @@ dependencies = [ "slog", "smallvec", "state_processing", - "superstruct", + "superstruct 0.6.0", "swap_or_not_shuffle", "tempfile", "test_random_derive", @@ -8246,12 +8655,6 @@ dependencies = [ "tree_hash_derive", ] -[[package]] -name = "ucd-trie" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" - [[package]] name = "uint" version = "0.9.5" @@ -8282,15 +8685,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -8323,6 +8726,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8352,6 +8765,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "unused_port" version = "0.1.0" +dependencies = [ + "lazy_static", + "lru_cache", + 
"parking_lot 0.12.1", +] [[package]] name = "url" @@ -8382,9 +8800,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" dependencies = [ "getrandom 0.2.8", ] @@ -8508,12 +8926,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -8595,9 +9012,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8605,24 +9022,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if", "js-sys", @@ -8632,9 +9049,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8642,28 +9059,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "wasm-bindgen-test" -version = "0.3.33" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d2fff962180c3fadf677438054b1db62bee4aa32af26a45388af07d1287e1d" +checksum = 
"6db36fc0f9fb209e88fb3642590ae0205bb5a56216dabd963ba15879fe53a30b" dependencies = [ "console_error_panic_hook", "js-sys", @@ -8675,14 +9092,27 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.33" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4683da3dfc016f704c9f82cf401520c4f1cb3ee440f7f52b3d6ac29506a49ca7" +checksum = "0734759ae6b3b1717d661fe4f016efcfb9828f5edb4520c18eaee05af3b43be9" dependencies = [ "proc-macro2", "quote", ] +[[package]] +name = "wasm-streams" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasm-timer" version = "0.2.5" @@ -8698,11 +9128,44 @@ dependencies = [ "web-sys", ] +[[package]] +name = "watch" +version = "0.1.0" +dependencies = [ + "axum", + "beacon_chain", + "beacon_node", + "bls", + "byteorder", + "clap", + "diesel", + "diesel_migrations", + "env_logger 0.9.3", + "eth2", + "hex", + "http_api", + "hyper", + "log", + "network", + "r2d2", + "rand 0.7.3", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "testcontainers", + "tokio", + "tokio-postgres", + "types", + "unused_port", + "url", +] + [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -8715,7 +9178,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ "arrayvec", - "base64", + "base64 0.13.1", "bytes", "derive_more", "ethabi 16.0.0", @@ -8765,6 +9228,8 @@ dependencies = [ "eth2_network_config", "exit-future", "futures", + "lazy_static", + "parking_lot 0.12.1", "reqwest", "serde", "serde_derive", @@ -8835,7 +9300,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.17", + "time 0.3.20", "tokio", "turn", "url", @@ -8867,22 +9332,22 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f" +checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.8.0", + "aes-gcm 0.10.1", "async-trait", "bincode", "block-modes", "byteorder", "ccm", "curve25519-dalek 3.2.0", - "der-parser 8.1.0", + "der-parser 8.2.0", "elliptic-curve", "hkdf", - "hmac 0.10.1", + "hmac 0.12.1", "log", "oid-registry 0.6.1", "p256", @@ -8894,23 +9359,23 @@ dependencies = [ "rustls 0.19.1", "sec1", "serde", - "sha-1 0.9.8", - "sha2 0.9.9", + "sha1", + "sha2 0.10.6", "signature", "subtle", "thiserror", "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0-rc.2", "x509-parser 0.13.2", ] [[package]] name = "webrtc-ice" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" +checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" dependencies = [ "arc-swap", "async-trait", @@ -8924,7 +9389,7 
@@ dependencies = [ "tokio", "turn", "url", - "uuid 1.2.2", + "uuid 1.3.0", "waitgroup", "webrtc-mdns", "webrtc-util", @@ -8937,7 +9402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", - "socket2", + "socket2 0.4.9", "thiserror", "tokio", "webrtc-util", @@ -9021,20 +9486,11 @@ dependencies = [ "winapi", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -9097,6 +9553,15 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] +[[package]] +name = "windows" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -9116,19 +9581,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" @@ -9138,9 +9627,9 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" @@ -9150,9 +9639,9 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" @@ -9162,9 +9651,9 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" @@ -9174,15 +9663,15 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" @@ -9192,9 +9681,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winreg" @@ -9207,13 +9696,14 @@ dependencies = [ [[package]] name = "ws_stream_wasm" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" dependencies = [ "async_io_stream", "futures", "js-sys", + "log", "pharos", "rustc_version 0.4.0", "send_wrapper", @@ -9251,12 +9741,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", + "serde", "zeroize", ] @@ -9267,16 +9758,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" dependencies = [ "asn1-rs 0.3.1", - "base64", + "base64 0.13.1", "data-encoding", "der-parser 7.0.0", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "oid-registry 0.4.0", "ring", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9285,16 +9776,16 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ - "asn1-rs 0.5.1", - "base64", + "asn1-rs 0.5.2", + "base64 0.13.1", "data-encoding", - "der-parser 8.1.0", + "der-parser 
8.2.0", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9341,28 +9832,27 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" dependencies = [ - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.13", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b35cbbb89cf..66b2b4e2e9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,12 +87,22 @@ members = [ "validator_client", "validator_client/slashing_protection", + + "watch", ] +resolver = "2" [patch] [patch.crates-io] -fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" } warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } +arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } + +[patch."https://github.com/ralexstokes/mev-rs"] +mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } +[patch."https://github.com/ralexstokes/ethereum-consensus"] +ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } +[patch."https://github.com/ralexstokes/ssz-rs"] +ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } [profile.maxperf] inherits = "release" diff --git a/Dockerfile b/Dockerfile index 72423b17c68..0d268c7e1aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES diff --git a/Makefile b/Makefile index 33077a6c930..89362d12d82 100644 --- a/Makefile +++ b/Makefile @@ -14,28 +14,48 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 +# List of features to use when building natively. Can be overriden via the environment. +# No jemalloc on Windows +ifeq ($(OS),Windows_NT) + FEATURES?= +else + FEATURES?=jemalloc +endif + # List of features to use when cross-compiling. Can be overridden via the environment. -CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release +# List of features to use when running EF tests. +EF_TEST_FEATURES ?= + # Cargo profile for regular builds. PROFILE ?= release # List of all hard forks. 
This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge +FORKS=phase0 altair merge capella + +# Extra flags for Cargo +CARGO_INSTALL_EXTRA_FLAGS?= # Builds the Lighthouse binary in release (optimized). # # Binaries will most likely be found in `./target/release` install: - cargo install --path lighthouse --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" + cargo install --path lighthouse --force --locked \ + --features "$(FEATURES)" \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) # Builds the lcli binary in release (optimized). install-lcli: - cargo install --path lcli --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" + cargo install --path lcli --force --locked \ + --features "$(FEATURES)" \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) # The following commands use `cross` to build a cross-compile. # @@ -101,23 +121,19 @@ cargo-fmt: check-benches: cargo check --workspace --benches -# Typechecks consensus code *without* allowing deprecated legacy arithmetic or metrics. -check-consensus: - cargo check -p state_processing --no-default-features - # Runs only the ef-test vectors. run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt - cargo test --release -p ef_tests --features "ef_tests" - cargo test --release -p ef_tests --features "ef_tests,fake_crypto" - cargo test --release -p ef_tests --features "ef_tests,milagro" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain + env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) @@ -160,7 +176,8 @@ lint: -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push \ - -A clippy::question-mark + -A clippy::question-mark \ + -A clippy::uninlined-format-args nightly-lint: cp .github/custom/clippy.toml . @@ -185,7 +202,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 + cargo audit --ignore RUSTSEC-2020-0071 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/README.md b/README.md index 859d5c4c63a..3565882d6e7 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ of the Lighthouse book. The best place for discussion is the [Lighthouse Discord server](https://discord.gg/cyAszAh). -Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb/) mailing list for email +Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. 
Encrypt sensitive messages using our [PGP diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cca8cc969ef..95f145a557d 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.4.0" +version = "4.1.0" authors = ["Paul Hauner ", "Age Manning BeaconChain { + pub fn compute_attestation_rewards( + &self, + epoch: Epoch, + validators: Vec, + log: Logger, + ) -> Result { + debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + + // Get state + let spec = &self.spec; + + let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); + + let state_root = self + .state_root_at_slot(state_slot)? + .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; + + let mut state = self + .get_state(&state_root, Some(state_slot))? + .ok_or(BeaconChainError::MissingBeaconState(state_root))?; + + // Calculate ideal_rewards + let participation_cache = ParticipationCache::new(&state, spec)?; + + let previous_epoch = state.previous_epoch(); + + let mut ideal_rewards_hashmap = HashMap::new(); + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let weight = get_flag_weight(flag_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_indices = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch)?; + + let unslashed_participating_balance = + unslashed_participating_indices + .total_balance() + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_increments = + unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; + + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + + let active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + + let base_reward_per_increment = + BaseRewardPerIncrement::new(total_active_balance, spec)?; + + for effective_balance_eth in 0..=32 { + let effective_balance = + effective_balance_eth.safe_mul(spec.effective_balance_increment)?; + let base_reward = + effective_balance_eth.safe_mul(base_reward_per_increment.as_u64())?; + + let penalty = -(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)? as i64); + + let reward_numerator = base_reward + .safe_mul(weight)? + .safe_mul(unslashed_participating_increments)?; + + let ideal_reward = reward_numerator + .safe_div(active_increments)? + .safe_div(WEIGHT_DENOMINATOR)?; + if !state.is_in_inactivity_leak(previous_epoch, spec) { + ideal_rewards_hashmap + .insert((flag_index, effective_balance), (ideal_reward, penalty)); + } else { + ideal_rewards_hashmap.insert((flag_index, effective_balance), (0, penalty)); + } + } + } + + // Calculate total_rewards + let mut total_rewards: Vec = Vec::new(); + + let validators = if validators.is_empty() { + participation_cache.eligible_validator_indices().to_vec() + } else { + validators + .into_iter() + .map(|validator| match validator { + ValidatorId::Index(i) => Ok(i as usize), + ValidatorId::PublicKey(pubkey) => state + .get_validator_index(&pubkey)? + .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), + }) + .collect::, _>>()? 
+ }; + + for validator_index in &validators { + let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; + let mut head_reward = 0u64; + let mut target_reward = 0i64; + let mut source_reward = 0i64; + + if eligible { + let effective_balance = state.get_effective_balance(*validator_index)?; + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let (ideal_reward, penalty) = ideal_rewards_hashmap + .get(&(flag_index, effective_balance)) + .ok_or(BeaconChainError::AttestationRewardsError)?; + let voted_correctly = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch) + .map_err(|_| BeaconChainError::AttestationRewardsError)? + .contains(*validator_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + if voted_correctly { + if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward += ideal_reward; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward += *ideal_reward as i64; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward += *ideal_reward as i64; + } + } else if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward = 0; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward = *penalty; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward = *penalty; + } + } + } + total_rewards.push(TotalAttestationRewards { + validator_index: *validator_index as u64, + head: head_reward, + target: target_reward, + source: source_reward, + }); + } + + // Convert hashmap to vector + let mut ideal_rewards: Vec = ideal_rewards_hashmap + .iter() + .map( + |((flag_index, effective_balance), (ideal_reward, _penalty))| { + (flag_index, effective_balance, ideal_reward) + }, + ) + .fold( + HashMap::new(), + |mut acc, (flag_index, &effective_balance, ideal_reward)| { + let entry = acc + .entry(effective_balance) + .or_insert(IdealAttestationRewards { + effective_balance, + head: 0, + target: 0, + source: 0, + }); + match *flag_index { + TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, + TIMELY_TARGET_FLAG_INDEX => entry.target += ideal_reward, + TIMELY_HEAD_FLAG_INDEX => entry.head += ideal_reward, + _ => {} + } + acc + }, + ) + .into_values() + .collect::>(); + ideal_rewards.sort_by(|a, b| a.effective_balance.cmp(&b.effective_balance)); + + Ok(StandardAttestationRewards { + ideal_rewards, + total_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index b60ce7efe5c..04f601fad97 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -27,6 +27,11 @@ //! â–¼ //! impl VerifiedAttestation //! ``` + +// Ignore this lint for `AttestationSlashInfo` which is of comparable size to the non-error types it +// is returned alongside. 
+#![allow(clippy::result_large_err)] + mod batch; use crate::{ diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs new file mode 100644 index 00000000000..786402c9978 --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -0,0 +1,237 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use operation_pool::RewardCache; +use safe_arith::SafeArith; +use slog::error; +use state_processing::{ + common::{ + altair, get_attestation_participation_flag_indices, get_attesting_indices_from_state, + }, + per_block_processing::{ + altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, + }, +}; +use store::{ + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, + RelativeEpoch, +}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256}; + +type BeaconBlockSubRewardValue = u64; + +impl BeaconChain { + pub fn compute_beacon_block_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + let proposer_index = block.proposer_index(); + + let sync_aggregate_reward = + self.compute_beacon_block_sync_aggregate_reward(block, state)?; + + let proposer_slashing_reward = self + .compute_beacon_block_proposer_slashing_reward(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating proposer slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let attester_slashing_reward = self + .compute_beacon_block_attester_slashing_reward(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating attester slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let block_attestation_reward = if let BeaconState::Base(_) = state { + self.compute_beacon_block_attestation_reward_base(block, block_root, state) + .map_err(|e| { + error!( + self.log, + "Error calculating base block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + } else { + self.compute_beacon_block_attestation_reward_altair(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating altair block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + }; + + let total_reward = sync_aggregate_reward + .safe_add(proposer_slashing_reward)? + .safe_add(attester_slashing_reward)? 
+ .safe_add(block_attestation_reward)?; + + Ok(StandardBlockReward { + proposer_index, + total: total_reward, + attestations: block_attestation_reward, + sync_aggregate: sync_aggregate_reward, + proposer_slashings: proposer_slashing_reward, + attester_slashings: attester_slashing_reward, + }) + } + + fn compute_beacon_block_sync_aggregate_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + Ok(sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit) + } else { + Ok(0) + } + } + + fn compute_beacon_block_proposer_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut proposer_slashing_reward = 0; + + let proposer_slashings = block.body().proposer_slashings(); + + for proposer_slashing in proposer_slashings { + proposer_slashing_reward.safe_add_assign( + state + .get_validator(proposer_slashing.proposer_index() as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + + Ok(proposer_slashing_reward) + } + + fn compute_beacon_block_attester_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut attester_slashing_reward = 0; + + let attester_slashings = block.body().attester_slashings(); + + for attester_slashing in attester_slashings { + for attester_index in get_slashable_indices(state, attester_slashing)? { + attester_slashing_reward.safe_add_assign( + state + .get_validator(attester_index as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + } + + Ok(attester_slashing_reward) + } + + fn compute_beacon_block_attestation_reward_base>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + // Call compute_block_reward in the base case + // Since base does not have a sync aggregate, we only grab the attestation portion of the returned + // value + let mut reward_cache = RewardCache::default(); + let block_attestation_reward = self + .compute_block_reward(block, block_root, state, &mut reward_cache, true)? + .attestation_rewards + .total; + + Ok(block_attestation_reward) + } + + fn compute_beacon_block_attestation_reward_altair>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result { + let total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, &self.spec)?; + + let mut total_proposer_reward = 0; + + let proposer_reward_denominator = WEIGHT_DENOMINATOR + .safe_sub(PROPOSER_WEIGHT)? + .safe_mul(WEIGHT_DENOMINATOR)?
+ .safe_div(PROPOSER_WEIGHT)?; + + for attestation in block.body().attestations() { + let data = &attestation.data; + let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + let participation_flag_indices = get_attestation_participation_flag_indices( + state, + data, + inclusion_delay, + &self.spec, + )?; + + let attesting_indices = get_attesting_indices_from_state(state, attestation)?; + + let mut proposer_reward_numerator = 0; + for index in attesting_indices { + let index = index as usize; + for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + let epoch_participation = + state.get_epoch_participation_mut(data.target.epoch)?; + let validator_participation = epoch_participation + .get_mut(index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; + + if participation_flag_indices.contains(&flag_index) + && !validator_participation.has_flag(flag_index)? + { + validator_participation.add_flag(flag_index)?; + proposer_reward_numerator.safe_add_assign( + altair::get_base_reward( + state, + index, + base_reward_per_increment, + &self.spec, + )? + .safe_mul(weight)?, + )?; + } + } + } + total_proposer_reward.safe_add_assign( + proposer_reward_numerator.safe_div(proposer_reward_denominator)?, + )?; + } + + Ok(total_proposer_reward) + } +} diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs new file mode 100644 index 00000000000..e43f2a8dd81 --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -0,0 +1,973 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; +use slog::{crit, debug, Logger}; +use std::collections::HashMap; +use std::sync::Arc; +use store::DatabaseBlock; +use task_executor::TaskExecutor; +use tokio::sync::{ + mpsc::{self, UnboundedSender}, + RwLock, +}; +use tokio_stream::{wrappers::UnboundedReceiverStream, Stream}; +use types::{ + ChainSpec, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, Slot, +}; +use types::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadHeader, ExecutionPayloadMerge, +}; + +#[derive(PartialEq)] +pub enum CheckEarlyAttesterCache { + Yes, + No, +} + +#[derive(Debug)] +pub enum Error { + PayloadReconstruction(String), + BlocksByRangeFailure(Box), + RequestNotFound, + BlockResultNotFound, +} + +const BLOCKS_PER_RANGE_REQUEST: u64 = 32; + +// This is the same as a DatabaseBlock but the Arc allows us to avoid an unnecessary clone. 
+enum LoadedBeaconBlock { + Full(Arc>), + Blinded(Box>), +} +type LoadResult = Result>, BeaconChainError>; +type BlockResult = Result>>, BeaconChainError>; + +enum RequestState { + UnSent(Vec>), + Sent(HashMap>>), +} + +struct BodiesByRange { + start: u64, + count: u64, + state: RequestState, +} + +// stores the components of a block for future re-construction in a small form +struct BlockParts { + blinded_block: Box>, + header: Box>, + body: Option>>, +} + +impl BlockParts { + pub fn new( + blinded: Box>, + header: ExecutionPayloadHeader, + ) -> Self { + Self { + blinded_block: blinded, + header: Box::new(header), + body: None, + } + } + + pub fn root(&self) -> Hash256 { + self.blinded_block.canonical_root() + } + + pub fn slot(&self) -> Slot { + self.blinded_block.message().slot() + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + self.header.block_hash() + } +} + +fn reconstruct_default_header_block( + blinded_block: Box>, + header_from_block: ExecutionPayloadHeader, + spec: &ChainSpec, +) -> BlockResult { + let fork = blinded_block + .fork_name(spec) + .map_err(BeaconChainError::InconsistentFork)?; + + let payload: ExecutionPayload = match fork { + ForkName::Merge => ExecutionPayloadMerge::default().into(), + ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Base | ForkName::Altair => { + return Err(Error::PayloadReconstruction(format!( + "Block with fork variant {} has execution payload", + fork + )) + .into()) + } + }; + + let header_from_payload = ExecutionPayloadHeader::from(payload.to_ref()); + if header_from_payload == header_from_block { + blinded_block + .try_into_full_block(Some(payload)) + .ok_or(BeaconChainError::AddPayloadLogicError) + .map(Arc::new) + .map(Some) + } else { + Err(BeaconChainError::InconsistentPayloadReconstructed { + slot: blinded_block.slot(), + exec_block_hash: header_from_block.block_hash(), + canonical_transactions_root: header_from_block.transactions_root(), + reconstructed_transactions_root: header_from_payload.transactions_root(), + }) + } +} + +fn reconstruct_blocks( + block_map: &mut HashMap>>, + block_parts_with_bodies: HashMap>, + log: &Logger, +) { + for (root, block_parts) in block_parts_with_bodies { + if let Some(payload_body) = block_parts.body { + match payload_body.to_payload(block_parts.header.as_ref().clone()) { + Ok(payload) => { + let header_from_payload = ExecutionPayloadHeader::from(payload.to_ref()); + if header_from_payload == *block_parts.header { + block_map.insert( + root, + Arc::new( + block_parts + .blinded_block + .try_into_full_block(Some(payload)) + .ok_or(BeaconChainError::AddPayloadLogicError) + .map(Arc::new) + .map(Some), + ), + ); + } else { + let error = BeaconChainError::InconsistentPayloadReconstructed { + slot: block_parts.blinded_block.slot(), + exec_block_hash: block_parts.header.block_hash(), + canonical_transactions_root: block_parts.header.transactions_root(), + reconstructed_transactions_root: header_from_payload + .transactions_root(), + }; + debug!(log, "Failed to reconstruct block"; "root" => ?root, "error" => ?error); + block_map.insert(root, Arc::new(Err(error))); + } + } + Err(string) => { + block_map.insert( + root, + Arc::new(Err(Error::PayloadReconstruction(string).into())), + ); + } + } + } else { + block_map.insert( + root, + Arc::new(Err(BeaconChainError::BlockHashMissingFromExecutionLayer( + block_parts.block_hash(), + ))), + ); + } + } +} + +impl BodiesByRange { + pub fn new(maybe_block_parts: Option>) -> Self { + if let Some(block_parts) = maybe_block_parts { + 
Self { + start: block_parts.header.block_number(), + count: 1, + state: RequestState::UnSent(vec![block_parts]), + } + } else { + Self { + start: 0, + count: 0, + state: RequestState::UnSent(vec![]), + } + } + } + + pub fn is_unsent(&self) -> bool { + matches!(self.state, RequestState::UnSent(_)) + } + + pub fn push_block_parts(&mut self, block_parts: BlockParts) -> Result<(), BlockParts> { + if self.count == BLOCKS_PER_RANGE_REQUEST { + return Err(block_parts); + } + + match &mut self.state { + RequestState::Sent(_) => Err(block_parts), + RequestState::UnSent(blocks_parts_vec) => { + let block_number = block_parts.header.block_number(); + if self.count == 0 { + self.start = block_number; + self.count = 1; + blocks_parts_vec.push(block_parts); + Ok(()) + } else { + // need to figure out if this block fits in the request + if block_number < self.start + || self.start + BLOCKS_PER_RANGE_REQUEST <= block_number + { + return Err(block_parts); + } + + blocks_parts_vec.push(block_parts); + if self.start + self.count <= block_number { + self.count = block_number - self.start + 1; + } + + Ok(()) + } + } + } + } + + async fn execute(&mut self, execution_layer: &ExecutionLayer, log: &Logger) { + if let RequestState::UnSent(blocks_parts_ref) = &mut self.state { + let block_parts_vec = std::mem::take(blocks_parts_ref); + + let mut block_map = HashMap::new(); + match execution_layer + .get_payload_bodies_by_range(self.start, self.count) + .await + { + Ok(bodies) => { + let mut range_map = (self.start..(self.start + self.count)) + .zip(bodies.into_iter().chain(std::iter::repeat(None))) + .collect::>(); + + let mut with_bodies = HashMap::new(); + for mut block_parts in block_parts_vec { + with_bodies + // it's possible the same block is requested twice, using + // or_insert_with() skips duplicates + .entry(block_parts.root()) + .or_insert_with(|| { + let block_number = block_parts.header.block_number(); + block_parts.body = + range_map.remove(&block_number).flatten().map(Box::new); + + block_parts + }); + } + + reconstruct_blocks(&mut block_map, with_bodies, log); + } + Err(e) => { + let block_result = + Arc::new(Err(Error::BlocksByRangeFailure(Box::new(e)).into())); + debug!(log, "Payload bodies by range failure"; "error" => ?block_result); + for block_parts in block_parts_vec { + block_map.insert(block_parts.root(), block_result.clone()); + } + } + } + self.state = RequestState::Sent(block_map); + } + } + + pub async fn get_block_result( + &mut self, + root: &Hash256, + execution_layer: &ExecutionLayer, + log: &Logger, + ) -> Option>> { + self.execute(execution_layer, log).await; + if let RequestState::Sent(map) = &self.state { + return map.get(root).cloned(); + } + // Shouldn't reach this point + None + } +} + +#[derive(Clone)] +enum EngineRequest { + ByRange(Arc>>), + // When we already have the data or there's an error + NoRequest(Arc>>>>), +} + +impl EngineRequest { + pub fn new_by_range() -> Self { + Self::ByRange(Arc::new(RwLock::new(BodiesByRange::new(None)))) + } + pub fn new_no_request() -> Self { + Self::NoRequest(Arc::new(RwLock::new(HashMap::new()))) + } + + pub async fn is_unsent(&self) -> bool { + match self { + Self::ByRange(bodies_by_range) => bodies_by_range.read().await.is_unsent(), + Self::NoRequest(_) => false, + } + } + + pub async fn push_block_parts(&mut self, block_parts: BlockParts, log: &Logger) { + match self { + Self::ByRange(bodies_by_range) => { + let mut request = bodies_by_range.write().await; + + if let Err(block_parts) = request.push_block_parts(block_parts) { + 
drop(request); + let new_by_range = BodiesByRange::new(Some(block_parts)); + *self = Self::ByRange(Arc::new(RwLock::new(new_by_range))); + } + } + Self::NoRequest(_) => { + // this should _never_ happen + crit!( + log, + "Please notify the devs"; + "beacon_block_streamer" => "push_block_parts called on NoRequest Variant", + ); + } + } + } + + pub async fn push_block_result( + &mut self, + root: Hash256, + block_result: BlockResult, + log: &Logger, + ) { + // this function will only fail if something is seriously wrong + match self { + Self::ByRange(_) => { + // this should _never_ happen + crit!( + log, + "Please notify the devs"; + "beacon_block_streamer" => "push_block_result called on ByRange", + ); + } + Self::NoRequest(results) => { + results.write().await.insert(root, Arc::new(block_result)); + } + } + } + + pub async fn get_block_result( + &self, + root: &Hash256, + execution_layer: &ExecutionLayer, + log: &Logger, + ) -> Arc> { + match self { + Self::ByRange(by_range) => { + by_range + .write() + .await + .get_block_result(root, execution_layer, log) + .await + } + Self::NoRequest(map) => map.read().await.get(root).cloned(), + } + .unwrap_or_else(|| { + crit!( + log, + "Please notify the devs"; + "beacon_block_streamer" => "block_result not found in request", + "root" => ?root, + ); + Arc::new(Err(Error::BlockResultNotFound.into())) + }) + } +} + +pub struct BeaconBlockStreamer { + execution_layer: ExecutionLayer, + check_early_attester_cache: CheckEarlyAttesterCache, + beacon_chain: Arc>, +} + +impl BeaconBlockStreamer { + pub fn new( + beacon_chain: &Arc>, + check_early_attester_cache: CheckEarlyAttesterCache, + ) -> Result { + let execution_layer = beacon_chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing)? + .clone(); + + Ok(Self { + execution_layer, + check_early_attester_cache, + beacon_chain: beacon_chain.clone(), + }) + } + + fn check_early_attester_cache( + &self, + root: Hash256, + ) -> Option>> { + if self.check_early_attester_cache == CheckEarlyAttesterCache::Yes { + self.beacon_chain.early_attester_cache.get_block(root) + } else { + None + } + } + + fn load_payloads(&self, block_roots: Vec) -> Vec<(Hash256, LoadResult)> { + let mut db_blocks = Vec::new(); + + for root in block_roots { + if let Some(cached_block) = self + .check_early_attester_cache(root) + .map(LoadedBeaconBlock::Full) + { + db_blocks.push((root, Ok(Some(cached_block)))); + continue; + } + + match self.beacon_chain.store.try_get_full_block(&root) { + Err(e) => db_blocks.push((root, Err(e.into()))), + Ok(opt_block) => db_blocks.push(( + root, + Ok(opt_block.map(|db_block| match db_block { + DatabaseBlock::Full(block) => LoadedBeaconBlock::Full(Arc::new(block)), + DatabaseBlock::Blinded(block) => { + LoadedBeaconBlock::Blinded(Box::new(block)) + } + })), + )), + } + } + + db_blocks + } + + /// Pre-process the loaded blocks into execution engine requests. + /// + /// The purpose of this function is to separate the blocks into 2 categories: + /// 1) no_request - when we already have the full block or there's an error + /// 2) blocks_by_range - used for blinded blocks + /// + /// The function returns a vector of block roots in the same order as requested + /// along with the engine request that each root corresponds to. 
+ async fn get_requests( + &self, + payloads: Vec<(Hash256, LoadResult)>, + ) -> Vec<(Hash256, EngineRequest)> { + let mut ordered_block_roots = Vec::new(); + let mut requests = HashMap::new(); + + // we sort the by range blocks by slot before adding them to the + // request as it should *better* optimize the number of blocks that + // can fit in the same request + let mut by_range_blocks: Vec> = vec![]; + let mut no_request = EngineRequest::new_no_request(); + + for (root, load_result) in payloads { + // preserve the order of the requested blocks + ordered_block_roots.push(root); + + let block_result = match load_result { + Err(e) => Err(e), + Ok(None) => Ok(None), + Ok(Some(LoadedBeaconBlock::Full(full_block))) => Ok(Some(full_block)), + Ok(Some(LoadedBeaconBlock::Blinded(blinded_block))) => { + match blinded_block + .message() + .execution_payload() + .map(|payload| payload.to_execution_payload_header()) + { + Ok(header) => { + if header.block_hash() == ExecutionBlockHash::zero() { + reconstruct_default_header_block( + blinded_block, + header, + &self.beacon_chain.spec, + ) + } else { + // Add the block to the set requiring a by-range request. + let block_parts = BlockParts::new(blinded_block, header); + by_range_blocks.push(block_parts); + continue; + } + } + Err(e) => Err(BeaconChainError::BeaconStateError(e)), + } + } + }; + + no_request + .push_block_result(root, block_result, &self.beacon_chain.log) + .await; + requests.insert(root, no_request.clone()); + } + + // Now deal with the by_range requests. Sort them in order of increasing slot + let mut by_range = EngineRequest::::new_by_range(); + by_range_blocks.sort_by_key(|block_parts| block_parts.slot()); + for block_parts in by_range_blocks { + let root = block_parts.root(); + by_range + .push_block_parts(block_parts, &self.beacon_chain.log) + .await; + requests.insert(root, by_range.clone()); + } + + let mut result = vec![]; + for root in ordered_block_roots { + if let Some(request) = requests.get(&root) { + result.push((root, request.clone())) + } else { + crit!( + self.beacon_chain.log, + "Please notify the devs"; + "beacon_block_streamer" => "request not found", + "root" => ?root, + ); + no_request + .push_block_result( + root, + Err(Error::RequestNotFound.into()), + &self.beacon_chain.log, + ) + .await; + result.push((root, no_request.clone())); + } + } + + result + } + + // used when the execution engine doesn't support the payload bodies methods + async fn stream_blocks_fallback( + &self, + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + debug!( + self.beacon_chain.log, + "Using slower fallback method of eth_getBlockByHash()" + ); + for root in block_roots { + let cached_block = self.check_early_attester_cache(root); + let block_result = if cached_block.is_some() { + Ok(cached_block) + } else { + self.beacon_chain + .get_block(&root) + .await + .map(|opt_block| opt_block.map(Arc::new)) + }; + + if sender.send((root, Arc::new(block_result))).is_err() { + break; + } + } + } + + async fn stream_blocks( + &self, + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + let n_roots = block_roots.len(); + let mut n_success = 0usize; + let mut n_sent = 0usize; + let mut engine_requests = 0usize; + + let payloads = self.load_payloads(block_roots); + let requests = self.get_requests(payloads).await; + + for (root, request) in requests { + if request.is_unsent().await { + engine_requests += 1; + } + + let result = request + .get_block_result(&root, &self.execution_layer, &self.beacon_chain.log) + 
.await; + + let successful = result + .as_ref() + .as_ref() + .map(|opt| opt.is_some()) + .unwrap_or(false); + + if sender.send((root, result)).is_err() { + break; + } else { + n_sent += 1; + if successful { + n_success += 1; + } + } + } + + debug!( + self.beacon_chain.log, + "BeaconBlockStreamer finished"; + "requested blocks" => n_roots, + "sent" => n_sent, + "succeeded" => n_success, + "failed" => (n_sent - n_success), + "engine requests" => engine_requests, + ); + } + + pub async fn stream( + self, + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + match self + .execution_layer + .get_engine_capabilities(None) + .await + .map_err(Box::new) + .map_err(BeaconChainError::EngineGetCapabilititesFailed) + { + Ok(engine_capabilities) => { + if engine_capabilities.get_payload_bodies_by_range_v1 { + self.stream_blocks(block_roots, sender).await; + } else { + // use the fallback method + self.stream_blocks_fallback(block_roots, sender).await; + } + } + Err(e) => { + send_errors(block_roots, sender, e).await; + } + } + } + + pub fn launch_stream( + self, + block_roots: Vec, + executor: &TaskExecutor, + ) -> impl Stream>)> { + let (block_tx, block_rx) = mpsc::unbounded_channel(); + debug!( + self.beacon_chain.log, + "Launching a BeaconBlockStreamer"; + "blocks" => block_roots.len(), + ); + executor.spawn(self.stream(block_roots, block_tx), "get_blocks_sender"); + UnboundedReceiverStream::new(block_rx) + } +} + +async fn send_errors( + block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + beacon_chain_error: BeaconChainError, +) { + let result = Arc::new(Err(beacon_chain_error)); + for root in block_roots { + if sender.send((root, result.clone())).is_err() { + break; + } + } +} + +impl From for BeaconChainError { + fn from(value: Error) -> Self { + BeaconChainError::BlockStreamerError(value) + } +} + +#[cfg(test)] +mod tests { + use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; + use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; + use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES}; + use execution_layer::EngineCapabilities; + use lazy_static::lazy_static; + use std::time::Duration; + use tokio::sync::mpsc; + use types::{ChainSpec, Epoch, EthSpec, Hash256, Keypair, MinimalEthSpec, Slot}; + + const VALIDATOR_COUNT: usize = 48; + lazy_static! { + /// A cached set of keys. 
+ static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + } + + fn get_harness( + validator_count: usize, + spec: ChainSpec, + ) -> BeaconChainHarness> { + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec) + .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(logging::test_logger()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness + } + + #[tokio::test] + async fn check_all_blocks_from_altair_to_capella() { + let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; + let num_epochs = 8; + let bellatrix_fork_epoch = 2usize; + let capella_fork_epoch = 4usize; + let num_blocks_produced = num_epochs * slots_per_epoch; + + let mut spec = test_spec::(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); + spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + + let harness = get_harness(VALIDATOR_COUNT, spec); + // go to bellatrix fork + harness + .extend_slots(bellatrix_fork_epoch * slots_per_epoch) + .await; + // extend half an epoch + harness.extend_slots(slots_per_epoch / 2).await; + // trigger merge + harness + .execution_block_generator() + .move_to_terminal_block() + .expect("should move to terminal block"); + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + // finish out merge epoch + harness.extend_slots(slots_per_epoch / 2).await; + // finish rest of epochs + harness + .extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch) + .await; + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + + assert_eq!( + state.slot(), + Slot::new(num_blocks_produced as u64), + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced as u64 / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let block_roots: Vec = harness + .chain + .forwards_iter_block_roots(Slot::new(0)) + .expect("should get iter") + .map(Result::unwrap) + .map(|(root, _)| root) + .collect(); + + let mut expected_blocks = vec![]; + // get all blocks the old fashioned way + for root in &block_roots { + let block = harness + .chain + .get_block(root) + .await + .expect("should get block") + .expect("block should exist"); + expected_blocks.push(block); + } + + for epoch in 0..num_epochs { + let start = epoch * slots_per_epoch; + let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch]; + epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); + let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No) + .expect("should create streamer"); + let (block_tx, mut block_rx) = mpsc::unbounded_channel(); + streamer.stream(epoch_roots.clone(), block_tx).await; + + for (i, expected_root) in epoch_roots.into_iter().enumerate() { + let (found_root, found_block_result) = + block_rx.recv().await.expect("should get block"); + + assert_eq!( + found_root, 
expected_root, + "expected block root should match" + ); + match found_block_result.as_ref() { + Ok(maybe_block) => { + let found_block = maybe_block.clone().expect("should have a block"); + let expected_block = expected_blocks + .get(start + i) + .expect("should get expected block"); + assert_eq!( + found_block.as_ref(), + expected_block, + "expected block should match found block" + ); + } + Err(e) => panic!("Error retrieving block {}: {:?}", expected_root, e), + } + } + } + } + + #[tokio::test] + async fn check_fallback_altair_to_capella() { + let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; + let num_epochs = 8; + let bellatrix_fork_epoch = 2usize; + let capella_fork_epoch = 4usize; + let num_blocks_produced = num_epochs * slots_per_epoch; + + let mut spec = test_spec::(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); + spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + + let harness = get_harness(VALIDATOR_COUNT, spec); + + // modify execution engine so it doesn't support engine_payloadBodiesBy* methods + let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .set_engine_capabilities(EngineCapabilities { + get_payload_bodies_by_hash_v1: false, + get_payload_bodies_by_range_v1: false, + ..DEFAULT_ENGINE_CAPABILITIES + }); + // refresh capabilities cache + harness + .chain + .execution_layer + .as_ref() + .unwrap() + .get_engine_capabilities(Some(Duration::ZERO)) + .await + .unwrap(); + + // go to bellatrix fork + harness + .extend_slots(bellatrix_fork_epoch * slots_per_epoch) + .await; + // extend half an epoch + harness.extend_slots(slots_per_epoch / 2).await; + // trigger merge + harness + .execution_block_generator() + .move_to_terminal_block() + .expect("should move to terminal block"); + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + // finish out merge epoch + harness.extend_slots(slots_per_epoch / 2).await; + // finish rest of epochs + harness + .extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch) + .await; + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + + assert_eq!( + state.slot(), + Slot::new(num_blocks_produced as u64), + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced as u64 / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let block_roots: Vec = harness + .chain + .forwards_iter_block_roots(Slot::new(0)) + .expect("should get iter") + .map(Result::unwrap) + .map(|(root, _)| root) + .collect(); + + let mut expected_blocks = vec![]; + // get all blocks the old fashioned way + for root in &block_roots { + let block = harness + .chain + .get_block(root) + .await + .expect("should get block") + .expect("block should exist"); + expected_blocks.push(block); + } + + for epoch in 0..num_epochs { + let start = epoch * slots_per_epoch; + let mut epoch_roots = 
vec![Hash256::zero(); slots_per_epoch]; + epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); + let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No) + .expect("should create streamer"); + let (block_tx, mut block_rx) = mpsc::unbounded_channel(); + streamer.stream(epoch_roots.clone(), block_tx).await; + + for (i, expected_root) in epoch_roots.into_iter().enumerate() { + let (found_root, found_block_result) = + block_rx.recv().await.expect("should get block"); + + assert_eq!( + found_root, expected_root, + "expected block root should match" + ); + match found_block_result.as_ref() { + Ok(maybe_block) => { + let found_block = maybe_block.clone().expect("should have a block"); + let expected_block = expected_blocks + .get(start + i) + .expect("should get expected block"); + assert_eq!( + found_block.as_ref(), + expected_block, + "expected block should match found block" + ); + } + Err(e) => panic!("Error retrieving block {}: {:?}", expected_root, e), + } + } + } + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 55d6ae29efb..0165c54dc3b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,14 +4,16 @@ use crate::attestation_verification::{ VerifiedUnaggregatedAttestation, }; use crate::attester_cache::{AttesterCache, AttesterCacheKey}; +use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ - check_block_is_finalized_descendant, check_block_relevancy, get_block_root, + check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; @@ -56,10 +58,12 @@ use crate::validator_monitor::{ }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; -use eth2::types::{EventKind, SseBlock, SyncDuty}; +use eth2::types::{EventKind, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; use execution_layer::{ - BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, + BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, + PayloadAttributes, PayloadStatus, }; +pub use fork_choice::CountUnrealized; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, @@ -67,9 +71,9 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; +use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; -use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; +use proto_array::{DoNotReOrg, ProposerHeadError}; use 
safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -79,13 +83,14 @@ use state_processing::{ common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ - errors::AttestationValidationError, verify_attestation_for_block_inclusion, - VerifySignatures, + errors::AttestationValidationError, get_expected_withdrawals, + verify_attestation_for_block_inclusion, VerifySignatures, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; +use std::borrow::Cow; use std::cmp::Ordering; use std::collections::HashMap; use std::collections::HashSet; @@ -98,14 +103,11 @@ use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; +use tokio_stream::Stream; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; -use types::consts::merge::INTERVALS_PER_SLOT; use types::*; -pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; -pub use fork_choice::CountUnrealized; - pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. @@ -125,12 +127,6 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1) /// The timeout for the eth1 finalization cache pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); -/// The latest delay from the start of the slot at which to attempt a 1-slot re-org. -fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration { - // Allow at least half of the attestation deadline for the block to propagate. - Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2 -} - // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -196,6 +192,9 @@ pub enum ProduceBlockVerification { pub struct PrePayloadAttributes { pub proposer_index: u64, pub prev_randao: Hash256, + /// The parent block number is not part of the payload attributes sent to the EL, but *is* + /// sent to builders via SSE. + pub parent_block_number: u64, } /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already @@ -269,7 +268,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { } /// Used internally to split block production into discrete functions. -struct PartialBeaconBlock { +struct PartialBeaconBlock> { state: BeaconState, slot: Slot, proposer_index: u64, @@ -283,7 +282,8 @@ struct PartialBeaconBlock { deposits: Vec, voluntary_exits: Vec, sync_aggregate: Option>, - prepare_payload_handle: Option>, + prepare_payload_handle: Option>, + bls_to_execution_changes: Vec, } pub type BeaconForkChoice = ForkChoice< @@ -352,7 +352,7 @@ pub struct BeaconChain { /// in recent epochs. pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. - pub(crate) observed_block_producers: RwLock>, + pub observed_block_producers: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -360,6 +360,9 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen attester slashings for. 
pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, + /// Maintains a record of which validators we've seen BLS to execution changes for. + pub(crate) observed_bls_to_execution_changes: + Mutex>, /// The most recently validated light client finality update received on gossip. pub latest_seen_finality_update: Mutex>>, /// The most recently validated light client optimistic update received on gossip. @@ -422,6 +425,46 @@ pub struct BeaconChain { type BeaconBlockAndState = (BeaconBlock, BeaconState); impl BeaconChain { + /// Checks if a block is finalized. + /// The finalization check is done with the block slot. The block root is used to verify that + /// the finalized slot is in the canonical chain. + pub fn is_finalized_block( + &self, + block_root: &Hash256, + block_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .block_root_at_slot(block_slot, WhenSlotSkipped::None)? + .map_or(false, |canonical_root| block_root == &canonical_root); + Ok(block_slot <= finalized_slot && is_canonical) + } + + /// Checks if a state is finalized. + /// The finalization check is done with the slot. The state root is used to verify that + /// the finalized state is in the canonical chain. + pub fn is_finalized_state( + &self, + state_root: &Hash256, + state_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .state_root_at_slot(state_slot)? + .map_or(false, |canonical_root| state_root == &canonical_root); + Ok(state_slot <= finalized_slot && is_canonical) + } + /// Persists the head tracker and fork choice. /// /// We do it atomically even though no guarantees need to be made about blocks from @@ -469,7 +512,6 @@ impl BeaconChain { pub fn load_fork_choice( store: BeaconStore, reset_payload_statuses: ResetPayloadStatuses, - count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result>, Error> { @@ -486,7 +528,6 @@ impl BeaconChain { persisted_fork_choice.fork_choice, reset_payload_statuses, fc_store, - count_unrealized_full, spec, log, )?)) @@ -933,14 +974,42 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. - pub async fn get_block_checking_early_attester_cache( - &self, - block_root: &Hash256, - ) -> Result>>, Error> { - if let Some(block) = self.early_attester_cache.get_block(*block_root) { - return Ok(Some(block)); - } - Ok(self.get_block(block_root).await?.map(Arc::new)) + pub fn get_blocks_checking_early_attester_cache( + self: &Arc, + block_roots: Vec, + executor: &TaskExecutor, + ) -> Result< + impl Stream< + Item = ( + Hash256, + Arc>>, Error>>, + ), + >, + Error, + > { + Ok( + BeaconBlockStreamer::::new(self, CheckEarlyAttesterCache::Yes)? + .launch_stream(block_roots, executor), + ) + } + + pub fn get_blocks( + self: &Arc, + block_roots: Vec, + executor: &TaskExecutor, + ) -> Result< + impl Stream< + Item = ( + Hash256, + Arc>>, Error>>, + ), + >, + Error, + > { + Ok( + BeaconBlockStreamer::::new(self, CheckEarlyAttesterCache::No)? + .launch_stream(block_roots, executor), + ) } /// Returns the block at the given root, if any. 
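The `is_finalized_block`/`is_finalized_state` helpers added above combine two conditions: the slot must be at or before the finalized slot, and the root must match the canonical root at that slot. A minimal stand-alone sketch of that check, using plain stand-in types rather than the `BeaconChain` API:

fn is_finalized_block(
    block_root: [u8; 32],
    block_slot: u64,
    finalized_slot: u64,
    canonical_root_at_slot: Option<[u8; 32]>,
) -> bool {
    // A skipped slot has no canonical block root, so the block cannot be canonical there.
    let is_canonical = canonical_root_at_slot.map_or(false, |root| root == block_root);
    block_slot <= finalized_slot && is_canonical
}

fn main() {
    let root = [7u8; 32];
    // At or before the finalized slot and canonical: finalized.
    assert!(is_finalized_block(root, 100, 128, Some(root)));
    // Same slot but not the canonical root: not finalized.
    assert!(!is_finalized_block(root, 100, 128, Some([8u8; 32])));
}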
@@ -959,29 +1028,32 @@ impl BeaconChain { Some(DatabaseBlock::Blinded(block)) => block, None => return Ok(None), }; + let fork = blinded_block.fork_name(&self.spec)?; // If we only have a blinded block, load the execution payload from the EL. let block_message = blinded_block.message(); - let execution_payload_header = &block_message + let execution_payload_header = block_message .execution_payload() .map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))? - .execution_payload_header; + .to_execution_payload_header(); - let exec_block_hash = execution_payload_header.block_hash; + let exec_block_hash = execution_payload_header.block_hash(); let execution_payload = self .execution_layer .as_ref() .ok_or(Error::ExecutionLayerMissing)? - .get_payload_by_block_hash(exec_block_hash) + .get_payload_for_header(&execution_payload_header, fork) .await - .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? + .map_err(|e| { + Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) + })? .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; // Verify payload integrity. - let header_from_payload = ExecutionPayloadHeader::from(&execution_payload); - if header_from_payload != *execution_payload_header { - for txn in &execution_payload.transactions { + let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); + if header_from_payload != execution_payload_header { + for txn in execution_payload.transactions() { debug!( self.log, "Reconstructed txn"; @@ -992,10 +1064,8 @@ impl BeaconChain { return Err(Error::InconsistentPayloadReconstructed { slot: blinded_block.slot(), exec_block_hash, - canonical_payload_root: execution_payload_header.tree_hash_root(), - reconstructed_payload_root: header_from_payload.tree_hash_root(), - canonical_transactions_root: execution_payload_header.transactions_root, - reconstructed_transactions_root: header_from_payload.transactions_root, + canonical_transactions_root: execution_payload_header.transactions_root(), + reconstructed_transactions_root: header_from_payload.transactions_root(), }); } @@ -1861,7 +1931,6 @@ impl BeaconChain { self.slot()?, verified.indexed_attestation(), AttestationFromBlock::False, - &self.spec, ) .map_err(Into::into) } @@ -2137,12 +2206,14 @@ impl BeaconChain { &self, exit: SignedVoluntaryExit, ) -> Result, Error> { - // NOTE: this could be more efficient if it avoided cloning the head state - let wall_clock_state = self.wall_clock_state()?; + let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; + let wall_clock_epoch = self.epoch()?; + Ok(self .observed_voluntary_exits .lock() - .verify_and_observe(exit, &wall_clock_state, &self.spec) + .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec) .map(|exit| { // this method is called for both API and gossip exits, so this covers all exit events if let Some(event_handler) = self.event_handler.as_ref() { @@ -2218,6 +2289,79 @@ impl BeaconChain { } } + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_http_api( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + // Before checking the gossip duplicate filter, check that no prior change is already + // in our op pool. Ignore these messages: do not gossip, do not try to override the pool. 
+ match self + .op_pool + .bls_to_execution_change_in_pool_equals(&bls_to_execution_change) + { + Some(true) => return Ok(ObservationOutcome::AlreadyKnown), + Some(false) => return Err(Error::BlsToExecutionConflictsWithPool), + None => (), + } + + // Use the head state to save advancing to the wall-clock slot unnecessarily. The message is + // signed with respect to the genesis fork version, and the slot check for gossip is applied + // separately. This `Arc` clone of the head is nice and cheap. + let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; + + Ok(self + .observed_bls_to_execution_changes + .lock() + .verify_and_observe(bls_to_execution_change, head_state, &self.spec)?) + } + + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_gossip( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + // Ignore BLS to execution changes on gossip prior to Capella. + if !self.current_slot_is_post_capella()? { + return Err(Error::BlsToExecutionPriorToCapella); + } + self.verify_bls_to_execution_change_for_http_api(bls_to_execution_change) + .or_else(|e| { + // On gossip treat conflicts the same as duplicates [IGNORE]. + match e { + Error::BlsToExecutionConflictsWithPool => Ok(ObservationOutcome::AlreadyKnown), + e => Err(e), + } + }) + } + + /// Check if the current slot is greater than or equal to the Capella fork epoch. + pub fn current_slot_is_post_capella(&self) -> Result { + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + Ok(false) + } else { + Ok(true) + } + } + + /// Import a BLS to execution change to the op pool. + /// + /// Return `true` if the change was added to the pool. + pub fn import_bls_to_execution_change( + &self, + bls_to_execution_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, + ) -> bool { + if self.eth1_chain.is_some() { + self.op_pool + .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) + } else { + false + } + } + /// Attempt to obtain sync committee duties from the head. pub fn sync_committee_duties_from_head( &self, @@ -2714,7 +2858,7 @@ impl BeaconChain { // is so we don't have to think about lock ordering with respect to the fork choice lock. // There are a bunch of places where we lock both fork choice and the pubkey cache and it // would be difficult to check that they all lock fork choice first. - let mut kv_store_ops = self + let mut ops = self .validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? @@ -2736,7 +2880,7 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?; + check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, &signed_block)?; // Register the new block with the fork choice service. 
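A rough sketch of the gossip-time gating for BLS to execution changes added above, with illustrative names only (the real code returns `ObservationOutcome` or a `BeaconChainError`): changes seen before the Capella fork are ignored on gossip, and a conflict with a change already in the op pool is treated like a duplicate rather than an error.

// Illustrative outcome type; not the Lighthouse API.
#[derive(Debug, PartialEq)]
enum GossipOutcome {
    New,
    AlreadyKnown,
    IgnoredPreCapella,
}

fn verify_bls_change_for_gossip(post_capella: bool, conflicts_with_pool: bool) -> GossipOutcome {
    if !post_capella {
        // BLS to execution changes are ignored on gossip prior to Capella.
        return GossipOutcome::IgnoredPreCapella;
    }
    if conflicts_with_pool {
        // On gossip, a conflict with the pool is downgraded to "already known" (IGNORE).
        return GossipOutcome::AlreadyKnown;
    }
    GossipOutcome::New
}

fn main() {
    assert_eq!(verify_bls_change_for_gossip(false, false), GossipOutcome::IgnoredPreCapella);
    assert_eq!(verify_bls_change_for_gossip(true, true), GossipOutcome::AlreadyKnown);
    assert_eq!(verify_bls_change_for_gossip(true, false), GossipOutcome::New);
}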
{ @@ -2744,7 +2888,7 @@ impl BeaconChain { metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); let block_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .ok_or(Error::UnableToComputeTimeAtSlot)?; fork_choice @@ -2756,7 +2900,7 @@ impl BeaconChain { &state, payload_verification_status, &self.spec, - count_unrealized.and(self.config.count_unrealized.into()), + count_unrealized, ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -2816,9 +2960,14 @@ impl BeaconChain { // ---------------------------- BLOCK PROBABLY ATTESTABLE ---------------------------------- // Most blocks are now capable of being attested to thanks to the `early_attester_cache` // cache above. Resume non-essential processing. + // + // It is important NOT to return errors here before the database commit, because the block + // has already been added to fork choice and the database would be left in an inconsistent + // state if we returned early without committing. In other words, an error here would + // corrupt the node's database permanently. // ----------------------------------------------------------------------------------------- - self.import_block_update_shuffling_cache(block_root, &mut state)?; + self.import_block_update_shuffling_cache(block_root, &mut state); self.import_block_observe_attestations( block, &state, @@ -2841,17 +2990,16 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let mut ops: Vec<_> = confirmed_state_roots - .into_iter() - .map(StoreOp::DeleteStateTemporaryFlag) - .collect(); + ops.extend( + confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag), + ); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); - kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); - - if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) { + if let Err(e) = self.store.do_atomically(ops) { error!( self.log, "Database write failed!"; @@ -2871,7 +3019,6 @@ impl BeaconChain { ResetPayloadStatuses::always_reset_conditionally( self.config.always_reset_payload_statuses, ), - self.config.count_unrealized_full, &self.store, &self.spec, &self.log, @@ -3280,13 +3427,27 @@ impl BeaconChain { } } + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. fn import_block_update_shuffling_cache( &self, block_root: Hash256, state: &mut BeaconState, + ) { + if let Err(e) = self.import_block_update_shuffling_cache_fallible(block_root, state) { + warn!( + self.log, + "Failed to prime shuffling cache"; + "error" => ?e + ); + } + } + + fn import_block_update_shuffling_cache_fallible( + &self, + block_root: Hash256, + state: &mut BeaconState, ) -> Result<(), BlockError> { - // For the current and next epoch of this state, ensure we have the shuffling from this - // block in our cache. for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; @@ -3426,7 +3587,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. 
/// Block signing is out of the scope of this function and should be done by a separate program. - pub async fn produce_block>( + pub async fn produce_block + 'static>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3442,7 +3603,9 @@ impl BeaconChain { } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. - pub async fn produce_block_with_verification>( + pub async fn produce_block_with_verification< + Payload: AbstractExecPayload + 'static, + >( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3578,7 +3741,7 @@ impl BeaconChain { let slot_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .or_else(|| { warn!( self.log, @@ -3593,7 +3756,7 @@ impl BeaconChain { // 1. It seems we have time to propagate and still receive the proposer boost. // 2. The current head block was seen late. // 3. The `get_proposer_head` conditions from fork choice pass. - let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot); + let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot); if !proposing_on_time { debug!( self.log, @@ -3623,6 +3786,7 @@ impl BeaconChain { slot, canonical_head, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| match e { @@ -3767,19 +3931,93 @@ impl BeaconChain { proposer as u64 }; - // Get the `prev_randao` value. - let prev_randao = if proposer_head == parent_block_root { - cached_head.parent_random() + // Get the `prev_randao` and parent block number. + let head_block_number = cached_head.head_block_number()?; + let (prev_randao, parent_block_number) = if proposer_head == parent_block_root { + ( + cached_head.parent_random()?, + head_block_number.saturating_sub(1), + ) } else { - cached_head.head_random() - }?; + (cached_head.head_random()?, head_block_number) + }; Ok(Some(PrePayloadAttributes { proposer_index, prev_randao, + parent_block_number, })) } + pub fn get_expected_withdrawals( + &self, + forkchoice_update_params: &ForkchoiceUpdateParameters, + proposal_slot: Slot, + ) -> Result, Error> { + let cached_head = self.canonical_head.cached_head(); + let head_state = &cached_head.snapshot.beacon_state; + + let parent_block_root = forkchoice_update_params.head_root; + + let (unadvanced_state, unadvanced_state_root) = + if cached_head.head_block_root() == parent_block_root { + (Cow::Borrowed(head_state), cached_head.head_state_root()) + } else if let Some(snapshot) = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .ok_or(Error::SnapshotCacheLockTimeout)? + .get_cloned(parent_block_root, CloneConfig::none()) + { + debug!( + self.log, + "Hit snapshot cache during withdrawals calculation"; + "slot" => proposal_slot, + "parent_block_root" => ?parent_block_root, + ); + let state_root = snapshot.beacon_state_root(); + (Cow::Owned(snapshot.beacon_state), state_root) + } else { + info!( + self.log, + "Missed snapshot cache during withdrawals calculation"; + "slot" => proposal_slot, + "parent_block_root" => ?parent_block_root + ); + let block = self + .get_blinded_block(&parent_block_root)? + .ok_or(Error::MissingBeaconBlock(parent_block_root))?; + let state = self + .get_state(&block.state_root(), Some(block.slot()))? 
+ .ok_or(Error::MissingBeaconState(block.state_root()))?; + (Cow::Owned(state), block.state_root()) + }; + + // Parent state epoch is the same as the proposal, we don't need to advance because the + // list of expected withdrawals can only change after an epoch advance or a + // block application. + let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + if head_state.current_epoch() == proposal_epoch { + return get_expected_withdrawals(&unadvanced_state, &self.spec) + .map_err(Error::PrepareProposerFailed); + } + + // Advance the state using the partial method. + debug!( + self.log, + "Advancing state for withdrawals calculation"; + "proposal_slot" => proposal_slot, + "parent_block_root" => ?parent_block_root, + ); + let mut advanced_state = unadvanced_state.into_owned(); + partial_state_advance( + &mut advanced_state, + Some(unadvanced_state_root), + proposal_epoch.start_slot(T::EthSpec::slots_per_epoch()), + &self.spec, + )?; + get_expected_withdrawals(&advanced_state, &self.spec).map_err(Error::PrepareProposerFailed) + } + /// Determine whether a fork choice update to the execution layer should be overridden. /// /// This is *only* necessary when proposer re-orgs are enabled, because we have to prevent the @@ -3827,6 +4065,7 @@ impl BeaconChain { .get_preliminary_proposer_head( head_block_root, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?; @@ -3837,7 +4076,7 @@ impl BeaconChain { let re_org_block_slot = head_slot + 1; let fork_choice_slot = info.current_slot; - // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up + // If a re-orging proposal isn't made by the `re_org_cutoff` then we give up // and allow the fork choice update for the canonical head through so that we may attest // correctly. let current_slot_ok = if head_slot == fork_choice_slot { @@ -3848,7 +4087,7 @@ impl BeaconChain { .and_then(|slot_start| { let now = self.slot_clock.now_duration()?; let slot_delay = now.saturating_sub(slot_start); - Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot)) + Some(slot_delay <= self.config.re_org_cutoff(self.spec.seconds_per_slot)) }) .unwrap_or(false) } else { @@ -3962,7 +4201,7 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub async fn produce_block_on_state>( + pub async fn produce_block_on_state + 'static>( self: &Arc, state: BeaconState, state_root_opt: Option, @@ -3997,12 +4236,13 @@ impl BeaconChain { // // Wait for the execution layer to return an execution payload (if one is required). let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); - let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { - let execution_payload = prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)??; - Some(execution_payload) + let block_contents = if let Some(prepare_payload_handle) = prepare_payload_handle { + Some( + prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? 
+ .ok_or(BlockProductionError::ShuttingDown)??, + ) } else { None }; @@ -4016,7 +4256,7 @@ impl BeaconChain { move || { chain.complete_partial_beacon_block( partial_beacon_block, - execution_payload, + block_contents, verification, ) }, @@ -4027,7 +4267,7 @@ impl BeaconChain { .map_err(BlockProductionError::TokioJoin)? } - fn produce_partial_beacon_block>( + fn produce_partial_beacon_block + 'static>( self: &Arc, mut state: BeaconState, state_root_opt: Option, @@ -4087,7 +4327,7 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) => { + BeaconState::Merge(_) | BeaconState::Capella(_) => { let prepare_payload_handle = get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) @@ -4100,6 +4340,10 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; + let bls_to_execution_changes = self + .op_pool + .get_bls_to_execution_changes(&state, &self.spec); + // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. let unagg_import_timer = @@ -4258,13 +4502,14 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, + bls_to_execution_changes, }) } - fn complete_partial_beacon_block>( + fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, - execution_payload: Option, + block_contents: Option>, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { let PartialBeaconBlock { @@ -4285,6 +4530,7 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. prepare_payload_handle: _, + bls_to_execution_changes, } = partial_beacon_block; let inner_block = match &state { @@ -4340,8 +4586,35 @@ impl BeaconChain { voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: execution_payload - .ok_or(BlockProductionError::MissingExecutionPayload)?, + execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + }, + }), + BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), }, }), }; @@ -4532,7 +4805,9 @@ impl BeaconChain { // Nothing to do if there are no proposers registered with the EL, exit early to avoid // wasting cycles. 
- if !execution_layer.has_any_proposer_preparation_data().await { + if !self.config.always_prepare_payload + && !execution_layer.has_any_proposer_preparation_data().await + { return Ok(()); } @@ -4589,40 +4864,60 @@ impl BeaconChain { // If the execution layer doesn't have any proposer data for this validator then we assume // it's not connected to this BN and no action is required. let proposer = pre_payload_attributes.proposer_index; - if !execution_layer - .has_proposer_preparation_data(proposer) - .await + if !self.config.always_prepare_payload + && !execution_layer + .has_proposer_preparation_data(proposer) + .await { return Ok(()); } + // Fetch payload attributes from the execution layer's cache, or compute them from scratch + // if no matching entry is found. This saves recomputing the withdrawals which can take + // considerable time to compute if a state load is required. let head_root = forkchoice_update_params.head_root; - let payload_attributes = PayloadAttributes { - timestamp: self - .slot_clock - .start_of(prepare_slot) - .ok_or(Error::InvalidSlot(prepare_slot))? - .as_secs(), - prev_randao: pre_payload_attributes.prev_randao, - suggested_fee_recipient: execution_layer.get_suggested_fee_recipient(proposer).await, - }; + let payload_attributes = if let Some(payload_attributes) = execution_layer + .payload_attributes(prepare_slot, head_root) + .await + { + payload_attributes + } else { + let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { + ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Capella => { + let chain = self.clone(); + self.spawn_blocking_handle( + move || { + chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot) + }, + "prepare_beacon_proposer_withdrawals", + ) + .await? + .map(Some)? + } + }; - debug!( + let payload_attributes = PayloadAttributes::new( + self.slot_clock + .start_of(prepare_slot) + .ok_or(Error::InvalidSlot(prepare_slot))? + .as_secs(), + pre_payload_attributes.prev_randao, + execution_layer.get_suggested_fee_recipient(proposer).await, + withdrawals.map(Into::into), + ); - debug!( - self.log, - "Preparing beacon proposer"; - "payload_attributes" => ?payload_attributes, - "prepare_slot" => prepare_slot, - "validator" => proposer, - "parent_root" => ?head_root, - ); + let payload_attributes = PayloadAttributes::new( + self.slot_clock + .start_of(prepare_slot) + .ok_or(Error::InvalidSlot(prepare_slot))? + .as_secs(), + pre_payload_attributes.prev_randao, + execution_layer.get_suggested_fee_recipient(proposer).await, + withdrawals.map(Into::into), + ); - let already_known = execution_layer - .insert_proposer(prepare_slot, head_root, proposer, payload_attributes) - .await; + execution_layer + .insert_proposer( + prepare_slot, + head_root, + proposer, + payload_attributes.clone(), + ) + .await; - // Only push a log to the user if this is the first time we've seen this proposer for this - // slot. - if !already_known { + // Only push a log to the user if this is the first time we've seen this proposer for + // this slot. info!( self.log, "Prepared beacon proposer"; @@ -4630,6 +4925,24 @@ "validator" => proposer, "parent_root" => ?head_root, ); + payload_attributes + }; + + // Push a server-sent event (probably to a block builder or relay).
+ if let Some(event_handler) = &self.event_handler { + if event_handler.has_payload_attributes_subscribers() { + event_handler.register(EventKind::PayloadAttributes(ForkVersionedResponse { + data: SseExtendedPayloadAttributes { + proposal_slot: prepare_slot, + proposer_index: proposer, + parent_block_root: head_root, + parent_block_number: pre_payload_attributes.parent_block_number, + parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), + payload_attributes: payload_attributes.into(), + }, + version: Some(self.spec.fork_name_at_slot::(prepare_slot)), + })); + } } let till_prepare_slot = @@ -4652,7 +4965,9 @@ impl BeaconChain { // If we are close enough to the proposal slot, send an fcU, which will have payload // attributes filled in by the execution layer cache we just primed. - if till_prepare_slot <= self.config.prepare_payload_lookahead { + if self.config.always_prepare_payload + || till_prepare_slot <= self.config.prepare_payload_lookahead + { debug!( self.log, "Sending forkchoiceUpdate for proposer prep"; @@ -4754,7 +5069,7 @@ impl BeaconChain { { // We are a proposer, check for terminal_pow_block_hash if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) + .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp()) .await .map_err(Error::ForkchoiceUpdate)? { @@ -4845,7 +5160,7 @@ impl BeaconChain { latest_valid_hash, ref validation_error, } => { - debug!( + warn!( self.log, "Invalid execution payload"; "validation_error" => ?validation_error, @@ -4854,32 +5169,44 @@ impl BeaconChain { "head_block_root" => ?head_block_root, "method" => "fcU", ); - warn!( - self.log, - "Fork choice update invalidated payload"; - "status" => ?status - ); - // This implies that the terminal block was invalid. We are being explicit in - // invalidating only the head block in this case. - if latest_valid_hash == ExecutionBlockHash::zero() { - self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - ) - .await?; - } else { + match latest_valid_hash { + // The `latest_valid_hash` is set to `None` when the EE + // "cannot determine the ancestor of the invalid + // payload". In such a scenario we should only + // invalidate the head block and nothing else. + None => { + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }, + ) + .await?; + } + // An all-zeros execution block hash implies that + // the terminal block was invalid. We are being + // explicit in invalidating only the head block in + // this case. + Some(hash) if hash == ExecutionBlockHash::zero() => { + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }, + ) + .await?; + } // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. 
- self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: latest_valid_hash, - }, - ) - .await?; + Some(latest_valid_hash) => { + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + .await?; + } } Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) @@ -4887,7 +5214,7 @@ impl BeaconChain { PayloadStatus::InvalidBlockHash { ref validation_error, } => { - debug!( + warn!( self.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, @@ -4895,11 +5222,6 @@ impl BeaconChain { "head_block_root" => ?head_block_root, "method" => "fcU", ); - warn!( - self.log, - "Fork choice update invalidated payload"; - "status" => ?status - ); // The execution engine has stated that the head block is invalid, however it // hasn't returned a latest valid ancestor. // @@ -4929,7 +5251,7 @@ impl BeaconChain { /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has /// `ExecutionStatus::Invalid`. - pub fn is_optimistic_or_invalid_block>( + pub fn is_optimistic_or_invalid_block>( &self, block: &SignedBeaconBlock, ) -> Result { @@ -4955,7 +5277,7 @@ impl BeaconChain { /// /// There is a potential race condition when syncing where the block_root of `head_block` could /// be pruned from the fork choice store before being read. - pub fn is_optimistic_or_invalid_head_block>( + pub fn is_optimistic_or_invalid_head_block>( &self, head_block: &SignedBeaconBlock, ) -> Result { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 0b789b8b615..71160fcb638 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -16,10 +16,18 @@ use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, + AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot, }; +/// Ensure this justified checkpoint has an epoch of 0 so that it is never +/// greater than the justified checkpoint and enshrined as the actual justified +/// checkpoint. 
+const JUNK_BEST_JUSTIFIED_CHECKPOINT: Checkpoint = Checkpoint { + epoch: Epoch::new(0), + root: Hash256::repeat_byte(0), +}; + #[derive(Debug)] pub enum Error { UnableToReadSlot, @@ -144,7 +152,6 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< finalized_checkpoint: Checkpoint, justified_checkpoint: Checkpoint, justified_balances: JustifiedBalances, - best_justified_checkpoint: Checkpoint, unrealized_justified_checkpoint: Checkpoint, unrealized_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, @@ -194,7 +201,6 @@ where justified_checkpoint, justified_balances, finalized_checkpoint, - best_justified_checkpoint: justified_checkpoint, unrealized_justified_checkpoint: justified_checkpoint, unrealized_finalized_checkpoint: finalized_checkpoint, proposer_boost_root: Hash256::zero(), @@ -212,7 +218,7 @@ where finalized_checkpoint: self.finalized_checkpoint, justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.effective_balances.clone(), - best_justified_checkpoint: self.best_justified_checkpoint, + best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, @@ -234,7 +240,6 @@ where finalized_checkpoint: persisted.finalized_checkpoint, justified_checkpoint: persisted.justified_checkpoint, justified_balances, - best_justified_checkpoint: persisted.best_justified_checkpoint, unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, proposer_boost_root: persisted.proposer_boost_root, @@ -260,7 +265,7 @@ where self.time = slot } - fn on_verified_block>( + fn on_verified_block>( &mut self, _block: BeaconBlockRef, block_root: Hash256, @@ -277,10 +282,6 @@ where &self.justified_balances } - fn best_justified_checkpoint(&self) -> &Checkpoint { - &self.best_justified_checkpoint - } - fn finalized_checkpoint(&self) -> &Checkpoint { &self.finalized_checkpoint } @@ -333,10 +334,6 @@ where Ok(()) } - fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) { - self.best_justified_checkpoint = checkpoint - } - fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint) { self.unrealized_justified_checkpoint = checkpoint; } diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 8491622cb09..7d89df98293 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,20 +1,20 @@ use serde_derive::Serialize; use std::sync::Arc; use types::{ - beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, + beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock, }; /// Represents some block and its associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] -pub struct BeaconSnapshot = FullPayload> { +pub struct BeaconSnapshot = FullPayload> { pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } -impl> BeaconSnapshot { +impl> BeaconSnapshot { /// Create a new checkpoint. 
pub fn new( beacon_block: Arc>, diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 3bddd2a5215..fd0cfc7e9bd 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -5,10 +5,10 @@ use state_processing::{ common::get_attesting_indices_from_state, per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, }; -use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256}; impl BeaconChain { - pub fn compute_block_reward>( + pub fn compute_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ab317e96b96..5102381a1a1 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -42,6 +42,11 @@ //! END //! //! ``` + +// Ignore this lint for `BlockSlashInfo` which is of comparable size to the non-error types it is +// returned alongside. +#![allow(clippy::result_large_err)] + use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, @@ -83,6 +88,7 @@ use std::time::Duration; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; +use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, @@ -274,10 +280,10 @@ pub enum BlockError { /// /// ## Peer scoring /// - /// TODO(merge): reconsider how we score peers for this. - /// - /// The peer sent us an invalid block, but I'm not really sure how to score this in an - /// "optimistic" sync world. + /// The peer sent us an invalid block, we must penalise harshly. + /// If it's actually our fault (e.g. our execution node database is corrupt) we have bigger + /// problems to worry about than losing peers, and we're doing the network a favour by + /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, } @@ -739,7 +745,7 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - check_block_is_finalized_descendant( + check_block_is_finalized_checkpoint_or_descendant( chain, &chain.canonical_head.fork_choice_write_lock(), &block, @@ -1180,7 +1186,7 @@ impl ExecutionPendingBlock { .message() .body() .execution_payload() - .map(|full_payload| full_payload.execution_payload.block_hash); + .map(|full_payload| full_payload.block_hash()); // Ensure the block is a candidate for optimistic import. if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? @@ -1462,7 +1468,6 @@ impl ExecutionPendingBlock { current_slot, indexed_attestation, AttestationFromBlock::True, - &chain.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The @@ -1559,12 +1564,12 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. 
-pub fn check_block_is_finalized_descendant( +pub fn check_block_is_finalized_checkpoint_or_descendant( chain: &BeaconChain, fork_choice: &BeaconForkChoice, block: &Arc>, ) -> Result<(), BlockError> { - if fork_choice.is_descendant_of_finalized(block.parent_root()) { + if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, @@ -1845,7 +1850,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( } /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. -fn get_validator_pubkey_cache( +pub fn get_validator_pubkey_cache( chain: &BeaconChain, ) -> Result>, BlockError> { chain diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 48419d46edb..6ee97a95c1a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -18,11 +18,11 @@ use crate::{ }; use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; +use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use proto_array::ReOrgThreshold; +use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; @@ -175,6 +175,15 @@ where self } + /// Sets the proposer re-org disallowed offsets list. + pub fn proposer_re_org_disallowed_offsets( + mut self, + disallowed_offsets: DisallowedReOrgOffsets, + ) -> Self { + self.chain_config.re_org_disallowed_offsets = disallowed_offsets; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. 
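The new `proposer_re_org_disallowed_offsets` builder option above feeds `ChainConfig::re_org_disallowed_offsets`. The `DisallowedReOrgOffsets` internals live in `proto_array` and are not shown in this diff, so the sketch below is only an assumed illustration of the idea: skip re-orging proposals whose slot falls on a disallowed offset within the epoch.

fn re_org_permitted_at(slot: u64, slots_per_epoch: u64, disallowed_offsets: &[u64]) -> bool {
    // Hypothetical check: the offset of the proposal slot within its epoch must not be
    // one of the configured disallowed offsets (the default list is empty).
    let offset = slot % slots_per_epoch;
    !disallowed_offsets.contains(&offset)
}

fn main() {
    // E.g. disallowing offset 1 because slot-1 re-org blocks gossip slowly.
    assert!(!re_org_permitted_at(32 * 100 + 1, 32, &[1]));
    assert!(re_org_permitted_at(32 * 100 + 5, 32, &[1]));
}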
@@ -265,7 +274,6 @@ where ResetPayloadStatuses::always_reset_conditionally( self.chain_config.always_reset_payload_statuses, ), - self.chain_config.count_unrealized_full, &self.spec, log, ) @@ -384,7 +392,6 @@ where &genesis.beacon_block, &genesis.beacon_state, current_slot, - self.chain_config.count_unrealized_full, &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -503,7 +510,6 @@ where &snapshot.beacon_block, &snapshot.beacon_state, current_slot, - self.chain_config.count_unrealized_full, &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -681,8 +687,7 @@ where store.clone(), Some(current_slot), &self.spec, - self.chain_config.count_unrealized.into(), - self.chain_config.count_unrealized_full, + CountUnrealized::True, )?; } @@ -765,6 +770,7 @@ where let genesis_time = head_snapshot.beacon_state.genesis_time(); let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let shuffling_cache_size = self.chain_config.shuffling_cache_size; let beacon_chain = BeaconChain { spec: self.spec, @@ -800,6 +806,7 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), + observed_bls_to_execution_changes: <_>::default(), latest_seen_finality_update: <_>::default(), latest_seen_optimistic_update: <_>::default(), eth1_chain: self.eth1_chain, @@ -817,7 +824,7 @@ where DEFAULT_SNAPSHOT_CACHE_SIZE, head_for_snapshot_cache, )), - shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), + shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(shuffling_cache_size)), eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index dd64e02edf7..0e1c8a5305d 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -45,8 +45,7 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ - CountUnrealizedFull, ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, - ResetPayloadStatuses, + ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -167,6 +166,17 @@ impl CachedHead { .map(|payload| payload.prev_randao()) } + /// Returns the execution block number of the block at the head of the chain. + /// + /// Returns an error if the chain is prior to Bellatrix. + pub fn head_block_number(&self) -> Result { + self.snapshot + .beacon_block + .message() + .execution_payload() + .map(|payload| payload.block_number()) + } + /// Returns the active validator count for the current epoch of the head state. /// /// Should only return `None` if the caches have not been built on the head state (this should @@ -274,19 +284,13 @@ impl CanonicalHead { // defensive programming. 
mut fork_choice_write_lock: RwLockWriteGuard>, reset_payload_statuses: ResetPayloadStatuses, - count_unrealized_full: CountUnrealizedFull, store: &BeaconStore, spec: &ChainSpec, log: &Logger, ) -> Result<(), Error> { - let fork_choice = >::load_fork_choice( - store.clone(), - reset_payload_statuses, - count_unrealized_full, - spec, - log, - )? - .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice = + >::load_fork_choice(store.clone(), reset_payload_statuses, spec, log)? + .ok_or(Error::MissingPersistedForkChoice)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store @@ -930,8 +934,12 @@ impl BeaconChain { .execution_status .is_optimistic_or_invalid(); - self.op_pool - .prune_all(&new_snapshot.beacon_state, self.epoch()?); + self.op_pool.prune_all( + &new_snapshot.beacon_block, + &new_snapshot.beacon_state, + self.epoch()?, + &self.spec, + ); self.observed_block_producers.write().prune( new_view diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs new file mode 100644 index 00000000000..bb729d89997 --- /dev/null +++ b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -0,0 +1,122 @@ +//! Provides tools for checking if a node is ready for the Capella upgrade and following merge +//! transition. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V2, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Capella fork when we will start issuing warnings about preparation. +use super::merge_readiness::SECONDS_IN_A_WEEK; +pub const CAPELLA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum CapellaReadiness { + /// The execution engine is capella-enabled (as far as we can tell) + Ready, + /// We are connected to an execution engine which doesn't support the V2 engine api methods + V2MethodsNotSupported { error: String }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. 
+ ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for CapellaReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CapellaReadiness::Ready => { + write!(f, "This node appears ready for Capella.") + } + CapellaReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + CapellaReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + CapellaReadiness::V2MethodsNotSupported { error } => write!( + f, + "Execution endpoint does not support Capella methods: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if capella epoch is set and Capella fork has occurred or will + /// occur within `CAPELLA_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_capella(&self, current_slot: Slot) -> bool { + if let Some(capella_epoch) = self.spec.capella_fork_epoch { + let capella_slot = capella_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let capella_readiness_preparation_slots = + CAPELLA_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return `true` if Capella has happened or is within the preparation time. + current_slot + capella_readiness_preparation_slots > capella_slot + } else { + // The Capella fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for capella. + pub async fn check_capella_readiness(&self) -> CapellaReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + CapellaReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V2); + all_good = false; + } + if !capabilities.forkchoice_updated_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V2); + all_good = false; + } + if !capabilities.new_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V2); + all_good = false; + } + + if all_good { + CapellaReadiness::Ready + } else { + CapellaReadiness::V2MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + CapellaReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index cce2fbb971f..9921435313d 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,10 +1,12 @@ -pub use proto_array::{CountUnrealizedFull, ReOrgThreshold}; +pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; use types::{Checkpoint, Epoch}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); +/// Default to 1/12th of the slot, which is 1 second on mainnet. 
+pub const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12; pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; /// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet). @@ -34,6 +36,13 @@ pub struct ChainConfig { pub re_org_threshold: Option, /// Maximum number of epochs since finalization for attempting a proposer re-org. pub re_org_max_epochs_since_finalization: Epoch, + /// Maximum delay after the start of the slot at which to propose a reorging block. + pub re_org_cutoff_millis: Option, + /// Additional epoch offsets at which re-orging block proposals are not permitted. + /// + /// By default this list is empty, but it can be useful for reacting to network conditions, e.g. + /// slow gossip of re-org blocks at slot 1 in the epoch. + pub re_org_disallowed_offsets: DisallowedReOrgOffsets, /// Number of milliseconds to wait for fork choice before proposing a block. /// /// If set to 0 then block proposal will not wait for fork choice at all. @@ -48,16 +57,11 @@ pub struct ChainConfig { pub builder_fallback_epochs_since_finalization: usize, /// Whether any chain health checks should be considered when deciding whether to use the builder API. pub builder_fallback_disable_checks: bool, - /// When set to `true`, weigh the "unrealized" FFG progression when choosing a head in fork - /// choice. - pub count_unrealized: bool, /// When set to `true`, forget any valid/invalid/optimistic statuses in fork choice during start /// up. pub always_reset_payload_statuses: bool, /// Whether to apply paranoid checks to blocks proposed by this beacon node. pub paranoid_block_proposal: bool, - /// Whether to strictly count unrealized justified votes. - pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, /// The offset before the start of a proposal slot at which payload attributes should be sent. @@ -67,6 +71,14 @@ pub struct ChainConfig { pub prepare_payload_lookahead: Duration, /// Use EL-free optimistic sync for the finalized part of the chain. pub optimistic_finalized_sync: bool, + /// The size of the shuffling cache, + pub shuffling_cache_size: usize, + /// Whether to send payload attributes every slot, regardless of connected proposers. + /// + /// This is useful for block builders and testing. + pub always_prepare_payload: bool, + /// Whether backfill sync processing should be rate-limited. + pub enable_backfill_rate_limiting: bool, } impl Default for ChainConfig { @@ -79,19 +91,34 @@ impl Default for ChainConfig { max_network_size: 10 * 1_048_576, // 10M re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD), re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + re_org_cutoff_millis: None, + re_org_disallowed_offsets: DisallowedReOrgOffsets::default(), fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, // Builder fallback configs that are set in `clap` will override these. builder_fallback_skips: 3, builder_fallback_skips_per_epoch: 8, builder_fallback_epochs_since_finalization: 3, builder_fallback_disable_checks: false, - count_unrealized: true, always_reset_payload_statuses: false, paranoid_block_proposal: false, - count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, prepare_payload_lookahead: Duration::from_secs(4), + // This value isn't actually read except in tests. 
optimistic_finalized_sync: true, + shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, + always_prepare_payload: false, + enable_backfill_rate_limiting: true, } } } + +impl ChainConfig { + /// The latest delay from the start of the slot at which to attempt a 1-slot re-org. + pub fn re_org_cutoff(&self, seconds_per_slot: u64) -> Duration { + self.re_org_cutoff_millis + .map(Duration::from_millis) + .unwrap_or_else(|| { + Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + }) + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 17f58b223f4..e789b54a21b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,4 +1,5 @@ use crate::attester_cache::Error as AttesterCacheError; +use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::eth1_chain::Error as Eth1ChainError; @@ -17,8 +18,9 @@ use ssz_types::Error as SszTypesError; use state_processing::{ block_signature_verifier::Error as BlockSignatureVerifierError, per_block_processing::errors::{ - AttestationValidationError, AttesterSlashingValidationError, ExitValidationError, - ProposerSlashingValidationError, SyncCommitteeMessageValidationError, + AttestationValidationError, AttesterSlashingValidationError, + BlsExecutionChangeValidationError, ExitValidationError, ProposerSlashingValidationError, + SyncCommitteeMessageValidationError, }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, @@ -50,7 +52,6 @@ pub enum BeaconChainError { }, SlotClockDidNotStart, NoStateForSlot(Slot), - UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), DBError(store::Error), @@ -70,6 +71,7 @@ pub enum BeaconChainError { ExitValidationError(ExitValidationError), ProposerSlashingValidationError(ProposerSlashingValidationError), AttesterSlashingValidationError(AttesterSlashingValidationError), + BlsExecutionChangeValidationError(BlsExecutionChangeValidationError), StateSkipTooLarge { start_slot: Slot, requested_slot: Slot, @@ -141,25 +143,28 @@ pub enum BeaconChainError { BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), - ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), + ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box), + EngineGetCapabilititesFailed(Box), BlockHashMissingFromExecutionLayer(ExecutionBlockHash), InconsistentPayloadReconstructed { slot: Slot, exec_block_hash: ExecutionBlockHash, - canonical_payload_root: Hash256, - reconstructed_payload_root: Hash256, canonical_transactions_root: Hash256, reconstructed_transactions_root: Hash256, }, + BlockStreamerError(BlockStreamerError), AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), - PrepareProposerBlockingFailed(execution_layer::Error), + PrepareProposerFailed(BlockProcessingError), ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, + BlockRewardError, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, + SyncCommitteeRewardsSyncError, + AttestationRewardsError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), HeadBlockMissingFromForkChoice(Hash256), @@ -204,6 +209,9 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), 
MaxCommitteePromises(usize), + BlsToExecutionPriorToCapella, + BlsToExecutionConflictsWithPool, + InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), } @@ -213,6 +221,7 @@ easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError); easy_from_to!(ProposerSlashingValidationError, BeaconChainError); easy_from_to!(AttesterSlashingValidationError, BeaconChainError); +easy_from_to!(BlsExecutionChangeValidationError, BeaconChainError); easy_from_to!(SszTypesError, BeaconChainError); easy_from_to!(OpPoolError, BeaconChainError); easy_from_to!(NaiveAggregationError, BeaconChainError); @@ -227,6 +236,7 @@ easy_from_to!(ForkChoiceStoreError, BeaconChainError); easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); +easy_from_to!(InconsistentFork, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { @@ -259,6 +269,7 @@ pub enum BlockProductionError { MissingExecutionPayload, TokioJoin(tokio::task::JoinError), BeaconChain(BeaconChainError), + InvalidPayloadFork, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 6f4415ef4f3..fed05032374 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -14,6 +14,7 @@ pub struct ServerSentEventHandler { exit_tx: Sender>, chain_reorg_tx: Sender>, contribution_tx: Sender>, + payload_attributes_tx: Sender>, late_head: Sender>, block_reward_tx: Sender>, log: Logger, @@ -32,6 +33,7 @@ impl ServerSentEventHandler { let (exit_tx, _) = broadcast::channel(capacity); let (chain_reorg_tx, _) = broadcast::channel(capacity); let (contribution_tx, _) = broadcast::channel(capacity); + let (payload_attributes_tx, _) = broadcast::channel(capacity); let (late_head, _) = broadcast::channel(capacity); let (block_reward_tx, _) = broadcast::channel(capacity); @@ -43,6 +45,7 @@ impl ServerSentEventHandler { exit_tx, chain_reorg_tx, contribution_tx, + payload_attributes_tx, late_head, block_reward_tx, log, @@ -50,28 +53,55 @@ impl ServerSentEventHandler { } pub fn register(&self, kind: EventKind) { - let result = match kind { - EventKind::Attestation(attestation) => self + let log_count = |name, count| { + trace!( + self.log, + "Registering server-sent event"; + "kind" => name, + "receiver_count" => count + ); + }; + let result = match &kind { + EventKind::Attestation(_) => self .attestation_tx - .send(EventKind::Attestation(attestation)) - .map(|count| trace!(self.log, "Registering server-sent attestation event"; "receiver_count" => count)), - EventKind::Block(block) => self.block_tx.send(EventKind::Block(block)) - .map(|count| trace!(self.log, "Registering server-sent block event"; "receiver_count" => count)), - EventKind::FinalizedCheckpoint(checkpoint) => self.finalized_tx - .send(EventKind::FinalizedCheckpoint(checkpoint)) - .map(|count| trace!(self.log, "Registering server-sent finalized checkpoint event"; "receiver_count" => count)), - EventKind::Head(head) => self.head_tx.send(EventKind::Head(head)) - .map(|count| trace!(self.log, "Registering server-sent head event"; "receiver_count" => count)), - EventKind::VoluntaryExit(exit) => self.exit_tx.send(EventKind::VoluntaryExit(exit)) - .map(|count| trace!(self.log, "Registering server-sent voluntary exit event"; "receiver_count" => count)), - EventKind::ChainReorg(reorg) 
=> self.chain_reorg_tx.send(EventKind::ChainReorg(reorg)) - .map(|count| trace!(self.log, "Registering server-sent chain reorg event"; "receiver_count" => count)), - EventKind::ContributionAndProof(contribution_and_proof) => self.contribution_tx.send(EventKind::ContributionAndProof(contribution_and_proof)) - .map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)), - EventKind::LateHead(late_head) => self.late_head.send(EventKind::LateHead(late_head)) - .map(|count| trace!(self.log, "Registering server-sent late head event"; "receiver_count" => count)), - EventKind::BlockReward(block_reward) => self.block_reward_tx.send(EventKind::BlockReward(block_reward)) - .map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)), + .send(kind) + .map(|count| log_count("attestation", count)), + EventKind::Block(_) => self + .block_tx + .send(kind) + .map(|count| log_count("block", count)), + EventKind::FinalizedCheckpoint(_) => self + .finalized_tx + .send(kind) + .map(|count| log_count("finalized checkpoint", count)), + EventKind::Head(_) => self + .head_tx + .send(kind) + .map(|count| log_count("head", count)), + EventKind::VoluntaryExit(_) => self + .exit_tx + .send(kind) + .map(|count| log_count("exit", count)), + EventKind::ChainReorg(_) => self + .chain_reorg_tx + .send(kind) + .map(|count| log_count("chain reorg", count)), + EventKind::ContributionAndProof(_) => self + .contribution_tx + .send(kind) + .map(|count| log_count("contribution and proof", count)), + EventKind::PayloadAttributes(_) => self + .payload_attributes_tx + .send(kind) + .map(|count| log_count("payload attributes", count)), + EventKind::LateHead(_) => self + .late_head + .send(kind) + .map(|count| log_count("late head", count)), + EventKind::BlockReward(_) => self + .block_reward_tx + .send(kind) + .map(|count| log_count("block reward", count)), }; if let Err(SendError(event)) = result { trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -106,6 +136,10 @@ impl ServerSentEventHandler { self.contribution_tx.subscribe() } + pub fn subscribe_payload_attributes(&self) -> Receiver> { + self.payload_attributes_tx.subscribe() + } + pub fn subscribe_late_head(&self) -> Receiver> { self.late_head.subscribe() } @@ -142,6 +176,10 @@ impl ServerSentEventHandler { self.contribution_tx.receiver_count() > 0 } + pub fn has_payload_attributes_subscribers(&self) -> bool { + self.payload_attributes_tx.receiver_count() > 0 + } + pub fn has_late_head_subscribers(&self) -> bool { self.late_head.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 7435c3a8cc4..1ac7229cc6d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,22 +12,23 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::{BuilderParams, PayloadStatus}; +use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::{debug, warn}; use slot_clock::SlotClock; use state_processing::per_block_processing::{ - compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, - partially_verify_execution_payload, + 
compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, + is_merge_transition_complete, partially_verify_execution_payload, }; use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; use types::*; -pub type PreparePayloadResult = Result; -pub type PreparePayloadHandle = JoinHandle>>; +pub type PreparePayloadResult = + Result, BlockProductionError>; +pub type PreparePayloadHandle = JoinHandle>>; #[derive(PartialEq)] pub enum AllowOptimisticImport { @@ -68,8 +69,13 @@ impl PayloadNotifier { // where we do not send the block to the EL at all. let block_message = block.message(); let payload = block_message.execution_payload()?; - partially_verify_execution_payload(state, block.slot(), payload, &chain.spec) - .map_err(BlockError::PerBlockProcessingError)?; + partially_verify_execution_payload::<_, FullPayload<_>>( + state, + block.slot(), + payload, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; match notify_execution_layer { NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { @@ -81,7 +87,7 @@ impl PayloadNotifier { .ok_or(ExecutionPayloadError::NoExecutionConnection)?; if let Err(e) = - execution_layer.verify_payload_block_hash(&payload.execution_payload) + execution_layer.verify_payload_block_hash(payload.execution_payload_ref()) { warn!( chain.log, @@ -140,7 +146,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let new_payload_response = execution_layer - .notify_new_payload(&execution_payload.execution_payload) + .notify_new_payload(&execution_payload.into()) .await; match new_payload_response { @@ -153,12 +159,12 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( latest_valid_hash, ref validation_error, } => { - debug!( + warn!( chain.log, "Invalid execution payload"; "validation_error" => ?validation_error, "latest_valid_hash" => ?latest_valid_hash, - "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "execution_block_hash" => ?execution_payload.block_hash(), "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -166,32 +172,45 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( "method" => "new_payload", ); - // latest_valid_hash == 0 implies that this was the terminal block - // Hence, we don't need to run `BeaconChain::process_invalid_execution_payload`. - if latest_valid_hash == ExecutionBlockHash::zero() { - return Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()); + // Only trigger payload invalidation in fork choice if the + // `latest_valid_hash` is `Some` and non-zero. + // + // A `None` latest valid hash indicates that the EE was unable + // to determine the most recent valid ancestor. Since `block` + // has not yet been applied to fork choice, there's nothing to + // invalidate. + // + // An all-zeros payload indicates that an EIP-3675 check has + // failed regarding the validity of the terminal block. Rather + // than iterating back in the chain to find the terminal block + // and invalidating that, we simply reject this block without + // invalidating anything else. + if let Some(latest_valid_hash) = + latest_valid_hash.filter(|hash| *hash != ExecutionBlockHash::zero()) + { + // This block has not yet been applied to fork choice, so the latest block that was + // imported to fork choice was the parent. 
+ let latest_root = block.parent_root(); + + chain + .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { + head_block_root: latest_root, + always_invalidate_head: false, + latest_valid_ancestor: latest_valid_hash, + }) + .await?; } - // This block has not yet been applied to fork choice, so the latest block that was - // imported to fork choice was the parent. - let latest_root = block.parent_root(); - chain - .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { - head_block_root: latest_root, - always_invalidate_head: false, - latest_valid_ancestor: latest_valid_hash, - }) - .await?; Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } PayloadStatus::InvalidBlockHash { ref validation_error, } => { - debug!( + warn!( chain.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, - "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "execution_block_hash" => ?execution_payload.block_hash(), "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -344,7 +363,7 @@ pub fn validate_execution_payload_for_gossip( } }; - if is_merge_transition_complete || execution_payload != &<_>::default() { + if is_merge_transition_complete || !execution_payload.is_default_with_empty_roots() { let expected_timestamp = chain .slot_clock .start_of(block.slot()) @@ -382,13 +401,13 @@ pub fn validate_execution_payload_for_gossip( /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal pub fn get_execution_payload< T: BeaconChainTypes, - Payload: ExecPayload + Default + Send + 'static, + Payload: AbstractExecPayload + 'static, >( chain: Arc>, state: &BeaconState, proposer_index: u64, builder_params: BuilderParams, -) -> Result, BlockProductionError> { +) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. let spec = &chain.spec; @@ -398,7 +417,13 @@ pub fn get_execution_payload< compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash; + state.latest_execution_payload_header()?.block_hash(); + let withdrawals = match state { + &BeaconState::Capella(_) => Some(get_expected_withdrawals(state, spec)?.into()), + &BeaconState::Merge(_) => None, + // These shouldn't happen but they're here to make the pattern irrefutable + &BeaconState::Base(_) | &BeaconState::Altair(_) => None, + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. 
@@ -415,6 +440,7 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, + withdrawals, ) .await }, @@ -448,13 +474,15 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, -) -> Result + withdrawals: Option>, +) -> Result, BlockProductionError> where T: BeaconChainTypes, - Payload: ExecPayload + Default, + Payload: AbstractExecPayload, { let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; + let fork = spec.fork_name_at_slot::(builder_params.slot); let execution_layer = chain .execution_layer .as_ref() @@ -468,7 +496,7 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. - return Ok(<_>::default()); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } let terminal_pow_block_hash = execution_layer @@ -481,7 +509,7 @@ where } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. - return Ok(<_>::default()); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } } else { latest_execution_payload_header_block_hash @@ -505,21 +533,26 @@ where .await .map_err(BlockProductionError::BeaconChain)?; + let suggested_fee_recipient = execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); + // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. - let execution_payload = execution_layer + let block_contents = execution_layer .get_payload::( parent_hash, - timestamp, - random, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, + fork, &chain.spec, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; - Ok(execution_payload) + Ok(block_contents) } diff --git a/beacon_node/beacon_chain/src/fork_choice_signal.rs b/beacon_node/beacon_chain/src/fork_choice_signal.rs index fd92de661da..f5424d417eb 100644 --- a/beacon_node/beacon_chain/src/fork_choice_signal.rs +++ b/beacon_node/beacon_chain/src/fork_choice_signal.rs @@ -43,7 +43,7 @@ impl ForkChoiceSignalTx { /// /// Return an error if the provided `slot` is strictly less than any previously provided slot. 
pub fn notify_fork_choice_complete(&self, slot: Slot) -> Result<(), BeaconChainError> { - let &(ref lock, ref condvar) = &*self.pair; + let (lock, condvar) = &*self.pair; let mut current_slot = lock.lock(); @@ -72,7 +72,7 @@ impl Default for ForkChoiceSignalTx { impl ForkChoiceSignalRx { pub fn wait_for_fork_choice(&self, slot: Slot, timeout: Duration) -> ForkChoiceWaitResult { - let &(ref lock, ref condvar) = &*self.pair; + let (lock, condvar) = &*self.pair; let mut current_slot = lock.lock(); diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 6d5b5ddc4ae..ef23248aba6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,7 +1,6 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; use itertools::process_results; -use proto_array::CountUnrealizedFull; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -102,7 +101,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It current_slot: Option, spec: &ChainSpec, count_unrealized_config: CountUnrealized, - count_unrealized_full_config: CountUnrealizedFull, ) -> Result, E>, String> { // Fetch finalized block. let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -156,7 +154,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, current_slot, - count_unrealized_full_config, spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index ae1c5e4b766..be1522a3b80 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,6 +1,8 @@ -#![recursion_limit = "128"] // For lazy-static +pub mod attestation_rewards; pub mod attestation_verification; mod attester_cache; +pub mod beacon_block_reward; +mod beacon_block_streamer; mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; @@ -10,6 +12,7 @@ mod block_times_cache; mod block_verification; pub mod builder; pub mod canonical_head; +pub mod capella_readiness; pub mod chain_config; mod early_attester_cache; mod errors; @@ -29,7 +32,7 @@ pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; mod observed_attesters; -mod observed_block_producers; +pub mod observed_block_producers; pub mod observed_operations; pub mod otb_verification_service; mod persisted_beacon_chain; @@ -37,9 +40,10 @@ mod persisted_fork_choice; mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; -mod shuffling_cache; +pub mod shuffling_cache; mod snapshot_cache; pub mod state_advance_timer; +pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; @@ -53,7 +57,7 @@ pub use self::beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; -pub use self::chain_config::{ChainConfig, CountUnrealizedFull}; +pub use self::chain_config::ChainConfig; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs 
b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index ec9c90e7355..20d7181808a 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -2,6 +2,7 @@ use crate::{ beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; +use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; @@ -36,6 +37,8 @@ pub enum Error { SigSlotStartIsNone, /// Failed to construct a LightClientOptimisticUpdate from state. FailedConstructingUpdate, + /// Unknown block with parent root. + UnknownBlockParentRoot(Hash256), /// Beacon chain error occured. BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), @@ -58,6 +61,7 @@ impl From for Error { #[derivative(Clone(bound = "T: BeaconChainTypes"))] pub struct VerifiedLightClientOptimisticUpdate { light_client_optimistic_update: LightClientOptimisticUpdate, + pub parent_root: Hash256, seen_timestamp: Duration, } @@ -107,6 +111,16 @@ impl VerifiedLightClientOptimisticUpdate { None => return Err(Error::SigSlotStartIsNone), } + // check if we can process the optimistic update immediately + // otherwise queue + let canonical_root = light_client_optimistic_update + .attested_header + .canonical_root(); + + if canonical_root != head_block.message().parent_root() { + return Err(Error::UnknownBlockParentRoot(canonical_root)); + } + let optimistic_update = LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; @@ -119,6 +133,7 @@ impl VerifiedLightClientOptimisticUpdate { Ok(Self { light_client_optimistic_update, + parent_root: canonical_root, seen_timestamp, }) } diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index 4ef2102fd51..c66df39eedf 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -8,7 +8,7 @@ use std::fmt::Write; use types::*; /// The time before the Bellatrix fork when we will start issuing warnings about preparation. -const SECONDS_IN_A_WEEK: u64 = 604800; +pub const SECONDS_IN_A_WEEK: u64 = 604800; pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; #[derive(Default, Debug, Serialize, Deserialize)] diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 8d8272b67d7..4121111b3ee 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,12 +1,12 @@ use derivative::Derivative; -use smallvec::SmallVec; +use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; -use state_processing::{SigVerifiedOp, VerifyOperation}; +use state_processing::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; use std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, - SignedVoluntaryExit, Slot, + AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, ProposerSlashing, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; /// Number of validator indices to store on the stack in `observed_validators`. @@ -39,7 +39,7 @@ pub enum ObservationOutcome { AlreadyKnown, } -/// Trait for exits and slashings which can be observed using `ObservedOperations`. 
+/// Trait for operations which can be observed using `ObservedOperations`. pub trait ObservableOperation: VerifyOperation + Sized { /// The set of validator indices involved in this operation. /// @@ -49,13 +49,13 @@ pub trait ObservableOperation: VerifyOperation + Sized { impl ObservableOperation for SignedVoluntaryExit { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { - std::iter::once(self.message.validator_index).collect() + smallvec![self.message.validator_index] } } impl ObservableOperation for ProposerSlashing { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { - std::iter::once(self.signed_header_1.message.proposer_index).collect() + smallvec![self.signed_header_1.message.proposer_index] } } @@ -80,13 +80,23 @@ impl ObservableOperation for AttesterSlashing { } } +impl ObservableOperation for SignedBlsToExecutionChange { + fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { + smallvec![self.message.validator_index] + } +} + impl, E: EthSpec> ObservedOperations { - pub fn verify_and_observe( + pub fn verify_and_observe_parametric( &mut self, op: T, + validate: F, head_state: &BeaconState, spec: &ChainSpec, - ) -> Result, T::Error> { + ) -> Result, T::Error> + where + F: Fn(T) -> Result, T::Error>, + { self.reset_at_fork_boundary(head_state.slot(), spec); let observed_validator_indices = &mut self.observed_validator_indices; @@ -106,7 +116,7 @@ impl, E: EthSpec> ObservedOperations { } // Validate the op using operation-specific logic (`verify_attester_slashing`, etc). - let verified_op = op.validate(head_state, spec)?; + let verified_op = validate(op)?; // Add the relevant indices to the set of known indices to prevent processing of duplicates // in the future. @@ -115,6 +125,16 @@ impl, E: EthSpec> ObservedOperations { Ok(ObservationOutcome::New(verified_op)) } + pub fn verify_and_observe( + &mut self, + op: T, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate(head_state, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } + /// Reset the cache when crossing a fork boundary. /// /// This prevents an attacker from crafting a self-slashing which is only valid before the fork @@ -134,3 +154,16 @@ impl, E: EthSpec> ObservedOperations { } } } + +impl + VerifyOperationAt, E: EthSpec> ObservedOperations { + pub fn verify_and_observe_at( + &mut self, + op: T, + verify_at_epoch: Epoch, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate_at(head_state, verify_at_epoch, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } +} diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 73906b1b586..5808e648a2c 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,9 @@ //! Utilities for managing database schema changes. 
mod migration_schema_v12; mod migration_schema_v13; +mod migration_schema_v14; +mod migration_schema_v15; +mod migration_schema_v16; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -114,6 +117,30 @@ pub fn migrate_schema( Ok(()) } + (SchemaVersion(13), SchemaVersion(14)) => { + let ops = migration_schema_v14::upgrade_to_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(14), SchemaVersion(13)) => { + let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(14), SchemaVersion(15)) => { + let ops = migration_schema_v15::upgrade_to_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(15), SchemaVersion(14)) => { + let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(15), SchemaVersion(16)) => { + let ops = migration_schema_v16::upgrade_to_v16::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(16), SchemaVersion(15)) => { + let ops = migration_schema_v16::downgrade_from_v16::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs index bb72b28c0ec..c9aa2097f8a 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -168,16 +168,14 @@ pub fn downgrade_from_v12( log: Logger, ) -> Result, Error> { // Load a V12 op pool and transform it to V5. - let PersistedOperationPoolV12 { + let PersistedOperationPoolV12:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } = if let Some(PersistedOperationPool::::V12(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { - op_pool + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 } else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs new file mode 100644 index 00000000000..be913d8cc5f --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -0,0 +1,125 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, +}; +use slog::{debug, error, info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use types::{EthSpec, Hash256, Slot}; + +/// The slot clock isn't usually available before the database is initialized, so we construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? 
{ + block + } else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let genesis_state = + if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { + state + } else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} + +pub fn upgrade_to_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V12 op pool and transform it to V14. + let PersistedOperationPoolV12:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + // initialize with empty vector + let bls_to_execution_changes = vec![]; + let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }); + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // We cannot downgrade from V14 once the Capella fork has been reached because there will + // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions + // of Lighthouse can't handle that. + if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch { + let current_epoch = get_slot_clock::(&db, &log)? + .and_then(|clock| clock.now()) + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(Error::SlotClockUnavailableForMigration)?; + + if current_epoch >= capella_fork_epoch { + error!( + log, + "Capella already active: v14+ is mandatory"; + "current_epoch" => current_epoch, + "capella_fork_epoch" => capella_fork_epoch, + ); + return Err(Error::UnableToDowngrade); + } + } + + // Load a V14 op pool and transform it to V12. + let PersistedOperationPoolV14:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Dropping bls_to_execution_changes from pool"; + "count" => bls_to_execution_changes.len(), + ); + + let v12 = PersistedOperationPoolV12 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + }; + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs new file mode 100644 index 00000000000..07c86bd931f --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -0,0 +1,76 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V14 op pool and transform it to V15. 
+ let PersistedOperationPoolV14:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v14 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + // Initialize with empty set + capella_bls_change_broadcast_indices: <_>::default(), + }); + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V15 op pool and transform it to V14. + let PersistedOperationPoolV15:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Forgetting address changes for Capella broadcast"; + "count" => capella_bls_change_broadcast_indices.len(), + ); + + let v14 = PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }; + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs new file mode 100644 index 00000000000..230573b0288 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs @@ -0,0 +1,46 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::persisted_fork_choice::PersistedForkChoiceV11; +use slog::{debug, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v16( + db: Arc>, + log: Logger, +) -> Result, Error> { + drop_balances_cache::(db, log) +} + +pub fn downgrade_from_v16( + db: Arc>, + log: Logger, +) -> Result, Error> { + drop_balances_cache::(db, log) +} + +/// Drop the balances cache from the fork choice store. +/// +/// There aren't any type-level changes in this schema migration, however the +/// way that we compute the `JustifiedBalances` has changed due to: +/// https://github.com/sigp/lighthouse/pull/3962 +pub fn drop_balances_cache( + db: Arc>, + log: Logger, +) -> Result, Error> { + let mut persisted_fork_choice = db + .get_item::(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + + debug!( + log, + "Dropping fork choice balances cache"; + "item_count" => persisted_fork_choice.fork_choice_store.balances_cache.items.len() + ); + + // Drop all items in the balances cache. 
+ persisted_fork_choice.fork_choice_store.balances_cache = <_>::default(); + + let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY); + + Ok(vec![kv_op]) +} diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index a01847a0e13..91a1e24d82b 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -9,7 +9,7 @@ use types::{beacon_state::CommitteeCache, AttestationShufflingId, Epoch, Hash256 /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash + /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this /// ignores a few extra bytes in the caches that should be insignificant compared to the indices). -const CACHE_SIZE: usize = 16; +pub const DEFAULT_CACHE_SIZE: usize = 16; /// The maximum number of concurrent committee cache "promises" that can be issued. In effect, this /// limits the number of concurrent states that can be loaded into memory for the committee cache. @@ -54,9 +54,9 @@ pub struct ShufflingCache { } impl ShufflingCache { - pub fn new() -> Self { + pub fn new(cache_size: usize) -> Self { Self { - cache: LruCache::new(CACHE_SIZE), + cache: LruCache::new(cache_size), } } @@ -172,7 +172,7 @@ impl ToArcCommitteeCache for Arc { impl Default for ShufflingCache { fn default() -> Self { - Self::new() + Self::new(DEFAULT_CACHE_SIZE) } } @@ -249,7 +249,7 @@ mod test { fn resolved_promise() { let (committee_a, _) = committee_caches(); let id_a = shuffling_id(1); - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -276,7 +276,7 @@ mod test { #[test] fn unresolved_promise() { let id_a = shuffling_id(1); - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -301,7 +301,7 @@ mod test { fn two_promises() { let (committee_a, committee_b) = committee_caches(); let (id_a, id_b) = (shuffling_id(1), shuffling_id(2)); - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); // Create promise A. 
let sender_a = cache.create_promise(id_a.clone()).unwrap(); @@ -355,7 +355,7 @@ mod test { #[test] fn too_many_promises() { - let mut cache = ShufflingCache::new(); + let mut cache = ShufflingCache::default(); for i in 0..MAX_CONCURRENT_PROMISES { cache.create_promise(shuffling_id(i as u64)).unwrap(); diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs new file mode 100644 index 00000000000..2221aa1d5eb --- /dev/null +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -0,0 +1,87 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; + +use eth2::lighthouse::SyncCommitteeReward; +use safe_arith::SafeArith; +use slog::error; +use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use std::collections::HashMap; +use store::RelativeEpoch; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState}; + +impl BeaconChain { + pub fn compute_sync_committee_rewards>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result, BeaconChainError> { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + let spec = &self.spec; + + state.build_committee_cache(RelativeEpoch::Current, spec)?; + + let sync_aggregate = block.body().sync_aggregate()?; + + let sync_committee = state.current_sync_committee()?.clone(); + + let sync_committee_indices = state.get_sync_committee_indices(&sync_committee)?; + + let (participant_reward_value, proposer_reward_per_bit) = + compute_sync_aggregate_rewards(state, spec).map_err(|e| { + error!( + self.log, "Error calculating sync aggregate rewards"; + "error" => ?e + ); + BeaconChainError::SyncCommitteeRewardsSyncError + })?; + + let mut balances = HashMap::::new(); + + let mut total_proposer_rewards = 0; + let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; + + // Apply rewards to participant balances. 
Keep track of proposer rewards + for (validator_index, participant_bit) in sync_committee_indices + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + { + let participant_balance = balances + .entry(*validator_index) + .or_insert_with(|| state.balances()[*validator_index]); + + if participant_bit { + participant_balance.safe_add_assign(participant_reward_value)?; + + balances + .entry(proposer_index) + .or_insert_with(|| state.balances()[proposer_index]) + .safe_add_assign(proposer_reward_per_bit)?; + + total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?; + } else { + *participant_balance = participant_balance.saturating_sub(participant_reward_value); + } + } + + Ok(balances + .iter() + .filter_map(|(i, new_balance)| { + let reward = if *i != proposer_index { + *new_balance as i64 - state.balances()[*i] as i64 + } else if sync_committee_indices.contains(i) { + *new_balance as i64 + - state.balances()[*i] as i64 + - total_proposer_rewards as i64 + } else { + return None; + }; + Some(SyncCommitteeReward { + validator_index: *i as u64, + reward, + }) + }) + .collect()) + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 66de3f02d23..3c5d1fd3b1a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,6 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, + sync_committee_verification::Error as SyncCommitteeError, validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; @@ -12,17 +13,17 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; -use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; -pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use merkle_proof::MerkleTree; use parking_lot::Mutex; @@ -107,6 +108,14 @@ pub enum AttestationStrategy { SomeValidators(Vec), } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SyncCommitteeStrategy { + /// All sync committee validators sign. + AllValidators, + /// No validators sign. + NoValidators, +} + /// Indicates whether the `BeaconChainHarness` should use the `state.current_sync_committee` or /// `state.next_sync_committee` when creating sync messages or contributions. 
#[derive(Clone, Debug)] @@ -148,6 +157,7 @@ pub struct Builder { eth_spec_instance: T::EthSpec, spec: Option, validator_keypairs: Option>, + withdrawal_keypairs: Vec>, chain_config: Option, store_config: Option, #[allow(clippy::type_complexity)] @@ -179,7 +189,7 @@ impl Builder> { .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), @@ -240,7 +250,7 @@ impl Builder> { .expect("cannot build without validator keypairs"); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), @@ -282,6 +292,7 @@ where eth_spec_instance, spec: None, validator_keypairs: None, + withdrawal_keypairs: vec![], chain_config: None, store_config: None, store: None, @@ -307,6 +318,26 @@ where self } + /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to + /// the "deterministic" values, regardless of whether or not the validator has + /// a BLS or execution address in the genesis deposits. + /// + /// This aligns with the withdrawal commitments used in the "interop" + /// genesis states. + pub fn deterministic_withdrawal_keypairs(self, num_keypairs: usize) -> Self { + self.withdrawal_keypairs( + types::test_utils::generate_deterministic_keypairs(num_keypairs) + .into_iter() + .map(Option::Some) + .collect(), + ) + } + + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { + self.withdrawal_keypairs = withdrawal_keypairs; + self + } + pub fn default_spec(self) -> Self { self.spec_or_default(None) } @@ -384,15 +415,35 @@ where self } + pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self { + let mock = self + .mock_execution_layer + .as_mut() + .expect("must have mock execution layer to recalculate fork times"); + let spec = self + .spec + .clone() + .expect("cannot recalculate fork times without spec"); + mock.server.execution_block_generator().shanghai_time = + spec.capella_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + + self + } + pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock = MockExecutionLayer::new( self.runtime.task_executor.clone(), - spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, - spec.terminal_block_hash, - spec.terminal_block_hash_activation_epoch, + shanghai_time, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec, None, ); self.execution_layer = Some(mock.el.clone()); @@ -400,19 +451,26 @@ where self } - pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + pub fn mock_execution_layer_with_builder( + mut self, + beacon_url: SensitiveUrl, + builder_threshold: Option, + ) -> Self { // Get a random unused port - let port = unused_port::unused_tcp_port().unwrap(); + let port = unused_port::unused_tcp4_port().unwrap(); let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time =
spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock_el = MockExecutionLayer::new( self.runtime.task_executor.clone(), - spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, - spec.terminal_block_hash, - spec.terminal_block_hash_activation_epoch, + shanghai_time, + builder_threshold, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec.clone(), Some(builder_url.clone()), ) .move_to_terminal_block(); @@ -504,6 +562,7 @@ where spec: chain.spec.clone(), chain: Arc::new(chain), validator_keypairs, + withdrawal_keypairs: self.withdrawal_keypairs, shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, @@ -519,6 +578,12 @@ where /// Used for testing. pub struct BeaconChainHarness { pub validator_keypairs: Vec, + /// Optional BLS withdrawal keys for each validator. + /// + /// If a validator index is missing from this vec or their entry is `None` then either + /// no BLS withdrawal key was set for them (they had an address from genesis) or the test + /// initializer neglected to set this field. + pub withdrawal_keypairs: Vec>, pub chain: Arc>, pub spec: ChainSpec, @@ -1430,6 +1495,44 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn make_bls_to_execution_change( + &self, + validator_index: u64, + address: Address, + ) -> SignedBlsToExecutionChange { + let keypair = self.get_withdrawal_keypair(validator_index); + self.make_bls_to_execution_change_with_keys( + validator_index, + address, + &keypair.pk, + &keypair.sk, + ) + } + + pub fn make_bls_to_execution_change_with_keys( + &self, + validator_index: u64, + address: Address, + pubkey: &PublicKey, + secret_key: &SecretKey, + ) -> SignedBlsToExecutionChange { + let genesis_validators_root = self.chain.genesis_validators_root; + BlsToExecutionChange { + validator_index, + from_bls_pubkey: pubkey.compress(), + to_execution_address: address, + } + .sign(secret_key, genesis_validators_root, &self.chain.spec) + } + + pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair { + self.withdrawal_keypairs + .get(validator_index as usize) + .expect("BLS withdrawal key missing from harness") + .as_ref() + .expect("no withdrawal key for validator") + } + pub fn add_voluntary_exit( &self, block: &mut BeaconBlock, @@ -1657,15 +1760,64 @@ where self.process_attestations(attestations); } + pub fn sync_committee_sign_block( + &self, + state: &BeaconState, + block_hash: Hash256, + slot: Slot, + relative_sync_committee: RelativeSyncCommittee, + ) { + let sync_contributions = + self.make_sync_contributions(state, block_hash, slot, relative_sync_committee); + self.process_sync_contributions(sync_contributions).unwrap() + } + pub async fn add_attested_block_at_slot( &self, slot: Slot, state: BeaconState, state_root: Hash256, validators: &[usize], + ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { + self.add_attested_block_at_slot_with_sync( + slot, + state, + state_root, + validators, + SyncCommitteeStrategy::NoValidators, + ) + .await + } + + pub async fn add_attested_block_at_slot_with_sync( + &self, + slot: Slot, + state: BeaconState, + state_root: Hash256, + validators: &[usize], + sync_committee_strategy: SyncCommitteeStrategy, ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block, 
validators); + + if sync_committee_strategy == SyncCommitteeStrategy::AllValidators + && state.current_sync_committee().is_ok() + { + self.sync_committee_sign_block( + &state, + block_hash.into(), + slot, + if (slot + 1).epoch(E::slots_per_epoch()) + % self.spec.epochs_per_sync_committee_period + == 0 + { + RelativeSyncCommittee::Next + } else { + RelativeSyncCommittee::Current + }, + ); + } + Ok((block_hash, state)) } @@ -1675,10 +1827,35 @@ where state_root: Hash256, slots: &[Slot], validators: &[usize], + ) -> AddBlocksResult { + self.add_attested_blocks_at_slots_with_sync( + state, + state_root, + slots, + validators, + SyncCommitteeStrategy::NoValidators, + ) + .await + } + + pub async fn add_attested_blocks_at_slots_with_sync( + &self, + state: BeaconState, + state_root: Hash256, + slots: &[Slot], + validators: &[usize], + sync_committee_strategy: SyncCommitteeStrategy, ) -> AddBlocksResult { assert!(!slots.is_empty()); - self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None) - .await + self.add_attested_blocks_at_slots_given_lbh( + state, + state_root, + slots, + validators, + None, + sync_committee_strategy, + ) + .await } async fn add_attested_blocks_at_slots_given_lbh( @@ -1688,6 +1865,7 @@ where slots: &[Slot], validators: &[usize], mut latest_block_hash: Option, + sync_committee_strategy: SyncCommitteeStrategy, ) -> AddBlocksResult { assert!( slots.windows(2).all(|w| w[0] <= w[1]), @@ -1697,7 +1875,13 @@ where let mut state_hash_from_slot: HashMap = HashMap::new(); for slot in slots { let (block_hash, new_state) = self - .add_attested_block_at_slot(*slot, state, state_root, validators) + .add_attested_block_at_slot_with_sync( + *slot, + state, + state_root, + validators, + sync_committee_strategy, + ) .await .unwrap(); state = new_state; @@ -1779,6 +1963,7 @@ where &epoch_slots, &validators, Some(head_block), + SyncCommitteeStrategy::NoValidators, // for backwards compat ) .await; @@ -1895,6 +2080,22 @@ where num_blocks: usize, block_strategy: BlockStrategy, attestation_strategy: AttestationStrategy, + ) -> Hash256 { + self.extend_chain_with_sync( + num_blocks, + block_strategy, + attestation_strategy, + SyncCommitteeStrategy::NoValidators, + ) + .await + } + + pub async fn extend_chain_with_sync( + &self, + num_blocks: usize, + block_strategy: BlockStrategy, + attestation_strategy: AttestationStrategy, + sync_committee_strategy: SyncCommitteeStrategy, ) -> Hash256 { let (mut state, slots) = match block_strategy { BlockStrategy::OnCanonicalHead => { @@ -1926,7 +2127,13 @@ where }; let state_root = state.update_tree_hash_cache().unwrap(); let (_, _, last_produced_block_hash, _) = self - .add_attested_blocks_at_slots(state, state_root, &slots, &validators) + .add_attested_blocks_at_slots_with_sync( + state, + state_root, + &slots, + &validators, + sync_committee_strategy, + ) .await; last_produced_block_hash.into() } @@ -1980,6 +2187,30 @@ where (honest_head, faulty_head) } + + pub fn process_sync_contributions( + &self, + sync_contributions: HarnessSyncContributions, + ) -> Result<(), SyncCommitteeError> { + let mut verified_contributions = Vec::with_capacity(sync_contributions.len()); + + for (_, contribution_and_proof) in sync_contributions { + let signed_contribution_and_proof = contribution_and_proof.unwrap(); + + let verified_contribution = self + .chain + .verify_sync_contribution_for_gossip(signed_contribution_and_proof)?; + + verified_contributions.push(verified_contribution); + } + + for verified_contribution in verified_contributions { 
+ self.chain + .add_contribution_to_block_inclusion_pool(verified_contribution)?; + } + + Ok(()) + } } // Junk `Debug` impl to satistfy certain trait bounds during testing. diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index dad5e1517ad..d79a56df6b2 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -15,6 +15,7 @@ use std::io; use std::marker::PhantomData; use std::str::Utf8Error; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use store::AbstractExecPayload; use types::{ AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, @@ -29,7 +30,7 @@ const TOTAL_LABEL: &str = "total"; /// The validator monitor collects per-epoch data about each monitored validator. Historical data /// will be kept around for `HISTORIC_EPOCHS` before it is pruned. -pub const HISTORIC_EPOCHS: usize = 4; +pub const HISTORIC_EPOCHS: usize = 10; /// Once the validator monitor reaches this number of validators it will stop /// tracking their metrics/logging individually in an effort to reduce @@ -45,7 +46,7 @@ pub enum Error { /// Contains data pertaining to one validator for one epoch. #[derive(Default)] -struct EpochSummary { +pub struct EpochSummary { /* * Attestations with a target in the current epoch. */ @@ -103,6 +104,12 @@ struct EpochSummary { pub proposer_slashings: usize, /// The number of attester slashings observed. pub attester_slashings: usize, + + /* + * Other validator info helpful for the UI. + */ + /// The total balance of the validator. + pub total_balance: Option, } impl EpochSummary { @@ -176,18 +183,60 @@ impl EpochSummary { pub fn register_attester_slashing(&mut self) { self.attester_slashings += 1; } + + pub fn register_validator_total_balance(&mut self, total_balance: u64) { + self.total_balance = Some(total_balance) + } } type SummaryMap = HashMap; +#[derive(Default)] +pub struct ValidatorMetrics { + pub attestation_hits: u64, + pub attestation_misses: u64, + pub attestation_head_hits: u64, + pub attestation_head_misses: u64, + pub attestation_target_hits: u64, + pub attestation_target_misses: u64, +} + +impl ValidatorMetrics { + pub fn increment_hits(&mut self) { + self.attestation_hits += 1; + } + + pub fn increment_misses(&mut self) { + self.attestation_misses += 1; + } + + pub fn increment_target_hits(&mut self) { + self.attestation_target_hits += 1; + } + + pub fn increment_target_misses(&mut self) { + self.attestation_target_misses += 1; + } + + pub fn increment_head_hits(&mut self) { + self.attestation_head_hits += 1; + } + + pub fn increment_head_misses(&mut self) { + self.attestation_head_misses += 1; + } +} + /// A validator that is being monitored by the `ValidatorMonitor`. -struct MonitoredValidator { +pub struct MonitoredValidator { /// A human-readable identifier for the validator. pub id: String, /// The validator index in the state. pub index: Option, /// A history of the validator over time. pub summaries: RwLock, + /// Validator metrics to be exposed over the HTTP API. 
+ pub metrics: RwLock, } impl MonitoredValidator { @@ -198,6 +247,7 @@ impl MonitoredValidator { .unwrap_or_else(|| pubkey.to_string()), index, summaries: <_>::default(), + metrics: <_>::default(), } } @@ -252,6 +302,20 @@ impl MonitoredValidator { fn touch_epoch_summary(&self, epoch: Epoch) { self.with_epoch_summary(epoch, |_| {}); } + + fn get_from_epoch_summary(&self, epoch: Epoch, func: F) -> Option + where + F: Fn(Option<&EpochSummary>) -> Option, + { + let summaries = self.summaries.read(); + func(summaries.get(&epoch)) + } + + pub fn get_total_balance(&self, epoch: Epoch) -> Option { + self.get_from_epoch_summary(epoch, |summary_opt| { + summary_opt.and_then(|summary| summary.total_balance) + }) + } } /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P @@ -347,12 +411,20 @@ impl ValidatorMonitor { if let Some(i) = monitored_validator.index { monitored_validator.touch_epoch_summary(current_epoch); + let i = i as usize; + + // Cache relevant validator info. + if let Some(balance) = state.balances().get(i) { + monitored_validator.with_epoch_summary(current_epoch, |summary| { + summary.register_validator_total_balance(*balance) + }); + } + // Only log the per-validator metrics if it's enabled. if !self.individual_tracking() { continue; } - let i = i as usize; let id = &monitored_validator.id; if let Some(balance) = state.balances().get(i) { @@ -479,6 +551,25 @@ impl ValidatorMonitor { continue; } + // Store some metrics directly to be re-exposed on the HTTP API. + let mut validator_metrics = monitored_validator.metrics.write(); + if previous_epoch_matched_any { + validator_metrics.increment_hits(); + if previous_epoch_matched_target { + validator_metrics.increment_target_hits() + } else { + validator_metrics.increment_target_misses() + } + if previous_epoch_matched_head { + validator_metrics.increment_head_hits() + } else { + validator_metrics.increment_head_misses() + } + } else { + validator_metrics.increment_misses() + } + drop(validator_metrics); + // Indicates if any attestation made it on-chain. // // For Base states, this will be *any* attestation whatsoever. For Altair states, @@ -717,6 +808,14 @@ impl ValidatorMonitor { self.validators.values().map(|val| val.id.clone()).collect() } + pub fn get_monitored_validator(&self, index: u64) -> Option<&MonitoredValidator> { + if let Some(pubkey) = self.indices.get(&index) { + self.validators.get(pubkey) + } else { + None + } + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. pub fn auto_register_local_validator(&mut self, validator_index: u64) { @@ -1638,9 +1737,9 @@ fn u64_to_i64(n: impl Into) -> i64 { } /// Returns the delay between the start of `block.slot` and `seen_timestamp`. 
-pub fn get_block_delay_ms( +pub fn get_block_delay_ms>( seen_timestamp: Duration, - block: BeaconBlockRef<'_, T>, + block: BeaconBlockRef<'_, T, P>, slot_clock: &S, ) -> Duration { get_slot_delay_ms::(seen_timestamp, block.slot(), slot_clock) diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 26aea2d2722..79910df2923 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; use std::marker::PhantomData; -use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem}; +use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. @@ -38,7 +38,7 @@ impl ValidatorPubkeyCache { }; let store_ops = cache.import_new_pubkeys(state)?; - store.hot_db.do_atomically(store_ops)?; + store.do_atomically(store_ops)?; Ok(cache) } @@ -79,7 +79,7 @@ impl ValidatorPubkeyCache { pub fn import_new_pubkeys( &mut self, state: &BeaconState, - ) -> Result, BeaconChainError> { + ) -> Result>, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( state.validators()[self.pubkeys.len()..] @@ -92,7 +92,10 @@ impl ValidatorPubkeyCache { } /// Adds zero or more validators to `self`. - fn import(&mut self, validator_keys: I) -> Result, BeaconChainError> + fn import( + &mut self, + validator_keys: I, + ) -> Result>, BeaconChainError> where I: Iterator + ExactSizeIterator, { @@ -112,7 +115,9 @@ impl ValidatorPubkeyCache { // It will be committed atomically when the block that introduced it is written to disk. // Notably it is NOT written while the write lock on the cache is held. // See: https://github.com/sigp/lighthouse/issues/2327 - store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i))); + store_ops.push(StoreOp::KeyValueOp( + DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)), + )); self.pubkeys.push( (&pubkey) @@ -294,7 +299,7 @@ mod test { let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); - store.hot_db.do_atomically(ops).unwrap(); + store.do_atomically(ops).unwrap(); check_cache_get(&cache, &keypairs[..]); drop(cache); diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs new file mode 100644 index 00000000000..e910e8134f1 --- /dev/null +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -0,0 +1,167 @@ +#![cfg(not(debug_assertions))] // Tests run too slow in debug. + +use beacon_chain::test_utils::BeaconChainHarness; +use execution_layer::test_utils::Block; +use types::*; + +const VALIDATOR_COUNT: usize = 32; +type E = MainnetEthSpec; + +fn verify_execution_payload_chain(chain: &[FullPayload]) { + let mut prev_ep: Option> = None; + + for ep in chain { + assert!(!ep.is_default_with_empty_roots()); + assert!(ep.block_hash() != ExecutionBlockHash::zero()); + + // Check against previous `ExecutionPayload`. 
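+        // Each payload should extend its predecessor: matching parent hash, incremented block number, strictly increasing timestamp.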
+ if let Some(prev_ep) = prev_ep { + assert_eq!(prev_ep.block_hash(), ep.parent_hash()); + assert_eq!(prev_ep.block_number() + 1, ep.block_number()); + assert!(ep.timestamp() > prev_ep.timestamp()); + } + prev_ep = Some(ep.clone()); + } +} + +#[tokio::test] +async fn base_altair_merge_capella() { + let altair_fork_epoch = Epoch::new(4); + let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let capella_fork_epoch = Epoch::new(12); + let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + /* + * Start with the base fork. + */ + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); + + /* + * Do the Altair fork. + */ + harness.extend_to_slot(altair_fork_slot).await; + + let altair_head = &harness.chain.head_snapshot().beacon_block; + assert!(altair_head.as_altair().is_ok()); + assert_eq!(altair_head.slot(), altair_fork_slot); + + /* + * Do the merge fork, without a terminal PoW block. + */ + harness.extend_to_slot(merge_fork_slot).await; + + let merge_head = &harness.chain.head_snapshot().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), merge_fork_slot); + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" + ); + + /* + * Next merge block shouldn't include an exec payload. + */ + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + + /* + * Trigger the terminal PoW block. + */ + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + harness.extend_slots(1).await; + + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + two_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Two after merge head is default payload" + ); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); + + /* + * Next merge block should include an exec payload. 
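+     * Collect these payloads up until the Capella fork so the whole chain can be verified at the end.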
+ */ + let mut execution_payloads = vec![]; + for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // pre-capella shouldn't have withdrawals + assert!(full_payload.withdrawals_root().is_err()); + execution_payloads.push(full_payload); + } + + /* + * Should enter capella fork now. + */ + for _ in 0..16 { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // post-capella should have withdrawals + assert!(full_payload.withdrawals_root().is_ok()); + execution_payloads.push(full_payload); + } + + verify_execution_payload_chain(execution_payloads.as_slice()); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 1c61e9927fc..c81a547406a 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -1,9 +1,11 @@ mod attestation_production; mod attestation_verification; mod block_verification; +mod capella; mod merge; mod op_verification; mod payload_invalidation; +mod rewards; mod store_tests; mod sync_committee_verification; mod tests; diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index c8c47c99041..1e0112a4954 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -12,17 +12,14 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { let mut prev_ep: Option> = None; for ep in chain { - assert!(*ep != FullPayload::default()); + assert!(!ep.is_default_with_empty_roots()); assert!(ep.block_hash() != ExecutionBlockHash::zero()); // Check against previous `ExecutionPayload`. 
if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.execution_payload.parent_hash); - assert_eq!( - prev_ep.execution_payload.block_number + 1, - ep.execution_payload.block_number - ); - assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp); + assert_eq!(prev_ep.block_hash(), ep.parent_hash()); + assert_eq!(prev_ep.block_number() + 1, ep.block_number()); + assert!(ep.timestamp() > prev_ep.timestamp()); } prev_ep = Some(ep.clone()); } @@ -89,7 +86,7 @@ async fn merge_with_terminal_block_hash_override() { if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } - execution_payloads.push(execution_payload); + execution_payloads.push(execution_payload.into()); } verify_execution_payload_chain(execution_payloads.as_slice()); @@ -141,9 +138,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() { let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); - assert_eq!( - *merge_head.message().body().execution_payload().unwrap(), - FullPayload::default() + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" ); /* @@ -153,13 +155,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - assert_eq!( - *one_after_merge_head + assert!( + one_after_merge_head .message() .body() .execution_payload() - .unwrap(), - FullPayload::default() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" ); assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); @@ -185,26 +188,34 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; - let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - assert_eq!( - *one_after_merge_head + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + two_after_merge_head .message() .body() .execution_payload() - .unwrap(), - FullPayload::default() + .unwrap() + .is_default_with_empty_roots(), + "Two after merge head is default payload" ); - assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); /* * Next merge block should include an exec payload. 
*/ - for _ in 0..4 { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); + execution_payloads.push( + block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(), + ); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 0b9eaaee0f0..54d7734471c 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -13,9 +13,9 @@ use beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, test_utils::ExecutionBlockGenerator, - ExecutionLayer, ForkChoiceState, PayloadAttributes, + ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, @@ -120,7 +120,7 @@ impl InvalidPayloadRig { &self.harness.chain.canonical_head } - fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { + fn previous_forkchoice_update_params(&self) -> (ForkchoiceState, PayloadAttributes) { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let json = mock_execution_layer .server @@ -129,14 +129,17 @@ impl InvalidPayloadRig { let params = json.get("params").expect("no params"); let fork_choice_state_json = params.get(0).expect("no payload param"); - let fork_choice_state: JsonForkChoiceStateV1 = + let fork_choice_state: JsonForkchoiceStateV1 = serde_json::from_value(fork_choice_state_json.clone()).unwrap(); let payload_param_json = params.get(1).expect("no payload param"); let attributes: JsonPayloadAttributesV1 = serde_json::from_value(payload_param_json.clone()).unwrap(); - (fork_choice_state.into(), attributes.into()) + ( + fork_choice_state.into(), + JsonPayloadAttributes::V1(attributes).into(), + ) } fn previous_payload_attributes(&self) -> PayloadAttributes { @@ -991,20 +994,20 @@ async fn payload_preparation() { .await .unwrap(); - let payload_attributes = PayloadAttributes { - timestamp: rig - .harness + let payload_attributes = PayloadAttributes::new( + rig.harness .chain .slot_clock .start_of(next_slot) .unwrap() .as_secs(), - prev_randao: *head + *head .beacon_state .get_randao_mix(head.beacon_state.current_epoch()) .unwrap(), - suggested_fee_recipient: fee_recipient, - }; + fee_recipient, + None, + ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } @@ -1138,7 +1141,7 @@ async fn payload_preparation_before_transition_block() { let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); let latest_block_hash = rig.latest_execution_block_hash(); - assert_eq!(payload_attributes.suggested_fee_recipient, fee_recipient); + assert_eq!(payload_attributes.suggested_fee_recipient(), fee_recipient); assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } @@ -1385,18 +1388,16 @@ async fn build_optimistic_chain( .body() .execution_payload() .unwrap() - .execution_payload - == <_>::default(), + .is_default_with_empty_roots(), "the block *has not* undergone the merge transition" ); assert!( - post_transition_block + !post_transition_block .message() .body() 
.execution_payload() .unwrap() - .execution_payload - != <_>::default(), + .is_default_with_empty_roots(), "the block *has* undergone the merge transition" ); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs new file mode 100644 index 00000000000..b61bea12429 --- /dev/null +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -0,0 +1,121 @@ +#![cfg(test)] + +use std::collections::HashMap; + +use beacon_chain::test_utils::{ + generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, +}; +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, + types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, +}; +use lazy_static::lazy_static; + +pub const VALIDATOR_COUNT: usize = 64; + +lazy_static! { + static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +fn get_harness() -> BeaconChainHarness> { + let mut spec = E::default_spec(); + + spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .keypairs(KEYPAIRS.to_vec()) + .fresh_ephemeral_store() + .build(); + + harness.advance_slot(); + + harness +} + +#[tokio::test] +async fn test_sync_committee_rewards() { + let num_block_produced = MinimalEthSpec::slots_per_epoch(); + let harness = get_harness::(); + + let latest_block_root = harness + .extend_chain( + num_block_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Create and add sync committee message to op_pool + let sync_contributions = harness.make_sync_contributions( + &harness.get_current_state(), + latest_block_root, + harness.get_current_slot(), + RelativeSyncCommittee::Current, + ); + + harness + .process_sync_contributions(sync_contributions) + .unwrap(); + + // Add block + let chain = &harness.chain; + let (head_state, head_state_root) = harness.get_current_state_and_root(); + let target_slot = harness.get_current_slot() + 1; + + let (block_root, mut state) = harness + .add_attested_block_at_slot(target_slot, head_state, head_state_root, &[]) + .await + .unwrap(); + + let block = harness.get_block(block_root).unwrap(); + let parent_block = chain + .get_blinded_block(&block.parent_root()) + .unwrap() + .unwrap(); + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + let reward_payload = chain + .compute_sync_committee_rewards(block.message(), &mut state) + .unwrap(); + + let rewards = reward_payload + .iter() + .map(|reward| (reward.validator_index, reward.reward)) + .collect::>(); + + let proposer_index = state + .get_beacon_proposer_index(target_slot, &MinimalEthSpec::default_spec()) + .unwrap(); + + let mut mismatches = vec![]; + + for validator in state.validators() { + let validator_index = state + .clone() + .get_validator_index(&validator.pubkey) + .unwrap() + .unwrap(); + let pre_state_balance = parent_state.balances()[validator_index]; + let post_state_balance = state.balances()[validator_index]; + let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0); + + if validator_index == proposer_index { + continue; // Ignore proposer + } + + if pre_state_balance as i64 + *sync_committee_reward != post_state_balance as i64 { + mismatches.push(validator_index.to_string()); + } + } + + assert_eq!( + mismatches.len(), + 0, + "Expect 0 mismatches, but these validators have mismatches on balance: {} ", + 
mismatches.join(",") + ); +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8a6ea9cfe1a..2f40443b996 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,6 +2,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::builder::BeaconChainBuilder; +use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; @@ -22,6 +23,7 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::Arc; use std::time::Duration; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, HotColdDB, LevelDB, StoreConfig, @@ -68,6 +70,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() .build(); @@ -1013,8 +1016,8 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1123,8 +1126,8 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1255,8 +1258,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { #[tokio::test] async fn pruning_does_not_touch_blocks_prior_to_finalization() { - const HONEST_VALIDATOR_COUNT: usize = 16; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1350,8 +1353,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); 
let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1495,8 +1498,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1624,8 +1627,8 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -2529,6 +2532,91 @@ async fn revert_minority_fork_on_resume() { assert_eq!(heads.len(), 1); } +// This test checks whether the schema downgrade from the latest version to some minimum supported +// version is correct. This is the easiest schema test to write without historic versions of +// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually +// as old downgrades are deprecated. +#[tokio::test] +async fn schema_downgrade_to_min_version() { + let num_blocks_produced = E::slots_per_epoch() * 4; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec.clone(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let min_version = if harness.spec.capella_fork_epoch.is_some() { + // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that + // at all if Capella is enabled. + SchemaVersion(14) + } else { + SchemaVersion(11) + }; + + // Close the database to ensure everything is written to disk. + drop(store); + drop(harness); + + // Re-open the store. + let store = get_store(&db_path); + + // Downgrade. + let deposit_contract_deploy_block = 0; + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version, + store.logger().clone(), + spec, + ) + .expect("schema downgrade to minimum version should work"); + + // Upgrade back. + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + min_version, + CURRENT_SCHEMA_VERSION, + store.logger().clone(), + spec, + ) + .expect("schema upgrade from minimum version should work"); + + // Rescreate the harness. 
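+    // Resuming from the store that was just downgraded and re-upgraded checks that the database is still usable.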
+ let harness = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) + .logger(store.logger().clone()) + .resumed_disk_store(store.clone()) + .mock_execution_layer() + .build(); + + check_finalization(&harness, num_blocks_produced); + check_split_slot(&harness, store.clone()); + check_chain_dump(&harness, num_blocks_produced + 1); + check_iterators(&harness); + + // Check that downgrading beyond the minimum version fails (bound is *tight*). + let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version_sub_1, + harness.logger().clone(), + spec, + ) + .expect_err("should not downgrade below minimum version"); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 1e51b0ffb9b..239f55e7d38 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -45,6 +45,7 @@ fn get_valid_sync_committee_message( harness: &BeaconChainHarness>, slot: Slot, relative_sync_committee: RelativeSyncCommittee, + message_index: usize, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { let head_state = harness.chain.head_beacon_state_cloned(); let head_block_root = harness.chain.head_snapshot().beacon_block_root; @@ -52,7 +53,7 @@ fn get_valid_sync_committee_message( .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) .expect("sync messages should exist") - .get(0) + .get(message_index) .expect("first sync message should exist") .clone(); @@ -494,7 +495,7 @@ async fn unaggregated_gossip_verification() { let current_slot = harness.chain.slot().expect("should get slot"); let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = - get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0); macro_rules! assert_invalid { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => { @@ -644,7 +645,7 @@ async fn unaggregated_gossip_verification() { // **Incorrectly** create a sync message using the current sync committee let (next_valid_sync_committee_message, _, _, next_subnet_id) = - get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current, 1); assert_invalid!( "sync message on incorrect subnet", diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d80db132ef9..b4eabc8093f 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -19,7 +19,7 @@ use types::{ }; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; +pub const VALIDATOR_COUNT: usize = 48; lazy_static! { /// A cached set of keys. @@ -500,7 +500,7 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { // Move forward a slot so all queued attestations can be processed. 
harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) + .update_time(harness.chain.slot().unwrap()) .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) @@ -614,7 +614,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) + .update_time(harness.chain.slot().unwrap()) .unwrap(); let validators: Vec = (0..VALIDATOR_COUNT).collect(); diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 48ac0300c98..b79fc5e4073 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -10,3 +10,4 @@ sensitive_url = { path = "../../common/sensitive_url" } eth2 = { path = "../../common/eth2" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" +lighthouse_version = { path = "../../common/lighthouse_version" } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 3517d06b15b..255c2fdd19b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,6 +1,6 @@ use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ - BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, + AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, Slot, }; @@ -17,6 +17,9 @@ pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; /// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20). pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; +/// Default user agent for HTTP requests. 
+pub const DEFAULT_USER_AGENT: &str = lighthouse_version::VERSION; + #[derive(Clone)] pub struct Timeouts { get_header: Duration, @@ -41,23 +44,23 @@ pub struct BuilderHttpClient { client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, + user_agent: String, } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl) -> Result { + pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); + let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { - client: reqwest::Client::new(), + client, server, timeouts: Timeouts::default(), + user_agent, }) } - pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { - Ok(Self { - client: reqwest::Client::new(), - server, - timeouts, - }) + pub fn get_user_agent(&self) -> &str { + &self.user_agent } async fn get_with_timeout( @@ -160,7 +163,7 @@ impl BuilderHttpClient { } /// `GET /eth/v1/builder/header` - pub async fn get_builder_header>( + pub async fn get_builder_header>( &self, slot: Slot, parent_hash: ExecutionBlockHash, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index d01f2505cce..876458eea52 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,6 +6,10 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" +logging = { path = "../../common/logging" } +state_processing = { path = "../../consensus/state_processing" } +operation_pool = { path = "../operation_pool" } +tokio = "1.14.0" [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -35,7 +39,7 @@ time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } -slasher = { path = "../../slasher" } +slasher = { path = "../../slasher", default-features = false } slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs new file mode 100644 index 00000000000..272ee908fba --- /dev/null +++ b/beacon_node/client/src/address_change_broadcast.rs @@ -0,0 +1,322 @@ +use crate::*; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{debug, info, warn, Logger}; +use slot_clock::SlotClock; +use std::cmp; +use std::collections::HashSet; +use std::mem; +use std::time::Duration; +use tokio::sync::mpsc::UnboundedSender; +use tokio::time::sleep; +use types::EthSpec; + +/// The size of each chunk of addresses changes to be broadcast at the Capella +/// fork. +const BROADCAST_CHUNK_SIZE: usize = 128; +/// The delay between broadcasting each chunk. +const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500); + +/// If the Capella fork has already been reached, `broadcast_address_changes` is +/// called immediately. +/// +/// If the Capella fork has not been reached, waits until the start of the fork +/// epoch and then calls `broadcast_address_changes`. +pub async fn broadcast_address_changes_at_capella( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let spec = &chain.spec; + let slot_clock = &chain.slot_clock; + + let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch { + epoch.start_slot(T::EthSpec::slots_per_epoch()) + } else { + // Exit now if Capella is not defined. + return; + }; + + // Wait until the Capella fork epoch. 
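+    // If the duration to the fork slot can't be computed, sleep for one slot and try again.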
+ while chain.slot().map_or(true, |slot| slot < capella_fork_slot) { + match slot_clock.duration_to_slot(capella_fork_slot) { + Some(duration) => { + // Sleep until the Capella fork. + sleep(duration).await; + break; + } + None => { + // We were unable to read the slot clock; wait another slot + // and then try again. + sleep(slot_clock.slot_duration()).await; + } + } + } + + // The following function will be called in two scenarios: + // + // 1. The node has been running for some time and the Capella fork has just + // been reached. + // 2. The node has just started and it is *after* the Capella fork. + broadcast_address_changes(chain, network_send, log).await +} + +/// Broadcasts any address changes that are flagged for broadcasting at the +/// Capella fork epoch. +/// +/// Address changes are published in chunks, with a delay between each chunk. +/// This helps reduce the load on the P2P network and also helps prevent us from +/// clogging our `network_send` channel and being late to publish +/// blocks, attestations, etc. +pub async fn broadcast_address_changes( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let head = chain.head_snapshot(); + let mut changes = chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec); + + while !changes.is_empty() { + // This `split_off` approach is to allow us to have owned chunks of the + // `changes` vec. The `std::slice::Chunks` method uses references and + // the `itertools` iterator that achieves this isn't `Send`, so it doesn't + // work well with the `sleep` at the end of the loop. + let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len())); + let chunk = mem::replace(&mut changes, tail); + + let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE); + let mut num_ok = 0; + let mut num_err = 0; + + // Publish each individual address change. + for address_change in chunk { + let validator_index = address_change.message.validator_index; + + let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change)); + let message = NetworkMessage::Publish { + messages: vec![pubsub_message], + }; + // It seems highly unlikely that this unbounded send will fail, but + // we handle the result nonetheless. + if let Err(e) = network_send.send(message) { + debug!( + log, + "Failed to publish change message"; + "error" => ?e, + "validator_index" => validator_index + ); + num_err += 1; + } else { + debug!( + log, + "Published address change message"; + "validator_index" => validator_index + ); + num_ok += 1; + published_indices.insert(validator_index); + } + } + + // Remove any published indices from the list of indices that need to be + // published. + chain + .op_pool + .register_indices_broadcasted_at_capella(&published_indices); + + info!( + log, + "Published address change messages"; + "num_published" => num_ok, + ); + + if num_err > 0 { + warn!( + log, + "Failed to publish address changes"; + "info" => "failed messages will be retried", + "num_unable_to_publish" => num_err, + ); + } + + sleep(BROADCAST_CHUNK_DELAY).await; + } + + debug!( + log, + "Address change routine complete"; + ); +} + +#[cfg(not(debug_assertions))] // Tests run too slow in debug.
+#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use operation_pool::ReceivedPreCapella; + use state_processing::{SigVerifiedOp, VerifyOperation}; + use std::collections::HashSet; + use tokio::sync::mpsc; + use types::*; + + type E = MainnetEthSpec; + + pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3; + pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42); + + struct Tester { + harness: BeaconChainHarness>, + /// Changes which should be broadcast at the Capella fork. + received_pre_capella_changes: Vec>, + /// Changes which should *not* be broadcast at the Capella fork. + not_received_pre_capella_changes: Vec>, + } + + impl Tester { + fn new() -> Self { + let altair_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); + let capella_fork_epoch = Epoch::new(2); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { + harness, + received_pre_capella_changes: <_>::default(), + not_received_pre_capella_changes: <_>::default(), + } + } + + fn produce_verified_address_change( + &self, + validator_index: u64, + ) -> SigVerifiedOp { + let change = self + .harness + .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS); + let head = self.harness.chain.head_snapshot(); + + change + .validate(&head.beacon_state, &self.harness.spec) + .unwrap() + } + + fn produce_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + fn produce_not_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.not_received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + async fn run(self) { + let harness = self.harness; + let chain = harness.chain.clone(); + + let mut broadcast_indices = HashSet::new(); + for change in self.received_pre_capella_changes { + broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::Yes); + } + + let mut non_broadcast_indices = HashSet::new(); + for change in self.not_received_pre_capella_changes { + non_broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::No); + } + + harness.set_current_slot( + chain + .spec + .capella_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()), + ); + + let (sender, mut receiver) = mpsc::unbounded_channel(); + + broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await; + + let mut broadcasted_changes = vec![]; + while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await { + match messages.pop().unwrap() { + PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change), + _ => panic!("unexpected message"), + } + } + + assert_eq!( + broadcasted_changes.len(), + broadcast_indices.len(), + "all expected 
changes should have been broadcast" + ); + + for broadcasted in &broadcasted_changes { + assert!( + !non_broadcast_indices.contains(&broadcasted.message.validator_index), + "messages not flagged for broadcast should not have been broadcast" + ); + } + + let head = chain.head_snapshot(); + assert!( + chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &head.beacon_state, + &chain.spec, + ) + .is_empty(), + "there shouldn't be any capella broadcast changes left in the op pool" + ); + } + } + + // Useful for generating even-numbered indices. Required since only even + // numbered genesis validators have BLS credentials. + fn even_indices(start: u64, count: usize) -> Vec { + (start..).filter(|i| i % 2 == 0).take(count).collect() + } + + #[tokio::test] + async fn one_chunk() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, 4)) + .produce_not_received_pre_capella_changes(even_indices(10, 4)) + .run() + .await; + } + + #[tokio::test] + async fn multiple_chunks() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2)) + .run() + .await; + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3b016ebda9c..d4b785cb119 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,3 +1,4 @@ +use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; @@ -346,12 +347,6 @@ where while block.slot() % slots_per_epoch != 0 { block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot, - ); - debug!( context.log(), "Searching for aligned checkpoint block"; @@ -802,6 +797,25 @@ where // Spawns a routine that polls the `exchange_transition_configuration` endpoint. execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); } + + // Spawn a service to publish BLS to execution changes at the Capella fork. 
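+            // The service waits until the Capella fork slot and then publishes, in chunks, any address changes received before the fork.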
+ if let Some(network_senders) = self.network_senders { + let inner_chain = beacon_chain.clone(); + let broadcast_context = + runtime_context.service_context("addr_bcast".to_string()); + let log = broadcast_context.log().clone(); + broadcast_context.executor.spawn( + async move { + broadcast_address_changes_at_capella( + &inner_chain, + network_senders.network_send(), + &log, + ) + .await + }, + "addr_broadcast", + ); + } } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 22b868256ad..95a00b37492 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -79,6 +79,7 @@ pub struct Config { pub monitoring_api: Option, pub slasher: Option, pub logger_config: LoggerConfig, + pub always_prefer_builder_payload: bool, } impl Default for Config { @@ -105,6 +106,7 @@ impl Default for Config { validator_monitor_pubkeys: vec![], validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), + always_prefer_builder_payload: false, } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 24df8740863..584a0d736de 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,5 +1,6 @@ extern crate slog; +mod address_change_broadcast; pub mod config; mod metrics; mod notifier; @@ -45,9 +46,18 @@ impl Client { self.http_metrics_listen_addr } - /// Returns the port of the client's libp2p stack, if it was started. - pub fn libp2p_listen_port(&self) -> Option { - self.network_globals.as_ref().map(|n| n.listen_port_tcp()) + /// Returns the ipv4 port of the client's libp2p stack, if it was started. + pub fn libp2p_listen_ipv4_port(&self) -> Option { + self.network_globals + .as_ref() + .and_then(|n| n.listen_port_tcp4()) + } + + /// Returns the ipv6 port of the client's libp2p stack, if it was started. + pub fn libp2p_listen_ipv6_port(&self) -> Option { + self.network_globals + .as_ref() + .and_then(|n| n.listen_port_tcp6()) } /// Returns the list of libp2p addresses the client is listening to. 
diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1da7a79707d..1105bc41f67 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,6 @@ use crate::metrics; use beacon_chain::{ + capella_readiness::CapellaReadiness, merge_readiness::{MergeConfig, MergeReadiness}, BeaconChain, BeaconChainTypes, ExecutionStatus, }; @@ -313,6 +314,7 @@ pub fn spawn_notifier( eth1_logging(&beacon_chain, &log); merge_readiness_logging(current_slot, &beacon_chain, &log).await; + capella_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -350,12 +352,15 @@ async fn merge_readiness_logging( } if merge_completed && !has_execution_layer { - error!( - log, - "Execution endpoint required"; - "info" => "you need an execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html" - ); + if !beacon_chain.is_time_to_prepare_for_capella(current_slot) { + // logging of the EE being offline is handled in `capella_readiness_logging()` + error!( + log, + "Execution endpoint required"; + "info" => "you need an execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + } return; } @@ -419,6 +424,65 @@ async fn merge_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Capella +async fn capella_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let capella_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| payload.withdrawals_root().is_ok()); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if capella_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_capella(current_slot) + { + return; + } + + if capella_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Capella enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + return; + } + + match beacon_chain.check_capella_readiness().await { + CapellaReadiness::Ready => { + info!( + log, + "Ready for Capella"; + "info" => "ensure the execution endpoint is updated to the latest Capella/Shanghai release" + ) + } + readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not ready for Capella"; + "hint" => "the execution endpoint may be offline", + "info" => %readiness, + ) + } + readiness => warn!( + log, + "Not ready for Capella"; + "hint" => "try updating the execution endpoint", + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index fb988d73989..9e8179aff4f 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -21,7 +21,7 @@ hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../../consensus/ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../../consensus/ssz_derive" } tree_hash = { version = "0.4.1", path = "../../consensus/tree_hash" } parking_lot = "0.12.0" slog = "2.5.2" diff --git 
a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 069a6e4aade..cd680478cc5 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -697,6 +697,7 @@ mod fast { let web3 = eth1.web3(); let now = get_block_number(&web3).await; + let spec = MainnetEthSpec::default_spec(); let service = Service::new( Config { endpoint: Eth1Endpoint::NoAuth( @@ -710,7 +711,7 @@ mod fast { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + spec.clone(), ) .unwrap(); let client = diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index d1190d85da0..786472ed811 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -26,6 +26,7 @@ eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } eth2_ssz_types = { version = "0.2.2", path = "../../consensus/ssz_types" } eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } +superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" tree_hash = { version = "0.4.1", path = "../../consensus/tree_hash" } @@ -40,9 +41,9 @@ lazy_static = "1.4.0" ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } +mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } +ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index f023c038aec..e9b7dcc17f3 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -1,4 +1,5 @@ use crate::{ + json_structures::JsonWithdrawal, keccak::{keccak256, KeccakHasher}, metrics, Error, ExecutionLayer, }; @@ -6,39 +7,51 @@ use ethers_core::utils::rlp::RlpStream; use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ - map_execution_block_header_fields, Address, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayload, Hash256, Hash64, Uint256, + map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash, + ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256, }; impl ExecutionLayer { /// Verify `payload.block_hash` locally within Lighthouse. /// /// No remote calls to the execution client will be made, so this is quite a cheap check. - pub fn verify_payload_block_hash(&self, payload: &ExecutionPayload) -> Result<(), Error> { + pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a // better alternative when one appears, possibly following Reth. 
let rlp_transactions_root = ordered_trie_root::( - payload.transactions.iter().map(|txn_bytes| &**txn_bytes), + payload.transactions().iter().map(|txn_bytes| &**txn_bytes), ); + // Calculate withdrawals root (post-Capella). + let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() { + Some(ordered_trie_root::( + withdrawals.iter().map(|withdrawal| { + rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone())) + }), + )) + } else { + None + }; + // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( payload, KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), rlp_transactions_root, + rlp_withdrawals_root, ); // Hash the RLP encoding of the block header. let rlp_block_header = rlp_encode_block_header(&exec_block_header); let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); - if header_hash != payload.block_hash { + if header_hash != payload.block_hash() { return Err(Error::BlockHashMismatch { computed: header_hash, - payload: payload.block_hash, + payload: payload.block_hash(), transactions_root: rlp_transactions_root, }); } @@ -47,13 +60,27 @@ impl ExecutionLayer { } } +/// RLP encode a withdrawal. +pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec { + let mut rlp_stream = RlpStream::new(); + rlp_stream.begin_list(4); + rlp_stream.append(&withdrawal.index); + rlp_stream.append(&withdrawal.validator_index); + rlp_stream.append(&withdrawal.address); + rlp_stream.append(&withdrawal.amount); + rlp_stream.out().into() +} + /// RLP encode an execution block header. pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec { let mut rlp_header_stream = RlpStream::new(); rlp_header_stream.begin_unbounded_list(); - map_execution_block_header_fields!(&header, |_, field| { + map_execution_block_header_fields_except_withdrawals!(&header, |_, field| { rlp_header_stream.append(field); }); + if let Some(withdrawals_root) = &header.withdrawals_root { + rlp_header_stream.append(withdrawals_root); + } rlp_header_stream.finalize_unbounded_list(); rlp_header_stream.out().into() } @@ -99,6 +126,7 @@ mod test { mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), + withdrawals_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -126,6 +154,7 @@ mod test { mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(), nonce: 
Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), + withdrawals_root: None, }; let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -154,6 +183,7 @@ mod test { mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x34187b238_u64.into(), + withdrawals_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ba0a37736b0..3ecb36d0938 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,14 +1,26 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; +use crate::http::{ + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, +}; +use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; pub use ethers_core::types::Transaction; +use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; -pub use json_structures::TransitionConfigurationV1; +pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; use strum::IntoStaticStr; +use superstruct::superstruct; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - Hash256, Uint256, VariableList, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, + Withdrawal, Withdrawals, }; +use types::{ExecutionPayloadCapella, ExecutionPayloadMerge}; pub mod auth; pub mod http; @@ -38,7 +50,13 @@ pub enum Error { PayloadConversionLogicFlaw, DeserializeTransaction(ssz_types::Error), DeserializeTransactions(ssz_types::Error), + DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), + IncorrectStateVariant, + RequiredMethodUnsupported(&'static str), + UnsupportedForkVariant(String), + BadConversion(String), + RlpDecoderError(rlp::DecoderError), } impl From for Error { @@ -72,6 +90,12 @@ impl From for Error { } } +impl From for Error { + 
fn from(e: rlp::DecoderError) -> Self { + Error::RlpDecoderError(e) + } +} + #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { @@ -111,9 +135,18 @@ pub struct ExecutionBlock { pub timestamp: u64, } -/// Representation of an exection block with enough detail to reconstruct a payload. +/// Representation of an execution block with enough detail to reconstruct a payload. +#[superstruct( + variants(Merge, Capella), + variant_attributes( + derive(Clone, Debug, PartialEq, Serialize, Deserialize,), + serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] pub struct ExecutionBlockWithTransactions { pub parent_hash: ExecutionBlockHash, #[serde(alias = "miner")] @@ -138,13 +171,132 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, + #[superstruct(only(Capella))] + pub withdrawals: Vec, } -#[derive(Clone, Copy, Debug, PartialEq)] +impl TryFrom> for ExecutionBlockWithTransactions { + type Error = Error; + + fn try_from(payload: ExecutionPayload) -> Result { + let json_payload = match payload { + ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + }), + ExecutionPayload::Capella(block) => { + Self::Capella(ExecutionBlockWithTransactionsCapella { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), + }) + } + }; + Ok(json_payload) + } +} + +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct PayloadAttributes { + #[superstruct(getter(copy))] pub timestamp: u64, + #[superstruct(getter(copy))] pub prev_randao: Hash256, + #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, +} + +impl PayloadAttributes { + pub fn new( + timestamp: u64, + 
prev_randao: Hash256, + suggested_fee_recipient: Address, + withdrawals: Option>, + ) -> Self { + match withdrawals { + Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + None => PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient, + }), + } + } +} + +impl From for SsePayloadAttributes { + fn from(pa: PayloadAttributes) -> Self { + match pa { + PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient, + }) => Self::V1(SsePayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient, + }), + PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }) => Self::V2(SsePayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + } + } } #[derive(Clone, Debug, PartialEq)] @@ -166,3 +318,171 @@ pub struct ProposeBlindedBlockResponse { pub latest_valid_hash: Option, pub validation_error: Option, } + +#[superstruct( + variants(Merge, Capella), + variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, PartialEq)] +pub struct GetPayloadResponse { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + pub block_value: Uint256, +} + +impl<'a, T: EthSpec> From> for ExecutionPayloadRef<'a, T> { + fn from(response: GetPayloadResponseRef<'a, T>) -> Self { + map_get_payload_response_ref_into_execution_payload_ref!(&'a _, response, |inner, cons| { + cons(&inner.execution_payload) + }) + } +} + +impl From> for ExecutionPayload { + fn from(response: GetPayloadResponse) -> Self { + map_get_payload_response_into_execution_payload!(response, |inner, cons| { + cons(inner.execution_payload) + }) + } +} + +impl From> for (ExecutionPayload, Uint256) { + fn from(response: GetPayloadResponse) -> Self { + match response { + GetPayloadResponse::Merge(inner) => ( + ExecutionPayload::Merge(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Capella(inner) => ( + ExecutionPayload::Capella(inner.execution_payload), + inner.block_value, + ), + } + } +} + +impl GetPayloadResponse { + pub fn execution_payload_ref(&self) -> ExecutionPayloadRef { + self.to_ref().into() + } +} + +#[derive(Clone, Debug)] +pub struct ExecutionPayloadBodyV1 { + pub transactions: Transactions, + pub withdrawals: Option>, +} + +impl ExecutionPayloadBodyV1 { + pub fn to_payload( + self, + header: ExecutionPayloadHeader, + ) -> Result, String> { + match header { + ExecutionPayloadHeader::Merge(header) => { + if self.withdrawals.is_some() { + return Err(format!( + "block {} is merge but payload body has withdrawals", + header.block_hash + )); + } + Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + 
timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + })) + } + ExecutionPayloadHeader::Capella(header) => { + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + })) + } else { + Err(format!( + "block {} is capella but payload body doesn't have withdrawals", + header.block_hash + )) + } + } + } + } +} + +#[derive(Clone, Copy, Debug)] +pub struct EngineCapabilities { + pub new_payload_v1: bool, + pub new_payload_v2: bool, + pub forkchoice_updated_v1: bool, + pub forkchoice_updated_v2: bool, + pub get_payload_bodies_by_hash_v1: bool, + pub get_payload_bodies_by_range_v1: bool, + pub get_payload_v1: bool, + pub get_payload_v2: bool, + pub exchange_transition_configuration_v1: bool, +} + +impl EngineCapabilities { + pub fn to_response(&self) -> Vec<&str> { + let mut response = Vec::new(); + if self.new_payload_v1 { + response.push(ENGINE_NEW_PAYLOAD_V1); + } + if self.new_payload_v2 { + response.push(ENGINE_NEW_PAYLOAD_V2); + } + if self.forkchoice_updated_v1 { + response.push(ENGINE_FORKCHOICE_UPDATED_V1); + } + if self.forkchoice_updated_v2 { + response.push(ENGINE_FORKCHOICE_UPDATED_V2); + } + if self.get_payload_bodies_by_hash_v1 { + response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1); + } + if self.get_payload_bodies_by_range_v1 { + response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1); + } + if self.get_payload_v1 { + response.push(ENGINE_GET_PAYLOAD_V1); + } + if self.get_payload_v2 { + response.push(ENGINE_GET_PAYLOAD_V2); + } + if self.exchange_transition_configuration_v1 { + response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); + } + + response + } +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 74536630128..993957450bc 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,8 +7,10 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; +use std::collections::HashSet; +use tokio::sync::Mutex; -use std::time::Duration; +use std::time::{Duration, Instant}; use types::EthSpec; pub use deposit_log::{DepositLog, Log}; @@ -29,22 +31,62 @@ pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; +pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; +pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; +pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const 
ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); +pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; +pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; +pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); + pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; +pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; - -/// Contains methods to convert arbitary bytes to an ETH2 deposit contract object. +/// This code is returned by all clients when a method is not supported +/// (verified geth, nethermind, erigon, besu) +pub const METHOD_NOT_FOUND_CODE: i64 = -32601; + +pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ + ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V1, + ENGINE_GET_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, +]; + +/// This is necessary because a user might run a capella-enabled version of +/// lighthouse before they update to a capella-enabled execution engine. +// TODO (mark): rip this out once we are post-capella on mainnet +pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: false, + forkchoice_updated_v1: true, + forkchoice_updated_v2: false, + get_payload_bodies_by_hash_v1: false, + get_payload_bodies_by_range_v1: false, + get_payload_v1: true, + get_payload_v2: false, + exchange_transition_configuration_v1: true, +}; + +/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
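The constants above introduce engine_exchangeCapabilities together with a hard-coded pre-Capella capability set, used whenever an execution engine predates the method and answers with JSON-RPC error -32601. Below is a minimal, self-contained sketch of that fallback decision; the capabilities struct is deliberately reduced to three flags and the error type is a stand-in, not the real Lighthouse types.

const METHOD_NOT_FOUND_CODE: i64 = -32601;

#[derive(Clone, Copy, Debug, PartialEq)]
struct EngineCapabilities {
    new_payload_v2: bool,
    forkchoice_updated_v2: bool,
    get_payload_v2: bool,
}

const PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
    new_payload_v2: false,
    forkchoice_updated_v2: false,
    get_payload_v2: false,
};

#[derive(Debug)]
enum RpcError {
    ServerMessage { code: i64, message: String },
    Other(String),
}

fn capabilities_from_response(
    response: Result<Vec<String>, RpcError>,
) -> Result<EngineCapabilities, RpcError> {
    match response {
        // Pre-Capella engines do not implement the method at all: assume the V1-only set.
        Err(RpcError::ServerMessage { code, .. }) if code == METHOD_NOT_FOUND_CODE => {
            Ok(PRE_CAPELLA_ENGINE_CAPABILITIES)
        }
        Err(other) => Err(other),
        // Otherwise, derive each flag from the method list the engine reported.
        Ok(methods) => Ok(EngineCapabilities {
            new_payload_v2: methods.iter().any(|m| m == "engine_newPayloadV2"),
            forkchoice_updated_v2: methods.iter().any(|m| m == "engine_forkchoiceUpdatedV2"),
            get_payload_v2: methods.iter().any(|m| m == "engine_getPayloadV2"),
        }),
    }
}

fn main() {
    let old_engine: Result<Vec<String>, RpcError> = Err(RpcError::ServerMessage {
        code: METHOD_NOT_FOUND_CODE,
        message: "Method not found".into(),
    });
    let caps = capabilities_from_response(old_engine).unwrap();
    assert_eq!(caps, PRE_CAPELLA_ENGINE_CAPABILITIES);
}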
pub mod deposit_log { use ssz::Decode; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; @@ -519,10 +561,39 @@ pub mod deposit_methods { } } +#[derive(Clone, Debug)] +pub struct CapabilitiesCacheEntry { + engine_capabilities: EngineCapabilities, + fetch_time: Instant, +} + +impl CapabilitiesCacheEntry { + pub fn new(engine_capabilities: EngineCapabilities) -> Self { + Self { + engine_capabilities, + fetch_time: Instant::now(), + } + } + + pub fn engine_capabilities(&self) -> EngineCapabilities { + self.engine_capabilities + } + + pub fn age(&self) -> Duration { + Instant::now().duration_since(self.fetch_time) + } + + /// returns `true` if the entry's age is >= age_limit + pub fn older_than(&self, age_limit: Option) -> bool { + age_limit.map_or(false, |limit| self.age() >= limit) + } +} + pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, + pub engine_capabilities_cache: Mutex>, auth: Option, } @@ -535,6 +606,7 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + engine_capabilities_cache: Mutex::new(None), auth: None, }) } @@ -548,6 +620,7 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + engine_capabilities_cache: Mutex::new(None), auth: Some(auth), }) } @@ -654,21 +727,40 @@ impl HttpJsonRpc { pub async fn get_block_by_hash_with_txns( &self, block_hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, Error> { let params = json!([block_hash, true]); - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await + Ok(Some(match fork { + ForkName::Merge => ExecutionBlockWithTransactions::Merge( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Capella => ExecutionBlockWithTransactions::Capella( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Base | ForkName::Altair => { + return Err(Error::UnsupportedForkVariant(format!( + "called get_block_by_hash_with_txns with fork {:?}", + fork + ))) + } + })) } pub async fn new_payload_v1( &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -681,13 +773,30 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v2( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let params = json!([JsonExecutionPayload::from(execution_payload)]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V2, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonExecutionPayloadV1 = self + let payload_v1: JsonExecutionPayloadV1 = self .rpc_request( ENGINE_GET_PAYLOAD_V1, params, @@ -695,17 +804,58 @@ impl HttpJsonRpc { ) .await?; - Ok(response.into()) + Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge 
{ + execution_payload: payload_v1.into(), + // Set the V1 payload values from the EE to be zero. This simulates + // the pre-block-value functionality of always choosing the builder + // block. + block_value: Uint256::zero(), + })) + } + + pub async fn get_payload_v2( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { + ForkName::Merge => { + let response: JsonGetPayloadResponseV1 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V1(response).into()) + } + ForkName::Capella => { + let response: JsonGetPayloadResponseV2 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V2(response).into()) + } + ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( + "called get_payload_v2 with {}", + fork_name + ))), + } } pub async fn forkchoice_updated_v1( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), - payload_attributes.map(JsonPayloadAttributesV1::from) + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) ]); let response: JsonForkchoiceUpdatedV1Response = self @@ -719,6 +869,71 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v2( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V2, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + + pub async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([block_hashes]); + + let response: Vec>> = self + .rpc_request( + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + params, + ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response + .into_iter() + .map(|opt_json| opt_json.map(From::from)) + .collect()) + } + + pub async fn get_payload_bodies_by_range_v1( + &self, + start: u64, + count: u64, + ) -> Result>>, Error> { + #[derive(Serialize)] + #[serde(transparent)] + struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] u64); + + let params = json!([Quantity(start), Quantity(count)]); + let response: Vec>> = self + .rpc_request( + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + params, + ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response + .into_iter() + .map(|opt_json| opt_json.map(From::from)) + .collect()) + } + pub async fn exchange_transition_configuration_v1( &self, transition_configuration: TransitionConfigurationV1, @@ -736,6 +951,122 @@ impl HttpJsonRpc { Ok(response) } + + pub async fn exchange_capabilities(&self) -> Result { + let params = json!([LIGHTHOUSE_CAPABILITIES]); + + let response: Result, _> = self + .rpc_request( + ENGINE_EXCHANGE_CAPABILITIES, + params, + ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await; + + match 
response { + // TODO (mark): rip this out once we are post capella on mainnet + Err(error) => match error { + Error::ServerMessage { code, message: _ } if code == METHOD_NOT_FOUND_CODE => { + Ok(PRE_CAPELLA_ENGINE_CAPABILITIES) + } + _ => Err(error), + }, + Ok(capabilities) => Ok(EngineCapabilities { + new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), + new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), + forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), + forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + get_payload_bodies_by_hash_v1: capabilities + .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), + get_payload_bodies_by_range_v1: capabilities + .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), + get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), + get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), + exchange_transition_configuration_v1: capabilities + .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), + }), + } + } + + pub async fn clear_exchange_capabilties_cache(&self) { + *self.engine_capabilities_cache.lock().await = None; + } + + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + let mut lock = self.engine_capabilities_cache.lock().await; + + if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) { + Ok(lock.engine_capabilities()) + } else { + let engine_capabilities = self.exchange_capabilities().await?; + *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); + Ok(engine_capabilities) + } + } + + // automatically selects the latest version of + // new_payload that the execution engine supports + pub async fn new_payload( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.new_payload_v2 { + self.new_payload_v2(execution_payload).await + } else if engine_capabilities.new_payload_v1 { + self.new_payload_v1(execution_payload).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayload")) + } + } + + // automatically selects the latest version of + // get_payload that the execution engine supports + pub async fn get_payload( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.get_payload_v2 { + self.get_payload_v2(fork_name, payload_id).await + } else if engine_capabilities.new_payload_v1 { + self.get_payload_v1(payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayload")) + } + } + + // automatically selects the latest version of + // forkchoice_updated that the execution engine supports + pub async fn forkchoice_updated( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if 
engine_capabilities.forkchoice_updated_v2 { + self.forkchoice_updated_v2(forkchoice_state, payload_attributes) + .await + } else if engine_capabilities.forkchoice_updated_v1 { + self.forkchoice_updated_v1(forkchoice_state, payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) + } + } } #[cfg(test)] @@ -746,7 +1077,7 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{MainnetEthSpec, Transactions, Unsigned, VariableList}; + use types::{ExecutionPayloadMerge, MainnetEthSpec, Transactions, Unsigned, VariableList}; struct Tester { server: MockServer, @@ -852,10 +1183,10 @@ mod test { fn encode_transactions( transactions: Transactions, ) -> Result { - let ep: JsonExecutionPayloadV1 = JsonExecutionPayloadV1 { + let ep: JsonExecutionPayload = JsonExecutionPayload::V1(JsonExecutionPayloadV1 { transactions, ..<_>::default() - }; + }); let json = serde_json::to_value(&ep)?; Ok(json.get("transactions").unwrap().clone()) } @@ -882,8 +1213,8 @@ mod test { json.as_object_mut() .unwrap() .insert("transactions".into(), transactions); - let ep: JsonExecutionPayloadV1 = serde_json::from_value(json)?; - Ok(ep.transactions) + let ep: JsonExecutionPayload = serde_json::from_value(json)?; + Ok(ep.transactions().clone()) } fn assert_transactions_serde( @@ -1029,16 +1360,16 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), - }), + })), ) .await; }, @@ -1064,16 +1395,16 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), - }), + })), ) .await }) @@ -1109,22 +1440,24 @@ mod test { .assert_request_equals( |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload { - parent_hash: ExecutionBlockHash::repeat_byte(0), - fee_recipient: Address::repeat_byte(1), - state_root: Hash256::repeat_byte(1), - receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), - prev_randao: Hash256::repeat_byte(1), - block_number: 0, - gas_limit: 1, - gas_used: 2, - timestamp: 42, - extra_data: vec![].into(), - base_fee_per_gas: Uint256::from(1), - block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), - }) + .new_payload_v1::(ExecutionPayload::Merge( + ExecutionPayloadMerge { + parent_hash: ExecutionBlockHash::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipts_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + prev_randao: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: ExecutionBlockHash::repeat_byte(1), + transactions: vec![].into(), + }, + )) .await; }, json!({ @@ -1154,22 +1487,24 @@ mod test { 
Tester::new(false) .assert_auth_failure(|client| async move { client - .new_payload_v1::(ExecutionPayload { - parent_hash: ExecutionBlockHash::repeat_byte(0), - fee_recipient: Address::repeat_byte(1), - state_root: Hash256::repeat_byte(1), - receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), - prev_randao: Hash256::repeat_byte(1), - block_number: 0, - gas_limit: 1, - gas_used: 2, - timestamp: 42, - extra_data: vec![].into(), - base_fee_per_gas: Uint256::from(1), - block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), - }) + .new_payload_v1::(ExecutionPayload::Merge( + ExecutionPayloadMerge { + parent_hash: ExecutionBlockHash::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipts_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + prev_randao: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: ExecutionBlockHash::repeat_byte(1), + transactions: vec![].into(), + }, + )) .await }) .await; @@ -1182,7 +1517,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1208,7 +1543,7 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1247,16 +1582,16 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), - }) + })) ) .await; }, @@ -1294,16 +1629,16 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), - }) + })) ) .await .unwrap(); @@ -1357,12 +1692,13 @@ mod test { } })], |client| async move { - let payload = client + let payload: ExecutionPayload<_> = client .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await - .unwrap(); + .unwrap() + .into(); - let expected = ExecutionPayload { + let expected = ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: 
ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1377,7 +1713,7 @@ mod test { base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), - }; + }); assert_eq!(payload, expected); }, @@ -1387,7 +1723,7 @@ mod test { // engine_newPayloadV1 REQUEST validation |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge{ parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1402,7 +1738,7 @@ mod test { base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), transactions: vec![].into(), - }) + })) .await; }, json!({ @@ -1441,7 +1777,7 @@ mod test { })], |client| async move { let response = client - .new_payload_v1::(ExecutionPayload::default()) + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge::default())) .await .unwrap(); @@ -1460,7 +1796,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), @@ -1499,7 +1835,7 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 560569c92f2..6d33bbabe2a 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,7 +1,11 @@ use super::*; use serde::{Deserialize, Serialize}; use strum::EnumString; -use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; +use superstruct::superstruct; +use types::{ + EthSpec, ExecutionBlockHash, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, +}; +use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -56,9 +60,18 @@ pub struct JsonPayloadIdResponse { pub payload_id: PayloadId, } -#[derive(Debug, PartialEq, 
Default, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct JsonExecutionPayloadHeaderV1 { +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Debug, PartialEq, Default, Serialize, Deserialize,), + serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] +pub struct JsonExecutionPayload { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, @@ -79,209 +92,265 @@ pub struct JsonExecutionPayloadHeaderV1 { #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, - pub transactions_root: Hash256, + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] + pub transactions: Transactions, + #[superstruct(only(V2))] + pub withdrawals: VariableList, } -impl From> for ExecutionPayloadHeader { - fn from(e: JsonExecutionPayloadHeaderV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonExecutionPayloadHeaderV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions_root, - } = e; +impl From> for JsonExecutionPayloadV1 { + fn from(payload: ExecutionPayloadMerge) -> Self { + JsonExecutionPayloadV1 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for JsonExecutionPayloadV2 { + fn from(payload: ExecutionPayloadCapella) -> Self { + JsonExecutionPayloadV2 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + } + } +} - Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions_root, +impl From> for JsonExecutionPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + match execution_payload { + ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), + ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), } } } -#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct 
JsonExecutionPayloadV1 { - pub parent_hash: ExecutionBlockHash, - pub fee_recipient: Address, - pub state_root: Hash256, - pub receipts_root: Hash256, - #[serde(with = "serde_logs_bloom")] - pub logs_bloom: FixedVector, - pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub block_number: u64, +impl From> for ExecutionPayloadMerge { + fn from(payload: JsonExecutionPayloadV1) -> Self { + ExecutionPayloadMerge { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for ExecutionPayloadCapella { + fn from(payload: JsonExecutionPayloadV2) -> Self { + ExecutionPayloadCapella { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + } + } +} + +impl From> for ExecutionPayload { + fn from(json_execution_payload: JsonExecutionPayload) -> Self { + match json_execution_payload { + JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), + JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), + } + } +} + +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Debug, PartialEq, Serialize, Deserialize), + serde(bound = "T: EthSpec", rename_all = "camelCase") + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub struct JsonGetPayloadResponse { + #[superstruct(only(V1), partial_getter(rename = "execution_payload_v1"))] + pub execution_payload: JsonExecutionPayloadV1, + #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] + pub execution_payload: JsonExecutionPayloadV2, + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub block_value: Uint256, +} + +impl From> for GetPayloadResponse { + fn from(json_get_payload_response: JsonGetPayloadResponse) -> Self { + match json_get_payload_response { + JsonGetPayloadResponse::V1(response) => { + GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V2(response) => { + GetPayloadResponse::Capella(GetPayloadResponseCapella { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonWithdrawal { #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub gas_limit: u64, + pub index: u64, #[serde(with 
= "eth2_serde_utils::u64_hex_be")] - pub gas_used: u64, + pub validator_index: u64, + pub address: Address, #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub timestamp: u64, - #[serde(with = "ssz_types::serde_utils::hex_var_list")] - pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::u256_hex_be")] - pub base_fee_per_gas: Uint256, - pub block_hash: ExecutionBlockHash, - #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] - pub transactions: - VariableList, T::MaxTransactionsPerPayload>, + pub amount: u64, } -impl From> for JsonExecutionPayloadV1 { - fn from(e: ExecutionPayload) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let ExecutionPayload { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = e; - +impl From for JsonWithdrawal { + fn from(withdrawal: Withdrawal) -> Self { Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, + index: withdrawal.index, + validator_index: withdrawal.validator_index, + address: withdrawal.address, + amount: withdrawal.amount, } } } -impl From> for ExecutionPayload { - fn from(e: JsonExecutionPayloadV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = e; - +impl From for Withdrawal { + fn from(jw: JsonWithdrawal) -> Self { Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, + index: jw.index, + validator_index: jw.validator_index, + address: jw.address, + amount: jw.amount, } } } -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct JsonPayloadAttributesV1 { +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Debug, Clone, PartialEq, Serialize, Deserialize), + serde(rename_all = "camelCase") + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub struct JsonPayloadAttributes { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, } -impl From for JsonPayloadAttributesV1 { - fn from(p: PayloadAttributes) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - } = p; - - Self { - timestamp, - prev_randao, - suggested_fee_recipient, +impl From for JsonPayloadAttributes { + fn from(payload_atributes: PayloadAttributes) -> Self { + match payload_atributes { + PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + }), + PayloadAttributes::V2(pa) => Self::V2(JsonPayloadAttributesV2 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + }), } } } -impl From for PayloadAttributes { - fn from(j: JsonPayloadAttributesV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonPayloadAttributesV1 { - timestamp, - prev_randao, - suggested_fee_recipient, - } = j; - - Self { - timestamp, - prev_randao, - suggested_fee_recipient, +impl From for PayloadAttributes { + fn from(json_payload_attributes: JsonPayloadAttributes) -> Self { + match json_payload_attributes { + JsonPayloadAttributes::V1(jpa) => Self::V1(PayloadAttributesV1 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + }), + JsonPayloadAttributes::V2(jpa) => Self::V2(PayloadAttributesV2 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + }), } } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonForkChoiceStateV1 { +pub struct JsonForkchoiceStateV1 { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, } -impl From for JsonForkChoiceStateV1 { - fn from(f: ForkChoiceState) -> Self { +impl From for JsonForkchoiceStateV1 { + fn from(f: ForkchoiceState) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let ForkChoiceState { + let ForkchoiceState { head_block_hash, safe_block_hash, finalized_block_hash, @@ -295,10 +364,10 @@ impl From for JsonForkChoiceStateV1 { } } -impl From for ForkChoiceState { - fn from(j: JsonForkChoiceStateV1) -> Self { +impl From for ForkchoiceState { + fn from(j: JsonForkchoiceStateV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. 
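The JSON structures above mirror the engine API's camelCase wire format, with the V2 payload attributes adding a withdrawals array. Below is a rough, self-contained serde model of that shape; it assumes serde (with the derive feature) and serde_json as dependencies, and for brevity keeps plain integers and hex strings where the real code hex-encodes u64 quantities via eth2_serde_utils and uses typed Address/Hash256 values.

use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct JsonWithdrawal {
    index: u64,
    validator_index: u64,
    address: String, // 0x-prefixed 20-byte hex address in the real API
    amount: u64,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct JsonPayloadAttributesV2 {
    timestamp: u64,
    prev_randao: String,
    suggested_fee_recipient: String,
    withdrawals: Vec<JsonWithdrawal>,
}

fn main() -> Result<(), serde_json::Error> {
    let json = r#"{
        "timestamp": 5,
        "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "suggestedFeeRecipient": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
        "withdrawals": [
            {"index": 1, "validatorIndex": 2,
             "address": "0x00000000000000000000000000000000000000ff", "amount": 3}
        ]
    }"#;
    let attrs: JsonPayloadAttributesV2 = serde_json::from_str(json)?;
    assert_eq!(attrs.withdrawals[0].validator_index, 2);
    println!("{}", serde_json::to_string_pretty(&attrs)?);
    Ok(())
}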
- let JsonForkChoiceStateV1 { + let JsonForkchoiceStateV1 { head_block_hash, safe_block_hash, finalized_block_hash, @@ -424,6 +493,30 @@ impl From for JsonForkchoiceUpdatedV1Response { } } +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct JsonExecutionPayloadBodyV1 { + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] + pub transactions: Transactions, + pub withdrawals: Option>, +} + +impl From> for ExecutionPayloadBodyV1 { + fn from(value: JsonExecutionPayloadBodyV1) -> Self { + Self { + transactions: value.transactions, + withdrawals: value.withdrawals.map(|json_withdrawals| { + Withdrawals::::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + } + } +} + #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 339006c1ba6..ce413cb1139 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,22 +1,25 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, + PayloadId, }; use crate::HttpJsonRpc; use lru::LruCache; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, info, warn, Logger}; use std::future::Future; use std::sync::Arc; +use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; -use types::{Address, ExecutionBlockHash, Hash256}; +use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// -/// Since the size of each value is small (~100 bytes) a large number is used for safety. +/// Since the size of each value is small (~800 bytes) a large number is used for safety. const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. #[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] @@ -28,6 +31,14 @@ enum EngineStateInternal { AuthFailed, } +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +enum CapabilitiesCacheAction { + #[default] + None, + Update, + Clear, +} + /// A subset of the engine state to inform other services if the engine is online or offline. 
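JsonExecutionPayloadBodyV1 above carries transactions plus an optional withdrawals list, and ExecutionPayloadBodyV1::to_payload (earlier in this diff) only accepts the combination that matches the header's fork. Here is a simplified, self-contained sketch of that consistency rule, using stand-in types rather than the Lighthouse ones.

#[derive(Debug, PartialEq)]
enum ForkName {
    Merge,
    Capella,
}

struct PayloadBody {
    transactions: Vec<Vec<u8>>,
    withdrawals: Option<Vec<u64>>, // withdrawal indices only, for brevity
}

#[derive(Debug)]
enum Payload {
    Merge { transactions: Vec<Vec<u8>> },
    Capella { transactions: Vec<Vec<u8>>, withdrawals: Vec<u64> },
}

fn to_payload(fork: ForkName, body: PayloadBody) -> Result<Payload, String> {
    let PayloadBody { transactions, withdrawals } = body;
    match (fork, withdrawals) {
        // Pre-Capella blocks must not carry withdrawals...
        (ForkName::Merge, None) => Ok(Payload::Merge { transactions }),
        (ForkName::Merge, Some(_)) => {
            Err("block is pre-Capella but payload body has withdrawals".into())
        }
        // ...and Capella blocks must.
        (ForkName::Capella, Some(withdrawals)) => {
            Ok(Payload::Capella { transactions, withdrawals })
        }
        (ForkName::Capella, None) => {
            Err("block is Capella but payload body is missing withdrawals".into())
        }
    }
}

fn main() {
    let capella = to_payload(
        ForkName::Capella,
        PayloadBody { transactions: vec![], withdrawals: Some(vec![0]) },
    );
    assert!(capella.is_ok());

    let mismatch = to_payload(
        ForkName::Merge,
        PayloadBody { transactions: vec![], withdrawals: Some(vec![0]) },
    );
    assert!(mismatch.is_err());
}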
#[derive(Debug, Clone, PartialEq, Eq, Copy)] pub enum EngineState { @@ -88,7 +99,7 @@ impl State { } #[derive(Copy, Clone, PartialEq, Debug)] -pub struct ForkChoiceState { +pub struct ForkchoiceState { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, @@ -97,9 +108,7 @@ pub struct ForkChoiceState { #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, - pub timestamp: u64, - pub prev_randao: Hash256, - pub suggested_fee_recipient: Address, + pub payload_attributes: PayloadAttributes, } #[derive(Debug)] @@ -115,7 +124,7 @@ pub struct Engine { pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, - latest_forkchoice_state: RwLock>, + latest_forkchoice_state: RwLock>, executor: TaskExecutor, log: Logger, } @@ -142,37 +151,30 @@ impl Engine { pub async fn get_payload_id( &self, - head_block_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + head_block_hash: &ExecutionBlockHash, + payload_attributes: &PayloadAttributes, ) -> Option { self.payload_id_cache .lock() .await - .get(&PayloadIdCacheKey { - head_block_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - }) + .get(&PayloadIdCacheKey::new(head_block_hash, payload_attributes)) .cloned() } pub async fn notify_forkchoice_updated( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, log: &Logger, ) -> Result { let response = self .api - .forkchoice_updated_v1(forkchoice_state, payload_attributes) + .forkchoice_updated(forkchoice_state, payload_attributes.clone()) .await?; if let Some(payload_id) = response.payload_id { - if let Some(key) = - payload_attributes.map(|pa| PayloadIdCacheKey::new(&forkchoice_state, &pa)) + if let Some(key) = payload_attributes + .map(|pa| PayloadIdCacheKey::new(&forkchoice_state.head_block_hash, &pa)) { self.payload_id_cache.lock().await.put(key, payload_id); } else { @@ -187,11 +189,11 @@ impl Engine { Ok(response) } - async fn get_latest_forkchoice_state(&self) -> Option { + async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } - pub async fn set_latest_forkchoice_state(&self, state: ForkChoiceState) { + pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) { *self.latest_forkchoice_state.write().await = Some(state); } @@ -216,7 +218,7 @@ impl Engine { // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { + if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; @@ -239,7 +241,7 @@ impl Engine { /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. 
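The PayloadIdCacheKey change above keys the payload-id cache on the head block hash plus the full PayloadAttributes (which now derive Hash/Eq), so attributes that differ in any field, including V2 withdrawals, resolve to distinct cache entries. A small self-contained sketch of that keying follows, with a HashMap standing in for the real LruCache and simplified stand-in types.

use std::collections::HashMap;

type PayloadId = [u8; 8];

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
enum PayloadAttributes {
    V1 { timestamp: u64, prev_randao: [u8; 32], suggested_fee_recipient: [u8; 20] },
    V2 {
        timestamp: u64,
        prev_randao: [u8; 32],
        suggested_fee_recipient: [u8; 20],
        withdrawal_indices: Vec<u64>, // simplified stand-in for the full withdrawals
    },
}

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct PayloadIdCacheKey {
    head_block_hash: [u8; 32],
    payload_attributes: PayloadAttributes,
}

fn main() {
    let mut cache: HashMap<PayloadIdCacheKey, PayloadId> = HashMap::new();

    let attributes = PayloadAttributes::V2 {
        timestamp: 42,
        prev_randao: [0; 32],
        suggested_fee_recipient: [0; 20],
        withdrawal_indices: vec![7],
    };
    let key = PayloadIdCacheKey {
        head_block_hash: [1; 32],
        payload_attributes: attributes.clone(),
    };

    cache.insert(key.clone(), *b"payload!");
    assert_eq!(cache.get(&key), Some(&*b"payload!"));
}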
pub async fn upcheck(&self) { - let state: EngineStateInternal = match self.api.upcheck().await { + let (state, cache_action) = match self.api.upcheck().await { Ok(()) => { let mut state = self.state.write().await; if **state != EngineStateInternal::Synced { @@ -257,12 +259,12 @@ impl Engine { ); } state.update(EngineStateInternal::Synced); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; state.update(EngineStateInternal::Syncing); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::Auth(err)) => { error!( @@ -273,7 +275,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - **state + (**state, CapabilitiesCacheAction::Clear) } Err(e) => { error!( @@ -284,10 +286,30 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::Offline); - **state + // need to clear the engine capabilities cache if we detect the + // execution engine is offline as it is likely the engine is being + // updated to a newer version with new capabilities + (**state, CapabilitiesCacheAction::Clear) } }; + // do this after dropping state lock guard to avoid holding two locks at once + match cache_action { + CapabilitiesCacheAction::None => {} + CapabilitiesCacheAction::Update => { + if let Err(e) = self + .get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT)) + .await + { + warn!(self.log, + "Error during exchange capabilities"; + "error" => ?e, + ) + } + } + CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await, + } + debug!( self.log, "Execution engine upcheck complete"; @@ -295,6 +317,22 @@ impl Engine { ); } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.api.get_engine_capabilities(age_limit).await + } + /// Run `func` on the node regardless of the node's current state. /// /// ## Note @@ -303,7 +341,7 @@ impl Engine { /// deadlock. 
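The capability lookups above go through a small time-based cache: get_engine_capabilities reuses the cached result unless it is older than the caller-supplied limit, where None means never refetch and Some(Duration::ZERO) forces a refetch, and the upcheck path passes the 15-minute CACHED_ENGINE_CAPABILITIES_AGE_LIMIT. A self-contained sketch of that ageing rule, with a stand-in capabilities type rather than the real one:

use std::time::{Duration, Instant};

#[derive(Clone, Copy, Debug)]
struct EngineCapabilities {
    get_payload_v2: bool,
}

struct CapabilitiesCacheEntry {
    engine_capabilities: EngineCapabilities,
    fetch_time: Instant,
}

impl CapabilitiesCacheEntry {
    fn new(engine_capabilities: EngineCapabilities) -> Self {
        Self { engine_capabilities, fetch_time: Instant::now() }
    }

    fn age(&self) -> Duration {
        Instant::now().duration_since(self.fetch_time)
    }

    /// `true` if the entry is at least `age_limit` old; `None` means it is never stale.
    fn older_than(&self, age_limit: Option<Duration>) -> bool {
        age_limit.map_or(false, |limit| self.age() >= limit)
    }
}

fn main() {
    let entry = CapabilitiesCacheEntry::new(EngineCapabilities { get_payload_v2: true });

    // A `None` limit always reuses the cached value.
    assert!(!entry.older_than(None));
    // A zero limit always treats the cache as stale, forcing a refetch.
    assert!(entry.older_than(Some(Duration::ZERO)));
    // The upcheck path uses a 15-minute limit, so a fresh entry is reused.
    assert!(!entry.older_than(Some(Duration::from_secs(900))));
    let _ = entry.engine_capabilities.get_payload_v2;
}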
pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where - F: Fn(&'a Engine) -> G, + F: FnOnce(&'a Engine) -> G, G: Future>, { match func(self).await { @@ -325,7 +363,7 @@ impl Engine { Ok(result) } Err(error) => { - error!( + warn!( self.log, "Execution engine call failed"; "error" => ?error, @@ -348,12 +386,10 @@ impl Engine { } impl PayloadIdCacheKey { - fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { + fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self { Self { - head_block_hash: state.head_block_hash, - timestamp: attributes.timestamp, - prev_randao: attributes.prev_randao, - suggested_fee_recipient: attributes.suggested_fee_recipient, + head_block_hash: *head_block_hash, + payload_attributes: attributes.clone(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index a4d15abb364..09be379d240 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,12 +7,13 @@ use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; +pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; -pub use engines::{EngineState, ForkChoiceState}; -use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; +pub use engines::{EngineState, ForkchoiceState}; +use eth2::types::builder_bid::SignedBuilderBid; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -25,6 +26,7 @@ use std::collections::HashMap; use std::fmt; use std::future::Future; use std::io::Write; +use std::marker::PhantomData; use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -35,14 +37,17 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +use tree_hash::TreeHash; +use types::{AbstractExecPayload, BeaconStateError, ExecPayload, Withdrawals}; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadCapella, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, }; mod block_hash; mod engine_api; -mod engines; +pub mod engines; mod keccak; mod metrics; pub mod payload_cache; @@ -72,7 +77,7 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); /// A payload alongside some information about where it came from. -enum ProvenancedPayload
<P> { +pub enum ProvenancedPayload<P>
{ /// A good ol' fashioned farm-to-table payload from your local EE. Local(P), /// A payload from a builder (e.g. mev-boost). @@ -98,6 +103,15 @@ pub enum Error { transactions_root: Hash256, }, InvalidJWTSecret(String), + InvalidForkForPayload, + InvalidPayloadBody(String), + BeaconStateError(BeaconStateError), +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } } impl From for Error { @@ -106,6 +120,56 @@ impl From for Error { } } +pub enum BlockProposalContents> { + Payload { + payload: Payload, + block_value: Uint256, + // TODO: remove for 4844, since it appears in PayloadAndBlobs + _phantom: PhantomData, + }, +} + +impl> BlockProposalContents { + pub fn payload(&self) -> &Payload { + match self { + Self::Payload { + payload, + block_value: _, + _phantom: _, + } => payload, + } + } + pub fn to_payload(self) -> Payload { + match self { + Self::Payload { + payload, + block_value: _, + _phantom: _, + } => payload, + } + } + pub fn block_value(&self) -> &Uint256 { + match self { + Self::Payload { + payload: _, + block_value, + _phantom: _, + } => block_value, + } + } + pub fn default_at_fork(fork_name: ForkName) -> Result { + Ok(match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + BlockProposalContents::Payload { + payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), + _phantom: PhantomData::default(), + } + } + }) + } +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, @@ -157,6 +221,7 @@ struct Inner { payload_cache: PayloadCache, builder_profit_threshold: Uint256, log: Logger, + always_prefer_builder_payload: bool, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] @@ -165,6 +230,8 @@ pub struct Config { pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// User agent to send with requests to the builder API. + pub builder_user_agent: Option, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -179,6 +246,7 @@ pub struct Config { /// The minimum value of an external payload for it to be considered in a proposal. pub builder_profit_threshold: u128, pub execution_timeout_multiplier: Option, + pub always_prefer_builder_payload: bool, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -194,6 +262,7 @@ impl ExecutionLayer { let Config { execution_endpoints: urls, builder_url, + builder_user_agent, secret_files, suggested_fee_recipient, jwt_id, @@ -201,6 +270,7 @@ impl ExecutionLayer { default_datadir, builder_profit_threshold, execution_timeout_multiplier, + always_prefer_builder_payload, } = config; if urls.len() > 1 { @@ -228,6 +298,7 @@ impl ExecutionLayer { .map_err(Error::InvalidJWTSecret) } else { // Create a new file and write a randomly generated secret to it if file does not exist + warn!(log, "No JWT found on disk. 
Generating"; "path" => %secret_file.display()); std::fs::File::options() .write(true) .create_new(true) @@ -252,12 +323,17 @@ impl ExecutionLayer { let builder = builder_url .map(|url| { - let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder); - info!(log, + let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) + .map_err(Error::Builder)?; + + info!( + log, "Connected to external block builder"; "builder_url" => ?url, - "builder_profit_threshold" => builder_profit_threshold); - builder_client + "builder_profit_threshold" => builder_profit_threshold, + "local_user_agent" => builder_client.get_user_agent(), + ); + Ok::<_, Error>(builder_client) }) .transpose()?; @@ -273,6 +349,7 @@ impl ExecutionLayer { payload_cache: PayloadCache::default(), builder_profit_threshold: Uint256::from(builder_profit_threshold), log, + always_prefer_builder_payload, }; Ok(Self { @@ -290,12 +367,12 @@ impl ExecutionLayer { &self.inner.builder } - /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. - fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { - self.inner.payload_cache.put(payload.clone()) + /// Cache a full payload, keyed on the `tree_hash_root` of the payload + fn cache_payload(&self, payload: ExecutionPayloadRef) -> Option> { + self.inner.payload_cache.put(payload.clone_from_ref()) } - /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. + /// Attempt to retrieve a full payload from the payload cache by the payload root pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { self.inner.payload_cache.pop(root) } @@ -566,19 +643,15 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. - #[allow(clippy::too_many_arguments)] - pub async fn get_payload>( + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - proposer_index: u64, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, spec: &ChainSpec, - ) -> Result { - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - + ) -> Result, Error> { let payload_result = match Payload::block_type() { BlockType::Blinded => { let _timer = metrics::start_timer_vec( @@ -587,11 +660,10 @@ impl ExecutionLayer { ); self.get_blinded_payload( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, builder_params, + current_fork, spec, ) .await @@ -603,10 +675,9 @@ impl ExecutionLayer { ); self.get_full_payload( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, ) .await .map(ProvenancedPayload::Local) @@ -615,7 +686,7 @@ impl ExecutionLayer { // Track some metrics and return the result. 
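Both branches of the `match payload_result` below carry the new `BlockProposalContents` container introduced above, which bundles the payload with the block value reported by the EE or builder. A toy, self-contained sketch of its shape (a plain `u128` stands in for `Uint256`, unit structs for the spec and payload types):

```rust
use std::marker::PhantomData;

#[derive(Debug, Default)]
struct FullPayload;
struct MainnetEthSpec;

enum BlockProposalContents<E, Payload> {
    Payload {
        payload: Payload,
        block_value: u128, // wei
        _phantom: PhantomData<E>,
    },
}

impl<E, Payload> BlockProposalContents<E, Payload> {
    fn payload(&self) -> &Payload {
        match self {
            Self::Payload { payload, .. } => payload,
        }
    }
    fn block_value(&self) -> u128 {
        match self {
            Self::Payload { block_value, .. } => *block_value,
        }
    }
}

fn main() {
    let contents: BlockProposalContents<MainnetEthSpec, FullPayload> =
        BlockProposalContents::Payload {
            payload: FullPayload::default(),
            block_value: 1_000_000_000,
            _phantom: PhantomData,
        };
    println!("payload = {:?}, value = {} wei", contents.payload(), contents.block_value());
}
```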
match payload_result { - Ok(ProvenancedPayload::Local(payload)) => { + Ok(ProvenancedPayload::Local(block_proposal_contents)) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &[metrics::SUCCESS], @@ -624,9 +695,9 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &[metrics::LOCAL], ); - Ok(payload) + Ok(block_proposal_contents) } - Ok(ProvenancedPayload::Builder(payload)) => { + Ok(ProvenancedPayload::Builder(block_proposal_contents)) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &[metrics::SUCCESS], @@ -635,7 +706,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &[metrics::BUILDER], ); - Ok(payload) + Ok(block_proposal_contents) } Err(e) => { metrics::inc_counter_vec( @@ -647,17 +718,15 @@ impl ExecutionLayer { } } - #[allow(clippy::too_many_arguments)] - async fn get_blinded_payload>( + async fn get_blinded_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result>, Error> { if let Some(builder) = self.builder() { let slot = builder_params.slot; let pubkey = builder_params.pubkey; @@ -682,10 +751,9 @@ impl ExecutionLayer { timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { self.get_full_payload_caching::( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, ) .await }) @@ -701,7 +769,7 @@ impl ExecutionLayer { }, "relay_response_ms" => relay_duration.as_millis(), "local_fee_recipient" => match &local_result { - Ok(header) => format!("{:?}", header.fee_recipient()), + Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()), Err(_) => "request failed".to_string() }, "local_response_ms" => local_duration.as_millis(), @@ -715,7 +783,7 @@ impl ExecutionLayer { "Builder error when requesting payload"; "info" => "falling back to local execution client", "relay_error" => ?e, - "local_block_hash" => ?local.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), "parent_hash" => ?parent_hash, ); Ok(ProvenancedPayload::Local(local)) @@ -725,7 +793,7 @@ impl ExecutionLayer { self.log(), "Builder did not return a payload"; "info" => "falling back to local execution client", - "local_block_hash" => ?local.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), "parent_hash" => ?parent_hash, ); Ok(ProvenancedPayload::Local(local)) @@ -737,22 +805,40 @@ impl ExecutionLayer { self.log(), "Received local and builder payloads"; "relay_block_hash" => ?header.block_hash(), - "local_block_hash" => ?local.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), "parent_hash" => ?parent_hash, ); + let relay_value = relay.data.message.value; + let local_value = *local.block_value(); + if !self.inner.always_prefer_builder_payload + && local_value >= relay_value + { + info!( + self.log(), + "Local block is more profitable than relay block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(local)); + } + match verify_builder_bid( &relay, parent_hash, - prev_randao, - timestamp, - Some(local.block_number()), + payload_attributes, + Some(local.payload().block_number()), self.inner.builder_profit_threshold, + current_fork, 
spec, ) { - Ok(()) => { - Ok(ProvenancedPayload::Builder(relay.data.message.header)) - } + Ok(()) => Ok(ProvenancedPayload::Builder( + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + _phantom: PhantomData::default(), + }, + )), Err(reason) if !reason.payload_invalid() => { info!( self.log(), @@ -795,20 +881,28 @@ impl ExecutionLayer { match verify_builder_bid( &relay, parent_hash, - prev_randao, - timestamp, + payload_attributes, None, self.inner.builder_profit_threshold, + current_fork, spec, ) { - Ok(()) => { - Ok(ProvenancedPayload::Builder(relay.data.message.header)) - } + Ok(()) => Ok(ProvenancedPayload::Builder( + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + _phantom: PhantomData::default(), + }, + )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. - Err(e) if !e.payload_invalid() => { - Ok(ProvenancedPayload::Builder(relay.data.message.header)) - } + Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + _phantom: PhantomData::default(), + }, + )), Err(reason) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, @@ -871,76 +965,62 @@ impl ExecutionLayer { } self.get_full_payload_caching( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, ) .await .map(ProvenancedPayload::Local) } /// Get a full payload without caching its result in the execution layer's payload cache. - async fn get_full_payload>( + async fn get_full_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, - ) -> Result { + current_fork: ForkName, + ) -> Result, Error> { self.get_full_payload_with( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, noop, ) .await } /// Get a full payload and cache its result in the execution layer's payload cache. 
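When both a relay bid and a local payload are available (above), the relay bid is only used if `always_prefer_builder_payload` is set or it pays at least as much as the local block, and it must still pass `verify_builder_bid`. A self-contained sketch of just the value comparison:

```rust
#[derive(Debug, PartialEq)]
enum PayloadSource { Local, Builder }

// Mirrors the comparison above: unless the operator opts to always take the
// builder's payload, a local payload that is at least as valuable wins.
fn choose_source(
    local_value_wei: u128,
    relay_value_wei: u128,
    always_prefer_builder_payload: bool,
) -> PayloadSource {
    if !always_prefer_builder_payload && local_value_wei >= relay_value_wei {
        PayloadSource::Local
    } else {
        PayloadSource::Builder
    }
}

fn main() {
    assert_eq!(choose_source(10, 9, false), PayloadSource::Local);
    assert_eq!(choose_source(10, 9, true), PayloadSource::Builder);
    assert_eq!(choose_source(5, 9, false), PayloadSource::Builder);
}
```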
- async fn get_full_payload_caching>( + async fn get_full_payload_caching>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, - ) -> Result { + current_fork: ForkName, + ) -> Result, Error> { self.get_full_payload_with( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, Self::cache_payload, ) .await } - async fn get_full_payload_with>( + async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, - f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, - ) -> Result { - debug!( - self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); + current_fork: ForkName, + f: fn(&ExecutionLayer, ExecutionPayloadRef) -> Option>, + ) -> Result, Error> { self.engine() - .request(|engine| async move { + .request(move |engine| async move { let payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .get_payload_id(&parent_hash, payload_attributes) .await { // The payload id has been cached for this engine. @@ -956,7 +1036,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &[metrics::MISS], ); - let fork_choice_state = ForkChoiceState { + let fork_choice_state = ForkchoiceState { head_block_hash: parent_hash, safe_block_hash: forkchoice_update_params .justified_hash @@ -965,16 +1045,11 @@ impl ExecutionLayer { .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), }; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - }; let response = engine .notify_forkchoice_updated( fork_choice_state, - Some(payload_attributes), + Some(payload_attributes.clone()), self.log(), ) .await?; @@ -994,33 +1069,46 @@ impl ExecutionLayer { } }; - engine - .api - .get_payload_v1::(payload_id) - .await - .map(|full_payload| { - if full_payload.fee_recipient != suggested_fee_recipient { - error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. 
Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.fee_recipient, - "suggested_fee_recipient" => ?suggested_fee_recipient, - ); - } - if f(self, &full_payload).is_some() { - warn!( - self.log(), - "Duplicate payload cached, this might indicate redundant proposal \ + let payload_fut = async { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + "prev_randao" => ?payload_attributes.prev_randao(), + "timestamp" => payload_attributes.timestamp(), + "parent_hash" => ?parent_hash, + ); + engine.api.get_payload::(current_fork, payload_id).await + }; + let payload_response = payload_fut.await; + let (execution_payload, block_value) = payload_response.map(|payload_response| { + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + ); + } + if f(self, payload_response.execution_payload_ref()).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ attempts." - ); - } - full_payload.into() - }) + ); + } + payload_response.into() + })?; + Ok(BlockProposalContents::Payload { + payload: execution_payload.into(), + block_value, + _phantom: PhantomData::default(), + }) }) .await .map_err(Box::new) @@ -1052,14 +1140,14 @@ impl ExecutionLayer { trace!( self.log(), "Issuing engine_newPayload"; - "parent_hash" => ?execution_payload.parent_hash, - "block_hash" => ?execution_payload.block_hash, - "block_number" => execution_payload.block_number, + "parent_hash" => ?execution_payload.parent_hash(), + "block_hash" => ?execution_payload.block_hash(), + "block_number" => execution_payload.block_number(), ); let result = self .engine() - .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) + .request(|engine| engine.api.new_payload(execution_payload.clone())) .await; if let Ok(status) = &result { @@ -1069,7 +1157,7 @@ impl ExecutionLayer { ); } - process_payload_status(execution_payload.block_hash, result, self.log()) + process_payload_status(execution_payload.block_hash(), result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -1172,9 +1260,9 @@ impl ExecutionLayer { let payload_attributes = self.payload_attributes(next_slot, head_block_root).await; // Compute the "lookahead", the time between when the payload will be produced and now. 
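The lookahead mentioned in the comment above is the gap between the payload's target timestamp and the current wall-clock time; the code below computes it with `checked_sub`, yielding `None` when the node is already past the target. A self-contained version of the same calculation:

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// `None` means the wall clock is already past the target slot's timestamp.
fn payload_lookahead(payload_timestamp_secs: u64) -> Option<Duration> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?;
    Duration::from_secs(payload_timestamp_secs).checked_sub(now)
}

fn main() {
    let in_twelve_seconds = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs()
        + 12;
    println!("lookahead: {:?}", payload_lookahead(in_twelve_seconds));
}
```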
- if let Some(payload_attributes) = payload_attributes { + if let Some(ref payload_attributes) = payload_attributes { if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { - let timestamp = Duration::from_secs(payload_attributes.timestamp); + let timestamp = Duration::from_secs(payload_attributes.timestamp()); if let Some(lookahead) = timestamp.checked_sub(now) { metrics::observe_duration( &metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD, @@ -1191,7 +1279,7 @@ impl ExecutionLayer { } } - let forkchoice_state = ForkChoiceState { + let forkchoice_state = ForkchoiceState { head_block_hash, safe_block_hash: justified_block_hash, finalized_block_hash, @@ -1273,6 +1361,26 @@ impl ExecutionLayer { } } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.engine() + .request(|engine| engine.get_engine_capabilities(age_limit)) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + /// Used during block production to determine if the merge has been triggered. /// /// ## Specification @@ -1473,13 +1581,90 @@ impl ExecutionLayer { } } - pub async fn get_payload_by_block_hash( + pub async fn get_payload_bodies_by_hash( + &self, + hashes: Vec, + ) -> Result>>, Error> { + self.engine() + .request(|engine: &Engine| async move { + engine.api.get_payload_bodies_by_hash_v1(hashes).await + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + + pub async fn get_payload_bodies_by_range( + &self, + start: u64, + count: u64, + ) -> Result>>, Error> { + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); + self.engine() + .request(|engine: &Engine| async move { + engine + .api + .get_payload_bodies_by_range_v1(start, count) + .await + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + + /// Fetch a full payload from the execution node. + /// + /// This will fail if the payload is not from the finalized portion of the chain. + pub async fn get_payload_for_header( + &self, + header: &ExecutionPayloadHeader, + fork: ForkName, + ) -> Result>, Error> { + let hash = header.block_hash(); + let block_number = header.block_number(); + + // Handle default payload body. + if header.block_hash() == ExecutionBlockHash::zero() { + let payload = match fork { + ForkName::Merge => ExecutionPayloadMerge::default().into(), + ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Base | ForkName::Altair => { + return Err(Error::InvalidForkForPayload); + } + }; + return Ok(Some(payload)); + } + + // Use efficient payload bodies by range method if supported. 
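The choice below hinges on the cached `EngineCapabilities` documented above: `get_engine_capabilities(None)` accepts any cached result, while `Some(Duration::ZERO)` forces a refresh from the EE. A minimal sketch of that age-limited cache, with plain `Instant` timestamps and a single bool in place of the full capabilities set:

```rust
use std::time::{Duration, Instant};

#[derive(Clone, Copy, Debug, PartialEq)]
struct EngineCapabilities {
    get_payload_bodies_by_range_v1: bool,
}

struct CapabilitiesCache {
    entry: Option<(EngineCapabilities, Instant)>,
}

impl CapabilitiesCache {
    // `None` always accepts a cached entry, `Some(Duration::ZERO)` always
    // refreshes, anything in between refreshes only stale entries.
    fn get_or_refresh(
        &mut self,
        age_limit: Option<Duration>,
        fetch: impl FnOnce() -> EngineCapabilities,
    ) -> EngineCapabilities {
        match self.entry {
            Some((caps, fetched_at))
                if age_limit.map_or(true, |limit| fetched_at.elapsed() < limit) =>
            {
                caps
            }
            _ => {
                let caps = fetch();
                self.entry = Some((caps, Instant::now()));
                caps
            }
        }
    }
}

fn main() {
    let mut cache = CapabilitiesCache { entry: None };
    // The first call always fetches because the cache is empty.
    let caps = cache.get_or_refresh(None, || EngineCapabilities {
        get_payload_bodies_by_range_v1: true,
    });
    // `Some(Duration::ZERO)` forces a refresh even though an entry exists.
    let refreshed = cache.get_or_refresh(Some(Duration::ZERO), || caps);
    assert_eq!(caps, refreshed);
}
```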
+ let capabilities = self.get_engine_capabilities(None).await?; + if capabilities.get_payload_bodies_by_range_v1 { + let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; + + if payload_bodies.len() != 1 { + return Ok(None); + } + + let opt_payload_body = payload_bodies.pop().flatten(); + opt_payload_body + .map(|body| { + body.to_payload(header.clone()) + .map_err(Error::InvalidPayloadBody) + }) + .transpose() + } else { + // Fall back to eth_blockByHash. + self.get_payload_by_hash_legacy(hash, fork).await + } + } + + pub async fn get_payload_by_hash_legacy( &self, hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, Error> { self.engine() .request(|engine| async move { - self.get_payload_by_block_hash_from_engine(engine, hash) + self.get_payload_by_hash_from_engine(engine, hash, fork) .await }) .await @@ -1487,18 +1672,29 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_hash_from_engine( &self, engine: &Engine, hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, ApiError> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); if hash == ExecutionBlockHash::zero() { - return Ok(Some(ExecutionPayload::default())); + return match fork { + ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), + ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), + ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( + format!("called get_payload_by_hash_from_engine with {}", fork), + )), + }; } - let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::(hash).await? { + let block = if let Some(block) = engine + .api + .get_block_by_hash_with_txns::(hash, fork) + .await? 
+ { block } else { return Ok(None); @@ -1506,30 +1702,63 @@ impl ExecutionLayer { let transactions = VariableList::new( block - .transactions - .into_iter() + .transactions() + .iter() .map(|transaction| VariableList::new(transaction.rlp().to_vec())) .collect::>() .map_err(ApiError::DeserializeTransaction)?, ) .map_err(ApiError::DeserializeTransactions)?; - Ok(Some(ExecutionPayload { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions, - })) + let payload = match block { + ExecutionBlockWithTransactions::Merge(merge_block) => { + ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: merge_block.parent_hash, + fee_recipient: merge_block.fee_recipient, + state_root: merge_block.state_root, + receipts_root: merge_block.receipts_root, + logs_bloom: merge_block.logs_bloom, + prev_randao: merge_block.prev_randao, + block_number: merge_block.block_number, + gas_limit: merge_block.gas_limit, + gas_used: merge_block.gas_used, + timestamp: merge_block.timestamp, + extra_data: merge_block.extra_data, + base_fee_per_gas: merge_block.base_fee_per_gas, + block_hash: merge_block.block_hash, + transactions, + }) + } + ExecutionBlockWithTransactions::Capella(capella_block) => { + let withdrawals = VariableList::new( + capella_block + .withdrawals + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: capella_block.parent_hash, + fee_recipient: capella_block.fee_recipient, + state_root: capella_block.state_root, + receipts_root: capella_block.receipts_root, + logs_bloom: capella_block.logs_bloom, + prev_randao: capella_block.prev_randao, + block_number: capella_block.block_number, + gas_limit: capella_block.gas_limit, + gas_used: capella_block.gas_used, + timestamp: capella_block.timestamp, + extra_data: capella_block.extra_data, + base_fee_per_gas: capella_block.base_fee_per_gas, + block_hash: capella_block.block_hash, + transactions, + withdrawals, + }) + } + }; + + Ok(Some(payload)) } pub async fn propose_blinded_beacon_block( @@ -1565,9 +1794,9 @@ impl ExecutionLayer { "Builder successfully revealed payload"; "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, - "fee_recipient" => ?payload.fee_recipient, - "block_hash" => ?payload.block_hash, - "parent_hash" => ?payload.parent_hash + "fee_recipient" => ?payload.fee_recipient(), + "block_hash" => ?payload.block_hash(), + "parent_hash" => ?payload.parent_hash() ) } Err(e) => { @@ -1575,10 +1804,10 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::FAILURE], ); - error!( + warn!( self.log(), "Builder failed to reveal payload"; - "info" => "this relay failure may cause a missed proposal", + "info" => "this is common behaviour for some builders and may not indicate an issue", "error" => ?e, "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, @@ -1629,6 +1858,10 @@ enum InvalidBuilderPayload { signature: Signature, pubkey: PublicKeyBytes, }, + WithdrawalsRoot { + payload: Option, + expected: Option, + }, } impl InvalidBuilderPayload { @@ -1643,6 +1876,7 @@ 
impl InvalidBuilderPayload { InvalidBuilderPayload::BlockNumber { .. } => true, InvalidBuilderPayload::Fork { .. } => true, InvalidBuilderPayload::Signature { .. } => true, + InvalidBuilderPayload::WithdrawalsRoot { .. } => true, } } } @@ -1678,18 +1912,31 @@ impl fmt::Display for InvalidBuilderPayload { "invalid payload signature {} for pubkey {}", signature, pubkey ), + InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => { + let opt_string = |opt_hash: &Option| { + opt_hash + .map(|hash| hash.to_string()) + .unwrap_or_else(|| "None".to_string()) + }; + write!( + f, + "payload withdrawals root was {} not {}", + opt_string(payload), + opt_string(expected) + ) + } } } } /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. -fn verify_builder_bid>( +fn verify_builder_bid>( bid: &ForkVersionedResponse>, parent_hash: ExecutionBlockHash, - prev_randao: Hash256, - timestamp: u64, + payload_attributes: &PayloadAttributes, block_number: Option, profit_threshold: Uint256, + current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { let is_signature_valid = bid.data.verify_signature(spec); @@ -1706,6 +1953,13 @@ fn verify_builder_bid>( ); } + let expected_withdrawals_root = payload_attributes + .withdrawals() + .ok() + .cloned() + .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); + let payload_withdrawals_root = header.withdrawals_root().ok(); + if payload_value < profit_threshold { Err(Box::new(InvalidBuilderPayload::LowValue { profit_threshold, @@ -1716,35 +1970,36 @@ fn verify_builder_bid>( payload: header.parent_hash(), expected: parent_hash, })) - } else if header.prev_randao() != prev_randao { + } else if header.prev_randao() != payload_attributes.prev_randao() { Err(Box::new(InvalidBuilderPayload::PrevRandao { payload: header.prev_randao(), - expected: prev_randao, + expected: payload_attributes.prev_randao(), })) - } else if header.timestamp() != timestamp { + } else if header.timestamp() != payload_attributes.timestamp() { Err(Box::new(InvalidBuilderPayload::Timestamp { payload: header.timestamp(), - expected: timestamp, + expected: payload_attributes.timestamp(), })) } else if block_number.map_or(false, |n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, })) - } else if !matches!(bid.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. 
+ } else if bid.version != Some(current_fork) { Err(Box::new(InvalidBuilderPayload::Fork { payload: bid.version, - expected: ForkName::Merge, + expected: current_fork, })) } else if !is_signature_valid { Err(Box::new(InvalidBuilderPayload::Signature { signature: bid.data.signature.clone(), pubkey: bid.data.message.pubkey, })) + } else if payload_withdrawals_root != expected_withdrawals_root { + Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot { + payload: payload_withdrawals_root, + expected: expected_withdrawals_root, + })) } else { Ok(()) } @@ -1906,7 +2161,10 @@ mod test { } } -fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { None } diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 287050f66be..3ed99ca6068 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -45,6 +45,10 @@ lazy_static::lazy_static! { "execution_layer_get_payload_by_block_hash_time", "Time to reconstruct a payload from the EE using eth_getBlockByHash" ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE: Result = try_create_histogram( + "execution_layer_get_payload_bodies_by_range_time", + "Time to fetch a range of payload bodies from the EE" + ); pub static ref EXECUTION_LAYER_VERIFY_BLOCK_HASH: Result = try_create_histogram_with_buckets( "execution_layer_verify_block_hash_time", "Time to verify the execution block hash in Lighthouse, without the EL", diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index 7db8e234d11..5405fd70099 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -10,7 +10,9 @@ use types::ExecutionBlockHash; pub enum PayloadStatus { Valid, Invalid { - latest_valid_hash: ExecutionBlockHash, + /// The EE will provide a `None` LVH when it is unable to determine the + /// latest valid ancestor. + latest_valid_hash: Option, validation_error: Option, }, Syncing, @@ -55,22 +57,10 @@ pub fn process_payload_status( }) } } - PayloadStatusV1Status::Invalid => { - if let Some(latest_valid_hash) = response.latest_valid_hash { - // The response is only valid if `latest_valid_hash` is not `null`. - Ok(PayloadStatus::Invalid { - latest_valid_hash, - validation_error: response.validation_error.clone(), - }) - } else { - Err(EngineError::Api { - error: ApiError::BadResponse( - "new_payload: response.status = INVALID but null latest_valid_hash" - .to_string(), - ), - }) - } - } + PayloadStatusV1Status::Invalid => Ok(PayloadStatus::Invalid { + latest_valid_hash: response.latest_valid_hash, + validation_error: response.validation_error, + }), PayloadStatusV1Status::InvalidBlockHash => { // In the interests of being liberal with what we accept, only raise a // warning here. 
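The builder-bid checks above run in order and return the first mismatch; with Capella the expected withdrawals root (the tree hash of the payload attributes' withdrawals) is compared as an `Option`, so pre-Capella bids where both sides are `None` still pass. A condensed, self-contained sketch of that ordering, with plain byte arrays instead of SSZ types and omitting the value, randao, block-number and signature checks:

```rust
#[derive(Debug, PartialEq)]
enum BidRejection { ParentHash, Timestamp, Fork, WithdrawalsRoot }

struct BidHeader {
    parent_hash: [u8; 32],
    timestamp: u64,
    fork: &'static str,
    // `None` for pre-Capella headers, `Some(root)` once withdrawals exist.
    withdrawals_root: Option<[u8; 32]>,
}

fn check_bid(bid: &BidHeader, expected: &BidHeader) -> Result<(), BidRejection> {
    if bid.parent_hash != expected.parent_hash {
        Err(BidRejection::ParentHash)
    } else if bid.timestamp != expected.timestamp {
        Err(BidRejection::Timestamp)
    } else if bid.fork != expected.fork {
        Err(BidRejection::Fork)
    } else if bid.withdrawals_root != expected.withdrawals_root {
        // Before Capella both sides are `None`, so this check only bites once
        // withdrawals are part of the payload.
        Err(BidRejection::WithdrawalsRoot)
    } else {
        Ok(())
    }
}

fn main() {
    let expected = BidHeader {
        parent_hash: [1; 32],
        timestamp: 1_681_000_000,
        fork: "capella",
        withdrawals_root: Some([2; 32]),
    };
    let mut bid = BidHeader { withdrawals_root: None, ..expected };
    assert_eq!(check_bid(&bid, &expected), Err(BidRejection::WithdrawalsRoot));
    bid.withdrawals_root = Some([2; 32]);
    assert_eq!(check_bid(&bid, &expected), Ok(()));
}
```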
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 22dcb400708..a8d98a767fb 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,4 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; use crate::{ engine_api::{ json_structures::{ @@ -12,7 +12,10 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::{EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; +use types::{ + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, + ForkName, Hash256, Uint256, +}; const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -28,21 +31,21 @@ impl Block { pub fn block_number(&self) -> u64 { match self { Block::PoW(block) => block.block_number, - Block::PoS(payload) => payload.block_number, + Block::PoS(payload) => payload.block_number(), } } pub fn parent_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.parent_hash, - Block::PoS(payload) => payload.parent_hash, + Block::PoS(payload) => payload.parent_hash(), } } pub fn block_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.block_hash, - Block::PoS(payload) => payload.block_hash, + Block::PoS(payload) => payload.block_hash(), } } @@ -63,33 +66,18 @@ impl Block { timestamp: block.timestamp, }, Block::PoS(payload) => ExecutionBlock { - block_hash: payload.block_hash, - block_number: payload.block_number, - parent_hash: payload.parent_hash, + block_hash: payload.block_hash(), + block_number: payload.block_number(), + parent_hash: payload.parent_hash(), total_difficulty, - timestamp: payload.timestamp, + timestamp: payload.timestamp(), }, } } pub fn as_execution_block_with_tx(&self) -> Option> { match self { - Block::PoS(payload) => Some(ExecutionBlockWithTransactions { - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone(), - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone(), - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions: vec![], - }), + Block::PoS(payload) => Some(payload.clone().try_into().unwrap()), Block::PoW(_) => None, } } @@ -126,6 +114,10 @@ pub struct ExecutionBlockGenerator { pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, + /* + * Post-merge fork triggers + */ + pub shanghai_time: Option, // withdrawals } impl ExecutionBlockGenerator { @@ -133,6 +125,7 @@ impl ExecutionBlockGenerator { terminal_total_difficulty: Uint256, terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, ) -> Self { let mut gen = Self { head_block: <_>::default(), @@ -145,6 +138,7 @@ impl ExecutionBlockGenerator { pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), + shanghai_time, }; gen.insert_pow_block(0).unwrap(); @@ -176,6 +170,13 @@ impl ExecutionBlockGenerator { } } + pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { + match self.shanghai_time { + 
Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Merge, + } + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -198,6 +199,14 @@ impl ExecutionBlockGenerator { .and_then(|block| block.as_execution_block_with_tx()) } + pub fn execution_block_with_txs_by_number( + &self, + number: u64, + ) -> Option> { + self.block_by_number(number) + .and_then(|block| block.as_execution_block_with_tx()) + } + pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { let target_block = self .terminal_block_number @@ -357,7 +366,9 @@ impl ExecutionBlockGenerator { // Update the block hash after modifying the block match &mut block { Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), - Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + Block::PoS(b) => { + *b.block_hash_mut() = ExecutionBlockHash::from_root(b.tree_hash_root()) + } } // Update head. @@ -378,7 +389,7 @@ impl ExecutionBlockGenerator { } pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { - let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { + let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) { parent } else { return PayloadStatusV1 { @@ -388,7 +399,7 @@ impl ExecutionBlockGenerator { }; }; - if payload.block_number != parent.block_number() + 1 { + if payload.block_number() != parent.block_number() + 1 { return PayloadStatusV1 { status: PayloadStatusV1Status::Invalid, latest_valid_hash: Some(parent.block_hash()), @@ -396,8 +407,8 @@ impl ExecutionBlockGenerator { }; } - let valid_hash = payload.block_hash; - self.pending_payloads.insert(payload.block_hash, payload); + let valid_hash = payload.block_hash(); + self.pending_payloads.insert(payload.block_hash(), payload); PayloadStatusV1 { status: PayloadStatusV1Status::Valid, @@ -406,9 +417,11 @@ impl ExecutionBlockGenerator { } } - pub fn forkchoice_updated_v1( + // This function expects payload_attributes to already be validated with respect to + // the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)] + pub fn forkchoice_updated( &mut self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { if let Some(payload) = self @@ -462,24 +475,62 @@ impl ExecutionBlockGenerator { let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - let mut execution_payload = ExecutionPayload { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: attributes.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: attributes.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: attributes.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + let mut execution_payload = match &attributes { + PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: 
pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + }), + _ => unreachable!(), + }, }; - execution_payload.block_hash = + *execution_payload.block_hash_mut() = ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); self.payload_ids.insert(id, execution_payload); @@ -566,6 +617,7 @@ mod test { TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, ); for i in 0..=TERMINAL_BLOCK { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 97c52357559..bda0c782dcc 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,25 +1,33 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; -use serde::de::DeserializeOwned; +use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; +use serde::{de::DeserializeOwned, Deserialize}; use serde_json::Value as JsonValue; use std::sync::Arc; -use types::EthSpec; +use types::{EthSpec, ForkName}; + +pub const GENERIC_ERROR_CODE: i64 = -1234; +pub const BAD_PARAMS_ERROR_CODE: i64 = -32602; +pub const UNKNOWN_PAYLOAD_ERROR_CODE: i64 = -38001; +pub const FORK_REQUEST_MISMATCH_ERROR_CODE: i64 = -32000; pub async fn handle_rpc( body: JsonValue, ctx: Arc>, -) -> Result { +) -> Result { *ctx.previous_request.lock() = Some(body.clone()); let method = body .get("method") .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid method field".to_string())?; + .ok_or_else(|| "missing/invalid method field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; let params = body .get("params") - .ok_or_else(|| "missing/invalid params field".to_string())?; + .ok_or_else(|| "missing/invalid params field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; match method { ETH_SYNCING => Ok(JsonValue::Bool(false)), @@ -27,7 +35,8 @@ pub async fn handle_rpc( let tag = params 
.get(0) .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid params[0] value".to_string())?; + .ok_or_else(|| "missing/invalid params[0] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; match tag { "latest" => Ok(serde_json::to_value( @@ -36,7 +45,10 @@ pub async fn handle_rpc( .latest_execution_block(), ) .unwrap()), - other => Err(format!("The tag {} is not supported", other)), + other => Err(( + format!("The tag {} is not supported", other), + BAD_PARAMS_ERROR_CODE, + )), } } ETH_GET_BLOCK_BY_HASH => { @@ -47,7 +59,8 @@ pub async fn handle_rpc( .and_then(|s| { s.parse() .map_err(|e| format!("unable to parse hash: {:?}", e)) - })?; + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; // If we have a static response set, just return that. if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { @@ -57,7 +70,8 @@ pub async fn handle_rpc( let full_tx = params .get(1) .and_then(JsonValue::as_bool) - .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + .ok_or_else(|| "missing/invalid params[1] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; if full_tx { Ok(serde_json::to_value( ctx.execution_block_generator @@ -74,18 +88,70 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 => { - let request: JsonExecutionPayloadV1 = get_param(params, 0)?; + ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { + let request = match method { + ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( + get_param::>(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ), + ENGINE_NEW_PAYLOAD_V2 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V2(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V1(jep)) + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + // TODO(4844) add that here.. + _ => unreachable!(), + }; + + let fork = ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*request.timestamp()); + // validate method called correctly according to shanghai fork time + match fork { + ForkName::Merge => { + if matches!(request, JsonExecutionPayload::V2(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } + ForkName::Capella => { + if method == ENGINE_NEW_PAYLOAD_V1 { + return Err(( + format!("{} called after Capella fork!", method), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V1(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after Capella fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), + }; // Canned responses set by block hash take priority. 
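The mock EL derives the fork from `shanghai_time` and rejects engine-API calls whose payload version does not match it, as in the checks above. A self-contained sketch of that gating; the error code mirrors the constant defined in this handler, everything else is simplified:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum ForkName { Merge, Capella }

const GENERIC_ERROR_CODE: i64 = -1234;

fn fork_at_timestamp(shanghai_time: Option<u64>, timestamp: u64) -> ForkName {
    match shanghai_time {
        Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
        _ => ForkName::Merge,
    }
}

/// `payload_version` is 1 for `ExecutionPayloadV1`, 2 for `ExecutionPayloadV2`.
fn validate_new_payload_version(
    payload_version: u8,
    shanghai_time: Option<u64>,
    timestamp: u64,
) -> Result<(), (String, i64)> {
    match fork_at_timestamp(shanghai_time, timestamp) {
        ForkName::Merge if payload_version == 2 => Err((
            "ExecutionPayloadV2 used before the Capella fork".to_string(),
            GENERIC_ERROR_CODE,
        )),
        ForkName::Capella if payload_version == 1 => Err((
            "ExecutionPayloadV1 used after the Capella fork".to_string(),
            GENERIC_ERROR_CODE,
        )),
        _ => Ok(()),
    }
}

fn main() {
    // Shanghai (Capella) activates at t = 100 in this toy setup.
    assert_eq!(fork_at_timestamp(Some(100), 99), ForkName::Merge);
    assert_eq!(fork_at_timestamp(Some(100), 100), ForkName::Capella);
    assert!(validate_new_payload_version(2, Some(100), 99).is_err());
    assert!(validate_new_payload_version(2, Some(100), 100).is_ok());
}
```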
- if let Some(status) = ctx.get_new_payload_status(&request.block_hash) { + if let Some(status) = ctx.get_new_payload_status(request.block_hash()) { return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); } let (static_response, should_import) = if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if response.status.status == PayloadStatusV1Status::Valid { - response.status.latest_valid_hash = Some(request.block_hash) + response.status.latest_valid_hash = Some(*request.block_hash()) } (Some(response.status), response.should_import) @@ -107,21 +173,140 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 => { - let request: JsonPayloadIdRequest = get_param(params, 0)?; + ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { + let request: JsonPayloadIdRequest = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); let response = ctx .execution_block_generator .write() .get_payload(&id) - .ok_or_else(|| format!("no payload for id {:?}", id))?; + .ok_or_else(|| { + ( + format!("no payload for id {:?}", id), + UNKNOWN_PAYLOAD_ERROR_CODE, + ) + })?; + + // validate method called correctly according to shanghai fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Capella + && method == ENGINE_GET_PAYLOAD_V1 + { + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + // TODO(4844) add 4844 error checking here - Ok(serde_json::to_value(JsonExecutionPayloadV1::from(response)).unwrap()) + match method { + ENGINE_GET_PAYLOAD_V1 => { + Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + } + ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V1(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV1 { + execution_payload, + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + }) + .unwrap() + } + JsonExecutionPayload::V2(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV2 { + execution_payload, + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + }) + .unwrap() + } + }), + _ => unreachable!(), + } } - ENGINE_FORKCHOICE_UPDATED_V1 => { - let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; - let payload_attributes: Option = get_param(params, 1)?; + ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { + let forkchoice_state: JsonForkchoiceStateV1 = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; + let payload_attributes = match method { + ENGINE_FORKCHOICE_UPDATED_V1 => { + let jpa1: Option = + get_param(params, 1).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; + jpa1.map(JsonPayloadAttributes::V1) + } + ENGINE_FORKCHOICE_UPDATED_V2 => { + // we can't use `deny_unknown_fields` without breaking compatibility with some + // clients that haven't updated to the latest engine_api spec. 
So instead we'll + // need to deserialize based on timestamp + get_param::>(params, 1) + .and_then(|pa| { + pa.and_then(|pa| { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V1)) + .transpose() + } + ForkName::Capella => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V2)) + .transpose() + } + _ => unreachable!(), + } + }) + .transpose() + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + } + _ => unreachable!(), + }; + + // validate method called correctly according to shanghai fork time + if let Some(pa) = payload_attributes.as_ref() { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + if matches!(pa, JsonPayloadAttributes::V2(_)) { + return Err(( + format!( + "{} called with `JsonPayloadAttributesV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } + ForkName::Capella => { + if method == ENGINE_FORKCHOICE_UPDATED_V1 { + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + if matches!(pa, JsonPayloadAttributes::V1(_)) { + return Err(( + format!( + "{} called with `JsonPayloadAttributesV1` after Capella fork!", + method + ), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), + }; + } if let Some(hook_response) = ctx .hook @@ -145,10 +330,11 @@ pub async fn handle_rpc( let mut response = ctx .execution_block_generator .write() - .forkchoice_updated_v1( + .forkchoice_updated( forkchoice_state.into(), payload_attributes.map(|json| json.into()), - )?; + ) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if status.status == PayloadStatusV1Status::Valid { @@ -169,9 +355,68 @@ pub async fn handle_rpc( }; Ok(serde_json::to_value(transition_config).unwrap()) } - other => Err(format!( - "The method {} does not exist/is not available", - other + ENGINE_EXCHANGE_CAPABILITIES => { + let engine_capabilities = ctx.engine_capabilities.read(); + Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) + } + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { + #[derive(Deserialize)] + #[serde(transparent)] + struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] pub u64); + + let start = get_param::(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + .0; + let count = get_param::(params, 1) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
+ .0; + + let mut response = vec![]; + for block_num in start..(start + count) { + let maybe_block = ctx + .execution_block_generator + .read() + .execution_block_with_txs_by_number(block_num); + + match maybe_block { + Some(block) => { + let transactions = Transactions::::new( + block + .transactions() + .iter() + .map(|transaction| VariableList::new(transaction.rlp().to_vec())) + .collect::>() + .map_err(|e| { + ( + format!("failed to deserialize transaction: {:?}", e), + GENERIC_ERROR_CODE, + ) + })?, + ) + .map_err(|e| { + ( + format!("failed to deserialize transactions: {:?}", e), + GENERIC_ERROR_CODE, + ) + })?; + + response.push(Some(JsonExecutionPayloadBodyV1:: { + transactions, + withdrawals: block + .withdrawals() + .ok() + .map(|withdrawals| VariableList::from(withdrawals.clone())), + })); + } + None => response.push(None), + } + } + + Ok(serde_json::to_value(response).unwrap()) + } + other => Err(( + format!("The method {} does not exist/is not available", other), + METHOD_NOT_FOUND_CODE, )), } } diff --git a/beacon_node/execution_layer/src/test_utils/hook.rs b/beacon_node/execution_layer/src/test_utils/hook.rs index a3748103e3e..4653811ac90 100644 --- a/beacon_node/execution_layer/src/test_utils/hook.rs +++ b/beacon_node/execution_layer/src/test_utils/hook.rs @@ -1,8 +1,8 @@ use crate::json_structures::*; type ForkChoiceUpdatedHook = dyn Fn( - JsonForkChoiceStateV1, - Option, + JsonForkchoiceStateV1, + Option, ) -> Option + Send + Sync; @@ -15,8 +15,8 @@ pub struct Hook { impl Hook { pub fn on_forkchoice_updated( &self, - state: JsonForkChoiceStateV1, - payload_attributes: Option, + state: JsonForkchoiceStateV1, + payload_attributes: Option, ) -> Option { (self.forkchoice_updated.as_ref()?)(state, payload_attributes) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index b8f74c1c93f..668d1fb3b1c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,17 +1,21 @@ -use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -use ethereum_consensus::crypto::{SecretKey, Signature}; -use ethereum_consensus::primitives::BlsPublicKey; pub use ethereum_consensus::state_transition::Context; +use ethereum_consensus::{ + crypto::{SecretKey, Signature}, + primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, + state_transition::Error, +}; use fork_choice::ForkchoiceUpdateParameters; -use mev_build_rs::{ +use mev_rs::{ + bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, - ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, - SignedValidatorRegistration, + SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -26,7 +30,8 @@ use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::{ - Address, 
BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, + Uint256, }; #[derive(Clone)] @@ -38,25 +43,129 @@ pub enum Operation { PrevRandao(Hash256), BlockNumber(usize), Timestamp(usize), + WithdrawalsRoot(Hash256), } impl Operation { - fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { match self { Operation::FeeRecipient(fee_recipient) => { - bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? + *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? } - Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, - Operation::Value(value) => bid.value = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, - Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, + Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, + Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, + Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, + Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, } Ok(()) } } +// contains functions we need for BuilderBids.. 
not sure what to call this +pub trait BidStuff { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; + fn gas_limit_mut(&mut self) -> &mut u64; + fn value_mut(&mut self) -> &mut U256; + fn parent_hash_mut(&mut self) -> &mut Hash32; + fn prev_randao_mut(&mut self) -> &mut Hash32; + fn block_number_mut(&mut self) -> &mut u64; + fn timestamp_mut(&mut self) -> &mut u64; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result; + + fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; +} + +impl BidStuff for BuilderBid { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { + match self { + Self::Bellatrix(bid) => &mut bid.header.fee_recipient, + Self::Capella(bid) => &mut bid.header.fee_recipient, + } + } + + fn gas_limit_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.gas_limit, + Self::Capella(bid) => &mut bid.header.gas_limit, + } + } + + fn value_mut(&mut self) -> &mut U256 { + match self { + Self::Bellatrix(bid) => &mut bid.value, + Self::Capella(bid) => &mut bid.value, + } + } + + fn parent_hash_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.parent_hash, + Self::Capella(bid) => &mut bid.header.parent_hash, + } + } + + fn prev_randao_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.prev_randao, + Self::Capella(bid) => &mut bid.header.prev_randao, + } + } + + fn block_number_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.block_number, + Self::Capella(bid) => &mut bid.header.block_number, + } + } + + fn timestamp_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.timestamp, + Self::Capella(bid) => &mut bid.header.timestamp, + } + } + + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { + match self { + Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( + "withdrawals_root called on bellatrix bid".to_string(), + )), + Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), + } + } + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result { + match self { + Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), + Self::Capella(message) => sign_builder_message(message, signing_key, context), + } + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + match self { + Self::Bellatrix(message) => { + SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) + } + Self::Capella(message) => { + SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + } + } + } +} + pub struct TestingBuilder { server: BlindedBlockProviderServer>, pub builder: MockBuilder, @@ -111,7 +220,10 @@ impl TestingBuilder { } pub async fn run(&self) { - self.server.run().await + let server = self.server.serve(); + if let Err(err) = server.await { + println!("error while listening for incoming: {err}") + } } } @@ -162,7 +274,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; @@ -172,7 +284,7 @@ impl MockBuilder { } 
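// --- Sketch (not part of the patch): the accessor-trait pattern behind
// --- `BidStuff`, reduced to toy types. One trait exposes `*_mut` accessors over
// --- every fork variant so test `Operation`s can mutate a bid without matching
// --- on the fork themselves.
struct BellatrixBid {
    value: u64,
}

struct CapellaBid {
    value: u64,
    withdrawals_root: [u8; 32],
}

enum Bid {
    Bellatrix(BellatrixBid),
    Capella(CapellaBid),
}

trait BidAccessors {
    fn value_mut(&mut self) -> &mut u64;
    fn withdrawals_root_mut(&mut self) -> Result<&mut [u8; 32], String>;
}

impl BidAccessors for Bid {
    fn value_mut(&mut self) -> &mut u64 {
        match self {
            Bid::Bellatrix(bid) => &mut bid.value,
            Bid::Capella(bid) => &mut bid.value,
        }
    }

    fn withdrawals_root_mut(&mut self) -> Result<&mut [u8; 32], String> {
        match self {
            // Bellatrix payloads have no withdrawals, so asking for the root is an error.
            Bid::Bellatrix(_) => Err("withdrawals_root called on bellatrix bid".to_string()),
            Bid::Capella(bid) => Ok(&mut bid.withdrawals_root),
        }
    }
}

// An "operation" can now be applied to any bid type implementing the trait.
fn apply_value_operation<B: BidAccessors>(bid: &mut B, new_value: u64) {
    *bid.value_mut() = new_value;
}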
#[async_trait] -impl mev_build_rs::BlindedBlockProvider for MockBuilder { +impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], @@ -200,6 +312,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { bid_request: &BidRequest, ) -> Result { let slot = Slot::new(bid_request.slot); + let fork = self.spec.fork_name_at_slot::(slot); let signed_cached_data = self .val_registration_cache .read() @@ -215,9 +328,13 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing head block"))?; - let block = head.data.message_merge().map_err(convert_err)?; + let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + let head_execution_hash = block + .body() + .execution_payload() + .map_err(convert_err)? + .block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { return Err(BlindedBlockProviderError::Custom(format!( "head mismatch: {} {}", @@ -232,12 +349,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let justified_execution_hash = self .beacon_client @@ -246,12 +362,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let val_index = self .beacon_client @@ -287,14 +402,22 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .get_randao_mix(head_state.current_epoch()) .map_err(convert_err)?; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao: *prev_randao, - suggested_fee_recipient: fee_recipient, + let payload_attributes = match fork { + ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), + // the withdrawals root is filled in by operations + ForkName::Capella => { + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) + } + ForkName::Base | ForkName::Altair => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))); + } }; self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) .await; let forkchoice_update_params = ForkchoiceUpdateParameters { @@ -308,54 +431,64 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .el .get_full_payload_caching::>( head_execution_hash, - timestamp, - *prev_randao, - fee_recipient, + &payload_attributes, forkchoice_update_params, + fork, ) .await .map_err(convert_err)? 
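// --- Sketch (not part of the patch): the per-fork payload-attributes choice the
// --- mock builder makes above, with stand-in types instead of
// --- `PayloadAttributes::new` and the real withdrawal types.
struct Attrs {
    timestamp: u64,
    withdrawals: Option<Vec<u64>>,
}

#[derive(Debug, Clone, Copy)]
enum BuilderFork {
    Base,
    Altair,
    Merge,
    Capella,
}

fn attrs_for_fork(fork: BuilderFork, timestamp: u64) -> Result<Attrs, String> {
    match fork {
        // Bellatrix ("Merge") payloads carry no withdrawals at all.
        BuilderFork::Merge => Ok(Attrs { timestamp, withdrawals: None }),
        // Capella payloads always carry a withdrawals list; the mock starts with
        // an empty one and lets test `Operation`s fill in the root later.
        BuilderFork::Capella => Ok(Attrs { timestamp, withdrawals: Some(vec![]) }),
        // There is no execution payload to build before the merge.
        BuilderFork::Base | BuilderFork::Altair => {
            Err(format!("unsupported fork: {:?}", fork))
        }
    }
}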
+ .to_payload() .to_execution_payload_header(); let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut header: ServerPayloadHeader = - serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; - - header.gas_limit = cached_data.gas_limit; - - let mut message = BuilderBid { - header, - value: ssz_rs::U256::default(), - public_key: self.builder_sk.public_key(), + let mut message = match fork { + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Base | ForkName::Altair => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))) + } }; + *message.gas_limit_mut() = cached_data.gas_limit; self.apply_operations(&mut message)?; - let mut signature = - sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; if *self.invalidate_signatures.read() { signature = Signature::default(); } - let signed_bid = SignedBuilderBid { message, signature }; - Ok(signed_bid) + Ok(message.to_signed_bid(signature)) } async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, ) -> Result { + let node = match signed_block { + SignedBlindedBeaconBlock::Bellatrix(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + } + .map_err(convert_err)?; + let payload = self .el - .get_payload_by_root(&from_ssz_rs( - &signed_block - .message - .body - .execution_payload_header - .hash_tree_root() - .map_err(convert_err)?, - )?) + .get_payload_by_root(&from_ssz_rs(&node)?) 
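// --- Sketch (not part of the patch): the variant match used above to derive the
// --- payload-cache key from a signed blinded block, with toy types in place of
// --- the mev-rs/SSZ ones.
enum ToySignedBlindedBlock {
    Bellatrix { payload_header_root: [u8; 32] },
    Capella { payload_header_root: [u8; 32] },
}

// In the real code each variant computes `execution_payload_header.hash_tree_root()`;
// that root is what `get_payload_by_root` is keyed on.
fn payload_cache_key(block: &ToySignedBlindedBlock) -> [u8; 32] {
    match block {
        ToySignedBlindedBlock::Bellatrix { payload_header_root }
        | ToySignedBlindedBlock::Capella { payload_header_root } => *payload_header_root,
    }
}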
.ok_or_else(|| convert_err("missing payload for tx root"))?; let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index e9d4b2121be..2b512d8b1c2 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -9,7 +9,7 @@ use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; -use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; +use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, MainnetEthSpec}; pub struct MockExecutionLayer { pub server: MockServer, @@ -20,40 +20,41 @@ pub struct MockExecutionLayer { impl MockExecutionLayer { pub fn default_params(executor: TaskExecutor) -> Self { + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into(); + spec.terminal_block_hash = ExecutionBlockHash::zero(); + spec.terminal_block_hash_activation_epoch = Epoch::new(0); Self::new( executor, - DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - ExecutionBlockHash::zero(), - Epoch::new(0), + None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec, None, ) } + #[allow(clippy::too_many_arguments)] pub fn new( executor: TaskExecutor, - terminal_total_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, - terminal_block_hash_activation_epoch: Epoch, + shanghai_time: Option, + builder_threshold: Option, jwt_key: Option, + spec: ChainSpec, builder_url: Option, ) -> Self { let handle = executor.handle().unwrap(); - let mut spec = T::default_spec(); - spec.terminal_total_difficulty = terminal_total_difficulty; - spec.terminal_block_hash = terminal_block_hash; - spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; - let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let server = MockServer::new( &handle, jwt_key, - terminal_total_difficulty, + spec.terminal_total_difficulty, terminal_block, - terminal_block_hash, + spec.terminal_block_hash, + shanghai_time, ); let url = SensitiveUrl::parse(&server.url()).unwrap(); @@ -67,7 +68,7 @@ impl MockExecutionLayer { builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), - builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, + builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), ..Default::default() }; let el = @@ -98,21 +99,19 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + // FIXME: think about how to handle different forks / withdrawals here.. + None, + ); // Insert a proposer to ensure the fork choice updated command works. 
let slot = Slot::new(0); let validator_index = 0; self.el - .insert_proposer( - slot, - head_block_root, - validator_index, - PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient: Address::repeat_byte(42), - }, - ) + .insert_proposer(slot, head_block_root, validator_index, payload_attributes) .await; self.el @@ -132,25 +131,30 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; - let payload = self + let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); + let payload: ExecutionPayload = self .el .get_payload::>( parent_hash, - timestamp, - prev_randao, - validator_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload; - let block_hash = payload.block_hash; - assert_eq!(payload.parent_hash, parent_hash); - assert_eq!(payload.block_number, block_number); - assert_eq!(payload.timestamp, timestamp); - assert_eq!(payload.prev_randao, prev_randao); + .to_payload() + .into(); + + let block_hash = payload.block_hash(); + assert_eq!(payload.parent_hash(), parent_hash); + assert_eq!(payload.block_number(), block_number); + assert_eq!(payload.timestamp(), timestamp); + assert_eq!(payload.prev_randao(), prev_randao); // Ensure the payload cache is empty. assert!(self @@ -162,25 +166,29 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let payload_header = self .el .get_payload::>( parent_hash, - timestamp, - prev_randao, - validator_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload_header; - assert_eq!(payload_header.block_hash, block_hash); - assert_eq!(payload_header.parent_hash, parent_hash); - assert_eq!(payload_header.block_number, block_number); - assert_eq!(payload_header.timestamp, timestamp); - assert_eq!(payload_header.prev_randao, prev_randao); + .to_payload(); + + assert_eq!(payload_header.block_hash(), block_hash); + assert_eq!(payload_header.parent_hash(), parent_hash); + assert_eq!(payload_header.block_number(), block_number); + assert_eq!(payload_header.timestamp(), timestamp); + assert_eq!(payload_header.prev_randao(), prev_randao); // Ensure the payload cache has the correct payload. 
assert_eq!( diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f18ecbe6226..9379a3c2389 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,6 +22,7 @@ use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; +use crate::EngineCapabilities; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; @@ -31,6 +32,19 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; +pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; +pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; +pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: true, + forkchoice_updated_v1: true, + forkchoice_updated_v2: true, + get_payload_bodies_by_hash_v1: true, + get_payload_bodies_by_range_v1: true, + get_payload_v1: true, + get_payload_v2: true, + exchange_transition_configuration_v1: true, +}; mod execution_block_generator; mod handle_rpc; @@ -45,6 +59,7 @@ pub struct MockExecutionConfig { pub terminal_difficulty: Uint256, pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, + pub shanghai_time: Option, } impl Default for MockExecutionConfig { @@ -55,6 +70,7 @@ impl Default for MockExecutionConfig { terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), + shanghai_time: None, } } } @@ -74,6 +90,7 @@ impl MockServer { DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, // FIXME(capella): should this be the default? 
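// --- Sketch (not part of the patch): how a capabilities struct like
// --- `DEFAULT_ENGINE_CAPABILITIES` can be flattened into the method list that
// --- engine_exchangeCapabilities returns. Field names here are illustrative and
// --- abbreviated; the real conversion is `EngineCapabilities::to_response`.
struct Capabilities {
    new_payload_v1: bool,
    new_payload_v2: bool,
    forkchoice_updated_v1: bool,
    forkchoice_updated_v2: bool,
    get_payload_bodies_by_range_v1: bool,
}

impl Capabilities {
    fn to_response(&self) -> Vec<&'static str> {
        let mut methods = Vec::new();
        if self.new_payload_v1 {
            methods.push("engine_newPayloadV1");
        }
        if self.new_payload_v2 {
            methods.push("engine_newPayloadV2");
        }
        if self.forkchoice_updated_v1 {
            methods.push("engine_forkchoiceUpdatedV1");
        }
        if self.forkchoice_updated_v2 {
            methods.push("engine_forkchoiceUpdatedV2");
        }
        if self.get_payload_bodies_by_range_v1 {
            methods.push("engine_getPayloadBodiesByRangeV1");
        }
        methods
    }
}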
) } @@ -84,11 +101,16 @@ impl MockServer { terminal_block, terminal_block_hash, server_config, + shanghai_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); - let execution_block_generator = - ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); + let execution_block_generator = ExecutionBlockGenerator::new( + terminal_difficulty, + terminal_block, + terminal_block_hash, + shanghai_time, + ); let ctx: Arc> = Arc::new(Context { config: server_config, @@ -104,6 +126,7 @@ impl MockServer { hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), + engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)), _phantom: PhantomData, }); @@ -134,12 +157,17 @@ impl MockServer { } } + pub fn set_engine_capabilities(&self, engine_capabilities: EngineCapabilities) { + *self.ctx.engine_capabilities.write() = engine_capabilities; + } + pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, terminal_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, ) -> Self { Self::new_with_config( handle, @@ -149,6 +177,7 @@ impl MockServer { terminal_difficulty, terminal_block, terminal_block_hash, + shanghai_time, }, ) } @@ -452,6 +481,7 @@ pub struct Context { pub new_payload_statuses: Arc>>, pub fcu_payload_statuses: Arc>>, + pub engine_capabilities: Arc>, pub _phantom: PhantomData, } @@ -603,11 +633,11 @@ pub fn serve( "jsonrpc": JSONRPC_VERSION, "result": result }), - Err(message) => json!({ + Err((message, code)) => json!({ "id": id, "jsonrpc": JSONRPC_VERSION, "error": { - "code": -1234, // Junk error code. + "code": code, "message": message } }), diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index d8c25baec80..122ca8eda6b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -10,6 +10,20 @@ use types::{ pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; +pub fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) +} + +fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let fake_execution_address = &hash(&pubkey.as_ssz_bytes())[0..20]; + let mut credentials = [0u8; 32]; + credentials[0] = spec.eth1_address_withdrawal_prefix_byte; + credentials[12..].copy_from_slice(fake_execution_address); + Hash256::from_slice(&credentials) +} + /// Builds a genesis state as defined by the Eth2 interop procedure (see below). 
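// --- Sketch (not part of the patch): the two interop withdrawal-credential
// --- layouts built by the helpers above, with a stand-in `toy_hash` function in
// --- place of the real SSZ encoding plus sha256.
fn toy_hash(bytes: &[u8]) -> [u8; 32] {
    // Stand-in only: the real helpers hash the SSZ-encoded BLS pubkey.
    let mut out = [0u8; 32];
    for (i, b) in bytes.iter().enumerate() {
        out[i % 32] ^= *b;
    }
    out
}

const BLS_WITHDRAWAL_PREFIX: u8 = 0x00;
const ETH1_ADDRESS_WITHDRAWAL_PREFIX: u8 = 0x01;

// 0x00 credentials: prefix byte followed by the remaining bytes of hash(pubkey).
fn bls_credentials(pubkey: &[u8]) -> [u8; 32] {
    let mut creds = toy_hash(pubkey);
    creds[0] = BLS_WITHDRAWAL_PREFIX;
    creds
}

// 0x01 credentials: prefix byte, 11 zero bytes, then a 20-byte execution
// address. The interop helper fakes that address from the pubkey hash, which is
// what the genesis test asserts on.
fn eth1_credentials(pubkey: &[u8]) -> [u8; 32] {
    let mut creds = [0u8; 32];
    creds[0] = ETH1_ADDRESS_WITHDRAWAL_PREFIX;
    creds[12..].copy_from_slice(&toy_hash(pubkey)[0..20]);
    creds
}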
/// /// Reference: @@ -21,20 +35,75 @@ pub fn interop_genesis_state( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { + let withdrawal_credentials = keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec)) + .collect::>(); + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, + eth1_block_hash, + execution_payload_header, + spec, + ) +} + +// returns an interop genesis state except every other +// validator has eth1 withdrawal credentials +pub fn interop_genesis_state_with_eth1( + keypairs: &[Keypair], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + let withdrawal_credentials = keypairs + .iter() + .enumerate() + .map(|(index, keypair)| { + if index % 2 == 0 { + bls_withdrawal_credentials(&keypair.pk, spec) + } else { + eth1_withdrawal_credentials(&keypair.pk, spec) + } + }) + .collect::>(); + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, + eth1_block_hash, + execution_payload_header, + spec, + ) +} + +pub fn interop_genesis_state_with_withdrawal_credentials( + keypairs: &[Keypair], + withdrawal_credentials: &[Hash256], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + if keypairs.len() != withdrawal_credentials.len() { + return Err(format!( + "wrong number of withdrawal credentials, expected: {}, got: {}", + keypairs.len(), + withdrawal_credentials.len() + )); + } + let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - let withdrawal_credentials = |pubkey: &PublicKey| { - let mut credentials = hash(&pubkey.as_ssz_bytes()); - credentials[0] = spec.bls_withdrawal_prefix_byte; - Hash256::from_slice(&credentials) - }; - let datas = keypairs .into_par_iter() - .map(|keypair| { + .zip(withdrawal_credentials.into_par_iter()) + .map(|(keypair, &withdrawal_credentials)| { let mut data = DepositData { - withdrawal_credentials: withdrawal_credentials(&keypair.pk), + withdrawal_credentials, pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty().into(), @@ -133,4 +202,83 @@ mod test { "validator count should be correct" ); } + + #[test] + fn interop_state_with_eth1() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state_with_eth1::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .expect("should build state"); + + assert_eq!( + state.eth1_data().block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time(), + genesis_time, + "genesis time should be as specified" + ); + + for b in state.balances() { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for (index, v) in state.validators().iter().enumerate() { + let creds = v.withdrawal_credentials.as_bytes(); + if index % 2 == 0 { + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ); + } else { + assert_eq!( + creds[0], 
spec.eth1_address_withdrawal_prefix_byte, + "first byte of withdrawal creds should be eth1 prefix" + ); + assert_eq!( + creds[1..12], + [0u8; 11], + "bytes [1:12] of withdrawal creds must be zero" + ); + assert_eq!( + &creds[12..], + &hash(&v.pubkey.as_ssz_bytes())[0..20], + "rest of withdrawal creds should be first 20 bytes of pubkey hash" + ) + } + } + + assert_eq!( + state.balances().len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators().len(), + validator_count, + "validator count should be correct" + ); + } } diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 1233d99fd31..3fb053bf880 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -5,5 +5,8 @@ mod interop; pub use eth1::Config as Eth1Config; pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; -pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use interop::{ + bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, + interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH, +}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index da8331b7ad8..a871e0c35f4 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,15 +36,18 @@ tree_hash = { version = "0.4.1", path = "../../consensus/tree_hash" } sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } +eth2_serde_utils = { version = "0.1.1", path = "../../consensus/serde_utils" } +operation_pool = { path = "../operation_pool" } +sensitive_url = { path = "../../common/sensitive_url" } +unused_port = {path = "../../common/unused_port"} +logging = { path = "../../common/logging" } +store = { path = "../store" } [dev-dependencies] -store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -sensitive_url = { path = "../../common/sensitive_url" } -logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } -unused_port = {path = "../../common/unused_port"} +genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index ca68d4d04cc..3e7d8d5e316 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -77,8 +77,8 @@ pub fn get_attestation_performance( // query is within permitted bounds to prevent potential OOM errors. if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { return Err(custom_bad_request(format!( - "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}", - query.start_epoch, query.end_epoch + "end_epoch must not exceed start_epoch by more than {} epochs. 
start: {}, end: {}", + MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch ))); } diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9febae5b197..5c3e420839d 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -114,8 +114,10 @@ fn compute_historic_attester_duties( )?; (state, execution_optimistic) } else { - StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) - .state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)?; + (state, execution_optimistic) }; // Sanity-check the state lookup. diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 5c785fe6517..f1a42b87442 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -4,13 +4,15 @@ use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. #[derive(Debug)] pub struct BlockId(pub CoreBlockId); +type Finalized = bool; + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -24,7 +26,7 @@ impl BlockId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -34,22 +36,23 @@ impl BlockId { Ok(( cached_head.head_block_root(), execution_status.is_optimistic_or_invalid(), + false, )) } - CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false, true)), CoreBlockId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; - Ok((finalized_checkpoint.root, execution_optimistic)) + Ok((finalized_checkpoint.root, execution_optimistic, true)) } CoreBlockId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; - Ok((justified_checkpoint.root, execution_optimistic)) + Ok((justified_checkpoint.root, execution_optimistic, false)) } CoreBlockId::Slot(slot) => { let execution_optimistic = chain @@ -66,7 +69,14 @@ impl BlockId { )) }) })?; - Ok((root, execution_optimistic)) + let finalized = *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + Ok((root, execution_optimistic, finalized)) } CoreBlockId::Root(root) => { // This matches the behaviour of other consensus clients (e.g. Teku). @@ -88,7 +98,20 @@ impl BlockId { .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - Ok((*root, execution_optimistic)) + let blinded_block = chain + .get_blinded_block(root) + .map_err(warp_utils::reject::beacon_chain_error)? 
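// --- Sketch (not part of the patch): the slot-based check behind the new
// --- `finalized` flag, with plain integers standing in for `Slot`/`Epoch`.
const SLOTS_PER_EPOCH: u64 = 32;

// A block is reported as finalized when its slot is at or before the first slot
// of the finalized checkpoint's epoch: the canonical chain up to that point is
// final, so any canonical block there is too.
fn is_finalized_slot(block_slot: u64, finalized_epoch: u64) -> bool {
    block_slot <= finalized_epoch * SLOTS_PER_EPOCH
}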
+ .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + })?; + let block_slot = blinded_block.slot(); + let finalized = chain + .is_finalized_block(root, block_slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic, finalized)) } else { Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -103,7 +126,14 @@ impl BlockId { pub fn blinded_block( &self, chain: &BeaconChain, - ) -> Result<(SignedBlindedBeaconBlock, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + SignedBlindedBeaconBlock, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -113,10 +143,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -128,7 +159,7 @@ impl BlockId { slot ))); } - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -137,7 +168,7 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -149,7 +180,7 @@ impl BlockId { )) }) })?; - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } } } @@ -158,7 +189,14 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result<(Arc>, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + Arc>, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -168,10 +206,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await @@ -184,7 +223,7 @@ impl BlockId { slot ))); } - Ok((Arc::new(block), execution_optimistic)) + Ok((Arc::new(block), execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -193,14 +232,14 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| { block_opt - .map(|block| (Arc::new(block), execution_optimistic)) + .map(|block| (Arc::new(block), execution_optimistic, finalized)) .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 05886a4d023..828be8e5760 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -4,7 +4,7 @@ use lru::LruCache; use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; -use 
types::BlindedBeaconBlock; +use types::beacon_block::BlindedBeaconBlock; use warp_utils::reject::{ beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, }; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6cfdaf5db6a..d19187cb44e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1,4 +1,3 @@ -#![recursion_limit = "256"] //! This crate contains a HTTP server which serves the endpoints listed here: //! //! https://github.com/ethereum/beacon-APIs @@ -15,8 +14,11 @@ mod database; mod metrics; mod proposer_duties; mod publish_blocks; +mod standard_block_rewards; mod state_id; +mod sync_committee_rewards; mod sync_committees; +pub mod test_utils; mod ui; mod validator_inclusion; mod version; @@ -29,12 +31,15 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, + self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SkipRandaoVerification, + ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; +use publish_blocks::ProvenancedBlock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -51,15 +56,15 @@ use system_health::observe_system_health_bn; use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, - CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, + Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, + BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use version::{ - add_consensus_version_header, execution_optimistic_fork_versioned_response, + add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; @@ -68,7 +73,8 @@ use warp::Reply; use warp::{http::Response, Filter}; use warp_utils::{ query::multi_key_query, - task::{blocking_json_task, blocking_task}, + task::{blocking_json_task, blocking_response_task}, + uor::UnifyingOrFilter, }; const API_PREFIX: &str = "eth"; @@ -517,12 +523,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = state_id.root(&chain)?; - + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) - .map(|resp| 
resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -533,11 +540,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (fork, execution_optimistic) = - state_id.fork_and_execution_optimistic(&chain)?; - Ok(api_types::ExecutionOptimisticResponse { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(api_types::ExecutionOptimisticFinalizedResponse { data: fork, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -549,23 +557,26 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic( - &chain, - |state, execution_optimistic| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - )) - }, - )?; + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -582,10 +593,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { Ok(( state .validators() @@ -613,13 +624,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -637,10 +650,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = state.current_epoch(); let far_future_epoch = chain.spec.far_future_epoch; @@ -690,13 +703,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -715,10 +730,10 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let 
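// --- Sketch (not part of the patch): the shape of the extended response wrapper
// --- these endpoints now return, carrying the existing `execution_optimistic`
// --- flag plus the new `finalized` flag alongside the payload. Simplified; the
// --- real type lives in the `eth2` API types crate.
struct ToyExecutionOptimisticFinalizedResponse<T> {
    execution_optimistic: Option<bool>,
    finalized: Option<bool>,
    data: T,
}

fn wrap<T>(
    data: T,
    execution_optimistic: bool,
    finalized: bool,
) -> ToyExecutionOptimisticFinalizedResponse<T> {
    ToyExecutionOptimisticFinalizedResponse {
        execution_optimistic: Some(execution_optimistic),
        finalized: Some(finalized),
        data,
    }
}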
(data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { state.validators().iter().position(|v| v.pubkey == *pubkey) @@ -752,13 +767,15 @@ pub fn serve( )) })?, execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -773,46 +790,119 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); - let committee_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - Ok(relative_epoch) - if state - .committee_cache_is_initialized(relative_epoch) => - { - state.committee_cache(relative_epoch).map(Cow::Borrowed) - } - _ => CommitteeCache::initialized(state, epoch, &chain.spec) + // Attempt to obtain the committee_cache from the beacon chain + let decision_slot = (epoch.saturating_sub(2u64)) + .end_slot(T::EthSpec::slots_per_epoch()); + // Find the decision block and skip to another method on any kind + // of failure + let shuffling_id = if let Ok(Some(shuffling_decision_block)) = + chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + { + Some(AttestationShufflingId { + shuffling_epoch: epoch, + shuffling_decision_block, + }) + } else { + None + }; + + // Attempt to read from the chain cache if there exists a + // shuffling_id + let maybe_cached_shuffling = if let Some(shuffling_id) = + shuffling_id.as_ref() + { + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + .and_then(|mut cache_write| cache_write.get(shuffling_id)) + .and_then(|cache_item| cache_item.wait().ok()) + } else { + None + }; + + let committee_cache = if let Some(ref shuffling) = + maybe_cached_shuffling + { + Cow::Borrowed(&**shuffling) + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state + .committee_cache(relative_epoch) + .map(Cow::Borrowed) + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ) .map(Cow::Owned), - } - .map_err(|e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() as u64; - let first_subsequent_restore_point_slot = ((epoch - .start_slot(T::EthSpec::slots_per_epoch()) - / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request(format!( - "epoch out of bounds, try state at slot {}", - first_subsequent_restore_point_slot, - )) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, too far in future".into(), - ) + } + .map_err(|e| { + match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), 
+ ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( + "epoch out of bounds, \ + try state at slot {}", + first_subsequent_restore_point_slot, + ), + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ + too far in future" + .into(), + ) + } + } + _ => { + warp_utils::reject::beacon_chain_error(e.into()) + } + } + })?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default value). + if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + { + if let Some(shuffling_id) = shuffling_id { + if let Some(mut cache_write) = chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + { + cache_write.insert_committee_cache( + shuffling_id, + &*possibly_built_cache, + ); } } - _ => warp_utils::reject::beacon_chain_error(e.into()), - })?; + } + possibly_built_cache + }; // Use either the supplied slot or all slots in the epoch. let slots = @@ -859,12 +949,13 @@ pub fn serve( } } - Ok((response, execution_optimistic)) + Ok((response, execution_optimistic, finalized)) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -881,10 +972,10 @@ pub fn serve( chain: Arc>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let (sync_committee, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); Ok(( @@ -894,9 +985,10 @@ pub fn serve( .map_err(|e| match e { BeaconStateError::SyncCommitteeNotKnown { .. 
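// --- Sketch (not part of the patch): the "check the shared cache, else build
// --- and write back" pattern used for the committee cache above, with toy types
// --- in place of the shuffling cache and `CommitteeCache`.
use std::borrow::Cow;
use std::collections::HashMap;

type ToyShufflingId = (u64, [u8; 32]); // (epoch, decision block root)

fn committees<'a>(
    cache: &'a mut HashMap<ToyShufflingId, Vec<u64>>,
    id: ToyShufflingId,
    build: impl Fn() -> Vec<u64>,
) -> Cow<'a, Vec<u64>> {
    if cache.contains_key(&id) {
        // Borrow the cached shuffling if another request already computed it.
        Cow::Borrowed(&cache[&id])
    } else {
        // Otherwise build it locally and write it back for later requests.
        let built = build();
        cache.insert(id, built.clone());
        Cow::Owned(built)
    }
}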
} => { warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no sync committee for epoch {}", - current_epoch, epoch - )) + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) } BeaconStateError::IncorrectStateVariant => { warp_utils::reject::custom_bad_request(format!( @@ -907,6 +999,7 @@ pub fn serve( e => warp_utils::reject::beacon_state_error(e), })?, execution_optimistic, + finalized, )) }, )?; @@ -928,7 +1021,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -942,23 +1035,23 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { blocking_json_task(move || { - let (randao, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); let randao = *state.get_randao_mix(epoch).map_err(|e| { warp_utils::reject::custom_bad_request(format!( "epoch out of range: {e:?}" )) })?; - Ok((randao, execution_optimistic)) + Ok((randao, execution_optimistic, finalized)) }, )?; Ok( api_types::GenericResponse::from(api_types::RandaoMix { randao }) - .add_execution_optimistic(execution_optimistic), + .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }, @@ -980,72 +1073,73 @@ pub fn serve( .and_then( |query: api_types::HeadersQuery, chain: Arc>| { blocking_json_task(move || { - let (root, block, execution_optimistic) = match (query.slot, query.parent_root) - { - // No query parameters, return the canonical head block. - (None, None) => { - let (cached_head, execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; - ( - cached_head.head_block_root(), - cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic_or_invalid(), - ) - } - // Only the parent root parameter, do a forwards-iterator lookup. - (None, Some(parent_root)) => { - let (parent, execution_optimistic) = - BlockId::from_root(parent_root).blinded_block(&chain)?; - let (root, _slot) = chain - .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::beacon_chain_error)? - // Ignore any skip-slots immediately following the parent. - .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) - }) - .transpose() - .map_err(warp_utils::reject::beacon_chain_error)? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "child of block with root {}", - parent_root - )) - })?; + let (root, block, execution_optimistic, finalized) = + match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + false, + ) + } + // Only the parent root parameter, do a forwards-iterator lookup. 
+ (None, Some(parent_root)) => { + let (parent, execution_optimistic, _parent_finalized) = + BlockId::from_root(parent_root).blinded_block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; - BlockId::from_root(root) - .blinded_block(&chain) - // Ignore this `execution_optimistic` since the first value has - // more information about the original request. - .map(|(block, _execution_optimistic)| { - (root, block, execution_optimistic) - })? - } - // Slot is supplied, search by slot and optionally filter by - // parent root. - (Some(slot), parent_root_opt) => { - let (root, execution_optimistic) = - BlockId::from_slot(slot).root(&chain)?; - // Ignore the second `execution_optimistic`, the first one is the - // most relevant since it knows that we queried by slot. - let (block, _execution_optimistic) = - BlockId::from_root(root).blinded_block(&chain)?; - - // If the parent root was supplied, check that it matches the block - // obtained via a slot lookup. - if let Some(parent_root) = parent_root_opt { - if block.parent_root() != parent_root { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } + BlockId::from_root(root) + .blinded_block(&chain) + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic, finalized)| { + (root, block, execution_optimistic, finalized) + })? } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let (root, execution_optimistic, finalized) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. + if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } - (root, block, execution_optimistic) - } - }; + (root, block, execution_optimistic, finalized) + } + }; let data = api_types::BlockHeaderData { root, @@ -1057,7 +1151,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(vec![data]) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -1075,10 +1169,10 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = block_id.root(&chain)?; + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; // Ignore the second `execution_optimistic` since the first one has more // information about the original request. 
- let (block, _execution_optimistic) = + let (block, _execution_optimistic, _finalized) = BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain @@ -1095,8 +1189,9 @@ pub fn serve( }, }; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) }) @@ -1120,9 +1215,15 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_block(None, block, chain, &network_tx, log) - .await - .map(|()| warp::reply()) + publish_blocks::publish_block( + None, + ProvenancedBlock::Local(block), + chain, + &network_tx, + log, + ) + .await + .map(|()| warp::reply().into_response()) }, ); @@ -1146,7 +1247,7 @@ pub fn serve( log: Logger| async move { publish_blocks::publish_blinded_block(block, chain, &network_tx, log) .await - .map(|()| warp::reply()) + .map(|()| warp::reply().into_response()) }, ); @@ -1179,7 +1280,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { async move { - let (block, execution_optimistic) = block_id.full_block(&chain).await?; + let (block, execution_optimistic, finalized) = + block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1195,10 +1297,11 @@ pub fn serve( e )) }), - _ => execution_optimistic_fork_versioned_response( + _ => execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()), @@ -1215,12 +1318,11 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok(api_types::GenericResponse::from(api_types::RootData::from( block.canonical_root(), )) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }); @@ -1231,11 +1333,10 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok( api_types::GenericResponse::from(block.message().body().attestations().clone()) - .add_execution_optimistic(execution_optimistic), + .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }); @@ -1252,8 +1353,9 @@ pub fn serve( |block_id: BlockId, chain: Arc>, accept_header: Option| { - blocking_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + blocking_response_task(move || { + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1271,10 +1373,11 @@ pub fn serve( }), _ => { // Post as a V2 endpoint so we return the fork version. 
- execution_optimistic_fork_versioned_response( + execution_optimistic_finalized_fork_versioned_response( V2, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()) @@ -1652,6 +1755,109 @@ pub fn serve( }, ); + // GET beacon/pool/bls_to_execution_changes + let get_beacon_pool_bls_to_execution_changes = beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(api_types::GenericResponse::from(address_changes)) + }) + }); + + // POST beacon/pool/bls_to_execution_changes + let post_beacon_pool_bls_to_execution_changes = beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + address_changes: Vec, + network_tx: UnboundedSender>, + log: Logger| { + blocking_json_task(move || { + let mut failures = vec![]; + + for (index, address_change) in address_changes.into_iter().enumerate() { + let validator_index = address_change.message.validator_index; + + match chain.verify_bls_to_execution_change_for_http_api(address_change) { + Ok(ObservationOutcome::New(verified_address_change)) => { + let validator_index = + verified_address_change.as_inner().message.validator_index; + let address = verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { + publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). 
+ let imported = + chain.import_bls_to_execution_change(verified_address_change, received_pre_capella); + + info!( + log, + "Processed BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + "published" => matches!(received_pre_capella, ReceivedPreCapella::No), + "imported" => imported, + ); + } + Ok(ObservationOutcome::AlreadyKnown) => { + debug!( + log, + "BLS to execution change already known"; + "validator_index" => validator_index, + ); + } + Err(e) => { + warn!( + log, + "Invalid BLS to execution change"; + "validator_index" => validator_index, + "reason" => ?e, + "source" => "HTTP", + ); + failures.push(api_types::Failure::new( + index, + format!("invalid: {e:?}"), + )); + } + } + } + + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "some BLS to execution changes failed to verify".into(), + failures, + )) + } + }) + }, + ); + // GET beacon/deposit_snapshot let get_beacon_deposit_snapshot = eth_v1 .and(warp::path("beacon")) @@ -1661,7 +1867,7 @@ pub fn serve( .and(eth1_service_filter.clone()) .and_then( |accept_header: Option, eth1_service: eth1::Service| { - blocking_task(move || match accept_header { + blocking_response_task(move || match accept_header { Some(api_types::Accept::Json) | None => { let snapshot = eth1_service.get_deposit_snapshot(); Ok( @@ -1699,6 +1905,118 @@ pub fn serve( }, ); + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // GET beacon/rewards/blocks/{block_id} + let get_beacon_rewards_blocks = beacon_rewards_path + .clone() + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(warp::path::end()) + .and_then(|chain: Arc>, block_id: BlockId| { + blocking_json_task(move || { + let (rewards, execution_optimistic, finalized) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }); + + /* + * beacon/rewards + */ + + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // POST beacon/rewards/attestations/{epoch} + let post_beacon_rewards_attestations = beacon_rewards_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + epoch: Epoch, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let attestation_rewards = chain + .compute_attestation_rewards(epoch, validators, log) + .map_err(|e| match e { + BeaconChainError::MissingBeaconState(root) => { + warp_utils::reject::custom_not_found(format!( + "missing state {root:?}", + )) + } + BeaconChainError::NoStateForSlot(slot) => { + warp_utils::reject::custom_not_found(format!( + "missing state at slot {slot}" + )) + } + BeaconChainError::BeaconStateError( + BeaconStateError::UnknownValidator(validator_index), + ) => warp_utils::reject::custom_bad_request(format!( + "validator is unknown: {validator_index}" + )), + BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { + warp_utils::reject::custom_bad_request(format!( + "validator pubkey is unknown: {pubkey:?}" + )) + } + e => warp_utils::reject::custom_server_error(format!( + "unexpected error: {:?}", + e + )), + })?; + let execution_optimistic = + chain.is_optimistic_or_invalid_head().unwrap_or_default(); + + 
Ok(attestation_rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }, + ); + + // POST beacon/rewards/sync_committee/{block_id} + let post_beacon_rewards_sync_committee = beacon_rewards_path + .clone() + .and(warp::path("sync_committee")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + block_id: BlockId, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let (rewards, execution_optimistic, finalized) = + sync_committee_rewards::compute_sync_committee_rewards( + chain, block_id, validators, log, + )?; + + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ); + /* * config */ @@ -1772,12 +2090,12 @@ pub fn serve( state_id: StateId, accept_header: Option, chain: Arc>| { - blocking_task(move || match accept_header { + blocking_response_task(move || match accept_header { Some(api_types::Accept::Ssz) => { // We can ignore the optimistic status for the "fork" since it's a // specification constant that doesn't change across competing heads of the // beacon chain. - let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1785,7 +2103,9 @@ pub fn serve( .status(200) .header("Content-Type", "application/octet-stream") .body(state.as_ssz_bytes().into()) - .map(|resp| add_consensus_version_header(resp, fork_name)) + .map(|resp: warp::reply::Response| { + add_consensus_version_header(resp, fork_name) + }) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -1793,16 +2113,17 @@ pub fn serve( )) }) } - _ => state_id.map_state_and_execution_optimistic( + _ => state_id.map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_fork_versioned_response( + let res = execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, &state, )?; Ok(add_consensus_version_header( @@ -1852,6 +2173,58 @@ pub fn serve( }, ); + // GET debug/fork_choice + let get_debug_fork_choice = eth_v1 + .and(warp::path("debug")) + .and(warp::path("fork_choice")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); + + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node + .justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + finalized_epoch: node + .finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() 
+ .map(|block_hash| block_hash.into_root()), + } + }) + .collect::>(); + Ok(ForkChoice { + justified_checkpoint: proto_array.justified_checkpoint, + finalized_checkpoint: proto_array.finalized_checkpoint, + fork_choice_nodes, + }) + }) + }); + /* * node */ @@ -1948,7 +2321,7 @@ pub fn serve( .and(warp::path::end()) .and(network_globals.clone()) .and_then(|network_globals: Arc>| { - blocking_task(move || match *network_globals.sync_state.read() { + blocking_response_task(move || match *network_globals.sync_state.read() { SyncState::SyncingFinalized { .. } | SyncState::SyncingHead { .. } | SyncState::SyncTransition @@ -2164,11 +2537,19 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(warp::query::()) .and(chain_filter.clone()) + .and(log_filter.clone()) .and_then( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| async move { + chain: Arc>, + log: Logger| async move { + debug!( + log, + "Block production request from HTTP API"; + "slot" => slot + ); + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { warp_utils::reject::custom_bad_request(format!( "randao reveal is not a valid BLS signature: {:?}", @@ -2204,7 +2585,7 @@ pub fn serve( .map_err(inconsistent_fork_rejection)?; fork_versioned_response(endpoint_version, fork_name, block) - .map(|response| warp::reply::json(&response)) + .map(|response| warp::reply::json(&response).into_response()) }, ); @@ -2261,7 +2642,7 @@ pub fn serve( // Pose as a V2 endpoint so we return the fork `version`. fork_versioned_response(V2, fork_name, block) - .map(|response| warp::reply::json(&response)) + .map(|response| warp::reply::json(&response).into_response()) }, ); @@ -2634,7 +3015,7 @@ pub fn serve( )) })?; - Ok::<_, warp::reject::Rejection>(warp::reply::json(&())) + Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) }, ); @@ -2743,9 +3124,9 @@ pub fn serve( builder .post_builder_validators(&filtered_registration_data) .await - .map(|resp| warp::reply::json(&resp)) + .map(|resp| warp::reply::json(&resp).into_response()) .map_err(|e| { - error!( + warn!( log, "Relay error when registering validator(s)"; "num_registrations" => filtered_registration_data.len(), @@ -2915,6 +3296,22 @@ pub fn serve( }, ); + // POST lighthouse/ui/validator_info + let post_lighthouse_ui_validator_info = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("validator_info")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |request_data: ui::ValidatorInfoRequestData, chain: Arc>| { + blocking_json_task(move || { + ui::get_validator_info(request_data, chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -2989,7 +3386,7 @@ pub fn serve( .and(warp::path::end()) .and(chain_filter.clone()) .and_then(|chain: Arc>| { - blocking_task(move || { + blocking_response_task(move || { Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( chain .canonical_head @@ -3108,9 +3505,9 @@ pub fn serve( .and(warp::path::end()) .and(chain_filter.clone()) .and_then(|state_id: StateId, chain: Arc>| { - blocking_task(move || { + blocking_response_task(move || { // This debug endpoint provides no indication of optimistic status. 
- let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") @@ -3244,9 +3641,10 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| async move { let merge_readiness = chain.check_merge_readiness().await; - Ok::<_, warp::reject::Rejection>(warp::reply::json(&api_types::GenericResponse::from( - merge_readiness, - ))) + Ok::<_, warp::reject::Rejection>( + warp::reply::json(&api_types::GenericResponse::from(merge_readiness)) + .into_response(), + ) }); let get_events = eth_v1 @@ -3257,7 +3655,7 @@ pub fn serve( .and_then( |topics_res: Result, chain: Arc>| { - blocking_task(move || { + blocking_response_task(move || { let topics = topics_res?; // for each topic subscribed spawn a new subscription let mut receivers = Vec::with_capacity(topics.topics.len()); @@ -3282,6 +3680,9 @@ pub fn serve( api_types::EventTopic::ContributionAndProof => { event_handler.subscribe_contributions() } + api_types::EventTopic::PayloadAttributes => { + event_handler.subscribe_payload_attributes() + } api_types::EventTopic::LateHead => { event_handler.subscribe_late_head() } @@ -3321,100 +3722,111 @@ pub fn serve( ); // Define the ultimate set of routes that will be provided to the server. + // Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`). let routes = warp::get() .and( get_beacon_genesis - .boxed() - .or(get_beacon_state_root.boxed()) - .or(get_beacon_state_fork.boxed()) - .or(get_beacon_state_finality_checkpoints.boxed()) - .or(get_beacon_state_validator_balances.boxed()) - .or(get_beacon_state_validators_id.boxed()) - .or(get_beacon_state_validators.boxed()) - .or(get_beacon_state_committees.boxed()) - .or(get_beacon_state_sync_committees.boxed()) - .or(get_beacon_state_randao.boxed()) - .or(get_beacon_headers.boxed()) - .or(get_beacon_headers_block_id.boxed()) - .or(get_beacon_block.boxed()) - .or(get_beacon_block_attestations.boxed()) - .or(get_beacon_blinded_block.boxed()) - .or(get_beacon_block_root.boxed()) - .or(get_beacon_pool_attestations.boxed()) - .or(get_beacon_pool_attester_slashings.boxed()) - .or(get_beacon_pool_proposer_slashings.boxed()) - .or(get_beacon_pool_voluntary_exits.boxed()) - .or(get_beacon_deposit_snapshot.boxed()) - .or(get_config_fork_schedule.boxed()) - .or(get_config_spec.boxed()) - .or(get_config_deposit_contract.boxed()) - .or(get_debug_beacon_states.boxed()) - .or(get_debug_beacon_heads.boxed()) - .or(get_node_identity.boxed()) - .or(get_node_version.boxed()) - .or(get_node_syncing.boxed()) - .or(get_node_health.boxed()) - .or(get_node_peers_by_id.boxed()) - .or(get_node_peers.boxed()) - .or(get_node_peer_count.boxed()) - .or(get_validator_duties_proposer.boxed()) - .or(get_validator_blocks.boxed()) - .or(get_validator_blinded_blocks.boxed()) - .or(get_validator_attestation_data.boxed()) - .or(get_validator_aggregate_attestation.boxed()) - .or(get_validator_sync_committee_contribution.boxed()) - .or(get_lighthouse_health.boxed()) - .or(get_lighthouse_ui_health.boxed()) - .or(get_lighthouse_ui_validator_count.boxed()) - .or(get_lighthouse_syncing.boxed()) - .or(get_lighthouse_nat.boxed()) - .or(get_lighthouse_peers.boxed()) - .or(get_lighthouse_peers_connected.boxed()) - .or(get_lighthouse_proto_array.boxed()) - .or(get_lighthouse_validator_inclusion_global.boxed()) - .or(get_lighthouse_validator_inclusion.boxed()) - .or(get_lighthouse_eth1_syncing.boxed()) - 
.or(get_lighthouse_eth1_block_cache.boxed()) - .or(get_lighthouse_eth1_deposit_cache.boxed()) - .or(get_lighthouse_beacon_states_ssz.boxed()) - .or(get_lighthouse_staking.boxed()) - .or(get_lighthouse_database_info.boxed()) - .or(get_lighthouse_block_rewards.boxed()) - .or(get_lighthouse_attestation_performance.boxed()) - .or(get_lighthouse_block_packing_efficiency.boxed()) - .or(get_lighthouse_merge_readiness.boxed()) - .or(get_events.boxed()), + .uor(get_beacon_state_root) + .uor(get_beacon_state_fork) + .uor(get_beacon_state_finality_checkpoints) + .uor(get_beacon_state_validator_balances) + .uor(get_beacon_state_validators_id) + .uor(get_beacon_state_validators) + .uor(get_beacon_state_committees) + .uor(get_beacon_state_sync_committees) + .uor(get_beacon_state_randao) + .uor(get_beacon_headers) + .uor(get_beacon_headers_block_id) + .uor(get_beacon_block) + .uor(get_beacon_block_attestations) + .uor(get_beacon_blinded_block) + .uor(get_beacon_block_root) + .uor(get_beacon_pool_attestations) + .uor(get_beacon_pool_attester_slashings) + .uor(get_beacon_pool_proposer_slashings) + .uor(get_beacon_pool_voluntary_exits) + .uor(get_beacon_pool_bls_to_execution_changes) + .uor(get_beacon_deposit_snapshot) + .uor(get_beacon_rewards_blocks) + .uor(get_config_fork_schedule) + .uor(get_config_spec) + .uor(get_config_deposit_contract) + .uor(get_debug_beacon_states) + .uor(get_debug_beacon_heads) + .uor(get_debug_fork_choice) + .uor(get_node_identity) + .uor(get_node_version) + .uor(get_node_syncing) + .uor(get_node_health) + .uor(get_node_peers_by_id) + .uor(get_node_peers) + .uor(get_node_peer_count) + .uor(get_validator_duties_proposer) + .uor(get_validator_blocks) + .uor(get_validator_blinded_blocks) + .uor(get_validator_attestation_data) + .uor(get_validator_aggregate_attestation) + .uor(get_validator_sync_committee_contribution) + .uor(get_lighthouse_health) + .uor(get_lighthouse_ui_health) + .uor(get_lighthouse_ui_validator_count) + .uor(get_lighthouse_syncing) + .uor(get_lighthouse_nat) + .uor(get_lighthouse_peers) + .uor(get_lighthouse_peers_connected) + .uor(get_lighthouse_proto_array) + .uor(get_lighthouse_validator_inclusion_global) + .uor(get_lighthouse_validator_inclusion) + .uor(get_lighthouse_eth1_syncing) + .uor(get_lighthouse_eth1_block_cache) + .uor(get_lighthouse_eth1_deposit_cache) + .uor(get_lighthouse_beacon_states_ssz) + .uor(get_lighthouse_staking) + .uor(get_lighthouse_database_info) + .uor(get_lighthouse_block_rewards) + .uor(get_lighthouse_attestation_performance) + .uor(get_lighthouse_block_packing_efficiency) + .uor(get_lighthouse_merge_readiness) + .uor(get_events) + .recover(warp_utils::reject::handle_rejection), ) .boxed() - .or(warp::post().and( - post_beacon_blocks - .boxed() - .or(post_beacon_blinded_blocks.boxed()) - .or(post_beacon_pool_attestations.boxed()) - .or(post_beacon_pool_attester_slashings.boxed()) - .or(post_beacon_pool_proposer_slashings.boxed()) - .or(post_beacon_pool_voluntary_exits.boxed()) - .or(post_beacon_pool_sync_committees.boxed()) - .or(post_validator_duties_attester.boxed()) - .or(post_validator_duties_sync.boxed()) - .or(post_validator_aggregate_and_proofs.boxed()) - .or(post_validator_contribution_and_proofs.boxed()) - .or(post_validator_beacon_committee_subscriptions.boxed()) - .or(post_validator_sync_committee_subscriptions.boxed()) - .or(post_validator_prepare_beacon_proposer.boxed()) - .or(post_validator_register_validator.boxed()) - .or(post_lighthouse_liveness.boxed()) - .or(post_lighthouse_database_reconstruct.boxed()) - 
.or(post_lighthouse_database_historical_blocks.boxed()) - .or(post_lighthouse_block_rewards.boxed()) - .or(post_lighthouse_ui_validator_metrics.boxed()), - )) + .uor( + warp::post().and( + post_beacon_blocks + .uor(post_beacon_blinded_blocks) + .uor(post_beacon_pool_attestations) + .uor(post_beacon_pool_attester_slashings) + .uor(post_beacon_pool_proposer_slashings) + .uor(post_beacon_pool_voluntary_exits) + .uor(post_beacon_pool_sync_committees) + .uor(post_beacon_pool_bls_to_execution_changes) + .uor(post_beacon_rewards_attestations) + .uor(post_beacon_rewards_sync_committee) + .uor(post_validator_duties_attester) + .uor(post_validator_duties_sync) + .uor(post_validator_aggregate_and_proofs) + .uor(post_validator_contribution_and_proofs) + .uor(post_validator_beacon_committee_subscriptions) + .uor(post_validator_sync_committee_subscriptions) + .uor(post_validator_prepare_beacon_proposer) + .uor(post_validator_register_validator) + .uor(post_lighthouse_liveness) + .uor(post_lighthouse_database_reconstruct) + .uor(post_lighthouse_database_historical_blocks) + .uor(post_lighthouse_block_rewards) + .uor(post_lighthouse_ui_validator_metrics) + .uor(post_lighthouse_ui_validator_info) + .recover(warp_utils::reject::handle_rejection), + ), + ) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) .with(prometheus_metrics()) // Add a `Server` header. .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) - .with(cors_builder.build()); + .with(cors_builder.build()) + .boxed(); let http_socket: SocketAddr = SocketAddr::new(config.listen_addr, config.listen_port); let http_server: HttpServer = match config.tls_config { diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 1c3ab1f6804..26ee183c83f 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -29,9 +29,10 @@ lazy_static::lazy_static! { "http_api_beacon_proposer_cache_misses_total", "Count of times the proposer cache has been missed", ); - pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram( + pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram_vec( "http_api_block_broadcast_delay_times", - "Time between start of the slot and when the block was broadcast" + "Time between start of the slot and when the block was broadcast", + &["provenance"] ); pub static ref HTTP_API_BLOCK_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( "http_api_block_published_late_total", diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 877d64e20f8..7e946b89e72 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -209,7 +209,9 @@ fn compute_historic_proposer_duties( .map_err(warp_utils::reject::beacon_chain_error)?; (state, execution_optimistic) } else { - StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?; + (state, execution_optimistic) }; // Ensure the state lookup was correct. 
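A recurring pattern in the hunks above is that handlers now return a `finalized` flag alongside `execution_optimistic`, and attach both to the response via `add_execution_optimistic_finalized`. The sketch below is illustrative only and is not part of this patch: it uses simplified stand-in types (not the real `eth2::types` or `http_api` definitions) to show the response shape that results.

// Illustrative stand-in types only -- the real ones live in `eth2::types` and
// in the http_api crate; they are simplified here to show the new response shape.
#[derive(Debug)]
struct ExecutionOptimisticFinalizedResponse<T> {
    execution_optimistic: Option<bool>,
    finalized: Option<bool>,
    data: T,
}

#[derive(Debug)]
struct GenericResponse<T> {
    data: T,
}

impl<T> GenericResponse<T> {
    fn from(data: T) -> Self {
        Self { data }
    }

    // Mirrors the `add_execution_optimistic_finalized` helper used by the
    // handlers above: wrap the payload together with both status flags.
    fn add_execution_optimistic_finalized(
        self,
        execution_optimistic: bool,
        finalized: bool,
    ) -> ExecutionOptimisticFinalizedResponse<T> {
        ExecutionOptimisticFinalizedResponse {
            execution_optimistic: Some(execution_optimistic),
            finalized: Some(finalized),
            data: self.data,
        }
    }
}

fn main() {
    // A lookup that resolved to a finalized, fully-verified block would report
    // `execution_optimistic: false` and `finalized: true`.
    let response = GenericResponse::from(vec!["block header data"])
        .add_execution_optimistic_finalized(false, true);
    println!("{response:?}");
}

With this shape, `finalized` is reported per lookup (for example `true` for the finalized or genesis state, `false` for the head), so clients can distinguish canonical-but-unfinalized data from finalized data without issuing a second request.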
diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 5d27f117b02..1a5d5175bc2 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -3,36 +3,55 @@ use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, }; +use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{error, info, warn, Logger}; +use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - SignedBeaconBlock, + AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, + FullPayload, Hash256, SignedBeaconBlock, }; use warp::Rejection; +pub enum ProvenancedBlock { + /// The payload was built using a local EE. + Local(Arc>>), + /// The payload was build using a remote builder (e.g., via a mev-boost + /// compatible relay). + Builder(Arc>>), +} + /// Handles a request from the HTTP API for full blocks. pub async fn publish_block( block_root: Option, - block: Arc>, + provenanced_block: ProvenancedBlock, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, ) -> Result<(), Rejection> { let seen_timestamp = timestamp_now(); + let (block, is_locally_built_block) = match provenanced_block { + ProvenancedBlock::Local(block) => (block, true), + ProvenancedBlock::Builder(block) => (block, false), + }; + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + + debug!( + log, + "Signed block published to HTTP API"; + "slot" => block.slot() + ); // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. - crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; - // Determine the delay after the start of the slot, register it with metrics. - let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + let message = PubsubMessage::BeaconBlock(block.clone()); + crate::publish_pubsub_message(network_tx, message)?; let block_root = block_root.unwrap_or_else(|| block.canonical_root()); @@ -67,31 +86,11 @@ pub async fn publish_block( // head. chain.recompute_head_at_current_slot().await; - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let delayed_threshold = too_late_threshold / 2; - if delay >= too_late_threshold { - error!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= delayed_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) + // Only perform late-block logging here if the block is local. 
For + // blocks built with builders we consider the broadcast time to be + // when the blinded block is published to the builder. + if is_locally_built_block { + late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) } Ok(()) @@ -139,14 +138,7 @@ pub async fn publish_blinded_block( ) -> Result<(), Rejection> { let block_root = block.canonical_root(); let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; - publish_block::( - Some(block_root), - Arc::new(full_block), - chain, - network_tx, - log, - ) - .await + publish_block::(Some(block_root), full_block, chain, network_tx, log).await } /// Deconstruct the given blinded block, and construct a full block. This attempts to use the @@ -157,23 +149,48 @@ async fn reconstruct_block( block_root: Hash256, block: SignedBeaconBlock>, log: Logger, -) -> Result>, Rejection> { - let full_payload = if let Ok(payload_header) = block.message().body().execution_payload() { +) -> Result, Rejection> { + let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) })?; // If the execution block hash is zero, use an empty payload. let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { - ExecutionPayload::default() - // If we already have an execution payload with this transactions root cached, use it. + let payload = FullPayload::default_at_fork( + chain + .spec + .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Default payload construction error: {e:?}" + )) + })? + .into(); + ProvenancedPayload::Local(payload) + // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) { - info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); - cached_payload - // Otherwise, this means we are attempting a blind block proposal. + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash()); + ProvenancedPayload::Local(cached_payload) + // Otherwise, this means we are attempting a blind block proposal. } else { + // Perform the logging for late blocks when we publish to the + // builder, rather than when we publish to the network. This helps + // prevent false positive logs when the builder publishes to the P2P + // network significantly earlier than when they return the block to + // us. + late_block_logging( + &chain, + timestamp_now(), + block.message(), + block_root, + "builder", + &log, + ); + let full_payload = el .propose_blinded_beacon_block(block_root, &block) .await @@ -183,8 +200,8 @@ async fn reconstruct_block( e )) })?; - info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); - full_payload + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash()); + ProvenancedPayload::Builder(full_payload) }; Some(full_payload) @@ -192,7 +209,71 @@ async fn reconstruct_block( None }; - block.try_into_full_block(full_payload).ok_or_else(|| { + match full_payload_opt { + // A block without a payload is pre-merge and we consider it locally + // built. 
+ None => block + .try_into_full_block(None) + .map(Arc::new) + .map(ProvenancedBlock::Local), + Some(ProvenancedPayload::Local(full_payload)) => block + .try_into_full_block(Some(full_payload)) + .map(Arc::new) + .map(ProvenancedBlock::Local), + Some(ProvenancedPayload::Builder(full_payload)) => block + .try_into_full_block(Some(full_payload)) + .map(Arc::new) + .map(ProvenancedBlock::Builder), + } + .ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) }) } + +/// If the `seen_timestamp` is some time after the start of the slot for +/// `block`, create some logs to indicate that the block was published late. +fn late_block_logging>( + chain: &BeaconChain, + seen_timestamp: Duration, + block: BeaconBlockRef, + root: Hash256, + provenance: &str, + log: &Logger, +) { + let delay = get_block_delay_ms(seen_timestamp, block, &chain.slot_clock); + + metrics::observe_timer_vec( + &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, + &[provenance], + delay, + ); + + // Perform some logging to inform users if their blocks are being produced + // late. + // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let delayed_threshold = too_late_threshold / 2; + if delay >= too_late_threshold { + error!( + log, + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "provenance" => provenance, + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } else if delay >= delayed_threshold { + error!( + log, + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "provenance" => provenance, + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } +} diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs new file mode 100644 index 00000000000..de7e5eb7d3b --- /dev/null +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -0,0 +1,27 @@ +use crate::sync_committee_rewards::get_state_before_applying_block; +use crate::BlockId; +use crate::ExecutionOptimistic; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use std::sync::Arc; +use warp_utils::reject::beacon_chain_error; +//// The difference between block_rewards and beacon_block_rewards is the later returns block +//// reward format that satisfies beacon-api specs +pub fn compute_beacon_block_rewards( + chain: Arc>, + block_id: BlockId, +) -> Result<(StandardBlockReward, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; + + let block_ref = block.message(); + + let block_root = block.canonical_root(); + + let mut state = get_state_before_applying_block(chain.clone(), &block)?; + + let rewards = chain + .compute_beacon_block_reward(block_ref, block_root, &mut state) + .map_err(beacon_chain_error)?; + + Ok((rewards, execution_optimistic, finalized)) +} diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 44354217bc4..9e4aadef17e 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -10,6 +10,9 @@ use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; #[derive(Debug)] pub struct StateId(pub CoreStateId); +// More clarity when returning if 
the state is finalized or not in the root function. +type Finalized = bool; + impl StateId { pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) @@ -19,8 +22,8 @@ impl StateId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { - let (slot, execution_optimistic) = match &self.0 { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { + let (slot, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -29,24 +32,36 @@ impl StateId { return Ok(( cached_head.head_state_root(), execution_status.is_optimistic_or_invalid(), + false, )); } - CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false, true)), CoreStateId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + (slot, execution_optimistic, true) } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + (slot, execution_optimistic, false) } CoreStateId::Slot(slot) => ( *slot, chain .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?, + *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), ), CoreStateId::Root(root) => { if let Some(hot_summary) = chain @@ -61,7 +76,10 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + let finalized = chain + .is_finalized_state(root, hot_summary.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store .load_cold_state_slot(root) @@ -77,7 +95,7 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + return Ok((*root, execution_optimistic, true)); } else { return Err(warp_utils::reject::custom_not_found(format!( "beacon state for state root {}", @@ -94,7 +112,7 @@ impl StateId { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) })?; - Ok((root, execution_optimistic)) + Ok((root, execution_optimistic, finalized)) } /// Return the `fork` field of the state identified by `self`. @@ -103,9 +121,25 @@ impl StateId { &self, chain: &BeaconChain, ) -> Result<(Fork, bool), warp::Rejection> { - self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { - Ok((state.fork(), execution_optimistic)) - }) + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, _finalized| Ok((state.fork(), execution_optimistic)), + ) + } + + /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. 
+ /// Also returns the `finalized` value of the state. + pub fn fork_and_execution_optimistic_and_finalized( + &self, + chain: &BeaconChain, + ) -> Result<(Fork, bool, bool), warp::Rejection> { + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, finalized| { + Ok((state.fork(), execution_optimistic, finalized)) + }, + ) } /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. @@ -121,8 +155,8 @@ impl StateId { pub fn state( &self, chain: &BeaconChain, - ) -> Result<(BeaconState, ExecutionOptimistic), warp::Rejection> { - let ((state_root, execution_optimistic), slot_opt) = match &self.0 { + ) -> Result<(BeaconState, ExecutionOptimistic, Finalized), warp::Rejection> { + let ((state_root, execution_optimistic, finalized), slot_opt) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -134,6 +168,7 @@ impl StateId { .beacon_state .clone_with_only_committee_caches(), execution_status.is_optimistic_or_invalid(), + false, )); } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), @@ -152,24 +187,25 @@ impl StateId { }) })?; - Ok((state, execution_optimistic)) + Ok((state, execution_optimistic, finalized)) } /// Map a function across the `BeaconState` identified by `self`. /// - /// The optimistic status of the requested state is also provided to the `func` closure. + /// The optimistic and finalization status of the requested state is also provided to the `func` + /// closure. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. - pub fn map_state_and_execution_optimistic( + pub fn map_state_and_execution_optimistic_and_finalized( &self, chain: &BeaconChain, func: F, ) -> Result where - F: Fn(&BeaconState, bool) -> Result, + F: Fn(&BeaconState, bool, bool) -> Result, { - let (state, execution_optimistic) = match &self.0 { + let (state, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (head, execution_status) = chain .canonical_head @@ -178,12 +214,13 @@ impl StateId { return func( &head.snapshot.beacon_state, execution_status.is_optimistic_or_invalid(), + false, ); } _ => self.state(chain)?, }; - func(&state, execution_optimistic) + func(&state, execution_optimistic, finalized) } } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs new file mode 100644 index 00000000000..68a06b1ce8c --- /dev/null +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -0,0 +1,77 @@ +use crate::{BlockId, ExecutionOptimistic}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::SyncCommitteeReward; +use eth2::types::ValidatorId; +use slog::{debug, Logger}; +use state_processing::BlockReplayer; +use std::sync::Arc; +use types::{BeaconState, SignedBlindedBeaconBlock}; +use warp_utils::reject::{beacon_chain_error, custom_not_found}; + +pub fn compute_sync_committee_rewards( + chain: Arc>, + block_id: BlockId, + validators: Vec, + log: Logger, +) -> Result<(Option>, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; + + let mut state = get_state_before_applying_block(chain.clone(), &block)?; + + let reward_payload = chain + .compute_sync_committee_rewards(block.message(), &mut state) + .map_err(beacon_chain_error)?; + + let data = if reward_payload.is_empty() { + debug!(log, "compute_sync_committee_rewards 
returned empty"); + None + } else if validators.is_empty() { + Some(reward_payload) + } else { + Some( + reward_payload + .into_iter() + .filter(|reward| { + validators.iter().any(|validator| match validator { + ValidatorId::Index(i) => reward.validator_index == *i, + ValidatorId::PublicKey(pubkey) => match state.get_validator_index(pubkey) { + Ok(Some(i)) => reward.validator_index == i as u64, + _ => false, + }, + }) + }) + .collect::>(), + ) + }; + + Ok((data, execution_optimistic, finalized)) +} + +pub fn get_state_before_applying_block( + chain: Arc>, + block: &SignedBlindedBeaconBlock, +) -> Result, warp::reject::Rejection> { + let parent_block: SignedBlindedBeaconBlock = chain + .get_blinded_block(&block.parent_root()) + .and_then(|maybe_block| { + maybe_block.ok_or_else(|| BeaconChainError::MissingBeaconBlock(block.parent_root())) + }) + .map_err(|e| custom_not_found(format!("Parent block is not available! {:?}", e)))?; + + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .and_then(|maybe_state| { + maybe_state + .ok_or_else(|| BeaconChainError::MissingBeaconState(parent_block.state_root())) + }) + .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; + + let replayer = BlockReplayer::new(parent_state, &chain.spec) + .no_signature_verification() + .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .map_err(beacon_chain_error)?; + + Ok(replayer.into_state()) +} diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/src/test_utils.rs similarity index 82% rename from beacon_node/http_api/tests/common.rs rename to beacon_node/http_api/src/test_utils.rs index 7c228d9803f..8dc9be7dd43 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,10 +1,12 @@ +use crate::{Config, Context}; use beacon_chain::{ - test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, + test_utils::{ + BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, + }, BeaconChain, BeaconChainTypes, }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; -use http_api::{Config, Context}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, libp2p::{ @@ -55,25 +57,39 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } +type Initializer = Box< + dyn FnOnce(HarnessBuilder>) -> HarnessBuilder>, +>; type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - Self::new_with_mutator(spec, validator_count, None).await + Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await } - pub async fn new_with_mutator( + pub async fn new_with_initializer_and_mutator( spec: Option, validator_count: usize, + initializer: Option>, mutator: Option>, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec) - .deterministic_keypairs(validator_count) .logger(test_logger()) - .mock_execution_layer() - .fresh_ephemeral_store(); - + .mock_execution_layer(); + + harness_builder = if let Some(initializer) = initializer { + // Apply custom initialization provided by the caller. + initializer(harness_builder) + } else { + // Apply default initial configuration. 
+ harness_builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + }; + + // Add a mutator for the beacon chain builder which will be called in + // `HarnessBuilder::build`. if let Some(mutator) = mutator { harness_builder = harness_builder.initial_mutator(mutator); } @@ -114,7 +130,7 @@ pub async fn create_api_server( log: Logger, ) -> ApiServer> { // Get a random unused port. - let port = unused_port::unused_tcp_port().unwrap(); + let port = unused_port::unused_tcp4_port().unwrap(); create_api_server_on_port(chain, log, port).await } @@ -135,10 +151,11 @@ pub async fn create_api_server_on_port( let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), - TCP_PORT, - UDP_PORT, + Some(TCP_PORT), + None, meta_data, vec![], + false, &log, )); @@ -166,7 +183,7 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let context = Arc::new(Context { + let ctx = Arc::new(Context { config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -177,19 +194,19 @@ pub async fn create_api_server_on_port( data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, - chain: Some(chain.clone()), + chain: Some(chain), network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), log, }); - let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); let server_shutdown = async { // It's not really interesting why this triggered, just that it happened. let _ = shutdown_rx.await; }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); ApiServer { server, diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index a5b3a8b2f2e..e8280a796a3 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -1,5 +1,7 @@ -use beacon_chain::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::types::ValidatorStatus; +use beacon_chain::{ + validator_monitor::HISTORIC_EPOCHS, BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use eth2::types::{Epoch, ValidatorStatus}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; @@ -71,6 +73,82 @@ pub fn get_validator_count( }) } +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoRequestData { + #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + indices: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoValues { + #[serde(with = "eth2_serde_utils::quoted_u64")] + epoch: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + total_balance: u64, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfo { + info: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoResponse { + validators: HashMap, +} + +pub fn get_validator_info( + request_data: ValidatorInfoRequestData, + chain: Arc>, +) -> Result { + let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + + let epochs = current_epoch.saturating_sub(HISTORIC_EPOCHS).as_u64()..=current_epoch.as_u64(); + + let validator_ids = chain + .validator_monitor + .read() + .get_all_monitored_validators() + .iter() + .cloned() + .collect::>(); + + let indices = request_data + .indices + .iter() + .map(|index| 
index.to_string()) + .collect::>(); + + let ids = validator_ids + .intersection(&indices) + .collect::>(); + + let mut validators = HashMap::new(); + + for id in ids { + if let Ok(index) = id.parse::() { + if let Some(validator) = chain + .validator_monitor + .read() + .get_monitored_validator(index) + { + let mut info = vec![]; + for epoch in epochs.clone() { + if let Some(total_balance) = validator.get_total_balance(Epoch::new(epoch)) { + info.push(ValidatorInfoValues { + epoch, + total_balance, + }); + } + } + validators.insert(id.clone(), ValidatorInfo { info }); + } + } + } + + Ok(ValidatorInfoResponse { validators }) +} + #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorMetricsRequestData { indices: Vec, @@ -119,76 +197,56 @@ pub fn post_validator_monitor_metrics( let mut validators = HashMap::new(); for id in ids { - let attestation_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestations = attestation_hits + attestation_misses; - let attestation_hit_percentage: f64 = if attestations == 0 { - 0.0 - } else { - (100 * attestation_hits / attestations) as f64 - }; - - let attestation_head_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_head_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let head_attestations = attestation_head_hits + attestation_head_misses; - let attestation_head_hit_percentage: f64 = if head_attestations == 0 { - 0.0 - } else { - (100 * attestation_head_hits / head_attestations) as f64 - }; - - let attestation_target_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_target_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let target_attestations = attestation_target_hits + attestation_target_misses; - let attestation_target_hit_percentage: f64 = if target_attestations == 0 { - 0.0 - } else { - (100 * attestation_target_hits / target_attestations) as f64 - }; - - let metrics = ValidatorMetrics { - attestation_hits, - attestation_misses, - attestation_hit_percentage, - attestation_head_hits, - attestation_head_misses, - attestation_head_hit_percentage, - attestation_target_hits, - attestation_target_misses, - attestation_target_hit_percentage, - }; - - validators.insert(id.clone(), metrics); + if let Ok(index) = id.parse::() { + if let Some(validator) = chain + .validator_monitor + .read() + .get_monitored_validator(index) + { + let val_metrics = validator.metrics.read(); + let attestation_hits = val_metrics.attestation_hits; + let attestation_misses = val_metrics.attestation_misses; + let attestation_head_hits = val_metrics.attestation_head_hits; + let attestation_head_misses = val_metrics.attestation_head_misses; + let attestation_target_hits = val_metrics.attestation_target_hits; + let attestation_target_misses = val_metrics.attestation_target_misses; + 
drop(val_metrics); + + let attestations = attestation_hits + attestation_misses; + let attestation_hit_percentage: f64 = if attestations == 0 { + 0.0 + } else { + (100 * attestation_hits / attestations) as f64 + }; + let head_attestations = attestation_head_hits + attestation_head_misses; + let attestation_head_hit_percentage: f64 = if head_attestations == 0 { + 0.0 + } else { + (100 * attestation_head_hits / head_attestations) as f64 + }; + + let target_attestations = attestation_target_hits + attestation_target_misses; + let attestation_target_hit_percentage: f64 = if target_attestations == 0 { + 0.0 + } else { + (100 * attestation_target_hits / target_attestations) as f64 + }; + + let metrics = ValidatorMetrics { + attestation_hits, + attestation_misses, + attestation_hit_percentage, + attestation_head_hits, + attestation_head_misses, + attestation_head_hit_percentage, + attestation_target_hits, + attestation_target_misses, + attestation_target_hit_percentage, + }; + + validators.insert(id.clone(), metrics); + } + } } Ok(ValidatorMetricsResponse { validators }) diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 917e85e6493..f22ced1e693 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -18,7 +18,7 @@ fn end_of_epoch_state( let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); // The execution status is not returned, any functions which rely upon this method might return // optimistic information without explicitly declaring so. - let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + let (state, _execution_status, _finalized) = StateId::from_slot(target_slot).state(chain)?; Ok(state) } diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 87ba3a4663f..e01ff982201 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,10 +1,9 @@ -use crate::api_types::{ - EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, -}; +use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; +use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ForkName, InconsistentFork}; -use warp::reply::{self, Reply, WithHeader}; +use types::{ForkName, ForkVersionedResponse, InconsistentFork}; +use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); @@ -27,12 +26,13 @@ pub fn fork_versioned_response( }) } -pub fn execution_optimistic_fork_versioned_response( +pub fn execution_optimistic_finalized_fork_versioned_response( endpoint_version: EndpointVersion, fork_name: ForkName, execution_optimistic: bool, + finalized: bool, data: T, -) -> Result, warp::reject::Rejection> { +) -> Result, warp::reject::Rejection> { let fork_name = if endpoint_version == V1 { None } else if endpoint_version == V2 { @@ -40,16 +40,17 @@ pub fn execution_optimistic_fork_versioned_response( } else { return Err(unsupported_version_rejection(endpoint_version)); }; - Ok(ExecutionOptimisticForkVersionedResponse { + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: fork_name, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) } /// Add the `Eth-Consensus-Version` header to a response. 
-pub fn add_consensus_version_header(reply: T, fork_name: ForkName) -> WithHeader { - reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()) +pub fn add_consensus_version_header(reply: T, fork_name: ForkName) -> Response { + reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()).into_response() } pub fn inconsistent_fork_rejection(error: InconsistentFork) -> warp::reject::Rejection { diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 942a1167c2f..8a3ba887b39 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,8 +1,16 @@ //! Tests for API behaviour across fork boundaries. -use crate::common::*; -use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; -use eth2::types::{StateId, SyncSubcommittee}; -use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; +use beacon_chain::{ + test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, + StateSkipConfig, +}; +use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; +use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use http_api::test_utils::*; +use std::collections::HashSet; +use types::{ + test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, +}; type E = MinimalEthSpec; @@ -12,6 +20,14 @@ fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec { spec } +fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(capella_fork_epoch); + spec +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn sync_committee_duties_across_fork() { let validator_count = E::sync_committee_size(); @@ -307,3 +323,219 @@ async fn sync_committee_indices_across_fork() { ); } } + +/// Assert that an HTTP API error has the given status code and indexed errors for the given indices. +fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec) { + let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { + code, + failures, + .. + }) = error else { + panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") + }; + assert_eq!(code, status_code); + assert_eq!(failures.len(), indices.len()); + for (index, failure) in indices.into_iter().zip(failures) { + assert_eq!(failure.index, index as u64); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn bls_to_execution_changes_update_all_around_capella_fork() { + let validator_count = 128; + let fork_epoch = Epoch::new(2); + let spec = capella_spec(fork_epoch); + let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); + + // Use a genesis state with entirely BLS withdrawal credentials. + // Offset keypairs by `validator_count` to create keys distinct from the signing keys. 
+ let validator_keypairs = generate_deterministic_keypairs(validator_count); + let withdrawal_keypairs = (0..validator_count) + .map(|i| Some(generate_deterministic_keypair(i + validator_count))) + .collect::>(); + let withdrawal_credentials = withdrawal_keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) + .collect::>(); + let genesis_state = interop_genesis_state_with_withdrawal_credentials( + &validator_keypairs, + &withdrawal_credentials, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .unwrap(); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(|harness_builder| { + harness_builder + .keypairs(validator_keypairs) + .withdrawal_keypairs(withdrawal_keypairs) + .genesis_state_ephemeral_store(genesis_state) + })), + None, + ) + .await; + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::>(); + + // Create a bunch of valid address changes. + let valid_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index), + ) + }) + .collect::>(); + + // Address changes which conflict with `valid_address_changes` on the address chosen. + let conflicting_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index + 1), + ) + }) + .collect::>(); + + // Address changes signed with the wrong key. + let wrong_key_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + // Use the correct pubkey. + let pubkey = &harness.get_withdrawal_keypair(validator_index).pk; + // And the wrong secret key. + let secret_key = &harness + .get_withdrawal_keypair((validator_index + 1) % validator_count as u64) + .sk; + harness.make_bls_to_execution_change_with_keys( + validator_index, + Address::from_low_u64_be(validator_index), + pubkey, + secret_key, + ) + }) + .collect::>(); + + // Submit some changes before Capella. Just enough to fill two blocks. + let num_pre_capella = validator_count / 4; + let blocks_filled_pre_capella = 2; + assert_eq!( + num_pre_capella, + blocks_filled_pre_capella * max_bls_to_execution_changes + ); + + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + let expected_received_pre_capella_messages = valid_address_changes[..num_pre_capella].to_vec(); + + // Conflicting changes for the same validators should all fail. + let error = client + .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella]) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, (0..num_pre_capella).collect()); + + // Re-submitting the same changes should be accepted. + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + // Invalid changes signed with the wrong keys should all be rejected without affecting the seen + // indices filters (apply ALL of them). + let error = client + .post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, all_validators.clone()); + + // Advance to right before Capella. 
+ let capella_slot = fork_epoch.start_slot(E::slots_per_epoch()); + harness.extend_to_slot(capella_slot - 1).await; + assert_eq!(harness.head_slot(), capella_slot - 1); + + assert_eq!( + harness + .chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &harness.chain.head_snapshot().beacon_state, + &spec, + ) + .into_iter() + .collect::>(), + HashSet::from_iter(expected_received_pre_capella_messages.into_iter()), + "all pre-capella messages should be queued for capella broadcast" + ); + + // Add Capella blocks which should be full of BLS to execution changes. + for i in 0..validator_count / max_bls_to_execution_changes { + let head_block_root = harness.extend_slots(1).await; + let head_block = harness + .chain + .get_block(&head_block_root) + .await + .unwrap() + .unwrap(); + + let bls_to_execution_changes = head_block + .message() + .body() + .bls_to_execution_changes() + .unwrap(); + + // Block should be full. + assert_eq!( + bls_to_execution_changes.len(), + max_bls_to_execution_changes, + "block not full on iteration {i}" + ); + + // Included changes should be the ones from `valid_address_changes` in any order. + for address_change in bls_to_execution_changes.iter() { + assert!(valid_address_changes.contains(address_change)); + } + + // After the initial 2 blocks, add the rest of the changes using a large + // request containing all the valid, all the conflicting and all the invalid. + // Despite the invalid and duplicate messages, the new ones should still get picked up by + // the pool. + if i == blocks_filled_pre_capella - 1 { + let all_address_changes: Vec<_> = [ + valid_address_changes.clone(), + conflicting_address_changes.clone(), + wrong_key_address_changes.clone(), + ] + .concat(); + + let error = client + .post_beacon_pool_bls_to_execution_changes(&all_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error( + error, + 400, + (validator_count..3 * validator_count).collect(), + ); + } + } + + // Eventually all validators should have eth1 withdrawal credentials. + let head_state = harness.get_current_state(); + for validator in head_state.validators() { + assert!(validator.has_eth1_withdrawal_credential(&spec)); + } +} diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 17a3624afed..da92419744e 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,14 +1,16 @@ //! 
Generic tests that make use of the (newer) `InteractiveApiTester` -use crate::common::*; use beacon_chain::{ - chain_config::ReOrgThreshold, - test_utils::{AttestationStrategy, BlockStrategy}, + chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, + test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, }; use eth2::types::DepositContractData; -use execution_layer::{ForkChoiceState, PayloadAttributes}; +use execution_layer::{ForkchoiceState, PayloadAttributes}; +use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; -use state_processing::state_advance::complete_state_advance; +use state_processing::{ + per_block_processing::get_expected_withdrawals, state_advance::complete_state_advance, +}; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; @@ -55,7 +57,7 @@ struct ForkChoiceUpdates { #[derive(Debug, Clone)] struct ForkChoiceUpdateMetadata { received_at: Duration, - state: ForkChoiceState, + state: ForkchoiceState, payload_attributes: Option, } @@ -86,7 +88,7 @@ impl ForkChoiceUpdates { .payload_attributes .as_ref() .map_or(false, |payload_attributes| { - payload_attributes.timestamp == proposal_timestamp + payload_attributes.timestamp() == proposal_timestamp }) }) .cloned() @@ -106,13 +108,17 @@ pub struct ReOrgTest { percent_head_votes: usize, should_re_org: bool, misprediction: bool, + /// Whether to expect withdrawals to change on epoch boundaries. + expect_withdrawals_change_on_epoch: bool, + /// Epoch offsets to avoid proposing reorg blocks at. + disallowed_offsets: Vec, } impl Default for ReOrgTest { /// Default config represents a regular easy re-org. fn default() -> Self { Self { - head_slot: Slot::new(30), + head_slot: Slot::new(E::slots_per_epoch() - 2), parent_distance: 1, head_distance: 1, re_org_threshold: 20, @@ -122,6 +128,8 @@ impl Default for ReOrgTest { percent_head_votes: 0, should_re_org: true, misprediction: false, + expect_withdrawals_change_on_epoch: false, + disallowed_offsets: vec![], } } } @@ -136,8 +144,35 @@ pub async fn proposer_boost_re_org_zero_weight() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_epoch_boundary() { proposer_boost_re_org_test(ReOrgTest { - head_slot: Slot::new(31), + head_slot: Slot::new(E::slots_per_epoch() - 1), + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_epoch_boundary_skip1() { + // Proposing a block on a boundary after a skip will change the set of expected withdrawals + // sent in the payload attributes. + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(2 * E::slots_per_epoch() - 2), + head_distance: 2, + should_re_org: false, + expect_withdrawals_change_on_epoch: true, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_epoch_boundary_skip32() { + // Propose a block at 64 after a whole epoch of skipped slots. 
+ proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() - 1), + head_distance: E::slots_per_epoch() + 1, should_re_org: false, + expect_withdrawals_change_on_epoch: true, ..Default::default() }) .await; @@ -187,7 +222,7 @@ pub async fn proposer_boost_re_org_finality() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_parent_distance() { proposer_boost_re_org_test(ReOrgTest { - head_slot: Slot::new(30), + head_slot: Slot::new(E::slots_per_epoch() - 2), parent_distance: 2, should_re_org: false, ..Default::default() @@ -198,7 +233,7 @@ pub async fn proposer_boost_re_org_parent_distance() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_head_distance() { proposer_boost_re_org_test(ReOrgTest { - head_slot: Slot::new(29), + head_slot: Slot::new(E::slots_per_epoch() - 3), head_distance: 2, should_re_org: false, ..Default::default() @@ -206,10 +241,36 @@ pub async fn proposer_boost_re_org_head_distance() { .await; } +// Check that a re-org at a disallowed offset fails. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset() { + let offset = 4; + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets: vec![offset], + should_re_org: false, + ..Default::default() + }) + .await; +} + +// Check that a re-org at the *only* allowed offset succeeds. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset_exact() { + let offset = 4; + let disallowed_offsets = (0..E::slots_per_epoch()).filter(|o| *o != offset).collect(); + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets, + ..Default::default() + }) + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_very_unhealthy() { proposer_boost_re_org_test(ReOrgTest { - head_slot: Slot::new(31), + head_slot: Slot::new(E::slots_per_epoch() - 1), parent_distance: 2, head_distance: 2, percent_parent_votes: 10, @@ -225,7 +286,6 @@ pub async fn proposer_boost_re_org_very_unhealthy() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_weight_misprediction() { proposer_boost_re_org_test(ReOrgTest { - head_slot: Slot::new(30), percent_empty_votes: 70, percent_head_votes: 30, should_re_org: false, @@ -254,12 +314,14 @@ pub async fn proposer_boost_re_org_test( percent_head_votes, should_re_org, misprediction, + expect_withdrawals_change_on_epoch, + disallowed_offsets, }: ReOrgTest, ) { assert!(head_slot > 0); - // We require a network with execution enabled so we can check EL message timings. - let mut spec = ForkName::Merge.make_genesis_spec(E::default_spec()); + // Test using Capella so that we simulate conditions as similar to mainnet as possible. + let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); spec.terminal_total_difficulty = 1.into(); // Ensure there are enough validators to have `attesters_per_slot`. 
@@ -278,15 +340,19 @@ pub async fn proposer_boost_re_org_test( let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100); - let tester = InteractiveTester::::new_with_mutator( + let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec), validator_count, + None, Some(Box::new(move |builder| { builder .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) .proposer_re_org_max_epochs_since_finalization(Epoch::new( max_epochs_since_finalization, )) + .proposer_re_org_disallowed_offsets( + DisallowedReOrgOffsets::new::(disallowed_offsets).unwrap(), + ) })), ) .await; @@ -322,13 +388,15 @@ pub async fn proposer_boost_re_org_test( ) .await; - // Create some chain depth. + // Create some chain depth. Sign sync committee signatures so validator balances don't dip + // below 32 ETH and become ineligible for withdrawals. harness.advance_slot(); harness - .extend_chain( + .extend_chain_with_sync( num_initial as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, + SyncCommitteeStrategy::AllValidators, ) .await; @@ -342,7 +410,7 @@ pub async fn proposer_boost_re_org_test( .lock() .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { let received_at = chain_inner.slot_clock.now_duration().unwrap(); - let state = ForkChoiceState::from(state); + let state = ForkchoiceState::from(state); let payload_attributes = payload_attributes.map(Into::into); let update = ForkChoiceUpdateMetadata { received_at, @@ -363,6 +431,16 @@ pub async fn proposer_boost_re_org_test( let slot_b = slot_a + parent_distance; let slot_c = slot_b + head_distance; + // We need to transition to at least epoch 2 in order to trigger + // `process_rewards_and_penalties`. This allows us to test withdrawals changes at epoch + // boundaries. + if expect_withdrawals_change_on_epoch { + assert!( + slot_c.epoch(E::slots_per_epoch()) >= 2, + "for withdrawals to change, test must end at an epoch >= 2" + ); + } + harness.advance_slot(); let (block_a_root, block_a, state_a) = harness .add_block_at_slot(slot_a, harness.get_current_state()) @@ -456,6 +534,10 @@ pub async fn proposer_boost_re_org_test( // Produce block C. // Advance state_b so we can get the proposer. + assert_eq!(state_b.slot(), slot_b); + let pre_advance_withdrawals = get_expected_withdrawals(&state_b, &harness.chain.spec) + .unwrap() + .to_vec(); complete_state_advance(&mut state_b, None, slot_c, &harness.chain.spec).unwrap(); let proposer_index = state_b @@ -513,6 +595,28 @@ pub async fn proposer_boost_re_org_test( .unwrap(); let payload_attribs = first_update.payload_attributes.as_ref().unwrap(); + // Check that withdrawals from the payload attributes match those computed from the parent's + // advanced state. 
+ let expected_withdrawals = if should_re_org { + let mut state_a_advanced = state_a.clone(); + complete_state_advance(&mut state_a_advanced, None, slot_c, &harness.chain.spec).unwrap(); + get_expected_withdrawals(&state_a_advanced, &harness.chain.spec) + } else { + get_expected_withdrawals(&state_b, &harness.chain.spec) + } + .unwrap() + .to_vec(); + let payload_attribs_withdrawals = payload_attribs.withdrawals().unwrap(); + assert_eq!(expected_withdrawals, *payload_attribs_withdrawals); + assert!(!expected_withdrawals.is_empty()); + + if should_re_org + || expect_withdrawals_change_on_epoch + && slot_c.epoch(E::slots_per_epoch()) != slot_b.epoch(E::slots_per_epoch()) + { + assert_ne!(expected_withdrawals, pre_advance_withdrawals); + } + let lookahead = slot_clock .start_of(slot_c) .unwrap() @@ -521,16 +625,20 @@ pub async fn proposer_boost_re_org_test( if !misprediction { assert_eq!( - lookahead, payload_lookahead, + lookahead, + payload_lookahead, "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } else { // On a misprediction we issue the first fcU 500ms before creating a block! assert_eq!( - lookahead, fork_choice_lookahead, + lookahead, + fork_choice_lookahead, "timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } } @@ -540,7 +648,7 @@ pub async fn proposer_boost_re_org_test( pub async fn fork_choice_before_proposal() { // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. - let validator_count = 32; + let validator_count = 64; let all_validators = (0..validator_count).collect::>(); let num_initial: u64 = 31; diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index ca6a27530a6..342b72cc7de 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,7 +1,5 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. 
-#![recursion_limit = "256"] -pub mod common; pub mod fork_tests; pub mod interactive_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2e795e522d5..a54f17e96f6 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,3 @@ -use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -8,20 +7,26 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, + types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::Operation; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; +use execution_layer::test_utils::{ + Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, +}; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; -use http_api::{BlockId, StateId}; +use http_api::{ + test_utils::{create_api_server, create_api_server_on_port, ApiServer}, + BlockId, StateId, +}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; @@ -72,38 +77,53 @@ struct ApiTester { mock_builder: Option>>, } +struct ApiTesterConfig { + spec: ChainSpec, + builder_threshold: Option, +} + +impl Default for ApiTesterConfig { + fn default() -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + Self { + spec, + builder_threshold: None, + } + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive chain. - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; - Self::new_from_spec(spec).await + Self::new_from_config(ApiTesterConfig::default()).await } pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; + let mut config = ApiTesterConfig::default(); // Set whether the chain has undergone each hard fork. 
if altair { - spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); } if bellatrix { - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); } - Self::new_from_spec(spec).await + Self::new_from_config(config).await } - pub async fn new_from_spec(spec: ChainSpec) -> Self { + pub async fn new_from_config(config: ApiTesterConfig) -> Self { // Get a random unused port - let port = unused_port::unused_tcp_port().unwrap(); + let spec = config.spec; + let port = unused_port::unused_tcp4_port().unwrap(); let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_builder(beacon_url.clone()) + .mock_execution_layer_with_builder(beacon_url.clone(), config.builder_threshold) .build(), ); @@ -358,6 +378,28 @@ impl ApiTester { tester } + pub async fn new_mev_tester_no_builder_threshold() -> Self { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + let tester = Self::new_from_config(config) + .await + .test_post_validator_register_validator() + .await; + tester + .mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, + ))); + tester + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -422,6 +464,264 @@ impl ApiTester { self } + // finalization tests + pub async fn test_beacon_states_root_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_root(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_fork(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_headers_block_id_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_headers_block_id(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. 
+ if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blinded_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_debug_beacon_states_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_debug_beacon_states::(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + pub async fn test_beacon_states_root(self) -> Self { for state_id in self.interesting_state_ids() { let result = self @@ -434,7 +734,7 @@ impl ApiTester { let expected = state_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -468,15 +768,13 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = - state_id - .state(&self.chain) - .ok() - .map(|(state, _execution_optimistic)| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }); + let expected = state_id.state(&self.chain).ok().map( + |(state, _execution_optimistic, _finalized)| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + ); assert_eq!(result, expected, "{:?}", state_id); } @@ -489,7 +787,9 @@ impl ApiTester { for validator_indices in self.interesting_validator_indices() { let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { - Some((state, _execution_optimistic)) => state.validators().clone().into(), + Some((state, _execution_optimistic, _finalized)) => { + state.validators().clone().into() + } None => vec![], }; let validator_index_ids = validator_indices @@ -528,7 +828,7 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = state_opt.map(|(state, _execution_optimistic)| { + let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -558,7 +858,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators: Vec = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -648,7 +948,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -703,7 +1003,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self @@ -750,7 +1050,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let 
result = self @@ -860,7 +1160,7 @@ impl ApiTester { let block_root_opt = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { @@ -874,7 +1174,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if block_opt.is_none() && result.is_none() { continue; @@ -920,7 +1220,7 @@ impl ApiTester { let expected = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); @@ -967,7 +1267,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1051,7 +1351,7 @@ impl ApiTester { let expected = block_id .blinded_block(&self.chain) .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1132,7 +1432,7 @@ impl ApiTester { .map(|res| res.data); let expected = block_id.full_block(&self.chain).await.ok().map( - |(block, _execution_optimistic)| { + |(block, _execution_optimistic, _finalized)| { block.message().body().attestations().clone().into() }, ); @@ -1372,9 +1672,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); @@ -1553,7 +1853,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -1575,21 +1875,6 @@ impl ApiTester { .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); - // Check legacy v1 API. - let result_v1 = self - .client - .get_debug_beacon_states_v1(state_id.0) - .await - .unwrap(); - - if let (Some(json), Some(expected)) = (&result_v1, &expected) { - assert_eq!(json.version, None); - assert_eq!(json.data, *expected, "{:?}", state_id); - } else { - assert_eq!(result_v1, None); - assert_eq!(expected, None); - } - // Check that version headers are provided. 
let url = self .client @@ -1639,6 +1924,59 @@ impl ApiTester { self } + pub async fn test_get_debug_fork_choice(self) -> Self { + let result = self.client.get_debug_fork_choice().await.unwrap(); + + let beacon_fork_choice = self.chain.canonical_head.fork_choice_read_lock(); + + let expected_proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + assert_eq!( + result.justified_checkpoint, + expected_proto_array.justified_checkpoint + ); + assert_eq!( + result.finalized_checkpoint, + expected_proto_array.finalized_checkpoint + ); + + let expected_fork_choice_nodes: Vec = expected_proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.map(|checkpoint| checkpoint.epoch), + finalized_epoch: node.finalized_checkpoint.map(|checkpoint| checkpoint.epoch), + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect(); + + assert_eq!(result.fork_choice_nodes, expected_fork_choice_nodes); + + // need to drop beacon_fork_choice here, else borrow checker will complain + // that self cannot be moved out since beacon_fork_choice borrowed self.chain + // and might still live after self is moved out + drop(beacon_fork_choice); + self + } + fn validator_count(&self) -> usize { self.chain.head_snapshot().beacon_state.validators().len() } @@ -2122,7 +2460,7 @@ impl ApiTester { self } - pub async fn test_blinded_block_production>(&self) { + pub async fn test_blinded_block_production>(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2182,7 +2520,7 @@ impl ApiTester { } } - pub async fn test_blinded_block_production_no_verify_randao>( + pub async fn test_blinded_block_production_no_verify_randao>( self, ) -> Self { for _ in 0..E::slots_per_epoch() { @@ -2206,7 +2544,9 @@ impl ApiTester { self } - pub async fn test_blinded_block_production_verify_randao_invalid>( + pub async fn test_blinded_block_production_verify_randao_invalid< + Payload: AbstractExecPayload, + >( self, ) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); @@ -2664,7 +3004,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2673,14 +3013,11 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); - assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. 
@@ -2707,7 +3044,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2716,14 +3053,11 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); - assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 30_000_000); // This cache should not be populated because fallback should not have been used. assert!(self @@ -2753,7 +3087,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2762,12 +3096,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.fee_recipient, - test_fee_recipient - ); + assert_eq!(payload.fee_recipient(), test_fee_recipient); // This cache should not be populated because fallback should not have been used. assert!(self @@ -2801,11 +3132,11 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .block_hash; + .block_hash(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2814,12 +3145,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.parent_hash, - expected_parent_hash - ); + assert_eq!(payload.parent_hash(), expected_parent_hash); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2856,7 +3184,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2865,12 +3193,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.prev_randao, - expected_prev_randao - ); + assert_eq!(payload.prev_randao(), expected_prev_randao); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2901,12 +3226,12 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .block_number + .block_number() + 1; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2915,12 +3240,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.block_number, - expected_block_number - ); + assert_eq!(payload.block_number(), expected_block_number); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
assert!(self @@ -2951,11 +3273,11 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .timestamp; + .timestamp(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2964,9 +3286,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + assert!(payload.timestamp() > min_expected_timestamp); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2991,7 +3313,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3000,7 +3322,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3028,7 +3350,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3037,7 +3359,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3071,7 +3393,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3080,7 +3402,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // This cache should not be populated because fallback should not have been used. assert!(self @@ -3100,7 +3422,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3109,7 +3431,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3149,7 +3471,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3158,7 +3480,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3188,7 +3510,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3197,7 +3519,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // This cache should not be populated because fallback should not have been used. 
assert!(self @@ -3231,7 +3553,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3240,13 +3562,10 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3275,7 +3594,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3284,7 +3603,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3297,6 +3616,209 @@ impl ApiTester { self } + pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_works_post_capella(self) -> Self { + // Ensure builder payload is chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let propose_state = self + .harness + .chain + .state_at_slot(slot, StateSkipConfig::WithoutStateRoots) + .unwrap(); + let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap(); + let withdrawals_root = withdrawals.tree_hash_root(); + // Set withdrawals root for builder + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(withdrawals_root)); + + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self { + // Ensure builder payload *would be* chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + // Set withdrawals root to something invalid + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen because the builder's was invalid + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -3380,7 +3902,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -3766,9 +4288,9 @@ async fn get_events() { #[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] async fn get_events_altair() { - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - ApiTester::new_from_spec(spec) + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) .await .test_get_events_altair() .await; @@ -3788,6 +4310,20 @@ async fn beacon_get() { .await .test_beacon_genesis() .await + .test_beacon_states_root_finalized() + .await + .test_beacon_states_fork_finalized() + .await + .test_beacon_states_finality_checkpoints_finalized() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized::() + .await + .test_beacon_blinded_blocks_finalized::() + .await + .test_debug_beacon_states_finalized() + .await .test_beacon_states_root() .await .test_beacon_states_fork() @@ -3924,6 +4460,8 @@ async fn debug_get() { .test_get_debug_beacon_states() .await .test_get_debug_beacon_heads() + .await + .test_get_debug_fork_choice() .await; } @@ -4281,6 +4819,38 @@ async fn builder_inadequate_builder_threshold() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_payload_chosen_by_profit() { + ApiTester::new_mev_tester_no_builder_threshold() + .await + .test_builder_payload_chosen_when_more_profitable() + .await + .test_local_payload_chosen_when_equally_profitable() + .await + .test_local_payload_chosen_when_more_profitable() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_works_post_capella() { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_validator_register_validator() + .await + .test_builder_works_post_capella() + .await + .test_lighthouse_rejects_invalid_withdrawals_root() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index dfdb8f7ff1b..2895506c3b3 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -116,7 +116,13 @@ pub fn serve( .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( metrics::gather_prometheus_metrics(&ctx) - .map(|body| Response::builder().status(200).body(body).unwrap()) + .map(|body| { + Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(body) + .unwrap() + }) .unwrap_or_else(|e| { Response::builder() .status(500) diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index b3e02d4cb6f..89fde323746 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,6 +1,7 @@ use beacon_chain::test_utils::EphemeralHarnessType; use environment::null_logger; use http_metrics::Config; +use reqwest::header::HeaderValue; use reqwest::StatusCode; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; @@ -45,7 +46,13 @@ async fn returns_200_ok() { listening_socket.port() ); - assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); + let response = reqwest::get(&url).await.unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response.headers().get("Content-Type").unwrap(), + 
&HeaderValue::from_str("text/plain").unwrap() + ); } .await } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c7a2b99c9c3..8928edfb7f9 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.1.0", features = ["libp2p"] } +discv5 = { version = "0.2.2", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } eth2_ssz_types = { version = "0.2.2", path = "../../consensus/ssz_types" } @@ -13,6 +13,8 @@ serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } eth2_ssz_derive = { version = "0.3.0", path = "../../consensus/ssz_derive" } +tree_hash = { version = "0.4.1", path = "../../consensus/tree_hash" } +tree_hash_derive = { version = "0.4.0", path = "../../consensus/tree_hash_derive" } slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } @@ -25,6 +27,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" +lru_cache = { path = "../../common/lru_cache" } parking_lot = "0.12.0" sha2 = "0.10" snap = "1.0.1" @@ -39,7 +42,7 @@ strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } -delay_map = "0.1.1" +delay_map = "0.3.0" void = "1" [dependencies.libp2p] diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 0ae3d9a23b6..d8efa20209c 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,3 +1,5 @@ +use crate::listen_addr::{ListenAddr, ListenAddress}; +use crate::rpc::config::OutboundRateLimiterConfig; use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; use directory::{ @@ -11,6 +13,7 @@ use libp2p::gossipsub::{ use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +use std::net::{Ipv4Addr, Ipv6Addr}; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -56,24 +59,24 @@ pub struct Config { /// Data directory where node's keyfile is stored pub network_dir: PathBuf, - /// IP address to listen on. - pub listen_address: std::net::IpAddr, - - /// The TCP port that libp2p listens on. - pub libp2p_port: u16, - - /// UDP port that discovery listens on. - pub discovery_port: u16, + /// IP addresses to listen on. + listen_addresses: ListenAddress, /// The address to broadcast to peers about which address we are listening on. None indicates /// that no discovery address has been set in the CLI args. - pub enr_address: Option, + pub enr_address: (Option, Option), + + /// The udp4 port to broadcast to peers in order to reach back for discovery. + pub enr_udp4_port: Option, + + /// The tcp4 port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp4_port: Option, - /// The udp port to broadcast to peers in order to reach back for discovery. - pub enr_udp_port: Option, + /// The udp6 port to broadcast to peers in order to reach back for discovery. 
+ pub enr_udp6_port: Option, - /// The tcp port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp_port: Option, + /// The tcp6 port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp6_port: Option, /// Target number of connected peers. pub target_peers: usize, @@ -98,6 +101,9 @@ pub struct Config { /// List of trusted libp2p nodes which are not scored. pub trusted_peers: Vec, + /// Disables peer scoring altogether. + pub disable_peer_scoring: bool, + /// Client version pub client_version: String, @@ -133,6 +139,108 @@ pub struct Config { /// Whether light client protocols should be enabled. pub enable_light_client_server: bool, + + /// Configuration for the outbound rate limiter (requests made by this node). + pub outbound_rate_limiter_config: Option, +} + +impl Config { + /// Sets the listening address to use an ipv4 address. The discv5 ip_mode and table filter are + /// adjusted accordingly to ensure addresses that are present in the enr are globally + /// reachable. + pub fn set_ipv4_listening_address(&mut self, addr: Ipv4Addr, tcp_port: u16, udp_port: u16) { + self.listen_addresses = ListenAddress::V4(ListenAddr { + addr, + udp_port, + tcp_port, + }); + self.discv5_config.ip_mode = discv5::IpMode::Ip4; + self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) + } + + /// Sets the listening address to use an ipv6 address. The discv5 ip_mode and table filter is + /// adjusted accordingly to ensure addresses that are present in the enr are globally + /// reachable. + pub fn set_ipv6_listening_address(&mut self, addr: Ipv6Addr, tcp_port: u16, udp_port: u16) { + self.listen_addresses = ListenAddress::V6(ListenAddr { + addr, + udp_port, + tcp_port, + }); + self.discv5_config.ip_mode = discv5::IpMode::Ip6 { + enable_mapped_addresses: false, + }; + self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) + } + + /// Sets the listening address to use both an ipv4 and ipv6 address. The discv5 ip_mode and + /// table filter is adjusted accordingly to ensure addresses that are present in the enr are + /// globally reachable. 
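    ///
    /// For illustration (ports and addresses here are arbitrary):
    /// `config.set_ipv4_ipv6_listening_addresses(Ipv4Addr::UNSPECIFIED, 9000, 9000, Ipv6Addr::UNSPECIFIED, 9090, 9090)`
    /// leaves `config.listen_addrs()` as a `ListenAddress::DualStack` and switches discv5 to
    /// `IpMode::Ip6 { enable_mapped_addresses: true }`.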
+ pub fn set_ipv4_ipv6_listening_addresses( + &mut self, + v4_addr: Ipv4Addr, + tcp4_port: u16, + udp4_port: u16, + v6_addr: Ipv6Addr, + tcp6_port: u16, + udp6_port: u16, + ) { + self.listen_addresses = ListenAddress::DualStack( + ListenAddr { + addr: v4_addr, + udp_port: udp4_port, + tcp_port: tcp4_port, + }, + ListenAddr { + addr: v6_addr, + udp_port: udp6_port, + tcp_port: tcp6_port, + }, + ); + + self.discv5_config.ip_mode = discv5::IpMode::Ip6 { + enable_mapped_addresses: true, + }; + self.discv5_config.table_filter = |enr| match (&enr.ip4(), &enr.ip6()) { + (None, None) => false, + (None, Some(ip6)) => is_global_ipv6(ip6), + (Some(ip4), None) => is_global_ipv4(ip4), + (Some(ip4), Some(ip6)) => is_global_ipv4(ip4) && is_global_ipv6(ip6), + }; + } + + pub fn set_listening_addr(&mut self, listen_addr: ListenAddress) { + match listen_addr { + ListenAddress::V4(ListenAddr { + addr, + udp_port, + tcp_port, + }) => self.set_ipv4_listening_address(addr, tcp_port, udp_port), + ListenAddress::V6(ListenAddr { + addr, + udp_port, + tcp_port, + }) => self.set_ipv6_listening_address(addr, tcp_port, udp_port), + ListenAddress::DualStack( + ListenAddr { + addr: ip4addr, + udp_port: udp4_port, + tcp_port: tcp4_port, + }, + ListenAddr { + addr: ip6addr, + udp_port: udp6_port, + tcp_port: tcp6_port, + }, + ) => self.set_ipv4_ipv6_listening_addresses( + ip4addr, tcp4_port, udp4_port, ip6addr, tcp6_port, udp6_port, + ), + } + } + + pub fn listen_addrs(&self) -> &ListenAddress { + &self.listen_addresses + } } impl Default for Config { @@ -179,7 +287,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global_ipv4(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); @@ -187,12 +295,16 @@ impl Default for Config { // NOTE: Some of these get overridden by the corresponding CLI default values. 
Config { network_dir, - listen_address: "0.0.0.0".parse().expect("valid ip address"), - libp2p_port: 9000, - discovery_port: 9000, - enr_address: None, - enr_udp_port: None, - enr_tcp_port: None, + listen_addresses: ListenAddress::V4(ListenAddr { + addr: Ipv4Addr::UNSPECIFIED, + udp_port: 9000, + tcp_port: 9000, + }), + enr_address: (None, None), + enr_udp4_port: None, + enr_tcp4_port: None, + enr_udp6_port: None, + enr_tcp6_port: None, target_peers: 50, gs_config, discv5_config, @@ -200,6 +312,7 @@ impl Default for Config { boot_nodes_multiaddr: vec![], libp2p_nodes: vec![], trusted_peers: vec![], + disable_peer_scoring: false, client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, @@ -211,6 +324,7 @@ impl Default for Config { topics: Vec::new(), metrics_enabled: false, enable_light_client_server: false, + outbound_rate_limiter_config: None, } } } @@ -300,9 +414,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { - // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub - // the derivation of the message-id remains the same in the merge - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Merge | ForkName::Capella => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), @@ -358,7 +470,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos /// Helper function to determine if the IpAddr is a global address or not. The `is_global()` /// function is not yet stable on IpAddr. #[allow(clippy::nonminimal_bool)] -fn is_global(addr: &std::net::Ipv4Addr) -> bool { +fn is_global_ipv4(addr: &Ipv4Addr) -> bool { // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two // globally routable addresses in the 192.0.0.0/24 range. if u32::from_be_bytes(addr.octets()) == 0xc0000009 @@ -379,3 +491,60 @@ fn is_global(addr: &std::net::Ipv4Addr) -> bool { // Make sure the address is not in 0.0.0.0/8 && addr.octets()[0] != 0 } + +/// NOTE: Docs taken from https://doc.rust-lang.org/stable/std/net/struct.Ipv6Addr.html#method.is_global +/// +/// Returns true if the address appears to be globally reachable as specified by the IANA IPv6 +/// Special-Purpose Address Registry. Whether or not an address is practically reachable will +/// depend on your network configuration. +/// +/// Most IPv6 addresses are globally reachable; unless they are specifically defined as not +/// globally reachable. +/// +/// Non-exhaustive list of notable addresses that are not globally reachable: +/// +/// - The unspecified address (is_unspecified) +/// - The loopback address (is_loopback) +/// - IPv4-mapped addresses +/// - Addresses reserved for benchmarking +/// - Addresses reserved for documentation (is_documentation) +/// - Unique local addresses (is_unique_local) +/// - Unicast addresses with link-local scope (is_unicast_link_local) +// TODO: replace with [`Ipv6Addr::is_global`] once +// [Ip](https://github.com/rust-lang/rust/issues/27709) is stable. 
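// For example (illustrative): `is_global_ipv6(&Ipv6Addr::LOCALHOST)` is `false` (loopback),
// `is_global_ipv6(&"fe80::1".parse().unwrap())` is `false` (link-local), while a typical
// global unicast address such as `2606:4700::1111` is reported as globally reachable.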
+pub const fn is_global_ipv6(addr: &Ipv6Addr) -> bool { + const fn is_documentation(addr: &Ipv6Addr) -> bool { + (addr.segments()[0] == 0x2001) && (addr.segments()[1] == 0xdb8) + } + const fn is_unique_local(addr: &Ipv6Addr) -> bool { + (addr.segments()[0] & 0xfe00) == 0xfc00 + } + const fn is_unicast_link_local(addr: &Ipv6Addr) -> bool { + (addr.segments()[0] & 0xffc0) == 0xfe80 + } + !(addr.is_unspecified() + || addr.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(addr.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(addr.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(addr.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(addr.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(addr.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(addr.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(addr.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(addr.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(addr.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F) + )) + || is_documentation(addr) + || is_unique_local(addr) + || is_unicast_link_local(addr)) +} diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 6b4b87a5f80..938e7cfa257 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -145,16 +145,39 @@ pub fn create_enr_builder_from_config( enable_tcp: bool, ) -> EnrBuilder { let mut builder = EnrBuilder::new("v4"); - if let Some(enr_address) = config.enr_address { - builder.ip(enr_address); + let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; + + if let Some(ip) = maybe_ipv4_address { + builder.ip4(*ip); + } + + if let Some(ip) = maybe_ipv6_address { + builder.ip6(*ip); + } + + if let Some(udp4_port) = config.enr_udp4_port { + builder.udp4(udp4_port); } - if let Some(udp_port) = config.enr_udp_port { - builder.udp4(udp_port); + + if let Some(udp6_port) = config.enr_udp6_port { + builder.udp6(udp6_port); } - // we always give it our listening tcp port + if enable_tcp { - let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port); - builder.tcp4(tcp_port); + // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. + let tcp4_port = config + .enr_tcp4_port + .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port)); + if let Some(tcp4_port) = tcp4_port { + builder.tcp4(tcp4_port); + } + + let tcp6_port = config + .enr_tcp6_port + .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port)); + if let Some(tcp6_port) = tcp6_port { + builder.tcp6(tcp6_port); + } } builder } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index c41844c2c59..13fdf8ed577 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -177,6 +177,13 @@ pub struct Discovery { /// always false. 
pub started: bool, + /// This keeps track of whether an external UDP port change should also indicate an internal + /// TCP port change. As we cannot detect our external TCP port, we assume that the external UDP + /// port is also our external TCP port. This assumption only holds if the user has not + /// explicitly set their ENR TCP port via the CLI config. The first indicates tcp4 and the + /// second indicates tcp6. + update_tcp_port: (bool, bool), + /// Logger for the discovery behaviour. log: slog::Logger, } @@ -197,12 +204,18 @@ impl Discovery { }; let local_enr = network_globals.local_enr.read().clone(); + let local_node_id = local_enr.node_id(); info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), - "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6() + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6() ); - - let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); + let listen_socket = match config.listen_addrs() { + crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(), + crate::listen_addr::ListenAddress::V6(v6_addr) => v6_addr.udp_socket_addr(), + crate::listen_addr::ListenAddress::DualStack(_v4_addr, v6_addr) => { + v6_addr.udp_socket_addr() + } + }; // convert the keypair into an ENR key let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?; @@ -212,6 +225,10 @@ impl Discovery { // Add bootnodes to routing table for bootnode_enr in config.boot_nodes_enr.clone() { + if bootnode_enr.node_id() == local_node_id { + // If we are a boot node, ignore adding it to the routing table + continue; + } debug!( log, "Adding node to routing table"; @@ -290,6 +307,11 @@ impl Discovery { } } + let update_tcp_port = ( + config.enr_tcp4_port.is_none(), + config.enr_tcp6_port.is_none(), + ); + Ok(Self { cached_enrs: LruCache::new(50), network_globals, @@ -299,6 +321,7 @@ impl Discovery { discv5, event_stream, started: !config.disable_discovery, + update_tcp_port, log, enr_dir, }) @@ -1009,20 +1032,40 @@ impl NetworkBehaviour for Discovery { metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version // to disk. + + if (self.update_tcp_port.0 && socket_addr.is_ipv4()) + || (self.update_tcp_port.1 && socket_addr.is_ipv6()) + { + // Update the TCP port in the ENR + self.discv5.update_local_enr_socket(socket_addr, true); + } let enr = self.discv5.local_enr(); enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); // update network globals *self.network_globals.local_enr.write() = enr; // A new UDP socket has been detected. // Build a multiaddr to report to libp2p - let mut address = Multiaddr::from(socket_addr.ip()); - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. 
- address.push(Protocol::Tcp(self.network_globals.listen_port_tcp())); - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + let addr = match socket_addr.ip() { + IpAddr::V4(v4_addr) => { + self.network_globals.listen_port_tcp4().map(|tcp4_port| { + Multiaddr::from(v4_addr).with(Protocol::Tcp(tcp4_port)) + }) + } + IpAddr::V6(v6_addr) => { + self.network_globals.listen_port_tcp6().map(|tcp6_port| { + Multiaddr::from(v6_addr).with(Protocol::Tcp(tcp6_port)) + }) + } + }; + + if let Some(address) = addr { + // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling + // should handle this. + return Poll::Ready(NBAction::ReportObservedAddr { + address, + score: AddressScore::Finite(1), + }); + } } Discv5Event::EnrAdded { .. } | Discv5Event::TalkRequest(_) @@ -1087,7 +1130,6 @@ mod tests { use enr::EnrBuilder; use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; - use unused_port::unused_udp_port; type E = MinimalEthSpec; @@ -1105,23 +1147,22 @@ mod tests { async fn build_discovery() -> Discovery { let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let config = NetworkConfig { - discovery_port: unused_udp_port().unwrap(), - ..Default::default() - }; + let mut config = NetworkConfig::default(); + config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, - 9000, - 9000, + Some(9000), + None, MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), }), vec![], + false, &log, ); Discovery::new(&keypair, &config, Arc::new(globals), &log) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index be4da809cb2..3d539af3b28 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -10,12 +10,14 @@ pub mod service; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; +pub mod listen_addr; pub mod metrics; pub mod peer_manager; pub mod rpc; pub mod types; pub use config::gossip_max_size; +pub use listen_addr::*; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::str::FromStr; diff --git a/beacon_node/lighthouse_network/src/listen_addr.rs b/beacon_node/lighthouse_network/src/listen_addr.rs new file mode 100644 index 00000000000..20d87d403cd --- /dev/null +++ b/beacon_node/lighthouse_network/src/listen_addr.rs @@ -0,0 +1,97 @@ +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + +use libp2p::{multiaddr::Protocol, Multiaddr}; +use serde::{Deserialize, Serialize}; + +/// A listening address composed by an Ip, an UDP port and a TCP port. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ListenAddr { + pub addr: Ip, + pub udp_port: u16, + pub tcp_port: u16, +} + +impl + Clone> ListenAddr { + pub fn udp_socket_addr(&self) -> SocketAddr { + (self.addr.clone().into(), self.udp_port).into() + } + + pub fn tcp_socket_addr(&self) -> SocketAddr { + (self.addr.clone().into(), self.tcp_port).into() + } +} + +/// Types of listening addresses Lighthouse can accept. 
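///
/// For instance (illustrative values), `ListenAddress::V4(ListenAddr { addr: Ipv4Addr::UNSPECIFIED, udp_port: 9000, tcp_port: 9000 })`
/// listens over IPv4 only: `v4()` returns `Some(..)`, `v6()` returns `None`, and
/// `tcp_addresses()` yields a single `/ip4/0.0.0.0/tcp/9000` multiaddr.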
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ListenAddress { + V4(ListenAddr), + V6(ListenAddr), + DualStack(ListenAddr, ListenAddr), +} + +impl ListenAddress { + /// Return the listening address over IpV4 if any. + pub fn v4(&self) -> Option<&ListenAddr> { + match self { + ListenAddress::V4(v4_addr) | ListenAddress::DualStack(v4_addr, _) => Some(v4_addr), + ListenAddress::V6(_) => None, + } + } + + /// Return the listening address over IpV6 if any. + pub fn v6(&self) -> Option<&ListenAddr> { + match self { + ListenAddress::V6(v6_addr) | ListenAddress::DualStack(_, v6_addr) => Some(v6_addr), + ListenAddress::V4(_) => None, + } + } + + /// Returns the TCP addresses. + pub fn tcp_addresses(&self) -> impl Iterator + '_ { + let v4_multiaddr = self + .v4() + .map(|v4_addr| Multiaddr::from(v4_addr.addr).with(Protocol::Tcp(v4_addr.tcp_port))); + let v6_multiaddr = self + .v6() + .map(|v6_addr| Multiaddr::from(v6_addr.addr).with(Protocol::Tcp(v6_addr.tcp_port))); + v4_multiaddr.into_iter().chain(v6_multiaddr) + } + + #[cfg(test)] + pub fn unused_v4_ports() -> Self { + ListenAddress::V4(ListenAddr { + addr: Ipv4Addr::UNSPECIFIED, + udp_port: unused_port::unused_udp4_port().unwrap(), + tcp_port: unused_port::unused_tcp4_port().unwrap(), + }) + } + + #[cfg(test)] + pub fn unused_v6_ports() -> Self { + ListenAddress::V6(ListenAddr { + addr: Ipv6Addr::UNSPECIFIED, + udp_port: unused_port::unused_udp6_port().unwrap(), + tcp_port: unused_port::unused_tcp6_port().unwrap(), + }) + } +} + +impl slog::KV for ListenAddress { + fn serialize( + &self, + _record: &slog::Record, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + if let Some(v4_addr) = self.v4() { + serializer.emit_arguments("ip4_address", &format_args!("{}", v4_addr.addr))?; + serializer.emit_u16("udp4_port", v4_addr.udp_port)?; + serializer.emit_u16("tcp4_port", v4_addr.tcp_port)?; + } + if let Some(v6_addr) = self.v6() { + serializer.emit_arguments("ip6_address", &format_args!("{}", v6_addr.addr))?; + serializer.emit_u16("udp6_port", v6_addr.udp_port)?; + serializer.emit_u16("tcp6_port", v6_addr.tcp_port)?; + } + slog::Result::Ok(()) + } +} diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 2ee224d5e28..58cc9920126 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -159,7 +159,7 @@ pub fn check_nat() { if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 { return; } - if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) == 0 + if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) != 0 || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64 { inc_counter(&NAT_OPEN); @@ -167,7 +167,8 @@ pub fn check_nat() { } pub fn scrape_discovery_metrics() { - let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics()); + let metrics = + discv5::metrics::Metrics::from(discv5::Discv5::::raw_metrics()); set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 89670a2eb3c..a461a12e530 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -8,11 +8,12 @@ use crate::{Subnet, SubnetDiscovery}; use 
delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; +use lru_cache::LRUTimeCache; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; use smallvec::SmallVec; -use std::collections::VecDeque; +use std::collections::BTreeMap; use std::{ sync::Arc, time::{Duration, Instant}, @@ -39,6 +40,9 @@ mod network_behaviour; /// requests. This defines the interval in seconds. const HEARTBEAT_INTERVAL: u64 = 30; +/// The minimum amount of time we allow peers to reconnect to us after a disconnect when we are +/// saturated with peers. This effectively looks like a swarm BAN for this amount of time. +pub const PEER_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(600); /// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would /// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet /// peers. @@ -73,7 +77,21 @@ pub struct PeerManager { /// The target number of peers we would like to connect to. target_peers: usize, /// Peers queued to be dialed. - peers_to_dial: VecDeque<(PeerId, Option)>, + peers_to_dial: BTreeMap>, + /// The number of temporarily banned peers. This is used to prevent instantaneous + /// reconnection. + // NOTE: This just prevents re-connections. The state of the peer is otherwise unaffected. A + // peer can be in a disconnected state and new connections will be refused and logged as if the + // peer is banned without it being reflected in the peer's state. + // Also the banned state can out-last the peer's reference in the peer db. So peers that are + // unknown to us can still be temporarily banned. This is fundamentally a relationship with + // the swarm. Regardless of our knowledge of the peer in the db, it will be temporarily banned + // at the swarm layer. + // NOTE: An LRUTimeCache is used compared to a structure that needs to be polled to avoid very + // frequent polling to unban peers. Instead, this cache piggy-backs the PeerManager heartbeat + // to update and clear the cache. Therefore the PEER_RECONNECTION_TIMEOUT only has a resolution + // of the HEARTBEAT_INTERVAL. + temporary_banned_peers: LRUTimeCache, /// A collection of sync committee subnets that we need to stay subscribed to. /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run /// discovery queries for subnet peers if we disconnect from existing sync @@ -143,6 +161,7 @@ impl PeerManager { outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)), status_peers: HashSetDelay::new(Duration::from_secs(status_interval)), target_peers: target_peer_count, + temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, @@ -243,6 +262,15 @@ impl PeerManager { reason: Option, ) { match ban_operation { + BanOperation::TemporaryBan => { + // The peer could be temporarily banned. We only do this in the case that + // we have currently reached our peer target limit. + if self.network_globals.connected_peers() >= self.target_peers { + // We have enough peers, prevent this reconnection. + self.temporary_banned_peers.raw_insert(*peer_id); + self.events.push(PeerManagerEvent::Banned(*peer_id, vec![])); + } + } BanOperation::DisconnectThePeer => { // The peer was currently connected, so we start a disconnection. 
// Once the peer has disconnected, its connection state will transition to a @@ -259,9 +287,23 @@ impl PeerManager { BanOperation::ReadyToBan(banned_ips) => { // The peer is not currently connected, we can safely ban it at the swarm // level. - // Inform the Swarm to ban the peer - self.events - .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + + // If a peer is being banned, this trumps any temporary ban the peer might be + // under. We no longer track it in the temporary ban list. + if !self.temporary_banned_peers.raw_remove(peer_id) { + // If the peer is not already banned, inform the Swarm to ban the peer + self.events + .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + // If the peer was in the process of being un-banned, remove it (a rare race + // condition) + self.events.retain(|event| { + if let PeerManagerEvent::UnBanned(unbanned_peer_id, _) = event { + unbanned_peer_id != peer_id // Remove matching peer ids + } else { + true + } + }); + } } } } @@ -275,7 +317,7 @@ impl PeerManager { /// proves resource constraining, we should switch to multiaddr dialling here. #[allow(clippy::mutable_key_type)] pub fn peers_discovered(&mut self, results: HashMap>) -> Vec { - let mut to_dial_peers = Vec::new(); + let mut to_dial_peers = Vec::with_capacity(4); let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); for (peer_id, min_ttl) in results { @@ -365,7 +407,7 @@ impl PeerManager { // A peer is being dialed. pub fn dial_peer(&mut self, peer_id: &PeerId, enr: Option) { - self.peers_to_dial.push_back((*peer_id, enr)); + self.peers_to_dial.insert(*peer_id, enr); } /// Reports if a peer is banned or not. @@ -519,8 +561,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, - Protocol::MetaData => PeerAction::LowToleranceError, - Protocol::Status => PeerAction::LowToleranceError, + Protocol::MetaData => PeerAction::Fatal, + Protocol::Status => PeerAction::Fatal, } } RPCError::StreamTimeout => match direction { @@ -1109,6 +1151,14 @@ impl PeerManager { } } + /// Unbans any temporarily banned peers that have served their timeout. + fn unban_temporary_banned_peers(&mut self) { + for peer_id in self.temporary_banned_peers.remove_expired() { + self.events + .push(PeerManagerEvent::UnBanned(peer_id, Vec::new())); + } + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -1141,6 +1191,21 @@ impl PeerManager { // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); + + // Unban any peers that have served their temporary ban timeout + self.unban_temporary_banned_peers(); + + // Maintains memory by shrinking mappings + self.shrink_mappings(); + } + + // Reduce memory footprint by routinely shrinking associating mappings. + fn shrink_mappings(&mut self) { + self.inbound_ping_peers.shrink_to(5); + self.outbound_ping_peers.shrink_to(5); + self.status_peers.shrink_to(5); + self.temporary_banned_peers.shrink_to_fit(); + self.sync_committee_subnets.shrink_to_fit(); } // Update metrics related to peer scoring. 
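The temporary ban bookkeeping above is deliberately cheap: instead of arming a timer per banned peer, expired entries in the `LRUTimeCache` are only released from the heartbeat, so the 600-second `PEER_RECONNECTION_TIMEOUT` is enforced at the 30-second `HEARTBEAT_INTERVAL` resolution. A minimal, self-contained sketch of that pattern (std only; `PeerId` is a stand-in for the libp2p type, and this approximates rather than reproduces `LRUTimeCache`):

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    type PeerId = u64; // stand-in for libp2p::PeerId in this sketch

    struct TemporaryBans {
        timeout: Duration,
        banned_at: HashMap<PeerId, Instant>,
    }

    impl TemporaryBans {
        fn new(timeout: Duration) -> Self {
            Self { timeout, banned_at: HashMap::new() }
        }

        /// Record a temporary ban (a saturated node seeing a peer disconnect).
        fn insert(&mut self, peer: PeerId) {
            self.banned_at.insert(peer, Instant::now());
        }

        /// Called from the periodic heartbeat: drop and return peers whose ban has expired,
        /// so the caller can emit the corresponding un-ban events.
        fn remove_expired(&mut self) -> Vec<PeerId> {
            let timeout = self.timeout;
            let expired: Vec<PeerId> = self
                .banned_at
                .iter()
                .filter(|(_, since)| since.elapsed() >= timeout)
                .map(|(peer, _)| *peer)
                .collect();
            for peer in &expired {
                self.banned_at.remove(peer);
            }
            expired
        }
    }

    fn main() {
        let mut bans = TemporaryBans::new(Duration::from_secs(600));
        bans.insert(42);
        // On every heartbeat tick, whatever has expired gets un-banned.
        let _unbanned = bans.remove_expired();
    }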
diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 42eb270c40e..24de83a61da 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -89,7 +89,7 @@ impl NetworkBehaviour for PeerManager { self.events.shrink_to_fit(); } - if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_front() { + if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); let handler = self.new_handler(); return Poll::Ready(NetworkBehaviourAction::Dial { @@ -156,8 +156,10 @@ impl PeerManager { BanResult::BadScore => { // This is a faulty state error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Reban the peer + // Disconnect the peer. self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); + // Re-ban the peer to prevent repeated errors. + self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); return; } BanResult::BannedIp(ip_addr) => { @@ -170,7 +172,7 @@ impl PeerManager { BanResult::NotBanned => {} } - // Count dialing peers in the limit if the peer dialied us. + // Count dialing peers in the limit if the peer dialed us. let count_dialing = endpoint.is_listener(); // Check the connection limits if self.peer_limit_reached(count_dialing) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 1f44488a569..20870656883 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -41,12 +41,14 @@ pub struct PeerDB { disconnected_peers: usize, /// Counts banned peers in total and per ip banned_peers_count: BannedPeersCount, + /// Specifies if peer scoring is disabled. + disable_peer_scoring: bool, /// PeerDB's logger log: slog::Logger, } impl PeerDB { - pub fn new(trusted_peers: Vec, log: &slog::Logger) -> Self { + pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() @@ -56,6 +58,7 @@ impl PeerDB { log: log.clone(), disconnected_peers: 0, banned_peers_count: BannedPeersCount::default(), + disable_peer_scoring, peers, } } @@ -704,7 +707,11 @@ impl PeerDB { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); } - PeerInfo::default() + if self.disable_peer_scoring { + PeerInfo::trusted_peer_info() + } else { + PeerInfo::default() + } }); // Ban the peer if the score is not already low enough. @@ -844,8 +851,16 @@ impl PeerDB { .collect::>(); return Some(BanOperation::ReadyToBan(banned_ips)); } - PeerConnectionStatus::Disconnecting { .. } - | PeerConnectionStatus::Unknown + PeerConnectionStatus::Disconnecting { .. } => { + // The peer has been disconnected but not banned. Inform the peer manager + // that this peer could be eligible for a temporary ban. + self.disconnected_peers += 1; + info.set_connection_status(PeerConnectionStatus::Disconnected { + since: Instant::now(), + }); + return Some(BanOperation::TemporaryBan); + } + PeerConnectionStatus::Unknown | PeerConnectionStatus::Connected { .. } | PeerConnectionStatus::Dialing { .. 
} => { self.disconnected_peers += 1; @@ -1177,6 +1192,9 @@ impl From> for ScoreUpdateResult { /// When attempting to ban a peer provides the peer manager with the operation that must be taken. pub enum BanOperation { + /// Optionally temporarily ban this peer to prevent instantaneous reconnection. + /// The peer manager will decide if temporary banning is required. + TemporaryBan, // The peer is currently connected. Perform a graceful disconnect before banning at the swarm // level. DisconnectThePeer, @@ -1289,7 +1307,7 @@ mod tests { fn get_db() -> PeerDB { let log = build_log(slog::Level::Debug, false); - PeerDB::new(vec![], &log) + PeerDB::new(vec![], false, &log) } #[test] @@ -1988,7 +2006,7 @@ mod tests { fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], &log); + let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log); pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); @@ -2007,4 +2025,28 @@ mod tests { Score::max_score().score() ); } + + #[test] + fn test_disable_peer_scoring() { + let peer = PeerId::random(); + let log = build_log(slog::Level::Debug, false); + let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); + + pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Check trusted status and score + assert!(pdb.peer_info(&peer).unwrap().is_trusted()); + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + + // Adding/Subtracting score should have no effect on a trusted peer + add_score(&mut pdb, &peer, -50.0); + + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 53f85d9a7b6..6c6ce2da32f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -193,14 +193,17 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index eccbf0dd623..28fea40a20d 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,9 +15,10 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; +use types::light_client_bootstrap::LightClientBootstrap; use types::{ - light_client_bootstrap::LightClientBootstrap, EthSpec, ForkContext, ForkName, Hash256, - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, + EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, 
SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -409,6 +410,10 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Capella { .. } => { + // Capella context being `None` implies that "merge never happened". + fork_context.to_context_bytes(ForkName::Capella) + } SignedBeaconBlock::Merge { .. } => { // Merge context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Merge) @@ -595,6 +600,11 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), }, Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( @@ -610,6 +620,11 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), }, _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, @@ -645,8 +660,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, ForkContext, - FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, + ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -659,14 +674,17 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs new file mode 100644 index 00000000000..bea0929fb0b --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -0,0 +1,173 @@ +use std::{ + fmt::{Debug, Display}, + str::FromStr, + time::Duration, +}; + +use super::{methods, rate_limiter::Quota, Protocol}; + +use serde_derive::{Deserialize, Serialize}; + +/// Auxiliary struct to aid on configuration parsing. +/// +/// A protocol's quota is specified as `protocol_name:tokens/time_in_seconds`. 
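///
/// For example, `"status:5/15"` allows five Status requests per 15-second window, and
/// `"beacon_blocks_by_range:128/10"` budgets roughly 128 requested blocks per 10 seconds
/// (values here are illustrative; see `OutboundRateLimiterConfig` below for the protocols
/// that accept a quota).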
+#[derive(Debug, PartialEq, Eq)] +struct ProtocolQuota { + protocol: Protocol, + quota: Quota, +} + +impl Display for ProtocolQuota { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}:{}/{}", + self.protocol.as_ref(), + self.quota.max_tokens, + self.quota.replenish_all_every.as_secs() + ) + } +} + +impl FromStr for ProtocolQuota { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let (protocol_str, quota_str) = s + .split_once(':') + .ok_or("Missing ':' from quota definition.")?; + let protocol = protocol_str + .parse() + .map_err(|_parse_err| "Wrong protocol representation in quota")?; + let (tokens_str, time_str) = quota_str + .split_once('/') + .ok_or("Quota should be defined as \"n/t\" (t in seconds). Missing '/' from quota.")?; + let tokens = tokens_str + .parse() + .map_err(|_| "Failed to parse tokens from quota.")?; + let seconds = time_str + .parse::() + .map_err(|_| "Failed to parse time in seconds from quota.")?; + Ok(ProtocolQuota { + protocol, + quota: Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: tokens, + }, + }) + } +} + +/// Configurations for the rate limiter applied to outbound requests (made by the node itself). +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OutboundRateLimiterConfig { + pub(super) ping_quota: Quota, + pub(super) meta_data_quota: Quota, + pub(super) status_quota: Quota, + pub(super) goodbye_quota: Quota, + pub(super) blocks_by_range_quota: Quota, + pub(super) blocks_by_root_quota: Quota, +} + +impl OutboundRateLimiterConfig { + pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); + pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); + pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); + pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); + pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); +} + +impl Default for OutboundRateLimiterConfig { + fn default() -> Self { + OutboundRateLimiterConfig { + ping_quota: Self::DEFAULT_PING_QUOTA, + meta_data_quota: Self::DEFAULT_META_DATA_QUOTA, + status_quota: Self::DEFAULT_STATUS_QUOTA, + goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, + blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, + blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + } + } +} + +impl Debug for OutboundRateLimiterConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + macro_rules! fmt_q { + ($quota:expr) => { + &format_args!( + "{}/{}s", + $quota.max_tokens, + $quota.replenish_all_every.as_secs() + ) + }; + } + + f.debug_struct("OutboundRateLimiterConfig") + .field("ping", fmt_q!(&self.ping_quota)) + .field("metadata", fmt_q!(&self.meta_data_quota)) + .field("status", fmt_q!(&self.status_quota)) + .field("goodbye", fmt_q!(&self.goodbye_quota)) + .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) + .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .finish() + } +} + +/// Parse configurations for the outbound rate limiter. Protocols that are not specified use +/// the default values. Protocol specified more than once use only the first given Quota. +/// +/// The expected format is a ';' separated list of [`ProtocolQuota`]. 
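///
/// For example, `"ping:1/5;beacon_blocks_by_range:256/10".parse::<OutboundRateLimiterConfig>()`
/// overrides only those two quotas and leaves every other protocol at its `DEFAULT_*_QUOTA`
/// (values here are illustrative).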
+impl FromStr for OutboundRateLimiterConfig { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let mut ping_quota = None; + let mut meta_data_quota = None; + let mut status_quota = None; + let mut goodbye_quota = None; + let mut blocks_by_range_quota = None; + let mut blocks_by_root_quota = None; + for proto_def in s.split(';') { + let ProtocolQuota { protocol, quota } = proto_def.parse()?; + let quota = Some(quota); + match protocol { + Protocol::Status => status_quota = status_quota.or(quota), + Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), + Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), + Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::Ping => ping_quota = ping_quota.or(quota), + Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), + Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."), + } + } + Ok(OutboundRateLimiterConfig { + ping_quota: ping_quota.unwrap_or(Self::DEFAULT_PING_QUOTA), + meta_data_quota: meta_data_quota.unwrap_or(Self::DEFAULT_META_DATA_QUOTA), + status_quota: status_quota.unwrap_or(Self::DEFAULT_STATUS_QUOTA), + goodbye_quota: goodbye_quota.unwrap_or(Self::DEFAULT_GOODBYE_QUOTA), + blocks_by_range_quota: blocks_by_range_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), + blocks_by_root_quota: blocks_by_root_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_quota_inverse() { + let quota = ProtocolQuota { + protocol: Protocol::Goodbye, + quota: Quota { + replenish_all_every: Duration::from_secs(10), + max_tokens: 8, + }, + }; + assert_eq!(quota.to_string().parse(), Ok(quota)) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 203a642a8be..31569b820b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -12,7 +12,7 @@ use libp2p::swarm::{ PollParameters, SubstreamProtocol, }; use libp2p::PeerId; -use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr}; +use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; @@ -32,12 +32,17 @@ pub use methods::{ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; +use self::config::OutboundRateLimiterConfig; +use self::self_limiter::SelfRateLimiter; + pub(crate) mod codec; +pub mod config; mod handler; pub mod methods; mod outbound; mod protocol; mod rate_limiter; +mod self_limiter; /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -100,13 +105,18 @@ pub struct RPCMessage { pub event: HandlerEvent, } +type BehaviourAction = + NetworkBehaviourAction, RPCHandler>; + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { /// Rate limiter limiter: RateLimiter, + /// Rate limiter for our own requests. + self_limiter: Option>, /// Queue of events to be processed. - events: Vec, RPCHandler>>, + events: Vec>, fork_context: Arc, enable_light_client_server: bool, /// Slog logger for RPC behaviour. 
@@ -117,10 +127,12 @@ impl RPC { pub fn new( fork_context: Arc, enable_light_client_server: bool, + outbound_rate_limiter_config: Option, log: slog::Logger, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); - let limiter = RPCRateLimiterBuilder::new() + + let limiter = RateLimiter::builder() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) @@ -134,8 +146,14 @@ impl RPC { .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); + + let self_limiter = outbound_rate_limiter_config.map(|config| { + SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + }); + RPC { limiter, + self_limiter, events: Vec::new(), fork_context, enable_light_client_server, @@ -162,12 +180,24 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. - pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, event: OutboundRequest) { - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: RPCSend::Request(request_id, event), - }); + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + let event = if let Some(self_limiter) = self.self_limiter.as_mut() { + match self_limiter.allows(peer_id, request_id, req) { + Ok(event) => event, + Err(_e) => { + // Request is logged and queued internally in the self rate limiter. + return; + } + } + } else { + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + } + }; + + self.events.push(event); } /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This @@ -272,11 +302,19 @@ where cx: &mut Context, _: &mut impl PollParameters, ) -> Poll> { - // let the rate limiter prune + // let the rate limiter prune. let _ = self.limiter.poll_unpin(cx); + + if let Some(self_limiter) = self.self_limiter.as_mut() { + if let Poll::Ready(event) = self_limiter.poll_ready(cx) { + self.events.push(event) + } + } + if !self.events.is_empty() { return Poll::Ready(self.events.remove(0)); } + Poll::Pending } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 1f40f81971c..a8423e47b0b 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -14,15 +14,16 @@ use std::io; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; -use strum::IntoStaticStr; +use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; use tokio_io_timeout::TimeoutStream; use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, - ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, + EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + SignedBeaconBlock, }; lazy_static! { @@ -61,6 +62,13 @@ lazy_static! 
{ .as_ssz_bytes() .len(); + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::::from_block( + BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network @@ -68,7 +76,11 @@ lazy_static! { pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX - + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + + types::ExecutionPayload::::max_execution_payload_merge_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = @@ -95,13 +107,13 @@ lazy_static! { ]) .as_ssz_bytes() .len(); - } /// The maximum bytes that can be sent across the RPC pre-merge. pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M +pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). @@ -113,8 +125,9 @@ const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. pub fn max_rpc_size(fork_context: &ForkContext) -> usize { match fork_context.current_fork() { - ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, + ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, + ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA, } } @@ -135,25 +148,34 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks ), + ForkName::Capella => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks + ), } } /// Protocol names to be used. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumString, AsRefStr, Display)] +#[strum(serialize_all = "snake_case")] pub enum Protocol { /// The Status protocol name. Status, /// The Goodbye protocol name. Goodbye, /// The `BlocksByRange` protocol name. + #[strum(serialize = "beacon_blocks_by_range")] BlocksByRange, /// The `BlocksByRoot` protocol name. + #[strum(serialize = "beacon_blocks_by_root")] BlocksByRoot, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
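    ///
    /// Note: with the `EnumString`/`AsRefStr` derives above, this variant's textual name is
    /// `"metadata"`; these snake_case names are the identifiers used by the outbound rate
    /// limiter quota strings.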
+ #[strum(serialize = "metadata")] MetaData, /// The `LightClientBootstrap` protocol name. + #[strum(serialize = "light_client_bootstrap")] LightClientBootstrap, } @@ -172,21 +194,6 @@ pub enum Encoding { SSZSnappy, } -impl std::fmt::Display for Protocol { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let repr = match self { - Protocol::Status => "status", - Protocol::Goodbye => "goodbye", - Protocol::BlocksByRange => "beacon_blocks_by_range", - Protocol::BlocksByRoot => "beacon_blocks_by_root", - Protocol::Ping => "ping", - Protocol::MetaData => "metadata", - Protocol::LightClientBootstrap => "light_client_bootstrap", - }; - f.write_str(repr) - } -} - impl std::fmt::Display for Encoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { @@ -319,7 +326,6 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), - Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -338,13 +344,16 @@ impl ProtocolId { /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the /// beginning of the stream, else returns `false`. pub fn has_context_bytes(&self) -> bool { - if self.version == Version::V2 { - match self.message_name { - Protocol::BlocksByRange | Protocol::BlocksByRoot => return true, - _ => return false, - } + match self.message_name { + Protocol::BlocksByRange | Protocol::BlocksByRoot => match self.version { + Version::V2 => true, + Version::V1 => false, + }, + Protocol::LightClientBootstrap => match self.version { + Version::V2 | Version::V1 => true, + }, + Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false, } - false } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 6ba9f6e9419..a1f7b89a2f2 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,6 +1,7 @@ -use crate::rpc::{InboundRequest, Protocol}; +use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; +use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; @@ -47,12 +48,31 @@ type Nanosecs = u64; /// n*`replenish_all_every`/`max_tokens` units of time since their last request. /// /// To produce hard limits, set `max_tokens` to 1. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Quota { /// How often are `max_tokens` fully replenished. - replenish_all_every: Duration, + pub(super) replenish_all_every: Duration, /// Token limit. This translates on how large can an instantaneous batch of /// tokens be. - max_tokens: u64, + pub(super) max_tokens: u64, +} + +impl Quota { + /// A hard limit of one token every `seconds`. + pub const fn one_every(seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: 1, + } + } + + /// Allow `n` tokens to be use used every `seconds`. + pub const fn n_every(n: u64, seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: n, + } + } } /// Manages rate limiting of requests per peer, with differentiated rates per protocol. 
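To make the new `Quota` constructors concrete, here is a small sketch of the equivalences they encode (it assumes the code runs inside the `rpc` module, since the `Quota` fields are `pub(super)`):

    use std::time::Duration;

    // A hard limit: at most one Goodbye every 10 seconds (this is DEFAULT_GOODBYE_QUOTA).
    let goodbye = Quota::one_every(10);
    assert_eq!(
        goodbye,
        Quota { replenish_all_every: Duration::from_secs(10), max_tokens: 1 }
    );

    // A bursty limit: up to 5 Status requests at once, fully replenished every 15 seconds,
    // i.e. roughly one token regained every 3 seconds (this is DEFAULT_STATUS_QUOTA).
    let status = Quota::n_every(5, 15);
    assert_eq!(status.max_tokens, 5);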
@@ -78,6 +98,7 @@ pub struct RPCRateLimiter { } /// Error type for non conformant requests +#[derive(Debug)] pub enum RateLimitedErr { /// Required tokens for this request exceed the maximum TooLarge, @@ -86,7 +107,7 @@ pub enum RateLimitedErr { } /// User-friendly builder of a `RPCRateLimiter` -#[derive(Default)] +#[derive(Default, Clone)] pub struct RPCRateLimiterBuilder { /// Quota for the Goodbye protocol. goodbye_quota: Option, @@ -105,13 +126,8 @@ pub struct RPCRateLimiterBuilder { } impl RPCRateLimiterBuilder { - /// Get an empty `RPCRateLimiterBuilder`. - pub fn new() -> Self { - Default::default() - } - /// Set a quota for a protocol. - fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { + pub fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { let q = Some(quota); match protocol { Protocol::Ping => self.ping_quota = q, @@ -191,11 +207,40 @@ impl RPCRateLimiterBuilder { } } +pub trait RateLimiterItem { + fn protocol(&self) -> Protocol; + fn expected_responses(&self) -> u64; +} + +impl RateLimiterItem for super::InboundRequest { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} + +impl RateLimiterItem for super::OutboundRequest { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} impl RPCRateLimiter { - pub fn allows( + /// Get a builder instance. + pub fn builder() -> RPCRateLimiterBuilder { + RPCRateLimiterBuilder::default() + } + + pub fn allows( &mut self, peer_id: &PeerId, - request: &InboundRequest, + request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); let tokens = request.expected_responses().max(1); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs new file mode 100644 index 00000000000..451c6206f37 --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -0,0 +1,202 @@ +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + task::{Context, Poll}, + time::Duration, +}; + +use futures::FutureExt; +use libp2p::{swarm::NotifyHandler, PeerId}; +use slog::{crit, debug, Logger}; +use smallvec::SmallVec; +use tokio_util::time::DelayQueue; +use types::EthSpec; + +use super::{ + config::OutboundRateLimiterConfig, + rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, + BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, +}; + +/// A request that was rate limited or waiting on rate limited requests for the same peer and +/// protocol. +struct QueuedRequest { + req: OutboundRequest, + request_id: Id, +} + +pub(crate) struct SelfRateLimiter { + /// Requests queued for sending per peer. This requests are stored when the self rate + /// limiter rejects them. Rate limiting is based on a Peer and Protocol basis, therefore + /// are stored in the same way. + delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, + /// The delay required to allow a peer's outbound request per protocol. + next_peer_request: DelayQueue<(PeerId, Protocol)>, + /// Rate limiter for our own requests. + limiter: RateLimiter, + /// Requests that are ready to be sent. + ready_requests: SmallVec<[BehaviourAction; 3]>, + /// Slog logger. + log: Logger, +} + +/// Error returned when the rate limiter does not accept a request. +// NOTE: this is currently not used, but might be useful for debugging. 
+pub enum Error { + /// There are queued requests for this same peer and protocol. + PendingRequests, + /// Request was tried but rate limited. + RateLimited, +} + +impl SelfRateLimiter { + /// Creates a new [`SelfRateLimiter`] based on configration values. + pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { + debug!(log, "Using self rate limiting params"; "config" => ?config); + // Destructure to make sure every configuration value is used. + let OutboundRateLimiterConfig { + ping_quota, + meta_data_quota, + status_quota, + goodbye_quota, + blocks_by_range_quota, + blocks_by_root_quota, + } = config; + + let limiter = RateLimiter::builder() + .set_quota(Protocol::Ping, ping_quota) + .set_quota(Protocol::MetaData, meta_data_quota) + .set_quota(Protocol::Status, status_quota) + .set_quota(Protocol::Goodbye, goodbye_quota) + .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) + .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + // Manually set the LightClientBootstrap quota, since we use the same rate limiter for + // inbound and outbound requests, and the LightClientBootstrap is an only inbound + // protocol. + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) + .build()?; + + Ok(SelfRateLimiter { + delayed_requests: Default::default(), + next_peer_request: Default::default(), + limiter, + ready_requests: Default::default(), + log, + }) + } + + /// Checks if the rate limiter allows the request. If it's allowed, returns the + /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// until it can be sent. + pub fn allows( + &mut self, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest, + ) -> Result, Error> { + let protocol = req.protocol(); + // First check that there are not already other requests waiting to be sent. + if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) { + queued_requests.push_back(QueuedRequest { req, request_id }); + + return Err(Error::PendingRequests); + } + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + self.delayed_requests + .entry(key) + .or_default() + .push_back(rate_limited_req); + + Err(Error::RateLimited) + } + Ok(event) => Ok(event), + } + } + + /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the + /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// should be delayed, it's returned with the duration to wait. + fn try_send_request( + limiter: &mut RateLimiter, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest, + log: &Logger, + ) -> Result, (QueuedRequest, Duration)> { + match limiter.allows(&peer_id, &req) { + Ok(()) => Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }), + Err(e) => { + let protocol = req.protocol(); + match e { + RateLimitedErr::TooLarge => { + // this should never happen with default parameters. Let's just send the request. + // Log a crit since this is a config issue. + crit!( + log, + "Self rate limiting error for a batch that will never fit. Sending request anyway. 
Check configuration parameters."; + "protocol" => %req.protocol() + ); + Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }) + } + RateLimitedErr::TooSoon(wait_time) => { + debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); + Err((QueuedRequest { req, request_id }, wait_time)) + } + } + } + } + } + + /// When a peer and protocol are allowed to send a next request, this function checks the + /// queued requests and attempts marking as ready as many as the limiter allows. + fn next_peer_request_ready(&mut self, peer_id: PeerId, protocol: Protocol) { + if let Entry::Occupied(mut entry) = self.delayed_requests.entry((peer_id, protocol)) { + let queued_requests = entry.get_mut(); + while let Some(QueuedRequest { req, request_id }) = queued_requests.pop_front() { + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) + { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + queued_requests.push_back(rate_limited_req); + // If one fails just wait for the next window that allows sending requests. + return; + } + Ok(event) => self.ready_requests.push(event), + } + } + if queued_requests.is_empty() { + entry.remove(); + } + } + } + + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // First check the requests that were self rate limited, since those might add events to + // the queue. Also do this this before rate limiter prunning to avoid removing and + // immediately adding rate limiting keys. + if let Poll::Ready(Some(Ok(expired))) = self.next_peer_request.poll_expired(cx) { + let (peer_id, protocol) = expired.into_inner(); + self.next_peer_request_ready(peer_id, protocol); + } + // Prune the rate limiter. + let _ = self.limiter.poll_unpin(cx); + + // Finally return any queued events. + if !self.ready_requests.is_empty() { + return Poll::Ready(self.ready_requests.remove(0)); + } + + Poll::Pending + } +} diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 849a86f51ba..bd3df797699 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,7 +1,8 @@ use std::sync::Arc; use libp2p::core::connection::ConnectionId; -use types::{light_client_bootstrap::LightClientBootstrap, EthSpec, SignedBeaconBlock}; +use types::light_client_bootstrap::LightClientBootstrap; +use types::{EthSpec, SignedBeaconBlock}; use crate::rpc::{ methods::{ diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index c784191cd30..2865d5b3f6a 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -34,6 +34,8 @@ pub struct GossipCache { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for signed BLS to execution changes. + bls_to_execution_change: Option, /// Timeout for light client finality updates. light_client_finality_update: Option, /// Timeout for light client optimistic updates. @@ -59,6 +61,8 @@ pub struct GossipCacheBuilder { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. 
sync_committee_message: Option, + /// Timeout for signed BLS to execution changes. + bls_to_execution_change: Option, /// Timeout for light client finality updates. light_client_finality_update: Option, /// Timeout for light client optimistic updates. @@ -121,6 +125,12 @@ impl GossipCacheBuilder { self } + /// Timeout for BLS to execution change messages. + pub fn bls_to_execution_change_timeout(mut self, timeout: Duration) -> Self { + self.bls_to_execution_change = Some(timeout); + self + } + /// Timeout for light client finality update messages. pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self { self.light_client_finality_update = Some(timeout); @@ -144,6 +154,7 @@ impl GossipCacheBuilder { attester_slashing, signed_contribution_and_proof, sync_committee_message, + bls_to_execution_change, light_client_finality_update, light_client_optimistic_update, } = self; @@ -158,6 +169,7 @@ impl GossipCacheBuilder { attester_slashing: attester_slashing.or(default_timeout), signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout), sync_committee_message: sync_committee_message.or(default_timeout), + bls_to_execution_change: bls_to_execution_change.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), } @@ -182,6 +194,7 @@ impl GossipCache { GossipKind::AttesterSlashing => self.attester_slashing, GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof, GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message, + GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, }; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 5b3598216b5..f815e3bd36b 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,3 +1,5 @@ +use self::behaviour::Behaviour; +use self::gossip_cache::GossipCache; use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, @@ -7,15 +9,16 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ - subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, - SubnetDiscovery, + fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, + SnappyTransform, Subnet, SubnetDiscovery, }; +use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use crate::{rpc::*, EnrExt}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; @@ -31,20 +34,19 @@ use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; - -use std::marker::PhantomData; use std::path::PathBuf; use 
std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; +use std::{ + marker::PhantomData, + sync::Arc, + task::{Context, Poll}, +}; +use types::ForkName; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; -use self::behaviour::Behaviour; -use self::gossip_cache::GossipCache; - pub mod api_types; mod behaviour; mod gossip_cache; @@ -161,14 +163,15 @@ impl Network { let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); let globals = NetworkGlobals::new( enr, - config.libp2p_port, - config.discovery_port, + config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port), + config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port), meta_data, config .trusted_peers .iter() .map(|x| PeerId::from(x.clone())) .collect(), + config.disable_peer_scoring, &log, ); Arc::new(globals) @@ -197,6 +200,7 @@ impl Network { .attester_slashing_timeout(half_epoch * 2) // .signed_contribution_and_proof_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry + .bls_to_execution_change_timeout(half_epoch * 2) .build() }; @@ -262,6 +266,7 @@ impl Network { let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, + config.outbound_rate_limiter_config.clone(), log.clone(), ); @@ -384,36 +389,26 @@ impl Network { async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - let discovery_string = if config.disable_discovery { - "None".into() - } else { - config.discovery_port.to_string() - }; - - debug!(self.log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); - - let listen_multiaddr = { - let mut m = Multiaddr::from(config.listen_address); - m.push(MProtocol::Tcp(config.libp2p_port)); - m - }; - - match self.swarm.listen_on(listen_multiaddr.clone()) { - Ok(_) => { - let mut log_address = listen_multiaddr; - log_address.push(MProtocol::P2p(enr.peer_id().into())); - info!(self.log, "Listening established"; "address" => %log_address); - } - Err(err) => { - crit!( - self.log, - "Unable to listen on libp2p address"; - "error" => ?err, - "listen_multiaddr" => %listen_multiaddr, - ); - return Err("Libp2p was unable to listen on the given listen address.".into()); - } - }; + debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery); + + for listen_multiaddr in config.listen_addrs().tcp_addresses() { + match self.swarm.listen_on(listen_multiaddr.clone()) { + Ok(_) => { + let mut log_address = listen_multiaddr; + log_address.push(MProtocol::P2p(enr.peer_id().into())); + info!(self.log, "Listening established"; "address" => %log_address); + } + Err(err) => { + crit!( + self.log, + "Unable to listen on libp2p address"; + "error" => ?err, + "listen_multiaddr" => %listen_multiaddr, + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } + }; + } // helper closure for dialing peers let mut dial = |mut multiaddr: Multiaddr| { @@ -556,13 +551,20 @@ impl Network { self.unsubscribe(gossip_topic) } - /// Subscribe to all currently subscribed topics with the new 
fork digest. - pub fn subscribe_new_fork_topics(&mut self, new_fork_digest: [u8; 4]) { + /// Subscribe to all required topics for the `new_fork` with the given `new_fork_digest`. + pub fn subscribe_new_fork_topics(&mut self, new_fork: ForkName, new_fork_digest: [u8; 4]) { + // Subscribe to existing topics with new fork digest let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); for mut topic in subscriptions.into_iter() { topic.fork_digest = new_fork_digest; self.subscribe(topic); } + + // Subscribe to core topics for the new fork + for kind in fork_core_topics(&new_fork) { + let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); + self.subscribe(topic); + } } /// Unsubscribe from all topics that doesn't have the given fork_digest @@ -1118,7 +1120,7 @@ impl Network { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, - PeerAction::LowToleranceError, + PeerAction::Fatal, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), "does_not_support_gossipsub", diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index addaaf5b5e9..625df65ee9d 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -252,6 +252,7 @@ pub(crate) fn create_whitelist_filter( add(ProposerSlashing); add(AttesterSlashing); add(SignedContributionAndProof); + add(BlsToExecutionChange); add(LightClientFinalityUpdate); add(LightClientOptimisticUpdate); for id in 0..attestation_subnet_count { diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index aadd13a236b..43e8ebd76a5 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -7,7 +7,6 @@ use crate::EnrExt; use crate::{Enr, GossipTopic, Multiaddr, PeerId}; use parking_lot::RwLock; use std::collections::HashSet; -use std::sync::atomic::{AtomicU16, Ordering}; use types::EthSpec; pub struct NetworkGlobals { @@ -17,10 +16,10 @@ pub struct NetworkGlobals { pub peer_id: RwLock, /// Listening multiaddrs. pub listen_multiaddrs: RwLock>, - /// The TCP port that the libp2p service is listening on - pub listen_port_tcp: AtomicU16, - /// The UDP port that the discovery service is listening on - pub listen_port_udp: AtomicU16, + /// The TCP port that the libp2p service is listening on over Ipv4. + listen_port_tcp4: Option, + /// The TCP port that the libp2p service is listening on over Ipv6. + listen_port_tcp6: Option, /// The collection of known peers. pub peers: RwLock>, // The local meta data of our node. 
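`subscribe_new_fork_topics` now does two things at a fork boundary: re-subscribe every existing topic under the new fork digest, and join the core topics the new fork introduces. A simplified, self-contained sketch of that logic (topic kinds are plain strings here instead of `GossipKind`):

```rust
use std::collections::HashSet;

#[derive(Clone, PartialEq, Eq, Hash)]
struct Topic {
    kind: &'static str,
    fork_digest: [u8; 4],
}

fn subscribe_new_fork_topics(
    subscriptions: &mut HashSet<Topic>,
    new_fork_core_topics: &[&'static str],
    new_fork_digest: [u8; 4],
) {
    // Re-subscribe to everything we already follow, under the new fork digest.
    let existing: Vec<Topic> = subscriptions.iter().cloned().collect();
    for mut topic in existing {
        topic.fork_digest = new_fork_digest;
        subscriptions.insert(topic);
    }
    // Additionally subscribe to the core topics that are new with this fork.
    for &kind in new_fork_core_topics {
        subscriptions.insert(Topic { kind, fork_digest: new_fork_digest });
    }
}

fn main() {
    let mut subs = HashSet::new();
    subs.insert(Topic { kind: "beacon_block", fork_digest: [0; 4] });
    // Capella introduces `bls_to_execution_change` as a new core topic.
    subscribe_new_fork_topics(&mut subs, &["bls_to_execution_change"], [1; 4]);
    assert!(subs.contains(&Topic { kind: "beacon_block", fork_digest: [1; 4] }));
    assert!(subs.contains(&Topic { kind: "bls_to_execution_change", fork_digest: [1; 4] }));
}
```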
@@ -36,20 +35,21 @@ pub struct NetworkGlobals { impl NetworkGlobals { pub fn new( enr: Enr, - tcp_port: u16, - udp_port: u16, + listen_port_tcp4: Option, + listen_port_tcp6: Option, local_metadata: MetaData, trusted_peers: Vec, + disable_peer_scoring: bool, log: &slog::Logger, ) -> Self { NetworkGlobals { local_enr: RwLock::new(enr.clone()), peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), - listen_port_tcp: AtomicU16::new(tcp_port), - listen_port_udp: AtomicU16::new(udp_port), + listen_port_tcp4, + listen_port_tcp6, local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new(trusted_peers, log)), + peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), @@ -73,13 +73,13 @@ impl NetworkGlobals { } /// Returns the libp2p TCP port that this node has been configured to listen on. - pub fn listen_port_tcp(&self) -> u16 { - self.listen_port_tcp.load(Ordering::Relaxed) + pub fn listen_port_tcp4(&self) -> Option { + self.listen_port_tcp4 } /// Returns the UDP discovery port that this node has been configured to listen on. - pub fn listen_port_udp(&self) -> u16 { - self.listen_port_udp.load(Ordering::Relaxed) + pub fn listen_port_tcp6(&self) -> Option { + self.listen_port_tcp6 } /// Returns the number of libp2p connected peers. @@ -137,14 +137,15 @@ impl NetworkGlobals { let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); NetworkGlobals::new( enr, - 9000, - 9000, + Some(9000), + None, MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), }), vec![], + false, log, ) } diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 2a5ca6c8062..e7457f25dac 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -17,6 +17,6 @@ pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ - subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS, - LIGHT_CLIENT_GOSSIP_TOPICS, + core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, + GossipTopic, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index b036e558c99..bb0397de1e2 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -11,8 +11,9 @@ use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -33,6 +34,8 @@ pub enum PubsubMessage { SignedContributionAndProof(Box>), /// Gossipsub message providing notification of unaggregated sync committee 
signatures with its subnet id. SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), + /// Gossipsub message for BLS to execution change messages. + BlsToExecutionChange(Box), /// Gossipsub message providing notification of a light client finality update. LightClientFinalityUpdate(Box>), /// Gossipsub message providing notification of a light client optimistic update. @@ -119,6 +122,7 @@ impl PubsubMessage { PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), + PubsubMessage::BlsToExecutionChange(_) => GossipKind::BlsToExecutionChange, PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate, PubsubMessage::LightClientOptimisticUpdate(_) => { GossipKind::LightClientOptimisticUpdate @@ -175,6 +179,10 @@ impl PubsubMessage { SignedBeaconBlockMerge::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Capella) => SignedBeaconBlock::::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", @@ -214,6 +222,14 @@ impl PubsubMessage { sync_committee, )))) } + GossipKind::BlsToExecutionChange => { + let bls_to_execution_change = + SignedBlsToExecutionChange::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BlsToExecutionChange(Box::new( + bls_to_execution_change, + ))) + } GossipKind::LightClientFinalityUpdate => { let light_client_finality_update = LightClientFinalityUpdate::from_ssz_bytes(data) @@ -251,6 +267,7 @@ impl PubsubMessage { PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), + PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(), PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(), PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(), } @@ -287,6 +304,13 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::SyncCommitteeMessage(data) => { write!(f, "Sync committee message: subnet_id: {}", *data.0) } + PubsubMessage::BlsToExecutionChange(data) => { + write!( + f, + "Signed BLS to execution change: validator_index: {}, address: {:?}", + data.message.validator_index, data.message.to_execution_address + ) + } PubsubMessage::LightClientFinalityUpdate(_data) => { write!(f, "Light CLient Finality Update") } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index e7e3cf4abbe..0e4aefbb5c1 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,7 +1,7 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; use serde_derive::{Deserialize, Serialize}; use strum::AsRefStr; -use types::{SubnetId, SyncSubnetId}; +use types::{ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; @@ -18,23 +18,49 @@ pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; +pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change"; pub const 
LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; -pub const CORE_TOPICS: [GossipKind; 6] = [ +pub const BASE_CORE_TOPICS: [GossipKind; 5] = [ GossipKind::BeaconBlock, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, GossipKind::AttesterSlashing, - GossipKind::SignedContributionAndProof, ]; +pub const ALTAIR_CORE_TOPICS: [GossipKind; 1] = [GossipKind::SignedContributionAndProof]; + +pub const CAPELLA_CORE_TOPICS: [GossipKind; 1] = [GossipKind::BlsToExecutionChange]; + pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ GossipKind::LightClientFinalityUpdate, GossipKind::LightClientOptimisticUpdate, ]; +/// Returns the core topics associated with each fork that are new to the previous fork +pub fn fork_core_topics(fork_name: &ForkName) -> Vec { + match fork_name { + ForkName::Base => BASE_CORE_TOPICS.to_vec(), + ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), + ForkName::Merge => vec![], + ForkName::Capella => CAPELLA_CORE_TOPICS.to_vec(), + } +} + +/// Returns all the topics that we need to subscribe to for a given fork +/// including topics from older forks and new topics for the current fork. +pub fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec { + let mut topics = fork_core_topics(¤t_fork); + while let Some(previous_fork) = current_fork.previous_fork() { + let previous_fork_topics = fork_core_topics(&previous_fork); + topics.extend(previous_fork_topics); + current_fork = previous_fork; + } + topics +} + /// A gossipsub topic which encapsulates the type of messages that should be sent and received over /// the pubsub protocol and the way the messages should be encoded. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] @@ -70,6 +96,8 @@ pub enum GossipKind { /// Topic for publishing unaggregated sync committee signatures on a particular subnet. #[strum(serialize = "sync_committee")] SyncCommitteeMessage(SyncSubnetId), + /// Topic for validator messages which change their withdrawal address. + BlsToExecutionChange, /// Topic for publishing finality updates for light clients. LightClientFinalityUpdate, /// Topic for publishing optimistic updates for light clients. 
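`core_topics_to_subscribe` builds the full subscription set by walking back through `previous_fork()` and collecting each fork's additions. A standalone sketch with the same shape (plain strings stand in for `GossipKind`; the Base topic names follow the constants above):

```rust
#[derive(Clone, Copy)]
enum ForkName { Base, Altair, Merge, Capella }

impl ForkName {
    fn previous_fork(self) -> Option<ForkName> {
        match self {
            ForkName::Base => None,
            ForkName::Altair => Some(ForkName::Base),
            ForkName::Merge => Some(ForkName::Altair),
            ForkName::Capella => Some(ForkName::Merge),
        }
    }
}

// Each fork contributes only the topics it introduces (Merge adds none).
fn fork_core_topics(fork: ForkName) -> Vec<&'static str> {
    match fork {
        ForkName::Base => vec![
            "beacon_block",
            "beacon_aggregate_and_proof",
            "voluntary_exit",
            "proposer_slashing",
            "attester_slashing",
        ],
        ForkName::Altair => vec!["sync_committee_contribution_and_proof"],
        ForkName::Merge => vec![],
        ForkName::Capella => vec!["bls_to_execution_change"],
    }
}

// Walk back through previous forks, accumulating their core topics.
fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec<&'static str> {
    let mut topics = fork_core_topics(current_fork);
    while let Some(previous) = current_fork.previous_fork() {
        topics.extend(fork_core_topics(previous));
        current_fork = previous;
    }
    topics
}

fn main() {
    // Capella keeps every earlier core topic and adds `bls_to_execution_change`.
    let topics = core_topics_to_subscribe(ForkName::Capella);
    assert_eq!(topics.len(), 7);
    assert_eq!(topics[0], "bls_to_execution_change");
}
```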
@@ -147,6 +175,7 @@ impl GossipTopic { VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, + BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match committee_topic_index(topic) { @@ -207,6 +236,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), }; @@ -384,4 +414,15 @@ mod tests { assert_eq!("proposer_slashing", ProposerSlashing.as_ref()); assert_eq!("attester_slashing", AttesterSlashing.as_ref()); } + + #[test] + fn test_core_topics_to_subscribe() { + let mut all_topics = Vec::new(); + all_topics.extend(CAPELLA_CORE_TOPICS); + all_topics.extend(ALTAIR_CORE_TOPICS); + all_topics.extend(BASE_CORE_TOPICS); + + let latest_fork = *ForkName::list_all().last().unwrap(); + assert_eq!(core_topics_to_subscribe(latest_fork), all_topics); + } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index b67b412cfc2..d44f20c0806 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -13,7 +13,7 @@ use tokio::runtime::Runtime; use types::{ ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot, }; -use unused_port::unused_tcp_port; +use unused_port::unused_tcp4_port; type E = MinimalEthSpec; type ReqId = usize; @@ -25,14 +25,17 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = E::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -72,11 +75,9 @@ pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { .tempdir() .unwrap(); - config.libp2p_port = port; // tcp port - config.discovery_port = port; // udp port - config.enr_tcp_port = Some(port); - config.enr_udp_port = Some(port); - config.enr_address = Some("127.0.0.1".parse().unwrap()); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port); + config.enr_udp4_port = Some(port); + config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); // Reduce gossipsub heartbeat parameters @@ -94,7 +95,7 @@ pub async fn build_libp2p_instance( log: slog::Logger, fork_name: ForkName, ) -> Libp2pInstance { - let port = unused_tcp_port().unwrap(); + let port = unused_tcp4_port().unwrap(); let config = build_config(port, boot_nodes); // launch libp2p 
service diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 9183453492c..ebdbb67421f 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -9,8 +9,8 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext, - ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec, + ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; mod common; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 43c6cef8464..9a0b7946466 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -43,8 +43,10 @@ if-addrs = "0.6.4" strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" -delay_map = "0.1.1" +delay_map = "0.3.0" ethereum-types = { version = "0.14.1", optional = true } +operation_pool = { path = "../operation_pool" } +execution_layer = { path = "../execution_layer" } [features] deterministic_long_lived_attnets = [ "ethereum-types" ] diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 743a97a29c2..96032052284 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -61,13 +61,15 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; +use tokio::sync::mpsc::error::TrySendError; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, + QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -76,7 +78,9 @@ mod tests; mod work_reprocessing_queue; mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; +use crate::beacon_processor::work_reprocessing_queue::{ + QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, +}; pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; /// The maximum size of the channel for work events to the `BeaconProcessor`. @@ -137,6 +141,10 @@ const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored +/// for reprocessing before we start dropping them. +const MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN: usize = 128; + /// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping /// them. 
const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; @@ -165,6 +173,12 @@ const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. +/// +/// This value is set high to accommodate the large spike that is expected immediately after Capella +/// is activated. +const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; + /// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; @@ -207,12 +221,15 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; +pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; +pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; +pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -538,6 +555,22 @@ impl WorkEvent { } } + /// Create a new `Work` event for some BLS to execution change. + pub fn gossip_bls_to_execution_change( + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlsToExecutionChange { + message_id, + peer_id, + bls_to_execution_change, + }, + } + } + /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( @@ -694,6 +727,24 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::LightClientUpdate(QueuedLightClientUpdate { + peer_id, + message_id, + light_client_optimistic_update, + seen_timestamp, + .. + }) => Self { + drop_during_sync: true, + work: Work::UnknownLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + }, + }, + ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => { + WorkEvent::chain_segment(process_id, blocks) + } } } } @@ -733,6 +784,12 @@ pub enum Work { aggregate: Box>, seen_timestamp: Duration, }, + UnknownLightClientOptimisticUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + }, GossipAggregateBatch { packages: Vec>, }, @@ -813,6 +870,11 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRootRequest, }, + GossipBlsToExecutionChange { + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + }, LightClientBootstrapRequest { peer_id: PeerId, request_id: PeerRequestId, @@ -838,6 +900,10 @@ impl Work { Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate { .. 
} => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::ChainSegment { + process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, + .. + } => CHAIN_SEGMENT_BACKFILL, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, @@ -845,6 +911,8 @@ impl Work { Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, + Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE, + Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, } } } @@ -979,6 +1047,8 @@ impl BeaconProcessor { // Using a FIFO queue for light client updates to maintain sequence order. let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + let mut unknown_light_client_update_queue = + FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN); // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); @@ -990,24 +1060,28 @@ impl BeaconProcessor { let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); + + let mut gossip_bls_to_execution_change_queue = + FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); + let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + + let chain = match self.beacon_chain.upgrade() { + Some(chain) => chain, + // No need to proceed any further if the beacon chain has been dropped, the client + // is shutting down. + None => return, + }; + // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - let work_reprocessing_tx = { - if let Some(chain) = self.beacon_chain.upgrade() { - spawn_reprocess_scheduler( - ready_work_tx, - &self.executor, - chain.slot_clock.clone(), - self.log.clone(), - ) - } else { - // No need to proceed any further if the beacon chain has been dropped, the client - // is shutting down. - return; - } - }; + let work_reprocessing_tx = spawn_reprocess_scheduler( + ready_work_tx, + &self.executor, + chain.slot_clock.clone(), + self.log.clone(), + ); let executor = self.executor.clone(); @@ -1020,12 +1094,55 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; + let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting; + loop { let work_event = match inbound_events.next().await { Some(InboundEvent::WorkerIdle) => { self.current_workers = self.current_workers.saturating_sub(1); None } + Some(InboundEvent::WorkEvent(event)) if enable_backfill_rate_limiting => { + match QueuedBackfillBatch::try_from(event) { + Ok(backfill_batch) => { + match work_reprocessing_tx + .try_send(ReprocessQueueMessage::BackfillSync(backfill_batch)) + { + Err(e) => { + warn!( + self.log, + "Unable to queue backfill work event. 
Will try to process now."; + "error" => %e + ); + match e { + TrySendError::Full(reprocess_queue_message) + | TrySendError::Closed(reprocess_queue_message) => { + match reprocess_queue_message { + ReprocessQueueMessage::BackfillSync( + backfill_batch, + ) => Some(backfill_batch.into()), + other => { + crit!( + self.log, + "Unexpected queue message type"; + "message_type" => other.as_ref() + ); + // This is an unhandled exception, drop the message. + continue; + } + } + } + } + } + Ok(..) => { + // backfill work sent to "reprocessing" queue. Process the next event. + continue; + } + } + } + Err(event) => Some(event), + } + } Some(InboundEvent::WorkEvent(event)) | Some(InboundEvent::ReprocessingWork(event)) => Some(event), None => { @@ -1222,9 +1339,12 @@ impl BeaconProcessor { self.spawn_worker(item, toolbox); } else if let Some(item) = gossip_proposer_slashing_queue.pop() { self.spawn_worker(item, toolbox); - // Check exits last since our validators don't get rewards from them. + // Check exits and address changes late since our validators don't get + // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { self.spawn_worker(item, toolbox); + } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { + self.spawn_worker(item, toolbox); // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); @@ -1346,6 +1466,12 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } + Work::GossipBlsToExecutionChange { .. } => { + gossip_bls_to_execution_change_queue.push(work, work_id, &self.log) + } + Work::UnknownLightClientOptimisticUpdate { .. } => { + unknown_light_client_update_queue.push(work, work_id, &self.log) + } } } } @@ -1398,6 +1524,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, gossip_attester_slashing_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL, + gossip_bls_to_execution_change_queue.len() as i64, + ); if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( @@ -1636,6 +1766,20 @@ impl BeaconProcessor { seen_timestamp, ) }), + /* + * BLS to execution change verification. + */ + Work::GossipBlsToExecutionChange { + message_id, + peer_id, + bls_to_execution_change, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_bls_to_execution_change( + message_id, + peer_id, + *bls_to_execution_change, + ) + }), /* * Light client finality update verification. 
*/ @@ -1665,6 +1809,7 @@ impl BeaconProcessor { message_id, peer_id, *light_client_optimistic_update, + Some(work_reprocessing_tx), seen_timestamp, ) }), @@ -1787,6 +1932,20 @@ impl BeaconProcessor { seen_timestamp, ) }), + Work::UnknownLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_optimistic_update( + message_id, + peer_id, + *light_client_optimistic_update, + None, + seen_timestamp, + ) + }), }; } } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index ea1a59e0d05..4b0a159eb4b 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -9,7 +9,7 @@ use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -23,8 +23,8 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, - SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing, + SignedBeaconBlock, SignedVoluntaryExit, SubnetId, }; type E = MainnetEthSpec; @@ -36,7 +36,6 @@ const SMALL_CHAIN: u64 = 2; const LONG_CHAIN: u64 = SLOTS_PER_EPOCH * 2; const TCP_PORT: u16 = 42; -const UDP_PORT: u16 = 42; const SEQ_NUMBER: u64 = 0; /// The default time to wait for `BeaconProcessor` events. @@ -71,6 +70,10 @@ impl Drop for TestRig { impl TestRig { pub async fn new(chain_length: u64) -> Self { + Self::new_with_chain_config(chain_length, ChainConfig::default()).await + } + + pub async fn new_with_chain_config(chain_length: u64, chain_config: ChainConfig) -> Self { // This allows for testing voluntary exits without building out a massive chain. let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -79,6 +82,7 @@ impl TestRig { .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .chain_config(chain_config) .build(); harness.advance_slot(); @@ -177,10 +181,11 @@ impl TestRig { let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr, - TCP_PORT, - UDP_PORT, + Some(TCP_PORT), + None, meta_data, vec![], + false, &log, )); @@ -262,6 +267,14 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_backfill_batch(&self) { + let event = WorkEvent::chain_segment( + ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), + Vec::default(), + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); self.beacon_processor_tx @@ -874,3 +887,49 @@ async fn test_rpc_block_reprocessing() { // cache handle was dropped. assert_eq!(next_block_root, rig.head_root()); } + +/// Ensure that backfill batches get rate-limited and processing is scheduled at specified intervals. 
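The backfill rate-limiting path in the processor loop above falls back to direct processing when the reprocessing queue rejects a batch, recovering the message from the `TrySendError` instead of dropping it. A tiny standalone illustration of that recover-the-payload pattern, using std's channel as an analogue of tokio's (both error variants hand the value back):

```rust
use std::sync::mpsc::{sync_channel, TrySendError};

fn main() {
    // Zero-capacity channel with no receiver waiting: `try_send` always fails.
    let (tx, _rx) = sync_channel::<&'static str>(0);

    let batch = "backfill batch";
    match tx.try_send(batch) {
        Ok(()) => println!("queued for scheduled processing"),
        // Same shape as the fallback above: recover the payload from the error
        // and process it immediately instead of losing the work.
        Err(TrySendError::Full(batch)) | Err(TrySendError::Disconnected(batch)) => {
            println!("queue unavailable, processing {batch:?} now");
        }
    }
}
```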
+#[tokio::test] +async fn test_backfill_sync_processing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + // Note: to verify the exact event times in an integration test is not straight forward here + // (not straight forward to manipulate `TestingSlotClock` due to cloning of `SlotClock` in code) + // and makes the test very slow, hence timing calculation is unit tested separately in + // `work_reprocessing_queue`. + for _ in 0..1 { + rig.enqueue_backfill_batch(); + // ensure queued batch is not processed until later + rig.assert_no_events_for(Duration::from_millis(100)).await; + // A new batch should be processed within a slot. + rig.assert_event_journal_with_timeout( + &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO], + rig.chain.slot_clock.slot_duration(), + ) + .await; + } +} + +/// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. +#[tokio::test] +async fn test_backfill_sync_processing_rate_limiting_disabled() { + let chain_config = ChainConfig { + enable_backfill_rate_limiting: false, + ..Default::default() + }; + let mut rig = TestRig::new_with_chain_config(SMALL_CHAIN, chain_config).await; + + for _ in 0..3 { + rig.enqueue_backfill_batch(); + } + + // ensure all batches are processed + rig.assert_event_journal_with_timeout( + &[ + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + ], + Duration::from_millis(100), + ) + .await; +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 2aeec11c325..427be6d5138 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -11,31 +11,39 @@ //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! block will be re-queued until their block is imported, or until they expire. use super::MAX_SCHEDULED_WORK_QUEUE_LEN; +use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; +use itertools::Itertools; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; -use slog::{crit, debug, error, warn, Logger}; +use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; +use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::time::Duration; +use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; +use types::{ + Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, + SignedBeaconBlock, SubnetId, +}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; +const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. 
/// This is to account for any slight drift in the system clock. @@ -44,8 +52,11 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue light client updates for re-processing. +pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); + /// For how long to queue rpc blocks before sending them back for reprocessing. -pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); +pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but @@ -55,20 +66,44 @@ const MAXIMUM_QUEUED_BLOCKS: usize = 16; /// How many attestations we keep before new ones get dropped. const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; +/// How many light client updates we keep before new ones get dropped. +const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; + +// Process backfill batch 50%, 60%, 80% through each slot. +// +// Note: use caution to set these fractions in a way that won't cause panic-y +// arithmetic. +pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ + // One half: 6s on mainnet, 2.5s on Gnosis. + (1, 2), + // Three fifths: 7.2s on mainnet, 3s on Gnosis. + (3, 5), + // Four fifths: 9.6s on mainnet, 4s on Gnosis. + (4, 5), +]; + /// Messages that the scheduler can receive. +#[derive(AsRefStr)] pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. EarlyBlock(QueuedGossipBlock), /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. RpcBlock(QueuedRpcBlock), - /// A block that was successfully processed. We use this to handle attestations for unknown - /// blocks. - BlockImported(Hash256), + /// A block that was successfully processed. We use this to handle attestations and light client updates + /// for unknown blocks. + BlockImported { + block_root: Hash256, + parent_root: Hash256, + }, /// An unaggregated attestation that references an unknown block. UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. UnknownBlockAggregate(QueuedAggregate), + /// A light client optimistic update that references a parent root that has not been seen as a parent. + UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + /// A new backfill batch that needs to be scheduled for processing. + BackfillSync(QueuedBackfillBatch), } /// Events sent by the scheduler once they are ready for re-processing. @@ -77,6 +112,8 @@ pub enum ReadyWork { RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), + LightClientUpdate(QueuedLightClientUpdate), + BackfillSync(QueuedBackfillBatch), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -99,6 +136,16 @@ pub struct QueuedAggregate { pub seen_timestamp: Duration, } +/// A light client update for which the corresponding parent block was not seen while processing, +/// queued for later. 
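Worked example of the `BACKFILL_SCHEDULE_IN_SLOT` fractions above: each `(multiplier, divisor)` pair becomes an offset of `(slot_duration / divisor) * multiplier` from the slot start, which reproduces the 6s/7.2s/9.6s (mainnet) and 2.5s/3s/4s (Gnosis) figures given in the comment.

```rust
use std::time::Duration;

const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [(1, 2), (3, 5), (4, 5)];

/// Convert the fractional schedule into offsets from the slot start.
fn offsets(slot_duration: Duration) -> Vec<Duration> {
    BACKFILL_SCHEDULE_IN_SLOT
        .into_iter()
        .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier)
        .collect()
}

fn main() {
    assert_eq!(
        offsets(Duration::from_secs(12)), // mainnet: 6s, 7.2s, 9.6s
        vec![
            Duration::from_secs(6),
            Duration::from_millis(7_200),
            Duration::from_millis(9_600),
        ]
    );
    assert_eq!(
        offsets(Duration::from_secs(5)), // Gnosis: 2.5s, 3s, 4s
        vec![
            Duration::from_millis(2_500),
            Duration::from_secs(3),
            Duration::from_secs(4),
        ]
    );
}
```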
+pub struct QueuedLightClientUpdate { + pub peer_id: PeerId, + pub message_id: MessageId, + pub light_client_optimistic_update: Box>, + pub parent_root: Hash256, + pub seen_timestamp: Duration, +} + /// A block that arrived early and has been queued for later import. pub struct QueuedGossipBlock { pub peer_id: PeerId, @@ -118,6 +165,40 @@ pub struct QueuedRpcBlock { pub should_process: bool, } +/// A backfill batch work that has been queued for processing later. +#[derive(Clone)] +pub struct QueuedBackfillBatch { + pub process_id: ChainSegmentProcessId, + pub blocks: Vec>>, +} + +impl TryFrom> for QueuedBackfillBatch { + type Error = WorkEvent; + + fn try_from(event: WorkEvent) -> Result> { + match event { + WorkEvent { + work: + Work::ChainSegment { + process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), + blocks, + }, + .. + } => Ok(QueuedBackfillBatch { process_id, blocks }), + _ => Err(event), + } + } +} + +impl From> for WorkEvent { + fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { + WorkEvent::chain_segment( + queued_backfill_batch.process_id, + queued_backfill_batch.blocks, + ) + } +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. @@ -127,6 +208,10 @@ enum InboundEvent { ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. ReadyAttestation(QueuedAttestationId), + /// A light client update that is ready for re-processing. + ReadyLightClientUpdate(QueuedLightClientUpdateId), + /// A backfill batch that was queued is ready for processing. + ReadyBackfillSync(QueuedBackfillBatch), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -147,6 +232,8 @@ struct ReprocessQueue { rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, + /// Queue to manage scheduled light client updates. + lc_updates_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -157,15 +244,27 @@ struct ReprocessQueue { queued_unaggregates: FnvHashMap, DelayKey)>, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, + /// Queued Light Client Updates. + queued_lc_updates: FnvHashMap, DelayKey)>, + /// Light Client Updates per parent_root. 
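The `TryFrom<WorkEvent>` impl for `QueuedBackfillBatch` acts as a filter: only backfill chain segments convert, and any other event is handed back unchanged as the error so the caller can process it normally. A simplified standalone sketch of that pattern (stand-in types only):

```rust
#[derive(Debug)]
enum Event {
    BackfillBatch(Vec<u64>),
    Other(&'static str),
}

struct QueuedBackfillBatch {
    blocks: Vec<u64>,
}

impl TryFrom<Event> for QueuedBackfillBatch {
    // On mismatch the original event is returned as the error, not dropped.
    type Error = Event;

    fn try_from(event: Event) -> Result<Self, Event> {
        match event {
            Event::BackfillBatch(blocks) => Ok(QueuedBackfillBatch { blocks }),
            other => Err(other),
        }
    }
}

fn main() {
    let batch = QueuedBackfillBatch::try_from(Event::BackfillBatch(vec![1, 2])).unwrap();
    assert_eq!(batch.blocks.len(), 2);
    // Non-backfill work is returned to the caller untouched.
    assert!(QueuedBackfillBatch::try_from(Event::Other("status")).is_err());
}
```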
+ awaiting_lc_updates_per_parent_root: HashMap>, + /// Queued backfill batches + queued_backfill_batches: Vec>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, + next_lc_update: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, + lc_update_delay_debounce: TimeLatch, + next_backfill_batch_event: Option>>, + slot_clock: Pin>, } +pub type QueuedLightClientUpdateId = usize; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum QueuedAttestationId { Aggregate(usize), @@ -235,6 +334,34 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + match self.lc_updates_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(lc_id))) => { + return Poll::Ready(Some(InboundEvent::ReadyLightClientUpdate( + lc_id.into_inner(), + ))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "lc_updates_queue"))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. + Poll::Ready(None) | Poll::Pending => (), + } + + if let Some(next_backfill_batch_event) = self.next_backfill_batch_event.as_mut() { + match next_backfill_batch_event.as_mut().poll(cx) { + Poll::Ready(_) => { + let maybe_batch = self.queued_backfill_batches.pop(); + self.recompute_next_backfill_batch_event(); + + if let Some(batch) = maybe_batch { + return Poll::Ready(Some(InboundEvent::ReadyBackfillSync(batch))); + } + } + Poll::Pending => (), + } + } + // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -264,14 +391,22 @@ pub fn spawn_reprocess_scheduler( gossip_block_delay_queue: DelayQueue::new(), rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), + lc_updates_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), + queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), + awaiting_lc_updates_per_parent_root: HashMap::new(), + queued_backfill_batches: Vec::new(), next_attestation: 0, + next_lc_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), + lc_update_delay_debounce: TimeLatch::default(), + next_backfill_batch_event: None, + slot_clock: Box::pin(slot_clock.clone()), }; executor.spawn( @@ -386,7 +521,7 @@ impl ReprocessQueue { return; } - // Queue the block for 1/4th of a slot + // Queue the block for 1/3rd of a slot self.rpc_block_delay_queue .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); } @@ -473,9 +608,52 @@ impl ReprocessQueue { self.next_attestation += 1; } - InboundEvent::Msg(BlockImported(root)) => { + InboundEvent::Msg(UnknownLightClientOptimisticUpdate( + queued_light_client_optimistic_update, + )) => { + if self.lc_updates_delay_queue.len() >= MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES { + if self.lc_update_delay_debounce.elapsed() { + error!( + log, + "Light client updates delay queue is full"; + "queue_size" => MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES, + "msg" => "check system clock" + ); + } + // Drop the light client update. + return; + } + + let lc_id: QueuedLightClientUpdateId = self.next_lc_update; + + // Register the delay. 
+ let delay_key = self + .lc_updates_delay_queue + .insert(lc_id, QUEUED_LIGHT_CLIENT_UPDATE_DELAY); + + // Register the light client update for the corresponding root. + self.awaiting_lc_updates_per_parent_root + .entry(queued_light_client_optimistic_update.parent_root) + .or_default() + .push(lc_id); + + // Store the light client update and its info. + self.queued_lc_updates.insert( + self.next_lc_update, + (queued_light_client_optimistic_update, delay_key), + ); + + self.next_lc_update += 1; + } + InboundEvent::Msg(BlockImported { + block_root, + parent_root, + }) => { // Unqueue the attestations we have for this root, if any. - if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&root) { + if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&block_root) { + let mut sent_count = 0; + let mut failed_to_send_count = 0; + for id in queued_ids { metrics::inc_counter( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS, @@ -500,10 +678,9 @@ impl ReprocessQueue { // Send the work. if self.ready_work_tx.try_send(work).is_err() { - error!( - log, - "Failed to send scheduled attestation"; - ); + failed_to_send_count += 1; + } else { + sent_count += 1; } } else { // There is a mismatch between the attestation ids registered for this @@ -511,11 +688,81 @@ impl ReprocessQueue { error!( log, "Unknown queued attestation for block root"; - "block_root" => ?root, + "block_root" => ?block_root, "att_id" => ?id, ); } } + + if failed_to_send_count > 0 { + error!( + log, + "Ignored scheduled attestation(s) for block"; + "hint" => "system may be overloaded", + "parent_root" => ?parent_root, + "block_root" => ?block_root, + "failed_count" => failed_to_send_count, + "sent_count" => sent_count, + ); + } + } + // Unqueue the light client optimistic updates we have for this root, if any. + if let Some(queued_lc_id) = self + .awaiting_lc_updates_per_parent_root + .remove(&parent_root) + { + debug!( + log, + "Dequeuing light client optimistic updates"; + "parent_root" => %parent_root, + "count" => queued_lc_id.len(), + ); + + for lc_id in queued_lc_id { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES, + ); + if let Some((work, delay_key)) = self.queued_lc_updates.remove(&lc_id).map( + |(light_client_optimistic_update, delay_key)| { + ( + ReadyWork::LightClientUpdate(light_client_optimistic_update), + delay_key, + ) + }, + ) { + // Remove the delay + self.lc_updates_delay_queue.remove(&delay_key); + + // Send the work + match self.ready_work_tx.try_send(work) { + Ok(_) => trace!( + log, + "reprocessing light client update sent"; + ), + Err(_) => error!( + log, + "Failed to send scheduled light client update"; + ), + } + } else { + // There is a mismatch between the light client update ids registered for this + // root and the queued light client updates. This should never happen. + error!( + log, + "Unknown queued light client update for parent root"; + "parent_root" => ?parent_root, + "lc_id" => ?lc_id, + ); + } + } + } + } + InboundEvent::Msg(BackfillSync(queued_backfill_batch)) => { + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + // only recompute if there is no `next_backfill_batch_event` already scheduled + if self.next_backfill_batch_event.is_none() { + self.recompute_next_backfill_batch_event(); } } // A block that was queued for later processing is now ready to be processed. 
@@ -580,7 +827,9 @@ impl ReprocessQueue { if self.ready_work_tx.try_send(work).is_err() { error!( log, - "Failed to send scheduled attestation"; + "Ignored scheduled attestation"; + "hint" => "system may be overloaded", + "beacon_block_root" => ?root ); } @@ -591,6 +840,65 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyLightClientUpdate(queued_id) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES, + ); + + if let Some((parent_root, work)) = self.queued_lc_updates.remove(&queued_id).map( + |(queued_lc_update, _delay_key)| { + ( + queued_lc_update.parent_root, + ReadyWork::LightClientUpdate(queued_lc_update), + ) + }, + ) { + if self.ready_work_tx.try_send(work).is_err() { + error!( + log, + "Failed to send scheduled light client optimistic update"; + ); + } + + if let Some(queued_lc_updates) = self + .awaiting_lc_updates_per_parent_root + .get_mut(&parent_root) + { + if let Some(index) = + queued_lc_updates.iter().position(|&id| id == queued_id) + { + queued_lc_updates.swap_remove(index); + } + } + } + } + InboundEvent::ReadyBackfillSync(queued_backfill_batch) => { + let millis_from_slot_start = slot_clock + .millis_from_current_slot_start() + .map_or("null".to_string(), |duration| { + duration.as_millis().to_string() + }); + + debug!( + log, + "Sending scheduled backfill work"; + "millis_from_slot_start" => millis_from_slot_start + ); + + if self + .ready_work_tx + .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) + .is_err() + { + error!( + log, + "Failed to send scheduled backfill work"; + "info" => "sending work back to queue" + ); + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + } + } } metrics::set_gauge_vec( @@ -608,5 +916,101 @@ impl ReprocessQueue { &[ATTESTATIONS], self.attestations_delay_queue.len() as i64, ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[LIGHT_CLIENT_UPDATES], + self.lc_updates_delay_queue.len() as i64, + ); + } + + fn recompute_next_backfill_batch_event(&mut self) { + // Only recompute the `next_backfill_batch_event` if there are backfill batches in the queue. + if !self.queued_backfill_batches.is_empty() { + self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( + ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), + ))); + } else { + self.next_backfill_batch_event = None + } + } + + /// Returns the duration until the next scheduled processing time. The schedule ensures that backfill + /// processing is done in windows of time that aren't critical. + fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { + let slot_duration = slot_clock.slot_duration(); + slot_clock + .millis_from_current_slot_start() + .and_then(|duration_from_slot_start| { + BACKFILL_SCHEDULE_IN_SLOT + .into_iter() + // Convert fractions to durations from the slot start. + .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier) + .find_or_first(|&event_duration_from_slot_start| { + event_duration_from_slot_start > duration_from_slot_start + }) + .map(|next_event_time| { + if duration_from_slot_start >= next_event_time { + // The event is in the next slot; add the duration to the next slot. + let duration_to_next_slot = slot_duration - duration_from_slot_start; + duration_to_next_slot + next_event_time + } else { + next_event_time - duration_from_slot_start + } + }) + }) + // If we can't read the slot clock, just wait another slot.
+ .unwrap_or(slot_duration) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use slot_clock::TestingSlotClock; + use store::MemoryStore; + use types::MainnetEthSpec as E; + use types::Slot; + + type TestBeaconChainType = + Witness, E, MemoryStore, MemoryStore>; + + #[test] + fn backfill_processing_schedule_calculation() { + let slot_duration = Duration::from_secs(12); + let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), slot_duration); + let current_slot_start = slot_clock.start_of(Slot::new(100)).unwrap(); + slot_clock.set_current_time(current_slot_start); + + let event_times = BACKFILL_SCHEDULE_IN_SLOT + .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier); + + for &event_duration_from_slot_start in event_times.iter() { + let duration_to_next_event = + ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + + let current_time = slot_clock.millis_from_current_slot_start().unwrap(); + + assert_eq!( + duration_to_next_event, + event_duration_from_slot_start - current_time + ); + + slot_clock.set_current_time(current_slot_start + event_duration_from_slot_start) + } + + // check for next event beyond the current slot + let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); + let duration_to_next_event = + ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + assert_eq!( + duration_to_next_event, + duration_to_next_slot + event_times[0] + ); } } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index ef23f6761f6..1ec03ae954f 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -12,6 +12,7 @@ use beacon_chain::{ GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -22,13 +23,14 @@ use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, - SyncSubnetId, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use super::{ super::work_reprocessing_queue::{ - QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, + ReprocessQueueMessage, }, Worker, }; @@ -675,6 +677,7 @@ impl Worker { .await { let block_root = gossip_verified_block.block_root; + if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( peer_id, @@ -715,6 +718,10 @@ impl Worker { &metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME, block_delay, ); + metrics::set_gauge( + &metrics::BEACON_BLOCK_LAST_DELAY, + block_delay.as_millis() as i64, + ); let verification_result = self .chain @@ -827,7 +834,6 @@ impl Worker { | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ 
BlockError::ExecutionPayloadError(_)) - // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { warn!(self.log, "Could not verify block for gossip. Rejecting the block"; @@ -949,7 +955,10 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx - .try_send(ReprocessQueueMessage::BlockImported(block_root)) + .try_send(ReprocessQueueMessage::BlockImported { + block_root, + parent_root: block.message().parent_root(), + }) .is_err() { error!( @@ -1182,6 +1191,83 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } + pub fn process_gossip_bls_to_execution_change( + self, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: SignedBlsToExecutionChange, + ) { + let validator_index = bls_to_execution_change.message.validator_index; + let address = bls_to_execution_change.message.to_execution_address; + + let change = match self + .chain + .verify_bls_to_execution_change_for_gossip(bls_to_execution_change) + { + Ok(ObservationOutcome::New(change)) => change, + Ok(ObservationOutcome::AlreadyKnown) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + debug!( + self.log, + "Dropping BLS to execution change"; + "validator_index" => validator_index, + "peer" => %peer_id + ); + return; + } + Err(e) => { + debug!( + self.log, + "Dropping invalid BLS to execution change"; + "validator_index" => validator_index, + "peer" => %peer_id, + "error" => ?e + ); + // We ignore pre-capella messages without penalizing peers. + if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) { + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } else { + // We penalize the peer slightly to prevent overuse of invalids. + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_bls_to_execution_change", + ); + } + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Address change messages from gossip are only processed *after* the + // Capella fork epoch. + let received_pre_capella = ReceivedPreCapella::No; + + self.chain + .import_bls_to_execution_change(change, received_pre_capella); + + debug!( + self.log, + "Successfully imported BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + ); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL); + } + /// Process the sync committee signature received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. 
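For reference, the accept/ignore/reject decision implemented by the new `process_gossip_bls_to_execution_change` handler above boils down to a small decision table. The sketch below restates it with simplified stand-in types introduced purely for illustration (they are not Lighthouse's real types); the real handler additionally updates metrics, penalises the peer via `gossip_penalize_peer` on rejection, and imports accepted changes into the op pool tagged with `ReceivedPreCapella::No`.

// Minimal sketch of the gossip acceptance decision for BLS-to-execution-change
// messages, using simplified placeholder types.

#[derive(Debug, PartialEq)]
enum Acceptance {
    Accept, // forward on gossip and import into the op pool
    Ignore, // drop quietly, no peer penalty
    Reject, // drop and apply a (high-tolerance) peer penalty
}

enum Verification {
    New,            // first valid change seen for this validator
    AlreadyKnown,   // a change for this validator is already known
    PriorToCapella, // received before the Capella fork epoch
    Invalid,        // failed signature or state checks
}

fn decide(outcome: Verification) -> Acceptance {
    match outcome {
        Verification::New => Acceptance::Accept,
        // Duplicates and pre-Capella messages are ignored without penalising the peer.
        Verification::AlreadyKnown | Verification::PriorToCapella => Acceptance::Ignore,
        // Anything else failed verification and is rejected with a small penalty.
        Verification::Invalid => Acceptance::Reject,
    }
}

fn main() {
    assert_eq!(decide(Verification::New), Acceptance::Accept);
    assert_eq!(decide(Verification::AlreadyKnown), Acceptance::Ignore);
    assert_eq!(decide(Verification::PriorToCapella), Acceptance::Ignore);
    assert_eq!(decide(Verification::Invalid), Acceptance::Reject);
}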
@@ -1326,7 +1412,7 @@ impl Worker { LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { debug!( self.log, - "LC invalid finality update"; + "Light client invalid finality update"; "peer" => %peer_id, "error" => ?e, ); @@ -1340,7 +1426,7 @@ impl Worker { LightClientFinalityUpdateError::TooEarly => { debug!( self.log, - "LC finality update too early"; + "Light client finality update too early"; "peer" => %peer_id, "error" => ?e, ); @@ -1353,7 +1439,7 @@ impl Worker { } LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( self.log, - "LC finality update already seen"; + "Light client finality update already seen"; "peer" => %peer_id, "error" => ?e, ), @@ -1362,7 +1448,7 @@ impl Worker { | LightClientFinalityUpdateError::SigSlotStartIsNone | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( self.log, - "LC error constructing finality update"; + "Light client error constructing finality update"; "peer" => %peer_id, "error" => ?e, ), @@ -1377,22 +1463,77 @@ impl Worker { message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, + reprocess_tx: Option>>, seen_timestamp: Duration, ) { - match self - .chain - .verify_optimistic_update_for_gossip(light_client_optimistic_update, seen_timestamp) - { - Ok(_verified_light_client_optimistic_update) => { + match self.chain.verify_optimistic_update_for_gossip( + light_client_optimistic_update.clone(), + seen_timestamp, + ) { + Ok(verified_light_client_optimistic_update) => { + debug!( + self.log, + "Light client successful optimistic update"; + "peer" => %peer_id, + "parent_root" => %verified_light_client_optimistic_update.parent_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); } Err(e) => { - metrics::register_optimistic_update_error(&e); match e { + LightClientOptimisticUpdateError::UnknownBlockParentRoot(parent_root) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES, + ); + debug!( + self.log, + "Optimistic update for unknown block"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + if let Some(sender) = reprocess_tx { + let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( + QueuedLightClientUpdate { + peer_id, + message_id, + light_client_optimistic_update: Box::new( + light_client_optimistic_update, + ), + parent_root, + seen_timestamp, + }, + ); + + if sender.try_send(msg).is_err() { + error!( + self.log, + "Failed to send optimistic update for re-processing"; + ) + } + } else { + debug!( + self.log, + "Not sending light client update because it had been reprocessed"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + return; + } LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + metrics::register_optimistic_update_error(&e); + debug!( self.log, - "LC invalid optimistic update"; + "Light client invalid optimistic update"; "peer" => %peer_id, "error" => ?e, ); @@ -1404,9 +1545,10 @@ impl Worker { ) } LightClientOptimisticUpdateError::TooEarly => { + metrics::register_optimistic_update_error(&e); debug!( self.log, - "LC optimistic update too early"; + "Light client optimistic update too early"; "peer" => %peer_id, "error" => ?e, ); @@ -1417,21 +1559,29 @@ impl Worker { "light_client_gossip_error", ); } - LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => debug!( - self.log, - "LC 
optimistic update already seen"; - "peer" => %peer_id, - "error" => ?e, - ), + LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client optimistic update already seen"; + "peer" => %peer_id, + "error" => ?e, + ) + } LightClientOptimisticUpdateError::BeaconChainError(_) | LightClientOptimisticUpdateError::LightClientUpdateError(_) | LightClientOptimisticUpdateError::SigSlotStartIsNone - | LightClientOptimisticUpdateError::FailedConstructingUpdate => debug!( - self.log, - "LC error constructing optimistic update"; - "peer" => %peer_id, - "error" => ?e, - ), + | LightClientOptimisticUpdateError::FailedConstructingUpdate => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client error constructing optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ) + } } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index bfa0ea516fa..81b163bf7ee 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -7,10 +7,10 @@ use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; -use slog::{debug, error}; +use slog::{debug, error, warn}; use slot_clock::SlotClock; -use std::sync::Arc; use task_executor::TaskExecutor; +use tokio_stream::StreamExt; use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -131,21 +131,25 @@ impl Worker { request_id: PeerRequestId, request: BlocksByRootRequest, ) { + let requested_blocks = request.block_roots.len(); + let mut block_stream = match self + .chain + .get_blocks_checking_early_attester_cache(request.block_roots.into(), &executor) + { + Ok(block_stream) => block_stream, + Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e), + }; // Fetching blocks is async because it may have to hit the execution layer for payloads. executor.spawn( async move { let mut send_block_count = 0; let mut send_response = true; - for root in request.block_roots.iter() { - match self - .chain - .get_block_checking_early_attester_cache(root) - .await - { + while let Some((root, result)) = block_stream.next().await { + match result.as_ref() { Ok(Some(block)) => { self.send_response( peer_id, - Response::BlocksByRoot(Some(block)), + Response::BlocksByRoot(Some(block.clone())), request_id, ); send_block_count += 1; @@ -190,7 +194,7 @@ impl Worker { self.log, "Received BlocksByRoot Request"; "peer" => %peer_id, - "requested" => request.block_roots.len(), + "requested" => requested_blocks, "returned" => %send_block_count ); @@ -344,14 +348,19 @@ impl Worker { // remove all skip slots let block_roots = block_roots.into_iter().flatten().collect::>(); + let mut block_stream = match self.chain.get_blocks(block_roots, &executor) { + Ok(block_stream) => block_stream, + Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e), + }; + // Fetching blocks is async because it may have to hit the execution layer for payloads. 
executor.spawn( async move { let mut blocks_sent = 0; let mut send_response = true; - for root in block_roots { - match self.chain.get_block(&root).await { + while let Some((root, result)) = block_stream.next().await { + match result.as_ref() { Ok(Some(block)) => { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending @@ -361,7 +370,7 @@ impl Worker { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(Some(Arc::new(block))), + response: Response::BlocksByRange(Some(block.clone())), id: request_id, }); } @@ -392,12 +401,26 @@ impl Worker { break; } Err(e) => { - error!( - self.log, - "Error fetching block for peer"; - "block_root" => ?root, - "error" => ?e - ); + if matches!( + e, + BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, ref boxed_error) + if matches!(**boxed_error, execution_layer::Error::EngineError(_)) + ) { + warn!( + self.log, + "Error rebuilding payload for peer"; + "info" => "this may occur occasionally when the EE is busy", + "block_root" => ?root, + "error" => ?e, + ); + } else { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + } // send the stream terminator self.send_error_response( diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 1ec045e97eb..ca2095348ae 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -9,12 +9,15 @@ use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ + observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; +use slot_clock::SlotClock; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -83,7 +86,68 @@ impl Worker { return; } }; + + // Returns `true` if the time now is after the 4s attestation deadline. + let block_is_late = SystemTime::now() + .duration_since(UNIX_EPOCH) + // If we can't read the system time clock then indicate that the + // block is late (and therefore should *not* be requeued). This + // avoids infinite loops. + .map_or(true, |now| { + get_block_delay_ms(now, block.message(), &self.chain.slot_clock) + > self.chain.slot_clock.unagg_attestation_production_delay() + }); + + // Checks if a block from this proposer is already known. + let proposal_already_known = || { + match self + .chain + .observed_block_producers + .read() + .proposer_has_been_observed(block.message()) + { + Ok(is_observed) => is_observed, + // Both of these blocks will be rejected, so reject them now rather + // than re-queuing them. + Err(ObserveError::FinalizedBlock { .. }) + | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, + } + }; + + // If we've already seen a block from this proposer *and* the block + // arrived before the attestation deadline, requeue it to ensure it is + // imported late enough that it won't receive a proposer boost. 
+ if !block_is_late && proposal_already_known() { + debug!( + self.log, + "Delaying processing of duplicate RPC block"; + "block_root" => ?block_root, + "proposer" => block.message().proposer_index(), + "slot" => block.slot() + ); + + // Send message to work reprocess queue to retry the block + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block_root, + block: block.clone(), + process_type, + seen_timestamp, + should_process: true, + }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!( + self.log, + "Failed to inform block import"; + "source" => "rpc", + "block_root" => %block_root + ); + } + return; + } + let slot = block.slot(); + let parent_root = block.message().parent_root(); let result = self .chain .process_block( @@ -101,7 +165,10 @@ impl Worker { info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); // Trigger processing for work referencing this block. - let reprocess_msg = ReprocessQueueMessage::BlockImported(hash); + let reprocess_msg = ReprocessQueueMessage::BlockImported { + block_root: hash, + parent_root, + }; if reprocess_tx.try_send(reprocess_msg).is_err() { error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) }; @@ -509,6 +576,21 @@ impl Worker { }) } } + ref err @ BlockError::ParentExecutionPayloadInvalid { ref parent_root } => { + warn!( + self.log, + "Failed to sync chain built on invalid parent"; + "parent_root" => ?parent_root, + "advice" => "check execution node for corruption then restart it and Lighthouse", + ); + Err(ChainSegmentFailed { + message: format!("Peer sent invalid block. Reason: {err:?}"), + // We need to penalise harshly in case this represents an actual attack. In case + // of a faulty EL it will usually require manual intervention to fix anyway, so + // it's not too bad if we drop most of our peers. + peer_action: Some(PeerAction::LowToleranceError), + }) + } other => { debug!( self.log, "Invalid block received"; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b4f3f29f934..09caaaa11e3 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -145,6 +145,19 @@ lazy_static! { "beacon_processor_attester_slashing_imported_total", "Total number of attester slashings imported to the op pool." ); + // Gossip BLS to execution changes. + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_bls_to_execution_change_queue_total", + "Count of address changes from gossip waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_bls_to_execution_change_verified_total", + "Total number of address changes verified for propagation." + ); + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_bls_to_execution_change_imported_total", + "Total number of address changes imported to the op pool." + ); // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_rpc_block_queue_total", @@ -335,10 +348,18 @@ lazy_static! 
{ pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_gossip_slot_start_delay_time", "Duration between when the block is received and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay. + Ok(vec![0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0]) + // NOTE: Previous values, which we may want to switch back to. // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - decimal_buckets(-1,2) + //decimal_buckets(-1,2) ); + pub static ref BEACON_BLOCK_LAST_DELAY: Result = try_create_int_gauge( + "beacon_block_last_delay", + "Keeps track of the last block's delay from the start of the slot." + ); + pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( "beacon_block_gossip_arrived_late_total", "Count of times when a gossip block arrived from the network later than the attestation deadline.", @@ -362,6 +383,21 @@ lazy_static! { "Number of queued attestations where as matching block has been imported." ); + /* + * Light client update reprocessing queue metrics. + */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_optimistic_updates", + "Number of queued light client optimistic updates which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_optimistic_updates", + "Number of queued light client optimistic updates for which a matching block has been imported." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_sent_optimistic_updates", + "Number of light client optimistic updates sent to the reprocessing queue because their parent block was unknown." + ); } pub fn update_bandwidth_metrics(bandwidth: Arc) { diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index a2fbe576109..9bf123e8dec 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -20,13 +20,13 @@ pub struct UPnPConfig { disable_discovery: bool, } -impl From<&NetworkConfig> for UPnPConfig { - fn from(config: &NetworkConfig) -> Self { - UPnPConfig { - tcp_port: config.libp2p_port, - udp_port: config.discovery_port, +impl UPnPConfig { + pub fn from_config(config: &NetworkConfig) -> Option { + config.listen_addrs().v4().map(|v4_addr| UPnPConfig { + tcp_port: v4_addr.tcp_port, + udp_port: v4_addr.udp_port, disable_discovery: config.disable_discovery, - } + }) } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs new file mode 100644 index 00000000000..7f75a27fe25 --- /dev/null +++ b/beacon_node/network/src/router.rs @@ -0,0 +1,535 @@ +//! This module handles incoming network messages. +//! +//! It routes the messages to appropriate services. +//! It handles requests at the application layer in its associated processor and directs +//! syncing-related responses to the Sync manager.
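As a quick orientation before the full file, the routing pattern this new module implements can be reduced to a single match that fans one inbound message stream out to the sync service and the beacon processor. The sketch below uses simplified placeholder types (`InboundMessage`, `WorkEvent`, plain `u64` peer ids) introduced only for illustration; the real `Router` defined next also owns the network context, answers RPC requests, and runs inside its own spawned event loop.

use std::sync::mpsc;

// Placeholder message types standing in for Lighthouse's `RouterMessage`,
// `SyncMessage` and beacon-processor `WorkEvent`.
enum InboundMessage {
    PeerDisconnected(u64),
    GossipBlock(Vec<u8>),
    StatusPeer(u64),
}

enum SyncMessage {
    Disconnect(u64),
}

enum WorkEvent {
    GossipBlock(Vec<u8>),
    Status(u64),
}

struct Router {
    sync_tx: mpsc::Sender<SyncMessage>,
    work_tx: mpsc::Sender<WorkEvent>,
}

impl Router {
    // Route each inbound message either to the sync service or to the worker pool.
    fn handle_message(&self, msg: InboundMessage) {
        match msg {
            InboundMessage::PeerDisconnected(peer) => {
                let _ = self.sync_tx.send(SyncMessage::Disconnect(peer));
            }
            InboundMessage::GossipBlock(block) => {
                let _ = self.work_tx.send(WorkEvent::GossipBlock(block));
            }
            InboundMessage::StatusPeer(peer) => {
                let _ = self.work_tx.send(WorkEvent::Status(peer));
            }
        }
    }
}

fn main() {
    let (sync_tx, sync_rx) = mpsc::channel();
    let (work_tx, work_rx) = mpsc::channel();
    let router = Router { sync_tx, work_tx };

    router.handle_message(InboundMessage::GossipBlock(vec![0xde, 0xad]));
    router.handle_message(InboundMessage::StatusPeer(7));
    router.handle_message(InboundMessage::PeerDisconnected(7));

    assert!(matches!(work_rx.recv().unwrap(), WorkEvent::GossipBlock(_)));
    assert!(matches!(work_rx.recv().unwrap(), WorkEvent::Status(7)));
    assert!(matches!(sync_rx.recv().unwrap(), SyncMessage::Disconnect(7)));
}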
+#![allow(clippy::unit_arg)] + +use crate::beacon_processor::{ + BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, +}; +use crate::error; +use crate::service::{NetworkMessage, RequestId}; +use crate::status::status_message; +use crate::sync::manager::RequestId as SyncId; +use crate::sync::SyncMessage; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use futures::prelude::*; +use lighthouse_network::rpc::*; +use lighthouse_network::{ + MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, +}; +use slog::{debug, o, trace}; +use slog::{error, warn}; +use std::cmp; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::UnboundedReceiverStream; +use types::{EthSpec, SignedBeaconBlock}; + +/// Handles messages from the network and routes them to the appropriate service to be handled. +pub struct Router { + /// Access to the peer db and network information. + network_globals: Arc>, + /// A reference to the underlying beacon chain. + chain: Arc>, + /// A channel to the syncing thread. + sync_send: mpsc::UnboundedSender>, + /// A network context to return and handle RPC requests. + network: HandlerNetworkContext, + /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. + beacon_processor_send: mpsc::Sender>, + /// The `Router` logger. + log: slog::Logger, +} + +/// Types of messages the router can receive. +#[derive(Debug)] +pub enum RouterMessage { + /// Peer has disconnected. + PeerDisconnected(PeerId), + /// An RPC request has been received. + RPCRequestReceived { + peer_id: PeerId, + id: PeerRequestId, + request: Request, + }, + /// An RPC response has been received. + RPCResponseReceived { + peer_id: PeerId, + request_id: RequestId, + response: Response, + }, + /// An RPC request failed + RPCFailed { + peer_id: PeerId, + request_id: RequestId, + }, + /// A gossip message has been received. The fields are: message id, the peer that sent us this + /// message, the message itself and a bool which indicates if the message should be processed + /// by the beacon chain after successful verification. + PubsubMessage(MessageId, PeerId, PubsubMessage, bool), + /// The peer manager has requested we re-status a peer. + StatusPeer(PeerId), +} + +impl Router { + /// Initializes and runs the Router. 
+ pub fn spawn( + beacon_chain: Arc>, + network_globals: Arc>, + network_send: mpsc::UnboundedSender>, + executor: task_executor::TaskExecutor, + log: slog::Logger, + ) -> error::Result>> { + let message_handler_log = log.new(o!("service"=> "router")); + trace!(message_handler_log, "Service starting"); + + let (handler_send, handler_recv) = mpsc::unbounded_channel(); + + let (beacon_processor_send, beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + + let sync_logger = log.new(o!("service"=> "sync")); + + // spawn the sync thread + let sync_send = crate::sync::manager::spawn( + executor.clone(), + beacon_chain.clone(), + network_globals.clone(), + network_send.clone(), + beacon_processor_send.clone(), + sync_logger, + ); + + BeaconProcessor { + beacon_chain: Arc::downgrade(&beacon_chain), + network_tx: network_send.clone(), + sync_tx: sync_send.clone(), + network_globals: network_globals.clone(), + executor: executor.clone(), + max_workers: cmp::max(1, num_cpus::get()), + current_workers: 0, + importing_blocks: Default::default(), + log: log.clone(), + } + .spawn_manager(beacon_processor_receive, None); + + // generate the Message handler + let mut handler = Router { + network_globals, + chain: beacon_chain, + sync_send, + network: HandlerNetworkContext::new(network_send, log.clone()), + beacon_processor_send, + log: message_handler_log, + }; + + // spawn handler task and move the message handler instance into the spawned thread + executor.spawn( + async move { + debug!(log, "Network message router started"); + UnboundedReceiverStream::new(handler_recv) + .for_each(move |msg| future::ready(handler.handle_message(msg))) + .await; + }, + "router", + ); + + Ok(handler_send) + } + + /// Handle all messages incoming from the network service. + fn handle_message(&mut self, message: RouterMessage) { + match message { + // we have initiated a connection to a peer or the peer manager has requested a + // re-status + RouterMessage::StatusPeer(peer_id) => { + self.send_status(peer_id); + } + // A peer has disconnected + RouterMessage::PeerDisconnected(peer_id) => { + self.send_to_sync(SyncMessage::Disconnect(peer_id)); + } + RouterMessage::RPCRequestReceived { + peer_id, + id, + request, + } => { + self.handle_rpc_request(peer_id, id, request); + } + RouterMessage::RPCResponseReceived { + peer_id, + request_id, + response, + } => { + self.handle_rpc_response(peer_id, request_id, response); + } + RouterMessage::RPCFailed { + peer_id, + request_id, + } => { + self.on_rpc_error(peer_id, request_id); + } + RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { + self.handle_gossip(id, peer_id, gossip, should_process); + } + } + } + + /* RPC - Related functionality */ + + /// A new RPC request has been received from the network. 
+ fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: PeerRequestId, request: Request) { + if !self.network_globals.peers.read().is_connected(&peer_id) { + debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); + return; + } + match request { + Request::Status(status_message) => { + self.on_status_request(peer_id, request_id, status_message) + } + Request::BlocksByRange(request) => self.send_beacon_processor_work( + BeaconWorkEvent::blocks_by_range_request(peer_id, request_id, request), + ), + Request::BlocksByRoot(request) => self.send_beacon_processor_work( + BeaconWorkEvent::blocks_by_roots_request(peer_id, request_id, request), + ), + Request::LightClientBootstrap(request) => self.send_beacon_processor_work( + BeaconWorkEvent::lightclient_bootstrap_request(peer_id, request_id, request), + ), + } + } + + /// An RPC response has been received from the network. + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + response: Response, + ) { + match response { + Response::Status(status_message) => { + debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message); + self.send_beacon_processor_work(BeaconWorkEvent::status_message( + peer_id, + status_message, + )) + } + Response::BlocksByRange(beacon_block) => { + self.on_blocks_by_range_response(peer_id, request_id, beacon_block); + } + Response::BlocksByRoot(beacon_block) => { + self.on_blocks_by_root_response(peer_id, request_id, beacon_block); + } + Response::LightClientBootstrap(_) => unreachable!(), + } + } + + /// Handle RPC messages. + /// Note: `should_process` is currently only useful for the `Attestation` variant. + /// if `should_process` is `false`, we only propagate the message on successful verification, + /// else, we propagate **and** import into the beacon chain. 
+ fn handle_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + gossip_message: PubsubMessage, + should_process: bool, + ) { + match gossip_message { + PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self + .send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation( + message_id, + peer_id, + *aggregate_and_proof, + timestamp_now(), + )), + PubsubMessage::Attestation(subnet_attestation) => { + self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation( + message_id, + peer_id, + subnet_attestation.1, + subnet_attestation.0, + should_process, + timestamp_now(), + )) + } + PubsubMessage::BeaconBlock(block) => { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( + message_id, + peer_id, + self.network_globals.client(&peer_id), + block, + timestamp_now(), + )) + } + PubsubMessage::VoluntaryExit(exit) => { + debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit( + message_id, peer_id, exit, + )) + } + PubsubMessage::ProposerSlashing(proposer_slashing) => { + debug!( + self.log, + "Received a proposer slashing"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing( + message_id, + peer_id, + proposer_slashing, + )) + } + PubsubMessage::AttesterSlashing(attester_slashing) => { + debug!( + self.log, + "Received a attester slashing"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing( + message_id, + peer_id, + attester_slashing, + )) + } + PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { + trace!( + self.log, + "Received sync committee aggregate"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( + message_id, + peer_id, + *contribution_and_proof, + timestamp_now(), + )) + } + PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { + trace!( + self.log, + "Received sync committee signature"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( + message_id, + peer_id, + sync_committtee_msg.1, + sync_committtee_msg.0, + timestamp_now(), + )) + } + PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { + trace!( + self.log, + "Received light client finality update"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work( + BeaconWorkEvent::gossip_light_client_finality_update( + message_id, + peer_id, + light_client_finality_update, + timestamp_now(), + ), + ) + } + PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { + trace!( + self.log, + "Received light client optimistic update"; + "peer_id" => %peer_id + ); + self.send_beacon_processor_work( + BeaconWorkEvent::gossip_light_client_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + timestamp_now(), + ), + ) + } + PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => self + .send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( + message_id, + peer_id, + bls_to_execution_change, + )), + } + } + + fn send_status(&mut self, peer_id: PeerId) { + let status_message = status_message(&self.chain); + debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + self.network + .send_processor_request(peer_id, Request::Status(status_message)); + } + + fn send_to_sync(&mut self, message: SyncMessage) { + 
self.sync_send.send(message).unwrap_or_else(|e| { + warn!( + self.log, + "Could not send message to the sync service"; + "error" => %e, + ) + }); + } + + /// An error occurred during an RPC request. The state is maintained by the sync manager, so + /// this function notifies the sync manager of the error. + pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { + // Check if the failed RPC belongs to sync + if let RequestId::Sync(request_id) = request_id { + self.send_to_sync(SyncMessage::RpcError { + peer_id, + request_id, + }); + } + } + + /// Handle a `Status` request. + /// + /// Processes the `Status` from the remote peer and sends back our `Status`. + pub fn on_status_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + status: StatusMessage, + ) { + debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); + + // Say status back. + self.network.send_response( + peer_id, + Response::Status(status_message(&self.chain)), + request_id, + ); + + self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) + } + + /// Handle a `BlocksByRange` response from the peer. + /// A `beacon_block` behaves as a stream which is terminated on a `None` response. + pub fn on_blocks_by_range_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_block: Option>>, + ) { + let request_id = match request_id { + RequestId::Sync(sync_id) => match sync_id { + SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { + unreachable!("Block lookups do not request BBRange requests") + } + id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id, + }, + RequestId::Router => unreachable!("All BBRange requests belong to sync"), + }; + + trace!( + self.log, + "Received BlocksByRange Response"; + "peer" => %peer_id, + ); + + self.send_to_sync(SyncMessage::RpcBlock { + peer_id, + request_id, + beacon_block, + seen_timestamp: timestamp_now(), + }); + } + + /// Handle a `BlocksByRoot` response from the peer. + pub fn on_blocks_by_root_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_block: Option>>, + ) { + let request_id = match request_id { + RequestId::Sync(sync_id) => match sync_id { + id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, + SyncId::BackFillSync { .. } | SyncId::RangeSync { .. } => { + unreachable!("Batch syncing do not request BBRoot requests") + } + }, + RequestId::Router => unreachable!("All BBRoot requests belong to sync"), + }; + + trace!( + self.log, + "Received BlocksByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcBlock { + peer_id, + request_id, + beacon_block, + seen_timestamp: timestamp_now(), + }); + } + + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { + self.beacon_processor_send + .try_send(work) + .unwrap_or_else(|e| { + let work_type = match &e { + mpsc::error::TrySendError::Closed(work) + | mpsc::error::TrySendError::Full(work) => work.work_type(), + }; + error!(&self.log, "Unable to send message to the beacon processor"; + "error" => %e, "type" => work_type) + }) + } +} + +/// Wraps a Network Channel to employ various RPC related network functionality for the +/// processor. +#[derive(Clone)] +pub struct HandlerNetworkContext { + /// The network channel to relay messages to the Network service. + network_send: mpsc::UnboundedSender>, + /// Logger for the `NetworkContext`. 
+ log: slog::Logger, +} + +impl HandlerNetworkContext { + pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { + Self { network_send, log } + } + + /// Sends a message to the network task. + fn inform_network(&mut self, msg: NetworkMessage) { + self.network_send.send(msg).unwrap_or_else( + |e| warn!(self.log, "Could not send message to the network service"; "error" => %e), + ) + } + + /// Sends a request to the network task. + pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { + self.inform_network(NetworkMessage::SendRequest { + peer_id, + request_id: RequestId::Router, + request, + }) + } + + /// Sends a response to the network task. + pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { + self.inform_network(NetworkMessage::SendResponse { + peer_id, + id, + response, + }) + } +} + +fn timestamp_now() -> Duration { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) +} diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs deleted file mode 100644 index ce98337cfed..00000000000 --- a/beacon_node/network/src/router/mod.rs +++ /dev/null @@ -1,309 +0,0 @@ -//! This module handles incoming network messages. -//! -//! It routes the messages to appropriate services. -//! It handles requests at the application layer in its associated processor and directs -//! syncing-related responses to the Sync manager. -#![allow(clippy::unit_arg)] - -mod processor; - -use crate::error; -use crate::service::{NetworkMessage, RequestId}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use futures::prelude::*; -use lighthouse_network::{ - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, -}; -use processor::Processor; -use slog::{debug, o, trace}; -use std::sync::Arc; -use tokio::sync::mpsc; -use tokio_stream::wrappers::UnboundedReceiverStream; -use types::EthSpec; - -/// Handles messages received from the network and client and organises syncing. This -/// functionality of this struct is to validate an decode messages from the network before -/// passing them to the internal message processor. The message processor spawns a syncing thread -/// which manages which blocks need to be requested and processed. -pub struct Router { - /// Access to the peer db. - network_globals: Arc>, - /// Processes validated and decoded messages from the network. Has direct access to the - /// sync manager. - processor: Processor, - /// The `Router` logger. - log: slog::Logger, -} - -/// Types of messages the handler can receive. -#[derive(Debug)] -pub enum RouterMessage { - /// We have initiated a connection to a new peer. - PeerDialed(PeerId), - /// Peer has disconnected, - PeerDisconnected(PeerId), - /// An RPC request has been received. - RPCRequestReceived { - peer_id: PeerId, - id: PeerRequestId, - request: Request, - }, - /// An RPC response has been received. - RPCResponseReceived { - peer_id: PeerId, - request_id: RequestId, - response: Response, - }, - /// An RPC request failed - RPCFailed { - peer_id: PeerId, - request_id: RequestId, - }, - /// A gossip message has been received. The fields are: message id, the peer that sent us this - /// message, the message itself and a bool which indicates if the message should be processed - /// by the beacon chain after successful verification. - PubsubMessage(MessageId, PeerId, PubsubMessage, bool), - /// The peer manager has requested we re-status a peer. 
- StatusPeer(PeerId), -} - -impl Router { - /// Initializes and runs the Router. - pub fn spawn( - beacon_chain: Arc>, - network_globals: Arc>, - network_send: mpsc::UnboundedSender>, - executor: task_executor::TaskExecutor, - log: slog::Logger, - ) -> error::Result>> { - let message_handler_log = log.new(o!("service"=> "router")); - trace!(message_handler_log, "Service starting"); - - let (handler_send, handler_recv) = mpsc::unbounded_channel(); - - // Initialise a message instance, which itself spawns the syncing thread. - let processor = Processor::new( - executor.clone(), - beacon_chain, - network_globals.clone(), - network_send, - &log, - ); - - // generate the Message handler - let mut handler = Router { - network_globals, - processor, - log: message_handler_log, - }; - - // spawn handler task and move the message handler instance into the spawned thread - executor.spawn( - async move { - debug!(log, "Network message router started"); - UnboundedReceiverStream::new(handler_recv) - .for_each(move |msg| future::ready(handler.handle_message(msg))) - .await; - }, - "router", - ); - - Ok(handler_send) - } - - /// Handle all messages incoming from the network service. - fn handle_message(&mut self, message: RouterMessage) { - match message { - // we have initiated a connection to a peer or the peer manager has requested a - // re-status - RouterMessage::PeerDialed(peer_id) | RouterMessage::StatusPeer(peer_id) => { - self.processor.send_status(peer_id); - } - // A peer has disconnected - RouterMessage::PeerDisconnected(peer_id) => { - self.processor.on_disconnect(peer_id); - } - RouterMessage::RPCRequestReceived { - peer_id, - id, - request, - } => { - self.handle_rpc_request(peer_id, id, request); - } - RouterMessage::RPCResponseReceived { - peer_id, - request_id, - response, - } => { - self.handle_rpc_response(peer_id, request_id, response); - } - RouterMessage::RPCFailed { - peer_id, - request_id, - } => { - self.processor.on_rpc_error(peer_id, request_id); - } - RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { - self.handle_gossip(id, peer_id, gossip, should_process); - } - } - } - - /* RPC - Related functionality */ - - /// A new RPC request has been received from the network. - fn handle_rpc_request(&mut self, peer_id: PeerId, id: PeerRequestId, request: Request) { - if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); - return; - } - match request { - Request::Status(status_message) => { - self.processor - .on_status_request(peer_id, id, status_message) - } - Request::BlocksByRange(request) => self - .processor - .on_blocks_by_range_request(peer_id, id, request), - Request::BlocksByRoot(request) => self - .processor - .on_blocks_by_root_request(peer_id, id, request), - Request::LightClientBootstrap(request) => self - .processor - .on_lightclient_bootstrap(peer_id, id, request), - } - } - - /// An RPC response has been received from the network. - // we match on id and ignore responses past the timeout. - fn handle_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - response: Response, - ) { - // an error could have occurred. 
- match response { - Response::Status(status_message) => { - self.processor.on_status_response(peer_id, status_message); - } - Response::BlocksByRange(beacon_block) => { - self.processor - .on_blocks_by_range_response(peer_id, request_id, beacon_block); - } - Response::BlocksByRoot(beacon_block) => { - self.processor - .on_blocks_by_root_response(peer_id, request_id, beacon_block); - } - Response::LightClientBootstrap(_) => unreachable!(), - } - } - - /// Handle RPC messages. - /// Note: `should_process` is currently only useful for the `Attestation` variant. - /// if `should_process` is `false`, we only propagate the message on successful verification, - /// else, we propagate **and** import into the beacon chain. - fn handle_gossip( - &mut self, - id: MessageId, - peer_id: PeerId, - gossip_message: PubsubMessage, - should_process: bool, - ) { - match gossip_message { - // Attestations should never reach the router. - PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => { - self.processor - .on_aggregated_attestation_gossip(id, peer_id, *aggregate_and_proof); - } - PubsubMessage::Attestation(subnet_attestation) => { - self.processor.on_unaggregated_attestation_gossip( - id, - peer_id, - subnet_attestation.1.clone(), - subnet_attestation.0, - should_process, - ); - } - PubsubMessage::BeaconBlock(block) => { - self.processor.on_block_gossip( - id, - peer_id, - self.network_globals.client(&peer_id), - block, - ); - } - PubsubMessage::VoluntaryExit(exit) => { - debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); - self.processor.on_voluntary_exit_gossip(id, peer_id, exit); - } - PubsubMessage::ProposerSlashing(proposer_slashing) => { - debug!( - self.log, - "Received a proposer slashing"; - "peer_id" => %peer_id - ); - self.processor - .on_proposer_slashing_gossip(id, peer_id, proposer_slashing); - } - PubsubMessage::AttesterSlashing(attester_slashing) => { - debug!( - self.log, - "Received a attester slashing"; - "peer_id" => %peer_id - ); - self.processor - .on_attester_slashing_gossip(id, peer_id, attester_slashing); - } - PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { - trace!( - self.log, - "Received sync committee aggregate"; - "peer_id" => %peer_id - ); - self.processor.on_sync_committee_contribution_gossip( - id, - peer_id, - *contribution_and_proof, - ); - } - PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { - trace!( - self.log, - "Received sync committee signature"; - "peer_id" => %peer_id - ); - self.processor.on_sync_committee_signature_gossip( - id, - peer_id, - sync_committtee_msg.1, - sync_committtee_msg.0, - ); - } - PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { - trace!( - self.log, - "Received light client finality update"; - "peer_id" => %peer_id - ); - self.processor.on_light_client_finality_update_gossip( - id, - peer_id, - light_client_finality_update, - ); - } - PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { - trace!( - self.log, - "Received light client optimistic update"; - "peer_id" => %peer_id - ); - self.processor.on_light_client_optimistic_update_gossip( - id, - peer_id, - light_client_optimistic_update, - ); - } - } - } -} diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs deleted file mode 100644 index 999ba29e90a..00000000000 --- a/beacon_node/network/src/router/processor.rs +++ /dev/null @@ -1,459 +0,0 @@ -use crate::beacon_processor::{ - BeaconProcessor, WorkEvent as 
BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, -}; -use crate::service::{NetworkMessage, RequestId}; -use crate::status::status_message; -use crate::sync::manager::RequestId as SyncId; -use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use lighthouse_network::rpc::*; -use lighthouse_network::{ - Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, -}; -use slog::{debug, error, o, trace, warn}; -use std::cmp; -use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use store::SyncCommitteeMessage; -use tokio::sync::mpsc; -use types::{ - Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncSubnetId, -}; - -/// Processes validated messages from the network. It relays necessary data to the syncing thread -/// and processes blocks from the pubsub network. -pub struct Processor { - /// A reference to the underlying beacon chain. - chain: Arc>, - /// A channel to the syncing thread. - sync_send: mpsc::UnboundedSender>, - /// A network context to return and handle RPC requests. - network: HandlerNetworkContext, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, - /// The `RPCHandler` logger. - log: slog::Logger, -} - -impl Processor { - /// Instantiate a `Processor` instance - pub fn new( - executor: task_executor::TaskExecutor, - beacon_chain: Arc>, - network_globals: Arc>, - network_send: mpsc::UnboundedSender>, - log: &slog::Logger, - ) -> Self { - let sync_logger = log.new(o!("service"=> "sync")); - let (beacon_processor_send, beacon_processor_receive) = - mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); - - // spawn the sync thread - let sync_send = crate::sync::manager::spawn( - executor.clone(), - beacon_chain.clone(), - network_globals.clone(), - network_send.clone(), - beacon_processor_send.clone(), - sync_logger, - ); - - BeaconProcessor { - beacon_chain: Arc::downgrade(&beacon_chain), - network_tx: network_send.clone(), - sync_tx: sync_send.clone(), - network_globals, - executor, - max_workers: cmp::max(1, num_cpus::get()), - current_workers: 0, - importing_blocks: Default::default(), - log: log.clone(), - } - .spawn_manager(beacon_processor_receive, None); - - Processor { - chain: beacon_chain, - sync_send, - network: HandlerNetworkContext::new(network_send, log.clone()), - beacon_processor_send, - log: log.new(o!("service" => "router")), - } - } - - fn send_to_sync(&mut self, message: SyncMessage) { - self.sync_send.send(message).unwrap_or_else(|e| { - warn!( - self.log, - "Could not send message to the sync service"; - "error" => %e, - ) - }); - } - - /// Handle a peer disconnect. - /// - /// Removes the peer from the manager. - pub fn on_disconnect(&mut self, peer_id: PeerId) { - self.send_to_sync(SyncMessage::Disconnect(peer_id)); - } - - /// An error occurred during an RPC request. The state is maintained by the sync manager, so - /// this function notifies the sync manager of the error. - pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { - // Check if the failed RPC belongs to sync - if let RequestId::Sync(request_id) = request_id { - self.send_to_sync(SyncMessage::RpcError { - peer_id, - request_id, - }); - } - } - - /// Sends a `Status` message to the peer. 
- /// - /// Called when we first connect to a peer, or when the PeerManager determines we need to - /// re-status. - pub fn send_status(&mut self, peer_id: PeerId) { - let status_message = status_message(&self.chain); - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); - self.network - .send_processor_request(peer_id, Request::Status(status_message)); - } - - /// Handle a `Status` request. - /// - /// Processes the `Status` from the remote peer and sends back our `Status`. - pub fn on_status_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - status: StatusMessage, - ) { - debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); - - // Say status back. - self.network.send_response( - peer_id, - Response::Status(status_message(&self.chain)), - request_id, - ); - - self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) - } - - /// Process a `Status` response from a peer. - pub fn on_status_response(&mut self, peer_id: PeerId, status: StatusMessage) { - debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status); - self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) - } - - /// Handle a `BlocksByRoot` request from the peer. - pub fn on_blocks_by_root_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRootRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::blocks_by_roots_request( - peer_id, request_id, request, - )) - } - - /// Handle a `LightClientBootstrap` request from the peer. - pub fn on_lightclient_bootstrap( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - request: LightClientBootstrapRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request( - peer_id, request_id, request, - )) - } - - /// Handle a `BlocksByRange` request from the peer. - pub fn on_blocks_by_range_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - req: BlocksByRangeRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::blocks_by_range_request( - peer_id, request_id, req, - )) - } - - /// Handle a `BlocksByRange` response from the peer. - /// A `beacon_block` behaves as a stream which is terminated on a `None` response. - pub fn on_blocks_by_range_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - beacon_block: Option>>, - ) { - let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { - unreachable!("Block lookups do not request BBRange requests") - } - id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id, - }, - RequestId::Router => unreachable!("All BBRange requests belong to sync"), - }; - - trace!( - self.log, - "Received BlocksByRange Response"; - "peer" => %peer_id, - ); - - self.send_to_sync(SyncMessage::RpcBlock { - peer_id, - request_id, - beacon_block, - seen_timestamp: timestamp_now(), - }); - } - - /// Handle a `BlocksByRoot` response from the peer. - pub fn on_blocks_by_root_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - beacon_block: Option>>, - ) { - let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } | SyncId::RangeSync { .. 
} => { - unreachable!("Batch syncing do not request BBRoot requests") - } - }, - RequestId::Router => unreachable!("All BBRoot requests belong to sync"), - }; - - trace!( - self.log, - "Received BlocksByRoot Response"; - "peer" => %peer_id, - ); - self.send_to_sync(SyncMessage::RpcBlock { - peer_id, - request_id, - beacon_block, - seen_timestamp: timestamp_now(), - }); - } - - /// Process a gossip message declaring a new block. - /// - /// Attempts to apply to block to the beacon chain. May queue the block for later processing. - /// - /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. - pub fn on_block_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block: Arc>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( - message_id, - peer_id, - peer_client, - block, - timestamp_now(), - )) - } - - pub fn on_unaggregated_attestation_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - unaggregated_attestation: Attestation, - subnet_id: SubnetId, - should_process: bool, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation( - message_id, - peer_id, - unaggregated_attestation, - subnet_id, - should_process, - timestamp_now(), - )) - } - - pub fn on_aggregated_attestation_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - aggregate: SignedAggregateAndProof, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation( - message_id, - peer_id, - aggregate, - timestamp_now(), - )) - } - - pub fn on_voluntary_exit_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - voluntary_exit: Box, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit( - message_id, - peer_id, - voluntary_exit, - )) - } - - pub fn on_proposer_slashing_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - proposer_slashing: Box, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing( - message_id, - peer_id, - proposer_slashing, - )) - } - - pub fn on_attester_slashing_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - attester_slashing: Box>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing( - message_id, - peer_id, - attester_slashing, - )) - } - - pub fn on_sync_committee_signature_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - sync_signature: SyncCommitteeMessage, - subnet_id: SyncSubnetId, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( - message_id, - peer_id, - sync_signature, - subnet_id, - timestamp_now(), - )) - } - - pub fn on_sync_committee_contribution_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - sync_contribution: SignedContributionAndProof, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( - message_id, - peer_id, - sync_contribution, - timestamp_now(), - )) - } - - pub fn on_light_client_finality_update_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - light_client_finality_update: Box>, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_finality_update( - message_id, - peer_id, - light_client_finality_update, - timestamp_now(), - )) - } - - pub fn on_light_client_optimistic_update_gossip( - &mut self, - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - ) { - 
self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_optimistic_update( - message_id, - peer_id, - light_client_optimistic_update, - timestamp_now(), - )) - } - - fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { - self.beacon_processor_send - .try_send(work) - .unwrap_or_else(|e| { - let work_type = match &e { - mpsc::error::TrySendError::Closed(work) - | mpsc::error::TrySendError::Full(work) => work.work_type(), - }; - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) - }) - } -} - -/// Wraps a Network Channel to employ various RPC related network functionality for the -/// processor. -#[derive(Clone)] -pub struct HandlerNetworkContext { - /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender>, - /// Logger for the `NetworkContext`. - log: slog::Logger, -} - -impl HandlerNetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { - Self { network_send, log } - } - - /// Sends a message to the network task. - fn inform_network(&mut self, msg: NetworkMessage) { - self.network_send.send(msg).unwrap_or_else( - |e| warn!(self.log, "Could not send message to the network service"; "error" => %e), - ) - } - - /// Sends a request to the network task. - pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { - self.inform_network(NetworkMessage::SendRequest { - peer_id, - request_id: RequestId::Router, - request, - }) - } - - /// Sends a response to the network task. - pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { - self.inform_network(NetworkMessage::SendResponse { - peer_id, - id, - response, - }) - } -} - -fn timestamp_now() -> Duration { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) -} diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4568ed1a229..3e86d2099f0 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -19,7 +19,7 @@ use lighthouse_network::{ Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, }; use lighthouse_network::{ - types::{GossipEncoding, GossipTopic}, + types::{core_topics_to_subscribe, GossipEncoding, GossipTopic}, MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; @@ -228,16 +228,21 @@ impl NetworkService { let (network_senders, network_recievers) = NetworkSenders::new(); // try and construct UPnP port mappings if required. 
- let upnp_config = crate::nat::UPnPConfig::from(config); - let upnp_log = network_log.new(o!("service" => "UPnP")); - let upnp_network_send = network_senders.network_send(); - if config.upnp_enabled { - executor.spawn_blocking( - move || { - crate::nat::construct_upnp_mappings(upnp_config, upnp_network_send, upnp_log) - }, - "UPnP", - ); + if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) { + let upnp_log = network_log.new(o!("service" => "UPnP")); + let upnp_network_send = network_senders.network_send(); + if config.upnp_enabled { + executor.spawn_blocking( + move || { + crate::nat::construct_upnp_mappings( + upnp_config, + upnp_network_send, + upnp_log, + ) + }, + "UPnP", + ); + } } // get a reference to the beacon chain store @@ -445,7 +450,7 @@ impl NetworkService { let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); info!(self.log, "Subscribing to new fork topics"); - self.libp2p.subscribe_new_fork_topics(fork_digest); + self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest); self.next_fork_subscriptions = Box::pin(None.into()); } else { @@ -467,7 +472,7 @@ impl NetworkService { ) { match ev { NetworkEvent::PeerConnectedOutgoing(peer_id) => { - self.send_to_router(RouterMessage::PeerDialed(peer_id)); + self.send_to_router(RouterMessage::StatusPeer(peer_id)); } NetworkEvent::PeerConnectedIncoming(_) | NetworkEvent::PeerBanned(_) @@ -684,7 +689,7 @@ impl NetworkService { } let mut subscribed_topics: Vec = vec![]; - for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { + for topic_kind in core_topics_to_subscribe(self.fork_context.current_fork()) { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( topic_kind.clone(), diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index f0dd0e75ffd..83fcc8c9ac8 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -59,10 +59,9 @@ mod tests { ); let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs - config.libp2p_port = 21212; config.upnp_enabled = false; - config.discovery_port = 21212; config.boot_nodes_enr = enrs.clone(); runtime.block_on(async move { // Create a new network service which implicitly gets dropped at the diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 9e1c9f51bcc..a407fe1bcf8 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -182,6 +182,7 @@ mod attestation_service { #[cfg(feature = "deterministic_long_lived_attnets")] use std::collections::HashSet; + #[cfg(not(windows))] use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -290,6 +291,7 @@ mod attestation_service { } /// Test to verify that we are not unsubscribing to a subnet before a required subscription. 
+ #[cfg(not(windows))] #[tokio::test] async fn test_same_subnet_unsubscription() { // subscription config @@ -513,6 +515,7 @@ mod attestation_service { assert_eq!(unexpected_msg_count, 0); } + #[cfg(not(windows))] #[tokio::test] async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index c61ca6b2cff..3ec24a18490 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -13,12 +13,13 @@ parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../../consensus/ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../../consensus/ssz_derive" } rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" store = { path = "../store" } bitvec = "1" +rand = "0.8.5" [dev-dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs new file mode 100644 index 00000000000..c73666e1458 --- /dev/null +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -0,0 +1,147 @@ +use state_processing::SigVerifiedOp; +use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::sync::Arc; +use types::{ + AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, + SignedBlsToExecutionChange, +}; + +/// Indicates if a `BlsToExecutionChange` was received before or after the +/// Capella fork. This is used to know which messages we should broadcast at the +/// Capella fork epoch. +#[derive(Copy, Clone)] +pub enum ReceivedPreCapella { + Yes, + No, +} + +/// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator. +/// +/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, +/// and is less-relevant after that. +#[derive(Debug, Default)] +pub struct BlsToExecutionChanges { + /// Map from validator index to BLS to execution change. + by_validator_index: HashMap>>, + /// Last-in-first-out (LIFO) queue of verified messages. + queue: Vec>>, + /// Contains a set of validator indices which need to have their changes + /// broadcast at the capella epoch. + received_pre_capella_indices: HashSet, +} + +impl BlsToExecutionChanges { + pub fn existing_change_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.by_validator_index + .get(&address_change.message.validator_index) + .map(|existing| existing.as_inner() == address_change) + } + + pub fn insert( + &mut self, + verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, + ) -> bool { + let validator_index = verified_change.as_inner().message.validator_index; + // Wrap in an `Arc` once on insert. + let verified_change = Arc::new(verified_change); + match self.by_validator_index.entry(validator_index) { + Entry::Vacant(entry) => { + self.queue.push(verified_change.clone()); + entry.insert(verified_change); + if matches!(received_pre_capella, ReceivedPreCapella::Yes) { + self.received_pre_capella_indices.insert(validator_index); + } + true + } + Entry::Occupied(_) => false, + } + } + + /// FIFO ordering, used for persistence to disk. 
+ pub fn iter_fifo( + &self, + ) -> impl Iterator>> { + self.queue.iter() + } + + /// LIFO ordering, used for block packing. + pub fn iter_lifo( + &self, + ) -> impl Iterator>> { + self.queue.iter().rev() + } + + /// Returns only those which are flagged for broadcasting at the Capella + /// fork. Uses FIFO ordering, although we expect this list to be shuffled by + /// the caller. + pub fn iter_received_pre_capella( + &self, + ) -> impl Iterator>> { + self.queue.iter().filter(|address_change| { + self.received_pre_capella_indices + .contains(&address_change.as_inner().message.validator_index) + }) + } + + /// Returns the set of indices which should have their address changes + /// broadcast at the Capella fork. + pub fn iter_pre_capella_indices(&self) -> impl Iterator { + self.received_pre_capella_indices.iter() + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + /// + /// The block check is necessary to avoid pruning too eagerly and losing the ability to include + /// address changes during re-orgs. This isn't *perfect*, so some address changes could + /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished + /// due to the gossip duplicate rules. + pub fn prune>( + &mut self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + let mut validator_indices_pruned = vec![]; + + self.queue.retain(|address_change| { + let validator_index = address_change.as_inner().message.validator_index; + head_state + .validators() + .get(validator_index as usize) + .map_or(true, |validator| { + let prune = validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }); + if prune { + validator_indices_pruned.push(validator_index); + } + !prune + }) + }); + + for validator_index in validator_indices_pruned { + self.by_validator_index.remove(&validator_index); + } + } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary.
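A minimal standalone sketch of the ordering rules above (at most one change per validator index, LIFO iteration for block packing, FIFO iteration for persistence, and a set of indices whose changes still need broadcasting at the Capella fork). `Pool` and `Change` are hypothetical stand-ins for `BlsToExecutionChanges` and its `SigVerifiedOp` entries, not the Lighthouse types; signature verification and `Arc` wrapping are omitted.

use std::collections::{HashMap, HashSet};

#[derive(Clone)]
struct Change {
    validator_index: u64,
}

#[derive(Default)]
struct Pool {
    by_validator_index: HashMap<u64, Change>,
    queue: Vec<Change>, // push order == arrival order
    received_pre_capella: HashSet<u64>,
}

impl Pool {
    // Insert only if this validator has no change in the pool yet.
    fn insert(&mut self, change: Change, pre_capella: bool) -> bool {
        if self.by_validator_index.contains_key(&change.validator_index) {
            return false;
        }
        if pre_capella {
            self.received_pre_capella.insert(change.validator_index);
        }
        self.by_validator_index
            .insert(change.validator_index, change.clone());
        self.queue.push(change);
        true
    }

    // Newest first: the order used for block packing.
    fn iter_lifo(&self) -> impl Iterator<Item = &Change> {
        self.queue.iter().rev()
    }

    // Arrival order: the order used when persisting the pool to disk.
    fn iter_fifo(&self) -> impl Iterator<Item = &Change> {
        self.queue.iter()
    }
}

fn main() {
    let mut pool = Pool::default();
    pool.insert(Change { validator_index: 1 }, true);
    pool.insert(Change { validator_index: 2 }, false);
    // A second change for the same validator is rejected, not replaced.
    assert!(!pool.insert(Change { validator_index: 1 }, false));

    let lifo: Vec<u64> = pool.iter_lifo().map(|c| c.validator_index).collect();
    let fifo: Vec<u64> = pool.iter_fifo().map(|c| c.validator_index).collect();
    assert_eq!(lifo, vec![2, 1]);
    assert_eq!(fifo, vec![1, 2]);
    // Validator 1's change still needs to be broadcast at the fork.
    assert!(pool.received_pre_capella.contains(&1));
}

As in the real pool, a duplicate change for an already-known validator index returns `false` rather than displacing the earlier entry.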
+ pub fn register_indices_broadcasted_at_capella(&mut self, broadcasted: &HashSet) { + self.received_pre_capella_indices = self + .received_pre_capella_indices + .difference(broadcasted) + .copied() + .collect(); + } +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4fe5a725458..24c0623f5c3 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -2,25 +2,31 @@ mod attestation; mod attestation_id; mod attestation_storage; mod attester_slashing; +mod bls_to_execution_changes; mod max_cover; mod metrics; mod persistence; mod reward_cache; mod sync_aggregate_id; -pub use attestation::AttMaxCover; +pub use crate::bls_to_execution_changes::ReceivedPreCapella; +pub use attestation::{earliest_attestation_validators, AttMaxCover}; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, + PersistedOperationPoolV15, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; +use rand::seq::SliceRandom; +use rand::thread_rng; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -30,8 +36,9 @@ use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::marker::PhantomData; use std::ptr; use types::{ - sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, - AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, + sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, AbstractExecPayload, + Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, + Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; @@ -49,6 +56,8 @@ pub struct OperationPool { proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, + /// Map from credential changing validator to their position in the queue. + bls_to_execution_changes: RwLock>, /// Reward cache for accelerating attestation packing. 
reward_cache: RwLock, _phantom: PhantomData, @@ -429,7 +438,7 @@ impl OperationPool { pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -488,7 +497,8 @@ impl OperationPool { |exit| { filter(exit.as_inner()) && exit.signature_is_still_valid(&state.fork()) - && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok() + && verify_exit(state, None, exit.as_inner(), VerifySignatures::False, spec) + .is_ok() }, |exit| exit.as_inner().clone(), T::MaxVoluntaryExits::to_usize(), @@ -504,18 +514,121 @@ impl OperationPool { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently. - |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } + /// Check if an address change equal to `address_change` is already in the pool. + /// + /// Return `None` if no address change for the validator index exists in the pool. + pub fn bls_to_execution_change_in_pool_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.bls_to_execution_changes + .read() + .existing_change_equals(address_change) + } + + /// Insert a BLS to execution change into the pool, *only if* no prior change is known. + /// + /// Return `true` if the change was inserted. + pub fn insert_bls_to_execution_change( + &self, + verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, + ) -> bool { + self.bls_to_execution_changes + .write() + .insert(verified_change, received_pre_capella) + } + + /// Get a list of execution changes for inclusion in a block. + /// + /// They're in random `HashMap` order, which isn't exactly fair, but isn't unfair either. + pub fn get_bls_to_execution_changes( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + filter_limit_operations( + self.bls_to_execution_changes.read().iter_lifo(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + T::MaxBlsToExecutionChanges::to_usize(), + ) + } + + /// Get a list of execution changes to be broadcast at the Capella fork. + /// + /// The list that is returned will be shuffled to help provide a fair + /// broadcast of messages. + pub fn get_bls_to_execution_changes_received_pre_capella( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + let mut changes = filter_limit_operations( + self.bls_to_execution_changes + .read() + .iter_received_pre_capella(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + usize::max_value(), + ); + changes.shuffle(&mut thread_rng()); + changes + } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary. 
+ pub fn register_indices_broadcasted_at_capella(&self, broadcasted: &HashSet) { + self.bls_to_execution_changes + .write() + .register_indices_broadcasted_at_capella(broadcasted); + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + pub fn prune_bls_to_execution_changes>( + &self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + self.bls_to_execution_changes + .write() + .prune(head_block, head_state, spec) + } + /// Prune all types of transactions given the latest head state and head fork. - pub fn prune_all(&self, head_state: &BeaconState, current_epoch: Epoch) { + pub fn prune_all>( + &self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + current_epoch: Epoch, + spec: &ChainSpec, + ) { self.prune_attestations(current_epoch); self.prune_sync_contributions(head_state.slot()); self.prune_proposer_slashings(head_state); self.prune_attester_slashings(head_state); self.prune_voluntary_exits(head_state); + self.prune_bls_to_execution_changes(head_block, head_state, spec); } /// Total number of voluntary exits in the pool. @@ -581,6 +694,17 @@ impl OperationPool { .map(|(_, exit)| exit.as_inner().clone()) .collect() } + + /// Returns all known `SignedBlsToExecutionChange` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_bls_to_execution_changes(&self) -> Vec { + self.bls_to_execution_changes + .read() + .iter_fifo() + .map(|address_change| address_change.as_inner().clone()) + .collect() + } } /// Filter up to a maximum number of operations out of an iterator. @@ -614,7 +738,7 @@ fn prune_validator_hash_map( prune_if: F, head_state: &BeaconState, ) where - F: Fn(&Validator) -> bool, + F: Fn(u64, &Validator) -> bool, T: VerifyOperation, { map.retain(|&validator_index, op| { @@ -622,7 +746,7 @@ fn prune_validator_hash_map( && head_state .validators() .get(validator_index as usize) - .map_or(true, |validator| !prune_if(validator)) + .map_or(true, |validator| !prune_if(validator_index, validator)) }); } @@ -1665,7 +1789,7 @@ mod release_tests { fn cross_fork_harness() -> (BeaconChainHarness>, ChainSpec) { - let mut spec = test_spec::(); + let mut spec = E::default_spec(); // Give some room to sign surround slashings. spec.altair_fork_epoch = Some(Epoch::new(3)); diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ed15369df73..35d2b4ce7ee 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,5 +1,6 @@ use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; +use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; @@ -8,6 +9,8 @@ use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::SigVerifiedOp; +use std::collections::HashSet; +use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; @@ -18,7 +21,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { #[superstruct(only(V5))] pub attestations_v5: Vec<(AttestationId, Vec>)>, /// Attestations and their attesting indices. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. 
pub sync_contributions: PersistedSyncContributions, @@ -40,20 +43,27 @@ pub struct PersistedOperationPool { #[superstruct(only(V5))] pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub attester_slashings: Vec, T>>, /// [DEPRECATED] Proposer slashings. #[superstruct(only(V5))] pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub proposer_slashings: Vec>, /// [DEPRECATED] Voluntary exits. #[superstruct(only(V5))] pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub voluntary_exits: Vec>, + /// BLS to Execution Changes + #[superstruct(only(V14, V15))] + pub bls_to_execution_changes: Vec>, + /// Validator indices with BLS to Execution Changes to be broadcast at the + /// Capella fork. + #[superstruct(only(V15))] + pub capella_bls_change_broadcast_indices: Vec, } impl PersistedOperationPool { @@ -99,17 +109,33 @@ impl PersistedOperationPool { .map(|(_, exit)| exit.clone()) .collect(); - PersistedOperationPool::V12(PersistedOperationPoolV12 { + let bls_to_execution_changes = operation_pool + .bls_to_execution_changes + .read() + .iter_fifo() + .map(|bls_to_execution_change| (**bls_to_execution_change).clone()) + .collect(); + + let capella_bls_change_broadcast_indices = operation_pool + .bls_to_execution_changes + .read() + .iter_pre_capella_indices() + .copied() + .collect(); + + PersistedOperationPool::V15(PersistedOperationPoolV15 { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, }) } /// Reconstruct an `OperationPool`. - pub fn into_operation_pool(self) -> Result, OpPoolError> { + pub fn into_operation_pool(mut self) -> Result, OpPoolError> { let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); let proposer_slashings = RwLock::new( self.proposer_slashings()? 
@@ -127,21 +153,46 @@ impl PersistedOperationPool { ); let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); let attestations = match self { - PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant), - PersistedOperationPool::V12(pool) => { + PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { + return Err(OpPoolError::IncorrectOpPoolVariant) + } + PersistedOperationPool::V14(_) | PersistedOperationPool::V15(_) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in pool.attestations { + for (att, attesting_indices) in self.attestations()?.clone() { map.insert(att, attesting_indices); } RwLock::new(map) } }; + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + if let Ok(persisted_changes) = self.bls_to_execution_changes_mut() { + let persisted_changes = mem::take(persisted_changes); + + let broadcast_indices = + if let Ok(indices) = self.capella_bls_change_broadcast_indices_mut() { + mem::take(indices).into_iter().collect() + } else { + HashSet::new() + }; + + for bls_to_execution_change in persisted_changes { + let received_pre_capella = if broadcast_indices + .contains(&bls_to_execution_change.as_inner().message.validator_index) + { + ReceivedPreCapella::Yes + } else { + ReceivedPreCapella::No + }; + bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); + } + } let op_pool = OperationPool { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, + bls_to_execution_changes: RwLock::new(bls_to_execution_changes), reward_cache: Default::default(), _phantom: Default::default(), }; @@ -163,6 +214,48 @@ impl StoreItem for PersistedOperationPoolV5 { } } +impl StoreItem for PersistedOperationPoolV12 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +impl StoreItem for PersistedOperationPoolV14 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV14::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +impl StoreItem for PersistedOperationPoolV15 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { @@ -175,8 +268,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. 
- PersistedOperationPoolV12::from_ssz_bytes(bytes) - .map(Self::V12) + PersistedOperationPoolV15::from_ssz_bytes(bytes) + .map(Self::V15) .map_err(Into::into) } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 38d81512e4b..25521ec2428 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -71,7 +71,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") - .help("The address lighthouse will listen for UDP and TCP connections.") + .help("The address lighthouse will listen for UDP and TCP connections. To listen \ + over IpV4 and IpV6 set this flag twice with the different values.\n\ + Examples:\n\ + - --listen-address '0.0.0.0' will listen over Ipv4.\n\ + - --listen-address '::' will listen over Ipv6.\n\ + - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ + Ipv4 and Ipv6. The order of the given addresses is not relevant. However, \ + multiple Ipv4, or multiple Ipv6 addresses will not be accepted.") + .multiple(true) + .max_values(2) .default_value("0.0.0.0") .takes_value(true) ) @@ -79,10 +88,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("port") .long("port") .value_name("PORT") - .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") + .help("The TCP/UDP port to listen on. The UDP port can be modified by the \ + --discovery-port flag. If listening over both Ipv4 and Ipv6 the --port flag \ + will apply to the Ipv4 address and --port6 to the Ipv6 address.") .default_value("9000") .takes_value(true), ) + .arg( + Arg::with_name("port6") + .long("port6") + .value_name("PORT") + .help("The TCP/UDP port to listen on over IpV6 when listening over both Ipv4 and \ + Ipv6. Defaults to 9090 when required.") + .default_value("9090") + .takes_value(true), + ) .arg( Arg::with_name("discovery-port") .long("discovery-port") @@ -90,6 +110,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("The UDP port that discovery will listen on. Defaults to `port`") .takes_value(true), ) + .arg( + Arg::with_name("discovery-port6") + .long("discovery-port6") + .value_name("PORT") + .help("The UDP port that discovery will listen on over IpV6 if listening over \ + both Ipv4 and IpV6. Defaults to `port6`") + .hidden(true) // TODO: implement dual stack via two sockets in discv5. + .takes_value(true), + ) .arg( Arg::with_name("target-peers") .long("target-peers") @@ -130,27 +159,49 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("enr-udp-port") .long("enr-udp-port") .value_name("PORT") - .help("The UDP port of the local ENR. Set this only if you are sure other nodes can connect to your local node on this port.") + .help("The UDP4 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IpV4.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-udp6-port") + .long("enr-udp6-port") + .value_name("PORT") + .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IpV6.") .takes_value(true), ) .arg( Arg::with_name("enr-tcp-port") .long("enr-tcp-port") .value_name("PORT") - .help("The TCP port of the local ENR. Set this only if you are sure other nodes can connect to your local node on this port.\ - The --port flag is used if this is not set.") + .help("The TCP4 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IpV4. 
The --port flag is \ + used if this is not set.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-tcp6-port") + .long("enr-tcp6-port") + .value_name("PORT") + .help("The TCP6 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IpV6. The --port6 flag is \ + used if this is not set.") + .takes_value(true), + ) .arg( Arg::with_name("enr-address") .long("enr-address") .value_name("ADDRESS") - .help("The IP address/ DNS address to broadcast to other peers on how to reach this node. \ - If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ - does not auto-update based on PONG responses in discovery. \ - Set this only if you are sure other nodes can connect to your local node on this address. \ - Discovery will automatically find your external address, if possible.") + .help("The IP address/ DNS address to broadcast to other peers on how to reach \ + this node. If a DNS address is provided, the enr-address is set to the IP \ + address it resolves to and does not auto-update based on PONG responses in \ + discovery. Set this only if you are sure other nodes can connect to your \ + local node on this address. This will update the `ip4` or `ip6` ENR fields \ + accordingly. To update both, set this flag twice with the different values.") .requires("enr-udp-port") + .multiple(true) + .max_values(2) .takes_value(true), ) .arg( @@ -158,7 +209,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("e") .long("enr-match") .help("Sets the local ENR IP address and port to match those set for lighthouse. \ - Specifically, the IP address will be the value of --listen-address and the UDP port will be --discovery-port.") + Specifically, the IP address will be the value of --listen-address and the \ + UDP port will be --discovery-port.") ) .arg( Arg::with_name("disable-enr-auto-update") @@ -181,6 +233,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.") .takes_value(false), ) + .arg( + Arg::with_name("disable-peer-scoring") + .long("disable-peer-scoring") + .help("Disables peer scoring in lighthouse. WARNING: This is a dev only flag and is only meant to be used in local testing scenarios. \ + Using this flag on a real network may cause your node to become eclipsed and see a different view of the network.") + .takes_value(false) + .hidden(true), + ) .arg( Arg::with_name("trusted-peers") .long("trusted-peers") @@ -194,6 +254,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses.") .takes_value(false), ) + .arg( + Arg::with_name("self-limiter") + .long("self-limiter") + .help( + "Enables the outbound rate limiter (requests made by this node).\ + \ + Rate limit quotas per protocol can be set in the form of \ + <protocol_name>:<tokens>/<time_in_seconds>. To set quotas for multiple protocols, \ + separate them by ';'. If the self rate limiter is enabled and a protocol is not \ + present in the configuration, the quotas used for the inbound rate limiter will be \ + used." + ) + .min_values(0) + .hidden(true) + ) + .arg( + Arg::with_name("disable-backfill-rate-limiting") + .long("disable-backfill-rate-limiting") + .help("Disable the backfill sync rate-limiting. This allows users to just sync the entire chain as fast \ as possible, however it can result in resource contention which degrades staking performance.
Stakers \ + should generally choose to avoid this flag since backfill sync is not required for staking.") + .takes_value(false), + ) /* REST API related arguments */ .arg( Arg::with_name("http") @@ -303,6 +386,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5054).") .takes_value(true), ) + .arg( + Arg::with_name("shuffling-cache-size") + .long("shuffling-cache-size") + .help("Some HTTP API requests can be optimised by caching the shufflings at each epoch. \ + This flag allows the user to set the shuffling cache size in epochs. \ + Shufflings are dependent on validator count and setting this value to a large number can consume a large amount of memory.") + .takes_value(true) + ) /* * Monitoring metrics @@ -794,6 +885,28 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { allowed. Default: 2") .conflicts_with("disable-proposer-reorgs") ) + .arg( + Arg::with_name("proposer-reorg-cutoff") + .long("proposer-reorg-cutoff") + .value_name("MILLISECONDS") + .help("Maximum delay after the start of the slot at which to propose a reorging \ + block. Lower values can prevent failed reorgs by ensuring the block has \ + ample time to propagate and be processed by the network. The default is \ + 1/12th of a slot (1 second on mainnet)") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("proposer-reorg-disallowed-offsets") + .long("proposer-reorg-disallowed-offsets") + .value_name("N1,N2,...") + .help("Comma-separated list of integer offsets which can be used to avoid \ + proposing reorging blocks at certain slots. An offset of N means that \ + reorging proposals will not be attempted at any slot such that \ + `slot % SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be \ + avoided. Any offsets supplied with this flag will impose additional \ + restrictions.") + .conflicts_with("disable-proposer-reorgs") + ) .arg( Arg::with_name("prepare-payload-lookahead") .long("prepare-payload-lookahead") @@ -804,6 +917,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { for ensuring the EL is given ample notice. Default: 1/3 of a slot.") .takes_value(true) ) + .arg( + Arg::with_name("always-prepare-payload") + .long("always-prepare-payload") + .help("Send payload attributes with every fork choice update. This is intended for \ + use by block builders, relays and developers. You should set a fee \ + recipient on this BN and also consider adjusting the \ + --prepare-payload-lookahead flag.") + .takes_value(false) + ) .arg( Arg::with_name("fork-choice-before-proposal-timeout") .long("fork-choice-before-proposal-timeout") @@ -878,12 +1000,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("0") .takes_value(true) ) + .arg( + Arg::with_name("builder-user-agent") + .long("builder-user-agent") + .value_name("STRING") + .help("The HTTP user agent to send alongside requests to the builder URL. 
The \ + default is Lighthouse's version string.") + .requires("builder") + .takes_value(true) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") .hidden(true) - .help("Enables an alternative, potentially more performant FFG \ - vote tracking method.") + .help("This flag is deprecated and has no effect.") .takes_value(true) .default_value("true") ) @@ -891,7 +1021,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("count-unrealized-full") .long("count-unrealized-full") .hidden(true) - .help("Stricter version of `count-unrealized`.") + .help("This flag is deprecated and has no effect.") .takes_value(true) .default_value("false") ) @@ -933,4 +1063,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { This is equivalent to --http and --validator-monitor-auto.") .takes_value(false) ) + .arg( + Arg::with_name("always-prefer-builder-payload") + .long("always-prefer-builder-payload") + .help("If set, the beacon node always uses the payload from the builder instead of the local payload.") + // The builder profit threshold flag is used to provide preference + // to local payloads, therefore it fundamentally conflicts with + // always using the builder. + .conflicts_with("builder-profit-threshold") + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 294568cca9f..8cc38a534bc 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use beacon_chain::chain_config::{ - ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, + DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use clap::ArgMatches; @@ -10,13 +10,13 @@ use environment::RuntimeContext; use execution_layer::DEFAULT_JWT_FILE; use genesis::Eth1Endpoint; use http_api::TlsConfig; +use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; use std::fmt::Debug; -use std::fmt::Write; use std::fs; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; @@ -24,7 +24,6 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; -use unused_port::{unused_tcp_port, unused_udp_port}; /// Gets the fully-initialized global client. /// @@ -78,13 +77,7 @@ pub fn get_config( let data_dir_ref = client_config.data_dir().clone(); - set_network_config( - &mut client_config.network, - cli_args, - &data_dir_ref, - log, - false, - )?; + set_network_config(&mut client_config.network, cli_args, &data_dir_ref, log)?; /* * Staking flag @@ -155,6 +148,10 @@ pub fn get_config( client_config.http_api.allow_sync_stalled = true; } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { + client_config.chain.shuffling_cache_size = cache_size; + } + /* * Prometheus metrics HTTP server */ @@ -332,6 +329,9 @@ pub fn get_config( let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); + + el_config.builder_user_agent = + clap_utils::parse_optional(cli_args, "builder-user-agent")?; } // Set config values from parse values. @@ -404,13 +404,6 @@ pub fn get_config( * Discovery address is set to localhost by default. 
*/ if cli_args.is_present("zero-ports") { - if client_config.network.enr_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { - client_config.network.enr_address = None - } - client_config.network.libp2p_port = - unused_tcp_port().map_err(|e| format!("Failed to get port for libp2p: {}", e))?; - client_config.network.discovery_port = - unused_udp_port().map_err(|e| format!("Failed to get port for discovery: {}", e))?; client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -696,6 +689,23 @@ pub fn get_config( client_config.chain.re_org_max_epochs_since_finalization = clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")? .unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION); + client_config.chain.re_org_cutoff_millis = + clap_utils::parse_optional(cli_args, "proposer-reorg-cutoff")?; + + if let Some(disallowed_offsets_str) = + clap_utils::parse_optional::(cli_args, "proposer-reorg-disallowed-offsets")? + { + let disallowed_offsets = disallowed_offsets_str + .split(',') + .map(|s| { + s.parse() + .map_err(|e| format!("invalid disallowed-offsets: {e:?}")) + }) + .collect::, _>>()?; + client_config.chain.re_org_disallowed_offsets = + DisallowedReOrgOffsets::new::(disallowed_offsets) + .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))?; + } } // Note: This overrides any previous flags that enable this option. @@ -711,16 +721,29 @@ pub fn get_config( / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR }); + client_config.chain.always_prepare_payload = cli_args.is_present("always-prepare-payload"); + if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? { client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - client_config.chain.count_unrealized = - clap_utils::parse_required(cli_args, "count-unrealized")?; - client_config.chain.count_unrealized_full = - clap_utils::parse_required::(cli_args, "count-unrealized-full")?.into(); + if !clap_utils::parse_required::(cli_args, "count-unrealized")? { + warn!( + log, + "The flag --count-unrealized is deprecated and will be removed"; + "info" => "any use of the flag will have no effect" + ); + } + + if clap_utils::parse_required::(cli_args, "count-unrealized-full")? { + warn!( + log, + "The flag --count-unrealized-full is deprecated and will be removed"; + "info" => "setting it to `true` has no effect" + ); + } client_config.chain.always_reset_payload_statuses = cli_args.is_present("reset-payload-statuses"); @@ -751,16 +774,189 @@ pub fn get_config( client_config.chain.optimistic_finalized_sync = !cli_args.is_present("disable-optimistic-finalized-sync"); + // Payload selection configs + if cli_args.is_present("always-prefer-builder-payload") { + client_config.always_prefer_builder_payload = true; + } + + // Backfill sync rate-limiting + client_config.chain.enable_backfill_rate_limiting = + !cli_args.is_present("disable-backfill-rate-limiting"); + Ok(client_config) } -/// Sets the network config from the command line arguments +/// Gets the listening_addresses for lighthouse based on the cli options. 
+pub fn parse_listening_addresses( + cli_args: &ArgMatches, + log: &Logger, +) -> Result { + let listen_addresses_str = cli_args + .values_of("listen-address") + .expect("--listen_addresses has a default value"); + + let use_zero_ports = cli_args.is_present("zero-ports"); + + // parse the possible ips + let mut maybe_ipv4 = None; + let mut maybe_ipv6 = None; + for addr_str in listen_addresses_str { + let addr = addr_str.parse::().map_err(|parse_error| { + format!("Failed to parse listen-address ({addr_str}) as an Ip address: {parse_error}") + })?; + + match addr { + IpAddr::V4(v4_addr) => match &maybe_ipv4 { + Some(first_ipv4_addr) => { + return Err(format!( + "When setting the --listen-address option twice, use an IpV4 address and an Ipv6 address. \ + Got two IpV4 addresses {first_ipv4_addr} and {v4_addr}" + )); + } + None => maybe_ipv4 = Some(v4_addr), + }, + IpAddr::V6(v6_addr) => match &maybe_ipv6 { + Some(first_ipv6_addr) => { + return Err(format!( + "When setting the --listen-address option twice, use an IpV4 address and an Ipv6 address. \ + Got two IpV6 addresses {first_ipv6_addr} and {v6_addr}" + )); + } + None => maybe_ipv6 = Some(v6_addr), + }, + } + } + + // parse the possible tcp ports + let port = cli_args + .value_of("port") + .expect("--port has a default value") + .parse::() + .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; + let port6 = cli_args + .value_of("port6") + .map(str::parse::) + .transpose() + .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? + .unwrap_or(9090); + + // parse the possible udp ports + let maybe_udp_port = cli_args + .value_of("discovery-port") + .map(str::parse::) + .transpose() + .map_err(|parse_error| { + format!("Failed to parse --discovery-port as an integer: {parse_error}") + })?; + let maybe_udp6_port = cli_args + .value_of("discovery-port6") + .map(str::parse::) + .transpose() + .map_err(|parse_error| { + format!("Failed to parse --discovery-port6 as an integer: {parse_error}") + })?; + + // Now put everything together + let listening_addresses = match (maybe_ipv4, maybe_ipv6) { + (None, None) => { + // This should never happen unless clap is broken + return Err("No listening addresses provided".into()); + } + (None, Some(ipv6)) => { + // A single ipv6 address was provided. Set the ports + + if cli_args.is_present("port6") { + warn!(log, "When listening only over IpV6, use the --port flag. The value of --port6 will be ignored.") + } + // use zero ports if required. If not, use the given port. + let tcp_port = use_zero_ports + .then(unused_port::unused_tcp6_port) + .transpose()? + .unwrap_or(port); + + if maybe_udp6_port.is_some() { + warn!(log, "When listening only over IpV6, use the --discovery-port flag. The value of --discovery-port6 will be ignored.") + } + // use zero ports if required. If not, use the specific udp port. If none given, use + // the tcp port. + let udp_port = use_zero_ports + .then(unused_port::unused_udp6_port) + .transpose()? + .or(maybe_udp_port) + .unwrap_or(port); + + ListenAddress::V6(lighthouse_network::ListenAddr { + addr: ipv6, + udp_port, + tcp_port, + }) + } + (Some(ipv4), None) => { + // A single ipv4 address was provided. Set the ports + + // use zero ports if required. If not, use the given port. + let tcp_port = use_zero_ports + .then(unused_port::unused_tcp4_port) + .transpose()? + .unwrap_or(port); + // use zero ports if required. If not, use the specific udp port. If none given, use + // the tcp port. 
+ let udp_port = use_zero_ports + .then(unused_port::unused_udp4_port) + .transpose()? + .or(maybe_udp_port) + .unwrap_or(port); + ListenAddress::V4(lighthouse_network::ListenAddr { + addr: ipv4, + udp_port, + tcp_port, + }) + } + (Some(ipv4), Some(ipv6)) => { + let ipv4_tcp_port = use_zero_ports + .then(unused_port::unused_tcp4_port) + .transpose()? + .unwrap_or(port); + let ipv4_udp_port = use_zero_ports + .then(unused_port::unused_udp4_port) + .transpose()? + .or(maybe_udp_port) + .unwrap_or(ipv4_tcp_port); + + // Defaults to 9090 when required + let ipv6_tcp_port = use_zero_ports + .then(unused_port::unused_tcp6_port) + .transpose()? + .unwrap_or(port6); + let ipv6_udp_port = use_zero_ports + .then(unused_port::unused_udp6_port) + .transpose()? + .or(maybe_udp6_port) + .unwrap_or(ipv6_tcp_port); + ListenAddress::DualStack( + lighthouse_network::ListenAddr { + addr: ipv4, + udp_port: ipv4_udp_port, + tcp_port: ipv4_tcp_port, + }, + lighthouse_network::ListenAddr { + addr: ipv6, + udp_port: ipv6_udp_port, + tcp_port: ipv6_tcp_port, + }, + ) + } + }; + + Ok(listening_addresses) +} + +/// Sets the network config from the command line arguments. pub fn set_network_config( config: &mut NetworkConfig, cli_args: &ArgMatches, data_dir: &Path, log: &Logger, - use_listening_port_as_enr_port_by_default: bool, ) -> Result<(), String> { // If a network dir has been specified, override the `datadir` definition. if let Some(dir) = cli_args.value_of("network-dir") { @@ -781,12 +977,7 @@ pub fn set_network_config( config.shutdown_after_sync = true; } - if let Some(listen_address_str) = cli_args.value_of("listen-address") { - let listen_address = listen_address_str - .parse() - .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; - config.listen_address = listen_address; - } + config.set_listening_addr(parse_listening_addresses(cli_args, log)?); if let Some(target_peers_str) = cli_args.value_of("target-peers") { config.target_peers = target_peers_str @@ -794,21 +985,6 @@ pub fn set_network_config( .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; } - if let Some(port_str) = cli_args.value_of("port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - config.libp2p_port = port; - config.discovery_port = port; - } - - if let Some(port_str) = cli_args.value_of("discovery-port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - config.discovery_port = port; - } - if let Some(value) = cli_args.value_of("network-load") { let network_load = value .parse::() @@ -852,6 +1028,10 @@ pub fn set_network_config( .collect::, _>>()?; } + if cli_args.is_present("disable-peer-scoring") { + config.disable_peer_scoring = true; + } + if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') @@ -864,7 +1044,7 @@ pub fn set_network_config( } if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { - config.enr_udp_port = Some( + config.enr_udp4_port = Some( enr_udp_port_str .parse::() .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, @@ -872,7 +1052,23 @@ pub fn set_network_config( } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { - config.enr_tcp_port = Some( + config.enr_tcp4_port = Some( + enr_tcp_port_str + .parse::() + .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, + ); + } + + if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { 
+ config.enr_udp6_port = Some( + enr_udp_port_str + .parse::() + .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + ); + } + + if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { + config.enr_tcp6_port = Some( enr_tcp_port_str .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, @@ -880,58 +1076,106 @@ pub fn set_network_config( } if cli_args.is_present("enr-match") { + // Match the Ip and UDP port in the enr. + // set the enr address to localhost if the address is unspecified - if config.listen_address == IpAddr::V4(Ipv4Addr::UNSPECIFIED) { - config.enr_address = Some(IpAddr::V4(Ipv4Addr::LOCALHOST)); - } else if config.listen_address == IpAddr::V6(Ipv6Addr::UNSPECIFIED) { - config.enr_address = Some(IpAddr::V6(Ipv6Addr::LOCALHOST)); - } else { - config.enr_address = Some(config.listen_address); + if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { + let ipv4_enr_addr = if ipv4_addr.addr == Ipv4Addr::UNSPECIFIED { + Ipv4Addr::LOCALHOST + } else { + ipv4_addr.addr + }; + config.enr_address.0 = Some(ipv4_enr_addr); + config.enr_udp4_port = Some(ipv4_addr.udp_port); + } + + if let Some(ipv6_addr) = config.listen_addrs().v6().cloned() { + let ipv6_enr_addr = if ipv6_addr.addr == Ipv6Addr::UNSPECIFIED { + Ipv6Addr::LOCALHOST + } else { + ipv6_addr.addr + }; + config.enr_address.1 = Some(ipv6_enr_addr); + config.enr_udp6_port = Some(ipv6_addr.udp_port); } - config.enr_udp_port = Some(config.discovery_port); - } - - if let Some(enr_address) = cli_args.value_of("enr-address") { - let resolved_addr = match enr_address.parse::() { - Ok(addr) => addr, // // Input is an IpAddr - Err(_) => { - let mut addr = enr_address.to_string(); - // Appending enr-port to the dns hostname to appease `to_socket_addrs()` parsing. - // Since enr-update is disabled with a dns address, not setting the enr-udp-port - // will make the node undiscoverable. - if let Some(enr_udp_port) = - config - .enr_udp_port - .or(if use_listening_port_as_enr_port_by_default { - Some(config.discovery_port) - } else { - None - }) - { - write!(addr, ":{}", enr_udp_port) - .map_err(|e| format!("Failed to write enr address {}", e))?; - } else { - return Err( - "enr-udp-port must be set for node to be discoverable with dns address" - .into(), - ); + } + + if let Some(enr_addresses) = cli_args.values_of("enr-address") { + let mut enr_ip4 = None; + let mut enr_ip6 = None; + let mut resolved_enr_ip4 = None; + let mut resolved_enr_ip6 = None; + + for addr in enr_addresses { + match addr.parse::() { + Ok(IpAddr::V4(v4_addr)) => { + if let Some(used) = enr_ip4.as_ref() { + warn!(log, "More than one Ipv4 ENR address provided"; "used" => %used, "ignored" => %v4_addr) + } else { + enr_ip4 = Some(v4_addr) + } + } + Ok(IpAddr::V6(v6_addr)) => { + if let Some(used) = enr_ip6.as_ref() { + warn!(log, "More than one Ipv6 ENR address provided"; "used" => %used, "ignored" => %v6_addr) + } else { + enr_ip6 = Some(v6_addr) + } + } + Err(_) => { + // Try to resolve the address + + // NOTE: From checking the `to_socket_addrs` code I don't think the port + // actually matters. Just use the udp port. + + let port = match config.listen_addrs() { + ListenAddress::V4(v4_addr) => v4_addr.udp_port, + ListenAddress::V6(v6_addr) => v6_addr.udp_port, + ListenAddress::DualStack(v4_addr, _v6_addr) => { + // NOTE: slight preference for ipv4 that I don't think is of importance. 
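Reviewer note: the port selection in `parse_listening_addresses` above leans on a fairly dense `Option` combinator chain. The standalone sketch below is not part of the patch (the helper names and port numbers are illustrative); it just spells out the precedence that chain encodes: a randomly chosen unused port when zero-ports is requested, then an explicit CLI override, then the fallback port.

```rust
/// Illustrative only: mirrors the `bool::then` / `transpose` / `or` / `unwrap_or`
/// chain used in `parse_listening_addresses` to resolve each port.
fn resolve_port(
    use_zero_ports: bool,
    pick_unused: impl FnOnce() -> Result<u16, String>,
    cli_override: Option<u16>,
    fallback: u16,
) -> Result<u16, String> {
    Ok(use_zero_ports
        .then(pick_unused)    // Option<Result<u16, String>>
        .transpose()?         // Option<u16>, with any error propagated
        .or(cli_override)     // otherwise use the explicit flag value, if given
        .unwrap_or(fallback)) // otherwise fall back (e.g. to the TCP port)
}

fn main() -> Result<(), String> {
    // No zero-ports and no override: the fallback (9000 here) is reused.
    assert_eq!(resolve_port(false, || Ok(54321), None, 9000)?, 9000);
    // An explicit override wins over the fallback.
    assert_eq!(resolve_port(false, || Ok(54321), Some(9909), 9000)?, 9909);
    // Zero-ports requested: the randomly picked unused port wins.
    assert_eq!(resolve_port(true, || Ok(54321), Some(9909), 9000)?, 54321);
    Ok(())
}
```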
+ v4_addr.udp_port + } + }; + + let addr_str = format!("{addr}:{port}"); + match addr_str.to_socket_addrs() { + Err(_e) => { + return Err(format!("Failed to parse or resolve address {addr}.")) + } + Ok(resolved_addresses) => { + for socket_addr in resolved_addresses { + // Use the first ipv4 and first ipv6 addresses present. + + // NOTE: this means that if two dns addresses are provided, we + // might end up using the ipv4 and ipv6 resolved addresses of just + // the first. + match socket_addr.ip() { + IpAddr::V4(v4_addr) => { + if resolved_enr_ip4.is_none() { + resolved_enr_ip4 = Some(v4_addr) + } + } + IpAddr::V6(v6_addr) => { + if resolved_enr_ip6.is_none() { + resolved_enr_ip6 = Some(v6_addr) + } + } + } + } + } + } } - // `to_socket_addr()` does the dns resolution - // Note: `to_socket_addrs()` is a blocking call - let resolved_addr = if let Ok(mut resolved_addrs) = addr.to_socket_addrs() { - // Pick the first ip from the list of resolved addresses - resolved_addrs - .next() - .map(|a| a.ip()) - .ok_or("Resolved dns addr contains no entries")? - } else { - return Err(format!("Failed to parse enr-address: {}", enr_address)); - }; - config.discv5_config.enr_update = false; - resolved_addr } - }; - config.enr_address = Some(resolved_addr); + } + + // The ENR addresses given as ips should take preference over any resolved address + let used_host_resolution = resolved_enr_ip4.is_some() || resolved_enr_ip6.is_some(); + let ip4 = enr_ip4.or(resolved_enr_ip4); + let ip6 = enr_ip6.or(resolved_enr_ip6); + config.enr_address = (ip4, ip6); + if used_host_resolution { + config.discv5_config.enr_update = false; + } } if cli_args.is_present("disable-enr-auto-update") { @@ -967,6 +1211,13 @@ pub fn set_network_config( // Light client server config. config.enable_light_client_server = cli_args.is_present("light-client-server"); + // This flag can be used both with or without a value. Try to parse it first with a value, if + // no value is defined but the flag is present, use the default params. + config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; + if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { + config.outbound_rate_limiter_config = Some(Default::default()); + } + Ok(()) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 47aef580e13..7df97105cb5 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -14,7 +14,7 @@ leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../../consensus/ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../../consensus/ssz_derive" } types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 8c64d4bcc05..73edfbb0744 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -18,6 +18,7 @@ use self::UpdatePattern::*; use crate::*; use ssz::{Decode, Encode}; use typenum::Unsigned; +use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. 
/// @@ -26,7 +27,18 @@ use typenum::Unsigned; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UpdatePattern { /// The value is updated once per `n` slots. - OncePerNSlots { n: u64 }, + OncePerNSlots { + n: u64, + /// The slot at which the field begins to accumulate values. + /// + /// The field should not be read or written until `activation_slot` is reached, and the + /// activation slot should act as an offset when converting slots to vector indices. + activation_slot: Option, + /// The slot at which the field ceases to accumulate values. + /// + /// If this is `None` then the field is continually updated. + deactivation_slot: Option, + }, /// The value is updated once per epoch, for the epoch `current_epoch - lag`. OncePerEpoch { lag: u64 }, } @@ -98,12 +110,30 @@ pub trait Field: Copy { fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) { // We take advantage of saturating subtraction on slots and epochs match Self::update_pattern(spec) { - OncePerNSlots { n } => { + OncePerNSlots { + n, + activation_slot, + deactivation_slot, + } => { // Per-slot changes exclude the index for the current slot, because // it won't be set until the slot completes (think of `state_roots`, `block_roots`). // This also works for the `historical_roots` because at the `n`th slot, the 0th // entry of the list is created, and before that the list is empty. - let end_vindex = current_slot / n; + // + // To account for the switch from historical roots to historical summaries at + // Capella we also modify the current slot by the activation and deactivation slots. + // The activation slot acts as an offset (subtraction) while the deactivation slot + // acts as a clamp (min). + let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| { + std::cmp::min(current_slot, deactivation_slot) + }); + let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot { + slot_with_clamp - activation_slot + } else { + // Return (0, 0) to indicate that the field should not be read/written. 
+ return (0, 0); + }; + let end_vindex = slot_with_clamp_and_offset / n; let start_vindex = end_vindex - Self::Length::to_u64(); (start_vindex.as_usize(), end_vindex.as_usize()) } @@ -295,7 +325,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconBlockRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) ); @@ -305,7 +339,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconStateRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None, + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) ); @@ -315,8 +353,12 @@ field!( Hash256, T::HistoricalRootsLimit, DBColumn::BeaconHistoricalRoots, - |_| OncePerNSlots { - n: T::SlotsPerHistoricalRoot::to_u64() + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: Some(Slot::new(0)), + deactivation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) ); @@ -331,6 +373,27 @@ field!( |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) ); +field!( + HistoricalSummaries, + VariableLengthField, + HistoricalSummary, + T::HistoricalRootsLimit, + DBColumn::BeaconHistoricalSummaries, + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), + deactivation_slot: None, + }, + |state: &BeaconState<_>, index, _| safe_modulo_index( + state + .historical_summaries() + .map_err(|_| ChunkError::InvalidFork)?, + index + ) +); + pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( field: F, store: &S, @@ -679,6 +742,7 @@ pub enum ChunkError { end_vindex: usize, length: usize, }, + InvalidFork, } #[cfg(test)] diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 30ee66074f8..fcc40706b30 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -3,7 +3,7 @@ use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, Hash256, Slot}; +use types::{BeaconStateError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; @@ -42,9 +42,9 @@ pub enum Error { }, BlockReplayError(BlockReplayError), AddPayloadLogicError, - ResyncRequiredForExecutionPayloadSeparation, SlotClockUnavailableForMigration, - V9MigrationFailure(Hash256), + UnableToDowngrade, + InconsistentFork(InconsistentFork), } pub trait HandleUnavailable { @@ -103,6 +103,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: InconsistentFork) -> Error { + Error::InconsistentFork(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4f63f4e7f97..02608f9a0bd 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,5 +1,5 @@ use crate::chunked_vector::{ - store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots, + store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots, 
}; use crate::config::{ OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT, @@ -354,7 +354,8 @@ impl, Cold: ItemStore> HotColdDB } else if !self.config.prune_payloads { // If payload pruning is disabled there's a chance we may have the payload of // this finalized block. Attempt to load it but don't error in case it's missing. - if let Some(payload) = self.get_execution_payload(block_root)? { + let fork_name = blinded_block.fork_name(&self.spec)?; + if let Some(payload) = self.get_execution_payload(block_root, fork_name)? { DatabaseBlock::Full( blinded_block .try_into_full_block(Some(payload)) @@ -393,8 +394,9 @@ impl, Cold: ItemStore> HotColdDB blinded_block: SignedBeaconBlock>, ) -> Result, Error> { if blinded_block.message().execution_payload().is_ok() { + let fork_name = blinded_block.fork_name(&self.spec)?; let execution_payload = self - .get_execution_payload(block_root)? + .get_execution_payload(block_root, fork_name)? .ok_or(HotColdDBError::MissingExecutionPayload(*block_root))?; blinded_block.try_into_full_block(Some(execution_payload)) } else { @@ -413,7 +415,7 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch a block from the store, ignoring which fork variant it *should* be for. - pub fn get_block_any_variant>( + pub fn get_block_any_variant>( &self, block_root: &Hash256, ) -> Result>, Error> { @@ -424,7 +426,7 @@ impl, Cold: ItemStore> HotColdDB /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it /// were for a different fork. - pub fn get_block_with>( + pub fn get_block_with>( &self, block_root: &Hash256, decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, @@ -437,9 +439,26 @@ impl, Cold: ItemStore> HotColdDB } /// Load the execution payload for a block from disk. + /// This method deserializes with the proper fork. pub fn get_execution_payload( &self, block_root: &Hash256, + fork_name: ForkName, + ) -> Result>, Error> { + let column = ExecutionPayload::::db_column().into(); + let key = block_root.as_bytes(); + + match self.hot_db.get_bytes(column, key)? { + Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), + None => Ok(None), + } + } + + /// Load the execution payload for a block from disk. + /// DANGEROUS: this method just guesses the fork. + pub fn get_execution_payload_dangerous_fork_agnostic( + &self, + block_root: &Hash256, ) -> Result>, Error> { self.get_item(block_root) } @@ -727,6 +746,10 @@ impl, Cold: ItemStore> HotColdDB let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes()); key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + + StoreOp::KeyValueOp(kv_op) => { + key_value_batch.push(kv_op); + } } } Ok(key_value_batch) @@ -758,6 +781,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteState(_, _) => (), StoreOp::DeleteExecutionPayload(_) => (), + + StoreOp::KeyValueOp(_) => (), } } @@ -881,6 +906,7 @@ impl, Cold: ItemStore> HotColdDB store_updated_vector(StateRoots, db, state, &self.spec, ops)?; store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?; store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?; + store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?; // 3. Store restore point. 
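Reviewer note: the clamp-and-offset arithmetic introduced for `OncePerNSlots` in `chunked_vector.rs` above is easiest to check with concrete numbers. The sketch below is illustrative only, using plain `u64` slots and a made-up Capella fork slot rather than the real `Slot` type and chain spec.

```rust
/// Illustrative only: the deactivation slot clamps the current slot, the
/// activation slot is subtracted as an offset, and the result is divided by `n`.
fn end_vindex(
    current_slot: u64,
    n: u64,
    activation_slot: Option<u64>,
    deactivation_slot: Option<u64>,
) -> u64 {
    let clamped = deactivation_slot.map_or(current_slot, |d| current_slot.min(d));
    // The real code returns an empty (0, 0) range when no activation slot is
    // scheduled; an offset of 0 here has the same effect on `end_vindex`.
    let offset = activation_slot.map_or(0, |a| clamped.saturating_sub(a));
    offset / n
}

fn main() {
    // Assume 8192 slots per historical root and a hypothetical Capella fork at slot 16384.
    let (n, capella_slot) = (8192, 16384);
    // `historical_roots` deactivates at the fork, so it stops growing at 2 entries.
    assert_eq!(end_vindex(20_000, n, Some(0), Some(capella_slot)), 2);
    assert_eq!(end_vindex(100_000, n, Some(0), Some(capella_slot)), 2);
    // `historical_summaries` activates at the fork; its first entry completes 8192 slots later.
    assert_eq!(end_vindex(capella_slot + 8192, n, Some(capella_slot), None), 1);
}
```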
let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point; @@ -935,6 +961,7 @@ impl, Cold: ItemStore> HotColdDB partial_state.load_state_roots(&self.cold_db, &self.spec)?; partial_state.load_historical_roots(&self.cold_db, &self.spec)?; partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; + partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; partial_state.try_into() } @@ -1101,6 +1128,11 @@ impl, Cold: ItemStore> HotColdDB &self.spec } + /// Get a reference to the `Logger` used by the database. + pub fn logger(&self) -> &Logger { + &self.log + } + /// Fetch a copy of the current split slot from memory. pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot @@ -1709,7 +1741,7 @@ fn no_state_root_iter() -> Option { + impl StoreItem for $ty_name { + fn db_column() -> DBColumn { + DBColumn::ExecPayload + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } + } + }; +} +impl_store_item!(ExecutionPayloadMerge); +impl_store_item!(ExecutionPayloadCapella); + +/// This fork-agnostic implementation should be only used for writing. +/// +/// It is very inefficient at reading, and decoding the desired fork-specific variant is recommended +/// instead. impl StoreItem for ExecutionPayload { fn db_column() -> DBColumn { DBColumn::ExecPayload @@ -12,6 +36,9 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + .map_err(Into::into) } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 75aeca058b5..ee01fa1ae15 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -161,6 +161,7 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteBlock(Hash256), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), + KeyValueOp(KeyValueStoreOp), } /// A unique column identifier. @@ -211,6 +212,8 @@ pub enum DBColumn { /// For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, + #[strum(serialize = "bhs")] + BeaconHistoricalSummaries, } /// A block from the database, which might have an execution payload or not. diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 5cb3f122008..8e9b3599b14 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(16); // All the keys that get stored under the `BeaconMeta` column. 
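Reviewer note: the fork-agnostic `StoreItem` impl above has to guess which variant the stored bytes hold, trying the newest fork first and falling back to the older one. The toy sketch below shows that pattern only; the tag byte is a stand-in for SSZ decoding, which in reality carries no tag and can succeed for the wrong fork, hence the `_dangerous_fork_agnostic` naming on the read path.

```rust
/// Illustrative only: "try the newest fork first, fall back to the older one".
#[derive(Debug, PartialEq)]
enum Payload {
    Capella(Vec<u8>),
    Merge(Vec<u8>),
}

fn decode_capella(bytes: &[u8]) -> Result<Vec<u8>, String> {
    // Pretend Capella payloads start with a 2.
    (bytes.first() == Some(&2))
        .then(|| bytes[1..].to_vec())
        .ok_or_else(|| "not a Capella payload".to_string())
}

fn decode_merge(bytes: &[u8]) -> Result<Vec<u8>, String> {
    // Pretend Merge payloads start with a 1.
    (bytes.first() == Some(&1))
        .then(|| bytes[1..].to_vec())
        .ok_or_else(|| "not a Merge payload".to_string())
}

fn decode_stored_payload(bytes: &[u8]) -> Result<Payload, String> {
    // Newest fork first; fall back to the older variant on failure.
    decode_capella(bytes)
        .map(Payload::Capella)
        .or_else(|_| decode_merge(bytes).map(Payload::Merge))
}

fn main() {
    assert_eq!(decode_stored_payload(&[2, 7]), Ok(Payload::Capella(vec![7])));
    assert_eq!(decode_stored_payload(&[1, 7]), Ok(Payload::Merge(vec![7])));
    assert!(decode_stored_payload(&[9]).is_err());
}
```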
// diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 010796afd5b..cd923da40dc 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -1,12 +1,13 @@ use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes, - StateRoots, + load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, + HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; use std::sync::Arc; +use types::historical_summary::HistoricalSummary; use types::superstruct; use types::*; @@ -14,7 +15,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -66,9 +67,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub current_epoch_participation: VariableList, // Finality @@ -78,23 +79,41 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] - pub latest_execution_payload_header: ExecutionPayloadHeader, + #[superstruct( + only(Merge), + partial_getter(rename = "latest_execution_payload_header_merge") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Capella), + partial_getter(rename = "latest_execution_payload_header_capella") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + + // Capella + #[superstruct(only(Capella))] + pub next_withdrawal_index: u64, + #[superstruct(only(Capella))] + pub next_withdrawal_validator_index: u64, + + #[ssz(skip_serializing, skip_deserializing)] + #[superstruct(only(Capella))] + pub historical_summaries: Option>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. macro_rules! impl_from_state_forgetful { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => { PartialBeaconState::$variant_name($struct_name { // Versioning genesis_time: $s.genesis_time, @@ -135,6 +154,11 @@ macro_rules! 
impl_from_state_forgetful { // Variant-specific fields $( $extra_fields: $s.$extra_fields.clone() + ),*, + + // Variant-specific optional + $( + $extra_fields_opt: None ),* }) } @@ -149,7 +173,8 @@ impl PartialBeaconState { outer, Base, PartialBeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), BeaconState::Altair(s) => impl_from_state_forgetful!( s, @@ -162,7 +187,8 @@ impl PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), BeaconState::Merge(s) => impl_from_state_forgetful!( s, @@ -176,7 +202,25 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] + ), + BeaconState::Capella(s) => impl_from_state_forgetful!( + s, + outer, + Capella, + PartialBeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] ), } } @@ -252,6 +296,23 @@ impl PartialBeaconState { Ok(()) } + pub fn load_historical_summaries>( + &mut self, + store: &S, + spec: &ChainSpec, + ) -> Result<(), Error> { + let slot = self.slot(); + if let Ok(historical_summaries) = self.historical_summaries_mut() { + if historical_summaries.is_none() { + *historical_summaries = + Some(load_variable_list_from_db::( + store, slot, spec, + )?); + } + } + Ok(()) + } + pub fn load_randao_mixes>( &mut self, store: &S, @@ -275,7 +336,7 @@ impl PartialBeaconState { /// Implement the conversion from PartialBeaconState -> BeaconState. macro_rules! impl_try_into_beacon_state { - ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => { BeaconState::$variant_name($struct_name { // Versioning genesis_time: $inner.genesis_time, @@ -320,6 +381,11 @@ macro_rules! impl_try_into_beacon_state { // Variant-specific fields $( $extra_fields: $inner.$extra_fields + ),*, + + // Variant-specific optional fields + $( + $extra_opt_fields: unpack_field($inner.$extra_opt_fields)? 
),* }) } @@ -338,7 +404,8 @@ impl TryInto> for PartialBeaconState { inner, Base, BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( inner, @@ -350,7 +417,8 @@ impl TryInto> for PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( inner, @@ -363,7 +431,24 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] + ), + PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( + inner, + Capella, + BeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] ), }; Ok(state) diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index c939fd3f51f..c399f1b4571 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -1,6 +1,6 @@ //! Implementation of historic state reconstruction (given complete block history). use crate::hot_cold_store::{HotColdDB, HotColdDBError}; -use crate::{Error, ItemStore, KeyValueStore}; +use crate::{Error, ItemStore}; use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ @@ -13,8 +13,8 @@ use types::{EthSpec, Hash256}; impl HotColdDB where E: EthSpec, - Hot: KeyValueStore + ItemStore, - Cold: KeyValueStore + ItemStore, + Hot: ItemStore, + Cold: ItemStore, { pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { let mut anchor = if let Some(anchor) = self.get_anchor_info() { diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index 1c11a8349dd..bbec70330b7 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -1,5 +1,4 @@ #![cfg(test)] -#![recursion_limit = "256"] use beacon_chain::StateSkipConfig; use node_test_rig::{ diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 470407ebee9..ff5c1e9805f 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,7 +2,6 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) * [Docker](./docker.md) * [Build from Source](./installation-source.md) @@ -33,6 +32,11 @@ * [Authorization Header](./api-vc-auth-header.md) * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) +* [Lighthouse UI (Siren)](./lighthouse-ui.md) + * [Installation](./ui-installation.md) + * [Configuration](./ui-configuration.md) + * [Usage](./ui-usage.md) + * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index fb7f07a51a6..08d276ba356 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -41,7 +41,7 @@ drastically and use the (recommended) default. ### NAT Traversal (Port Forwarding) -Lighthouse, by default, used port 9000 for both TCP and UDP. Lighthouse will +Lighthouse, by default, uses port 9000 for both TCP and UDP. 
Lighthouse will still function if it is behind a NAT without any port mappings. Although Lighthouse still functions, we recommend that some mechanism is used to ensure that your Lighthouse node is publicly accessible. This will typically improve @@ -54,6 +54,16 @@ node will inform you of established routes in this case). If UPnP is not enabled, we recommend you manually set up port mappings to both of Lighthouse's TCP and UDP ports (9000 by default). +> Note: Lighthouse needs to advertise its publicly accessible ports in +> order to inform its peers that it is contactable and how to connect to it. +> Lighthouse has an automated way of doing this for the UDP port. This means +> Lighthouse can detect its external UDP port. There is no such mechanism for the +> TCP port. As such, we assume that the external UDP and external TCP port is the +> same (i.e external 5050 UDP/TCP mapping to internal 9000 is fine). If you are setting up differing external UDP and TCP ports, you should +> explicitly specify them using the `--enr-tcp-port` and `--enr-udp-port` as +> explained in the following section. + + ### ENR Configuration Lighthouse has a number of CLI parameters for constructing and modifying the diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 05cb0b69cf8..28481809703 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -141,7 +141,7 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic "attestation_head_hit_percentage": 100, "attestation_target_hits": 5, "attestation_target_misses": 5, - "attestation_target_hit_percentage": 50 + "attestation_target_hit_percentage": 50 } } } diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 736aa08f1cf..47dc03b20c4 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -48,17 +48,6 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` -### Use Infura as a remote beacon node provider - -You can use Infura as the remote beacon node provider to load the initial checkpoint state. - -1. Sign up for the free Infura ETH2 API using the `Create new project tab` on the [Infura dashboard](https://infura.io/dashboard). -2. Copy the HTTPS endpoint for the required network (Mainnet/Prater). -3. Use it as the url for the `--checkpoint-sync-url` flag. e.g. -``` -lighthouse bn --checkpoint-sync-url https://:@eth2-beacon-mainnet.infura.io ... -``` - ## Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks @@ -108,7 +97,7 @@ You can opt-in to reconstructing all of the historic states by providing the The database keeps track of three markers to determine the availability of historic blocks and states: -* `oldest_block_slot`: All blocks with slots less than or equal to this value are available in the +* `oldest_block_slot`: All blocks with slots greater than or equal to this value are available in the database. Additionally, the genesis block is always available. * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in the database. The minimum value is 0, indicating that the genesis state is always available. diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 0982e10ab90..d2b7b518d75 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -26,10 +26,17 @@ validator client or the slasher**. 
| v3.1.0 | Sep 2022 | v12 | yes | | v3.2.0 | Oct 2022 | v12 | yes | | v3.3.0 | Nov 2022 | v13 | yes | +| v3.4.0 | Jan 2023 | v13 | yes | +| v3.5.0 | Feb 2023 | v15 | yes before Capella | +| v4.0.1 | Mar 2023 | v16 | yes before Capella | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). +> **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We +usually do this after a major version has been out for a while and everyone has upgraded. In this +case the above table will continue to record the deprecated schema changes for reference. + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. @@ -110,7 +117,7 @@ Several conditions need to be met in order to run `lighthouse db`: 2. The command must run as the user that owns the beacon node database. If you are using systemd then your beacon node might run as a user called `lighthousebeacon`. 3. The `--datadir` flag must be set to the location of the Lighthouse data directory. -4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `prater` or `ropsten`. +4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `prater` or `sepolia`. The general form for a `lighthouse db` command is: diff --git a/book/src/docker.md b/book/src/docker.md index f22b8a20082..d67b084da63 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -16,21 +16,18 @@ way to run Lighthouse without building the image yourself. Obtain the latest image with: ```bash -$ docker pull sigp/lighthouse +docker pull sigp/lighthouse ``` Download and test the image with: ```bash -$ docker run sigp/lighthouse lighthouse --version +docker run sigp/lighthouse lighthouse --version ``` If you can see the latest [Lighthouse release](https://github.com/sigp/lighthouse/releases) version (see example below), then you've successfully installed Lighthouse via Docker. -> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker -> Images](#available-docker-images) below. - ### Example Version Output ``` @@ -38,6 +35,9 @@ Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` +> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker +> Images](#available-docker-images) below. + ### Available Docker Images There are several images available on Docker Hub. @@ -47,17 +47,16 @@ Lighthouse with optimizations enabled. If you are running on older hardware then `latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware compatibility (see [Portability](./installation-binaries.md#portability)). -To install a specific tag (in this case `latest-modern`) add the tag name to your `docker` commands -like so: +To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: ``` -$ docker pull sigp/lighthouse:latest-modern +docker pull sigp/lighthouse:latest-modern ``` Image tags follow this format: ``` -${version}${arch}${stability}${modernity} +${version}${arch}${stability}${modernity}${features} ``` The `version` is: @@ -65,22 +64,28 @@ The `version` is: * `vX.Y.Z` for a tagged Lighthouse release, e.g. `v2.1.1` * `latest` for the `stable` branch (latest release) or `unstable` branch -The `stability` is: - -* `-unstable` for the `unstable` branch -* empty for a tagged release or the `stable` branch - The `arch` is: * `-amd64` for x86_64, e.g. 
Intel, AMD * `-arm64` for aarch64, e.g. Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) +The `stability` is: + +* `-unstable` for the `unstable` branch +* empty for a tagged release or the `stable` branch + The `modernity` is: * `-modern` for optimized builds * empty for a `portable` unoptimized build +The `features` is: + +* `-dev` for a development build with `minimal-spec` preset enabled. +* empty for a standard build with no custom feature enabled. + + Examples: * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) @@ -93,13 +98,13 @@ To build the image from source, navigate to the root of the repository and run: ```bash -$ docker build . -t lighthouse:local +docker build . -t lighthouse:local ``` The build will likely take several minutes. Once it's built, test it with: ```bash -$ docker run lighthouse:local lighthouse --help +docker run lighthouse:local lighthouse --help ``` ## Using the Docker image @@ -107,12 +112,12 @@ $ docker run lighthouse:local lighthouse --help You can run a Docker beacon node with the following command: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 ``` -> To join the Prater testnet, use `--network prater` instead. +> To join the Goerli testnet, use `--network goerli` instead. -> The `-p` and `-v` and values are described below. +> The `-v` (Volumes) and `-p` (Ports) and values are described below. ### Volumes @@ -125,7 +130,7 @@ The following example runs a beacon node with the data directory mapped to the users home directory: ```bash -$ docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon +docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon ``` ### Ports @@ -134,14 +139,14 @@ In order to be a good peer and serve other peers you should expose port `9000` f Use the `-p` flag to do this: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon +docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon ``` If you use the `--http` flag you may also want to expose the HTTP port with `-p 127.0.0.1:5052:5052`. 
```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` [docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/ diff --git a/book/src/faq.md b/book/src/faq.md index 5bfae3fa875..b42e197a003 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -9,6 +9,11 @@ - [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) - [How can I monitor my validators?](#how-can-i-monitor-my-validators) +- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#i-see-beacon-logs-showing-warn-execution-engine-called-failed-what-should-i-do) +- [How do I check or update my withdrawal credentials?](#how-do-i-check-or-update-my-withdrawal-credentials) +- [I am missing attestations. Why?](#i-am-missing-attestations-why) +- [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#sometimes-i-miss-the-attestation-head-vote-resulting-in-penalty-is-this-normal) +- [My beacon node is stuck at downloading historical blocks using checkpoint sync. What can I do?](#my-beacon-node-is-stuck-at-downloading-historical-blocks-using-checkpoint-sync-what-can-i-do) ### Why does it take so long for a validator to be activated? @@ -128,8 +133,9 @@ same `datadir` as a previous network. I.e if you have been running the `datadir` (the `datadir` is also printed out in the beacon node's logs on boot-up). -If you find yourself with a low peer count and is not reaching the target you -expect. Try setting up the correct port forwards as described [here](./advanced_networking.md#nat-traversal-port-forwarding). +If you find yourself with a low peer count and it's not reaching the target you +expect, try setting up the correct port forwards as described +[here](./advanced_networking.md#nat-traversal-port-forwarding). ### What should I do if I lose my slashing protection database? @@ -184,4 +190,47 @@ However, there are some components which can be configured with redundancy. See Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which provides logging and Prometheus/Grafana metrics for individual validators. See [Validator -Monitoring](./validator-monitoring.md) for more information. +Monitoring](./validator-monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./lighthouse-ui.md). + +### I see beacon logs showing `WARN: Execution engine called failed`, what should I do? + +The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is: + +`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` + +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. There are a few reasons why this can occur: +1. The execution engine is not synced.
Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. +1. The computer is overloaded. Check the CPU and RAM usage to see if it is overloaded. You can use `htop` to check for CPU and RAM usage. +1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync with the network and you may want to consider upgrading to a better SSD. + +If the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by an ungraceful shutdown. The common causes for ungraceful shutdown are: +- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. +- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. +- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process"` to confirm that the execution client has been killed due to oom. If you are using geth as the execution client, a short-term solution is to reduce the resources used, for example: (1) reduce the cache by adding the flag `--cache 2048`; (2) connect to fewer peers using the flag `--maxpeers 10`. If the oom occurs rather frequently, a long-term solution is to increase the memory capacity of the computer. + + +### How do I check or update my withdrawal credentials? +Withdrawals will be available after the Capella/Shanghai upgrades on 12th April 2023. To check if you are eligible for withdrawals, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: +- `withdrawals enabled` means you will automatically receive withdrawals to the withdrawal address that you set. +- `withdrawals not enabled` means you will need to update your withdrawal credentials from `0x00` type to `0x01` type. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). + +For the case of `withdrawals not enabled`, you can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your rewards in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials will the rewards be withdrawn to the withdrawal address. + + +### I am missing attestations. Why? +The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that are causing the missed attestations.
Check the setup to ensure that: +- the clock is synced +- the computer has sufficient resources and is not overloaded +- the internet is working well +- you have sufficient peers + +You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestations should be a rare occurrence. + +### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? + +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch: if the proposer of that slot releases the block late, you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. + + +### My beacon node is stuck at downloading historical blocks using checkpoint sync. What can I do? + +Check the number of peers you are connected to. If you have few peers (fewer than 50), try setting up port forwarding on port 9000 TCP/UDP to increase the peer count. \ No newline at end of file diff --git a/book/src/imgs/ui-account-earnings.png b/book/src/imgs/ui-account-earnings.png new file mode 100644 index 00000000000..69e94560356 Binary files /dev/null and b/book/src/imgs/ui-account-earnings.png differ diff --git a/book/src/imgs/ui-balance-modal.png b/book/src/imgs/ui-balance-modal.png new file mode 100644 index 00000000000..9d7c0e36b45 Binary files /dev/null and b/book/src/imgs/ui-balance-modal.png differ diff --git a/book/src/imgs/ui-configuration.png b/book/src/imgs/ui-configuration.png new file mode 100644 index 00000000000..87ec95d7bbf Binary files /dev/null and b/book/src/imgs/ui-configuration.png differ diff --git a/book/src/imgs/ui-dashboard.png b/book/src/imgs/ui-dashboard.png new file mode 100644 index 00000000000..00552fe0768 Binary files /dev/null and b/book/src/imgs/ui-dashboard.png differ diff --git a/book/src/imgs/ui-device.png b/book/src/imgs/ui-device.png new file mode 100644 index 00000000000..0f1a0e77d30 Binary files /dev/null and b/book/src/imgs/ui-device.png differ diff --git a/book/src/imgs/ui-hardware.png b/book/src/imgs/ui-hardware.png new file mode 100644 index 00000000000..4178687f5da Binary files /dev/null and b/book/src/imgs/ui-hardware.png differ diff --git a/book/src/imgs/ui-settings.png b/book/src/imgs/ui-settings.png new file mode 100644 index 00000000000..da9cbca9fa6 Binary files /dev/null and b/book/src/imgs/ui-settings.png differ diff --git a/book/src/imgs/ui-validator-balance1.png b/book/src/imgs/ui-validator-balance1.png new file mode 100644 index 00000000000..4488cdd0534 Binary files /dev/null and b/book/src/imgs/ui-validator-balance1.png differ diff --git a/book/src/imgs/ui-validator-balance2.png b/book/src/imgs/ui-validator-balance2.png new file mode 100644 index 00000000000..44c5f516710 Binary files /dev/null and b/book/src/imgs/ui-validator-balance2.png differ diff --git a/book/src/imgs/ui-validator-management.png b/book/src/imgs/ui-validator-management.png new file mode 100644 index 00000000000..b28b372109c Binary files /dev/null and b/book/src/imgs/ui-validator-management.png differ diff --git a/book/src/imgs/ui-validator-modal.png b/book/src/imgs/ui-validator-modal.png new file mode 100644 index 00000000000..d53dc780988 Binary files /dev/null and b/book/src/imgs/ui-validator-modal.png differ diff --git a/book/src/imgs/ui-validator-table.png b/book/src/imgs/ui-validator-table.png
new file mode 100644 index 00000000000..10bcbd6f144 Binary files /dev/null and b/book/src/imgs/ui-validator-table.png differ diff --git a/book/src/imgs/ui.png b/book/src/imgs/ui.png new file mode 100644 index 00000000000..208cb3101d1 Binary files /dev/null and b/book/src/imgs/ui.png differ diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 2365ea7ed7b..30bf03e14ee 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -23,21 +23,24 @@ For details, see [Portability](#portability). ## Usage Each binary is contained in a `.tar.gz` archive. For this example, lets assume the user needs -a portable `x86_64` binary. +a `x86_64` binary. ### Steps 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and select the latest release. -1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz` binary. -1. Extract the archive: - 1. `cd Downloads` - 1. `tar -xvf lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` +1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary. For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a linux terminal: + ```bash + cd ~ + curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + ``` 1. Test the binary with `./lighthouse --version` (it should print the version). -1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. - - E.g., `cp lighthouse /usr/bin` +1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. -> Windows users will need to execute the commands in Step 3 from PowerShell. + + +> Windows users will need to execute the commands in Step 2 from PowerShell. ## Portability @@ -64,4 +67,4 @@ WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get On some VPS providers, the virtualization can make it appear as if CPU features are not available, even when they are. In this case you might see the warning above, but so long as the client -continues to function it's nothing to worry about. +continues to function, it's nothing to worry about. diff --git a/book/src/installation-source.md b/book/src/installation-source.md index b3d83ef9f9e..b9c9df163d8 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -5,8 +5,20 @@ the instructions below, and then proceed to [Building Lighthouse](#build-lightho ## Dependencies -First, **install Rust** using [rustup](https://rustup.rs/). The rustup installer provides an easy way -to update the Rust compiler, and works on all platforms. +First, **install Rust** using [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +The rustup installer provides an easy way to update the Rust compiler, and works on all platforms. + +> Tips: +> +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version` . If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. 
+> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. With Rust installed, follow the instructions below to install dependencies relevant to your operating system. @@ -19,10 +31,17 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` +> Tips: +> +> - If there are difficulties, try updating the package manager with `sudo apt +> update`. + > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories > of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: > [https://apt.kitware.com/](https://apt.kitware.com) +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### macOS 1. Install the [Homebrew][] package manager. @@ -39,10 +58,19 @@ brew install protobuf [Homebrew]: https://brew.sh/ +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### Windows -1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). +1. Install [Git](https://git-scm.com/download/win). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. + > Tips: + > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. + > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run + ```bash + Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + ``` + > - To verify that Chocolatey is ready, run `choco` and it should return the version. 1. Install Make, CMake, LLVM and protoc using Chocolatey: ``` @@ -64,8 +92,11 @@ choco install protoc These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. + [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about +After this, you are ready to [build Lighthouse](#build-lighthouse). + ## Build Lighthouse Once you have Rust and the build dependencies you're ready to build Lighthouse: @@ -128,8 +159,21 @@ Commonly used features include: * `gnosis`: support for the Gnosis Beacon Chain. * `portable`: support for legacy hardware. * `modern`: support for exclusively modern hardware. -* `slasher-mdbx`: support for the MDBX slasher backend (enabled by default). +* `slasher-mdbx`: support for the MDBX slasher backend. Enabled by default. * `slasher-lmdb`: support for the LMDB slasher backend. +* `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. + Not supported on Windows. +* `spec-minimal`: support for the minimal preset (useful for testing). + +Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` +argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. +E.g. 
+ +``` +CARGO_INSTALL_EXTRA_FLAGS="--no-default-features" make +``` + +[jemalloc]: https://jemalloc.net/ ## Compilation Profiles @@ -157,12 +201,11 @@ PROFILE=maxperf make Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory needs to be on your `PATH` before you can run `$ lighthouse`. -See ["Configuring the `PATH` environment variable" -(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information. +See ["Configuring the `PATH` environment variable"](https://www.rust-lang.org/tools/install) for more information. ### Compilation error -Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. +Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply run `rustup update`. If you can't install the latest version of Rust you can instead compile using the Minimum Supported Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's @@ -171,7 +214,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can look into [cross compilation](./cross-compiling.md), or use a [pre-built -binary](./installation-binaries.md). +binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. diff --git a/book/src/installation.md b/book/src/installation.md index bc546e09874..627326d2a4a 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,24 +8,27 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). -Community-maintained additional installation methods: +Additionally, there are two extra guides for specific uses: + +- [Raspberry Pi 4 guide](./pi.md). +- [Cross-compiling guide for developers](./cross-compiling.md). + +There are also community-maintained installation methods: - [Homebrew package](./homebrew.md). - Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). -Additionally, there are two extra guides for specific uses: -- [Raspberry Pi 4 guide](./pi.md). -- [Cross-compiling guide for developers](./cross-compiling.md). -## Minimum System Requirements +## Recommended System Requirements + +Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection +After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): -For more information see [System Requirements](./system-requirements.md). 
-[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about +* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer +* Memory: 16 GB RAM or more +* Storage: 2 TB solid state storage +* Network: 100 Mb/s download, 20 Mb/s upload broadband connection diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 0014af8f152..fc4530589d9 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -14,6 +14,15 @@ There are three flags which control the re-orging behaviour: * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally. +* `--proposer-reorg-cutoff T`: only attempt to re-org late blocks when the proposal is being made + before T milliseconds into the slot. Delays between the validator client and the beacon node can + cause some blocks to be requested later than the start of the slot, which makes them more likely + to fail. The default cutoff is 1000ms on mainnet, which gives blocks 3000ms to be signed and + propagated before the attestation deadline at 4000ms. +* `--proposer-reorg-disallowed-offsets N1,N2,N3...`: Prohibit Lighthouse from attempting to re-org at + specific offsets in each epoch. A disallowed offset `N` prevents re-orging blocks from being + proposed at any `slot` such that `slot % SLOTS_PER_EPOCH == N`. The value to this flag is a + comma-separated list of integer offsets. For example, with 32 slots per epoch on mainnet, `--proposer-reorg-disallowed-offsets 0,1` prevents re-orgs of blocks proposed at the first two slots of every epoch. All flags should be applied to `lighthouse bn`. The default configuration is recommended as it balances the chance of the re-org succeeding against the chance of failure due to attestations diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md new file mode 100644 index 00000000000..225f293f978 --- /dev/null +++ b/book/src/lighthouse-ui.md @@ -0,0 +1,33 @@ +# Lighthouse UI (Siren) + +_Documentation for Siren users and developers._ + +[![Chat Badge]][Chat Link] + +[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da +[Chat Link]: https://discord.gg/cyAszAh + +![ui-overview](./imgs/ui.png) + +Siren is a user interface built for Lighthouse that connects to a Lighthouse Beacon Node and +a Lighthouse Validator Client to monitor performance and display key validator +metrics. + +The UI is currently in active development. It resides in the +[Siren](https://github.com/sigp/siren) repository. + +## Topics + +See the following Siren-specific topics for more +information: + +- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. +- [Configuration Guide](./ui-configuration.md) - Explanation of how to set up + and configure Siren. +- [Usage](./ui-usage.md) - Details various Siren components. +- [FAQs](./ui-faqs.md) - Frequently Asked Questions. + +## Contributing + +If you find an issue or bug, or would otherwise like to help out with the +development of the Siren project, please submit issues and PRs to the [Siren](https://github.com/sigp/siren) repository. diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 08f1b51e42a..ec9aeaaee86 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -58,7 +58,7 @@ supported.
Each execution engine has its own flags for configuring the engine API and JWT. Please consult the relevant page for your execution engine for the required flags: -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) @@ -203,5 +203,5 @@ guidance for specific setups. - [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/) - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). - [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) -- [EthDocker: Merge Preparation](https://eth-docker.net/docs/About/MergePrep/) +- [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) - [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/pi.md b/book/src/pi.md index 24796d394e3..d8d154d765a 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -12,18 +12,18 @@ desktop) may be convenient.* ### 1. Install Ubuntu -Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). - -**A 64-bit version is required** and latest version is recommended (Ubuntu -20.04 LTS was the latest at the time of writing). +Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required** A graphical environment is not required in order to use Lighthouse. Only the terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation-source.md#ubuntu). -(I.e., run the `sudo apt install ...` command at that link). +Install the Ubuntu dependencies: + +```bash +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +``` > Tips: > @@ -32,15 +32,18 @@ Install the [Ubuntu Dependencies](installation-source.md#ubuntu). ### 3. Install Rust -Install Rust as per [rustup](https://rustup.rs/). (I.e., run the `curl ... ` -command). +Install Rust as per [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` > Tips: > -> - When prompted, enter `1` for the default installation. -> - Try running `cargo version` after Rust installation completes. If it cannot -> be found, run `source $HOME/.cargo/env`. -> - It's generally advised to append `source $HOME/.cargo/env` to `~/.bashrc`. +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version` . If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. ### 4. 
Install Lighthouse diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 5ce42aa6305..fb112c36753 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -26,7 +26,7 @@ has authority to control the execution engine. Each execution engine has its own flags for configuring the engine API and JWT. Please consult the relevant page of your execution engine for the required flags: -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) - [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) diff --git a/book/src/system-requirements.md b/book/src/system-requirements.md deleted file mode 100644 index 0c51d07cce8..00000000000 --- a/book/src/system-requirements.md +++ /dev/null @@ -1,23 +0,0 @@ -# System Requirements - -Lighthouse is able to run on most low to mid-range consumer hardware, but will perform best when -provided with ample system resources. The following system requirements are for running a beacon -node and a validator client with a modest number of validator keys (less than 100). - -## Minimum - -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection - -During smooth network conditions, Lighthouse's database will fit within 15 GB, but in case of a long -period of non-finality, it is **strongly recommended** that at least 128 GB is available. - -## Recommended - -* Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* 16 GB RAM -* 256 GB solid state storage -* 100 Mb/s download, 20 Mb/s upload broadband connection - diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md new file mode 100644 index 00000000000..5b67b03b374 --- /dev/null +++ b/book/src/ui-configuration.md @@ -0,0 +1,47 @@ +# Configuration + +Siren requires a connection to both a Lighthouse Validator Client +and a Lighthouse Beacon Node. Upon running you will first be greeted by the +following configuration screen. + +![ui-configuration](./imgs/ui-configuration.png) + + +## Connecting to the Clients + +This allows you to enter the address and ports of the associated Lighthouse +Beacon node and Lighthouse Validator client. + +> The Beacon Node must be run with the `--gui` flag set. To allow the browser +> to access the node beyond your local computer you also need to allow CORS in +> the http API. This can be done via `--http-allow-origin "*"`. + +A green tick will appear once Siren is able to connect to both clients. You +can specify different ports for each client by clicking on the advanced tab. + + +## API Token + +The API Token is a secret key that allows you to connect to the validator +client. The validator client's HTTP API is guarded by this key because it +contains sensitive validator information and the ability to modify +validators. Please see [`Validator Authorization`](./api-vc-auth-header.md) +for further details. + +Siren requires this token in order to connect to the Validator client. +The token is located in the default data directory of the validator +client. The default path is +`~/.lighthouse//validators/api-token.txt`. 
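+For example, assuming the default `mainnet` network directory (adjust the path to match the network your validator client actually uses), the token can be displayed with:
+
+```bash
+cat ~/.lighthouse/mainnet/validators/api-token.txt
+```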
+ +The contents of this file for the desired validator client need to be +entered. + +## Name + +This is your name; it can be modified and is solely used for aesthetics. + +## Device + +This is a name that can be associated with the validator client/beacon +node pair. Multiple such pairs can be remembered for quick swapping between +them. diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md new file mode 100644 index 00000000000..51aa9385a44 --- /dev/null +++ b/book/src/ui-faqs.md @@ -0,0 +1,16 @@ +# Frequently Asked Questions + +## 1. Are there any requirements to run Siren? +Yes, Siren requires Lighthouse v3.5.1 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. + +## 2. Where can I find my API token? +The required API token may be found in the default data directory of the validator client. For more information please refer to the Lighthouse UI configuration [`API token` section](./ui-configuration.md#api-token). + +## 3. How do I fix the Node Network Errors? +If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR, you can refer to the Lighthouse UI configuration [`Connecting to the Clients` section](./ui-configuration.md#connecting-to-the-clients). + +## 4. How do I change my Beacon or Validator address after logging in? +Once you have successfully arrived at the main dashboard, use the sidebar to access the settings view. In the top right-hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. + +## 5. Why doesn't my validator balance graph show any data? +If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20 minutes. diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md new file mode 100644 index 00000000000..b8ae788c69b --- /dev/null +++ b/book/src/ui-installation.md @@ -0,0 +1,105 @@ +# 📦 Installation + +Siren runs on Linux, macOS and Windows. + +## Version Requirement +The Siren app requires Lighthouse v3.5.1 or higher to function properly. These versions can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. + +## Pre-Built Electron Packages + +There are pre-compiled Electron packages for each operating system, which can +be downloaded and executed. These can be found on the +[releases](https://github.com/sigp/siren/releases) page of the +Siren repository. + +Simply download the package specific to your operating system and run it. + +## Building From Source + +### Requirements + +Building from source requires `Node v18` and `yarn`. + +### Building From Source + +The Electron app can be built from source by first cloning the repository and +entering the directory: + +``` +$ git clone https://github.com/sigp/siren.git +$ cd siren +``` + +Once cloned, the Electron app can be built and run via the Makefile by: + +``` +$ make +``` + +Alternatively, it can be built via: + +``` +$ yarn +``` + +Once completed successfully, the Electron app can be run via: + +``` +$ yarn dev +``` + +### Running In The Browser + +#### Docker (Recommended) + +Docker is the recommended way to run a web server that hosts Siren and can be +connected to via a web browser.
We recommend this method as it establishes a +production-grade web server to host the application. + +`docker` must be installed and the daemon must be running. + +The Docker image can be built and run via the Makefile by running: +``` +$ make docker +``` + +Alternatively, to run with Docker, the image needs to be built. From the repository directory +run: +``` +$ docker build -t siren . +``` + +Then to run the image: +``` +$ docker run --rm -ti --name siren -p 80:80 siren +``` + +This will open port 80 and allow your browser to connect. You can choose +another local port by modifying the command. For example `-p 8000:80` will open +port 8000. + +To view Siren, simply go to `http://localhost` in your web browser. + +#### Development Server + +A development server can also be started, exposing local port 3000, via: +``` +$ yarn start +``` + +Once executed, you can direct your web browser to the following URL to interact +with the app: +``` +http://localhost:3000 +``` + +A production version of the app can be built via +``` +$ yarn build +``` +and then further hosted via a production web server. + +### Known Issues + +If you experience any issues running the UI, please create an issue on the +[Lighthouse UI](https://github.com/sigp/lighthouse-ui) repository. diff --git a/book/src/ui-usage.md b/book/src/ui-usage.md new file mode 100644 index 00000000000..e88c4677a87 --- /dev/null +++ b/book/src/ui-usage.md @@ -0,0 +1,61 @@ +# Usage + +# Dashboard + +Siren's dashboard view provides a summary of all performance and key validator metrics. Sync statuses, uptimes, accumulated rewards, hardware and network metrics are all consolidated on the dashboard for evaluation. + +![](imgs/ui-dashboard.png) + +## Account Earnings + +The account earnings component accumulates reward data from all registered validators, providing a summation of total rewards earned while staking. Given current conversion rates, this component also converts your balance into your selected fiat currency. + +Below, in the earnings section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific timeframe based on current device and network conditions. + +![](imgs/ui-account-earnings.png) + +## Validator Table + +The validator table component is a list of all registered validators, which includes data such as name, index, total balance, earned rewards and current status. Each validator row also contains a link to a detailed data modal and additional data provided by [Beaconcha.in](https://beaconcha.in). + +![](imgs/ui-validator-table.png) + +## Validator Balance Chart + +The validator balance component is a graphical representation of each validator's balance over the latest 10 epochs. Take note that only active validators are rendered in the chart visualization. + +![](imgs/ui-validator-balance1.png) + +By clicking on the chart component you can filter selected validators in the render. This allows for greater resolution in the rendered visualization. + + + + + + +## Hardware Usage and Device Diagnostics + +The hardware usage component gathers information about the device on which the Beacon Node is currently running. It displays the disk usage, CPU metrics and memory usage of the Beacon Node device. The device diagnostics component provides the sync status of the execution client and beacon node. + + + + + + # Validator Management + +Siren's validator management view provides a detailed overview of all validators with options to deposit to and/or add new validators.
Each validator table row displays the validator name, index, balance, rewards, status and all available actions per validator. + +![](imgs/ui-validator-management.png) + +## Validator Modal + +Clicking the validator icon activates a detailed validator modal component. This component also allows users to trigger validator actions and as well to view and update validator graffiti. Each modal contains the validator total income with hourly, daily and weekly earnings estimates. + + + +# Settings + +Siren's settings view provides access to the application theme, version, name, device name and important external links. From the settings page users can also access the configuration screen to adjust any beacon or validator node parameters. + +![](imgs/ui-settings.png) diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index e6fbc0f16f8..0793af20db5 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -59,14 +59,7 @@ The following fields are returned: - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. -From this data you can calculate some interesting figures: - -#### Participation Rate - -`previous_epoch_attesting_gwei / previous_epoch_active_gwei` - -Expresses the ratio of validators that managed to have an attestation -voting upon the previous epoch included in a block. +From this data you can calculate: #### Justification/Finalization Rate diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 9312716b7b2..d71e1b56c9b 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "3.4.0" +version = "4.1.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index 9a37320028b..c3d7ac48a98 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -53,6 +53,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("network-dir") ) + .arg( + Arg::with_name("enr-udp6-port") + .long("enr-udp6-port") + .value_name("PORT") + .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IpV6.") + .takes_value(true), + ) .arg( Arg::with_name("enable-enr-auto-update") .short("x") diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index b7a66cbbd84..d3ee58a9077 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,7 +1,7 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; -use lighthouse_network::discv5::enr::EnrBuilder; +use lighthouse_network::discovery::create_enr_builder_from_config; use lighthouse_network::discv5::IpMode; use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; use lighthouse_network::{ @@ -57,12 +57,24 @@ impl BootNodeConfig { let logger = slog_scope::logger(); - set_network_config(&mut network_config, matches, &data_dir, &logger, true)?; + set_network_config(&mut network_config, matches, &data_dir, &logger)?; - // Set the enr-udp-port to the default listening port if it was not specified. - if !matches.is_present("enr-udp-port") { - network_config.enr_udp_port = Some(network_config.discovery_port); - } + // Set the Enr UDP ports to the listening ports if not present. 
+ if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { + network_config.enr_udp4_port = Some( + network_config + .enr_udp4_port + .unwrap_or(listening_addr_v4.udp_port), + ) + }; + + if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { + network_config.enr_udp6_port = Some( + network_config + .enr_udp6_port + .unwrap_or(listening_addr_v6.udp_port), + ) + }; // By default this is enabled. If it is not set, revert to false. if !matches.is_present("enable-enr-auto-update") { @@ -70,17 +82,29 @@ impl BootNodeConfig { } // the address to listen on - let listen_socket = - SocketAddr::new(network_config.listen_address, network_config.discovery_port); - if listen_socket.is_ipv6() { - // create ipv6 sockets and enable ipv4 mapped addresses. - network_config.discv5_config.ip_mode = IpMode::Ip6 { - enable_mapped_addresses: true, - }; - } else { - // Set explicitly as ipv4 otherwise - network_config.discv5_config.ip_mode = IpMode::Ip4; - } + let listen_socket = match network_config.listen_addrs().clone() { + lighthouse_network::ListenAddress::V4(v4_addr) => { + // Set explicitly as ipv4 otherwise + network_config.discv5_config.ip_mode = IpMode::Ip4; + v4_addr.udp_socket_addr() + } + lighthouse_network::ListenAddress::V6(v6_addr) => { + // create ipv6 sockets and enable ipv4 mapped addresses. + network_config.discv5_config.ip_mode = IpMode::Ip6 { + enable_mapped_addresses: false, + }; + + v6_addr.udp_socket_addr() + } + lighthouse_network::ListenAddress::DualStack(_v4_addr, v6_addr) => { + // create ipv6 sockets and enable ipv4 mapped addresses. + network_config.discv5_config.ip_mode = IpMode::Ip6 { + enable_mapped_addresses: true, + }; + + v6_addr.udp_socket_addr() + } + }; let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(&private_key)?; @@ -115,30 +139,8 @@ impl BootNodeConfig { // Build the local ENR let mut local_enr = { - let mut builder = EnrBuilder::new("v4"); - // Set the enr address if specified. Set also the port. - // NOTE: if the port is specified but the the address is not, the port won't be - // set since it can't be known if it's an ipv6 or ipv4 udp port. - if let Some(enr_address) = network_config.enr_address { - match enr_address { - std::net::IpAddr::V4(ipv4_addr) => { - builder.ip4(ipv4_addr); - if let Some(port) = network_config.enr_udp_port { - builder.udp4(port); - } - } - std::net::IpAddr::V6(ipv6_addr) => { - builder.ip6(ipv6_addr); - if let Some(port) = network_config.enr_udp_port { - builder.udp6(port); - // We are enabling mapped addresses in the boot node in this case, - // so advertise an udp4 port as well. 
- builder.udp4(port); - } - } - } - }; - + let enable_tcp = false; + let mut builder = create_enr_builder_from_config(&network_config, enable_tcp); // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { builder.add_value("eth2", enr_fork_bytes.as_slice()); diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 8f38fb300dc..3f5419c2c68 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -44,7 +44,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); // construct the discv5 server - let mut discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); + let mut discv5: Discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); // If there are any bootnodes add them to the routing table for enr in boot_nodes { diff --git a/bors.toml b/bors.toml index 096ac3b29a2..9e633d63f57 100644 --- a/bors.toml +++ b/bors.toml @@ -10,7 +10,6 @@ status = [ "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", "check-benchmarks", - "check-consensus", "clippy", "arbitrary-check", "cargo-audit", diff --git a/common/compare_fields/src/lib.rs b/common/compare_fields/src/lib.rs index a0166eb500a..bc2f5446ad2 100644 --- a/common/compare_fields/src/lib.rs +++ b/common/compare_fields/src/lib.rs @@ -115,11 +115,7 @@ impl Comparison { let mut children = vec![]; for i in 0..std::cmp::max(a.len(), b.len()) { - children.push(FieldComparison::new( - format!("{:}", i), - &a.get(i), - &b.get(i), - )); + children.push(FieldComparison::new(format!("{i}"), &a.get(i), &b.get(i))); } Self::parent(field_name, a == b, children) @@ -164,8 +160,8 @@ impl FieldComparison { Self { field_name, equal: a == b, - a: format!("{:?}", a), - b: format!("{:?}", b), + a: format!("{a:?}"), + b: format!("{b:?}"), } } diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index beabc6ca9ba..a8b92b3d548 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -1,4 +1,3 @@ -#![recursion_limit = "256"] extern crate proc_macro; use proc_macro::TokenStream; @@ -32,7 +31,7 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream { _ => panic!("compare_fields_derive only supports named struct fields."), }; - let field_name = format!("{:}", ident_a); + let field_name = ident_a.to_string(); let ident_b = ident_a.clone(); let quote = if is_slice(field) { diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index cfa91aecf18..b3ac41577d8 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -21,7 +21,7 @@ bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../../consensus/ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../../consensus/ssz_derive" } futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 58b4c88b3c7..e03cc2e9b02 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -14,22 +14,22 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; -use self::mixin::{RequestAccept, ResponseForkName, ResponseOptional}; +use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; 
-use ::types::map_fork_name_with; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -pub use sensitive_url::SensitiveUrl; +pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; use std::path::PathBuf; use std::time::Duration; +use store::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); @@ -339,7 +339,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_root( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -358,7 +358,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -377,7 +377,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -397,7 +397,8 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -427,7 +428,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -467,7 +468,7 @@ impl BeaconNodeHttpClient { slot: Option, index: Option, epoch: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -500,7 +501,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -523,7 +524,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -548,7 +549,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -569,7 +570,7 @@ impl BeaconNodeHttpClient { &self, slot: Option, parent_root: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -596,7 +597,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -611,7 +612,7 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks>( + pub async fn post_beacon_blocks>( &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { @@ -631,7 +632,7 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. 
- pub async fn post_beacon_blinded_blocks>( + pub async fn post_beacon_blinded_blocks>( &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { @@ -676,42 +677,17 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. - let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blinded_blocks/{block_id}` @@ -720,43 +696,17 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blinded_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> - { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blinded_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. 
- let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blocks` (LEGACY) @@ -817,7 +767,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -836,7 +786,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations( &self, block_id: BlockId, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1012,6 +962,24 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/pool/bls_to_execution_changes` + pub async fn post_beacon_pool_bls_to_execution_changes( + &self, + address_changes: &[SignedBlsToExecutionChange], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("bls_to_execution_changes"); + + self.post(path, &address_changes).await?; + + Ok(()) + } + /// `GET beacon/deposit_snapshot` pub async fn get_deposit_snapshot(&self) -> Result, Error> { use ssz::Decode; @@ -1026,6 +994,58 @@ impl BeaconNodeHttpClient { .transpose() } + /// `POST beacon/rewards/sync_committee` + pub async fn post_beacon_rewards_sync_committee( + &self, + rewards: &[Option>], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("sync_committee"); + + self.post(path, &rewards).await?; + + Ok(()) + } + + /// `GET beacon/rewards/blocks` + pub async fn get_beacon_rewards_blocks(&self, epoch: Epoch) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("blocks"); + + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + + self.get(path).await + } + + /// `POST beacon/rewards/attestations` + pub async fn post_beacon_rewards_attestations( + &self, + attestations: &[ValidatorId], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("rewards") + .push("attestations"); + + self.post(path, &attestations).await?; + + Ok(()) + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, @@ -1254,28 +1274,12 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } - /// `GET v1/debug/beacon/states/{state_id}` (LEGACY) - pub async fn get_debug_beacon_states_v1( - &self, - state_id: StateId, - ) -> Result>>, Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("debug") - .push("beacon") - .push("states") - .push(&state_id.to_string()); - - self.get_opt(path).await - } - /// `GET debug/beacon/states/{state_id}` /// `-H "accept: application/octet-stream"` pub async fn get_debug_beacon_states_ssz( @@ -1321,6 +1325,18 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v1/debug/fork_choice` + pub async fn get_debug_fork_choice(&self) -> Result { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("fork_choice"); + + self.get(path).await + } + /// `GET validator/duties/proposer/{epoch}` pub async fn get_validator_duties_proposer( &self, @@ -1340,7 +1356,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks>( + pub async fn get_validator_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1351,7 +1367,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + pub async fn get_validator_blocks_modular>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1383,7 +1399,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks>( + pub async fn get_validator_blinded_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1399,7 +1415,10 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular>( + pub async fn get_validator_blinded_blocks_modular< + T: EthSpec, + Payload: AbstractExecPayload, + >( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1633,7 +1652,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 2dced1c449a..bb933dbe121 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,8 +1,11 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
mod attestation_performance; +pub mod attestation_rewards; mod block_packing_efficiency; mod block_rewards; +mod standard_block_rewards; +mod sync_committee_rewards; use crate::{ ok_or_error, @@ -10,7 +13,7 @@ use crate::{ BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId, }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, + BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode, }; use proto_array::core::ProtoArray; use reqwest::IntoUrl; @@ -22,11 +25,14 @@ use store::{AnchorInfo, Split, StoreConfig}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, }; +pub use attestation_rewards::StandardAttestationRewards; pub use block_packing_efficiency::{ BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; +pub use standard_block_rewards::StandardBlockReward; +pub use sync_committee_rewards::SyncCommitteeReward; // Define "legacy" implementations of `Option` which use four bytes for encoding the union // selector. @@ -560,4 +566,73 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } + + /// + /// Analysis endpoints. + /// + + /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot + pub async fn get_lighthouse_analysis_block_rewards( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_rewards"); + + path.query_pairs_mut() + .append_pair("start_slot", &start_slot.to_string()) + .append_pair("end_slot", &end_slot.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_block_packing( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_packing_efficiency"); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_attestation_performance( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + target: String, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("analysis") + .push("attestation_performance") + .push(&target); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } } diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs new file mode 100644 index 00000000000..314ffb85121 --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -0,0 +1,44 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid for attestations +// All rewards in GWei + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct IdealAttestationRewards { + // Validator's effective balance in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub effective_balance: u64, + // Ideal attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // Ideal attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub target: u64, + // Ideal attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub source: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct TotalAttestationRewards { + // one entry for every validator based on their attestations in the epoch + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub target: i64, + // attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub source: i64, + // TBD attester's inclusion_delay reward in gwei (phase0 only) + // pub inclusion_delay: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct StandardAttestationRewards { + pub ideal_rewards: Vec, + pub total_rewards: Vec, +} diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs new file mode 100644 index 00000000000..502577500d9 --- /dev/null +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -0,0 +1,26 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards for a single block +// All rewards in GWei +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct StandardBlockReward { + // proposer of the block, the proposer index who receives these rewards + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_index: u64, + // total block reward in gwei, + // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub total: u64, + // block reward component due to included attestations in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attestations: u64, + // block reward component due to included sync_aggregate in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub sync_aggregate: u64, + // block reward component due to included proposer_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_slashings: u64, + // block reward component due to included attester_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attester_slashings: u64, +} diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs 
b/common/eth2/src/lighthouse/sync_committee_rewards.rs new file mode 100644 index 00000000000..e215d8e3e0b --- /dev/null +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -0,0 +1,13 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid to sync committee members for attesting headers +// All rewards in GWei + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct SyncCommitteeReward { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // sync committee reward in gwei for the validator + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub reward: i64, +} diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 88b5b684019..90c128751d0 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -642,6 +642,30 @@ impl ValidatorClientHttpClient { let url = self.make_gas_limit_url(pubkey)?; self.delete_with_raw_response(url, &()).await } + + /// `POST /eth/v1/validator/{pubkey}/voluntary_exit` + pub async fn post_validator_voluntary_exit( + &self, + pubkey: &PublicKeyBytes, + epoch: Option, + ) -> Result { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("voluntary_exit"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.post(path, &()).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 92439337f61..fa5d4ae119e 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -144,3 +144,8 @@ pub struct UpdateGasLimitRequest { #[serde(with = "eth2_serde_utils::quoted_u64")] pub gas_limit: u64, } + +#[derive(Deserialize)] +pub struct VoluntaryExitQuery { + pub epoch: Option, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 70129724600..97c7ff34e13 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -200,6 +200,14 @@ pub struct ExecutionOptimisticResponse { + pub execution_optimistic: Option, + pub finalized: Option, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct GenericResponse { @@ -222,6 +230,18 @@ impl GenericResponse { data: self.data, } } + + pub fn add_execution_optimistic_finalized( + self, + execution_optimistic: bool, + finalized: bool, + ) -> ExecutionOptimisticFinalizedResponse { + ExecutionOptimisticFinalizedResponse { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + data: self.data, + } + } } #[derive(Debug, PartialEq, Clone, Serialize)] @@ -236,21 +256,6 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ExecutionOptimisticForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub data: T, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub data: T, -} - #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub 
struct RootData { pub root: Hash256, @@ -270,11 +275,20 @@ pub struct FinalityCheckpointsData { } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(try_from = "&str")] pub enum ValidatorId { PublicKey(PublicKeyBytes), Index(u64), } +impl TryFrom<&str> for ValidatorId { + type Error = String; + + fn try_from(s: &str) -> Result { + Self::from_str(s) + } +} + impl FromStr for ValidatorId { type Err = String; @@ -903,6 +917,79 @@ pub struct SseLateHead { pub execution_optimistic: bool, } +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)) +)] +#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] +#[serde(untagged)] +pub struct SsePayloadAttributes { + #[superstruct(getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub timestamp: u64, + #[superstruct(getter(copy))] + pub prev_randao: Hash256, + #[superstruct(getter(copy))] + pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, +} + +#[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] +pub struct SseExtendedPayloadAttributesGeneric { + pub proposal_slot: Slot, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_index: u64, + pub parent_block_root: Hash256, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub parent_block_number: u64, + pub parent_block_hash: ExecutionBlockHash, + pub payload_attributes: T, +} + +pub type SseExtendedPayloadAttributes = SseExtendedPayloadAttributesGeneric; +pub type VersionedSsePayloadAttributes = ForkVersionedResponse; + +impl ForkVersionDeserialize for SsePayloadAttributes { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Merge => serde_json::from_value(value) + .map(Self::V1) + .map_err(serde::de::Error::custom), + ForkName::Capella => serde_json::from_value(value) + .map(Self::V2) + .map_err(serde::de::Error::custom), + ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( + "SsePayloadAttributes deserialization for {fork_name} not implemented" + ))), + } + } +} + +impl ForkVersionDeserialize for SseExtendedPayloadAttributes { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let helper: SseExtendedPayloadAttributesGeneric = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(Self { + proposal_slot: helper.proposal_slot, + proposer_index: helper.proposer_index, + parent_block_root: helper.parent_block_root, + parent_block_number: helper.parent_block_number, + parent_block_hash: helper.parent_block_hash, + payload_attributes: SsePayloadAttributes::deserialize_by_fork::( + helper.payload_attributes, + fork_name, + )?, + }) + } +} + #[derive(PartialEq, Debug, Serialize, Clone)] #[serde(bound = "T: EthSpec", untagged)] pub enum EventKind { @@ -916,6 +1003,7 @@ pub enum EventKind { LateHead(SseLateHead), #[cfg(feature = "lighthouse")] BlockReward(BlockReward), + PayloadAttributes(VersionedSsePayloadAttributes), } impl EventKind { @@ -928,6 +1016,7 @@ impl EventKind { EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint", EventKind::ChainReorg(_) => "chain_reorg", EventKind::ContributionAndProof(_) => "contribution_and_proof", + EventKind::PayloadAttributes(_) => "payload_attributes", EventKind::LateHead(_) => "late_head", #[cfg(feature = "lighthouse")] EventKind::BlockReward(_) => 
"block_reward", @@ -983,6 +1072,11 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Contribution and Proof: {:?}", e)) })?, ))), + "payload_attributes" => Ok(EventKind::PayloadAttributes( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Payload Attributes: {:?}", e)) + })?, + )), #[cfg(feature = "lighthouse")] "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), @@ -1012,6 +1106,7 @@ pub enum EventTopic { ChainReorg, ContributionAndProof, LateHead, + PayloadAttributes, #[cfg(feature = "lighthouse")] BlockReward, } @@ -1028,6 +1123,7 @@ impl FromStr for EventTopic { "finalized_checkpoint" => Ok(EventTopic::FinalizedCheckpoint), "chain_reorg" => Ok(EventTopic::ChainReorg), "contribution_and_proof" => Ok(EventTopic::ContributionAndProof), + "payload_attributes" => Ok(EventTopic::PayloadAttributes), "late_head" => Ok(EventTopic::LateHead), #[cfg(feature = "lighthouse")] "block_reward" => Ok(EventTopic::BlockReward), @@ -1046,6 +1142,7 @@ impl fmt::Display for EventTopic { EventTopic::FinalizedCheckpoint => write!(f, "finalized_checkpoint"), EventTopic::ChainReorg => write!(f, "chain_reorg"), EventTopic::ContributionAndProof => write!(f, "contribution_and_proof"), + EventTopic::PayloadAttributes => write!(f, "payload_attributes"), EventTopic::LateHead => write!(f, "late_head"), #[cfg(feature = "lighthouse")] EventTopic::BlockReward => write!(f, "block_reward"), @@ -1120,6 +1217,26 @@ pub struct LivenessResponseData { pub is_live: bool, } +#[derive(Debug, Serialize, Deserialize)] +pub struct ForkChoice { + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + pub fork_choice_nodes: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ForkChoiceNode { + pub slot: Slot, + pub block_root: Hash256, + pub parent_root: Option, + pub justified_epoch: Option, + pub finalized_epoch: Option, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub weight: u64, + pub validity: Option, + pub execution_block_hash: Option, +} + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7e3c025a83b..7e5506667ff 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -278,26 +278,6 @@ define_hardcoded_nets!( // directory. GENESIS_STATE_IS_KNOWN ), - ( - // Network name (must be unique among all networks). - kiln, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - "kiln", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN - ), - ( - // Network name (must be unique among all networks). - ropsten, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - "ropsten", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN - ), ( // Network name (must be unique among all networks). 
sepolia, diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 00803845b5e..b5ad0b235a4 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } eth2_config = { path = "../eth2_config"} -enr = { version = "0.6.2", features = ["ed25519", "k256"] } +discv5 = "0.2.2" diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index d55ef3f3b5f..ca1d1e88a86 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -36,6 +36,9 @@ ALTAIR_FORK_EPOCH: 512 # Merge BELLATRIX_FORK_VERSION: 0x02000064 BELLATRIX_FORK_EPOCH: 385536 +# Capella +CAPELLA_FORK_VERSION: 0x03000064 +CAPELLA_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/kiln/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/kiln/boot_enr.yaml deleted file mode 100644 index 4c03b0f19e4..00000000000 --- a/common/eth2_network_config/built_in_network_configs/kiln/boot_enr.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk -- enr:-KG4QFkPJUFWuONp5grM94OJvNht9wX6N36sA4wqucm6Z02ECWBQRmh6AzndaLVGYBHWre67mjK-E0uKt2CIbWrsZ_8DhGV0aDKQc6pfXHAAAHAyAAAAAAAAAIJpZIJ2NIJpcISl6LTmiXNlY3AyNTZrMaEDHlSNOgYrNWP8_l_WXqDMRvjv6gUAvHKizfqDDVc8feaDdGNwgiMog3VkcIIjKA -- enr:-MK4QI-wkVW1PxL4ksUM4H_hMgTTwxKMzvvDMfoiwPBuRxcsGkrGPLo4Kho3Ri1DEtJG4B6pjXddbzA9iF2gVctxv42GAX9v5WG5h2F0dG5ldHOIAAAAAAAAAACEZXRoMpBzql9ccAAAcDIAAAAAAAAAgmlkgnY0gmlwhKRcjMiJc2VjcDI1NmsxoQK1fc46pmVHKq8HNYLkSVaUv4uK2UBsGgjjGWU6AAhAY4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA diff --git a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml deleted file mode 100644 index 5631c8a0bf5..00000000000 --- a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# Extends the mainnet preset -CONFIG_NAME: 'kiln' -PRESET_BASE: 'mainnet' -# Genesis -# --------------------------------------------------------------- -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 95000 -# Mar 11th, 2022, 14:00 UTC -MIN_GENESIS_TIME: 1647007200 -# Genesis fork -GENESIS_FORK_VERSION: 0x70000069 -# 300 seconds (5 min) -GENESIS_DELAY: 300 - - -# Forking -# --------------------------------------------------------------- -# Some forks are disabled for now: -# - These may be re-assigned to another fork-version later -# - Temporarily set to max uint64 value: 2**64 - 1 - -# Altair -ALTAIR_FORK_VERSION: 0x70000070 -ALTAIR_FORK_EPOCH: 50 -# Bellatrix -BELLATRIX_FORK_VERSION: 0x70000071 -BELLATRIX_FORK_EPOCH: 150 -TERMINAL_TOTAL_DIFFICULTY: 20000000000000 -TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 -TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - -# Sharding -SHARDING_FORK_VERSION: 0x03000000 -SHARDING_FORK_EPOCH: 18446744073709551615 - - -# Time parameters -# --------------------------------------------------------------- -# 12 
seconds -SECONDS_PER_SLOT: 12 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 -# 16 blocks is ~190s -ETH1_FOLLOW_DISTANCE: 16 - - -# Validator cycle -# --------------------------------------------------------------- -# 2**2 (= 4) -INACTIVITY_SCORE_BIAS: 4 -# 2**4 (= 16) -INACTIVITY_SCORE_RECOVERY_RATE: 16 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 - - -# Deposit contract -# --------------------------------------------------------------- -# Custom Ethereum testnet -DEPOSIT_CHAIN_ID: 1337802 -DEPOSIT_NETWORK_ID: 1337802 -DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 diff --git a/common/eth2_network_config/built_in_network_configs/kiln/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/kiln/deploy_block.txt deleted file mode 100644 index c227083464f..00000000000 --- a/common/eth2_network_config/built_in_network_configs/kiln/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -0 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/kiln/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/kiln/genesis.ssz.zip deleted file mode 100644 index 309b323a5b6..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/kiln/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 6e87a708f82..0bbf873a3fb 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -36,6 +36,9 @@ ALTAIR_FORK_EPOCH: 74240 # Merge BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC +# Capella +CAPELLA_FORK_VERSION: 0x03000000 +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index d173be20de5..69d65ca8fc8 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -35,8 +35,11 @@ ALTAIR_FORK_EPOCH: 36660 # Merge BELLATRIX_FORK_VERSION: 0x02001020 BELLATRIX_FORK_EPOCH: 112260 +# Capella +CAPELLA_FORK_VERSION: 0x03001020 +CAPELLA_FORK_EPOCH: 162304 # Sharding -SHARDING_FORK_VERSION: 0x03001020 +SHARDING_FORK_VERSION: 0x04001020 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
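The Capella entries added to these config files follow the usual scheduling convention: an epoch of 18446744073709551615 (2**64 - 1, the "far-future epoch") means the fork is defined but not yet scheduled, while a concrete epoch (e.g. 194048 on mainnet) activates it. A minimal illustrative sketch of how such a schedule can be interpreted, using toy types rather than Lighthouse's actual `ChainSpec`; the constants in `main` are taken from the mainnet hunk above:

```rust
// Illustrative only: selecting the fork version that applies at a given epoch,
// honouring the far-future-epoch convention used in these config files.
const FAR_FUTURE_EPOCH: u64 = u64::MAX;

struct ForkSchedule {
    capella_fork_epoch: u64,
    capella_fork_version: [u8; 4],
    bellatrix_fork_version: [u8; 4],
}

impl ForkSchedule {
    fn fork_version_at_epoch(&self, epoch: u64) -> [u8; 4] {
        if self.capella_fork_epoch != FAR_FUTURE_EPOCH && epoch >= self.capella_fork_epoch {
            self.capella_fork_version
        } else {
            self.bellatrix_fork_version
        }
    }
}

fn main() {
    // Mainnet values from the config above (used here purely for illustration).
    let mainnet = ForkSchedule {
        capella_fork_epoch: 194_048,
        capella_fork_version: [0x03, 0x00, 0x00, 0x00],
        bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00],
    };
    assert_eq!(mainnet.fork_version_at_epoch(194_048), [0x03, 0x00, 0x00, 0x00]);
    assert_eq!(mainnet.fork_version_at_epoch(194_047), [0x02, 0x00, 0x00, 0x00]);
}
```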
diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml deleted file mode 100644 index 27e6e53fc42..00000000000 --- a/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Pari -- enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk -# Teku -- enr:-KG4QMJSJ7DHk6v2p-W8zQ3Xv7FfssZ_1E3p2eY6kN13staMObUonAurqyWhODoeY6edXtV8e9eL9RnhgZ9va2SMDRQMhGV0aDKQS-iVMYAAAHD0AQAAAAAAAIJpZIJ2NIJpcIQDhAAhiXNlY3AyNTZrMaEDXBVUZhhmdy1MYor1eGdRJ4vHYghFKDgjyHgt6sJ-IlCDdGNwgiMog3VkcIIjKA diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml deleted file mode 100644 index 5dad3ff7599..00000000000 --- a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# Extends the mainnet preset -PRESET_BASE: 'mainnet' -CONFIG_NAME: 'ropsten' - -# Genesis -# --------------------------------------------------------------- -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 100000 -# Monday, May 30th, 2022 3:00:00 PM +UTC -MIN_GENESIS_TIME: 1653318000 -GENESIS_FORK_VERSION: 0x80000069 -GENESIS_DELAY: 604800 - - -# Forking -# --------------------------------------------------------------- -# Some forks are disabled for now: -# - These may be re-assigned to another fork-version later -# - Temporarily set to max uint64 value: 2**64 - 1 - -# Altair -ALTAIR_FORK_VERSION: 0x80000070 -ALTAIR_FORK_EPOCH: 500 -# Merge -BELLATRIX_FORK_VERSION: 0x80000071 -BELLATRIX_FORK_EPOCH: 750 -TERMINAL_TOTAL_DIFFICULTY: 50000000000000000 -TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 -TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - -# Sharding -SHARDING_FORK_VERSION: 0x03001020 -SHARDING_FORK_EPOCH: 18446744073709551615 - -# Time parameters -# --------------------------------------------------------------- -# 12 seconds -SECONDS_PER_SLOT: 12 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours -ETH1_FOLLOW_DISTANCE: 2048 - - -# Validator cycle -# --------------------------------------------------------------- -# 2**2 (= 4) -INACTIVITY_SCORE_BIAS: 4 -# 2**4 (= 16) -INACTIVITY_SCORE_RECOVERY_RATE: 16 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 - - -# Fork choice -# --------------------------------------------------------------- -# 40% -PROPOSER_SCORE_BOOST: 40 - -# Deposit contract -# --------------------------------------------------------------- -DEPOSIT_CHAIN_ID: 3 -DEPOSIT_NETWORK_ID: 3 -DEPOSIT_CONTRACT_ADDRESS: 0x6f22fFbC56eFF051aECF839396DD1eD9aD6BBA9D diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt deleted file mode 100644 index dd46f23b620..00000000000 --- a/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -12269949 \ No newline at end of file diff --git 
a/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip deleted file mode 100644 index 5f83ed3b65f..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 4c3e4bb6eca..29465728995 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -29,8 +29,8 @@ TERMINAL_BLOCK_HASH: 0x000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 # Capella -CAPELLA_FORK_VERSION: 0x03001020 -CAPELLA_FORK_EPOCH: 18446744073709551615 +CAPELLA_FORK_VERSION: 0x90000072 +CAPELLA_FORK_EPOCH: 56832 # Sharding SHARDING_FORK_VERSION: 0x04001020 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 7aef784373d..7274bbf029b 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -11,7 +11,7 @@ //! To add a new built-in testnet, add it to the `define_hardcoded_nets` invocation in the `eth2_config` //! crate. -use enr::{CombinedKey, Enr}; +use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3c136b18b92..d30f45ca292 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.4.0-", - fallback = "Lighthouse/v3.4.0" + prefix = "Lighthouse/v4.1.0-", + fallback = "Lighthouse/v4.1.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 5c0e4c1ca14..7b8e9ba9a8a 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -31,6 +31,77 @@ where } } + /// Inserts a key without removal of potentially expired elements. + /// Returns true if the key does not already exist. + pub fn raw_insert(&mut self, key: Key) -> bool { + // check the cache before removing elements + let is_new = self.map.insert(key.clone()); + + // add the new key to the list, if it doesn't already exist. + if is_new { + self.list.push_back(Element { + key, + inserted: Instant::now(), + }); + } else { + let position = self + .list + .iter() + .position(|e| e.key == key) + .expect("Key is not new"); + let mut element = self + .list + .remove(position) + .expect("Position is not occupied"); + element.inserted = Instant::now(); + self.list.push_back(element); + } + #[cfg(test)] + self.check_invariant(); + is_new + } + + /// Removes a key from the cache without purging expired elements. Returns true if the key + /// existed. 
+ pub fn raw_remove(&mut self, key: &Key) -> bool { + if self.map.remove(key) { + let position = self + .list + .iter() + .position(|e| &e.key == key) + .expect("Key must exist"); + self.list + .remove(position) + .expect("Position is not occupied"); + true + } else { + false + } + } + + /// Removes all expired elements and returns them + pub fn remove_expired(&mut self) -> Vec { + if self.list.is_empty() { + return Vec::new(); + } + + let mut removed_elements = Vec::new(); + let now = Instant::now(); + // remove any expired results + while let Some(element) = self.list.pop_front() { + if element.inserted + self.ttl > now { + self.list.push_front(element); + break; + } + self.map.remove(&element.key); + removed_elements.push(element.key); + } + #[cfg(test)] + self.check_invariant(); + + removed_elements + } + // Inserts a new key. It first purges expired elements to do so. // // If the key was not present this returns `true`. If the value was already present this @@ -89,6 +160,12 @@ where self.map.contains(key) } + /// Shrink the mappings to fit the current size. + pub fn shrink_to_fit(&mut self) { + self.map.shrink_to_fit(); + self.list.shrink_to_fit(); + } + #[cfg(test)] #[track_caller] fn check_invariant(&self) { diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 569eed6082b..c88ec0bd5af 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -4,13 +4,21 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" libc = "0.2.79" parking_lot = "0.12.0" +jemalloc-ctl = { version = "0.5.0", optional = true } + +# Jemalloc's background_threads feature requires Linux (pthreads). +[target.'cfg(target_os = "linux")'.dependencies] +jemallocator = { version = "0.5.0", optional = true, features = ["stats", "background_threads"] } + +[target.'cfg(not(target_os = "linux"))'.dependencies] +jemallocator = { version = "0.5.0", optional = true, features = ["stats"] } [features] mallinfo2 = [] +jemalloc = ["jemallocator", "jemalloc-ctl"] +jemalloc-profiling = ["jemallocator/profiling"] diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs new file mode 100644 index 00000000000..c796ea39a19 --- /dev/null +++ b/common/malloc_utils/src/jemalloc.rs @@ -0,0 +1,52 @@ +//! Set the allocator to `jemalloc`. +//! +//! Due to `jemalloc` requiring configuration at compile time or immediately upon runtime +//! initialisation it is configured via a Cargo config file in `.cargo/config.toml`. +//! +//! The `jemalloc` tuning can be overriden by: +//! +//! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. +//! B) `_RJEM_MALLOC_CONF` at runtime. +use jemalloc_ctl::{arenas, epoch, stats, Error}; +use lazy_static::lazy_static; +use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; + +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + +// Metrics for jemalloc. +lazy_static! 
{ + pub static ref NUM_ARENAS: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use"); + pub static ref BYTES_ALLOCATED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated"); + pub static ref BYTES_ACTIVE: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active"); + pub static ref BYTES_MAPPED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped"); + pub static ref BYTES_METADATA: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata"); + pub static ref BYTES_RESIDENT: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident"); + pub static ref BYTES_RETAINED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained"); +} + +pub fn scrape_jemalloc_metrics() { + scrape_jemalloc_metrics_fallible().unwrap() +} + +pub fn scrape_jemalloc_metrics_fallible() -> Result<(), Error> { + // Advance the epoch so that the underlying statistics are updated. + epoch::advance()?; + + set_gauge(&NUM_ARENAS, arenas::narenas::read()? as i64); + set_gauge(&BYTES_ALLOCATED, stats::allocated::read()? as i64); + set_gauge(&BYTES_ACTIVE, stats::active::read()? as i64); + set_gauge(&BYTES_MAPPED, stats::mapped::read()? as i64); + set_gauge(&BYTES_METADATA, stats::metadata::read()? as i64); + set_gauge(&BYTES_RESIDENT, stats::resident::read()? as i64); + set_gauge(&BYTES_RETAINED, stats::retained::read()? as i64); + + Ok(()) +} diff --git a/common/malloc_utils/src/lib.rs b/common/malloc_utils/src/lib.rs index b8aed948f8b..3bb242369f7 100644 --- a/common/malloc_utils/src/lib.rs +++ b/common/malloc_utils/src/lib.rs @@ -2,18 +2,18 @@ //! //! ## Conditional Compilation //! -//! Presently, only configuration for "The GNU Allocator" from `glibc` is supported. All other -//! allocators are ignored. +//! This crate can be compiled with different feature flags to support different allocators: //! -//! It is assumed that if the following two statements are correct then we should expect to -//! configure `glibc`: +//! - Jemalloc, via the `jemalloc` feature. +//! - GNU malloc, if no features are set and the system supports it. +//! - The system allocator, if no features are set and the allocator is not GNU malloc. +//! +//! It is assumed that if Jemalloc is not in use, and the following two statements are correct then +//! we should expect to configure `glibc`: //! //! - `target_os = linux` //! - `target_env != musl` //! -//! In all other cases this library will not attempt to do anything (i.e., all functions are -//! no-ops). -//! //! If the above conditions are fulfilled but `glibc` still isn't present at runtime then a panic //! may be triggered. It is understood that there's no way to be certain that a compatible `glibc` //! is present: https://github.com/rust-lang/rust/issues/33244. @@ -24,18 +24,42 @@ //! detecting `glibc` are best-effort. If this crate throws errors about undefined external //! functions, then try to compile with the `not_glibc_interface` module. 
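With the feature-gated modules described above, callers can remain allocator-agnostic. A hypothetical usage sketch, assuming `malloc_utils` is available as a workspace dependency; the two function names follow this diff, but the call site is invented for illustration:

```rust
// Hypothetical caller of the unified allocator interface.
fn main() {
    // A no-op when built with the `jemalloc` feature (tuning happens at compile
    // time or via `_RJEM_MALLOC_CONF`); applies glibc tuning otherwise.
    malloc_utils::configure_memory_allocator().expect("failed to configure allocator");

    // Export allocator statistics to the Prometheus gauges, e.g. on each
    // metrics scrape.
    malloc_utils::scrape_allocator_metrics();
}
```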
-#[cfg(all(target_os = "linux", not(target_env = "musl")))] +#[cfg(all( + target_os = "linux", + not(target_env = "musl"), + not(feature = "jemalloc") +))] mod glibc; +#[cfg(feature = "jemalloc")] +mod jemalloc; + pub use interface::*; -#[cfg(all(target_os = "linux", not(target_env = "musl")))] +#[cfg(all( + target_os = "linux", + not(target_env = "musl"), + not(feature = "jemalloc") +))] mod interface { pub use crate::glibc::configure_glibc_malloc as configure_memory_allocator; pub use crate::glibc::scrape_mallinfo_metrics as scrape_allocator_metrics; } -#[cfg(any(not(target_os = "linux"), target_env = "musl"))] +#[cfg(feature = "jemalloc")] +mod interface { + #[allow(dead_code)] + pub fn configure_memory_allocator() -> Result<(), String> { + Ok(()) + } + + pub use crate::jemalloc::scrape_jemalloc_metrics as scrape_allocator_metrics; +} + +#[cfg(all( + any(not(target_os = "linux"), target_env = "musl"), + not(feature = "jemalloc") +))] mod interface { #[allow(dead_code, clippy::unnecessary_wraps)] pub fn configure_memory_allocator() -> Result<(), String> { diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 183f5c9313d..1c8813ca2f2 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -104,12 +104,23 @@ pub trait SlotClock: Send + Sync + Sized + Clone { self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 } - /// Returns the `Duration` since the start of the current `Slot`. Useful in determining whether to apply proposer boosts. - fn seconds_from_current_slot_start(&self, seconds_per_slot: u64) -> Option { + /// Returns the `Duration` since the start of the current `Slot` at seconds precision. Useful in determining whether to apply proposer boosts. + fn seconds_from_current_slot_start(&self) -> Option { self.now_duration() .and_then(|now| now.checked_sub(self.genesis_duration())) .map(|duration_into_slot| { - Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) + Duration::from_secs(duration_into_slot.as_secs() % self.slot_duration().as_secs()) + }) + } + + /// Returns the `Duration` since the start of the current `Slot` at milliseconds precision. + fn millis_from_current_slot_start(&self) -> Option { + self.now_duration() + .and_then(|now| now.checked_sub(self.genesis_duration())) + .map(|duration_into_slot| { + Duration::from_millis( + (duration_into_slot.as_millis() % self.slot_duration().as_millis()) as u64, + ) }) } diff --git a/common/slot_clock/src/manual_slot_clock.rs b/common/slot_clock/src/manual_slot_clock.rs index 296247fe93b..61299f74ac4 100644 --- a/common/slot_clock/src/manual_slot_clock.rs +++ b/common/slot_clock/src/manual_slot_clock.rs @@ -1,6 +1,7 @@ use super::SlotClock; use parking_lot::RwLock; use std::convert::TryInto; +use std::sync::Arc; use std::time::Duration; use types::Slot; @@ -10,7 +11,7 @@ pub struct ManualSlotClock { /// Duration from UNIX epoch to genesis. genesis_duration: Duration, /// Duration from UNIX epoch to right now. - current_time: RwLock, + current_time: Arc>, /// The length of each slot. 
slot_duration: Duration, } @@ -20,7 +21,7 @@ impl Clone for ManualSlotClock { ManualSlotClock { genesis_slot: self.genesis_slot, genesis_duration: self.genesis_duration, - current_time: RwLock::new(*self.current_time.read()), + current_time: Arc::clone(&self.current_time), slot_duration: self.slot_duration, } } @@ -90,7 +91,7 @@ impl SlotClock for ManualSlotClock { Self { genesis_slot, - current_time: RwLock::new(genesis_duration), + current_time: Arc::new(RwLock::new(genesis_duration)), genesis_duration, slot_duration, } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 06c1ca8f58e..2dd041ff07e 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -6,3 +6,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lru_cache = { path = "../lru_cache" } +lazy_static = "1.4.0" +parking_lot = "0.12.0" diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs index 4a8cf17380d..386f08a7390 100644 --- a/common/unused_port/src/lib.rs +++ b/common/unused_port/src/lib.rs @@ -1,4 +1,8 @@ -use std::net::{TcpListener, UdpSocket}; +use lazy_static::lazy_static; +use lru_cache::LRUTimeCache; +use parking_lot::Mutex; +use std::net::{SocketAddr, TcpListener, UdpSocket}; +use std::time::Duration; #[derive(Copy, Clone)] pub enum Transport { @@ -6,14 +10,37 @@ pub enum Transport { Udp, } -/// A convenience function for `unused_port(Transport::Tcp)`. -pub fn unused_tcp_port() -> Result { - unused_port(Transport::Tcp) +#[derive(Copy, Clone)] +pub enum IpVersion { + Ipv4, + Ipv6, +} + +pub const CACHED_PORTS_TTL: Duration = Duration::from_secs(300); + +lazy_static! { + static ref FOUND_PORTS_CACHE: Mutex> = + Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL)); +} + +/// A convenience wrapper over [`zero_port`]. +pub fn unused_tcp4_port() -> Result { + zero_port(Transport::Tcp, IpVersion::Ipv4) +} + +/// A convenience wrapper over [`zero_port`]. +pub fn unused_udp4_port() -> Result { + zero_port(Transport::Udp, IpVersion::Ipv4) } -/// A convenience function for `unused_port(Transport::Tcp)`. -pub fn unused_udp_port() -> Result { - unused_port(Transport::Udp) +/// A convenience wrapper over [`zero_port`]. +pub fn unused_tcp6_port() -> Result { + zero_port(Transport::Tcp, IpVersion::Ipv6) +} + +/// A convenience wrapper over [`zero_port`]. +pub fn unused_udp6_port() -> Result { + zero_port(Transport::Udp, IpVersion::Ipv6) } /// A bit of hack to find an unused port. @@ -26,10 +53,29 @@ pub fn unused_udp_port() -> Result { /// It is possible that users are unable to bind to the ports returned by this function as the OS /// has a buffer period where it doesn't allow binding to the same port even after the socket is /// closed. We might have to use SO_REUSEADDR socket option from `std::net2` crate in that case. 
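For illustration, a hypothetical caller of the reworked port helpers added below. The function names follow this diff; the `Result<u16, String>` return type is an assumption, since the generic parameters are not visible here, and error handling is simplified:

```rust
// Hypothetical test-harness usage of the per-IP-version helpers.
fn main() -> Result<(), String> {
    let tcp4 = unused_port::unused_tcp4_port()?;
    let udp6 = unused_port::unused_udp6_port()?;
    println!("found unused ports: tcp4={tcp4}, udp6={udp6}");
    Ok(())
}
```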
-pub fn unused_port(transport: Transport) -> Result { +pub fn zero_port(transport: Transport, ipv: IpVersion) -> Result { + let localhost = match ipv { + IpVersion::Ipv4 => std::net::Ipv4Addr::LOCALHOST.into(), + IpVersion::Ipv6 => std::net::Ipv6Addr::LOCALHOST.into(), + }; + let socket_addr = std::net::SocketAddr::new(localhost, 0); + let mut unused_port: u16; + loop { + unused_port = find_unused_port(transport, socket_addr)?; + let mut cache_lock = FOUND_PORTS_CACHE.lock(); + if !cache_lock.contains(&unused_port) { + cache_lock.insert(unused_port); + break; + } + } + + Ok(unused_port) +} + +fn find_unused_port(transport: Transport, socket_addr: SocketAddr) -> Result { let local_addr = match transport { Transport::Tcp => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + let listener = TcpListener::bind(socket_addr).map_err(|e| { format!("Failed to create TCP listener to find unused port: {:?}", e) })?; listener.local_addr().map_err(|e| { @@ -40,7 +86,7 @@ pub fn unused_port(transport: Transport) -> Result { })? } Transport::Udp => { - let socket = UdpSocket::bind("127.0.0.1:0") + let socket = UdpSocket::bind(socket_addr) .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; socket.local_addr().map_err(|e| { format!( diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index 346361b18fe..77d61251f24 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -6,3 +6,4 @@ pub mod metrics; pub mod query; pub mod reject; pub mod task; +pub mod uor; diff --git a/common/warp_utils/src/task.rs b/common/warp_utils/src/task.rs index c3b3e86e2ed..001231f2c6b 100644 --- a/common/warp_utils/src/task.rs +++ b/common/warp_utils/src/task.rs @@ -1,4 +1,5 @@ use serde::Serialize; +use warp::reply::{Reply, Response}; /// A convenience wrapper around `blocking_task`. pub async fn blocking_task(func: F) -> Result @@ -8,16 +9,29 @@ where { tokio::task::spawn_blocking(func) .await - .unwrap_or_else(|_| Err(warp::reject::reject())) // This should really be a 500 + .unwrap_or_else(|_| Err(warp::reject::reject())) +} + +/// A convenience wrapper around `blocking_task` that returns a `warp::reply::Response`. +/// +/// Using this method consistently makes it possible to simplify types using `.unify()` or `.uor()`. +pub async fn blocking_response_task(func: F) -> Result +where + F: FnOnce() -> Result + Send + 'static, + T: Reply + Send + 'static, +{ + blocking_task(func).await.map(Reply::into_response) } /// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. -pub async fn blocking_json_task(func: F) -> Result +pub async fn blocking_json_task(func: F) -> Result where F: FnOnce() -> Result + Send + 'static, T: Serialize + Send + 'static, { - blocking_task(func) - .await - .map(|resp| warp::reply::json(&resp)) + blocking_response_task(|| { + let response = func()?; + Ok(warp::reply::json(&response)) + }) + .await } diff --git a/common/warp_utils/src/uor.rs b/common/warp_utils/src/uor.rs new file mode 100644 index 00000000000..363f1df7d4d --- /dev/null +++ b/common/warp_utils/src/uor.rs @@ -0,0 +1,25 @@ +use warp::{filters::BoxedFilter, Filter, Rejection}; + +/// Mixin trait for `Filter` providing the unifying-or method. +pub trait UnifyingOrFilter: Filter + Sized + Send + Sync + 'static +where + Self::Extract: Send, +{ + /// Unifying `or`. 
+ /// + /// This is a shorthand for `self.or(other).unify().boxed()`, which is useful because it keeps + /// the filter type simple and prevents type-checker explosions. + fn uor(self, other: F) -> BoxedFilter + where + F: Filter + Clone + Send + Sync + 'static, + { + self.or(other).unify().boxed() + } +} + +impl UnifyingOrFilter for F +where + F: Filter + Sized + Send + Sync + 'static, + F::Extract: Send, +{ +} diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 55d13e9b548..ab8550c6d4e 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" ethereum-types = "0.14.1" eth2_ssz_types = { version = "0.2.2", path = "../ssz_types" } eth2_hashing = { version = "0.3.0", path = "../../crypto/eth2_hashing" } -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } eth2_ssz = { version = "0.4.1", path = "../ssz" } tree_hash = { version = "0.4.1", path = "../tree_hash" } smallvec = "1.6.1" diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 59415c14523..c64c15ee646 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -11,7 +11,7 @@ types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = { version = "0.4.1", path = "../ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 290cef78ab5..e6c46e83e78 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,7 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ - Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, - ProtoArrayForkChoice, ReOrgThreshold, + Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; @@ -13,9 +13,10 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, - ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + consts::merge::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, + AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, + EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, + SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -186,51 +187,6 @@ impl CountUnrealized { pub fn is_true(&self) -> bool { matches!(self, CountUnrealized::True) } - - pub fn and(&self, other: CountUnrealized) -> CountUnrealized { - if self.is_true() && other.is_true() { - CountUnrealized::True - } else { - CountUnrealized::False - } - } -} - -impl From for CountUnrealized { - fn from(count_unrealized: bool) -> Self { - if count_unrealized { - CountUnrealized::True - } else { - CountUnrealized::False - } - } -} 
- -#[derive(Copy, Clone)] -enum UpdateJustifiedCheckpointSlots { - OnTick { - current_slot: Slot, - }, - OnBlock { - state_slot: Slot, - current_slot: Slot, - }, -} - -impl UpdateJustifiedCheckpointSlots { - fn current_slot(&self) -> Slot { - match self { - UpdateJustifiedCheckpointSlots::OnTick { current_slot } => *current_slot, - UpdateJustifiedCheckpointSlots::OnBlock { current_slot, .. } => *current_slot, - } - } - - fn state_slot(&self) -> Option { - match self { - UpdateJustifiedCheckpointSlots::OnTick { .. } => None, - UpdateJustifiedCheckpointSlots::OnBlock { state_slot, .. } => Some(*state_slot), - } - } } /// Indicates if a block has been verified by an execution payload. @@ -392,7 +348,6 @@ where anchor_block: &SignedBeaconBlock, anchor_state: &BeaconState, current_slot: Option, - count_unrealized_full_config: CountUnrealizedFull, spec: &ChainSpec, ) -> Result> { // Sanity check: the anchor must lie on an epoch boundary. @@ -412,18 +367,18 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; - // Default any non-merge execution block hashes to 0x000..000. - let execution_status = anchor_block.message_merge().map_or_else( - |()| ExecutionStatus::irrelevant(), - |message| { - let execution_payload = &message.body.execution_payload; - if execution_payload == &<_>::default() { + let execution_status = anchor_block.message().execution_payload().map_or_else( + // If the block doesn't have an execution payload then it can't have + // execution enabled. + |_| ExecutionStatus::irrelevant(), + |execution_payload| { + if execution_payload.is_default_with_empty_roots() { // A default payload does not have execution enabled. ExecutionStatus::irrelevant() } else { // Assume that this payload is valid, since the anchor should be a trusted block and // state. - ExecutionStatus::Valid(message.body.execution_payload.block_hash()) + ExecutionStatus::Valid(execution_payload.block_hash()) } }, ); @@ -439,7 +394,6 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, - count_unrealized_full_config, )?; let mut fork_choice = Self { @@ -532,7 +486,7 @@ where // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. - let current_slot = self.update_time(system_time_current_slot, spec)?; + let current_slot = self.update_time(system_time_current_slot)?; let store = &mut self.fc_store; @@ -579,6 +533,7 @@ where current_slot: Slot, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { // Ensure that fork choice has already been updated for the current slot. 
This prevents @@ -610,6 +565,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) @@ -619,6 +575,7 @@ where &self, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { let current_slot = self.fc_store.get_current_slot(); @@ -628,6 +585,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) @@ -653,58 +611,6 @@ where } } - /// Returns `true` if the given `store` should be updated to set - /// `state.current_justified_checkpoint` its `justified_checkpoint`. - /// - /// ## Specification - /// - /// Is equivalent to: - /// - /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint - fn should_update_justified_checkpoint( - &mut self, - new_justified_checkpoint: Checkpoint, - slots: UpdateJustifiedCheckpointSlots, - spec: &ChainSpec, - ) -> Result> { - self.update_time(slots.current_slot(), spec)?; - - if compute_slots_since_epoch_start::(self.fc_store.get_current_slot()) - < spec.safe_slots_to_update_justified - { - return Ok(true); - } - - let justified_slot = - compute_start_slot_at_epoch::(self.fc_store.justified_checkpoint().epoch); - - // This sanity check is not in the spec, but the invariant is implied. - if let Some(state_slot) = slots.state_slot() { - if justified_slot >= state_slot { - return Err(Error::AttemptToRevertJustification { - store: justified_slot, - state: state_slot, - }); - } - } - - // We know that the slot for `new_justified_checkpoint.root` is not greater than - // `state.slot`, since a state cannot justify its own slot. - // - // We know that `new_justified_checkpoint.root` is an ancestor of `state`, since a `state` - // only ever justifies ancestors. - // - // A prior `if` statement protects against a justified_slot that is greater than - // `state.slot` - let justified_ancestor = - self.get_ancestor(new_justified_checkpoint.root, justified_slot)?; - if justified_ancestor != Some(self.fc_store.justified_checkpoint().root) { - return Ok(false); - } - - Ok(true) - } - /// See `ProtoArrayForkChoice::process_execution_payload_validation` for documentation. pub fn on_valid_execution_payload( &mut self, @@ -721,7 +627,7 @@ where op: &InvalidationOperation, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation(op) + .process_execution_payload_invalidation::(op) .map_err(Error::FailedToProcessInvalidExecutionPayload) } @@ -744,7 +650,7 @@ where /// The supplied block **must** pass the `state_transition` function as it will not be run /// here. #[allow(clippy::too_many_arguments)] - pub fn on_block>( + pub fn on_block>( &mut self, system_time_current_slot: Slot, block: BeaconBlockRef, @@ -758,7 +664,7 @@ where // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. - let current_slot = self.update_time(system_time_current_slot, spec)?; + let current_slot = self.update_time(system_time_current_slot)?; // Parent block must be known. 
let parent_block = self @@ -813,17 +719,10 @@ where self.fc_store.set_proposer_boost_root(block_root); } - let update_justified_checkpoint_slots = UpdateJustifiedCheckpointSlots::OnBlock { - state_slot: state.slot(), - current_slot, - }; - // Update store with checkpoints if necessary self.update_checkpoints( state.current_justified_checkpoint(), state.finalized_checkpoint(), - update_justified_checkpoint_slots, - spec, )?; // Update unrealized justified/finalized checkpoints. @@ -856,7 +755,9 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + BeaconBlockRef::Capella(_) + | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Altair(_) => { let participation_cache = per_epoch_processing::altair::ParticipationCache::new(state, spec) .map_err(Error::ParticipationCacheBuild)?; @@ -902,11 +803,9 @@ where // If block is from past epochs, try to update store's justified & finalized checkpoints right away if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { - self.update_checkpoints( + self.pull_up_store_checkpoints( unrealized_justified_checkpoint, unrealized_finalized_checkpoint, - update_justified_checkpoint_slots, - spec, )?; } @@ -1001,29 +900,19 @@ where &mut self, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, - slots: UpdateJustifiedCheckpointSlots, - spec: &ChainSpec, ) -> Result<(), Error> { // Update justified checkpoint. if justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch { - if justified_checkpoint.epoch > self.fc_store.best_justified_checkpoint().epoch { - self.fc_store - .set_best_justified_checkpoint(justified_checkpoint); - } - if self.should_update_justified_checkpoint(justified_checkpoint, slots, spec)? { - self.fc_store - .set_justified_checkpoint(justified_checkpoint) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; - } + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; } // Update finalized checkpoint. if finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch { self.fc_store.set_finalized_checkpoint(finalized_checkpoint); - self.fc_store - .set_justified_checkpoint(justified_checkpoint) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; } + Ok(()) } @@ -1164,9 +1053,8 @@ where system_time_current_slot: Slot, attestation: &IndexedAttestation, is_from_block: AttestationFromBlock, - spec: &ChainSpec, ) -> Result<(), Error> { - self.update_time(system_time_current_slot, spec)?; + self.update_time(system_time_current_slot)?; // Ignore any attestations to the zero hash. // @@ -1227,16 +1115,12 @@ where /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// `current_slot`. Returns the value of `self.fc_store.get_current_slot`. - pub fn update_time( - &mut self, - current_slot: Slot, - spec: &ChainSpec, - ) -> Result> { + pub fn update_time(&mut self, current_slot: Slot) -> Result> { while self.fc_store.get_current_slot() < current_slot { let previous_slot = self.fc_store.get_current_slot(); // Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't // get stuck in a loop. - self.on_tick(previous_slot + 1, spec)? + self.on_tick(previous_slot + 1)? } // Process any attestations that might now be eligible. 
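The slimmed-down `update_time` above keeps the same pattern as before: tick the store forward one slot at a time until it matches the system clock, so no per-slot housekeeping is skipped and the internal slot never moves backwards. A simplified sketch of that loop with toy types, not the real `ForkChoice`:

```rust
// Illustrative tick-forward loop.
struct Clock {
    current_slot: u64,
}

impl Clock {
    fn on_tick(&mut self, time: u64) {
        // Per-slot housekeeping (e.g. pulling up unrealized checkpoints in the
        // real implementation) would happen here.
        self.current_slot = time;
    }

    fn update_time(&mut self, system_slot: u64) -> u64 {
        while self.current_slot < system_slot {
            let previous_slot = self.current_slot;
            // Relying on `on_tick` to advance the clock keeps the loop finite,
            // as the comment in the hunk above notes.
            self.on_tick(previous_slot + 1);
        }
        self.current_slot
    }
}

fn main() {
    let mut clock = Clock { current_slot: 3 };
    assert_eq!(clock.update_time(7), 7);
    // Calling again with an older slot is a no-op: the clock never goes back.
    assert_eq!(clock.update_time(5), 7);
}
```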
@@ -1252,7 +1136,7 @@ where /// Equivalent to: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick - fn on_tick(&mut self, time: Slot, spec: &ChainSpec) -> Result<(), Error> { + fn on_tick(&mut self, time: Slot) -> Result<(), Error> { let store = &mut self.fc_store; let previous_slot = store.get_current_slot(); @@ -1280,28 +1164,29 @@ where return Ok(()); } - if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { - let store = &self.fc_store; - if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { - let store = &mut self.fc_store; - store - .set_justified_checkpoint(*store.best_justified_checkpoint()) - .map_err(Error::ForkChoiceStoreError)?; - } - } - - // Update store.justified_checkpoint if a better unrealized justified checkpoint is known + // Update the justified/finalized checkpoints based upon the + // best-observed unrealized justification/finality. let unrealized_justified_checkpoint = *self.fc_store.unrealized_justified_checkpoint(); let unrealized_finalized_checkpoint = *self.fc_store.unrealized_finalized_checkpoint(); - self.update_checkpoints( + self.pull_up_store_checkpoints( unrealized_justified_checkpoint, unrealized_finalized_checkpoint, - UpdateJustifiedCheckpointSlots::OnTick { current_slot }, - spec, )?; + Ok(()) } + fn pull_up_store_checkpoints( + &mut self, + unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint, + ) -> Result<(), Error> { + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + ) + } + /// Processes and removes from the queue any queued attestations which may now be eligible for /// processing due to the slot clock incrementing. fn process_attestation_queue(&mut self) -> Result<(), Error> { @@ -1323,12 +1208,13 @@ where /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.proto_array.contains_block(block_root) && self.is_descendant_of_finalized(*block_root) + self.proto_array.contains_block(block_root) + && self.is_finalized_checkpoint_or_descendant(*block_root) } /// Returns a `ProtoBlock` if the block is known **and** a descendant of the finalized root. pub fn get_block(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block(block_root) } else { None @@ -1337,7 +1223,7 @@ where /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block_execution_status(block_root) } else { None @@ -1372,10 +1258,10 @@ where }) } - /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. - pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { + /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it. + pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool { self.proto_array - .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) + .is_finalized_checkpoint_or_descendant::(block_root) } /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. 
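The rename above emphasises that the check is against the finalized *checkpoint*, not the finalized block. A simplified, self-contained sketch of the ancestry walk that the proto-array implementation later in this diff performs, using toy `u64` roots and a plain map instead of proto-array indices:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Node {
    slot: u64,
    parent: Option<u64>,
}

// Walk parents from `root`; once at or below the finalized slot, the walk must
// have landed exactly on the finalized checkpoint's root.
fn is_finalized_checkpoint_or_descendant(
    nodes: &HashMap<u64, Node>,
    finalized_root: u64,
    finalized_slot: u64,
    mut root: u64,
) -> bool {
    while let Some(node) = nodes.get(&root) {
        if node.slot <= finalized_slot {
            return root == finalized_root;
        }
        match node.parent {
            Some(parent) => root = parent,
            // A missing parent means the branch was pruned and so conflicts
            // with finality.
            None => return false,
        }
    }
    // An unknown root is not a finalized descendant.
    false
}

fn main() {
    let mut nodes = HashMap::new();
    nodes.insert(1, Node { slot: 32, parent: None });    // finalized checkpoint
    nodes.insert(2, Node { slot: 33, parent: Some(1) }); // descendant
    assert!(is_finalized_checkpoint_or_descendant(&nodes, 1, 32, 2));
    assert!(is_finalized_checkpoint_or_descendant(&nodes, 1, 32, 1));
    assert!(!is_finalized_checkpoint_or_descendant(&nodes, 1, 32, 99));
}
```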
@@ -1464,16 +1350,6 @@ where *self.fc_store.justified_checkpoint() } - /// Return the best justified checkpoint. - /// - /// ## Warning - /// - /// This is distinct to the "justified checkpoint" or the "current justified checkpoint". This - /// "best justified checkpoint" value should only be used internally or for testing. - pub fn best_justified_checkpoint(&self) -> Checkpoint { - *self.fc_store.best_justified_checkpoint() - } - pub fn unrealized_justified_checkpoint(&self) -> Checkpoint { *self.fc_store.unrealized_justified_checkpoint() } @@ -1534,13 +1410,11 @@ where pub fn proto_array_from_persisted( persisted: &PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, - count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result> { - let mut proto_array = - ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) - .map_err(Error::InvalidProtoArrayBytes)?; + let mut proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) + .map_err(Error::InvalidProtoArrayBytes)?; let contains_invalid_payloads = proto_array.contains_invalid_payloads(); debug!( @@ -1571,7 +1445,7 @@ where "error" => e, "info" => "please report this error", ); - ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) .map_err(Error::InvalidProtoArrayBytes) } else { debug!( @@ -1588,17 +1462,11 @@ where persisted: PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, fc_store: T, - count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result> { - let proto_array = Self::proto_array_from_persisted( - &persisted, - reset_payload_statuses, - count_unrealized_full, - spec, - log, - )?; + let proto_array = + Self::proto_array_from_persisted(&persisted, reset_payload_statuses, spec, log)?; let current_slot = fc_store.get_current_slot(); @@ -1691,7 +1559,6 @@ mod tests { fn get_queued_attestations() -> Vec { (1..4) - .into_iter() .map(|i| QueuedAttestation { slot: Slot::new(i), attesting_indices: vec![], diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 60c58859ed8..320f10141d9 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,7 +1,7 @@ use proto_array::JustifiedBalances; use std::collections::BTreeSet; use std::fmt::Debug; -use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -34,7 +34,7 @@ pub trait ForkChoiceStore: Sized { /// Called whenever `ForkChoice::on_block` has verified a block, but not yet added it to fork /// choice. Allows the implementer to performing caching or other housekeeping duties. - fn on_verified_block>( + fn on_verified_block>( &mut self, block: BeaconBlockRef, block_root: Hash256, @@ -47,9 +47,6 @@ pub trait ForkChoiceStore: Sized { /// Returns balances from the `state` identified by `justified_checkpoint.root`. fn justified_balances(&self) -> &JustifiedBalances; - /// Returns the `best_justified_checkpoint`. - fn best_justified_checkpoint(&self) -> &Checkpoint; - /// Returns the `finalized_checkpoint`. 
fn finalized_checkpoint(&self) -> &Checkpoint; @@ -68,9 +65,6 @@ pub trait ForkChoiceStore: Sized { /// Sets the `justified_checkpoint`. fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Self::Error>; - /// Sets the `best_justified_checkpoint`. - fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); - /// Sets the `unrealized_justified_checkpoint`. fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint); diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index b307c66d885..397a2ff8930 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -7,6 +7,4 @@ pub use crate::fork_choice::{ PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{ - Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, InvalidationOperation, -}; +pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 00bd1f763dc..82bf642f180 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -104,16 +104,6 @@ impl ForkChoiceTest { self } - /// Assert the epochs match. - pub fn assert_best_justified_epoch(self, epoch: u64) -> Self { - assert_eq!( - self.get(|fc_store| fc_store.best_justified_checkpoint().epoch), - Epoch::new(epoch), - "best_justified_epoch" - ); - self - } - /// Assert the given slot is greater than the head slot. pub fn assert_finalized_epoch_is_less_than(self, epoch: Epoch) -> Self { assert!(self.harness.finalized_checkpoint().epoch < epoch); @@ -151,7 +141,7 @@ impl ForkChoiceTest { .chain .canonical_head .fork_choice_write_lock() - .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec) + .update_time(self.harness.chain.slot().unwrap()) .unwrap(); func( self.harness @@ -241,6 +231,11 @@ impl ForkChoiceTest { /// /// If the chain is presently in an unsafe period, transition through it and the following safe /// period. + /// + /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed + /// from the fork choice spec in Q1 2023. We're still leaving references to + /// it in our tests because (a) it's easier and (b) it allows us to easily + /// test for the absence of that parameter. pub fn move_to_next_unsafe_period(self) -> Self { self.move_inside_safe_to_update() .move_outside_safe_to_update() @@ -534,7 +529,6 @@ async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) - .assert_best_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); @@ -551,11 +545,9 @@ async fn justified_checkpoint_updates_first_justification_outside_safe_to_update .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) - .assert_best_justified_epoch(0) .apply_blocks(1) .await - .assert_justified_epoch(2) - .assert_best_justified_epoch(2); + .assert_justified_epoch(2); } /// - The new justified checkpoint **does not** descend from the current. @@ -583,8 +575,7 @@ async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_with .unwrap(); }) .await - .assert_justified_epoch(3) - .assert_best_justified_epoch(3); + .assert_justified_epoch(3); } /// - The new justified checkpoint **does not** descend from the current. 
@@ -612,8 +603,9 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - .assert_justified_epoch(2) - .assert_best_justified_epoch(3); + // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new + // block should have updated the justified checkpoint. + .assert_justified_epoch(3); } /// - The new justified checkpoint **does not** descend from the current. @@ -641,8 +633,7 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - .assert_justified_epoch(3) - .assert_best_justified_epoch(3); + .assert_justified_epoch(3); } /// Check that the balances are obtained correctly. diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 8091b8da542..a55f839d33a 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -11,7 +11,7 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } eth2_ssz = { version = "0.4.1", path = "../ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index c55739da792..1fe45fd0f10 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -50,6 +50,7 @@ pub enum Error { block_root: Hash256, parent_root: Hash256, }, + InvalidEpochOffset(u64), Arith(ArithError), } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 035fb799eea..157f072ad37 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -3,7 +3,6 @@ mod ffg_updates; mod no_votes; mod votes; -use crate::proto_array::CountUnrealizedFull; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; use serde_derive::{Deserialize, Serialize}; @@ -88,7 +87,6 @@ impl ForkChoiceTestDefinition { junk_shuffling_id.clone(), junk_shuffling_id, ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), - CountUnrealizedFull::default(), ) .expect("should create fork choice struct"); let equivocating_indices = BTreeSet::new(); @@ -273,7 +271,7 @@ impl ForkChoiceTestDefinition { } }; fork_choice - .process_execution_payload_invalidation(&op) + .process_execution_payload_invalidation::(&op) .unwrap() } Operation::AssertWeight { block_root, weight } => assert_eq!( @@ -307,8 +305,8 @@ fn get_checkpoint(i: u64) -> Checkpoint { fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { let bytes = original.as_bytes(); - let decoded = ProtoArrayForkChoice::from_bytes(&bytes, CountUnrealizedFull::default()) - .expect("fork choice should decode from bytes"); + let decoded = + ProtoArrayForkChoice::from_bytes(&bytes).expect("fork choice should decode from bytes"); assert!( *original == decoded, "fork choice should encode and decode without change" diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs index 75f6c2f7c80..c8787817f1a 100644 --- a/consensus/proto_array/src/justified_balances.rs +++ b/consensus/proto_array/src/justified_balances.rs @@ -24,7 +24,7 @@ impl JustifiedBalances { .validators() .iter() .map(|validator| { - if validator.is_active_at(current_epoch) { + if !validator.slashed && 
validator.is_active_at(current_epoch) { total_effective_balance.safe_add_assign(validator.effective_balance)?; num_active_validators.safe_add_assign(1)?; diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index f2b29e1c7b2..481daba47e4 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -6,12 +6,10 @@ mod proto_array_fork_choice; mod ssz_container; pub use crate::justified_balances::JustifiedBalances; -pub use crate::proto_array::{ - calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, -}; +pub use crate::proto_array::{calculate_committee_fraction, InvalidationOperation}; pub use crate::proto_array_fork_choice::{ - Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, - ReOrgThreshold, + Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index add84f54787..2c19206cb75 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -118,24 +118,6 @@ impl Default for ProposerBoost { } } -/// Indicate whether we should strictly count unrealized justification/finalization votes. -#[derive(Default, PartialEq, Eq, Debug, Serialize, Deserialize, Copy, Clone)] -pub enum CountUnrealizedFull { - True, - #[default] - False, -} - -impl From for CountUnrealizedFull { - fn from(b: bool) -> Self { - if b { - CountUnrealizedFull::True - } else { - CountUnrealizedFull::False - } - } -} - #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes @@ -146,7 +128,6 @@ pub struct ProtoArray { pub nodes: Vec, pub indices: HashMap, pub previous_proposer_boost: ProposerBoost, - pub count_unrealized_full: CountUnrealizedFull, } impl ProtoArray { @@ -451,7 +432,7 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. 
- pub fn propagate_execution_payload_invalidation( + pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -482,7 +463,7 @@ impl ProtoArray { let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root.map_or(false, |ancestor_root| { self.is_descendant(ancestor_root, head_block_root) - && self.is_descendant(self.finalized_checkpoint.root, ancestor_root) + && self.is_finalized_checkpoint_or_descendant::(ancestor_root) }); // Collect all *ancestors* which were declared invalid since they reside between the @@ -684,9 +665,9 @@ impl ProtoArray { start_root: *justified_root, justified_checkpoint: self.justified_checkpoint, finalized_checkpoint: self.finalized_checkpoint, - head_root: justified_node.root, - head_justified_checkpoint: justified_node.justified_checkpoint, - head_finalized_checkpoint: justified_node.finalized_checkpoint, + head_root: best_node.root, + head_justified_checkpoint: best_node.justified_checkpoint, + head_finalized_checkpoint: best_node.finalized_checkpoint, }))); } @@ -900,55 +881,44 @@ impl ProtoArray { } let genesis_epoch = Epoch::new(0); - - let checkpoint_match_predicate = - |node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| { - let correct_justified = node_justified_checkpoint == self.justified_checkpoint - || self.justified_checkpoint.epoch == genesis_epoch; - let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint - || self.finalized_checkpoint.epoch == genesis_epoch; - correct_justified && correct_finalized + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let node_epoch = node.slot.epoch(E::slots_per_epoch()); + let node_justified_checkpoint = + if let Some(justified_checkpoint) = node.justified_checkpoint { + justified_checkpoint + } else { + // The node does not have any information about the justified + // checkpoint. This indicates an inconsistent proto-array. + return false; }; - if let ( - Some(unrealized_justified_checkpoint), - Some(unrealized_finalized_checkpoint), - Some(justified_checkpoint), - Some(finalized_checkpoint), - ) = ( - node.unrealized_justified_checkpoint, - node.unrealized_finalized_checkpoint, - node.justified_checkpoint, - node.finalized_checkpoint, - ) { - let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let voting_source = if current_epoch > node_epoch { + // The block is from a prior epoch, the voting source will be pulled-up. + node.unrealized_justified_checkpoint + // Sometimes we don't track the unrealized justification. In + // that case, just use the fully-realized justified checkpoint. + .unwrap_or(node_justified_checkpoint) + } else { + // The block is not from a prior epoch, therefore the voting source + // is not pulled up. 
+ node_justified_checkpoint + }; - // If previous epoch is justified, pull up all tips to at least the previous epoch - if CountUnrealizedFull::True == self.count_unrealized_full - && (current_epoch > genesis_epoch - && self.justified_checkpoint.epoch + 1 == current_epoch) - { - unrealized_justified_checkpoint.epoch + 1 >= current_epoch - // If previous epoch is not justified, pull up only tips from past epochs up to the current epoch - } else { - // If block is from a previous epoch, filter using unrealized justification & finalization information - if node.slot.epoch(E::slots_per_epoch()) < current_epoch { - checkpoint_match_predicate( - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - ) - // If block is from the current epoch, filter using the head state's justification & finalization information - } else { - checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) - } + let mut correct_justified = self.justified_checkpoint.epoch == genesis_epoch + || voting_source.epoch == self.justified_checkpoint.epoch; + + if let Some(node_unrealized_justified_checkpoint) = node.unrealized_justified_checkpoint { + if !correct_justified && self.justified_checkpoint.epoch + 1 == current_epoch { + correct_justified = node_unrealized_justified_checkpoint.epoch + >= self.justified_checkpoint.epoch + && voting_source.epoch + 2 >= current_epoch; } - } else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = - (node.justified_checkpoint, node.finalized_checkpoint) - { - checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) - } else { - false } + + let correct_finalized = self.finalized_checkpoint.epoch == genesis_epoch + || self.is_finalized_checkpoint_or_descendant::(node.root); + + correct_justified && correct_finalized } /// Return a reverse iterator over the nodes which comprise the chain ending at `block_root`. @@ -977,6 +947,12 @@ impl ProtoArray { /// ## Notes /// /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + /// + /// ## Warning + /// + /// Do not use this function to check if a block is a descendant of the + /// finalized checkpoint. Use `Self::is_finalized_checkpoint_or_descendant` + /// instead. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.indices .get(&ancestor_root) @@ -990,6 +966,70 @@ impl ProtoArray { .unwrap_or(false) } + /// Returns `true` if `root` is equal to or a descendant of + /// `self.finalized_checkpoint`. + /// + /// Notably, this function is checking ancestory of the finalized + /// *checkpoint* not the finalized *block*. + pub fn is_finalized_checkpoint_or_descendant(&self, root: Hash256) -> bool { + let finalized_root = self.finalized_checkpoint.root; + let finalized_slot = self + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let mut node = if let Some(node) = self + .indices + .get(&root) + .and_then(|index| self.nodes.get(*index)) + { + node + } else { + // An unknown root is not a finalized descendant. This line can only + // be reached if the user supplies a root that is not known to fork + // choice. + return false; + }; + + // The finalized and justified checkpoints represent a list of known + // ancestors of `node` that are likely to coincide with the store's + // finalized checkpoint. + // + // Run this check once, outside of the loop rather than inside the loop. + // If the conditions don't match for this node then they're unlikely to + // start matching for its ancestors. 
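        // Illustrative note: in the common case a node's own `finalized_checkpoint`
        // already equals `self.finalized_checkpoint`, so the shortcut below answers in
        // O(1); only unusual nodes fall through to the parent walk further down, which
        // is bounded by the number of ancestors above the finalized slot.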
+ for checkpoint in &[ + node.finalized_checkpoint, + node.justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.unrealized_justified_checkpoint, + ] { + if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + return true; + } + } + + loop { + // If `node` is less than or equal to the finalized slot then `node` + // must be the finalized block. + if node.slot <= finalized_slot { + return node.root == finalized_root; + } + + // Since `node` is from a higher slot that the finalized checkpoint, + // replace `node` with the parent of `node`. + if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) { + node = parent + } else { + // If `node` is not the finalized block and its parent does not + // exist in fork choice, then the parent must have been pruned. + // Proto-array only prunes blocks prior to the finalized block, + // so this means the parent conflicts with finality. + return false; + }; + } + } + /// Returns the first *beacon block root* which contains an execution payload with the given /// `block_hash`, if any. pub fn execution_block_hash_to_beacon_block_root( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cbd369ae6ec..d376e62e8f6 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,8 +1,8 @@ use crate::{ error::Error, proto_array::{ - calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, Iter, - ProposerBoost, ProtoArray, ProtoNode, + calculate_committee_fraction, InvalidationOperation, Iter, ProposerBoost, ProtoArray, + ProtoNode, }, ssz_container::SszContainer, JustifiedBalances, @@ -10,7 +10,10 @@ use crate::{ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::{BTreeSet, HashMap}; +use std::{ + collections::{BTreeSet, HashMap}, + fmt, +}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -125,6 +128,17 @@ impl ExecutionStatus { } } +impl fmt::Display for ExecutionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ExecutionStatus::Valid(_) => write!(f, "valid"), + ExecutionStatus::Invalid(_) => write!(f, "invalid"), + ExecutionStatus::Optimistic(_) => write!(f, "optimistic"), + ExecutionStatus::Irrelevant(_) => write!(f, "irrelevant"), + } + } +} + /// A block that is to be applied to the fork choice. /// /// A simplified version of `types::BeaconBlock`. @@ -236,6 +250,9 @@ pub enum DoNotReOrg { ParentDistance, HeadDistance, ShufflingUnstable, + DisallowedOffset { + offset: u64, + }, JustificationAndFinalizationNotCompetitive, ChainNotFinalizing { epochs_since_finalization: u64, @@ -257,6 +274,9 @@ impl std::fmt::Display for DoNotReOrg { Self::ParentDistance => write!(f, "parent too far from head"), Self::HeadDistance => write!(f, "head too far from current slot"), Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"), + Self::DisallowedOffset { offset } => { + write!(f, "re-orgs disabled at offset {offset}") + } Self::JustificationAndFinalizationNotCompetitive => { write!(f, "justification or finalization not competitive") } @@ -290,6 +310,31 @@ impl std::fmt::Display for DoNotReOrg { #[serde(transparent)] pub struct ReOrgThreshold(pub u64); +/// New-type for disallowed re-org slots. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct DisallowedReOrgOffsets { + // Vecs are faster than hashmaps for small numbers of items. + offsets: Vec, +} + +impl Default for DisallowedReOrgOffsets { + fn default() -> Self { + DisallowedReOrgOffsets { offsets: vec![0] } + } +} + +impl DisallowedReOrgOffsets { + pub fn new(offsets: Vec) -> Result { + for &offset in &offsets { + if offset >= E::slots_per_epoch() { + return Err(Error::InvalidEpochOffset(offset)); + } + } + Ok(Self { offsets }) + } +} + #[derive(PartialEq)] pub struct ProtoArrayForkChoice { pub(crate) proto_array: ProtoArray, @@ -307,7 +352,6 @@ impl ProtoArrayForkChoice { current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, execution_status: ExecutionStatus, - count_unrealized_full: CountUnrealizedFull, ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -316,7 +360,6 @@ impl ProtoArrayForkChoice { nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), previous_proposer_boost: ProposerBoost::default(), - count_unrealized_full, }; let block = Block { @@ -358,12 +401,12 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. - pub fn process_execution_payload_invalidation( + pub fn process_execution_payload_invalidation( &mut self, op: &InvalidationOperation, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation(op) + .propagate_execution_payload_invalidation::(op) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } @@ -448,6 +491,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let info = self.get_proposer_head_info::( @@ -455,6 +499,7 @@ impl ProtoArrayForkChoice { canonical_head, justified_balances, re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, )?; @@ -489,6 +534,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let mut nodes = self @@ -533,6 +579,12 @@ impl ProtoArrayForkChoice { return Err(DoNotReOrg::ShufflingUnstable.into()); } + // Check allowed slot offsets. + let offset = (re_org_block_slot % E::slots_per_epoch()).as_u64(); + if disallowed_offsets.offsets.contains(&offset) { + return Err(DoNotReOrg::DisallowedOffset { offset }.into()); + } + // Check FFG. let ffg_competitive = parent_node.unrealized_justified_checkpoint == head_node.unrealized_justified_checkpoint @@ -748,6 +800,15 @@ impl ProtoArrayForkChoice { .is_descendant(ancestor_root, descendant_root) } + /// See `ProtoArray` documentation. 
+ pub fn is_finalized_checkpoint_or_descendant( + &self, + descendant_root: Hash256, + ) -> bool { + self.proto_array + .is_finalized_checkpoint_or_descendant::(descendant_root) + } + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { if validator_index < self.votes.0.len() { let vote = &self.votes.0[validator_index]; @@ -771,13 +832,10 @@ impl ProtoArrayForkChoice { SszContainer::from(self).as_ssz_bytes() } - pub fn from_bytes( - bytes: &[u8], - count_unrealized_full: CountUnrealizedFull, - ) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let container = SszContainer::from_ssz_bytes(bytes) .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))?; - (container, count_unrealized_full) + container .try_into() .map_err(|e| format!("Failed to initialize ProtoArrayForkChoice: {e:?}")) } @@ -928,6 +986,10 @@ mod test_compute_deltas { epoch: genesis_epoch, root: finalized_root, }; + let junk_checkpoint = Checkpoint { + epoch: Epoch::new(42), + root: Hash256::repeat_byte(42), + }; let mut fc = ProtoArrayForkChoice::new::( genesis_slot, @@ -937,7 +999,6 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, - CountUnrealizedFull::default(), ) .unwrap(); @@ -973,8 +1034,10 @@ mod test_compute_deltas { target_root: finalized_root, current_epoch_shuffling_id: junk_shuffling_id.clone(), next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, + // Use the junk checkpoint for the next two values to prevent + // the loop-shortcutting mechanism from triggering. + justified_checkpoint: junk_checkpoint, + finalized_checkpoint: junk_checkpoint, execution_status, unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, @@ -993,6 +1056,11 @@ mod test_compute_deltas { assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); assert!(!fc.is_descendant(finalized_root, unknown)); + assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_root)); + assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::(not_finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::(unknown)); + assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); assert!(fc.is_descendant(finalized_desc, finalized_desc)); assert!(!fc.is_descendant(finalized_desc, finalized_root)); @@ -1004,6 +1072,170 @@ mod test_compute_deltas { assert!(!fc.is_descendant(not_finalized_desc, unknown)); } + /// This test covers an interesting case where a block can be a descendant + /// of the finalized *block*, but not a descendant of the finalized + /// *checkpoint*. + /// + /// ## Example + /// + /// Consider this block tree which has three blocks (`A`, `B` and `C`): + /// + /// ```ignore + /// [A] <--- [-] <--- [B] + /// | + /// |--[C] + /// ``` + /// + /// - `A` (slot 31) is the common ancestor. + /// - `B` (slot 33) descends from `A`, but there is a single skip slot + /// between it and `A`. + /// - `C` (slot 32) descends from `A` and conflicts with `B`. + /// + /// Imagine that the `B` chain is finalized at epoch 1. This means that the + /// finalized checkpoint points to the skipped slot at 32. The root of the + /// finalized checkpoint is `A`. + /// + /// In this scenario, the block `C` has the finalized root (`A`) as an + /// ancestor whilst simultaneously conflicting with the finalized + /// checkpoint. 
+ /// + /// This means that to ensure a block does not conflict with finality we + /// must check to ensure that it's an ancestor of the finalized + /// *checkpoint*, not just the finalized *block*. + #[test] + fn finalized_descendant_edge_case() { + let get_block_root = Hash256::from_low_u64_be; + let genesis_slot = Slot::new(0); + let junk_state_root = Hash256::zero(); + let junk_shuffling_id = + AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); + let execution_status = ExecutionStatus::irrelevant(); + + let genesis_checkpoint = Checkpoint { + epoch: Epoch::new(0), + root: get_block_root(0), + }; + + let mut fc = ProtoArrayForkChoice::new::( + genesis_slot, + junk_state_root, + genesis_checkpoint, + genesis_checkpoint, + junk_shuffling_id.clone(), + junk_shuffling_id.clone(), + execution_status, + ) + .unwrap(); + + struct TestBlock { + slot: u64, + root: u64, + parent_root: u64, + } + + let insert_block = |fc: &mut ProtoArrayForkChoice, block: TestBlock| { + fc.proto_array + .on_block::( + Block { + slot: Slot::from(block.slot), + root: get_block_root(block.root), + parent_root: Some(get_block_root(block.parent_root)), + state_root: Hash256::zero(), + target_root: Hash256::zero(), + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(0), + root: get_block_root(0), + }, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + Slot::from(block.slot), + ) + .unwrap(); + }; + + /* + * Start of interesting part of tests. + */ + + // Produce the 0th epoch of blocks. They should all form a chain from + // the genesis block. + for i in 1..MainnetEthSpec::slots_per_epoch() { + insert_block( + &mut fc, + TestBlock { + slot: i, + root: i, + parent_root: i - 1, + }, + ) + } + + let last_slot_of_epoch_0 = MainnetEthSpec::slots_per_epoch() - 1; + + // Produce a block that descends from the last block of epoch 0. + // + // This block will be non-canonical. + let non_canonical_slot = last_slot_of_epoch_0 + 1; + insert_block( + &mut fc, + TestBlock { + slot: non_canonical_slot, + root: non_canonical_slot, + parent_root: non_canonical_slot - 1, + }, + ); + + // Produce a block that descends from the last block of the 0th epoch, + // skipping the 1st slot of the 1st epoch. + // + // This block will be canonical. + let canonical_slot = last_slot_of_epoch_0 + 2; + insert_block( + &mut fc, + TestBlock { + slot: canonical_slot, + root: canonical_slot, + parent_root: non_canonical_slot - 1, + }, + ); + + let finalized_root = get_block_root(last_slot_of_epoch_0); + + // Set the finalized checkpoint to finalize the first slot of epoch 1 on + // the canonical chain. 
+ fc.proto_array.finalized_checkpoint = Checkpoint { + root: finalized_root, + epoch: Epoch::new(1), + }; + + assert!( + fc.proto_array + .is_finalized_checkpoint_or_descendant::(finalized_root), + "the finalized checkpoint is the finalized checkpoint" + ); + + assert!( + fc.proto_array + .is_finalized_checkpoint_or_descendant::(get_block_root( + canonical_slot + )), + "the canonical block is a descendant of the finalized checkpoint" + ); + assert!( + !fc.proto_array + .is_finalized_checkpoint_or_descendant::(get_block_root( + non_canonical_slot + )), + "although the non-canonical block is a descendant of the finalized block, \ + it's not a descendant of the finalized checkpoint" + ); + } + #[test] fn zero_hash() { let validator_count: usize = 16; diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 1a20ef967ad..ed1efaae1af 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode}, + proto_array::{ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, Error, JustifiedBalances, }; @@ -43,12 +43,10 @@ impl From<&ProtoArrayForkChoice> for SszContainer { } } -impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { +impl TryFrom for ProtoArrayForkChoice { type Error = Error; - fn try_from( - (from, count_unrealized_full): (SszContainer, CountUnrealizedFull), - ) -> Result { + fn try_from(from: SszContainer) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, justified_checkpoint: from.justified_checkpoint, @@ -56,7 +54,6 @@ impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { nodes: from.nodes, indices: from.indices.into_iter().collect::>(), previous_proposer_boost: from.previous_proposer_boost, - count_unrealized_full, }; Ok(Self { diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index ae11f147780..c980d87c14b 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -16,4 +16,4 @@ pub mod u64_hex_be; pub mod u8_hex; pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_u256, quoted_u32, quoted_u64, quoted_u8}; +pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs index e4cd249cb84..254bd1edb38 100644 --- a/consensus/serde_utils/src/quoted_int.rs +++ b/consensus/serde_utils/src/quoted_int.rs @@ -12,7 +12,7 @@ use serde::{Deserializer, Serializer}; use serde_derive::{Deserialize, Serialize}; macro_rules! define_mod { - ($int: ty, $visit_fn: ident) => { + ($int: ty) => { /// Serde support for deserializing quoted integers. /// /// Configurable so that quotes are either required or optional. @@ -141,19 +141,25 @@ macro_rules! 
define_mod { pub mod quoted_u8 { use super::*; - define_mod!(u8, visit_u8); + define_mod!(u8); } pub mod quoted_u32 { use super::*; - define_mod!(u32, visit_u32); + define_mod!(u32); } pub mod quoted_u64 { use super::*; - define_mod!(u64, visit_u64); + define_mod!(u64); +} + +pub mod quoted_i64 { + use super::*; + + define_mod!(i64); } pub mod quoted_u256 { @@ -217,4 +223,26 @@ mod test { fn u256_without_quotes() { serde_json::from_str::("1").unwrap_err(); } + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct WrappedI64(#[serde(with = "quoted_i64")] i64); + + #[test] + fn negative_i64_with_quotes() { + assert_eq!( + serde_json::from_str::("\"-200\"").unwrap().0, + -200 + ); + assert_eq!( + serde_json::to_string(&WrappedI64(-12_500)).unwrap(), + "\"-12500\"" + ); + } + + // It would be OK if this worked, but we don't need it to (i64s should always be quoted). + #[test] + fn negative_i64_without_quotes() { + serde_json::from_str::("-200").unwrap_err(); + } } diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index 2f5a0c4a436..7aaa3d6a89b 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" name = "ssz" [dev-dependencies] -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } ethereum-types = { version = "0.14.1" } [dependencies] diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index 50e11f1067b..2d96fbd83cb 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -390,6 +390,7 @@ macro_rules! impl_decodable_for_u8_array { impl_decodable_for_u8_array!(4); impl_decodable_for_u8_array!(32); +impl_decodable_for_u8_array!(48); macro_rules! impl_for_vec { ($type: ty, $max_len: expr) => { diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 98f47947931..5dcd5a6147d 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -512,6 +512,7 @@ macro_rules! 
impl_encodable_for_u8_array { impl_encodable_for_u8_array!(4); impl_encodable_for_u8_array!(32); +impl_encodable_for_u8_array!(48); #[cfg(test)] mod tests { diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index b4b91da4b58..f52d2c5cdfe 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -388,145 +388,3 @@ mod round_trip { round_trip(data); } } - -mod derive_macro { - use ssz::{Decode, Encode}; - use ssz_derive::{Decode, Encode}; - use std::fmt::Debug; - - fn assert_encode(item: &T, bytes: &[u8]) { - assert_eq!(item.as_ssz_bytes(), bytes); - } - - fn assert_encode_decode(item: &T, bytes: &[u8]) { - assert_encode(item, bytes); - assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoFixedUnion { - U8(u8), - U16(u16), - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct TwoFixedUnionStruct { - a: TwoFixedUnion, - } - - #[test] - fn two_fixed_union() { - let eight = TwoFixedUnion::U8(1); - let sixteen = TwoFixedUnion::U16(1); - - assert_encode_decode(&eight, &[0, 1]); - assert_encode_decode(&sixteen, &[1, 1, 0]); - - assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); - assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct VariableA { - a: u8, - b: Vec, - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct VariableB { - a: Vec, - b: u8, - } - - #[derive(PartialEq, Debug, Encode)] - #[ssz(enum_behaviour = "transparent")] - enum TwoVariableTrans { - A(VariableA), - B(VariableB), - } - - #[derive(PartialEq, Debug, Encode)] - struct TwoVariableTransStruct { - a: TwoVariableTrans, - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoVariableUnion { - A(VariableA), - B(VariableB), - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct TwoVariableUnionStruct { - a: TwoVariableUnion, - } - - #[test] - fn two_variable_trans() { - let trans_a = TwoVariableTrans::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let trans_b = TwoVariableTrans::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); - assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); - - assert_encode( - &TwoVariableTransStruct { a: trans_a }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode( - &TwoVariableTransStruct { a: trans_b }, - &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], - ); - } - - #[test] - fn two_variable_union() { - let union_a = TwoVariableUnion::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let union_b = TwoVariableUnion::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); - assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); - - assert_encode_decode( - &TwoVariableUnionStruct { a: union_a }, - &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode_decode( - &TwoVariableUnionStruct { a: union_b }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], - ); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoVecUnion { - A(Vec), - B(Vec), - } - - #[test] - fn two_vec_union() { - assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); - assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); - - assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); - assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); - - assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 
1]); - assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); - } -} diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml index cac617d3917..eabc856515c 100644 --- a/consensus/ssz_derive/Cargo.toml +++ b/consensus/ssz_derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_ssz_derive" -version = "0.3.0" +version = "0.3.1" authors = ["Paul Hauner "] edition = "2021" description = "Procedural derive macros to accompany the eth2_ssz crate." @@ -15,3 +15,6 @@ syn = "1.0.42" proc-macro2 = "1.0.23" quote = "1.0.7" darling = "0.13.0" + +[dev-dependencies] +eth2_ssz = { version = "0.4.1", path = "../ssz" } diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs index ed5068ef4f4..bfeb63e57ce 100644 --- a/consensus/ssz_derive/src/lib.rs +++ b/consensus/ssz_derive/src/lib.rs @@ -1,7 +1,146 @@ -#![recursion_limit = "256"] //! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate. //! -//! Supports field attributes, see each derive macro for more information. +//! ## Attributes +//! +//! The following struct/enum attributes are available: +//! +//! - `#[ssz(enum_behaviour = "union")]`: encodes and decodes an `enum` with a one-byte variant selector. +//! - `#[ssz(enum_behaviour = "transparent")]`: allows encoding an `enum` by serializing only the +//! value whilst ignoring outermost the `enum`. +//! - `#[ssz(struct_behaviour = "container")]`: encodes and decodes the `struct` as an SSZ +//! "container". +//! - `#[ssz(struct_behaviour = "transparent")]`: encodes and decodes a `struct` with exactly one +//! non-skipped field as if the outermost `struct` does not exist. +//! +//! The following field attributes are available: +//! +//! - `#[ssz(with = "module")]`: uses the methods in `module` to implement `ssz::Encode` and +//! `ssz::Decode`. This is useful when it's not possible to create an `impl` for that type +//! (e.g. the type is defined in another crate). +//! - `#[ssz(skip_serializing)]`: this field will not be included in the serialized SSZ vector. +//! - `#[ssz(skip_deserializing)]`: this field will not be expected in the serialized +//! SSZ vector and it will be initialized from a `Default` implementation. +//! +//! ## Examples +//! +//! ### Structs +//! +//! ```rust +//! use ssz::{Encode, Decode}; +//! use ssz_derive::{Encode, Decode}; +//! +//! /// Represented as an SSZ "list" wrapped in an SSZ "container". +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "container")] // "container" is the default behaviour +//! struct TypicalStruct { +//! foo: Vec +//! } +//! +//! assert_eq!( +//! TypicalStruct { foo: vec![42] }.as_ssz_bytes(), +//! vec![4, 0, 0, 0, 42] +//! ); +//! +//! assert_eq!( +//! TypicalStruct::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(), +//! TypicalStruct { foo: vec![42] }, +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". +//! #[derive(Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct WrapperStruct { +//! foo: Vec +//! } +//! +//! assert_eq!( +//! WrapperStruct { foo: vec![42] }.as_ssz_bytes(), +//! vec![42] +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct WrapperStructSkippedField { +//! foo: Vec, +//! #[ssz(skip_serializing, skip_deserializing)] +//! bar: u8, +//! } +//! +//! assert_eq!( +//! 
WrapperStructSkippedField { foo: vec![42], bar: 99 }.as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! WrapperStructSkippedField::from_ssz_bytes(&[42]).unwrap(), +//! WrapperStructSkippedField { foo: vec![42], bar: 0 } +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". +//! #[derive(Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct NewType(Vec); +//! +//! assert_eq!( +//! NewType(vec![42]).as_ssz_bytes(), +//! vec![42] +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct NewTypeSkippedField(Vec, #[ssz(skip_serializing, skip_deserializing)] u8); +//! +//! assert_eq!( +//! NewTypeSkippedField(vec![42], 99).as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! NewTypeSkippedField::from_ssz_bytes(&[42]).unwrap(), +//! NewTypeSkippedField(vec![42], 0) +//! ); +//! ``` +//! +//! ### Enums +//! +//! ```rust +//! use ssz::{Encode, Decode}; +//! use ssz_derive::{Encode, Decode}; +//! +//! /// Represented as an SSZ "union". +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(enum_behaviour = "union")] +//! enum UnionEnum { +//! Foo(u8), +//! Bar(Vec), +//! } +//! +//! assert_eq!( +//! UnionEnum::Foo(42).as_ssz_bytes(), +//! vec![0, 42] +//! ); +//! assert_eq!( +//! UnionEnum::from_ssz_bytes(&[1, 42, 42]).unwrap(), +//! UnionEnum::Bar(vec![42, 42]), +//! ); +//! +//! /// Represented as only the value in the enum variant. +//! #[derive(Debug, PartialEq, Encode)] +//! #[ssz(enum_behaviour = "transparent")] +//! enum TransparentEnum { +//! Foo(u8), +//! Bar(Vec), +//! } +//! +//! assert_eq!( +//! TransparentEnum::Foo(42).as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! TransparentEnum::Bar(vec![42, 42]).as_ssz_bytes(), +//! vec![42, 42] +//! ); +//! ``` use darling::{FromDeriveInput, FromMeta}; use proc_macro::TokenStream; @@ -13,11 +152,18 @@ use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput, Ident}; /// extensions). const MAX_UNION_SELECTOR: u8 = 127; +const ENUM_TRANSPARENT: &str = "transparent"; +const ENUM_UNION: &str = "union"; +const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute with \ + a \"transparent\" or \"union\" value, e.g., #[ssz(enum_behaviour = \"transparent\")]"; + #[derive(Debug, FromDeriveInput)] #[darling(attributes(ssz))] struct StructOpts { #[darling(default)] enum_behaviour: Option, + #[darling(default)] + struct_behaviour: Option, } /// Field-level configuration. 
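The field attribute list above mentions `skip_serializing` and `skip_deserializing`; these work on ordinary "container" structs as well as the new "transparent" ones. A minimal sketch (the struct name is invented here; the byte values follow the single-field container example from the doc comments):

```rust
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::marker::PhantomData;

/// A normal SSZ container whose `bar` field is never serialized and is
/// rebuilt from `Default` on decode.
#[derive(Debug, PartialEq, Encode, Decode)]
struct ContainerSkippedField {
    foo: Vec<u8>,
    #[ssz(skip_serializing, skip_deserializing)]
    bar: PhantomData<u64>,
}

fn main() {
    let x = ContainerSkippedField { foo: vec![42], bar: PhantomData };
    // Same bytes as the single-field container in the doc example:
    // a 4-byte offset followed by the list contents.
    assert_eq!(x.as_ssz_bytes(), vec![4, 0, 0, 0, 42]);
    assert_eq!(ContainerSkippedField::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(), x);
}
```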
@@ -31,40 +177,87 @@ struct FieldOpts { skip_deserializing: bool, } -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[ssz(enum_behaviour = \"transparent\")]"; +enum Procedure<'a> { + Struct { + data: &'a syn::DataStruct, + behaviour: StructBehaviour, + }, + Enum { + data: &'a syn::DataEnum, + behaviour: EnumBehaviour, + }, +} -enum EnumBehaviour { +enum StructBehaviour { + Container, Transparent, +} + +enum EnumBehaviour { Union, + Transparent, } -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) +impl<'a> Procedure<'a> { + fn read(item: &'a DeriveInput) -> Self { + let opts = StructOpts::from_derive_input(item).unwrap(); + + match &item.data { + syn::Data::Struct(data) => { + if opts.enum_behaviour.is_some() { + panic!("cannot use \"enum_behaviour\" for a struct"); + } + + match opts.struct_behaviour.as_deref() { + Some("container") | None => Procedure::Struct { + data, + behaviour: StructBehaviour::Container, + }, + Some("transparent") => Procedure::Struct { + data, + behaviour: StructBehaviour::Transparent, + }, + Some(other) => panic!( + "{} is not a valid struct behaviour, use \"container\" or \"transparent\"", + other + ), + } + } + syn::Data::Enum(data) => { + if opts.struct_behaviour.is_some() { + panic!("cannot use \"struct_behaviour\" for an enum"); + } + + match opts.enum_behaviour.as_deref() { + Some("union") => Procedure::Enum { + data, + behaviour: EnumBehaviour::Union, + }, + Some("transparent") => Procedure::Enum { + data, + behaviour: EnumBehaviour::Transparent, + }, + Some(other) => panic!( + "{} is not a valid enum behaviour, use \"container\" or \"transparent\"", + other + ), + None => panic!("{}", NO_ENUM_BEHAVIOUR_ERROR), + } + } + _ => panic!("ssz_derive only supports structs and enums"), + } } } -fn parse_ssz_fields(struct_data: &syn::DataStruct) -> Vec<(&syn::Type, &syn::Ident, FieldOpts)> { +fn parse_ssz_fields( + struct_data: &syn::DataStruct, +) -> Vec<(&syn::Type, Option<&syn::Ident>, FieldOpts)> { struct_data .fields .iter() .map(|field| { let ty = &field.ty; - let ident = match &field.ident { - Some(ref ident) => ident, - _ => panic!("ssz_derive only supports named struct fields."), - }; + let ident = field.ident.as_ref(); let field_opts_candidates = field .attrs @@ -93,21 +286,17 @@ fn parse_ssz_fields(struct_data: &syn::DataStruct) -> Vec<(&syn::Type, &syn::Ide #[proc_macro_derive(Encode, attributes(ssz))] pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); + let procedure = Procedure::read(&item); - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - ssz_encode_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, s), - EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, s), + match procedure { + Procedure::Struct { data, behaviour } => match behaviour { + 
StructBehaviour::Transparent => ssz_encode_derive_struct_transparent(&item, data), + StructBehaviour::Container => ssz_encode_derive_struct(&item, data), + }, + Procedure::Enum { data, behaviour } => match behaviour { + EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, data), + EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, data), }, - _ => panic!("ssz_derive only supports structs and enums"), } } @@ -132,6 +321,13 @@ fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct continue; } + let ident = match ident { + Some(ref ident) => ident, + _ => panic!( + "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." + ), + }; + if let Some(module) = field_opts.with { let module = quote! { #module::encode }; field_is_ssz_fixed_len.push(quote! { #module::is_ssz_fixed_len() }); @@ -219,6 +415,82 @@ fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct output.into() } +/// Derive `ssz::Encode` "transparently" for a struct which has exactly one non-skipped field. +/// +/// The single field is encoded directly, making the outermost `struct` transparent. +/// +/// ## Field attributes +/// +/// - `#[ssz(skip_serializing)]`: the field will not be serialized. +fn ssz_encode_derive_struct_transparent( + derive_input: &DeriveInput, + struct_data: &DataStruct, +) -> TokenStream { + let name = &derive_input.ident; + let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); + let ssz_fields = parse_ssz_fields(struct_data); + let num_fields = ssz_fields + .iter() + .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) + .count(); + + if num_fields != 1 { + panic!( + "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", + num_fields + ); + } + + let (ty, ident, _field_opts) = ssz_fields + .iter() + .find(|(_, _, field_opts)| !field_opts.skip_deserializing) + .expect("\"transparent\" struct must have at least one non-skipped field"); + + let output = if let Some(field_name) = ident { + quote! { + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.#field_name.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.#field_name.ssz_append(buf) + } + } + } + } else { + quote! { + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } + } + } + }; + + output.into() +} + /// Derive `ssz::Encode` for an enum in the "transparent" method. /// /// The "transparent" method is distinct from the "union" method specified in the SSZ specification. 
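To make the container/transparent split concrete, here is a small hedged sketch contrasting the two behaviours on the same single-field struct (the type names are invented; the expected bytes are the ones used in the doc-comment examples above):

```rust
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
#[ssz(struct_behaviour = "container")] // the default behaviour
struct AsContainer {
    foo: Vec<u8>,
}

#[derive(Debug, PartialEq, Encode, Decode)]
#[ssz(struct_behaviour = "transparent")]
struct AsTransparent {
    foo: Vec<u8>,
}

fn main() {
    // Container: a 4-byte offset, then the list contents.
    assert_eq!(AsContainer { foo: vec![42] }.as_ssz_bytes(), vec![4, 0, 0, 0, 42]);
    // Transparent: byte-for-byte identical to the inner `Vec<u8>`.
    assert_eq!(AsTransparent { foo: vec![42] }.as_ssz_bytes(), vec![42]);
    assert_eq!(
        AsTransparent::from_ssz_bytes(&[42]).unwrap(),
        AsTransparent { foo: vec![42] }
    );
}
```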
@@ -367,24 +639,20 @@ fn ssz_encode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum #[proc_macro_derive(Decode, attributes(ssz))] pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); + let procedure = Procedure::read(&item); - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - ssz_decode_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { + match procedure { + Procedure::Struct { data, behaviour } => match behaviour { + StructBehaviour::Transparent => ssz_decode_derive_struct_transparent(&item, data), + StructBehaviour::Container => ssz_decode_derive_struct(&item, data), + }, + Procedure::Enum { data, behaviour } => match behaviour { + EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, data), EnumBehaviour::Transparent => panic!( "Decode cannot be derived for enum_behaviour \"{}\", only \"{}\" is valid.", ENUM_TRANSPARENT, ENUM_UNION ), - EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, s), }, - _ => panic!("ssz_derive only supports structs and enums"), } } @@ -409,6 +677,13 @@ fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Tok let mut fixed_lens = vec![]; for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { + let ident = match ident { + Some(ref ident) => ident, + _ => panic!( + "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." + ), + }; + field_names.push(quote! { #ident }); @@ -545,6 +820,90 @@ fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Tok output.into() } +/// Implements `ssz::Decode` "transparently" for a `struct` with exactly one non-skipped field. +/// +/// The bytes will be decoded as if they are the inner field, without the outermost struct. The +/// outermost struct will then be applied artificially. +/// +/// ## Field attributes +/// +/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a +/// `Default` implementation. The decoder will assume that the field was not serialized at all +/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). +fn ssz_decode_derive_struct_transparent( + item: &DeriveInput, + struct_data: &DataStruct, +) -> TokenStream { + let name = &item.ident; + let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); + let ssz_fields = parse_ssz_fields(struct_data); + let num_fields = ssz_fields + .iter() + .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) + .count(); + + if num_fields != 1 { + panic!( + "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", + num_fields + ); + } + + let mut fields = vec![]; + let mut wrapped_type = None; + + for (i, (ty, ident, field_opts)) in ssz_fields.into_iter().enumerate() { + if let Some(name) = ident { + if field_opts.skip_deserializing { + fields.push(quote! { + #name: <_>::default(), + }); + } else { + fields.push(quote! { + #name: <_>::from_ssz_bytes(bytes)?, + }); + wrapped_type = Some(ty); + } + } else { + let index = syn::Index::from(i); + if field_opts.skip_deserializing { + fields.push(quote! { + #index:<_>::default(), + }); + } else { + fields.push(quote! 
{ + #index:<_>::from_ssz_bytes(bytes)?, + }); + wrapped_type = Some(ty); + } + } + } + + let ty = wrapped_type.unwrap(); + + let output = quote! { + impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { + Ok(Self { + #( + #fields + )* + + }) + } + } + }; + output.into() +} + /// Derive `ssz::Decode` for an `enum` following the "union" SSZ spec. fn ssz_decode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { let name = &derive_input.ident; diff --git a/consensus/ssz_derive/tests/tests.rs b/consensus/ssz_derive/tests/tests.rs new file mode 100644 index 00000000000..2eeb3a48db7 --- /dev/null +++ b/consensus/ssz_derive/tests/tests.rs @@ -0,0 +1,215 @@ +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::fmt::Debug; +use std::marker::PhantomData; + +fn assert_encode(item: &T, bytes: &[u8]) { + assert_eq!(item.as_ssz_bytes(), bytes); +} + +fn assert_encode_decode(item: &T, bytes: &[u8]) { + assert_encode(item, bytes); + assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoFixedUnion { + U8(u8), + U16(u16), +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct TwoFixedUnionStruct { + a: TwoFixedUnion, +} + +#[test] +fn two_fixed_union() { + let eight = TwoFixedUnion::U8(1); + let sixteen = TwoFixedUnion::U16(1); + + assert_encode_decode(&eight, &[0, 1]); + assert_encode_decode(&sixteen, &[1, 1, 0]); + + assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); + assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct VariableA { + a: u8, + b: Vec, +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct VariableB { + a: Vec, + b: u8, +} + +#[derive(PartialEq, Debug, Encode)] +#[ssz(enum_behaviour = "transparent")] +enum TwoVariableTrans { + A(VariableA), + B(VariableB), +} + +#[derive(PartialEq, Debug, Encode)] +struct TwoVariableTransStruct { + a: TwoVariableTrans, +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoVariableUnion { + A(VariableA), + B(VariableB), +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct TwoVariableUnionStruct { + a: TwoVariableUnion, +} + +#[test] +fn two_variable_trans() { + let trans_a = TwoVariableTrans::A(VariableA { + a: 1, + b: vec![2, 3], + }); + let trans_b = TwoVariableTrans::B(VariableB { + a: vec![1, 2], + b: 3, + }); + + assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); + assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); + + assert_encode( + &TwoVariableTransStruct { a: trans_a }, + &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], + ); + assert_encode( + &TwoVariableTransStruct { a: trans_b }, + &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], + ); +} + +#[test] +fn two_variable_union() { + let union_a = TwoVariableUnion::A(VariableA { + a: 1, + b: vec![2, 3], + }); + let union_b = TwoVariableUnion::B(VariableB { + a: vec![1, 2], + b: 3, + }); + + assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); + assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); + + assert_encode_decode( + &TwoVariableUnionStruct { a: union_a }, + &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], + ); + assert_encode_decode( + &TwoVariableUnionStruct { a: union_b }, + &[4, 0, 
0, 0, 1, 5, 0, 0, 0, 3, 1, 2], + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoVecUnion { + A(Vec), + B(Vec), +} + +#[test] +fn two_vec_union() { + assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); + assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); + + assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); + assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); + + assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); + assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStruct { + inner: Vec, +} + +impl TransparentStruct { + fn new(inner: u8) -> Self { + Self { inner: vec![inner] } + } +} + +#[test] +fn transparent_struct() { + assert_encode_decode(&TransparentStruct::new(42), &vec![42_u8].as_ssz_bytes()); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructSkippedField { + inner: Vec, + #[ssz(skip_serializing, skip_deserializing)] + skipped: PhantomData, +} + +impl TransparentStructSkippedField { + fn new(inner: u8) -> Self { + Self { + inner: vec![inner], + skipped: PhantomData, + } + } +} + +#[test] +fn transparent_struct_skipped_field() { + assert_encode_decode( + &TransparentStructSkippedField::new(42), + &vec![42_u8].as_ssz_bytes(), + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructNewType(Vec); + +#[test] +fn transparent_struct_newtype() { + assert_encode_decode( + &TransparentStructNewType(vec![42_u8]), + &vec![42_u8].as_ssz_bytes(), + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructNewTypeSkippedField( + Vec, + #[ssz(skip_serializing, skip_deserializing)] PhantomData, +); + +impl TransparentStructNewTypeSkippedField { + fn new(inner: Vec) -> Self { + Self(inner, PhantomData) + } +} + +#[test] +fn transparent_struct_newtype_skipped_field() { + assert_encode_decode( + &TransparentStructNewTypeSkippedField::new(vec![42_u8]), + &vec![42_u8].as_ssz_bytes(), + ); +} diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 438879c133d..410a6bc4d5a 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -662,7 +662,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let size = N::to_usize(); let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; - Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } @@ -674,7 +674,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let size = core::cmp::min(rand, max_size); let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; - Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index e80c93da448..63a60de7ebd 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -293,7 +293,7 @@ impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrar for _ in 0..size { vec.push(::arbitrary(u)?); } - Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) 
+ Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 0134c183b17..58f553cf83a 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -178,6 +178,15 @@ impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { } } +impl IntoIterator for VariableList { + type Item = T; + type IntoIter = alloc::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + impl tree_hash::TreeHash for VariableList where T: tree_hash::TreeHash, @@ -275,7 +284,7 @@ impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrar for _ in 0..size { vec.push(::arbitrary(u)?); } - Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 9b113d1df42..a0c6a97118a 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -14,7 +14,7 @@ bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" eth2_ssz = { version = "0.4.1", path = "../ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } eth2_ssz_types = { version = "0.2.2", path = "../ssz_types" } merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index ac2dba875e7..d4675f5ef5d 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -50,9 +50,11 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) => whistleblower_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)?, + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + whistleblower_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)? + } }; // Ensure the whistleblower index is in the validator registry. 
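For a rough feel of the Capella arm added above: the proposer still receives `PROPOSER_WEIGHT / WEIGHT_DENOMINATOR` of the whistleblower reward. A worked example, assuming the mainnet constants (`WHISTLEBLOWER_REWARD_QUOTIENT = 512`, `PROPOSER_WEIGHT = 8`, `WEIGHT_DENOMINATOR = 64`) and a 32 ETH effective balance:

```rust
fn main() {
    let effective_balance_gwei: u64 = 32_000_000_000;
    // Whistleblower reward: effective balance divided by the quotient.
    let whistleblower_reward = effective_balance_gwei / 512;
    // Proposer's share of that reward under Altair-style weights.
    let proposer_reward = whistleblower_reward * 8 / 64;
    assert_eq!(whistleblower_reward, 62_500_000); // Gwei
    assert_eq!(proposer_reward, 7_812_500); // Gwei
}
```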
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 0bd5f61aff8..ccf8cefb69f 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -4,8 +4,8 @@ use std::collections::{hash_map::Entry, HashMap}; use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - Attestation, AttestationData, BeaconState, BeaconStateError, BitList, ChainSpec, Epoch, - EthSpec, ExecPayload, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, + AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, + ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -98,7 +98,7 @@ impl ConsensusContext { self } - pub fn get_current_block_root>( + pub fn get_current_block_root>( &mut self, block: &SignedBeaconBlock, ) -> Result { diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index fb2c9bfa7d0..68f04b554e3 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,7 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -61,15 +61,34 @@ pub fn initialize_beacon_state_from_eth1( .bellatrix_fork_epoch .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) { + // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderMerge::default() upgrade_to_bellatrix(&mut state, spec)?; // Remove intermediate Altair fork from `state.fork`. state.fork_mut().previous_version = spec.bellatrix_fork_version; // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing - *state.latest_execution_payload_header_mut()? = - execution_payload_header.unwrap_or_default(); + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_merge_mut()? = header.clone(); + } + } + + // Upgrade to capella if configured from genesis + if spec + .capella_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_capella(&mut state, spec)?; + + // Remove intermediate Bellatrix fork from `state.fork`. + state.fork_mut().previous_version = spec.capella_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Capella(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_capella_mut()? 
= header.clone(); + } } // Now that we have our validators, initialize the caches (including the committees) diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index e4f36bedd8c..9641e8f96ec 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -41,4 +41,4 @@ pub use per_epoch_processing::{ errors::EpochProcessingError, process_epoch as per_epoch_processing, }; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; -pub use verify_operation::{SigVerifiedOp, VerifyOperation}; +pub use verify_operation::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 7d0cb01aebc..c564b98d669 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -18,6 +18,7 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; +pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -32,10 +33,13 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; +mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; +use crate::common::decrease_balance; + #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -88,7 +92,7 @@ pub enum VerifyBlockRoot { /// re-calculating the root when it is already known. Note `block_root` should be equal to the /// tree hash root of the block, NOT the signing root of the block. This function takes /// care of mixing in the domain. -pub fn per_block_processing>( +pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, block_signature_strategy: BlockSignatureStrategy, @@ -156,7 +160,8 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - process_execution_payload(state, payload, spec)?; + process_withdrawals::(state, payload, spec)?; + process_execution_payload::(state, payload, spec)?; } process_randao(state, block, verify_randao, ctxt, spec)?; @@ -235,7 +240,7 @@ pub fn process_block_header( /// Verifies the signature of a block. /// /// Spec v0.12.1 -pub fn verify_block_signature>( +pub fn verify_block_signature>( state: &BeaconState, block: &SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -261,7 +266,7 @@ pub fn verify_block_signature>( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. 
-pub fn process_randao>( +pub fn process_randao>( state: &mut BeaconState, block: BeaconBlockRef<'_, T, Payload>, verify_signatures: VerifySignatures, @@ -334,17 +339,17 @@ pub fn get_new_eth1_data( /// Contains a partial set of checks from the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn partially_verify_execution_payload>( +pub fn partially_verify_execution_payload>( state: &BeaconState, block_slot: Slot, - payload: &Payload, + payload: Payload::Ref<'_>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if is_merge_transition_complete(state) { block_verify!( - payload.parent_hash() == state.latest_execution_payload_header()?.block_hash, + payload.parent_hash() == state.latest_execution_payload_header()?.block_hash(), BlockProcessingError::ExecutionHashChainIncontiguous { - expected: state.latest_execution_payload_header()?.block_hash, + expected: state.latest_execution_payload_header()?.block_hash(), found: payload.parent_hash(), } ); @@ -376,14 +381,27 @@ pub fn partially_verify_execution_payload>( /// Partially equivalent to the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn process_execution_payload>( +pub fn process_execution_payload>( state: &mut BeaconState, - payload: &Payload, + payload: Payload::Ref<'_>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload(state, state.slot(), payload, spec)?; + partially_verify_execution_payload::(state, state.slot(), payload, spec)?; - *state.latest_execution_payload_header_mut()? = payload.to_execution_payload_header(); + match state.latest_execution_payload_header_mut()? { + ExecutionPayloadHeaderRefMut::Merge(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Merge(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + ExecutionPayloadHeaderRefMut::Capella(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Capella(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + } Ok(()) } @@ -392,30 +410,37 @@ pub fn process_execution_payload>( /// the merge has happened or if we're on the transition block. Thus we don't want to propagate /// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to /// repeaetedly write code to treat these errors as false. -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_complete +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { + // We must check defaultness against the payload header with 0x0 roots, as that's what's meant + // by `ExecutionPayloadHeader()` in the spec. 
state .latest_execution_payload_header() - .map(|header| *header != >::default()) + .map(|header| !header.is_default_with_zero_roots()) .unwrap_or(false) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_block -pub fn is_merge_transition_block>( +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block +pub fn is_merge_transition_block>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { + // For execution payloads in blocks (which may be headers) we must check defaultness against + // the payload with `transactions_root` equal to the tree hash of the empty list. body.execution_payload() - .map(|payload| !is_merge_transition_complete(state) && *payload != Payload::default()) + .map(|payload| { + !is_merge_transition_complete(state) && !payload.is_default_with_empty_roots() + }) .unwrap_or(false) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_execution_enabled -pub fn is_execution_enabled>( +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_execution_enabled +pub fn is_execution_enabled>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { is_merge_transition_block(state, body) || is_merge_transition_complete(state) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot + +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, block_slot: Slot, @@ -426,3 +451,115 @@ pub fn compute_timestamp_at_slot( .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } + +/// Compute the next batch of withdrawals which should be included in a block. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals +pub fn get_expected_withdrawals( + state: &BeaconState, + spec: &ChainSpec, +) -> Result, BlockProcessingError> { + let epoch = state.current_epoch(); + let mut withdrawal_index = state.next_withdrawal_index()?; + let mut validator_index = state.next_withdrawal_validator_index()?; + let mut withdrawals = vec![]; + + let bound = std::cmp::min( + state.validators().len() as u64, + spec.max_validators_per_withdrawals_sweep, + ); + for _ in 0..bound { + let validator = state.get_validator(validator_index as usize)?; + let balance = *state.balances().get(validator_index as usize).ok_or( + BeaconStateError::BalancesOutOfBounds(validator_index as usize), + )?; + if validator.is_fully_withdrawable_at(balance, epoch, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, + amount: balance, + }); + withdrawal_index.safe_add_assign(1)?; + } else if validator.is_partially_withdrawable_validator(balance, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, + amount: balance.safe_sub(spec.max_effective_balance)?, + }); + withdrawal_index.safe_add_assign(1)?; + } + if withdrawals.len() == T::max_withdrawals_per_payload() { + break; + } + validator_index = validator_index + .safe_add(1)? 
+ .safe_rem(state.validators().len() as u64)?; + } + + Ok(withdrawals.into()) +} + +/// Apply withdrawals to the state. +pub fn process_withdrawals>( + state: &mut BeaconState, + payload: Payload::Ref<'_>, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match state { + BeaconState::Merge(_) => Ok(()), + BeaconState::Capella(_) => { + let expected_withdrawals = get_expected_withdrawals(state, spec)?; + let expected_root = expected_withdrawals.tree_hash_root(); + let withdrawals_root = payload.withdrawals_root()?; + + if expected_root != withdrawals_root { + return Err(BlockProcessingError::WithdrawalsRootMismatch { + expected: expected_root, + found: withdrawals_root, + }); + } + + for withdrawal in expected_withdrawals.iter() { + decrease_balance( + state, + withdrawal.validator_index as usize, + withdrawal.amount, + )?; + } + + // Update the next withdrawal index if this block contained withdrawals + if let Some(latest_withdrawal) = expected_withdrawals.last() { + *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; + + // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == T::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; + } + } + + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != T::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; + } + + Ok(()) + } + // these shouldn't even be encountered but they're here for completeness + BeaconState::Base(_) | BeaconState::Altair(_) => Ok(()), + } +} diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 5e52ff8cb83..709302eec17 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -7,7 +7,8 @@ use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, + AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, + SignedBeaconBlock, }; pub type Result = std::result::Result; @@ -124,7 +125,7 @@ where /// contains invalid signatures on deposits._ /// /// See `Self::verify` for more detail. - pub fn verify_entire_block>( + pub fn verify_entire_block>( state: &'a BeaconState, get_pubkey: F, decompressor: D, @@ -138,7 +139,7 @@ where } /// Includes all signatures on the block (except the deposit signatures) for verification. - pub fn include_all_signatures>( + pub fn include_all_signatures>( &mut self, block: &'a SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -155,7 +156,7 @@ where /// Includes all signatures on the block (except the deposit signatures and the proposal /// signature) for verification. 
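The sweep in `get_expected_withdrawals` above probes at most `MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP` validators and emits at most `MAX_WITHDRAWALS_PER_PAYLOAD` withdrawals per block: a fully withdrawable validator has its whole balance swept, a partially withdrawable one only the excess over the max effective balance. A self-contained sketch of that rule with hand-rolled `Validator`/`Withdrawal` structs and mainnet-style constants; the two predicates are simplified stand-ins for `is_fully_withdrawable_at` / `is_partially_withdrawable_validator`:

const MAX_EFFECTIVE_BALANCE: u64 = 32_000_000_000; // Gwei, mainnet-style
const MAX_WITHDRAWALS_PER_PAYLOAD: usize = 16;
const MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: u64 = 16_384;

struct Validator {
    balance: u64,
    effective_balance: u64,
    withdrawable_epoch: u64,
    has_execution_credentials: bool, // i.e. 0x01 withdrawal credentials
}

#[derive(Debug)]
struct Withdrawal { index: u64, validator_index: u64, amount: u64 }

fn expected_withdrawals(
    validators: &[Validator],
    epoch: u64,
    mut withdrawal_index: u64,
    mut validator_index: u64,
) -> Vec<Withdrawal> {
    let mut withdrawals = Vec::new();
    let bound = std::cmp::min(validators.len() as u64, MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP);
    for _ in 0..bound {
        let v = &validators[validator_index as usize];
        let fully_withdrawable =
            v.has_execution_credentials && v.withdrawable_epoch <= epoch && v.balance > 0;
        let partially_withdrawable = v.has_execution_credentials
            && v.effective_balance == MAX_EFFECTIVE_BALANCE
            && v.balance > MAX_EFFECTIVE_BALANCE;
        if fully_withdrawable {
            // Sweep the entire balance.
            withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, amount: v.balance });
            withdrawal_index += 1;
        } else if partially_withdrawable {
            // Sweep only the excess over the max effective balance.
            withdrawals.push(Withdrawal {
                index: withdrawal_index,
                validator_index,
                amount: v.balance - MAX_EFFECTIVE_BALANCE,
            });
            withdrawal_index += 1;
        }
        if withdrawals.len() == MAX_WITHDRAWALS_PER_PAYLOAD {
            break;
        }
        validator_index = (validator_index + 1) % validators.len() as u64;
    }
    withdrawals
}

fn main() {
    // One exited validator (full withdrawal) and one active validator with excess balance.
    let validators = vec![
        Validator { balance: 31_000_000_000, effective_balance: 31_000_000_000, withdrawable_epoch: 100, has_execution_credentials: true },
        Validator { balance: 33_000_000_000, effective_balance: MAX_EFFECTIVE_BALANCE, withdrawable_epoch: u64::MAX, has_execution_credentials: true },
    ];
    let withdrawals = expected_withdrawals(&validators, 200, 0, 0);
    assert_eq!(withdrawals.len(), 2);
    assert_eq!(withdrawals[0].amount, 31_000_000_000); // full sweep of validator 0
    assert_eq!(withdrawals[1].amount, 1_000_000_000);  // excess over 32 ETH for validator 1
    assert_eq!(withdrawals[1].index, 1);
    assert_eq!(withdrawals[1].validator_index, 1);
}

As `process_withdrawals` above shows, the sweep cursor then advances: a full payload restarts just past the last withdrawal's validator index, otherwise the cursor jumps ahead by the full sweep bound, both modulo the validator count.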
- pub fn include_all_signatures_except_proposal>( + pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -169,12 +170,13 @@ where // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; self.include_sync_aggregate(block)?; + self.include_bls_to_execution_changes(block)?; Ok(()) } /// Includes the block signature for `self.block` for verification. - pub fn include_block_proposal>( + pub fn include_block_proposal>( &mut self, block: &'a SignedBeaconBlock, block_root: Option, @@ -193,7 +195,7 @@ where } /// Includes the randao signature for `self.block` for verification. - pub fn include_randao_reveal>( + pub fn include_randao_reveal>( &mut self, block: &'a SignedBeaconBlock, verified_proposer_index: Option, @@ -210,7 +212,7 @@ where } /// Includes all signatures in `self.block.body.proposer_slashings` for verification. - pub fn include_proposer_slashings>( + pub fn include_proposer_slashings>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -239,7 +241,7 @@ where } /// Includes all signatures in `self.block.body.attester_slashings` for verification. - pub fn include_attester_slashings>( + pub fn include_attester_slashings>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -268,7 +270,7 @@ where } /// Includes all signatures in `self.block.body.attestations` for verification. - pub fn include_attestations>( + pub fn include_attestations>( &mut self, block: &'a SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -298,7 +300,7 @@ where } /// Includes all signatures in `self.block.body.voluntary_exits` for verification. - pub fn include_exits>( + pub fn include_exits>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -322,7 +324,7 @@ where } /// Include the signature of the block's sync aggregate (if it exists) for verification. - pub fn include_sync_aggregate>( + pub fn include_sync_aggregate>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -341,6 +343,24 @@ where Ok(()) } + /// Include the signature of the block's BLS to execution changes for verification. + pub fn include_bls_to_execution_changes>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { + // To improve performance we might want to decompress the withdrawal pubkeys in parallel. + if let Ok(bls_to_execution_changes) = block.message().body().bls_to_execution_changes() { + for bls_to_execution_change in bls_to_execution_changes { + self.sets.push(bls_execution_change_signature_set( + self.state, + bls_to_execution_change, + self.spec, + )?); + } + } + Ok(()) + } + /// Verify all the signatures that have been included in `self`, returning `true` if and only if /// all the signatures are valid. /// diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 71bd55f883c..1aaf298d690 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -2,6 +2,7 @@ use super::signature_sets::Error as SignatureSetError; use crate::ContextError; use merkle_proof::MerkleTreeError; use safe_arith::ArithError; +use ssz::DecodeError; use types::*; /// The error returned from the `per_block_processing` function. 
Indicates that a block is either @@ -48,12 +49,17 @@ pub enum BlockProcessingError { index: usize, reason: ExitInvalid, }, + BlsExecutionChangeInvalid { + index: usize, + reason: BlsExecutionChangeInvalid, + }, SyncAggregateInvalid { reason: SyncAggregateInvalid, }, BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + SszDecodeError(DecodeError), MerkleTreeError(MerkleTreeError), ArithError(ArithError), InconsistentBlockFork(InconsistentFork), @@ -72,6 +78,11 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + WithdrawalsRootMismatch { + expected: Hash256, + found: Hash256, + }, + WithdrawalCredentialsInvalid, } impl From for BlockProcessingError { @@ -92,6 +103,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(error: DecodeError) -> Self { + BlockProcessingError::SszDecodeError(error) + } +} + impl From for BlockProcessingError { fn from(e: ArithError) -> Self { BlockProcessingError::ArithError(e) @@ -160,7 +177,8 @@ impl_into_block_processing_error_with_index!( IndexedAttestationInvalid, AttestationInvalid, DepositInvalid, - ExitInvalid + ExitInvalid, + BlsExecutionChangeInvalid ); pub type HeaderValidationError = BlockOperationError; @@ -170,6 +188,7 @@ pub type AttestationValidationError = BlockOperationError; pub type SyncCommitteeMessageValidationError = BlockOperationError; pub type DepositValidationError = BlockOperationError; pub type ExitValidationError = BlockOperationError; +pub type BlsExecutionChangeValidationError = BlockOperationError; #[derive(Debug, PartialEq, Clone)] pub enum BlockOperationError { @@ -274,7 +293,7 @@ pub enum AttesterSlashingInvalid { /// Describes why an object is invalid. #[derive(Debug, PartialEq, Clone)] pub enum AttestationInvalid { - /// Commmittee index exceeds number of committees in that slot. + /// Committee index exceeds number of committees in that slot. BadCommitteeIndex, /// Attestation included before the inclusion delay. IncludedTooEarly { @@ -385,6 +404,18 @@ pub enum ExitInvalid { SignatureSetError(SignatureSetError), } +#[derive(Debug, PartialEq, Clone)] +pub enum BlsExecutionChangeInvalid { + /// The specified validator is not in the state's validator registry. + ValidatorUnknown(u64), + /// Validator does not have BLS Withdrawal credentials before this change. + NonBlsWithdrawalCredentials, + /// Provided BLS pubkey does not match withdrawal credentials. + WithdrawalCredentialsMismatch, + /// The signature is invalid. + BadSignature, +} + #[derive(Debug, PartialEq, Clone)] pub enum SyncAggregateInvalid { /// One or more of the aggregate public keys is invalid. 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9f27c4c9a1e..4bee596615a 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -9,9 +9,9 @@ use crate::VerifySignatures; use safe_arith::SafeArith; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_operations>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -33,6 +33,11 @@ pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( process_attestations(state, block_body, verify_signatures, ctxt, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; + + if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { + process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; + } + Ok(()) } @@ -232,9 +237,9 @@ pub fn process_attester_slashings( } /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. -pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_attestations>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -249,7 +254,9 @@ pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( spec, )?; } - BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { + BeaconBlockBodyRef::Altair(_) + | BeaconBlockBodyRef::Merge(_) + | BeaconBlockBodyRef::Capella(_) => { altair::process_attestations( state, block_body.attestations(), @@ -275,13 +282,39 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; + verify_exit(state, None, exit, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; } Ok(()) } +/// Validates each `bls_to_execution_change` and updates the state +/// +/// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns +/// an `Err` describing the invalid object or cause of failure. +pub fn process_bls_to_execution_changes( + state: &mut BeaconState, + bls_to_execution_changes: &[SignedBlsToExecutionChange], + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() { + verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + + state + .get_validator_mut(signed_address_change.message.validator_index as usize)? 
+ .change_withdrawal_credentials( + &signed_address_change.message.to_execution_address, + spec, + ); + } + + Ok(()) +} + /// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. /// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 90bbdd56feb..c05d3f057d7 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -7,12 +7,12 @@ use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, - DepositData, Domain, Epoch, EthSpec, ExecPayload, Fork, Hash256, InconsistentFork, - IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, + AbstractExecPayload, AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, + BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, + InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, - SyncAggregatorSelectionData, Unsigned, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, + SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -71,7 +71,7 @@ where } /// A signature set that is valid if a block was signed by the expected block producer. -pub fn block_proposal_signature_set<'a, T, F, Payload: ExecPayload>( +pub fn block_proposal_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, get_pubkey: F, signed_block: &'a SignedBeaconBlock, @@ -113,7 +113,7 @@ where /// Unlike `block_proposal_signature_set` this does **not** check that the proposer index is /// correct according to the shuffling. It should only be used if no suitable `BeaconState` is /// available. -pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: ExecPayload>( +pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: AbstractExecPayload>( signed_block: &'a SignedBeaconBlock, block_root: Option, proposer_index: u64, @@ -156,8 +156,34 @@ where )) } +pub fn bls_execution_change_signature_set<'a, T: EthSpec>( + state: &'a BeaconState, + signed_address_change: &'a SignedBlsToExecutionChange, + spec: &'a ChainSpec, +) -> Result> { + let domain = spec.compute_domain( + Domain::BlsToExecutionChange, + spec.genesis_fork_version, + state.genesis_validators_root(), + ); + let message = signed_address_change.message.signing_root(domain); + let signing_key = Cow::Owned( + signed_address_change + .message + .from_bls_pubkey + .decompress() + .map_err(|_| Error::PublicKeyDecompressionFailed)?, + ); + + Ok(SignatureSet::single_pubkey( + &signed_address_change.signature, + signing_key, + message, + )) +} + /// A signature set that is valid if the block proposers randao reveal signature is correct. 
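The `change_withdrawal_credentials` call above rotates the validator's credentials to the execution-address form. Per the Capella spec the new credentials are the 0x01 prefix byte, eleven zero bytes, then the 20-byte execution address; a small sketch of that layout (byte values are illustrative only):

const ETH1_ADDRESS_WITHDRAWAL_PREFIX: u8 = 0x01;

// Build 0x01 withdrawal credentials from a 20-byte execution address:
// prefix byte || 11 zero bytes || address.
fn execution_withdrawal_credentials(address: [u8; 20]) -> [u8; 32] {
    let mut credentials = [0u8; 32];
    credentials[0] = ETH1_ADDRESS_WITHDRAWAL_PREFIX;
    credentials[12..].copy_from_slice(&address);
    credentials
}

fn main() {
    let creds = execution_withdrawal_credentials([0xab; 20]);
    assert_eq!(creds[0], 0x01);
    assert!(creds[1..12].iter().all(|b| *b == 0));
    assert_eq!(&creds[12..], &[0xab; 20]);
}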
-pub fn randao_signature_set<'a, T, F, Payload: ExecPayload>( +pub fn randao_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, get_pubkey: F, block: BeaconBlockRef<'a, T, Payload>, diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index b7d28832db0..6eabbf0d44a 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -978,8 +978,14 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against phase0 state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against phase0 state"); /* * Ensure the exit verifies after Altair. @@ -992,8 +998,14 @@ async fn fork_spanning_exit() { let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against altair state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against altair state"); /* * Ensure the exit no longer verifies after Bellatrix. @@ -1009,6 +1021,12 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect_err("phase0 exit does not verify against bellatrix state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect_err("phase0 exit does not verify against bellatrix state"); } diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs new file mode 100644 index 00000000000..bb26799250d --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -0,0 +1,56 @@ +use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; +use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; +use crate::VerifySignatures; +use eth2_hashing::hash; +use types::*; + +type Result = std::result::Result>; + +fn error(reason: Invalid) -> BlockOperationError { + BlockOperationError::invalid(reason) +} + +/// Indicates if a `BlsToExecutionChange` is valid to be included in a block, +/// where the block is being applied to the given `state`. +/// +/// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity. 
+pub fn verify_bls_to_execution_change( + state: &BeaconState, + signed_address_change: &SignedBlsToExecutionChange, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<()> { + let address_change = &signed_address_change.message; + + let validator = state + .validators() + .get(address_change.validator_index as usize) + .ok_or_else(|| error(Invalid::ValidatorUnknown(address_change.validator_index)))?; + + verify!( + validator + .withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) + .unwrap_or(false), + Invalid::NonBlsWithdrawalCredentials + ); + + // Re-hashing the pubkey isn't necessary during block replay, so we may want to skip that in + // future. + let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); + verify!( + validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), + Invalid::WithdrawalCredentialsMismatch + ); + + if verify_signatures.is_true() { + verify!( + bls_execution_change_signature_set(state, signed_address_change, spec)?.verify(), + Invalid::BadSignature + ); + } + + Ok(()) +} diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index f17e5fcd230..9e9282912de 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -20,10 +20,12 @@ fn error(reason: ExitInvalid) -> BlockOperationError { /// Spec v0.12.1 pub fn verify_exit( state: &BeaconState, + current_epoch: Option, signed_exit: &SignedVoluntaryExit, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<()> { + let current_epoch = current_epoch.unwrap_or(state.current_epoch()); let exit = &signed_exit.message; let validator = state @@ -33,7 +35,7 @@ pub fn verify_exit( // Verify the validator is active. verify!( - validator.is_active_at(state.current_epoch()), + validator.is_active_at(current_epoch), ExitInvalid::NotActive(exit.validator_index) ); @@ -45,9 +47,9 @@ pub fn verify_exit( // Exits must specify an epoch when they become valid; they are not valid before then. 
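The two structural checks in `verify_bls_to_execution_change` above amount to: the first credential byte equals the BLS withdrawal prefix (0x00), and the remaining 31 bytes equal the SHA-256 hash of the serialized pubkey with its first byte dropped. A standalone sketch of that comparison, assuming (as `eth2_hashing::hash` does) that the hash is SHA-256 and using the `sha2` crate in its place; the pubkey bytes are dummy values, not a real BLS key:

use sha2::{Digest, Sha256};

const BLS_WITHDRAWAL_PREFIX: u8 = 0x00;

// Check that `credentials` were derived from `pubkey`:
// credentials[0] == 0x00 and credentials[1..] == sha256(pubkey)[1..].
fn credentials_match_bls_pubkey(credentials: &[u8; 32], pubkey: &[u8; 48]) -> bool {
    let digest = Sha256::digest(pubkey);
    credentials[0] == BLS_WITHDRAWAL_PREFIX && credentials[1..] == digest[1..]
}

fn main() {
    let pubkey = [0x42u8; 48]; // dummy bytes, not a valid BLS public key
    let mut credentials = [0u8; 32];
    credentials[1..].copy_from_slice(&Sha256::digest(pubkey)[1..]);
    assert!(credentials_match_bls_pubkey(&credentials, &pubkey));
}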
verify!( - state.current_epoch() >= exit.epoch, + current_epoch >= exit.epoch, ExitInvalid::FutureEpoch { - state: state.current_epoch(), + state: current_epoch, exit: exit.epoch } ); @@ -57,9 +59,9 @@ pub fn verify_exit( .activation_epoch .safe_add(spec.shard_committee_period)?; verify!( - state.current_epoch() >= earliest_exit_epoch, + current_epoch >= earliest_exit_epoch, ExitInvalid::TooYoungToExit { - current_epoch: state.current_epoch(), + current_epoch, earliest_exit_epoch, } ); diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index cb90c67b56d..6350685f822 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -3,14 +3,16 @@ pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; pub use justification_and_finalization_state::JustificationAndFinalizationState; -pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; -pub use slashings::process_slashings; use types::{BeaconState, ChainSpec, EthSpec}; + +pub use registry_updates::process_registry_updates; +pub use slashings::process_slashings; pub use weigh_justification_and_finalization::weigh_justification_and_finalization; pub mod altair; pub mod base; +pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -38,6 +40,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Capella(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs new file mode 100644 index 00000000000..aaf301f29ec --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -0,0 +1,78 @@ +use super::altair::inactivity_updates::process_inactivity_updates; +use super::altair::justification_and_finalization::process_justification_and_finalization; +use super::altair::participation_cache::ParticipationCache; +use super::altair::participation_flag_updates::process_participation_flag_updates; +use super::altair::rewards_and_penalties::process_rewards_and_penalties; +use super::altair::sync_committee_updates::process_sync_committee_updates; +use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::per_epoch_processing::{ + effective_balance_updates::process_effective_balance_updates, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, +}; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub use historical_summaries_update::process_historical_summaries_update; + +mod historical_summaries_update; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Pre-compute participating indices and total balances. + let participation_cache = ParticipationCache::new(state, spec)?; + let sync_committee = state.current_sync_committee()?.clone(); + + // Justification and finalization. 
+ let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); + + process_inactivity_updates(state, &participation_cache, spec)?; + + // Rewards and Penalties. + process_rewards_and_penalties(state, &participation_cache, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. + process_slashings( + state, + participation_cache.current_epoch_total_active_balance(), + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). + process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical summaries accumulator + process_historical_summaries_update(state)?; + + // Rotate current/previous epoch participation + process_participation_flag_updates(state)?; + + process_sync_committee_updates(state, spec)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches(spec)?; + + Ok(EpochProcessingSummary::Altair { + participation_cache, + sync_committee, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs new file mode 100644 index 00000000000..9a87ceb6050 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -0,0 +1,23 @@ +use crate::EpochProcessingError; +use safe_arith::SafeArith; +use types::historical_summary::HistoricalSummary; +use types::{BeaconState, EthSpec}; + +pub fn process_historical_summaries_update( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + // Set historical block root accumulator. + let next_epoch = state.next_epoch()?; + if next_epoch + .as_u64() + .safe_rem((T::slots_per_historical_root() as u64).safe_div(T::slots_per_epoch())?)? + == 0 + { + let summary = HistoricalSummary::new(state); + return state + .historical_summaries_mut()? + .push(summary) + .map_err(Into::into); + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 9018db65bcd..ead06edbf56 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,4 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -55,6 +55,10 @@ pub fn per_slot_processing( if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { upgrade_to_bellatrix(state, spec)?; } + // Capella. 
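The modulus in `process_historical_summaries_update` above means a `HistoricalSummary` is appended once every `SLOTS_PER_HISTORICAL_ROOT / SLOTS_PER_EPOCH` epochs; with mainnet values (8192 slots, 32 slots per epoch) that is every 256 epochs, roughly 27 hours at 12-second slots. A small sketch of the trigger condition with the constants written out:

const SLOTS_PER_HISTORICAL_ROOT: u64 = 8_192; // mainnet preset
const SLOTS_PER_EPOCH: u64 = 32;

// A summary is pushed when the upcoming epoch is a multiple of the period.
fn should_push_summary(next_epoch: u64) -> bool {
    next_epoch % (SLOTS_PER_HISTORICAL_ROOT / SLOTS_PER_EPOCH) == 0
}

fn main() {
    assert_eq!(SLOTS_PER_HISTORICAL_ROOT / SLOTS_PER_EPOCH, 256);
    assert!(should_push_summary(256));
    assert!(!should_push_summary(257));
}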
+ if spec.capella_fork_epoch == Some(state.current_epoch()) { + upgrade_to_capella(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index fdf13c82818..a57d5923f86 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,5 +1,7 @@ pub mod altair; +pub mod capella; pub mod merge; pub use altair::upgrade_to_altair; +pub use capella::upgrade_to_capella; pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs new file mode 100644 index 00000000000..3b933fac37a --- /dev/null +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -0,0 +1,74 @@ +use ssz_types::VariableList; +use std::mem; +use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Merge` state into an `Capella` state. +pub fn upgrade_to_capella( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_merge_mut()?; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. + let post = BeaconState::Capella(BeaconStateCapella { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.capella_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), + // Capella + next_withdrawal_index: 0, + next_withdrawal_validator_index: 0, + historical_summaries: VariableList::default(), + // Caches + total_active_balance: pre.total_active_balance, + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git 
a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index 2e4ed441a47..c172466248a 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -1,7 +1,7 @@ use std::mem; use types::{ BeaconState, BeaconStateError as Error, BeaconStateMerge, ChainSpec, EthSpec, - ExecutionPayloadHeader, Fork, + ExecutionPayloadHeaderMerge, Fork, }; /// Transform a `Altair` state into an `Merge` state. @@ -57,7 +57,7 @@ pub fn upgrade_to_bellatrix( current_sync_committee: pre.current_sync_committee.clone(), next_sync_committee: pre.next_sync_committee.clone(), // Execution - latest_execution_payload_header: >::default(), + latest_execution_payload_header: >::default(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 80dee28f621..864844080fb 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -1,8 +1,10 @@ use crate::per_block_processing::{ errors::{ - AttesterSlashingValidationError, ExitValidationError, ProposerSlashingValidationError, + AttesterSlashingValidationError, BlsExecutionChangeValidationError, ExitValidationError, + ProposerSlashingValidationError, }, - verify_attester_slashing, verify_exit, verify_proposer_slashing, + verify_attester_slashing, verify_bls_to_execution_change, verify_exit, + verify_proposer_slashing, }; use crate::VerifySignatures; use derivative::Derivative; @@ -12,7 +14,7 @@ use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, - SignedVoluntaryExit, + SignedBlsToExecutionChange, SignedVoluntaryExit, }; const MAX_FORKS_VERIFIED_AGAINST: usize = 2; @@ -87,6 +89,7 @@ where } pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { + // The .all() will return true if the iterator is empty. self.as_inner() .verification_epochs() .into_iter() @@ -118,6 +121,8 @@ pub trait VerifyOperation: Encode + Decode + Sized { /// Return the epochs at which parts of this message were verified. /// /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. + /// + /// If the message is valid across all forks it should return an empty smallvec. fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } @@ -129,7 +134,7 @@ impl VerifyOperation for SignedVoluntaryExit { state: &BeaconState, spec: &ChainSpec, ) -> Result, Self::Error> { - verify_exit(state, &self, VerifySignatures::True, spec)?; + verify_exit(state, None, &self, VerifySignatures::True, spec)?; Ok(SigVerifiedOp::new(self, state)) } @@ -182,3 +187,53 @@ impl VerifyOperation for ProposerSlashing { .epoch(E::slots_per_epoch())] } } + +impl VerifyOperation for SignedBlsToExecutionChange { + type Error = BlsExecutionChangeValidationError; + + fn validate( + self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result, Self::Error> { + verify_bls_to_execution_change(state, &self, VerifySignatures::True, spec)?; + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![] + } +} + +/// Trait for operations that can be verified and transformed into a +/// `SigVerifiedOp`. 
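The empty `smallvec![]` returned by `verification_epochs` for `SignedBlsToExecutionChange` above encodes "valid across all forks": its signature is computed over the genesis fork version (see `bls_execution_change_signature_set`), so `signature_is_still_valid`, which folds the recorded epochs with `.all()`, is vacuously true when the list is empty. A tiny illustration of that property with a simplified check; the fork-version bookkeeping here is hypothetical, not the real `SigVerifiedOp` fields:

// Simplified: an op records the fork versions its signature was verified against;
// it stays valid only if every recorded version still matches the current fork.
fn signature_is_still_valid(verified_against: &[[u8; 4]], current_versions: &[[u8; 4]]) -> bool {
    verified_against
        .iter()
        .zip(current_versions)
        .all(|(verified, current)| verified == current)
}

fn main() {
    // An op verified against no fork-specific data (e.g. a BLS-to-execution change,
    // which signs over the genesis fork version) is vacuously still valid.
    assert!(signature_is_still_valid(&[], &[]));
    // An exit verified under fork 0x01 is no longer valid once the fork is 0x02.
    assert!(!signature_is_still_valid(&[[0, 0, 0, 1]], &[[0, 0, 0, 2]]));
}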
+/// +/// The `At` suffix indicates that we can specify a particular epoch at which to +/// verify the operation. +pub trait VerifyOperationAt: VerifyOperation + Sized { + fn validate_at( + self, + state: &BeaconState, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Self::Error>; +} + +impl VerifyOperationAt for SignedVoluntaryExit { + fn validate_at( + self, + state: &BeaconState, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Self::Error> { + verify_exit( + state, + Some(validate_at_epoch), + &self, + VerifySignatures::True, + spec, + )?; + Ok(SigVerifiedOp::new(self, state)) + } +} diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index 9648ab1d47b..4dcb41e0717 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -12,7 +12,7 @@ tree_hash_derive = { version = "0.4.0", path = "../tree_hash_derive" } types = { path = "../types" } beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_ssz = { version = "0.4.1", path = "../ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } [dependencies] ethereum-types = { version = "0.14.1", default-features = false, features = ["rlp", "serialize"] } diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs index 783b868e19d..950eb3aa09c 100644 --- a/consensus/tree_hash/src/impls.rs +++ b/consensus/tree_hash/src/impls.rs @@ -82,6 +82,26 @@ macro_rules! impl_for_lt_32byte_u8_array { impl_for_lt_32byte_u8_array!(4); impl_for_lt_32byte_u8_array!(32); +impl TreeHash for [u8; 48] { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + let values_per_chunk = BYTES_PER_CHUNK; + let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; + merkle_root(self, minimum_chunk_count) + } +} + impl TreeHash for U128 { fn tree_hash_type() -> TreeHashType { TreeHashType::Basic diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs index 21ff324d542..85ece80fb56 100644 --- a/consensus/tree_hash_derive/src/lib.rs +++ b/consensus/tree_hash_derive/src/lib.rs @@ -1,4 +1,3 @@ -#![recursion_limit = "256"] use darling::FromDeriveInput; use proc_macro::TokenStream; use quote::quote; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index dd6b04413dc..a9006368073 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -10,7 +10,7 @@ harness = false [dependencies] merkle_proof = { path = "../../consensus/merkle_proof" } -bls = { path = "../../crypto/bls" } +bls = { path = "../../crypto/bls", features = ["arbitrary"] } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } @@ -25,26 +25,28 @@ safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" -eth2_ssz = { version = "0.4.1", path = "../ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../ssz_derive" } -eth2_ssz_types = { version = "0.2.2", path = "../ssz_types" } -swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } +eth2_ssz = { version = "0.4.1", 
path = "../ssz", features = ["arbitrary"] } +eth2_ssz_derive = { version = "0.3.1", path = "../ssz_derive" } +eth2_ssz_types = { version = "0.2.2", path = "../ssz_types", features = ["arbitrary"] } +swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = { version = "0.4.1", path = "../tree_hash" } +tree_hash = { version = "0.4.1", path = "../tree_hash", features = ["arbitrary"] } tree_hash_derive = { version = "0.4.0", path = "../tree_hash_derive" } rand_xorshift = "0.3.0" cached_tree_hash = { path = "../cached_tree_hash" } serde_yaml = "0.8.13" tempfile = "3.1.0" derivative = "2.1.1" -rusqlite = { version = "0.25.3", features = ["bundled"], optional = true } -arbitrary = { version = "1.0", features = ["derive"], optional = true } +rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } +# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by +# `AbstractExecPayload` +arbitrary = { version = "1.0", features = ["derive"] } eth2_serde_utils = { version = "0.1.1", path = "../serde_utils" } regex = "1.5.5" lazy_static = "1.4.0" parking_lot = "0.12.0" itertools = "0.10.0" -superstruct = "0.5.0" +superstruct = "0.6.0" metastruct = "0.1.0" serde_json = "1.0.74" smallvec = "1.8.0" @@ -63,12 +65,6 @@ default = ["sqlite", "legacy-arith"] # Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. legacy-arith = [] sqlite = ["rusqlite"] -arbitrary-fuzz = [ - "arbitrary", - "ethereum-types/arbitrary", - "bls/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", - "swap_or_not_shuffle/arbitrary", - "tree_hash/arbitrary", -] +# The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. +# For simplicity `Arbitrary` is now derived regardless of the feature's presence. 
+arbitrary-fuzz = [] diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml new file mode 100644 index 00000000000..913c2956ba7 --- /dev/null +++ b/consensus/types/presets/gnosis/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml new file mode 100644 index 00000000000..913c2956ba7 --- /dev/null +++ b/consensus/types/presets/mainnet/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml new file mode 100644 index 00000000000..d27253de871 --- /dev/null +++ b/consensus/types/presets/minimal/capella.yaml @@ -0,0 +1,17 @@ +# Minimal preset - Capella + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + + +# Execution +# --------------------------------------------------------------- +# [customized] 2**2 (= 4) +MAX_WITHDRAWALS_PER_PAYLOAD: 4 + +# Withdrawals processing +# --------------------------------------------------------------- +# [customized] 2**4 (= 16) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16 diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 19c8f8a0a80..39a0a28c0ce 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -11,9 +11,20 @@ use tree_hash_derive::TreeHash; /// A Validators aggregate attestation and selection proof. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 12586e28d5e..5c333e0d456 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -23,12 +23,21 @@ pub enum Error { /// Details an attestation that can be slashable. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct Attestation { pub aggregation_bits: BitList, pub data: AttestationData, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 8792a3c56d7..c6a661c85dd 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -10,8 +10,8 @@ use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, PartialEq, diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index ecfa613ed44..87a9c932a45 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,8 +1,7 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { /// The slot during which the attester must attest. pub slot: Slot, diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index b239f62e46c..c5634950745 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -9,12 +9,21 @@ use tree_hash_derive::TreeHash; /// Two conflicting attestations. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct AttesterSlashing { pub attestation_1: IndexedAttestation, pub attestation_2: IndexedAttestation, diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 0ec1f9a3741..4bf9e641c03 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. 
#[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Debug, @@ -29,10 +29,14 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), - derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), - serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: AbstractExecPayload")), + serde( + bound = "T: EthSpec, Payload: AbstractExecPayload", + deny_unknown_fields + ), + arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload"), ), ref_attributes( derive(Debug, PartialEq, TreeHash), @@ -41,14 +45,16 @@ use tree_hash_derive::TreeHash; map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconBlock = FullPayload> { +pub struct BeaconBlock = FullPayload> { #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] @@ -64,23 +70,32 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, + #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] + pub body: BeaconBlockBodyCapella, } pub type BlindedBeaconBlock = BeaconBlock>; -impl> SignedRoot for BeaconBlock {} -impl<'a, T: EthSpec, Payload: ExecPayload> SignedRoot for BeaconBlockRef<'a, T, Payload> {} +impl> SignedRoot for BeaconBlock {} +impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignedRoot + for BeaconBlockRef<'a, T, Payload> +{ +} + +/// Empty block trait for each block variant to implement. +pub trait EmptyBlock { + /// Returns an empty block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self; +} -impl> BeaconBlock { +impl> BeaconBlock { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { - Self::Merge(BeaconBlockMerge::empty(spec)) - } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { - Self::Altair(BeaconBlockAltair::empty(spec)) - } else { - Self::Base(BeaconBlockBase::empty(spec)) - } + map_fork_name!( + spec.fork_name_at_epoch(T::genesis_epoch()), + Self, + EmptyBlock::empty(spec) + ) } /// Custom SSZ decoder that takes a `ChainSpec` as context. @@ -109,13 +124,11 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockMerge::from_ssz_bytes(bytes) - .map(BeaconBlock::Merge) - .or_else(|_| { - BeaconBlockAltair::from_ssz_bytes(bytes) - .map(BeaconBlock::Altair) - .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) - }) + BeaconBlockCapella::from_ssz_bytes(bytes) + .map(BeaconBlock::Capella) + .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) + .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) + .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. @@ -178,7 +191,7 @@ impl> BeaconBlock { } } -impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, T, Payload> { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -189,6 +202,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Capella { .. } => ForkName::Capella, }; if fork_at_slot == object_fork { @@ -242,12 +256,12 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result<&Payload, Error> { + pub fn execution_payload(&self) -> Result, Error> { self.body().execution_payload() } } -impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRefMut<'a, T, Payload> { /// Convert a mutable reference to a beacon block to a mutable ref to its body. pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T, Payload> { map_beacon_block_ref_mut_into_beacon_block_body_ref_mut!(&'a _, self, |block, cons| cons( @@ -256,9 +270,8 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> } } -impl> BeaconBlockBase { - /// Returns an empty block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { +impl> EmptyBlock for BeaconBlockBase { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockBase { slot: spec.genesis_slot, proposer_index: 0, @@ -281,7 +294,9 @@ impl> BeaconBlockBase { }, } } +} +impl> BeaconBlockBase { /// Return a block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let header = BeaconBlockHeader { @@ -377,9 +392,9 @@ impl> BeaconBlockBase { } } -impl> BeaconBlockAltair { +impl> EmptyBlock for BeaconBlockAltair { /// Returns an empty Altair block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { slot: spec.genesis_slot, proposer_index: 0, @@ -403,7 +418,9 @@ impl> BeaconBlockAltair { }, } } +} +impl> BeaconBlockAltair { /// Return an Altair block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); @@ -436,9 +453,9 @@ impl> BeaconBlockAltair { } } -impl> BeaconBlockMerge { +impl> EmptyBlock for BeaconBlockMerge { /// Returns an empty Merge block to be used during genesis. 
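`any_from_ssz_bytes` above tries the newest variant first and falls back through older forks via `or_else`. A toy illustration of the same fallback shape, with hypothetical one-byte "fork tags" standing in for real SSZ encodings:

#[derive(Debug, PartialEq)]
enum Block { Base(u8), Altair(u8), Merge(u8), Capella(u8) }

// Hypothetical per-fork decoders: each accepts only its own encoding.
fn decode_capella(bytes: &[u8]) -> Result<Block, ()> {
    match bytes { [3, x] => Ok(Block::Capella(*x)), _ => Err(()) }
}
fn decode_merge(bytes: &[u8]) -> Result<Block, ()> {
    match bytes { [2, x] => Ok(Block::Merge(*x)), _ => Err(()) }
}
fn decode_altair(bytes: &[u8]) -> Result<Block, ()> {
    match bytes { [1, x] => Ok(Block::Altair(*x)), _ => Err(()) }
}
fn decode_base(bytes: &[u8]) -> Result<Block, ()> {
    match bytes { [0, x] => Ok(Block::Base(*x)), _ => Err(()) }
}

// Newest-first fallback, mirroring the `or_else` chain in `any_from_ssz_bytes`.
fn any_from_bytes(bytes: &[u8]) -> Result<Block, ()> {
    decode_capella(bytes)
        .or_else(|_| decode_merge(bytes))
        .or_else(|_| decode_altair(bytes))
        .or_else(|_| decode_base(bytes))
}

fn main() {
    assert_eq!(any_from_bytes(&[3, 7]), Ok(Block::Capella(7)));
    assert_eq!(any_from_bytes(&[1, 5]), Ok(Block::Altair(5)));
    assert_eq!(any_from_bytes(&[0, 9]), Ok(Block::Base(9)));
    assert_eq!(any_from_bytes(&[9, 9]), Err(()));
}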
- pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockMerge { slot: spec.genesis_slot, proposer_index: 0, @@ -458,7 +475,82 @@ impl> BeaconBlockMerge { deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), - execution_payload: Payload::default(), + execution_payload: Payload::Merge::default(), + }, + } + } +} + +impl> BeaconBlockCapella { + /// Return a Capella block where the block has maximum size. + pub fn full(spec: &ChainSpec) -> Self { + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let bls_to_execution_changes = vec![ + SignedBlsToExecutionChange { + message: BlsToExecutionChange { + validator_index: 0, + from_bls_pubkey: PublicKeyBytes::empty(), + to_execution_address: Address::zero(), + }, + signature: Signature::empty() + }; + T::max_bls_to_execution_changes() + ] + .into(); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings: base_block.body.attester_slashings, + attestations: base_block.body.attestations, + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + bls_to_execution_changes, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: Payload::Capella::default(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockCapella { + /// Returns an empty Capella block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Capella::default(), + bls_to_execution_changes: VariableList::empty(), }, } } @@ -533,7 +625,7 @@ macro_rules! impl_from { parent_root, state_root, body, - }, payload) + }, payload.map(Into::into)) } } } @@ -542,6 +634,7 @@ macro_rules! impl_from { impl_from!(BeaconBlockBase, >, >, |body: BeaconBlockBodyBase<_, _>| body.into()); impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! impl_clone_as_blinded { @@ -572,6 +665,7 @@ macro_rules! 
impl_clone_as_blinded { impl_clone_as_blinded!(BeaconBlockBase, >, >); impl_clone_as_blinded!(BeaconBlockAltair, >, >); impl_clone_as_blinded!(BeaconBlockMerge, >, >); +impl_clone_as_blinded!(BeaconBlockCapella, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. @@ -601,6 +695,24 @@ impl From>> } } +impl> ForkVersionDeserialize + for BeaconBlock +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconBlock failed to deserialize: {:?}", + e + )))? + )) + } +} + #[cfg(test)] mod tests { use super::*; @@ -650,19 +762,43 @@ mod tests { }); } + #[test] + fn roundtrip_capella_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Capella.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockCapella { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyCapella::random_for_test(rng), + }; + let block = BeaconBlock::Capella(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; - let spec = E::default_spec(); + let mut spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = spec.altair_fork_epoch.unwrap(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let base_epoch = fork_epoch.saturating_sub(1_u64); + let base_epoch = altair_fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); - let altair_epoch = fork_epoch; + let altair_epoch = altair_fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); + let capella_epoch = altair_fork_epoch + 1; + let capella_slot = capella_epoch.start_slot(E::slots_per_epoch()); + + spec.altair_fork_epoch = Some(altair_epoch); + spec.capella_fork_epoch = Some(capella_epoch); // BeaconBlockBase { @@ -707,5 +843,27 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) .expect_err("bad altair block cannot be decoded"); } + + // BeaconBlockCapella + { + let good_block = BeaconBlock::Capella(BeaconBlockCapella { + slot: capella_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. + let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = altair_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good capella block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad capella block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 381a9bd43e3..c0ba8694100 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -13,7 +13,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. 
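For orientation, the new `ForkVersionDeserialize` impl above picks the target variant from a `ForkName` supplied out of band (e.g. the beacon API's `Eth-Consensus-Version` header) and only then runs serde on the JSON value, because the variants themselves are `#[serde(untagged)]`. A minimal sketch of that pattern, assuming `serde` (with derive) and `serde_json` as dependencies and simplified stand-in block types:

use serde::Deserialize;
use serde_json::{json, Value};

#[derive(Deserialize, Debug, PartialEq)]
struct BlockMerge {
    slot: u64,
}

#[derive(Deserialize, Debug, PartialEq)]
struct BlockCapella {
    slot: u64,
    withdrawals: Vec<u64>,
}

#[derive(Debug, PartialEq)]
enum Block {
    Merge(BlockMerge),
    Capella(BlockCapella),
}

#[derive(Debug, Clone, Copy)]
enum ForkName {
    Merge,
    Capella,
}

// The fork name picks the variant; the JSON payload carries no tag of its own.
fn deserialize_by_fork(value: Value, fork: ForkName) -> Result<Block, serde_json::Error> {
    match fork {
        ForkName::Capella => serde_json::from_value(value).map(Block::Capella),
        ForkName::Merge => serde_json::from_value(value).map(Block::Merge),
    }
}

fn main() {
    let v = json!({ "slot": 1, "withdrawals": [0, 1] });
    let block = deserialize_by_fork(v, ForkName::Capella).unwrap();
    assert_eq!(
        block,
        Block::Capella(BlockCapella { slot: 1, withdrawals: vec![0, 1] })
    );
}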
#[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Debug, @@ -25,20 +25,24 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), - derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), - serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: AbstractExecPayload")), + serde( + bound = "T: EthSpec, Payload: AbstractExecPayload", + deny_unknown_fields + ), + arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload"), ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative, arbitrary::Arbitrary)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -pub struct BeaconBlockBody = FullPayload> { +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload")] +pub struct BeaconBlockBody = FullPayload> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, pub graffiti: Graffiti, @@ -47,21 +51,44 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge))] + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + #[serde(flatten)] + pub execution_payload: Payload::Merge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] #[serde(flatten)] - pub execution_payload: Payload, + pub execution_payload: Payload::Capella, + #[superstruct(only(Capella))] + pub bls_to_execution_changes: + VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[serde(skip)] + #[arbitrary(default)] pub _phantom: PhantomData, } +impl> BeaconBlockBody { + pub fn execution_payload(&self) -> Result, Error> { + self.to_ref().execution_payload() + } +} + +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { + pub fn execution_payload(&self) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), + Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), + } + } +} + impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { @@ -69,6 +96,7 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, + BeaconBlockBodyRef::Capella { .. 
} => ForkName::Capella, } } } @@ -214,7 +242,7 @@ impl From>> impl From>> for ( BeaconBlockBodyMerge>, - Option>, + Option>, ) { fn from(body: BeaconBlockBodyMerge>) -> Self { @@ -228,7 +256,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, } = body; ( @@ -242,9 +270,51 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { + execution_payload: BlindedPayloadMerge { + execution_payload_header: From::from(&execution_payload), + }, + }, + Some(execution_payload), + ) + } +} + +impl From>> + for ( + BeaconBlockBodyCapella>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyCapella>) -> Self { + let BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + bls_to_execution_changes, + } = body; + + ( + BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(&execution_payload), }, + bls_to_execution_changes, }, Some(execution_payload), ) @@ -278,7 +348,7 @@ impl BeaconBlockBodyMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, } = self; BeaconBlockBodyMerge { @@ -291,9 +361,43 @@ impl BeaconBlockBodyMerge> { deposits: deposits.clone(), voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), - execution_payload: BlindedPayload { - execution_payload_header: From::from(execution_payload), + execution_payload: BlindedPayloadMerge { + execution_payload_header: execution_payload.into(), + }, + } + } +} + +impl BeaconBlockBodyCapella> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyCapella> { + let BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + bls_to_execution_changes, + } = self; + + BeaconBlockBodyCapella { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadCapella { + execution_payload_header: execution_payload.into(), }, + bls_to_execution_changes: bls_to_execution_changes.clone(), } } } @@ -307,7 +411,7 @@ impl From>> fn from(body: BeaconBlockBody>) -> Self { map_beacon_block_body!(body, |inner, cons| { let (block, payload) = inner.into(); - (cons(block), payload) + (cons(block), payload.map(Into::into)) }) } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index cca8fef8416..c6d6678f31a 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -10,9 +10,19 @@ use tree_hash_derive::TreeHash; /// A header of a `BeaconBlock`. 
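The Merge and new Capella `From` impls above share one shape: destructure the full body, move the execution payload out, rebuild the body around a header derived from it, and hand the payload back alongside instead of cloning it. A simplified stand-in sketch of that split (the `From<&Payload>` header conversion here is a dummy, not the real hash-tree-root based one):

#[derive(Clone, Debug, PartialEq)]
struct ExecutionPayload {
    block_hash: [u8; 32],
    transactions: Vec<Vec<u8>>,
}

#[derive(Debug, PartialEq)]
struct ExecutionPayloadHeader {
    block_hash: [u8; 32],
    transactions_root: [u8; 32],
}

impl From<&ExecutionPayload> for ExecutionPayloadHeader {
    fn from(p: &ExecutionPayload) -> Self {
        Self {
            block_hash: p.block_hash,
            // Stand-in: the real conversion hashes the transactions list.
            transactions_root: [0u8; 32],
        }
    }
}

struct FullBody {
    graffiti: [u8; 32],
    execution_payload: ExecutionPayload,
}

struct BlindedBody {
    graffiti: [u8; 32],
    execution_payload_header: ExecutionPayloadHeader,
}

// Move the payload out once; no clone of the (potentially large) transactions.
fn into_blinded(body: FullBody) -> (BlindedBody, Option<ExecutionPayload>) {
    let FullBody { graffiti, execution_payload } = body;
    (
        BlindedBody {
            graffiti,
            execution_payload_header: (&execution_payload).into(),
        },
        Some(execution_payload),
    )
}

fn main() {
    let (blinded, payload) = into_blinded(FullBody {
        graffiti: [0; 32],
        execution_payload: ExecutionPayload { block_hash: [1; 32], transactions: vec![] },
    });
    assert_eq!(blinded.execution_payload_header.block_hash, [1; 32]);
    assert!(payload.is_some());
}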
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct BeaconBlockHeader { pub slot: Slot, diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/beacon_committee.rs index 6483c009af7..ad293c3a3bb 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/beacon_committee.rs @@ -17,8 +17,7 @@ impl<'a> BeaconCommittee<'a> { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Debug, PartialEq)] +#[derive(arbitrary::Arbitrary, Default, Clone, Debug, PartialEq)] pub struct OwnedBeaconCommittee { pub slot: Slot, pub index: CommitteeIndex, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 12d44741f92..921dafbbc6d 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -14,6 +14,7 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use std::convert::TryInto; +use std::hash::Hash; use std::{fmt, mem, sync::Arc}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; @@ -25,6 +26,7 @@ pub use self::committee_cache::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, CommitteeCache, }; +use crate::historical_summary::HistoricalSummary; pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; @@ -120,6 +122,7 @@ pub enum Error { ArithError(ArithError), MissingBeaconBlock(SignedBeaconBlockHash), MissingBeaconState(BeaconStateHash), + PayloadConversionLogicFlaw, SyncCommitteeNotKnown { current_epoch: Epoch, epoch: Epoch, @@ -144,8 +147,7 @@ impl AllowNextEpoch { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(PartialEq, Eq, Hash, Clone, Copy, arbitrary::Arbitrary)] pub struct BeaconStateHash(Hash256); impl fmt::Debug for BeaconStateHash { @@ -174,7 +176,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. 
#[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Derivative, @@ -187,18 +189,19 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, + arbitrary::Arbitrary ), serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), derivative(Clone), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[arbitrary(bound = "T: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconState @@ -222,6 +225,7 @@ where pub block_roots: FixedVector, #[compare_fields(as_slice)] pub state_roots: FixedVector, + // Frozen in Capella, replaced by historical_summaries pub historical_roots: VariableList, // Ethereum 1.0 chain data @@ -252,9 +256,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub current_epoch_participation: VariableList, // Finality @@ -269,18 +273,37 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] - pub latest_execution_payload_header: ExecutionPayloadHeader, + #[superstruct( + only(Merge), + partial_getter(rename = "latest_execution_payload_header_merge") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Capella), + partial_getter(rename = "latest_execution_payload_header_capella") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + + // Capella + #[superstruct(only(Capella), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub next_withdrawal_index: u64, + #[superstruct(only(Capella), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub next_withdrawal_validator_index: u64, + // Deep history valid from Capella onwards. + #[superstruct(only(Capella))] + pub historical_summaries: VariableList, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] @@ -391,6 +414,7 @@ impl BeaconState { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. } => ForkName::Altair, BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Capella { .. } => ForkName::Capella, }; if fork_at_slot == object_fork { @@ -680,6 +704,33 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } + /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. 
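Context for the two new Capella state fields: `next_withdrawal_index` numbers withdrawals globally, while `next_withdrawal_validator_index` is the cursor into the validator registry from which the bounded withdrawals sweep resumes, advanced modulo the validator count and capped per block by the new `max_validators_per_withdrawals_sweep` value added to `ChainSpec` later in this diff. A loose illustration of that cursor behaviour (a concept sketch only, not the spec's `get_expected_withdrawals`):

// Hypothetical, simplified sweep: visit at most `sweep_bound` validators,
// wrapping around the registry, and report where the next sweep should resume.
fn sweep_cursor(start: u64, validator_count: u64, sweep_bound: u64) -> (Vec<u64>, u64) {
    let mut visited = Vec::new();
    let mut cursor = start;
    for _ in 0..sweep_bound.min(validator_count) {
        visited.push(cursor);
        cursor = (cursor + 1) % validator_count;
    }
    (visited, cursor)
}

fn main() {
    // 5 validators, sweep bounded to 3, resuming from validator 4: the cursor
    // wraps past the end of the registry and the next sweep starts at 2.
    let (visited, next) = sweep_cursor(4, 5, 3);
    assert_eq!(visited, vec![4, 0, 1]);
    assert_eq!(next, 2);
}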
+ pub fn latest_execution_payload_header(&self) -> Result, Error> { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRef::Merge( + &state.latest_execution_payload_header, + )), + BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella( + &state.latest_execution_payload_header, + )), + } + } + + pub fn latest_execution_payload_header_mut( + &mut self, + ) -> Result, Error> { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRefMut::Merge( + &mut state.latest_execution_payload_header, + )), + BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella( + &mut state.latest_execution_payload_header, + )), + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 @@ -1104,6 +1155,7 @@ impl BeaconState { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), + BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), } } @@ -1300,12 +1352,14 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1610,6 +1664,7 @@ impl BeaconState { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), + BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); @@ -1777,7 +1832,24 @@ impl CompareFields for BeaconState { (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y), (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), + (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } } + +impl ForkVersionDeserialize for BeaconState { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconState failed to deserialize: {:?}", + e + )))? 
+ )) + } +} diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 03adaf3d443..8afef1183be 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -336,7 +336,6 @@ pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> V active } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for CommitteeCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 1c199c04755..b657d62ae62 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -61,7 +61,6 @@ impl ExitCache { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for ExitCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index d81801e77d2..590ea30f999 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -42,7 +42,6 @@ impl PubkeyCache { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for PubkeyCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index abca10e3726..d63eaafc4b9 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -2,7 +2,7 @@ use crate::test_utils::*; use crate::test_utils::{SeedableRng, XorShiftRng}; use beacon_chain::test_utils::{ - interop_genesis_state, test_spec, BeaconChainHarness, EphemeralHarnessType, + interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, DEFAULT_ETH1_BLOCK_HASH, }; use beacon_chain::types::{ @@ -551,7 +551,7 @@ fn tree_hash_cache_linear_history_long_skip() { let spec = &test_spec::(); // This state has a cache that advances normally each slot. 
- let mut state: BeaconState = interop_genesis_state( + let mut state: BeaconState = interop_genesis_state_with_eth1( &keypairs, 0, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 2fc56bdc013..efc6573d2bc 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -3,6 +3,7 @@ #![allow(clippy::indexing_slicing)] use super::Error; +use crate::historical_summary::HistoricalSummaryCache; use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use rayon::prelude::*; @@ -142,6 +143,7 @@ pub struct BeaconTreeHashCacheInner { block_roots: TreeHashCache, state_roots: TreeHashCache, historical_roots: TreeHashCache, + historical_summaries: OptionalTreeHashCache, balances: TreeHashCache, randao_mixes: TreeHashCache, slashings: TreeHashCache, @@ -164,6 +166,14 @@ impl BeaconTreeHashCacheInner { let historical_roots = state .historical_roots() .new_tree_hash_cache(&mut fixed_arena); + let historical_summaries = OptionalTreeHashCache::new( + state + .historical_summaries() + .ok() + .map(HistoricalSummaryCache::new) + .as_ref(), + ); + let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); let validators = ValidatorsListTreeHashCache::new::(state.validators()); @@ -200,6 +210,7 @@ impl BeaconTreeHashCacheInner { block_roots, state_roots, historical_roots, + historical_summaries, balances, randao_mixes, slashings, @@ -249,6 +260,7 @@ impl BeaconTreeHashCacheInner { .slashings() .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, ]; + // Participation if let BeaconState::Base(state) = state { leaves.push(state.previous_epoch_attestations.tree_hash_root()); @@ -291,6 +303,24 @@ impl BeaconTreeHashCacheInner { if let Ok(payload_header) = state.latest_execution_payload_header() { leaves.push(payload_header.tree_hash_root()); } + + // Withdrawal indices (Capella and later). + if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { + leaves.push(next_withdrawal_index.tree_hash_root()); + } + if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { + leaves.push(next_withdrawal_validator_index.tree_hash_root()); + } + + // Historical roots/summaries (Capella and later). 
+ if let Ok(historical_summaries) = state.historical_summaries() { + leaves.push( + self.historical_summaries.recalculate_tree_hash_root( + &HistoricalSummaryCache::new(historical_summaries), + )?, + ); + } + Ok(leaves) } @@ -570,7 +600,6 @@ impl OptionalTreeHashCacheInner { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for BeaconTreeHashCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs new file mode 100644 index 00000000000..b279515bd1f --- /dev/null +++ b/consensus/types/src/bls_to_execution_change.rs @@ -0,0 +1,57 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::PublicKeyBytes; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct BlsToExecutionChange { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + pub from_bls_pubkey: PublicKeyBytes, + pub to_execution_address: Address, +} + +impl SignedRoot for BlsToExecutionChange {} + +impl BlsToExecutionChange { + pub fn sign( + self, + secret_key: &SecretKey, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> SignedBlsToExecutionChange { + let domain = spec.compute_domain( + Domain::BlsToExecutionChange, + spec.genesis_fork_version, + genesis_validators_root, + ); + let message = self.signing_root(domain); + SignedBlsToExecutionChange { + message: self, + signature: secret_key.sign(message), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(BlsToExecutionChange); +} diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 047bceae7e2..e922e81c706 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,4 +1,7 @@ -use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use crate::{ + AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName, + ForkVersionDeserialize, SignedRoot, Uint256, +}; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; @@ -10,7 +13,7 @@ use tree_hash_derive::TreeHash; #[serde_as] #[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct BuilderBid> { +pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, #[serde(with = "eth2_serde_utils::quoted_u256")] @@ -21,16 +24,70 @@ pub struct BuilderBid> { _phantom_data: PhantomData, } -impl> SignedRoot for BuilderBid {} +impl> SignedRoot for BuilderBid {} /// Validator registration, for use in interacting with servers implementing the builder API. 
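Worth calling out in the new `bls_to_execution_change.rs`: `BlsToExecutionChange::sign` builds its domain from `Domain::BlsToExecutionChange` together with the genesis fork version and genesis validators root (not the current fork), and the `domain_bls_to_execution_change` constant introduced later in this diff is 10. For orientation, a sketch of the consensus spec's 32-byte domain layout, with a placeholder value standing in for the real SSZ fork-data root:

// compute_domain shape from the consensus spec: 4-byte domain type followed by
// the first 28 bytes of hash_tree_root(ForkData(fork_version, validators_root)).
// `fork_data_root` is a placeholder input here, not computed.
fn compute_domain(domain_type: u32, fork_data_root: [u8; 32]) -> [u8; 32] {
    let mut domain = [0u8; 32];
    domain[..4].copy_from_slice(&domain_type.to_le_bytes());
    domain[4..].copy_from_slice(&fork_data_root[..28]);
    domain
}

fn main() {
    const DOMAIN_BLS_TO_EXECUTION_CHANGE: u32 = 10;
    let domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE, [0xab; 32]);
    // Little-endian 10 gives the spec's 0x0a000000 domain-type prefix.
    assert_eq!(&domain[..4], &[0x0a, 0x00, 0x00, 0x00]);
}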
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct SignedBuilderBid> { +pub struct SignedBuilderBid> { pub message: BuilderBid, pub signature: Signature, } +impl> ForkVersionDeserialize + for BuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |_| { + serde::de::Error::custom( + "BuilderBid failed to deserialize: unable to convert payload header to payload", + ) + }; + + #[derive(Deserialize)] + struct Helper { + header: serde_json::Value, + #[serde(with = "eth2_serde_utils::quoted_u256")] + value: Uint256, + pubkey: PublicKeyBytes, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let payload_header = + ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?; + + Ok(Self { + header: Payload::try_from(payload_header).map_err(convert_err)?, + value: helper.value, + pubkey: helper.pubkey, + _phantom_data: Default::default(), + }) + } +} + +impl> ForkVersionDeserialize + for SignedBuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + struct Helper { + pub message: serde_json::Value, + pub signature: Signature, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + message: BuilderBid::deserialize_by_fork::<'de, D>(helper.message, fork_name)?, + signature: helper.signature, + }) + } +} + struct BlindedPayloadAsHeader(PhantomData); impl> SerializeAs for BlindedPayloadAsHeader { @@ -42,7 +99,7 @@ impl> SerializeAs for BlindedPayloa } } -impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> +impl<'de, E: EthSpec, Payload: AbstractExecPayload> DeserializeAs<'de, Payload> for BlindedPayloadAsHeader { fn deserialize_as(deserializer: D) -> Result @@ -55,7 +112,7 @@ impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> } } -impl> SignedBuilderBid { +impl> SignedBuilderBid { pub fn verify_signature(&self, spec: &ChainSpec) -> bool { self.message .pubkey diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c8333868cd2..dc89ab902f5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -11,6 +11,7 @@ use tree_hash::TreeHash; /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { + BlsToExecutionChange, BeaconProposer, BeaconAttester, Randao, @@ -27,8 +28,7 @@ pub enum Domain { /// Lighthouse's internal configuration struct. /// /// Contains a mixture of "preset" and "config" values w.r.t to the EF definitions. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct ChainSpec { /* * Config name @@ -71,6 +71,7 @@ pub struct ChainSpec { */ pub genesis_fork_version: [u8; 4], pub bls_withdrawal_prefix_byte: u8, + pub eth1_address_withdrawal_prefix_byte: u8, /* * Time parameters @@ -150,6 +151,14 @@ pub struct ChainSpec { pub terminal_block_hash_activation_epoch: Epoch, pub safe_slots_to_import_optimistically: u64, + /* + * Capella hard fork params + */ + pub capella_fork_version: [u8; 4], + /// The Capella fork epoch is optional, with `None` representing "Capella never happens". 
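On the optional Capella fork epoch noted just below: fork resolution stays a newest-first cascade, where a fork only wins if its epoch is set and has been reached, and `None` drops through to the previous fork, which is how "Capella never happens" is expressed. A standalone sketch of that cascade with stand-in types, mirroring the updated `fork_name_at_epoch`:

#[derive(Debug, PartialEq)]
enum ForkName { Base, Altair, Merge, Capella }

struct Spec {
    altair_fork_epoch: Option<u64>,
    bellatrix_fork_epoch: Option<u64>,
    capella_fork_epoch: Option<u64>,
}

impl Spec {
    fn fork_name_at_epoch(&self, epoch: u64) -> ForkName {
        // Newest scheduled fork whose epoch has been reached wins; `None`
        // means "not scheduled" and falls through to the older fork.
        match self.capella_fork_epoch {
            Some(fe) if epoch >= fe => ForkName::Capella,
            _ => match self.bellatrix_fork_epoch {
                Some(fe) if epoch >= fe => ForkName::Merge,
                _ => match self.altair_fork_epoch {
                    Some(fe) if epoch >= fe => ForkName::Altair,
                    _ => ForkName::Base,
                },
            },
        }
    }
}

fn main() {
    let spec = Spec {
        altair_fork_epoch: Some(10),
        bellatrix_fork_epoch: Some(20),
        capella_fork_epoch: None, // "Capella never happens"
    };
    assert_eq!(spec.fork_name_at_epoch(5), ForkName::Base);
    assert_eq!(spec.fork_name_at_epoch(15), ForkName::Altair);
    assert_eq!(spec.fork_name_at_epoch(1_000), ForkName::Merge);
}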
+ pub capella_fork_epoch: Option, + pub max_validators_per_withdrawals_sweep: u64, + /* * Networking */ @@ -169,6 +178,11 @@ pub struct ChainSpec { * Application params */ pub(crate) domain_application_mask: u32, + + /* + * Capella params + */ + pub(crate) domain_bls_to_execution_change: u32, } impl ChainSpec { @@ -233,11 +247,14 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, } } @@ -248,6 +265,7 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, + ForkName::Capella => self.capella_fork_version, } } @@ -257,6 +275,7 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, + ForkName::Capella => self.capella_fork_epoch, } } @@ -266,6 +285,7 @@ impl ChainSpec { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -278,6 +298,7 @@ impl ChainSpec { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -290,6 +311,7 @@ impl ChainSpec { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -337,6 +359,7 @@ impl ChainSpec { Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), + Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, } } @@ -499,7 +522,8 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: [0; 4], - bls_withdrawal_prefix_byte: 0, + bls_withdrawal_prefix_byte: 0x00, + eth1_address_withdrawal_prefix_byte: 0x01, /* * Time parameters @@ -587,6 +611,13 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Capella hard fork params + */ + capella_fork_version: [0x03, 00, 00, 00], + capella_fork_epoch: Some(Epoch::new(194048)), + max_validators_per_withdrawals_sweep: 16384, + /* * Network specific */ @@ -606,6 +637,11 @@ impl ChainSpec { * Application specific */ domain_application_mask: APPLICATION_DOMAIN_BUILDER, + + /* + * Capella params + */ + domain_bls_to_execution_change: 10, } } @@ -645,6 
+681,10 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. .checked_add(Uint256::one()) .expect("addition does not overflow"), + // Capella + capella_fork_version: [0x03, 0x00, 0x00, 0x01], + capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -707,7 +747,8 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: [0x00, 0x00, 0x00, 0x64], - bls_withdrawal_prefix_byte: 0, + bls_withdrawal_prefix_byte: 0x00, + eth1_address_withdrawal_prefix_byte: 0x01, /* * Time parameters @@ -797,6 +838,13 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Capella hard fork params + */ + capella_fork_version: [0x03, 0x00, 0x00, 0x64], + capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, + /* * Network specific */ @@ -816,6 +864,11 @@ impl ChainSpec { * Application specific */ domain_application_mask: APPLICATION_DOMAIN_BUILDER, + + /* + * Capella params + */ + domain_bls_to_execution_change: 10, } } } @@ -875,6 +928,14 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option>, + #[serde(default = "default_capella_fork_version")] + #[serde(with = "eth2_serde_utils::bytes_4_hex")] + capella_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub capella_fork_epoch: Option>, + #[serde(with = "eth2_serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -912,6 +973,11 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_capella_fork_version() -> [u8; 4] { + // TODO: determine if the bellatrix example should be copied like this + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1008,6 +1074,10 @@ impl Config { bellatrix_fork_epoch: spec .bellatrix_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + capella_fork_version: spec.capella_fork_version, + capella_fork_epoch: spec + .capella_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, @@ -1053,6 +1123,8 @@ impl Config { altair_fork_epoch, bellatrix_fork_epoch, bellatrix_fork_version, + capella_fork_epoch, + capella_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1083,6 +1155,8 @@ impl Config { altair_fork_epoch: altair_fork_epoch.map(|q| q.value), bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), bellatrix_fork_version, + capella_fork_epoch: capella_fork_epoch.map(|q| q.value), + capella_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1174,6 +1248,12 @@ mod tests { apply_bit_mask(builder_domain_pre_mask, &spec), &spec, ); + + test_domain( + Domain::BlsToExecutionChange, + spec.domain_bls_to_execution_change, + &spec, + ); } fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index cad7fab754e..e84798f6f7d 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -8,8 +8,8 @@ use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index b7ec015ea31..b10ad7557b9 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ use crate::{ - consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, + consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, + EthSpec, ForkName, }; use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; @@ -11,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Altair, Bellatrix), + variants(Bellatrix, Capella), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -24,9 +25,11 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - #[superstruct(only(Bellatrix))] #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Capella))] + #[serde(flatten)] + pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
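The `extra_fields` map mentioned above works via `#[serde(flatten)]` over a `HashMap`, which soaks up any keys the known presets don't claim, so a config written for a later fork still round-trips. A small sketch of that serde behaviour, assuming `serde` (with derive), `serde_json` and the standard-library `HashMap`:

use serde::{Deserialize, Serialize};
use std::collections::HashMap;

#[derive(Serialize, Deserialize, Debug)]
struct ConfigLike {
    preset_base: String,
    // Any unrecognised keys land here instead of causing a hard error.
    #[serde(flatten)]
    extra_fields: HashMap<String, serde_json::Value>,
}

fn main() {
    let input = r#"{ "preset_base": "mainnet", "FUTURE_FORK_EPOCH": "18446744073709551615" }"#;
    let cfg: ConfigLike = serde_json::from_str(input).unwrap();
    assert_eq!(cfg.extra_fields.len(), 1);
    assert!(cfg.extra_fields.contains_key("FUTURE_FORK_EPOCH"));
}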
#[serde(flatten)] pub extra_fields: HashMap, @@ -37,26 +40,29 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.bellatrix_fork_epoch.is_some() + if spec.capella_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Merge) + || fork_name == Some(ForkName::Capella) { - let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + let capella_preset = CapellaPreset::from_chain_spec::(spec); - ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { + ConfigAndPreset::Capella(ConfigAndPresetCapella { config, base_preset, altair_preset, bellatrix_preset, + capella_preset, extra_fields, }) } else { - ConfigAndPreset::Altair(ConfigAndPresetAltair { + ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { config, base_preset, altair_preset, + bellatrix_preset, extra_fields, }) } @@ -130,8 +136,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetBellatrix = + let from: ConfigAndPresetCapella = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); + assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); } } diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 855e36bc903..167b0857c5a 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -9,9 +9,20 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// A Validators aggregate sync committee contribution and selection proof. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index a347cf675cf..bbc3bd9fb89 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -11,9 +11,18 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct Deposit { pub proof: FixedVector, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index 6c5444e110f..1969311671f 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -10,9 +10,18 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct DepositData { pub pubkey: PublicKeyBytes, diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index d1f245bc980..63073401c22 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -10,8 +10,18 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 0fe929a1e93..3556e31a9fc 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -10,9 +10,18 @@ use tree_hash_derive::TreeHash; /// a nodes local ENR. /// /// Spec v0.11 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct EnrForkId { #[serde(with = "eth2_serde_utils::bytes_4_hex")] diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 4fd7d3373c5..6b2396e112c 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, PartialEq, Clone, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index e6169760264..378e8d34b7d 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -48,7 +48,9 @@ impl fmt::Display for EthSpecId { } } -pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq { +pub trait EthSpec: + 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq + for<'a> arbitrary::Arbitrary<'a> +{ /* * Constants */ @@ -95,6 +97,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type GasLimitDenominator: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Capella + */ + type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -222,6 +229,16 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn bytes_per_logs_bloom() -> usize { Self::BytesPerLogsBloom::to_usize() } + + /// Returns the `MAX_BLS_TO_EXECUTION_CHANGES` constant for this specification. + fn max_bls_to_execution_changes() -> usize { + Self::MaxBlsToExecutionChanges::to_usize() + } + + /// Returns the `MAX_WITHDRAWALS_PER_PAYLOAD` constant for this specification. + fn max_withdrawals_per_payload() -> usize { + Self::MaxWithdrawalsPerPayload::to_usize() + } } /// Macro to inherit some type values from another EthSpec. @@ -233,8 +250,7 @@ macro_rules! params_from_eth_spec { } /// Ethereum Foundation specifications. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { @@ -265,6 +281,8 @@ impl EthSpec for MainnetEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch + type MaxBlsToExecutionChanges = U16; + type MaxWithdrawalsPerPayload = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -276,8 +294,7 @@ impl EthSpec for MainnetEthSpec { } /// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo. 
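The two new `EthSpec` associated types (`MaxBlsToExecutionChanges`, `MaxWithdrawalsPerPayload`) follow the existing pattern of carrying preset constants as `typenum` type-level integers, read back as `usize` via the accessor methods: `U16` for both on mainnet, with `MaxWithdrawalsPerPayload = U4` on the minimal spec. A stripped-down sketch assuming the `typenum` crate:

use typenum::{Unsigned, U16, U4};

trait EthSpecLike {
    type MaxWithdrawalsPerPayload: Unsigned;
    type MaxBlsToExecutionChanges: Unsigned;

    fn max_withdrawals_per_payload() -> usize {
        Self::MaxWithdrawalsPerPayload::to_usize()
    }
    fn max_bls_to_execution_changes() -> usize {
        Self::MaxBlsToExecutionChanges::to_usize()
    }
}

struct MainnetLike;
impl EthSpecLike for MainnetLike {
    type MaxWithdrawalsPerPayload = U16;
    type MaxBlsToExecutionChanges = U16;
}

struct MinimalLike;
impl EthSpecLike for MinimalLike {
    type MaxWithdrawalsPerPayload = U4;
    type MaxBlsToExecutionChanges = U16; // inherited from mainnet in the real code
}

fn main() {
    assert_eq!(MainnetLike::max_withdrawals_per_payload(), 16);
    assert_eq!(MinimalLike::max_withdrawals_per_payload(), 4);
    assert_eq!(MinimalLike::max_bls_to_execution_changes(), 16);
}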
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { @@ -290,6 +307,7 @@ impl EthSpec for MinimalEthSpec { type SyncSubcommitteeSize = U8; // 32 committee size / 4 sync committee subnet count type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch + type MaxWithdrawalsPerPayload = U4; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -309,7 +327,8 @@ impl EthSpec for MinimalEthSpec { BytesPerLogsBloom, GasLimitDenominator, MinGasLimit, - MaxExtraDataBytes + MaxExtraDataBytes, + MaxBlsToExecutionChanges }); fn default_spec() -> ChainSpec { @@ -322,8 +341,7 @@ impl EthSpec for MinimalEthSpec { } /// Gnosis Beacon Chain specifications. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct GnosisEthSpec; impl EthSpec for GnosisEthSpec { @@ -354,6 +372,8 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch + type MaxBlsToExecutionChanges = U16; + type MaxWithdrawalsPerPayload = U16; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 988dcece5e8..363a35a86a1 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -6,8 +6,18 @@ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derive( + arbitrary::Arbitrary, + Default, + Clone, + Copy, + Serialize, + Deserialize, + Eq, + PartialEq, + Hash, + Derivative, +)] #[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 4baa5dd3958..b19988ff7df 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -17,14 +17,16 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -use crate::{Address, EthSpec, ExecutionPayload, Hash256, Hash64, Uint256}; +use crate::{Address, EthSpec, ExecutionPayloadRef, Hash256, Hash64, Uint256}; use metastruct::metastruct; /// Execution block header as used for RLP encoding and Keccak hashing. /// /// Credit to Reth for the type definition. 
#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[metastruct(mappings(map_execution_block_header_fields()))] +#[metastruct(mappings(map_execution_block_header_fields_except_withdrawals(exclude( + withdrawals_root +))))] pub struct ExecutionBlockHeader { pub parent_hash: Hash256, pub ommers_hash: Hash256, @@ -42,33 +44,36 @@ pub struct ExecutionBlockHeader { pub mix_hash: Hash256, pub nonce: Hash64, pub base_fee_per_gas: Uint256, + pub withdrawals_root: Option, } impl ExecutionBlockHeader { pub fn from_payload( - payload: &ExecutionPayload, + payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, rlp_transactions_root: Hash256, + rlp_withdrawals_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. ExecutionBlockHeader { - parent_hash: payload.parent_hash.into_root(), + parent_hash: payload.parent_hash().into_root(), ommers_hash: rlp_empty_list_root, - beneficiary: payload.fee_recipient, - state_root: payload.state_root, + beneficiary: payload.fee_recipient(), + state_root: payload.state_root(), transactions_root: rlp_transactions_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone().into(), + receipts_root: payload.receipts_root(), + logs_bloom: payload.logs_bloom().clone().into(), difficulty: Uint256::zero(), - number: payload.block_number.into(), - gas_limit: payload.gas_limit.into(), - gas_used: payload.gas_used.into(), - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone().into(), - mix_hash: payload.prev_randao, + number: payload.block_number().into(), + gas_limit: payload.gas_limit().into(), + gas_used: payload.gas_used().into(), + timestamp: payload.timestamp(), + extra_data: payload.extra_data().clone().into(), + mix_hash: payload.prev_randao(), nonce: Hash64::zero(), - base_fee_per_gas: payload.base_fee_per_gas, + base_fee_per_gas: payload.base_fee_per_gas(), + withdrawals_root: rlp_withdrawals_root, } } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 412e5a8df3a..18da0d161f7 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; +use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -12,50 +12,152 @@ pub type Transactions = VariableList< ::MaxTransactionsPerPayload, >; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + +#[superstruct( + variants(Merge, Capella), + variant_attributes( + derive( + Default, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec") + ), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_into(FullPayload, BlindedPayload), + map_ref_into(ExecutionPayloadHeader) +)] #[derive( - Default, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, )] 
#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "T: EthSpec", untagged)] +#[arbitrary(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +#[tree_hash(enum_behaviour = "transparent")] pub struct ExecutionPayload { + #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub fee_recipient: Address, + #[superstruct(getter(copy))] pub state_root: Hash256, + #[superstruct(getter(copy))] pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, + #[superstruct(getter(copy))] pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_limit: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_used: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, + #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, + #[superstruct(only(Capella))] + pub withdrawals: Withdrawals, +} + +impl<'a, T: EthSpec> ExecutionPayloadRef<'a, T> { + // this emulates clone on a normal reference type + pub fn clone_from_ref(&self) -> ExecutionPayload { + map_execution_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } } impl ExecutionPayload { - pub fn empty() -> Self { - Self::default() + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayload: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), + } + } + + #[allow(clippy::integer_arithmetic)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_merge_size() -> usize { + // Fixed part + ExecutionPayloadMerge::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } #[allow(clippy::integer_arithmetic)] /// Returns the maximum size of an execution payload. 
- pub fn max_execution_payload_size() -> usize { + pub fn max_execution_payload_capella_size() -> usize { // Fixed part - Self::empty().as_ssz_bytes().len() + ExecutionPayloadCapella::::default().as_ssz_bytes().len() // Max size of variable length `extra_data` field + (T::max_extra_data_bytes() * ::ssz_fixed_len()) // Max size of variable length `transactions` field + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) + } +} + +impl ForkVersionDeserialize for ExecutionPayload { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!("ExecutionPayload failed to deserialize: {:?}", e)) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayload failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} + +impl ExecutionPayload { + pub fn fork_name(&self) -> ForkName { + match self { + ExecutionPayload::Merge(_) => ForkName::Merge, + ExecutionPayload::Capella(_) => ForkName::Capella, + } } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 01780fa1c32..d193a6cd8e7 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,49 +1,156 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; +use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use BeaconStateError; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[superstruct( + variants(Merge, Capella), + variant_attributes( + derive( + Default, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec") + ), + ref_attributes(derive(PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent")), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] #[derive( - Default, Debug, Clone, Serialize, Deserialize, Derivative, Encode, Decode, TreeHash, TestRandom, + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec", untagged)] +#[arbitrary(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] pub struct ExecutionPayloadHeader { + #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub fee_recipient: Address, + #[superstruct(getter(copy))] pub state_root: Hash256, + #[superstruct(getter(copy))] pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, + #[superstruct(getter(copy))] pub 
prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_limit: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_used: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, + #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub transactions_root: Hash256, + #[superstruct(only(Capella))] + #[superstruct(getter(copy))] + pub withdrawals_root: Hash256, } impl ExecutionPayloadHeader { - pub fn empty() -> Self { - Self::default() + pub fn transactions(&self) -> Option<&Transactions> { + None + } + + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadHeaderMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => { + ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) + } + } + } +} + +impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { + pub fn is_default_with_zero_roots(self) -> bool { + map_execution_payload_header_ref!(&'a _, self, |inner, cons| { + cons(inner); + *inner == Default::default() + }) + } +} + +impl ExecutionPayloadHeaderMerge { + pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { + ExecutionPayloadHeaderCapella { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: Hash256::zero(), + } } } -impl<'a, T: EthSpec> From<&'a ExecutionPayload> for ExecutionPayloadHeader { - fn from(payload: &'a ExecutionPayload) -> Self { - ExecutionPayloadHeader { +impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a ExecutionPayloadMerge) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + } + } +} +impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a ExecutionPayloadCapella) -> Self { + Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -58,6 +165,77 @@ impl<'a, T: EthSpec> From<&'a ExecutionPayload> for 
ExecutionPayloadHeader base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + } + } +} + +// These impls are required to work around an inelegance in `to_execution_payload_header`. +// They only clone headers so they should be relatively cheap. +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + +impl<'a, T: EthSpec> From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadRef<'a, T>) -> Self { + map_execution_payload_ref_into_execution_payload_header!( + &'a _, + payload, + |inner, cons| cons(inner.into()) + ) + } +} + +impl TryFrom> for ExecutionPayloadHeaderMerge { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Merge(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} +impl TryFrom> for ExecutionPayloadHeaderCapella { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Ok(execution_payload_header) + } + _ => Err(BeaconStateError::IncorrectStateVariant), } } } + +impl ForkVersionDeserialize for ExecutionPayloadHeader { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: {:?}", + e + )) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 44b8a16637d..de332f0cada 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 52b9294c8ca..90d1fbc6864 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,6 +47,13 @@ impl ForkContext { )); } + if spec.capella_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Capella, + ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index be13f71e4d5..cc790393159 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -9,9 +9,18 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct ForkData { #[serde(with = "eth2_serde_utils::bytes_4_hex")] diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index e97b08309b7..007d4c4daa5 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,11 +11,17 @@ pub enum ForkName { Base, Altair, Merge, + Capella, } impl ForkName { pub fn list_all() -> Vec { - vec![ForkName::Base, ForkName::Altair, ForkName::Merge] + vec![ + ForkName::Base, + ForkName::Altair, + ForkName::Merge, + ForkName::Capella, + ] } /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` @@ -26,16 +32,25 @@ impl ForkName { ForkName::Base => { spec.altair_fork_epoch = None; spec.bellatrix_fork_epoch = None; + spec.capella_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = None; + spec.capella_fork_epoch = None; spec } ForkName::Merge => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = None; + spec + } + ForkName::Capella => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); spec } } @@ -49,6 +64,7 @@ impl ForkName { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), + ForkName::Capella => Some(ForkName::Merge), } } @@ -59,7 +75,8 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => None, + ForkName::Merge => Some(ForkName::Capella), + ForkName::Capella => None, } } } @@ -101,6 +118,10 @@ macro_rules! 
map_fork_name_with { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } + ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } } }; } @@ -113,6 +134,7 @@ impl FromStr for ForkName { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, + "capella" => ForkName::Capella, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } @@ -124,6 +146,7 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), + ForkName::Capella => "capella".fmt(f), } } } @@ -155,7 +178,7 @@ mod test { #[test] fn previous_and_next_fork_consistent() { - assert_eq!(ForkName::Merge.next_fork(), None); + assert_eq!(ForkName::Capella.next_fork(), None); assert_eq!(ForkName::Base.previous_fork(), None); for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs new file mode 100644 index 00000000000..2d97dc12194 --- /dev/null +++ b/consensus/types/src/fork_versioned_response.rs @@ -0,0 +1,141 @@ +use crate::ForkName; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::value::Value; +use std::sync::Arc; + +// Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ExecutionOptimisticFinalizedForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub execution_optimistic: Option, + pub finalized: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + execution_optimistic: Option, + finalized: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { + version: helper.version, + execution_optimistic: helper.execution_optimistic, + finalized: helper.finalized, + data, + }) + } +} + +pub trait ForkVersionDeserialize: Sized + DeserializeOwned { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result; +} + +// Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ForkVersionedResponse { + version: helper.version, + 
data, + }) + } +} + +impl ForkVersionDeserialize for Arc { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + Ok(Arc::new(F::deserialize_by_fork::<'de, D>( + value, fork_name, + )?)) + } +} + +#[cfg(test)] +mod fork_version_response_tests { + use crate::{ + ExecutionPayload, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, MainnetEthSpec, + }; + use serde_json::json; + + #[test] + fn fork_versioned_response_deserialize_correct_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: Some(ForkName::Merge), + data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_ok()); + } + + #[test] + fn fork_versioned_response_deserialize_incorrect_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: Some(ForkName::Capella), + data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_err()); + } +} diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs deleted file mode 100644 index 81a778d8421..00000000000 --- a/consensus/types/src/free_attestation.rs +++ /dev/null @@ -1,14 +0,0 @@ -/// Note: this object does not actually exist in the spec. -/// -/// We use it for managing attestations that have not been aggregated. -use super::{AttestationData, Signature}; -use serde_derive::Serialize; - -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize)] -pub struct FreeAttestation { - pub data: AttestationData, - pub signature: Signature, - #[serde(with = "eth2_serde_utils::quoted_u64")] - pub validator_index: u64, -} diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 2b0a645cd05..6288cdbe807 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -14,7 +14,7 @@ pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. #[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(arbitrary::Arbitrary)] pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); impl Graffiti { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 325f5f85377..e75b64cae93 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -10,8 +10,19 @@ use tree_hash_derive::TreeHash; /// Historical block and state roots. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] +#[arbitrary(bound = "T: EthSpec")] pub struct HistoricalBatch { pub block_roots: FixedVector, pub state_roots: FixedVector, diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs new file mode 100644 index 00000000000..84d87b85fd9 --- /dev/null +++ b/consensus/types/src/historical_summary.rs @@ -0,0 +1,89 @@ +use crate::test_utils::TestRandom; +use crate::Unsigned; +use crate::{BeaconState, EthSpec, Hash256}; +use cached_tree_hash::Error; +use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; +use compare_fields_derive::CompareFields; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK}; +use tree_hash_derive::TreeHash; + +/// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` +/// making the two hash_tree_root-compatible. This struct is introduced into the beacon state +/// in the Capella hard fork. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#historicalsummary +#[derive( + Debug, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + CompareFields, + Clone, + Copy, + Default, + arbitrary::Arbitrary, +)] +pub struct HistoricalSummary { + block_summary_root: Hash256, + state_summary_root: Hash256, +} + +impl HistoricalSummary { + pub fn new(state: &BeaconState) -> Self { + Self { + block_summary_root: state.block_roots().tree_hash_root(), + state_summary_root: state.state_roots().tree_hash_root(), + } + } +} + +/// Wrapper type allowing the implementation of `CachedTreeHash`. +#[derive(Debug)] +pub struct HistoricalSummaryCache<'a, N: Unsigned> { + pub inner: &'a VariableList, +} + +impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> { + pub fn new(inner: &'a VariableList) -> Self { + Self { inner } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, N: Unsigned> CachedTreeHash for HistoricalSummaryCache<'a, N> { + fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { + TreeHashCache::new(arena, int_log(N::to_usize()), self.len()) + } + + fn recalculate_tree_hash_root( + &self, + arena: &mut CacheArena, + cache: &mut TreeHashCache, + ) -> Result { + Ok(mix_in_length( + &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, + self.len(), + )) + } +} + +pub fn leaf_iter( + values: &[HistoricalSummary], +) -> impl Iterator + ExactSizeIterator + '_ { + values + .iter() + .map(|value| value.tree_hash_root()) + .map(Hash256::to_fixed_bytes) +} diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 32271cfa935..16ffb1ad8fa 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -12,12 +12,21 @@ use tree_hash_derive::TreeHash; /// To be included in an `AttesterSlashing`. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq)] // to satisfy Clippy's lint about `Hash` #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. #[serde(with = "quoted_variable_list_u64")] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 87f5ebe8b3c..aefb45490a8 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,7 +1,4 @@ //! Ethereum 2.0 types - -// Required for big type-level numbers -#![recursion_limit = "128"] // Clippy lint set up #![cfg_attr( not(test), @@ -28,6 +25,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; @@ -46,9 +44,10 @@ pub mod execution_payload_header; pub mod fork; pub mod fork_data; pub mod fork_name; -pub mod free_attestation; +pub mod fork_versioned_response; pub mod graffiti; pub mod historical_batch; +pub mod historical_summary; pub mod indexed_attestation; pub mod light_client_bootstrap; pub mod light_client_finality_update; @@ -63,6 +62,7 @@ pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; +pub mod signed_bls_to_execution_change; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -91,6 +91,7 @@ pub mod sync_selection_proof; pub mod sync_subnet_id; mod tree_hash_impls; pub mod validator_registration_data; +pub mod withdrawal; pub mod slot_data; #[cfg(feature = "sqlite")] @@ -104,20 +105,21 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BeaconBlockRef, - BeaconBlockRefMut, BlindedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, + BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, + BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, + ConfigAndPreset, ConfigAndPresetBellatrix, ConfigAndPresetCapella, }; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; @@ -129,13 +131,19 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use 
crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; -pub use crate::execution_payload::{ExecutionPayload, Transaction, Transactions}; -pub use crate::execution_payload_header::ExecutionPayloadHeader; +pub use crate::execution_payload::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, ExecutionPayloadRef, + Transaction, Transactions, Withdrawals, +}; +pub use crate::execution_payload_header::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::free_attestation::FreeAttestation; +pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; @@ -143,9 +151,13 @@ pub use crate::light_client_finality_update::LightClientFinalityUpdate; pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; -pub use crate::payload::{BlindedPayload, BlockType, ExecPayload, FullPayload}; +pub use crate::payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadMerge, + BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadCapella, FullPayloadMerge, + FullPayloadRef, OwnedExecPayload, +}; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; @@ -153,10 +165,11 @@ pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; @@ -175,12 +188,15 @@ pub use crate::validator::Validator; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; +pub use crate::withdrawal::Withdrawal; pub type CommitteeIndex = u64; pub type Hash256 = H256; pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; +pub type BLSFieldElement = Uint256; +pub type VersionedHash = Hash256; pub type Hash64 = ethereum_types::H64; pub use bls::{ diff --git 
a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index d2a46c04a43..1a5eed2205d 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -8,9 +8,19 @@ use tree_hash::TreeHash; /// A LightClientBootstrap is the initializer we send over to lightclient nodes /// that are trying to generate their basic storage when booting up. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientBootstrap { /// Requested beacon block header. pub header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index cae6266f9e7..08069c93084 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -10,9 +10,19 @@ use tree_hash::TreeHash; /// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that /// signal a new finalized beacon block header for the light client sync protocol. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientFinalityUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 8dda8cd5aed..7a39bd9ac1c 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -9,9 +9,19 @@ use tree_hash::TreeHash; /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientOptimisticUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 7d01f39bfc8..ca35f96802b 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -52,9 +52,19 @@ impl From for Error { /// A LightClientUpdate is the update we request solely to either complete the bootstraping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. 
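// Editor's illustrative sketch, not part of the diff: `arbitrary::Arbitrary` is now derived
// unconditionally (with `#[arbitrary(bound = "T: EthSpec")]`) instead of being gated behind the
// old `arbitrary-fuzz` feature, so fuzz targets can build these containers directly from raw
// bytes. The module path and function name below are assumptions for illustration.
fn arbitrary_light_client_sketch(raw: &[u8]) {
    use arbitrary::{Arbitrary, Unstructured};
    use types::light_client_bootstrap::LightClientBootstrap;
    use types::MainnetEthSpec;

    let mut unstructured = Unstructured::new(raw);
    // An error simply means `raw` did not contain enough bytes; a fuzzer would try again.
    if let Ok(bootstrap) = LightClientBootstrap::<MainnetEthSpec>::arbitrary(&mut unstructured) {
        drop(bootstrap);
    }
}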
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index a2dd4948641..bd98f8da078 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -7,7 +7,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(arbitrary::Arbitrary)] pub struct ParticipationFlags { #[serde(with = "eth2_serde_utils::quoted_u8")] bits: u8, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 667fff58c7f..2795c7f1092 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -2,45 +2,33 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use ssz::{Decode, DecodeError, Encode}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::borrow::Cow; use std::convert::TryFrom; use std::fmt::Debug; use std::hash::Hash; use test_random_derive::TestRandom; -use tree_hash::{PackedEncoding, TreeHash}; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum BlockType { Blinded, Full, } -pub trait ExecPayload: - Debug - + Clone - + Encode - + Debug - + Decode - + TestRandom - + TreeHash - + Default - + PartialEq - + Serialize - + DeserializeOwned - + Hash - + TryFrom> - + From> - + Send - + 'static -{ +/// A trait representing behavior of an `ExecutionPayload` that either has a full list of transactions +/// or a transaction hash in it's place. +pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + Send { fn block_type() -> BlockType; /// Convert the payload into a payload header. fn to_execution_payload_header(&self) -> ExecutionPayloadHeader; - // We provide a subset of field accessors, for the fields used in `consensus`. - // - // More fields can be added here if you wish. + /// We provide a subset of field accessors, for the fields used in `consensus`. + /// + /// More fields can be added here if you wish. fn parent_hash(&self) -> ExecutionBlockHash; fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; @@ -48,6 +36,132 @@ pub trait ExecPayload: fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; + fn transactions(&self) -> Option<&Transactions>; + /// fork-specific fields + fn withdrawals_root(&self) -> Result; + + /// Is this a default payload with 0x0 roots for transactions and withdrawals? + fn is_default_with_zero_roots(&self) -> bool; + + /// Is this a default payload with the hash of the empty list for transactions and withdrawals? + fn is_default_with_empty_roots(&self) -> bool; +} + +/// `ExecPayload` functionality the requires ownership. 
+pub trait OwnedExecPayload: + ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> + + 'static +{ +} + +impl OwnedExecPayload for P where + P: ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> + + 'static +{ +} + +pub trait AbstractExecPayload: + ExecPayload + + Sized + + From> + + TryFrom> + + TryInto + + TryInto +{ + type Ref<'a>: ExecPayload + Copy + From<&'a Self::Merge> + From<&'a Self::Capella>; + + type Merge: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; + type Capella: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; + + fn default_at_fork(fork_name: ForkName) -> Result; +} + +#[superstruct( + variants(Merge, Capella), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + arbitrary::Arbitrary, + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), + ssz(struct_behaviour = "transparent"), + ), + ref_attributes( + derive(Debug, Derivative, TreeHash), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + tree_hash(enum_behaviour = "transparent"), + ), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +pub struct FullPayload { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, +} + +impl From> for ExecutionPayload { + fn from(full_payload: FullPayload) -> Self { + map_full_payload_into_execution_payload!(full_payload, move |payload, cons| { + cons(payload.execution_payload) + }) + } +} + +impl<'a, T: EthSpec> From> for ExecutionPayload { + fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { + map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { + cons(payload); + payload.execution_payload.clone().into() + }) + } +} + +impl<'a, T: EthSpec> From> for FullPayload { + fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { + map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } } impl ExecPayload for FullPayload { @@ -55,230 +169,749 @@ impl ExecPayload for FullPayload { BlockType::Full } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::from(&self.execution_payload) + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self.to_ref(), move |inner, cons| { + cons(inner); + let exec_payload_ref: ExecutionPayloadRef<'a, T> = From::from(&inner.execution_payload); + ExecutionPayloadHeader::from(exec_payload_ref) + }) } - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload.parent_hash + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self.to_ref(), 
move |payload, cons| { + cons(payload); + payload.execution_payload.parent_hash + }) } - fn prev_randao(&self) -> Hash256 { - self.execution_payload.prev_randao + fn prev_randao<'a>(&'a self) -> Hash256 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.prev_randao + }) } - fn block_number(&self) -> u64 { - self.execution_payload.block_number + fn block_number<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.block_number + }) } - fn timestamp(&self) -> u64 { - self.execution_payload.timestamp + fn timestamp<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.timestamp + }) } - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload.block_hash + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.block_hash + }) } - fn fee_recipient(&self) -> Address { - self.execution_payload.fee_recipient + fn fee_recipient<'a>(&'a self) -> Address { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.fee_recipient + }) } - fn gas_limit(&self) -> u64 { - self.execution_payload.gas_limit + fn gas_limit<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.gas_limit + }) + } + + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + Some(&payload.execution_payload.transactions) + }) + } + + fn withdrawals_root(&self) -> Result { + match self { + FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayload::Capella(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload == <_>::default() + }) + } + + fn is_default_with_empty_roots(&self) -> bool { + // For full payloads the empty/zero distinction does not exist. 
+ self.is_default_with_zero_roots() } } -impl ExecPayload for BlindedPayload { +impl FullPayload { + pub fn execution_payload(self) -> ExecutionPayload { + map_full_payload_into_execution_payload!(self, |inner, cons| { + cons(inner.execution_payload) + }) + } +} + +impl<'a, T: EthSpec> FullPayloadRef<'a, T> { + pub fn execution_payload_ref(self) -> ExecutionPayloadRef<'a, T> { + map_full_payload_ref_into_execution_payload_ref!(&'a _, self, |inner, cons| { + cons(&inner.execution_payload) + }) + } +} + +impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { fn block_type() -> BlockType { - BlockType::Blinded + BlockType::Full } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - self.execution_payload_header.clone() + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.to_execution_payload_header() + }) } - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.parent_hash + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.parent_hash + }) } - fn prev_randao(&self) -> Hash256 { - self.execution_payload_header.prev_randao + fn prev_randao<'a>(&'a self) -> Hash256 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.prev_randao + }) } - fn block_number(&self) -> u64 { - self.execution_payload_header.block_number + fn block_number<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.block_number + }) } - fn timestamp(&self) -> u64 { - self.execution_payload_header.timestamp + fn timestamp<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.timestamp + }) } - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.block_hash + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.block_hash + }) } - fn fee_recipient(&self) -> Address { - self.execution_payload_header.fee_recipient + fn fee_recipient<'a>(&'a self) -> Address { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.fee_recipient + }) } - fn gas_limit(&self) -> u64 { - self.execution_payload_header.gas_limit + fn gas_limit<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.gas_limit + }) } -} -#[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] -pub struct BlindedPayload { - pub execution_payload_header: ExecutionPayloadHeader, -} + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + Some(&payload.execution_payload.transactions) + }) + } -// NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` -// implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the -// default payload in `is_merge_transition_block` to determine whether the merge has occurred. 
-// -// The default `BlindedPayload` is therefore the payload header that results from blinding the -// default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that -// its `transactions_root` is the hash of the empty list rather than 0x0. -impl Default for BlindedPayload { - fn default() -> Self { - Self { - execution_payload_header: ExecutionPayloadHeader::from(&ExecutionPayload::default()), + fn withdrawals_root(&self) -> Result { + match self { + FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Capella(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } + + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload == <_>::default() + }) + } + + fn is_default_with_empty_roots(&self) -> bool { + // For full payloads the empty/zero distinction does not exist. + self.is_default_with_zero_roots() + } } -impl From> for BlindedPayload { - fn from(execution_payload_header: ExecutionPayloadHeader) -> Self { - Self { - execution_payload_header, +impl AbstractExecPayload for FullPayload { + type Ref<'a> = FullPayloadRef<'a, T>; + type Merge = FullPayloadMerge; + type Capella = FullPayloadCapella; + + fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), } } } -impl From> for ExecutionPayloadHeader { - fn from(blinded: BlindedPayload) -> Self { - blinded.execution_payload_header +impl From> for FullPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + map_execution_payload_into_full_payload!(execution_payload, |inner, cons| { + cons(inner.into()) + }) } } -impl From> for BlindedPayload { - fn from(execution_payload: ExecutionPayload) -> Self { - Self { - execution_payload_header: ExecutionPayloadHeader::from(&execution_payload), - } +impl TryFrom> for FullPayload { + type Error = (); + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) } } -impl TreeHash for BlindedPayload { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() +#[superstruct( + variants(Merge, Capella), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + arbitrary::Arbitrary + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), + ssz(struct_behaviour = "transparent"), + ), + ref_attributes( + derive(Debug, Derivative, TreeHash), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + tree_hash(enum_behaviour = "transparent"), + ), + map_into(ExecutionPayloadHeader), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +pub struct BlindedPayload { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct(only(Capella), partial_getter(rename = 
"execution_payload_capella"))] + pub execution_payload_header: ExecutionPayloadHeaderCapella, +} + +impl<'a, T: EthSpec> From> for BlindedPayload { + fn from(blinded_payload_ref: BlindedPayloadRef<'a, T>) -> Self { + map_blinded_payload_ref!(&'a _, blinded_payload_ref, move |payload, cons| { + cons(payload); + payload.clone().into() + }) } +} - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - self.execution_payload_header.tree_hash_packed_encoding() +impl ExecPayload for BlindedPayload { + fn block_type() -> BlockType { + BlockType::Blinded } - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + map_blinded_payload_into_execution_payload_header!(self.clone(), |inner, cons| { + cons(inner.execution_payload_header) + }) } - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.execution_payload_header.tree_hash_root() + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.parent_hash + }) } -} -impl Decode for BlindedPayload { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() + fn prev_randao<'a>(&'a self) -> Hash256 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.prev_randao + }) } - fn ssz_fixed_len() -> usize { - as Decode>::ssz_fixed_len() + fn block_number<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_number + }) } - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(Self { - execution_payload_header: ExecutionPayloadHeader::from_ssz_bytes(bytes)?, + fn timestamp<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.timestamp }) } -} -impl Encode for BlindedPayload { - fn is_ssz_fixed_len() -> bool { - as Encode>::is_ssz_fixed_len() + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_hash + }) } - fn ssz_append(&self, buf: &mut Vec) { - self.execution_payload_header.ssz_append(buf) + fn fee_recipient<'a>(&'a self) -> Address { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.fee_recipient + }) } - fn ssz_bytes_len(&self) -> usize { - self.execution_payload_header.ssz_bytes_len() + fn gas_limit<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.gas_limit + }) } -} -#[derive(Default, Debug, Clone, Serialize, Deserialize, TestRandom, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] -pub struct FullPayload { - pub execution_payload: ExecutionPayload, -} + fn transactions(&self) -> Option<&Transactions> { + None + } -impl From> for FullPayload { - fn from(execution_payload: ExecutionPayload) -> Self { - Self { execution_payload } + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Capella(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } } -} -impl TryFrom> for FullPayload { - type Error = (); + fn 
is_default_with_zero_roots(&self) -> bool { + self.to_ref().is_default_with_zero_roots() + } - fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(()) + // For blinded payloads we must check "defaultness" against the default `ExecutionPayload` + // which has been blinded into an `ExecutionPayloadHeader`, NOT against the default + // `ExecutionPayloadHeader` which has a zeroed out `transactions_root`. The transactions root + // should be the root of the empty list. + fn is_default_with_empty_roots(&self) -> bool { + self.to_ref().is_default_with_empty_roots() } } -impl TreeHash for FullPayload { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() +impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { + fn block_type() -> BlockType { + BlockType::Blinded } - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - self.execution_payload.tree_hash_packed_encoding() + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.to_execution_payload_header() + }) } - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.parent_hash + }) } - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.execution_payload.tree_hash_root() + fn prev_randao<'a>(&'a self) -> Hash256 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.prev_randao + }) + } + + fn block_number<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_number + }) + } + + fn timestamp<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.timestamp + }) + } + + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_hash + }) + } + + fn fee_recipient<'a>(&'a self) -> Address { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.fee_recipient + }) + } + + fn gas_limit<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.gas_limit + }) + } + + fn transactions(&self) -> Option<&Transactions> { + None } -} -impl Decode for FullPayload { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Capella(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } + } + + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'b _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header == <_>::default() + }) } - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(FullPayload { - execution_payload: Decode::from_ssz_bytes(bytes)?, + fn is_default_with_empty_roots<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'b _, self, move |payload, cons| { + cons(payload); + payload.is_default_with_empty_roots() }) } } -impl Encode for FullPayload { - fn is_ssz_fixed_len() -> bool { - as 
Encode>::is_ssz_fixed_len() +macro_rules! impl_exec_payload_common { + ($wrapper_type:ident, // BlindedPayloadMerge | FullPayloadMerge + $wrapped_type:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadMerge + $wrapped_type_full:ident, // ExecutionPayloadMerge | ExecutionPayloadMerge + $wrapped_type_header:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadHeaderMerge + $wrapped_field:ident, // execution_payload_header | execution_payload + $fork_variant:ident, // Merge | Merge + $block_type_variant:ident, // Blinded | Full + $is_default_with_empty_roots:block, + $f:block, + $g:block) => { + impl ExecPayload for $wrapper_type { + fn block_type() -> BlockType { + BlockType::$block_type_variant + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::$fork_variant($wrapped_type_header::from( + &self.$wrapped_field, + )) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.$wrapped_field.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.$wrapped_field.prev_randao + } + + fn block_number(&self) -> u64 { + self.$wrapped_field.block_number + } + + fn timestamp(&self) -> u64 { + self.$wrapped_field.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.$wrapped_field.block_hash + } + + fn fee_recipient(&self) -> Address { + self.$wrapped_field.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.$wrapped_field.gas_limit + } + + fn is_default_with_zero_roots(&self) -> bool { + self.$wrapped_field == $wrapped_type::default() + } + + fn is_default_with_empty_roots(&self) -> bool { + let f = $is_default_with_empty_roots; + f(self) + } + + fn transactions(&self) -> Option<&Transactions> { + let f = $f; + f(self) + } + + fn withdrawals_root(&self) -> Result { + let g = $g; + g(self) + } + } + + impl From<$wrapped_type> for $wrapper_type { + fn from($wrapped_field: $wrapped_type) -> Self { + Self { $wrapped_field } + } + } + }; +} + +macro_rules! 
impl_exec_payload_for_fork { + // BlindedPayloadMerge, FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, Merge + ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { + //*************** Blinded payload implementations ******************// + + impl_exec_payload_common!( + $wrapper_type_header, // BlindedPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + execution_payload_header, + $fork_variant, // Merge + Blinded, + { + |wrapper: &$wrapper_type_header| { + wrapper.execution_payload_header + == $wrapped_type_header::from(&$wrapped_type_full::default()) + } + }, + { |_| { None } }, + { + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } + ); + + impl TryInto<$wrapper_type_header> for BlindedPayload { + type Error = Error; + + fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { + match self { + BlindedPayload::$fork_variant(payload) => Ok(payload), + _ => Err(Error::IncorrectStateVariant), + } + } + } + + // NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` + // implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the + // default payload in `is_merge_transition_block` to determine whether the merge has occurred. + // + // The default `BlindedPayload` is therefore the payload header that results from blinding the + // default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that + // its `transactions_root` is the hash of the empty list rather than 0x0. + impl Default for $wrapper_type_header { + fn default() -> Self { + Self { + execution_payload_header: $wrapped_type_header::from( + &$wrapped_type_full::default(), + ), + } + } + } + + impl TryFrom> for $wrapper_type_header { + type Error = Error; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { + Ok(execution_payload_header.into()) + } + _ => Err(Error::PayloadConversionLogicFlaw), + } + } + } + + // BlindedPayload* from CoW reference to ExecutionPayload* (hopefully just a reference). 
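    // (Annotation, not part of this diff.) Illustrative only: with the `Cow`-based conversion
    // below, a caller holding a full payload by reference can blind it without cloning the body,
    // e.g. assuming an `ExecutionPayloadMerge` value `execution_payload` is in scope:
    //
    //     let blinded = BlindedPayloadMerge::from(Cow::Borrowed(&execution_payload));
    //
    // Only the header fields are computed and retained (including `transactions_root`, plus
    // `withdrawals_root` from Capella onwards); an owned `Cow` works the same way but takes
    // ownership of the payload whose body it discards.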
+ impl<'a, T: EthSpec> From>> for $wrapper_type_header { + fn from(execution_payload: Cow<'a, $wrapped_type_full>) -> Self { + Self { + execution_payload_header: $wrapped_type_header::from(&*execution_payload), + } + } + } + + //*************** Full payload implementations ******************// + + impl_exec_payload_common!( + $wrapper_type_full, // FullPayloadMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + execution_payload, + $fork_variant, // Merge + Full, + { + |wrapper: &$wrapper_type_full| { + wrapper.execution_payload == $wrapped_type_full::default() + } + }, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = + |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); + c + }, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } + ); + + impl Default for $wrapper_type_full { + fn default() -> Self { + Self { + execution_payload: $wrapped_type_full::default(), + } + } + } + + // FullPayload * from CoW reference to ExecutionPayload* (hopefully already owned). + impl<'a, T: EthSpec> From>> for $wrapper_type_full { + fn from(execution_payload: Cow<'a, $wrapped_type_full>) -> Self { + Self { + execution_payload: $wrapped_type_full::from(execution_payload.into_owned()), + } + } + } + + impl TryFrom> for $wrapper_type_full { + type Error = Error; + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(Error::PayloadConversionLogicFlaw) + } + } + + impl TryFrom<$wrapped_type_header> for $wrapper_type_full { + type Error = Error; + fn try_from(_: $wrapped_type_header) -> Result { + Err(Error::PayloadConversionLogicFlaw) + } + } + + impl TryInto<$wrapper_type_full> for FullPayload { + type Error = Error; + + fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { + match self { + FullPayload::$fork_variant(payload) => Ok(payload), + _ => Err(Error::PayloadConversionLogicFlaw), + } + } + } + }; +} + +impl_exec_payload_for_fork!( + BlindedPayloadMerge, + FullPayloadMerge, + ExecutionPayloadHeaderMerge, + ExecutionPayloadMerge, + Merge +); +impl_exec_payload_for_fork!( + BlindedPayloadCapella, + FullPayloadCapella, + ExecutionPayloadHeaderCapella, + ExecutionPayloadCapella, + Capella +); + +impl AbstractExecPayload for BlindedPayload { + type Ref<'a> = BlindedPayloadRef<'a, T>; + type Merge = BlindedPayloadMerge; + type Capella = BlindedPayloadCapella; + + fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), + ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), + } + } +} + +impl From> for BlindedPayload { + fn from(payload: ExecutionPayload) -> Self { + // This implementation is a bit wasteful in that it discards the payload body. + // Required by the top-level constraint on AbstractExecPayload but could maybe be loosened + // in future. 
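        // (Annotation, not part of this diff.) Illustrative only: once converted, the body is
        // unrecoverable from the blinded form; the transactions (and, for Capella, the
        // withdrawals) survive only as their SSZ roots inside the header, e.g.
        //
        //     let blinded: BlindedPayload<T> = full_payload.into();
        //     assert!(blinded.transactions().is_none());
        //
        // so callers that still need the full body must keep the original `ExecutionPayload`.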
+ map_execution_payload_into_blinded_payload!(payload, |inner, cons| cons(From::from( + Cow::Owned(inner) + ))) } +} - fn ssz_append(&self, buf: &mut Vec) { - self.execution_payload.ssz_append(buf) +impl From> for BlindedPayload { + fn from(execution_payload_header: ExecutionPayloadHeader) -> Self { + match execution_payload_header { + ExecutionPayloadHeader::Merge(execution_payload_header) => { + Self::Merge(BlindedPayloadMerge { + execution_payload_header, + }) + } + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Self::Capella(BlindedPayloadCapella { + execution_payload_header, + }) + } + } } +} - fn ssz_bytes_len(&self) -> usize { - self.execution_payload.ssz_bytes_len() +impl From> for ExecutionPayloadHeader { + fn from(blinded: BlindedPayload) -> Self { + match blinded { + BlindedPayload::Merge(blinded_payload) => { + ExecutionPayloadHeader::Merge(blinded_payload.execution_payload_header) + } + BlindedPayload::Capella(blinded_payload) => { + ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header) + } + } } } diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 2a65bff66f8..1b9903ebbe5 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -9,7 +9,19 @@ use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. /// /// Spec v0.12.1 -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] +#[arbitrary(bound = "T: EthSpec")] pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, @@ -19,18 +31,6 @@ pub struct PendingAttestation { pub proposer_index: u64, } -#[cfg(feature = "arbitrary-fuzz")] -impl arbitrary::Arbitrary<'_> for PendingAttestation { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - Ok(Self { - aggregation_bits: >::arbitrary(u)?, - data: AttestationData::arbitrary(u)?, - inclusion_delay: u64::arbitrary(u)?, - proposer_index: u64::arbitrary(u)?, - }) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index fc5aa873006..20c78f05159 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -184,6 +184,27 @@ impl BellatrixPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct CapellaPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bls_to_execution_changes: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_withdrawals_per_payload: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_validators_per_withdrawals_sweep: u64, +} + +impl CapellaPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + max_bls_to_execution_changes: T::max_bls_to_execution_changes() as u64, + max_withdrawals_per_payload: T::max_withdrawals_per_payload() as u64, + max_validators_per_withdrawals_sweep: spec.max_validators_per_withdrawals_sweep, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -219,6 +240,9 @@ mod test { let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); + + let capella: CapellaPreset = preset_from_file(&preset_name, 
"capella.yaml"); + assert_eq!(capella, CapellaPreset::from_chain_spec::(&spec)); } #[test] diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index ca048b149ac..1ac2464a47f 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -9,9 +9,19 @@ use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct ProposerSlashing { pub signed_header_1: SignedBeaconBlockHeader, diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/relative_epoch.rs index e681ce15c20..77a46b56e86 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/relative_epoch.rs @@ -14,15 +14,11 @@ impl From for Error { } } -#[cfg(feature = "arbitrary-fuzz")] -use arbitrary::Arbitrary; - /// Defines the epochs relative to some epoch. Most useful when referring to the committees prior /// to and following some epoch. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy, arbitrary::Arbitrary)] pub enum RelativeEpoch { /// The prior epoch. Previous, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index 0a360b01554..f8bc8ba69fb 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -7,8 +7,7 @@ use ssz::Encode; use std::cmp; use std::convert::TryInto; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct SelectionProof(Signature); impl SelectionProof { diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 0047bd3ccd4..6d86c056349 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -12,9 +12,20 @@ use tree_hash_derive::TreeHash; /// gossipsub topic. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SignedAggregateAndProof { /// The `AggregateAndProof` that was signed. 
pub message: AggregateAndProof, diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5c40c4685c3..5f623cf07a6 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -8,8 +8,7 @@ use superstruct::superstruct; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(arbitrary::Arbitrary, PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); impl fmt::Debug for SignedBeaconBlockHash { @@ -38,7 +37,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Debug, @@ -49,35 +48,40 @@ impl From for Hash256 { Decode, TreeHash, Derivative, + arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec")), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), - serde(bound = "E: EthSpec, Payload: ExecPayload"), + serde(bound = "E: EthSpec, Payload: AbstractExecPayload"), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), ), map_into(BeaconBlock), map_ref_into(BeaconBlockRef), map_ref_mut_into(BeaconBlockRefMut) )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct SignedBeaconBlock = FullPayload> { +pub struct SignedBeaconBlock = FullPayload> { #[superstruct(only(Base), partial_getter(rename = "message_base"))] pub message: BeaconBlockBase, #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] pub message: BeaconBlockAltair, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, + #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] + pub message: BeaconBlockCapella, pub signature: Signature, } pub type SignedBlindedBeaconBlock = SignedBeaconBlock>; -impl> SignedBeaconBlock { +impl> SignedBeaconBlock { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -129,6 +133,9 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } + BeaconBlock::Capella(message) => { + SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) + } } } @@ -258,7 +265,7 @@ impl From>> impl SignedBeaconBlockMerge> { pub fn into_full_block( self, - execution_payload: ExecutionPayload, + execution_payload: ExecutionPayloadMerge, ) -> SignedBeaconBlockMerge> { let SignedBeaconBlockMerge { message: @@ -278,7 +285,7 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { .. }, + execution_payload: BlindedPayloadMerge { .. 
}, }, }, signature, @@ -299,7 +306,61 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, + }, + }, + signature, + } + } +} + +impl SignedBeaconBlockCapella> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadCapella, + ) -> SignedBeaconBlockCapella> { + let SignedBeaconBlockCapella { + message: + BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadCapella { .. }, + bls_to_execution_changes, + }, + }, + signature, + } = self; + SignedBeaconBlockCapella { + message: BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + bls_to_execution_changes, }, }, signature, @@ -312,12 +373,19 @@ impl SignedBeaconBlock> { self, execution_payload: Option>, ) -> Option>> { - let full_block = match self { - SignedBeaconBlock::Base(block) => SignedBeaconBlock::Base(block.into()), - SignedBeaconBlock::Altair(block) => SignedBeaconBlock::Altair(block.into()), - SignedBeaconBlock::Merge(block) => { - SignedBeaconBlock::Merge(block.into_full_block(execution_payload?)) + let full_block = match (self, execution_payload) { + (SignedBeaconBlock::Base(block), _) => SignedBeaconBlock::Base(block.into()), + (SignedBeaconBlock::Altair(block), _) => SignedBeaconBlock::Altair(block.into()), + (SignedBeaconBlock::Merge(block), Some(ExecutionPayload::Merge(payload))) => { + SignedBeaconBlock::Merge(block.into_full_block(payload)) + } + (SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => { + SignedBeaconBlock::Capella(block.into_full_block(payload)) } + // avoid wildcard matching forks so that compiler will + // direct us here when a new fork has been added + (SignedBeaconBlock::Merge(_), _) => return None, + (SignedBeaconBlock::Capella(_), _) => return None, }; Some(full_block) } @@ -354,6 +422,24 @@ impl SignedBeaconBlock { } } +impl> ForkVersionDeserialize + for SignedBeaconBlock +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "SignedBeaconBlock failed to deserialize: {:?}", + e + )))? + )) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index dc786beb6e9..c265eded1d5 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -10,9 +10,19 @@ use tree_hash_derive::TreeHash; /// A signed header of a `BeaconBlock`. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs new file mode 100644 index 00000000000..2b17095ae7d --- /dev/null +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -0,0 +1,33 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::Signature; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct SignedBlsToExecutionChange { + pub message: BlsToExecutionChange, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedBlsToExecutionChange); +} diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 245d33ff485..4cb35884338 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -10,9 +10,20 @@ use tree_hash_derive::TreeHash; /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SignedContributionAndProof { /// The `ContributionAndProof` that was signed. pub message: ContributionAndProof, diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 69f0e6e2c9f..3392826a62f 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -9,9 +9,18 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedVoluntaryExit { pub message: VoluntaryExit, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index 61f7e839fa2..b80d4a40d5a 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -7,8 +7,18 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct SigningData { pub object_root: Hash256, pub domain: Hash256, diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 277aa9deaee..2716367c7eb 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -24,13 +24,35 @@ use std::iter::Iterator; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive( + arbitrary::Arbitrary, + Clone, + Copy, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] #[serde(transparent)] pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive( + arbitrary::Arbitrary, + Clone, + Copy, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] #[serde(transparent)] pub struct Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index e1de2776150..fd06eb78a12 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -18,8 +18,7 @@ lazy_static! 
{ }; } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 2292b021118..300c86fc0f8 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -20,12 +20,21 @@ impl From for Error { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncAggregate { pub sync_committee_bits: BitVector, pub sync_committee_signature: AggregateSignature, diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 963b9dc6040..9e72438be20 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -6,9 +6,18 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Hash, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SyncAggregatorSelectionData { pub slot: Slot, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 598d5fc16fc..43ba23f121c 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -25,9 +25,20 @@ impl From for Error { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncCommittee { pub pubkeys: FixedVector, pub aggregate_pubkey: PublicKeyBytes, diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index c79ceb92fbb..ef8b52becfc 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -15,9 +15,20 @@ pub enum Error { } /// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. 
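// (Annotation, not part of this diff.) The pattern below -- deriving `arbitrary::Arbitrary`
// unconditionally instead of gating it behind the `arbitrary-fuzz` feature -- repeats across
// these types. The explicit `#[arbitrary(bound = "T: EthSpec")]` mirrors the serde bound and
// stops the derive from demanding `T: Arbitrary` on the type parameter itself. A fuzz target
// could then build values directly (illustrative, assuming the `arbitrary` crate and
// `MainnetEthSpec` are in scope):
//
//     let mut u = arbitrary::Unstructured::new(data);
//     let contribution: SyncCommitteeContribution<MainnetEthSpec> = u.arbitrary()?;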
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncCommitteeContribution { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 21dfd9c2882..5c2fb083743 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -8,8 +8,18 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// The data upon which a `SyncCommitteeContribution` is based. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 51395c0c135..570abace1eb 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -12,8 +12,7 @@ use ssz_types::typenum::Unsigned; use std::cmp; use std::convert::TryInto; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct SyncSelectionProof(Signature); impl SyncSelectionProof { diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 9babe323950..11bcf268941 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -19,8 +19,7 @@ lazy_static! { }; } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs index ec23927d30c..34043c0e83f 100644 --- a/consensus/types/src/tree_hash_impls.rs +++ b/consensus/types/src/tree_hash_impls.rs @@ -17,7 +17,7 @@ impl CachedTreeHash for Validator { /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. /// - /// Specifically, we assume that the `pubkey` and `withdrawal_credentials` fields are constant. + /// Specifically, we assume that the `pubkey` field is constant. fn recalculate_tree_hash_root( &self, arena: &mut CacheArena, @@ -29,8 +29,8 @@ impl CachedTreeHash for Validator { .iter_mut(arena)? .enumerate() .flat_map(|(i, leaf)| { - // Fields pubkey and withdrawal_credentials are constant - if (i == 0 || i == 1) && cache.initialized { + // Pubkey field (index 0) is constant. 
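                // (Annotation, not part of this diff.) `withdrawal_credentials` can no longer be
                // assumed constant: the `Validator::change_withdrawal_credentials` helper added
                // in this change rewrites that field when a BLS-to-execution change is applied,
                // so only the pubkey leaf (index 0) may be skipped when recalculating the cached
                // tree hash; every other field is re-hashed if it changed.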
+ if i == 0 && cache.initialized { None } else if process_field_by_index(self, i, leaf, !cache.initialized) { Some(i) diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 21a6b39b6d0..43b892cdf3d 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,5 +1,6 @@ use crate::{ - test_utils::TestRandom, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, + PublicKeyBytes, }; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -9,8 +10,18 @@ use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, +)] pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, @@ -65,6 +76,49 @@ impl Validator { // Has not yet been activated && self.activation_epoch == spec.far_future_epoch } + + /// Returns `true` if the validator has eth1 withdrawal credential. + pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { + self.withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) + .unwrap_or(false) + } + + /// Get the eth1 withdrawal address if this validator has one initialized. + pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
<Address>
{ + self.has_eth1_withdrawal_credential(spec) + .then(|| { + self.withdrawal_credentials + .as_bytes() + .get(12..) + .map(Address::from_slice) + }) + .flatten() + } + + /// Changes withdrawal credentials to the provided eth1 execution address. + /// + /// WARNING: this function does NO VALIDATION - it just does it! + pub fn change_withdrawal_credentials(&mut self, execution_address: &Address, spec: &ChainSpec) { + let mut bytes = [0u8; 32]; + bytes[0] = spec.eth1_address_withdrawal_prefix_byte; + bytes[12..].copy_from_slice(execution_address.as_bytes()); + self.withdrawal_credentials = Hash256::from(bytes); + } + + /// Returns `true` if the validator is fully withdrawable at some epoch. + pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 + } + + /// Returns `true` if the validator is partially withdrawable. + pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) + && self.effective_balance == spec.max_effective_balance + && balance > spec.max_effective_balance + } } impl Default for Validator { diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index cc10632d07c..20c84986c29 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -11,9 +11,18 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs new file mode 100644 index 00000000000..5221ff63f09 --- /dev/null +++ b/consensus/types/src/withdrawal.rs @@ -0,0 +1,37 @@ +use crate::test_utils::TestRandom; +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct Withdrawal { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + pub address: Address, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(Withdrawal); +} diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index fdb59626fb2..a61529af250 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -266,7 +266,7 @@ where } /// Hashes the `self.serialize()` bytes. 
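// (Annotation, not part of this diff.) Clippy renamed this lint upstream; the allow below moves
// from the old `derive_hash_xor_eq` name to the current `derived_hash_with_manual_eq` name,
// with no behavioural change intended.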
-#[allow(clippy::derive_hash_xor_eq)] +#[allow(clippy::derived_hash_with_manual_eq)] impl Hash for GenericAggregateSignature where Sig: TSignature, diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index eeeab9a8773..445a8a044f6 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.4.0" +version = "4.1.0" authors = ["Paul Hauner "] edition = "2021" [features] portable = ["bls/supranational-portable"] fake_crypto = ['bls/fake_crypto'] +jemalloc = ["malloc_utils/jemalloc"] [dependencies] bls = { path = "../crypto/bls" } @@ -40,3 +41,7 @@ eth2 = { path = "../common/eth2" } snap = "1.0.1" beacon_chain = { path = "../beacon_node/beacon_chain" } store = { path = "../beacon_node/store" } +malloc_utils = { path = "../common/malloc_utils" } + +[package.metadata.cargo-udeps.ignore] +normal = ["malloc_utils"] diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 1129e710f46..98f33f21536 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 9e91f425a73..6c0e8dcecf8 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -4,7 +4,10 @@ use ssz::Encode; use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{EthSpec, ExecutionPayloadHeader}; +use types::{ + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, + ForkName, +}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let eth1_block_hash = parse_required(matches, "execution-block-hash")?; @@ -17,15 +20,28 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; + let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge); - let execution_payload_header: ExecutionPayloadHeader = ExecutionPayloadHeader { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeader::default() + let execution_payload_header: ExecutionPayloadHeader = match fork_name { + ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), + ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderMerge::default() + }), + ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderCapella::default() + }), }; + let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; let bytes = 
execution_payload_header.as_ssz_bytes(); file.write_all(bytes.as_slice()) diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 6f39392d121..8662a804761 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -3,15 +3,14 @@ use lighthouse_network::{ discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME}, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::fs; use std::fs::File; use std::io::Write; -use std::net::IpAddr; use std::path::PathBuf; +use std::{fs, net::Ipv4Addr}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches) -> Result<(), String> { - let ip: IpAddr = clap_utils::parse_required(matches, "ip")?; + let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; @@ -25,12 +24,10 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { )); } - let config = NetworkConfig { - enr_address: Some(ip), - enr_udp_port: Some(udp_port), - enr_tcp_port: Some(tcp_port), - ..Default::default() - }; + let mut config = NetworkConfig::default(); + config.enr_address = (Some(ip), None); + config.enr_udp4_port = Some(udp_port); + config.enr_tcp6_port = Some(tcp_port); let local_keypair = Keypair::generate_secp256k1(); let enr_key = CombinedKey::from_libp2p(&local_keypair)?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35a0..cdf9cfa677d 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -371,7 +371,8 @@ fn main() { .subcommand( SubCommand::with_name("create-payload-header") .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. ") + Useful as input for `lcli new-testnet --execution-payload-header FILE`. 
If `--fork` \ + is not provided, a payload header for the `Bellatrix` fork will be created.") .arg( Arg::with_name("execution-block-hash") .long("execution-block-hash") @@ -417,7 +418,15 @@ fn main() { .takes_value(true) .required(true) .help("Output file"), - ) + ).arg( + Arg::with_name("fork") + .long("fork") + .value_name("FORK") + .takes_value(true) + .default_value("bellatrix") + .help("The fork for which the execution payload header should be created.") + .possible_values(&["merge", "bellatrix", "capella"]) + ) ) .subcommand( SubCommand::with_name("new-testnet") @@ -732,7 +741,6 @@ fn main() { .value_name("PATH") .takes_value(true) .conflicts_with("beacon-url") - .requires("pre-state-path") .help("Path to load a SignedBeaconBlock from file as SSZ."), ) .arg( @@ -792,6 +800,7 @@ fn run( debug_level: String::from("trace"), logfile_debug_level: String::from("trace"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index b2760829cb8..5af22731f3b 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -9,7 +9,8 @@ use std::io::Read; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader, + test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec, + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, ForkName, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -79,8 +80,21 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul .map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - ExecutionPayloadHeader::::from_ssz_bytes(bytes.as_slice()) - .map_err(|e| format!("SSZ decode failed: {:?}", e)) + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) + } + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) .transpose()?; @@ -88,9 +102,9 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul execution_payload_header.as_ref() { let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash); + parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp); + parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); (eth1_block_hash, genesis_time) } else { let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 4a8ea531914..54c5819f07f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "lighthouse" -version = "3.4.0" +version = "4.1.0" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.62" +rust-version = "1.68.2" [features] default = ["slasher-mdbx"] @@ -24,6 +24,8 @@ gnosis = [] 
slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] +# Use jemalloc. +jemalloc = ["malloc_utils/jemalloc"] [dependencies] beacon_node = { "path" = "../beacon_node" } @@ -53,7 +55,7 @@ malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } -slasher = { path = "../slasher" } +slasher = { path = "../slasher", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fad7edeb196..8ef67e82ddb 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -50,6 +50,7 @@ pub struct LoggerConfig { pub debug_level: String, pub logfile_debug_level: String, pub log_format: Option, + pub logfile_format: Option, pub log_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, @@ -64,6 +65,7 @@ impl Default for LoggerConfig { debug_level: String::from("info"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 200, @@ -252,7 +254,7 @@ impl EnvironmentBuilder { let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) - .format(match config.log_format.as_deref() { + .format(match config.logfile_format.as_deref() { Some("JSON") => Format::Json, _ => Format::default(), }) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index da72204f967..b05e78fe5a7 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,5 +1,3 @@ -#![recursion_limit = "256"] - mod metrics; use beacon_node::ProductionBeaconNode; @@ -31,6 +29,14 @@ fn bls_library_name() -> &'static str { } } +fn allocator_name() -> &'static str { + if cfg!(feature = "jemalloc") { + "jemalloc" + } else { + "system" + } +} + fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. if std::env::var("RUST_BACKTRACE").is_err() { @@ -51,10 +57,12 @@ fn main() { "{}\n\ BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ + Allocator: {}\n\ Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), + allocator_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), ).as_str() @@ -99,6 +107,15 @@ fn main() { .default_value("debug") .global(true), ) + .arg( + Arg::with_name("logfile-format") + .long("logfile-format") + .value_name("FORMAT") + .help("Specifies the log format used when emitting logs to the logfile.") + .possible_values(&["DEFAULT", "JSON"]) + .takes_value(true) + .global(true) + ) .arg( Arg::with_name("logfile-max-size") .long("logfile-max-size") @@ -402,6 +419,11 @@ fn run( .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; + let logfile_format = matches + .value_of("logfile-format") + // Ensure that `logfile-format` defaults to the value of `log-format`. + .or_else(|| matches.value_of("log-format")); + let logfile_max_size: u64 = matches .value_of("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? 
@@ -452,6 +474,7 @@ fn run( debug_level: String::from(debug_level), logfile_debug_level: String::from(logfile_debug_level), log_format: log_format.map(String::from), + logfile_format: logfile_format.map(String::from), log_color, disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4a2e160e8bc..9dd67eadc60 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,14 +1,15 @@ -use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config}; +use beacon_node::ClientConfig as Config; use crate::exec::{CommandLineTestExec, CompletedTest}; use beacon_node::beacon_chain::chain_config::{ + DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; use std::io::{Read, Write}; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; @@ -16,7 +17,7 @@ use std::string::ToString; use std::time::Duration; use tempfile::TempDir; use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; -use unused_port::{unused_tcp_port, unused_udp_port}; +use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -118,6 +119,26 @@ fn disable_lock_timeouts_flag() { .with_config(|config| assert!(!config.chain.enable_lock_timeouts)); } +#[test] +fn shuffling_cache_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.shuffling_cache_size, + beacon_node::beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + ) + }); +} + +#[test] +fn shuffling_cache_set() { + CommandLineTest::new() + .flag("shuffling-cache-size", Some("500")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.shuffling_cache_size, 500)); +} + #[test] fn fork_choice_before_proposal_timeout_default() { CommandLineTest::new() @@ -183,88 +204,87 @@ fn prepare_payload_lookahead_shorter() { } #[test] -fn paranoid_block_proposal_default() { +fn always_prepare_payload_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert!(!config.chain.paranoid_block_proposal)); + .with_config(|config| assert!(!config.chain.always_prepare_payload)); } #[test] -fn paranoid_block_proposal_on() { +fn always_prepare_payload_override() { CommandLineTest::new() - .flag("paranoid-block-proposal", None) + .flag("always-prepare-payload", None) .run_with_zero_port() - .with_config(|config| assert!(config.chain.paranoid_block_proposal)); + .with_config(|config| assert!(config.chain.always_prepare_payload)); } #[test] -fn count_unrealized_default() { +fn paranoid_block_proposal_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.paranoid_block_proposal)); +} + +#[test] +fn paranoid_block_proposal_on() { CommandLineTest::new() + .flag("paranoid-block-proposal", None) .run_with_zero_port() - .with_config(|config| assert!(config.chain.count_unrealized)); + .with_config(|config| assert!(config.chain.paranoid_block_proposal)); } #[test] fn count_unrealized_no_arg() { CommandLineTest::new() .flag("count-unrealized", None) - .run_with_zero_port() - .with_config(|config| assert!(config.chain.count_unrealized)); + // This flag 
should be ignored, so there's nothing to test but that the + // client starts with the flag present. + .run_with_zero_port(); } #[test] fn count_unrealized_false() { CommandLineTest::new() .flag("count-unrealized", Some("false")) - .run_with_zero_port() - .with_config(|config| assert!(!config.chain.count_unrealized)); + // This flag should be ignored, so there's nothing to test but that the + // client starts with the flag present. + .run_with_zero_port(); } #[test] fn count_unrealized_true() { CommandLineTest::new() .flag("count-unrealized", Some("true")) - .run_with_zero_port() - .with_config(|config| assert!(config.chain.count_unrealized)); + // This flag should be ignored, so there's nothing to test but that the + // client starts with the flag present. + .run_with_zero_port(); } #[test] fn count_unrealized_full_no_arg() { CommandLineTest::new() .flag("count-unrealized-full", None) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.chain.count_unrealized_full, - CountUnrealizedFull::False - ) - }); + // This flag should be ignored, so there's nothing to test but that the + // client starts with the flag present. + .run_with_zero_port(); } #[test] fn count_unrealized_full_false() { CommandLineTest::new() .flag("count-unrealized-full", Some("false")) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.chain.count_unrealized_full, - CountUnrealizedFull::False - ) - }); + // This flag should be ignored, so there's nothing to test but that the + // client starts with the flag present. + .run_with_zero_port(); } #[test] fn count_unrealized_full_true() { CommandLineTest::new() .flag("count-unrealized-full", Some("true")) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.chain.count_unrealized_full, - CountUnrealizedFull::True - ) - }); + // This flag should be ignored, so there's nothing to test but that the + // client starts with the flag present. + .run_with_zero_port(); } #[test] @@ -325,6 +345,21 @@ fn trusted_peers_flag() { }); } +#[test] +fn always_prefer_builder_payload_flag() { + CommandLineTest::new() + .flag("always-prefer-builder-payload", None) + .run_with_zero_port() + .with_config(|config| assert!(config.always_prefer_builder_payload)); +} + +#[test] +fn no_flag_sets_always_prefer_builder_payload_to_false() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.always_prefer_builder_payload)); +} + // Tests for Eth1 flags. 
#[test] fn dummy_eth1_flag() { @@ -681,6 +716,40 @@ fn builder_fallback_flags() { ); } +#[test] +fn builder_user_agent() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config.execution_layer.as_ref().unwrap().builder_user_agent, + None + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-user-agent"), + Some("anon"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_user_agent + .as_ref() + .unwrap(), + "anon" + ); + }, + ); +} + fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { use sensitive_url::SensitiveUrl; @@ -821,37 +890,188 @@ fn network_shutdown_after_sync_disabled_flag() { .with_config(|config| assert!(!config.network.shutdown_after_sync)); } #[test] -fn network_listen_address_flag() { - let addr = "127.0.0.2".parse::().unwrap(); +fn network_listen_address_flag_v4() { + let addr = "127.0.0.2".parse::().unwrap(); CommandLineTest::new() .flag("listen-address", Some("127.0.0.2")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.listen_address, addr)); + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v4().map(|addr| addr.addr), + Some(addr) + ) + }); +} +#[test] +fn network_listen_address_flag_v6() { + const ADDR: &str = "::1"; + let addr = ADDR.parse::().unwrap(); + CommandLineTest::new() + .flag("listen-address", Some(ADDR)) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v6().map(|addr| addr.addr), + Some(addr) + ) + }); } #[test] -fn network_port_flag() { - let port = unused_tcp_port().expect("Unable to find unused port."); +fn network_listen_address_flag_dual_stack() { + const V4_ADDR: &str = "127.0.0.1"; + const V6_ADDR: &str = "::1"; + let ipv6_addr = V6_ADDR.parse::().unwrap(); + let ipv4_addr = V4_ADDR.parse::().unwrap(); + CommandLineTest::new() + .flag("listen-address", Some(V6_ADDR)) + .flag("listen-address", Some(V4_ADDR)) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v6().map(|addr| addr.addr), + Some(ipv6_addr) + ); + assert_eq!( + config.network.listen_addrs().v4().map(|addr| addr.addr), + Some(ipv4_addr) + ) + }); +} +#[test] +#[should_panic] +fn network_listen_address_flag_wrong_double_v4_value_config() { + // It's actually possible to listen over multiple sockets in libp2p over the same ip version. + // However this is not compatible with the single contactable address over each version in ENR. + // Because of this, it's important to test this is disallowed. + const V4_ADDR1: &str = "127.0.0.1"; + const V4_ADDR2: &str = "0.0.0.0"; + CommandLineTest::new() + .flag("listen-address", Some(V4_ADDR1)) + .flag("listen-address", Some(V4_ADDR2)) + .run_with_zero_port(); +} +#[test] +#[should_panic] +fn network_listen_address_flag_wrong_double_v6_value_config() { + // It's actually possible to listen over multiple sockets in libp2p over the same ip version. + // However this is not compatible with the single contactable address over each version in ENR. + // Because of this, it's important to test this is disallowed. 
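    // (Annotation, not part of this diff.) For contrast, the dual-stack test above passes
    // `--listen-address ::1 --listen-address 127.0.0.1`, i.e. at most one address per IP
    // version, which maps onto the single v4/v6 contactable address pair an ENR can advertise;
    // two addresses of the same IP version (as below) must therefore be rejected at config time.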
+ const V6_ADDR1: &str = "::3"; + const V6_ADDR2: &str = "::1"; + CommandLineTest::new() + .flag("listen-address", Some(V6_ADDR1)) + .flag("listen-address", Some(V6_ADDR2)) + .run_with_zero_port(); +} +#[test] +fn network_port_flag_over_ipv4() { + let port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() .with_config(|config| { - assert_eq!(config.network.libp2p_port, port); - assert_eq!(config.network.discovery_port, port); + assert_eq!( + config + .network + .listen_addrs() + .v4() + .map(|listen_addr| (listen_addr.udp_port, listen_addr.tcp_port)), + Some((port, port)) + ); }); } #[test] -fn network_port_and_discovery_port_flags() { - let port1 = unused_tcp_port().expect("Unable to find unused port."); - let port2 = unused_udp_port().expect("Unable to find unused port."); +fn network_port_flag_over_ipv6() { + let port = unused_tcp6_port().expect("Unable to find unused port."); CommandLineTest::new() - .flag("port", Some(port1.to_string().as_str())) - .flag("discovery-port", Some(port2.to_string().as_str())) + .flag("listen-address", Some("::1")) + .flag("port", Some(port.to_string().as_str())) .run() .with_config(|config| { - assert_eq!(config.network.libp2p_port, port1); - assert_eq!(config.network.discovery_port, port2); + assert_eq!( + config + .network + .listen_addrs() + .v6() + .map(|listen_addr| (listen_addr.udp_port, listen_addr.tcp_port)), + Some((port, port)) + ); }); } +#[test] +fn network_port_and_discovery_port_flags_over_ipv4() { + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("port", Some(tcp4_port.to_string().as_str())) + .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config + .network + .listen_addrs() + .v4() + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), + Some((tcp4_port, udp4_port)) + ); + }); +} +#[test] +fn network_port_and_discovery_port_flags_over_ipv6() { + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("listen-address", Some("::1")) + .flag("port", Some(tcp6_port.to_string().as_str())) + .flag("discovery-port", Some(udp6_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config + .network + .listen_addrs() + .v6() + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), + Some((tcp6_port, udp6_port)) + ); + }); +} +#[test] +fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("listen-address", Some("::1")) + .flag("listen-address", Some("127.0.0.1")) + .flag("port", Some(tcp4_port.to_string().as_str())) + .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("port6", Some(tcp6_port.to_string().as_str())) + .flag("discovery-port6", Some(udp6_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config + .network + .listen_addrs() + .v4() + .map(|listen_addr| (listen_addr.tcp_port, 
listen_addr.udp_port)), + Some((tcp4_port, udp4_port)) + ); + + assert_eq!( + config + .network + .listen_addrs() + .v6() + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), + Some((tcp6_port, udp6_port)) + ); + }); +} + #[test] fn disable_discovery_flag() { CommandLineTest::new() @@ -860,6 +1080,13 @@ fn disable_discovery_flag() { .with_config(|config| assert!(config.network.disable_discovery)); } #[test] +fn disable_peer_scoring_flag() { + CommandLineTest::new() + .flag("disable-peer-scoring", None) + .run_with_zero_port() + .with_config(|config| assert!(config.network.disable_peer_scoring)); +} +#[test] fn disable_upnp_flag() { CommandLineTest::new() .flag("disable-upnp", None) @@ -867,6 +1094,19 @@ fn disable_upnp_flag() { .with_config(|config| assert!(!config.network.upnp_enabled)); } #[test] +fn disable_backfill_rate_limiting_flag() { + CommandLineTest::new() + .flag("disable-backfill-rate-limiting", None) + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.enable_backfill_rate_limiting)); +} +#[test] +fn default_backfill_rate_limiting_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.chain.enable_backfill_rate_limiting)); +} +#[test] fn default_boot_nodes() { let mainnet = vec![ // Lighthouse Team (Sigma Prime) @@ -956,7 +1196,6 @@ fn zero_ports_flag() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enr_address, None); assert_eq!(config.http_api.listen_port, 0); assert_eq!(config.http_metrics.listen_port, 0); }); @@ -973,67 +1212,171 @@ fn network_load_flag() { // Tests for ENR flags. #[test] -fn enr_udp_port_flags() { - let port = unused_udp_port().expect("Unable to find unused port."); +fn enr_udp_port_flag() { + let port = unused_udp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp_port, Some(port))); + .with_config(|config| assert_eq!(config.network.enr_udp4_port, Some(port))); } #[test] -fn enr_tcp_port_flags() { - let port = unused_tcp_port().expect("Unable to find unused port."); +fn enr_tcp_port_flag() { + let port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp_port, Some(port))); + .with_config(|config| assert_eq!(config.network.enr_tcp4_port, Some(port))); } #[test] -fn enr_match_flag() { - let addr = "127.0.0.2".parse::().unwrap(); - let port1 = unused_udp_port().expect("Unable to find unused port."); - let port2 = unused_udp_port().expect("Unable to find unused port."); +fn enr_udp6_port_flag() { + let port = unused_udp6_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-udp6-port", Some(port.to_string().as_str())) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.enr_udp6_port, Some(port))); +} +#[test] +fn enr_tcp6_port_flag() { + let port = unused_tcp6_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-tcp6-port", Some(port.to_string().as_str())) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.enr_tcp6_port, Some(port))); +} +#[test] +fn enr_match_flag_over_ipv4() { + let addr = "127.0.0.2".parse::().unwrap(); + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = 
unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) - .flag("discovery-port", Some(port1.to_string().as_str())) - .flag("port", Some(port2.to_string().as_str())) + .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("port", Some(tcp4_port.to_string().as_str())) .run() .with_config(|config| { - assert_eq!(config.network.listen_address, addr); - assert_eq!(config.network.enr_address, Some(addr)); - assert_eq!(config.network.discovery_port, port1); - assert_eq!(config.network.enr_udp_port, Some(port1)); + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((addr, udp4_port, tcp4_port)) + ); + assert_eq!(config.network.enr_address, (Some(addr), None)); + assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + }); +} +#[test] +fn enr_match_flag_over_ipv6() { + const ADDR: &str = "::1"; + let addr = ADDR.parse::().unwrap(); + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-match", None) + .flag("listen-address", Some(ADDR)) + .flag("discovery-port", Some(udp6_port.to_string().as_str())) + .flag("port", Some(tcp6_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((addr, udp6_port, tcp6_port)) + ); + assert_eq!(config.network.enr_address, (None, Some(addr))); + assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); }); } #[test] -fn enr_address_flag() { - let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp_port().expect("Unable to find unused port."); +fn enr_match_flag_over_ipv4_and_ipv6() { + const IPV6_ADDR: &str = "::1"; + let ipv6_addr = IPV6_ADDR.parse::().unwrap(); + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + const IPV4_ADDR: &str = "127.0.0.1"; + let ipv4_addr = IPV4_ADDR.parse::().unwrap(); + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-match", None) + .flag("listen-address", Some(IPV4_ADDR)) + .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("port", Some(tcp4_port.to_string().as_str())) + .flag("listen-address", Some(IPV6_ADDR)) + .flag("discovery-port6", Some(udp6_port.to_string().as_str())) + .flag("port6", Some(tcp6_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((ipv6_addr, udp6_port, tcp6_port)) + ); + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.addr, + listen_addr.udp_port, + listen_addr.tcp_port + )), + Some((ipv4_addr, udp4_port, tcp4_port)) + ); + assert_eq!( + config.network.enr_address, + (Some(ipv4_addr), Some(ipv6_addr)) + ); + assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + }); +} +#[test] +fn enr_address_flag_with_ipv4() { + let addr = "192.167.1.1".parse::().unwrap(); + let port = 
unused_udp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enr_address, Some(addr)); - assert_eq!(config.network.enr_udp_port, Some(port)); + assert_eq!(config.network.enr_address, (Some(addr), None)); + assert_eq!(config.network.enr_udp4_port, Some(port)); + }); +} +#[test] +fn enr_address_flag_with_ipv6() { + let addr = "192.167.1.1".parse::().unwrap(); + let port = unused_udp4_port().expect("Unable to find unused port."); + CommandLineTest::new() + .flag("enr-address", Some("192.167.1.1")) + .flag("enr-udp-port", Some(port.to_string().as_str())) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.enr_address, (Some(addr), None)); + assert_eq!(config.network.enr_udp4_port, Some(port)); }); } #[test] fn enr_address_dns_flag() { - let addr = "127.0.0.1".parse::().unwrap(); - let ipv6addr = "::1".parse::().unwrap(); - let port = unused_udp_port().expect("Unable to find unused port."); + let addr = Ipv4Addr::LOCALHOST; + let ipv6addr = Ipv6Addr::LOCALHOST; + let port = unused_udp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert!( - config.network.enr_address == Some(addr) - || config.network.enr_address == Some(ipv6addr) + config.network.enr_address.0 == Some(addr) + || config.network.enr_address.1 == Some(ipv6addr) ); - assert_eq!(config.network.enr_udp_port, Some(port)); + assert_eq!(config.network.enr_udp4_port, Some(port)); }); } #[test] @@ -1070,8 +1413,8 @@ fn http_address_ipv6_flag() { } #[test] fn http_port_flag() { - let port1 = unused_tcp_port().expect("Unable to find unused port."); - let port2 = unused_tcp_port().expect("Unable to find unused port."); + let port1 = unused_tcp4_port().expect("Unable to find unused port."); + let port2 = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("http-port", Some(port1.to_string().as_str())) .flag("port", Some(port2.to_string().as_str())) @@ -1079,6 +1422,19 @@ fn http_port_flag() { .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } #[test] +fn empty_self_limiter_flag() { + // Test that empty rate limiter is accepted using the default rate limiting configurations. 
+ CommandLineTest::new() + .flag("self-limiter", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.outbound_rate_limiter_config, + Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) + ) + }); +} +#[test] fn http_allow_origin_flag() { CommandLineTest::new() .flag("http-allow-origin", Some("127.0.0.99")) @@ -1172,8 +1528,8 @@ fn metrics_address_ipv6_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_tcp_port().expect("Unable to find unused port."); - let port2 = unused_tcp_port().expect("Unable to find unused port."); + let port1 = unused_tcp4_port().expect("Unable to find unused port."); + let port2 = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) @@ -1405,7 +1761,7 @@ fn slasher_slot_offset_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("11.25")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config.slasher.as_ref().unwrap(); assert_eq!(slasher_config.slot_offset, 11.25); @@ -1417,7 +1773,7 @@ fn slasher_slot_offset_nan_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("NaN")) - .run(); + .run_with_zero_port(); } #[test] fn slasher_history_length_flag() { @@ -1452,7 +1808,7 @@ fn slasher_attestation_cache_size_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-att-cache-size", Some("10000")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config .slasher @@ -1556,23 +1912,29 @@ fn ensure_panic_on_failed_launch() { #[test] fn enable_proposer_re_orgs_default() { - CommandLineTest::new().run().with_config(|config| { - assert_eq!( - config.chain.re_org_threshold, - Some(DEFAULT_RE_ORG_THRESHOLD) - ); - assert_eq!( - config.chain.re_org_max_epochs_since_finalization, - DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - ); - }); + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_threshold, + Some(DEFAULT_RE_ORG_THRESHOLD) + ); + assert_eq!( + config.chain.re_org_max_epochs_since_finalization, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + assert_eq!( + config.chain.re_org_cutoff(12), + Duration::from_secs(12) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + ); + }); } #[test] fn disable_proposer_re_orgs() { CommandLineTest::new() .flag("disable-proposer-reorgs", None) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold, None)); } @@ -1580,7 +1942,7 @@ fn disable_proposer_re_orgs() { fn proposer_re_org_threshold() { CommandLineTest::new() .flag("proposer-reorg-threshold", Some("90")) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90)); } @@ -1588,7 +1950,7 @@ fn proposer_re_org_threshold() { fn proposer_re_org_max_epochs_since_finalization() { CommandLineTest::new() .flag("proposer-reorg-epochs-since-finalization", Some("8")) - .run() + .run_with_zero_port() .with_config(|config| { assert_eq!( config.chain.re_org_max_epochs_since_finalization.as_u64(), @@ -1597,6 +1959,49 @@ fn proposer_re_org_max_epochs_since_finalization() { }); } +#[test] +fn proposer_re_org_cutoff() { + CommandLineTest::new() + .flag("proposer-reorg-cutoff", Some("500")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.re_org_cutoff(12), Duration::from_millis(500)) + }); +} + +#[test] +fn 
proposer_re_org_disallowed_offsets_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_disallowed_offsets, + DisallowedReOrgOffsets::new::(vec![0]).unwrap() + ) + }); +} + +#[test] +fn proposer_re_org_disallowed_offsets_override() { + CommandLineTest::new() + .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_disallowed_offsets, + DisallowedReOrgOffsets::new::(vec![1, 2, 3]).unwrap() + ) + }); +} + +#[test] +#[should_panic] +fn proposer_re_org_disallowed_offsets_invalid() { + CommandLineTest::new() + .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34")) + .run_with_zero_port(); +} + #[test] fn monitoring_endpoint() { CommandLineTest::new() @@ -1662,7 +2067,24 @@ fn logfile_no_restricted_perms_flag() { assert!(config.logger_config.is_restricted == false); }); } - +#[test] +fn logfile_format_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.logger_config.logfile_format, None)); +} +#[test] +fn logfile_format_flag() { + CommandLineTest::new() + .flag("logfile-format", Some("JSON")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.logger_config.logfile_format, + Some("JSON".to_string()) + ) + }); +} #[test] fn sync_eth1_chain_default() { CommandLineTest::new() diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 8c000bbb3d4..4dd5ad95dd4 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -12,7 +12,7 @@ use std::path::{Path, PathBuf}; use std::process::Command; use std::str::FromStr; use tempfile::TempDir; -use unused_port::unused_udp_port; +use unused_port::unused_udp4_port; const IP_ADDRESS: &str = "192.168.2.108"; @@ -62,7 +62,7 @@ fn enr_address_arg() { #[test] fn port_flag() { - let port = unused_udp_port().unwrap(); + let port = unused_udp4_port().unwrap(); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run_with_ip() @@ -122,7 +122,7 @@ fn boot_nodes_flag() { #[test] fn enr_port_flag() { - let port = unused_udp_port().unwrap(); + let port = unused_udp4_port().unwrap(); CommandLineTest::new() .flag("enr-port", Some(port.to_string().as_str())) .run_with_ip() diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index f0ed4f737d4..45cd989a44d 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -476,3 +476,28 @@ fn disable_run_on_all() { assert!(config.disable_run_on_all); }); } + +#[test] +fn latency_measurement_service() { + CommandLineTest::new().run().with_config(|config| { + assert!(config.enable_latency_measurement_service); + }); + CommandLineTest::new() + .flag("latency-measurement-service", None) + .run() + .with_config(|config| { + assert!(config.enable_latency_measurement_service); + }); + CommandLineTest::new() + .flag("latency-measurement-service", Some("true")) + .run() + .with_config(|config| { + assert!(config.enable_latency_measurement_service); + }); + CommandLineTest::new() + .flag("latency-measurement-service", Some("false")) + .run() + .with_config(|config| { + assert!(!config.enable_latency_measurement_service); + }); +} diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index c9fb3876813..c4050ac934e 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -20,7 +20,7 @@ Modify `vars.env` as desired. 
Start a local eth1 ganache server plus boot node along with `BN_COUNT` number of beacon nodes and `VC_COUNT` validator clients. -The `start_local_testnet.sh` script takes three options `-v VC_COUNT`, `-d DEBUG_LEVEL` and `-h` for help. +The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. The options may be in any order or absent in which case they take the default value specified. - VC_COUNT: the number of validator clients to create, default: `BN_COUNT` - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index dcc0a5382a9..e3aba5c3add 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -28,7 +28,7 @@ while getopts "v:d:ph" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" - echo " -p: enable private tx proposals" + echo " -p: enable builder proposals" echo " -h: this help" exit ;; diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 2506e9e1cdf..1ade1732867 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -45,7 +45,7 @@ SECONDS_PER_SLOT=3 SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage -PROPOSER_SCORE_BOOST=70 +PROPOSER_SCORE_BOOST=40 # Command line arguments for validator client VC_ARGS="" diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index b0f9ce82658..95dfff56962 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -2,6 +2,7 @@ # Requires `lighthouse`, ``lcli`, `ganache`, `curl`, `jq` + BEHAVIOR=$1 if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then @@ -9,13 +10,22 @@ if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then exit 1 fi +exit_if_fails() { + echo $@ + $@ + EXIT_CODE=$? + if [[ $EXIT_CODE -eq 1 ]]; then + exit 111 + fi +} + source ./vars.env -../local_testnet/clean.sh +exit_if_fails ../local_testnet/clean.sh echo "Starting ganache" -../local_testnet/ganache_test_node.sh &> /dev/null & +exit_if_fails ../local_testnet/ganache_test_node.sh &> /dev/null & GANACHE_PID=$! # Wait for ganache to start @@ -23,14 +33,14 @@ sleep 5 echo "Setting up local testnet" -../local_testnet/setup.sh +exit_if_fails ../local_testnet/setup.sh # Duplicate this directory so slashing protection doesn't keep us from re-using validator keys -cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger +exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger echo "Starting bootnode" -../local_testnet/bootnode.sh &> /dev/null & +exit_if_fails ../local_testnet/bootnode.sh &> /dev/null & BOOT_PID=$! # wait for the bootnode to start @@ -38,20 +48,20 @@ sleep 10 echo "Starting local beacon nodes" -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & BEACON_PID=$! -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & BEACON_PID2=$! 
-../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & BEACON_PID3=$! echo "Starting local validator clients" -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & VALIDATOR_1_PID=$! -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & VALIDATOR_2_PID=$! -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & VALIDATOR_3_PID=$! echo "Waiting an epoch before starting the next validator client" @@ -73,9 +83,14 @@ if [[ "$BEHAVIOR" == "failure" ]]; then echo "Done" - if [[ $DOPPELGANGER_EXIT -eq 124 ]]; then + # We expect to find a doppelganger, exit with success error code if doppelganger was found + # and failure if no doppelganger was found. + if [[ $DOPPELGANGER_EXIT -eq 1 ]]; then + exit 0 + else exit 1 fi + fi if [[ "$BEHAVIOR" == "success" ]]; then diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 376fe3d8c55..778a0afca59 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -44,5 +44,8 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 +# Proposer score boost percentage +PROPOSER_SCORE_BOOST=40 + # Enable doppelganger detection VC_ARGS=" --enable-doppelganger-protection " diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 31102615def..658dca4846b 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -13,7 +13,7 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] bincode = "1.3.1" byteorder = "1.3.4" eth2_ssz = { version = "0.4.1", path = "../consensus/ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../consensus/ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../consensus/ssz_derive" } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 63cf1e4649e..0a787defa21 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -9,7 +9,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } directory = { path = "../../common/directory" } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } network = { path = "../../beacon_node/network" } -slasher = { path = ".." 
} +slasher = { path = "..", default-features = false } slog = "2.5.2" slot_clock = { path = "../../common/slot_clock" } state_processing = { path = "../../consensus/state_processing" } diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 32e2d5648df..ddc49e13cd7 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,11 +1,9 @@ -FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +FROM rust:1.68.2-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse # Build lighthouse directly with a cargo build command, bypassing the Makefile. -# We have to use nightly in order to disable the new LLVM pass manager. -RUN rustup default nightly-2022-07-26 && cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Znew-llvm-pass-manager=no -Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse - +RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse # build lcli binary directly with cargo install command, bypassing the makefile RUN cargo install --path /lighthouse/lcli --force --locked diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 2917f16f8e6..09aea1c5dba 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -23,7 +23,7 @@ serde_derive = "1.0.116" serde_repr = "0.1.6" serde_yaml = "0.8.13" eth2_ssz = { version = "0.4.1", path = "../../consensus/ssz" } -eth2_ssz_derive = { version = "0.3.0", path = "../../consensus/ssz_derive" } +eth2_ssz_derive = { version = "0.3.1", path = "../../consensus/ssz_derive" } tree_hash = { version = "0.4.1", path = "../../consensus/tree_hash" } tree_hash_derive = { version = "0.4.0", path = "../../consensus/tree_hash_derive" } cached_tree_hash = { path = "../../consensus/cached_tree_hash" } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index b2af490dd0e..f7562f477a2 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0 +TESTS_TAG := v1.3.0-rc.4 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 892b9a37707..b52d1552244 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -39,8 +39,10 @@ "tests/.*/.*/ssz_static/LightClientOptimistic", # LightClientFinalityUpdate "tests/.*/.*/ssz_static/LightClientFinalityUpdate", - # Capella tests are disabled for now. - "tests/.*/capella", + # LightClientHeader + "tests/.*/.*/ssz_static/LightClientHeader", + # Deneb (previously known as eip4844) tests are disabled for now. 
+ "tests/.*/deneb", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. @@ -53,9 +55,11 @@ "bls12-381-tests/hash_to_G2" ] + def normalize_path(path): return path.split("consensus-spec-tests/")[1] + # Determine the list of filenames which were accessed during tests. passed = set() for line in open(accessed_files_filename, 'r').readlines(): @@ -88,4 +92,5 @@ def normalize_path(path): # Exit with an error if there were any files missed. assert len(missed) == 0, "{} missed files".format(len(missed)) -print("Accessed {} files ({} intentionally excluded)".format(accessed_files, excluded_files)) +print("Accessed {} files ({} intentionally excluded)".format( + accessed_files, excluded_files)) diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e15a2e2ca3a..a59ccb34adf 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -65,6 +65,7 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. + ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released.. } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 0283d13da4a..6095e1be6b1 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; +use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::{ altair, base, effective_balance_updates::process_effective_balance_updates, @@ -57,6 +58,8 @@ pub struct RandaoMixesReset; #[derive(Debug)] pub struct HistoricalRootsUpdate; #[derive(Debug)] +pub struct HistoricalSummariesUpdate; +#[derive(Debug)] pub struct ParticipationRecordUpdates; #[derive(Debug)] pub struct SyncCommitteeUpdates; @@ -77,6 +80,7 @@ type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); type_name!(RandaoMixesReset, "randao_mixes_reset"); type_name!(HistoricalRootsUpdate, "historical_roots_update"); +type_name!(HistoricalSummariesUpdate, "historical_summaries_update"); type_name!(ParticipationRecordUpdates, "participation_record_updates"); type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); @@ -97,7 +101,7 @@ impl EpochTransition for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -118,7 +122,7 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_rewards_and_penalties( state, &altair::ParticipationCache::new(state, spec).unwrap(), @@ -147,7 +151,7 @@ impl EpochTransition for Slashings { spec, )?; } - 
BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -187,7 +191,21 @@ impl EpochTransition for RandaoMixesReset { impl EpochTransition for HistoricalRootsUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_historical_roots_update(state) + match state { + BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Merge(_) => { + process_historical_roots_update(state) + } + _ => Ok(()), + } + } +} + +impl EpochTransition for HistoricalSummariesUpdate { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Capella(_) => process_historical_summaries_update(state), + _ => Ok(()), + } } } @@ -205,7 +223,7 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_sync_committee_updates(state, spec) } } @@ -216,11 +234,13 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_inactivity_updates( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + altair::process_inactivity_updates( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ) + } } } } @@ -229,7 +249,7 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_participation_flag_updates(state) } } @@ -275,9 +295,17 @@ impl> Case for EpochProcessing { T::name() != "sync_committee_updates" && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" + && T::name() != "historical_summaries_update" } // No phase0 tests for Altair and later. 
- ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", + ForkName::Altair | ForkName::Merge => { + T::name() != "participation_record_updates" + && T::name() != "historical_summaries_update" + } + ForkName::Capella => { + T::name() != "participation_record_updates" + && T::name() != "historical_roots_update" + } } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index ae12447abf3..52157d32f8e 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -61,6 +61,7 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), + ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 039efb36845..4f5d9983012 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -45,7 +45,6 @@ pub struct Checks { justified_checkpoint: Option, justified_checkpoint_root: Option, finalized_checkpoint: Option, - best_justified_checkpoint: Option, u_justified_checkpoint: Option, u_finalized_checkpoint: Option, proposer_boost_root: Option, @@ -229,7 +228,6 @@ impl Case for ForkChoiceTest { justified_checkpoint, justified_checkpoint_root, finalized_checkpoint, - best_justified_checkpoint, u_justified_checkpoint, u_finalized_checkpoint, proposer_boost_root, @@ -260,11 +258,6 @@ impl Case for ForkChoiceTest { tester.check_finalized_checkpoint(*expected_finalized_checkpoint)?; } - if let Some(expected_best_justified_checkpoint) = best_justified_checkpoint { - tester - .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?; - } - if let Some(expected_u_justified_checkpoint) = u_justified_checkpoint { tester.check_u_justified_checkpoint(*expected_u_justified_checkpoint)?; } @@ -311,6 +304,7 @@ impl Tester { .keypairs(vec![]) .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() + .recalculate_fork_times_with_genesis(0) .mock_execution_layer_all_payloads_valid() .build(); @@ -377,7 +371,7 @@ impl Tester { .chain .canonical_head .fork_choice_write_lock() - .update_time(slot, &self.spec) + .update_time(slot) .unwrap(); } @@ -387,7 +381,7 @@ impl Tester { let result = self.block_on_dangerous(self.harness.chain.process_block( block_root, block.clone(), - CountUnrealized::False, + CountUnrealized::True, NotifyExecutionLayer::Yes, ))?; if result.is_ok() != valid { @@ -431,7 +425,7 @@ impl Tester { .harness .chain .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .unwrap(); let result = self @@ -447,7 +441,7 @@ impl Tester { &state, PayloadVerificationStatus::Irrelevant, &self.harness.chain.spec, - 
self.harness.chain.config.count_unrealized.into(), + CountUnrealized::True, ); if result.is_ok() { @@ -575,23 +569,6 @@ impl Tester { check_equal("finalized_checkpoint", fc_checkpoint, expected_checkpoint) } - pub fn check_best_justified_checkpoint( - &self, - expected_checkpoint: Checkpoint, - ) -> Result<(), Error> { - let best_justified_checkpoint = self - .harness - .chain - .canonical_head - .fork_choice_read_lock() - .best_justified_checkpoint(); - check_equal( - "best_justified_checkpoint", - best_justified_checkpoint, - expected_checkpoint, - ) - } - pub fn check_u_justified_checkpoint( &self, expected_checkpoint: Checkpoint, diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index dc139ac0b9f..dbf6c70b29b 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; @@ -38,8 +38,9 @@ impl LoadCase for GenesisInitialization { let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; let execution_payload_header: Option> = if meta.execution_payload_header.unwrap_or(false) { - Some(ssz_decode_file( + Some(ssz_decode_file_with( &path.join("execution_payload_header.ssz_snappy"), + |bytes| ExecutionPayloadHeader::from_ssz_bytes(bytes, fork_name), )?) } else { None diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index a57abc2e070..c180774bb64 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -29,7 +29,7 @@ pub struct MerkleProofValidity { impl LoadCase for MerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); - let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?; + let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. 
let meta_path = path.join("meta.yaml"); diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index aaa725f567a..5fd00285aaa 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,17 +3,16 @@ use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, + altair, base, process_attester_slashings, process_bls_to_execution_changes, + process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, }, ConsensusContext, }; @@ -21,8 +20,8 @@ use std::fmt::Debug; use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, - SyncAggregate, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, + SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -36,6 +35,12 @@ struct ExecutionMetadata { execution_valid: bool, } +/// Newtype for testing withdrawals. +#[derive(Debug, Clone, Deserialize)] +pub struct WithdrawalsPayload { + payload: FullPayload, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -45,10 +50,8 @@ pub struct Operations> { pub post: Option>, } -pub trait Operation: TypeName + Debug + Sync + Sized { - fn handler_name() -> String { - Self::name().to_lowercase() - } +pub trait Operation: Debug + Sync + Sized { + fn handler_name() -> String; fn filename() -> String { format!("{}.ssz_snappy", Self::handler_name()) @@ -58,7 +61,7 @@ pub trait Operation: TypeName + Debug + Sync + Sized { true } - fn decode(path: &Path, spec: &ChainSpec) -> Result; + fn decode(path: &Path, fork_name: ForkName, spec: &ChainSpec) -> Result; fn apply_to( &self, @@ -69,7 +72,11 @@ pub trait Operation: TypeName + Debug + Sync + Sized { } impl Operation for Attestation { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "attestation".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -88,7 +95,7 @@ impl Operation for Attestation { &mut ctxt, spec, ), - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) } } @@ -100,7 +107,7 @@ impl Operation for AttesterSlashing { "attester_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -122,7 +129,11 @@ impl Operation for AttesterSlashing { } impl Operation for Deposit { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "deposit".into() + } + + fn decode(path: &Path, _fork_name: 
ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -146,7 +157,7 @@ impl Operation for ProposerSlashing { "proposer_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -172,7 +183,7 @@ impl Operation for SignedVoluntaryExit { "voluntary_exit".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -195,7 +206,7 @@ impl Operation for BeaconBlock { "block.ssz_snappy".into() } - fn decode(path: &Path, spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } @@ -230,7 +241,7 @@ impl Operation for SyncAggregate { fork_name != ForkName::Base } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -258,8 +269,11 @@ impl Operation for FullPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file(path) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -273,7 +287,7 @@ impl Operation for FullPayload { .as_ref() .map_or(false, |e| e.execution_valid); if valid { - process_execution_payload(state, self, spec) + process_execution_payload::>(state, self.to_ref(), spec) } else { Err(BlockProcessingError::ExecutionInvalid) } @@ -292,8 +306,11 @@ impl Operation for BlindedPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file::>(path).map(Into::into) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -307,13 +324,72 @@ impl Operation for BlindedPayload { .as_ref() .map_or(false, |e| e.execution_valid); if valid { - process_execution_payload(state, self, spec) + process_execution_payload::>(state, self.to_ref(), spec) } else { Err(BlockProcessingError::ExecutionInvalid) } } } +impl Operation for WithdrawalsPayload { + fn handler_name() -> String { + "withdrawals".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(|payload| WithdrawalsPayload { + payload: payload.into(), + }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) + } +} + +impl Operation for SignedBlsToExecutionChange { + fn handler_name() -> String { + "bls_to_execution_change".into() + } + + fn filename() -> String { + "address_change.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != 
ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_bls_to_execution_changes(state, &[self.clone()], VerifySignatures::True, spec) + } +} + impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -337,7 +413,7 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match O::decode(&path.join(O::filename()), spec) { + match O::decode(&path.join(O::filename()), fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), @@ -380,9 +456,11 @@ impl> Case for Operations { let mut expected = self.post.clone(); // Processing requires the committee caches. - state - .build_all_committee_caches(spec) - .expect("committee caches OK"); + // NOTE: some of the withdrawals tests have 0 active validators; do not try + // to build the committee cache in this case. + if O::handler_name() != "withdrawals" { + state.build_all_committee_caches(spec).unwrap(); + } let mut result = self .operation diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 2c9134aba50..314e51d5302 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,6 +42,11 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Capella => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 13f70fea716..2ed596e25e4 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -218,6 +218,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Merge]) } + pub fn capella_only() -> Self { + Self::for_forks(vec![ForkName::Capella]) + } + pub fn merge_and_later() -> Self { Self::for_forks(ForkName::list_all()[2..].to_vec()) } @@ -365,6 +369,11 @@ impl Handler for SanitySlotsHandler { fn handler_name(&self) -> String { "slots".into() } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Some sanity tests compute sync committees, which requires real crypto. + fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + } } #[derive(Derivative)] @@ -533,10 +542,13 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix or later. - if self.handler_name == "on_merge_block" - && (fork_name == ForkName::Base || fork_name == ForkName::Altair) - { + // Merge block tests are only enabled for Bellatrix. + if self.handler_name == "on_merge_block" && fork_name != ForkName::Merge { + return false; + } + + // Tests are no longer generated for the base/phase0 specification.
+ if fork_name == ForkName::Base { return false; } @@ -638,6 +650,11 @@ impl Handler for MerkleProofValidityHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { fork_name != ForkName::Base + // Test is skipped due to some changes in the Capella light client + // spec. + // + // https://github.com/sigp/lighthouse/issues/4022 + && fork_name != ForkName::Capella } } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5c2ca3fb55e..5ab2b4b7b43 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,10 @@ pub use case_result::CaseResult; -pub use cases::Case; +pub use cases::WithdrawalsPayload; pub use cases::{ - EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, - JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate, + InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, + ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, + SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index c075e89b3fe..675388ee58f 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. +use types::historical_summary::HistoricalSummary; use types::*; pub trait TypeName { @@ -45,6 +46,7 @@ type_name_generic!(BeaconBlockBody); type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(Checkpoint); @@ -54,8 +56,12 @@ type_name!(DepositData); type_name!(DepositMessage); type_name!(Eth1Data); type_name_generic!(ExecutionPayload); +type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); @@ -76,3 +82,7 @@ type_name_generic!(SyncAggregate); type_name_generic!(SyncCommittee); type_name!(Validator); type_name!(VoluntaryExit); +type_name!(Withdrawal); +type_name!(BlsToExecutionChange, "BLSToExecutionChange"); +type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange"); +type_name!(HistoricalSummary); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 87a6bec71b5..33f8d67ec00 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,6 +82,18 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } +#[test] +fn operations_withdrawals() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + +#[test] +fn operations_bls_to_execution_change() { + 
OperationsHandler::::default().run(); + OperationsHandler::::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::::default().run(); @@ -203,6 +215,7 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use types::historical_summary::HistoricalSummary; use types::*; ssz_static_test!(aggregate_and_proof, AggregateAndProof<_>); @@ -250,6 +263,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() + .run(); } // Altair and later @@ -302,18 +319,50 @@ mod ssz_static { // Merge and later #[test] fn execution_payload() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() + .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() .run(); } #[test] fn execution_payload_header() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec> + ::capella_only().run(); + SszStaticHandler::, MainnetEthSpec> + ::capella_only().run(); + } + + #[test] + fn withdrawal() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn signed_bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn historical_summary() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); } } @@ -381,6 +430,12 @@ fn epoch_processing_historical_roots_update() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_historical_summaries_update() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_participation_record_updates() { EpochProcessingHandler::::default().run(); @@ -448,6 +503,18 @@ fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); } +#[test] +fn fork_choice_reorg() { + ForkChoiceHandler::::new("reorg").run(); + // There is no mainnet variant for this test. +} + +#[test] +fn fork_choice_withholding() { + ForkChoiceHandler::::new("withholding").run(); + // There is no mainnet variant for this test. +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index d8df3fd8aeb..898a089ba01 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -3,7 +3,7 @@ use std::io::prelude::*; use std::io::BufReader; use std::process::{Child, Command, Stdio}; use std::time::{Duration, Instant}; -use unused_port::unused_tcp_port; +use unused_port::unused_tcp4_port; use web3::{transports::Http, Transport, Web3}; /// How long we will wait for ganache to indicate that it is ready. 
@@ -65,7 +65,7 @@ impl GanacheInstance { /// Start a new `ganache` process, waiting until it indicates that it is ready to accept /// RPC connections. pub fn new(chain_id: u64) -> Result { - let port = unused_tcp_port()?; + let port = unused_tcp4_port()?; let binary = match cfg!(windows) { true => "ganache.cmd", false => "ganache", @@ -97,7 +97,7 @@ impl GanacheInstance { } pub fn fork(&self) -> Result { - let port = unused_tcp_port()?; + let port = unused_tcp4_port()?; let binary = match cfg!(windows) { true => "ganache.cmd", false => "ganache", diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 26b5f596f22..de3085d2227 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -21,3 +21,4 @@ deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } +logging = { path = "../../common/logging" } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index ad5af531586..61a50b0405e 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -4,7 +4,7 @@ use sensitive_url::SensitiveUrl; use std::path::PathBuf; use std::process::Child; use tempfile::TempDir; -use unused_port::unused_tcp_port; +use unused_port::unused_tcp4_port; pub const KEYSTORE_PASSWORD: &str = "testpwd"; pub const ACCOUNT1: &str = "7b8C3a386C0eea54693fFB0DA17373ffC9228139"; @@ -50,8 +50,8 @@ impl ExecutionEngine { pub fn new(engine: E) -> Self { let datadir = E::init_datadir(); let jwt_secret_path = datadir.path().join(DEFAULT_JWT_FILE); - let http_port = unused_tcp_port().unwrap(); - let http_auth_port = unused_tcp_port().unwrap(); + let http_port = unused_tcp4_port().unwrap(); + let http_auth_port = unused_tcp4_port().unwrap(); let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path); let provider = Provider::::try_from(format!("http://localhost:{}", http_port)) .expect("failed to instantiate ethers provider"); diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 1b96fa9f3f9..5c83a97e21f 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use std::{env, fs::File}; use tempfile::TempDir; -use unused_port::unused_tcp_port; +use unused_port::unused_tcp4_port; const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; @@ -83,7 +83,7 @@ impl GenericExecutionEngine for GethEngine { http_auth_port: u16, jwt_secret_path: PathBuf, ) -> Child { - let network_port = unused_tcp_port().unwrap(); + let network_port = unused_tcp4_port().unwrap(); Command::new(Self::binary_path()) .arg("--datadir") diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index bd3436602c1..e46bc13c8d3 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,4 +1,3 @@ -#![recursion_limit = "1024"] /// This binary runs integration tests between Lighthouse and execution engines. 
/// /// It will first attempt to build any supported integration clients, then it will run tests. diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 740d87ab8ae..485485c6fe3 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -6,12 +6,12 @@ use std::fs::File; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use tempfile::TempDir; -use unused_port::unused_tcp_port; +use unused_port::unused_tcp4_port; /// We've pinned the Nethermind version since our method of using the `master` branch to /// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. /// We should fix this so we always pull the latest version of Nethermind. -const NETHERMIND_BRANCH: &str = "release/1.14.6"; +const NETHERMIND_BRANCH: &str = "release/1.17.1"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -67,7 +67,7 @@ impl NethermindEngine { .join("Nethermind.Runner") .join("bin") .join("Release") - .join("net6.0") + .join("net7.0") .join("Nethermind.Runner") } } @@ -88,14 +88,14 @@ impl GenericExecutionEngine for NethermindEngine { http_auth_port: u16, jwt_secret_path: PathBuf, ) -> Child { - let network_port = unused_tcp_port().unwrap(); + let network_port = unused_tcp4_port().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); Command::new(Self::binary_path()) .arg("--datadir") .arg(datadir.path().to_str().unwrap()) .arg("--config") - .arg("kiln") + .arg("hive") .arg("--Init.ChainSpecPath") .arg(genesis_json_path.to_str().unwrap()) .arg("--Merge.TerminalTotalDifficulty") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 5455b48bce8..726019a8480 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -15,8 +15,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ - Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + ForkName, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30); @@ -100,7 +100,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: impl TestRig { pub fn new(generic_engine: E) -> Self { - let log = environment::null_logger().unwrap(); + let log = logging::test_logger(); let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -110,6 +110,8 @@ impl TestRig { let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = Uint256::zero(); let fee_recipient = None; @@ -151,9 +153,6 @@ impl TestRig { } }; - let mut spec = MainnetEthSpec::default_spec(); - spec.terminal_total_difficulty = Uint256::zero(); - Self { runtime, ee_a, @@ -271,6 +270,8 @@ impl TestRig { }; let proposer_index = 0; + // To save sending proposer preparation 
data, just set the fee recipient + // to the fee recipient configured for EE A. let prepared = self .ee_a .execution_layer @@ -278,11 +279,8 @@ impl TestRig { Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient: Address::zero(), - }, + // TODO: think about how to test different forks + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None), ) .await; @@ -315,21 +313,30 @@ impl TestRig { slot: Slot::new(0), chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self + .ee_a + .execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let valid_payload = self .ee_a .execution_layer .get_payload::>( parent_hash, - timestamp, - prev_randao, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload; + .to_payload() + .execution_payload(); + assert_eq!(valid_payload.transactions().len(), pending_txs.len()); /* * Execution Engine A: @@ -337,7 +344,7 @@ impl TestRig { * Indicate that the payload is the head of the chain, before submitting a * `notify_new_payload`. */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -377,7 +384,7 @@ impl TestRig { * * Do not provide payload attributes (we'll test that later). */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -394,7 +401,6 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); - assert_eq!(valid_payload.transactions.len(), pending_txs.len()); // Verify that all submitted txs were successful for pending_tx in pending_txs { @@ -414,14 +420,23 @@ impl TestRig { */ let mut invalid_payload = valid_payload.clone(); - invalid_payload.prev_randao = Hash256::from_low_u64_be(42); + *invalid_payload.prev_randao_mut() = Hash256::from_low_u64_be(42); let status = self .ee_a .execution_layer .notify_new_payload(&invalid_payload) .await .unwrap(); - assert!(matches!(status, PayloadStatus::InvalidBlockHash { .. })); + assert!(matches!( + status, + PayloadStatus::InvalidBlockHash { .. } + // Geth is returning `INVALID` with a `null` LVH to indicate it + // does not know the invalid ancestor. + | PayloadStatus::Invalid { + latest_valid_hash: None, + .. + } + )); /* * Execution Engine A: @@ -429,8 +444,8 @@ impl TestRig { * Produce another payload atop the previous one. 
*/ - let parent_hash = valid_payload.block_hash; - let timestamp = valid_payload.timestamp + 1; + let parent_hash = valid_payload.block_hash(); + let timestamp = valid_payload.timestamp() + 1; let prev_randao = Hash256::zero(); let proposer_index = 0; let builder_params = BuilderParams { @@ -438,21 +453,29 @@ impl TestRig { slot: Slot::new(0), chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self + .ee_a + .execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let second_payload = self .ee_a .execution_layer .get_payload::>( parent_hash, - timestamp, - prev_randao, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload; + .to_payload() + .execution_payload(); /* * Execution Engine A: @@ -474,13 +497,13 @@ impl TestRig { * * Indicate that the payload is the head of the chain, providing payload attributes. */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - let payload_attributes = PayloadAttributes { - timestamp: second_payload.timestamp + 1, - prev_randao: Hash256::zero(), - suggested_fee_recipient: Address::zero(), - }; + // TODO: think about how to handle different forks + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; @@ -524,7 +547,7 @@ impl TestRig { * * Set the second payload as the head, without providing payload attributes. */ - let head_block_hash = second_payload.block_hash; + let head_block_hash = second_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -576,7 +599,7 @@ impl TestRig { * * Set the second payload as the head, without providing payload attributes. 
*/ - let head_block_hash = second_payload.block_hash; + let head_block_hash = second_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -603,13 +626,35 @@ async fn check_payload_reconstruction( ee: &ExecutionPair, payload: &ExecutionPayload, ) { + // check via legacy eth_getBlockByHash let reconstructed = ee .execution_layer - .get_payload_by_block_hash(payload.block_hash) + .get_payload_by_hash_legacy(payload.block_hash(), payload.fork_name()) .await .unwrap() .unwrap(); assert_eq!(reconstructed, *payload); + // also check via payload bodies method + let capabilities = ee + .execution_layer + .get_engine_capabilities(None) + .await + .unwrap(); + assert!( + // if the engine doesn't have these capabilities, we need to update the client in our tests + capabilities.get_payload_bodies_by_hash_v1 && capabilities.get_payload_bodies_by_range_v1, + "Testing engine does not support payload bodies methods" + ); + let mut bodies = ee + .execution_layer + .get_payload_bodies_by_hash(vec![payload.block_hash()]) + .await + .unwrap(); + assert_eq!(bodies.len(), 1); + let body = bodies.pop().unwrap().unwrap(); + let header = ExecutionPayloadHeader::from(payload.to_ref()); + let reconstructed_from_body = body.to_payload(header).unwrap(); + assert_eq!(reconstructed_from_body, *payload); } /// Returns the duration since the unix epoch. diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 82a60cda2f2..d4fd115bec3 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -89,8 +89,9 @@ pub fn testing_client_config() -> ClientConfig { let mut client_config = ClientConfig::default(); // Setting ports to `0` means that the OS will choose some available port. - client_config.network.libp2p_port = 0; - client_config.network.discovery_port = 0; + client_config + .network + .set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 0, 0); client_config.network.upnp_enabled = false; client_config.http_api.enabled = true; client_config.http_api.listen_port = 0; diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 02f4f76d51e..d34cdbc9ff1 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. @@ -228,7 +228,7 @@ pub async fn verify_transition_block_finalized( .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
.message() .execution_payload() - .map(|payload| payload.execution_payload.block_hash) + .map(|payload| payload.block_hash()) .map_err(|e| format!("Execution payload does not exist: {:?}", e))?; block_hashes.push(execution_block_hash); } diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 8284bff6096..43e8a5cf4dc 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -13,7 +13,7 @@ use node_test_rig::{ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use std::cmp::max; -use std::net::{IpAddr, Ipv4Addr}; +use std::net::Ipv4Addr; use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; @@ -62,6 +62,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, @@ -148,7 +149,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.eth1.chain_id = Eth1Id::from(chain_id); beacon_config.network.target_peers = node_count - 1; - beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); if post_merge_sim { let el_config = execution_layer::Config { diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 8df912ed161..3e481df8857 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -58,10 +58,13 @@ impl LocalNetwork { context: RuntimeContext, mut beacon_config: ClientConfig, ) -> Result { - beacon_config.network.discovery_port = BOOTNODE_PORT; - beacon_config.network.libp2p_port = BOOTNODE_PORT; - beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT); - beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT); + beacon_config.network.set_ipv4_listening_address( + std::net::Ipv4Addr::UNSPECIFIED, + BOOTNODE_PORT, + BOOTNODE_PORT, + ); + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT); beacon_config.network.discv5_config.table_filter = |_| true; let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { @@ -132,10 +135,13 @@ impl LocalNetwork { .enr() .expect("bootnode must have a network"), ); - beacon_config.network.discovery_port = BOOTNODE_PORT + count; - beacon_config.network.libp2p_port = BOOTNODE_PORT + count; - beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT + count); - beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT + count); + beacon_config.network.set_ipv4_listening_address( + std::net::Ipv4Addr::UNSPECIFIED, + BOOTNODE_PORT + count, + BOOTNODE_PORT + count, + ); + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); beacon_config.network.discv5_config.table_filter = |_| true; } if let Some(el_config) = &mut beacon_config.execution_layer { diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index 9e05a539cfc..922149537cb 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -1,5 +1,3 @@ -#![recursion_limit = "256"] - //! This crate provides a simluation that creates `n` beacon node and validator clients, each with //! `v` validators. A deposit contract is deployed at the start of the simulation using a local //! 
`ganache` instance (you must have `ganache` installed and avaliable on your path). All diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 53c4447da2c..f1f6dc44262 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -7,7 +7,7 @@ use node_test_rig::{ }; use rayon::prelude::*; use std::cmp::max; -use std::net::{IpAddr, Ipv4Addr}; +use std::net::Ipv4Addr; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::time::sleep; use types::{Epoch, EthSpec, MainnetEthSpec}; @@ -47,6 +47,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, @@ -90,7 +91,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.dummy_eth1_backend = true; beacon_config.sync_eth1_chain = true; - beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); let main_future = async { let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?; diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 1c8b41f0573..c437457c20c 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -8,7 +8,7 @@ use node_test_rig::{ }; use node_test_rig::{testing_validator_config, ClientConfig}; use std::cmp::max; -use std::net::{IpAddr, Ipv4Addr}; +use std::net::Ipv4Addr; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use types::{Epoch, EthSpec}; @@ -51,6 +51,7 @@ fn syncing_sim( debug_level: String::from(log_level), logfile_debug_level: String::from("debug"), log_format: log_format.map(String::from), + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, @@ -94,7 +95,7 @@ fn syncing_sim( beacon_config.http_api.allow_sync_stalled = true; - beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); // Generate the directories and keystores required for the validator clients. 
let validator_indices = (0..num_validators).collect::>(); diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 8ce58300629..c0fbf667236 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -3,8 +3,6 @@ name = "web3signer_tests" version = "0.1.0" edition = "2021" -build = "build.rs" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -27,9 +25,7 @@ serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" eth2_network_config = { path = "../../common/eth2_network_config" } - -[build-dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } -reqwest = { version = "0.11.0", features = ["json","stream"] } serde_json = "1.0.58" zip = "0.5.13" +lazy_static = "1.4.0" +parking_lot = "0.12.0" \ No newline at end of file diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/src/get_web3signer.rs similarity index 88% rename from testing/web3signer_tests/build.rs rename to testing/web3signer_tests/src/get_web3signer.rs index a55c39376a4..800feb204ae 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/src/get_web3signer.rs @@ -15,17 +15,6 @@ use zip::ZipArchive; /// Use `Some("21.8.1")` to download a specific version. const FIXED_VERSION_STRING: Option<&str> = None; -#[tokio::main] -async fn main() { - let out_dir = env::var("OUT_DIR").unwrap(); - - // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. - // We use a name that is unlikely to accidentally collide with anything the user has configured. - let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); - - download_binary(out_dir.into(), github_token.as_deref().unwrap_or("")).await; -} - pub async fn download_binary(dest_dir: PathBuf, github_token: &str) { let version_file = dest_dir.join("version"); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 4f9a574f847..dd17ae23b15 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -9,16 +9,21 @@ //! - Lighthouse can issue valid requests to Web3Signer. //! - The signatures generated by Web3Signer are identical to those which Lighthouse generates. //! -//! There is a build script in this crate which obtains the latest version of Web3Signer and makes -//! it available via the `OUT_DIR`. +//! There is a `download_binary` function in the `get_web3signer` module which obtains the latest version of Web3Signer and makes +//! it available via the `TEMP_DIR`. +#![cfg(all(test, unix, not(debug_assertions)))] + +mod get_web3signer; -#[cfg(all(test, unix, not(debug_assertions)))] mod tests { + use crate::get_web3signer::download_binary; use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use lazy_static::lazy_static; + use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; use slot_clock::{SlotClock, TestingSlotClock}; @@ -31,7 +36,8 @@ mod tests { use std::sync::Arc; use std::time::{Duration, Instant}; use task_executor::TaskExecutor; - use tempfile::TempDir; + use tempfile::{tempdir, TempDir}; + use tokio::sync::OnceCell; use tokio::time::sleep; use types::*; use url::Url; @@ -51,6 +57,13 @@ mod tests { /// debugging. const SUPPRESS_WEB3SIGNER_LOGS: bool = true; + lazy_static! 
{ + static ref TEMP_DIR: Arc> = Arc::new(Mutex::new( + tempdir().expect("Failed to create temporary directory") + )); + static ref GET_WEB3SIGNER_BIN: OnceCell<()> = OnceCell::new(); + } + type E = MainnetEthSpec; /// This marker trait is implemented for objects that we wish to compare to ensure Web3Signer @@ -99,7 +112,10 @@ mod tests { /// The location of the Web3Signer binary generated by the build script. fn web3signer_binary() -> PathBuf { - PathBuf::from(env::var("OUT_DIR").unwrap()) + TEMP_DIR + .lock() + .path() + .to_path_buf() .join("web3signer") .join("bin") .join("web3signer") @@ -143,6 +159,19 @@ mod tests { impl Web3SignerRig { pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { + GET_WEB3SIGNER_BIN + .get_or_init(|| async { + // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. + // We use a name that is unlikely to accidentally collide with anything the user has configured. + let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); + download_binary( + TEMP_DIR.lock().path().to_path_buf(), + github_token.as_deref().unwrap_or(""), + ) + .await; + }) + .await; + let keystore_dir = TempDir::new().unwrap(); let keypair = testing_keypair(); let keystore = @@ -660,17 +689,17 @@ mod tests { } #[tokio::test] - async fn ropsten_base_types() { - test_base_types("ropsten", 4250).await + async fn sepolia_base_types() { + test_base_types("sepolia", 4250).await } #[tokio::test] - async fn ropsten_altair_types() { - test_altair_types("ropsten", 4251).await + async fn sepolia_altair_types() { + test_altair_types("sepolia", 4251).await } #[tokio::test] - async fn ropsten_merge_types() { - test_merge_types("ropsten", 4252).await + async fn sepolia_merge_types() { + test_merge_types("sepolia", 4252).await } } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index c566060c27e..5eccd89359d 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -12,9 +12,9 @@ path = "tests/main.rs" [dependencies] tempfile = "3.1.0" types = { path = "../../consensus/types" } -rusqlite = { version = "0.25.3", features = ["bundled"] } +rusqlite = { version = "0.28.0", features = ["bundled"] } r2d2 = "0.8.9" -r2d2_sqlite = "0.18.0" +r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index bd5f97f4d81..c8be851472e 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -162,8 +162,8 @@ impl SlashingDatabase { /// The exclusive locking mode also has the benefit of applying to other processes, so multiple /// Lighthouse processes trying to access the same database will also be blocked. 
fn apply_pragmas(conn: &mut rusqlite::Connection) -> Result<(), rusqlite::Error> { - conn.pragma_update(None, "foreign_keys", &true)?; - conn.pragma_update(None, "locking_mode", &"EXCLUSIVE")?; + conn.pragma_update(None, "foreign_keys", true)?; + conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?; Ok(()) } diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 82f085c43fb..3e667429b4e 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -7,17 +7,20 @@ use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_RE use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; -use slog::{error, info, warn, Logger}; +use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; use std::future::Future; use std::marker::PhantomData; use std::sync::Arc; -use std::time::Duration; +use std::time::{Duration, Instant}; use tokio::{sync::RwLock, time::sleep}; use types::{ChainSpec, Config, EthSpec}; +/// Message emitted when the VC detects the BN is using a different spec. +const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updating"; + /// The number of seconds *prior* to slot start that we will try and update the state of fallback /// nodes. /// @@ -27,6 +30,14 @@ use types::{ChainSpec, Config, EthSpec}; /// having the correct nodes up and running prior to the start of the slot. const SLOT_LOOKAHEAD: Duration = Duration::from_secs(1); +/// Indicates a measurement of latency between the VC and a BN. +pub struct LatencyMeasurement { + /// An identifier for the beacon node (e.g. the URL). + pub beacon_node_id: String, + /// The round-trip latency, if the BN responded successfully. + pub latency: Option, +} + /// Starts a service that will routinely try and update the status of the provided `beacon_nodes`. /// /// See `SLOT_LOOKAHEAD` for information about when this should run. @@ -262,6 +273,7 @@ impl CandidateBeaconNode { "Beacon node has mismatched Altair fork epoch"; "endpoint" => %self.beacon_node, "endpoint_altair_fork_epoch" => ?beacon_node_spec.altair_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, ); } else if beacon_node_spec.bellatrix_fork_epoch != spec.bellatrix_fork_epoch { warn!( @@ -269,6 +281,15 @@ impl CandidateBeaconNode { "Beacon node has mismatched Bellatrix fork epoch"; "endpoint" => %self.beacon_node, "endpoint_bellatrix_fork_epoch" => ?beacon_node_spec.bellatrix_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, + ); + } else if beacon_node_spec.capella_fork_epoch != spec.capella_fork_epoch { + warn!( + log, + "Beacon node has mismatched Capella fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_capella_fork_epoch" => ?beacon_node_spec.capella_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, ); } @@ -394,6 +415,47 @@ impl BeaconNodeFallback { let _ = future::join_all(futures).await; } + /// Concurrently send a request to all candidates (regardless of + /// offline/online) status and attempt to collect a rough reading on the + /// latency between the VC and candidate. + pub async fn measure_latency(&self) -> Vec { + let futures: Vec<_> = self + .candidates + .iter() + .map(|candidate| async { + let beacon_node_id = candidate.beacon_node.to_string(); + // The `node/version` endpoint is used since I imagine it would + // require the least processing in the BN and therefore measure + // the connection moreso than the BNs processing speed. 
+ // + // I imagine all clients have the version string available as a + // pre-computed string. + let response_instant = candidate + .beacon_node + .get_node_version() + .await + .ok() + .map(|_| Instant::now()); + (beacon_node_id, response_instant) + }) + .collect(); + + let request_instant = Instant::now(); + + // Send the request to all BNs at the same time. This might involve some + // queueing on the sending host; however, I hope it will avoid bias + // caused by sending requests at different times. + future::join_all(futures) + .await + .into_iter() + .map(|(beacon_node_id, response_instant)| LatencyMeasurement { + beacon_node_id, + latency: response_instant + .and_then(|response| response.checked_duration_since(request_instant)), + }) + .collect() + } + /// Run `func` against each candidate in `self`, returning immediately if a result is found. /// Otherwise, return all the errors encountered along the way. /// @@ -409,10 +471,12 @@ impl BeaconNodeFallback { where F: Fn(&'a BeaconNodeHttpClient) -> R, R: Future>, + Err: Debug, { let mut errors = vec![]; let mut to_retry = vec![]; let mut retry_unsynced = vec![]; + let log = &self.log.clone(); // Run `func` using a `candidate`, returning the value or capturing errors. // @@ -427,6 +491,12 @@ impl BeaconNodeFallback { match func(&$candidate.beacon_node).await { Ok(val) => return Ok(val), Err(e) => { + debug!( + log, + "Request to beacon node failed"; + "node" => $candidate.beacon_node.to_string(), + "error" => ?e, + ); // If we have an error on this function, mark the client as not-ready. // // There exists a race condition where the candidate may have been marked @@ -626,6 +696,7 @@ impl BeaconNodeFallback { where F: Fn(&'a BeaconNodeHttpClient) -> R, R: Future>, + Err: Debug, { if self.disable_run_on_all { self.first_success(require_synced, offline_on_failure, func) diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d4acbe7563d..3b37492377f 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -7,7 +7,6 @@ use crate::{ }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; -use eth2::types::Graffiti; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; @@ -15,7 +14,10 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use tokio::time::sleep; -use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; +use types::{ + AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes, + Slot, +}; #[derive(Debug)] pub enum BlockError { @@ -295,7 +297,7 @@ impl BlockService { } /// Produce a block at the given slot for validator_pubkey - async fn publish_block>( + async fn publish_block>( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -333,6 +335,11 @@ impl BlockService { let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + info!( + log, + "Requesting unsigned block"; + "slot" => slot.as_u64(), + ); // Request block from first responsive beacon node. let block = self .beacon_nodes @@ -383,6 +390,11 @@ impl BlockService { } }; + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); if proposer_index != Some(block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer.
Beacon chain re-orged" @@ -395,11 +407,21 @@ impl BlockService { ) .await?; + let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); let signed_block = self_ref .validator_store .sign_block::(*validator_pubkey_ref, block, current_slot) .await .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + let signing_time_ms = + Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); + + info!( + log, + "Publishing signed block"; + "slot" => slot.as_u64(), + "signing_time_ms" => signing_time_ms, + ); // Publish block with first available beacon node. self.beacon_nodes @@ -453,6 +475,7 @@ impl BlockService { "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), "slot" => signed_block.slot().as_u64(), ); + Ok(()) } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index c82a1a9d362..fd96aa1f5c4 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -231,6 +231,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5064).") .takes_value(true), ) + .arg( + Arg::with_name("enable-high-validator-count-metrics") + .long("enable-high-validator-count-metrics") + .help("Enable per validator metrics for > 64 validators. \ + Note: This flag is automatically enabled for <= 64 validators. \ + Enabling this metric for higher validator counts will lead to higher volume \ + of prometheus metrics being collected.") + .takes_value(false), + ) /* * Explorer metrics */ @@ -309,6 +318,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { set here moves too far from the previous block's gas limit. [default: 30,000,000]") .requires("builder-proposals"), ) + .arg( + Arg::with_name("latency-measurement-service") + .long("latency-measurement-service") + .value_name("BOOLEAN") + .help("Set to 'true' to enable a service that periodically attempts to measure latency to BNs. \ + Set to 'false' to disable.") + .default_value("true") + .takes_value(true), + ) /* * Experimental/development options. */ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 22741dabbd7..724d6c74f1f 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -53,6 +53,11 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, + /// If true, then we publish validator specific metrics (e.g next attestation duty slot) + /// for all our managed validators. + /// Note: We publish validator specific metrics for low validator counts without this flag + /// (<= 64 validators) + pub enable_high_validator_count_metrics: bool, /// Enable use of the blinded block endpoints during proposals. pub builder_proposals: bool, /// Overrides the timestamp field in builder api ValidatorRegistrationV1 @@ -68,6 +73,8 @@ pub struct Config { pub block_delay: Option, /// Disables publishing http api requests to all beacon nodes for select api calls. pub disable_run_on_all: bool, + /// Enables a service which attempts to measure latency between the VC and BNs. 
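+    /// It is enabled by default and can be disabled on the CLI with `--latency-measurement-service false`.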
+ pub enable_latency_measurement_service: bool, } impl Default for Config { @@ -99,12 +106,14 @@ impl Default for Config { http_metrics: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, + enable_high_validator_count_metrics: false, beacon_nodes_tls_certs: None, block_delay: None, builder_proposals: false, builder_registration_timestamp_override: None, gas_limit: None, disable_run_on_all: false, + enable_latency_measurement_service: true, } } } @@ -273,6 +282,10 @@ impl Config { config.http_metrics.enabled = true; } + if cli_args.is_present("enable-high-validator-count-metrics") { + config.enable_high_validator_count_metrics = true; + } + if let Some(address) = cli_args.value_of("metrics-address") { config.http_metrics.listen_addr = address .parse::() @@ -347,6 +360,9 @@ impl Config { ); } + config.enable_latency_measurement_service = + parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true); + /* * Experimental */ diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 86b8ca870e2..c335c67ab16 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -9,6 +9,7 @@ mod sync; use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced}; +use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use crate::{ block_service::BlockServiceNotification, http_metrics::metrics, @@ -16,13 +17,14 @@ use crate::{ }; use environment::RuntimeContext; use eth2::types::{AttesterData, BeaconCommitteeSubscription, ProposerData, StateId, ValidatorId}; -use futures::future::join_all; +use futures::{stream, StreamExt}; use parking_lot::RwLock; use safe_arith::ArithError; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; -use std::collections::{HashMap, HashSet}; +use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; use std::sync::Arc; +use std::time::Duration; use sync::poll_sync_committee_duties; use sync::SyncDutiesMap; use tokio::{sync::mpsc::Sender, time::sleep}; @@ -39,6 +41,19 @@ const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. const HISTORICAL_DUTIES_EPOCHS: u64 = 2; +/// Compute attestation selection proofs this many slots before they are required. +/// +/// At start-up selection proofs will be computed with less lookahead out of necessity. +const SELECTION_PROOF_SLOT_LOOKAHEAD: u64 = 8; + +/// Fraction of a slot at which selection proof signing should happen (2 means halfway). +const SELECTION_PROOF_SCHEDULE_DENOM: u32 = 2; + +/// Minimum number of validators for which we auto-enable per-validator metrics. +/// For validator counts greater than this value, the `--enable-high-validator-count-metrics` +/// CLI flag must be set manually to enable collection of per-validator metrics. +const VALIDATOR_METRICS_MIN_COUNT: usize = 64; + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -65,7 +80,7 @@ pub struct DutyAndProof { impl DutyAndProof { /// Instantiate `Self`, computing the selection proof as well. - pub async fn new( + pub async fn new_with_selection_proof( duty: AttesterData, validator_store: &ValidatorStore, spec: &ChainSpec, @@ -93,6 +108,14 @@ impl DutyAndProof { selection_proof, }) } + + /// Create a new `DutyAndProof` with the selection proof waiting to be filled in.
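+    ///
+    /// The proof is filled in later by the `fill_in_selection_proofs` background task, which batches
+    /// the signing to avoid creating a thundering herd of signing threads whenever new duties arrive.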
+ pub fn new_without_selection_proof(duty: AttesterData) -> Self { + Self { + duty, + selection_proof: None, + } + } } /// To assist with readability, the dependent root for attester/proposer duties. @@ -121,6 +144,7 @@ pub struct DutiesService { /// This functionality is a little redundant since most BNs will likely reject duties when they /// aren't synced, but we keep it around for an emergency. pub require_synced: RequireSynced, + pub enable_high_validator_count_metrics: bool, pub context: RuntimeContext, pub spec: ChainSpec, } @@ -220,6 +244,12 @@ impl DutiesService { .cloned() .collect() } + + /// Returns `true` if we should collect per validator metrics and `false` otherwise. + pub fn per_validator_metrics(&self) -> bool { + self.enable_high_validator_count_metrics + || self.total_validator_count() <= VALIDATOR_METRICS_MIN_COUNT + } } /// Start the service that periodically polls the beacon node for validator duties. This will start @@ -458,7 +488,7 @@ async fn poll_validator_indices( /// 3. Push out any attestation subnet subscriptions to the BN. /// 4. Prune old entries from `duties_service.attesters`. async fn poll_beacon_attesters( - duties_service: &DutiesService, + duties_service: &Arc>, ) -> Result<(), Error> { let current_epoch_timer = metrics::start_timer_vec( &metrics::DUTIES_SERVICE_TIMES, @@ -501,6 +531,7 @@ async fn poll_beacon_attesters( current_epoch, &local_indices, &local_pubkeys, + current_slot, ) .await { @@ -520,9 +551,14 @@ async fn poll_beacon_attesters( ); // Download the duties and update the duties for the next epoch. - if let Err(e) = - poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys) - .await + if let Err(e) = poll_beacon_attesters_for_epoch( + duties_service, + next_epoch, + &local_indices, + &local_pubkeys, + current_slot, + ) + .await { error!( log, @@ -615,10 +651,11 @@ async fn poll_beacon_attesters( /// For the given `local_indices` and `local_pubkeys`, download the duties for the given `epoch` and /// store them in `duties_service.attesters`. async fn poll_beacon_attesters_for_epoch( - duties_service: &DutiesService, + duties_service: &Arc>, epoch: Epoch, local_indices: &[u64], local_pubkeys: &HashSet, + current_slot: Slot, ) -> Result<(), Error> { let log = duties_service.context.log(); @@ -671,6 +708,35 @@ async fn poll_beacon_attesters_for_epoch( .data .into_iter() .filter(|duty| { + if duties_service.per_validator_metrics() { + let validator_index = duty.validator_index; + let duty_slot = duty.slot; + if let Some(existing_slot_gauge) = + get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) + { + let existing_slot = Slot::new(existing_slot_gauge.get() as u64); + let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); + + // First condition ensures that we switch to the next epoch duty slot + // once the current epoch duty slot passes. + // Second condition is to ensure that next epoch duties don't override + // current epoch duties. 
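+                    // For example (mainnet, 32 slots per epoch): with current_slot = 100, a stale
+                    // gauge value of 98 is replaced by a new duty at slot 130, while a pending
+                    // current-epoch duty at slot 110 is not overwritten by a next-epoch duty at
+                    // slot 140 until slot 110 has passed.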
+ if existing_slot < current_slot + || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch + && duty_slot > current_slot + && duty_slot != existing_slot) + { + existing_slot_gauge.set(duty_slot.as_u64() as i64); + } + } else { + set_int_gauge( + &ATTESTATION_DUTY, + &[&validator_index.to_string()], + duty_slot.as_u64() as i64, + ); + } + } + local_pubkeys.contains(&duty.pubkey) && { // Only update the duties if either is true: // @@ -693,31 +759,16 @@ async fn poll_beacon_attesters_for_epoch( "num_new_duties" => new_duties.len(), ); - // Produce the `DutyAndProof` messages in parallel. - let duty_and_proof_results = join_all(new_duties.into_iter().map(|duty| { - DutyAndProof::new(duty, &duties_service.validator_store, &duties_service.spec) - })) - .await; - // Update the duties service with the new `DutyAndProof` messages. let mut attesters = duties_service.attesters.write(); let mut already_warned = Some(()); - for result in duty_and_proof_results { - let duty_and_proof = match result { - Ok(duty_and_proof) => duty_and_proof, - Err(e) => { - error!( - log, - "Failed to produce duty and proof"; - "error" => ?e, - "msg" => "may impair attestation duties" - ); - // Do not abort the entire batch for a single failure. - continue; - } - }; + for duty in &new_duties { + let attester_map = attesters.entry(duty.pubkey).or_default(); - let attester_map = attesters.entry(duty_and_proof.duty.pubkey).or_default(); + // Create initial entries in the map without selection proofs. We'll compute them in the + // background later to avoid creating a thundering herd of signing threads whenever new + // duties are computed. + let duty_and_proof = DutyAndProof::new_without_selection_proof(duty.clone()); if let Some((prior_dependent_root, _)) = attester_map.insert(epoch, (dependent_root, duty_and_proof)) @@ -736,9 +787,144 @@ async fn poll_beacon_attesters_for_epoch( } drop(attesters); + // Spawn the background task to compute selection proofs. + let subservice = duties_service.clone(); + duties_service.context.executor.spawn( + async move { + fill_in_selection_proofs(subservice, new_duties, dependent_root).await; + }, + "duties_service_selection_proofs_background", + ); + Ok(()) } +/// Compute the attestation selection proofs for the `duties` and add them to the `attesters` map. +/// +/// Duties are computed in batches each slot. If a re-org is detected then the process will +/// terminate early as it is assumed the selection proofs from `duties` are no longer relevant. +async fn fill_in_selection_proofs( + duties_service: Arc>, + duties: Vec, + dependent_root: Hash256, +) { + let log = duties_service.context.log(); + + // Sort duties by slot in a BTreeMap. + let mut duties_by_slot: BTreeMap> = BTreeMap::new(); + + for duty in duties { + duties_by_slot.entry(duty.slot).or_default().push(duty); + } + + // At halfway through each slot when nothing else is likely to be getting signed, sign a batch + // of selection proofs and insert them into the duties service `attesters` map. 
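+    // With mainnet's 12 second slots the signing batch therefore runs roughly 6 seconds into each
+    // slot, covering duties up to SELECTION_PROOF_SLOT_LOOKAHEAD (8) slots ahead.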
+ let slot_clock = &duties_service.slot_clock; + let slot_offset = duties_service.slot_clock.slot_duration() / SELECTION_PROOF_SCHEDULE_DENOM; + + while !duties_by_slot.is_empty() { + if let Some(duration) = slot_clock.duration_to_next_slot() { + sleep(duration.saturating_sub(slot_offset)).await; + + let Some(current_slot) = slot_clock.now() else { + continue; + }; + + let lookahead_slot = current_slot + SELECTION_PROOF_SLOT_LOOKAHEAD; + + let mut relevant_duties = duties_by_slot.split_off(&lookahead_slot); + std::mem::swap(&mut relevant_duties, &mut duties_by_slot); + + let batch_size = relevant_duties.values().map(Vec::len).sum::(); + + if batch_size == 0 { + continue; + } + + let timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTATION_SELECTION_PROOFS], + ); + + // Sign selection proofs (serially). + let duty_and_proof_results = stream::iter(relevant_duties.into_values().flatten()) + .then(|duty| async { + DutyAndProof::new_with_selection_proof( + duty, + &duties_service.validator_store, + &duties_service.spec, + ) + .await + }) + .collect::>() + .await; + + // Add to attesters store. + let mut attesters = duties_service.attesters.write(); + for result in duty_and_proof_results { + let duty_and_proof = match result { + Ok(duty_and_proof) => duty_and_proof, + Err(e) => { + error!( + log, + "Failed to produce duty and proof"; + "error" => ?e, + "msg" => "may impair attestation duties" + ); + // Do not abort the entire batch for a single failure. + continue; + } + }; + + let attester_map = attesters.entry(duty_and_proof.duty.pubkey).or_default(); + let epoch = duty_and_proof.duty.slot.epoch(E::slots_per_epoch()); + match attester_map.entry(epoch) { + hash_map::Entry::Occupied(mut entry) => { + // No need to update duties for which no proof was computed. + let Some(selection_proof) = duty_and_proof.selection_proof else { + continue; + }; + + let (existing_dependent_root, existing_duty) = entry.get_mut(); + + if *existing_dependent_root == dependent_root { + // Replace existing proof. + existing_duty.selection_proof = Some(selection_proof); + } else { + // Our selection proofs are no longer relevant due to a reorg, abandon + // this entire background process. + debug!( + log, + "Stopping selection proof background task"; + "reason" => "re-org" + ); + return; + } + } + hash_map::Entry::Vacant(entry) => { + entry.insert((dependent_root, duty_and_proof)); + } + } + } + drop(attesters); + + let time_taken_ms = + Duration::from_secs_f64(timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); + debug!( + log, + "Computed attestation selection proofs"; + "batch_size" => batch_size, + "lookahead_slot" => lookahead_slot, + "time_taken_ms" => time_taken_ms + ); + } else { + // Just sleep for one slot if we are unable to read the system clock, this gives + // us an opportunity for the clock to eventually come good. + sleep(duties_service.slot_clock.slot_duration()).await; + } + } +} + /// Download the proposer duties for the current epoch and store them in `duties_service.proposers`. /// If there are any proposer for this slot, send out a notification to the block proposers. 
/// diff --git a/validator_client/src/http_api/create_signed_voluntary_exit.rs b/validator_client/src/http_api/create_signed_voluntary_exit.rs new file mode 100644 index 00000000000..b777d158064 --- /dev/null +++ b/validator_client/src/http_api/create_signed_voluntary_exit.rs @@ -0,0 +1,69 @@ +use crate::validator_store::ValidatorStore; +use bls::{PublicKey, PublicKeyBytes}; +use slog::{info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit}; + +pub async fn create_signed_voluntary_exit( + pubkey: PublicKey, + maybe_epoch: Option, + validator_store: Arc>, + slot_clock: T, + log: Logger, +) -> Result { + let epoch = match maybe_epoch { + Some(epoch) => epoch, + None => get_current_epoch::(slot_clock).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to determine current epoch".to_string()) + })?, + }; + + let pubkey_bytes = PublicKeyBytes::from(pubkey); + if !validator_store.has_validator(&pubkey_bytes) { + return Err(warp_utils::reject::custom_not_found(format!( + "{} is disabled or not managed by this validator client", + pubkey_bytes.as_hex_string() + ))); + } + + let validator_index = validator_store + .validator_index(&pubkey_bytes) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "The validator index for {} is not known. The validator client \ + may still be initializing or the validator has not yet had a \ + deposit processed.", + pubkey_bytes.as_hex_string() + )) + })?; + + let voluntary_exit = VoluntaryExit { + epoch, + validator_index, + }; + + info!( + log, + "Signing voluntary exit"; + "validator" => pubkey_bytes.as_hex_string(), + "epoch" => epoch + ); + + let signed_voluntary_exit = validator_store + .sign_voluntary_exit(pubkey_bytes, voluntary_exit) + .await + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to sign voluntary exit: {:?}", + e + )) + })?; + + Ok(signed_voluntary_exit) +} + +/// Calculates the current epoch from the genesis time and current time. 
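+///
+/// For example, on mainnet (32 slots per epoch) a current slot of 100 maps to epoch 3.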
+fn get_current_epoch(slot_clock: T) -> Option { + slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) +} diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index b87bb083811..15b3f9fe09e 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,9 +1,11 @@ mod api_secret; +mod create_signed_voluntary_exit; mod create_validator; mod keystores; mod remotekeys; mod tests; +use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, @@ -71,6 +73,7 @@ pub struct Context { pub spec: ChainSpec, pub config: Config, pub log: Logger, + pub slot_clock: T, pub _phantom: PhantomData, } @@ -189,6 +192,9 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_slot_clock = ctx.slot_clock.clone(); + let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); + let inner_spec = Arc::new(ctx.spec.clone()); let spec_filter = warp::any().map(move || inner_spec.clone()); @@ -904,6 +910,46 @@ pub fn serve( ) .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // POST /eth/v1/validator/{pubkey}/voluntary_exit + let post_validators_voluntary_exits = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("voluntary_exit")) + .and(warp::query::()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(slot_clock_filter) + .and(log_filter.clone()) + .and(signer.clone()) + .and(task_executor_filter.clone()) + .and_then( + |pubkey: PublicKey, + query: api_types::VoluntaryExitQuery, + validator_store: Arc>, + slot_clock: T, + log, + signer, + task_executor: TaskExecutor| { + blocking_signed_json_task(signer, move || { + if let Some(handle) = task_executor.handle() { + let signed_voluntary_exit = + handle.block_on(create_signed_voluntary_exit( + pubkey, + query.epoch, + validator_store, + slot_clock, + log, + ))?; + Ok(signed_voluntary_exit) + } else { + Err(warp_utils::reject::custom_server_error( + "Lighthouse shutting down".into(), + )) + } + }) + }, + ); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -1001,6 +1047,7 @@ pub fn serve( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_validators_voluntary_exits) .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 5aa24a2b022..df0e4804440 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -45,6 +45,7 @@ struct ApiTester { initialized_validators: Arc>, validator_store: Arc>, url: SensitiveUrl, + slot_clock: TestingSlotClock, _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, _runtime_shutdown: exit_future::Signal, @@ -90,8 +91,12 @@ impl ApiTester { let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); - let slot_clock = - TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let genesis_time: u64 = 0; + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(genesis_time), + Duration::from_secs(1), + ); let (runtime_shutdown, exit) = exit_future::signal(); let 
(shutdown_tx, _) = futures::channel::mpsc::channel(1); @@ -101,9 +106,9 @@ impl ApiTester { initialized_validators, slashing_protection, Hash256::repeat_byte(42), - spec, + spec.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), - slot_clock, + slot_clock.clone(), &config, executor.clone(), log.clone(), @@ -129,7 +134,8 @@ impl ApiTester { listen_port: 0, allow_origin: None, }, - log, + log: log.clone(), + slot_clock: slot_clock.clone(), _phantom: PhantomData, }); let ctx = context.clone(); @@ -156,6 +162,7 @@ impl ApiTester { initialized_validators, validator_store, url, + slot_clock, _server_shutdown: shutdown_tx, _validator_dir: validator_dir, _runtime_shutdown: runtime_shutdown, @@ -212,9 +219,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); @@ -494,6 +501,33 @@ impl ApiTester { self } + pub async fn test_sign_voluntary_exits(self, index: usize, maybe_epoch: Option) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + // manually setting validator index in `ValidatorStore` + self.initialized_validators + .write() + .set_index(&validator.voting_pubkey, 0); + + let expected_exit_epoch = maybe_epoch.unwrap_or_else(|| self.get_current_epoch()); + + let resp = self + .client + .post_validator_voluntary_exit(&validator.voting_pubkey, maybe_epoch) + .await; + + assert!(resp.is_ok()); + assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch); + + self + } + + fn get_current_epoch(&self) -> Epoch { + self.slot_clock + .now() + .map(|s| s.epoch(E::slots_per_epoch())) + .unwrap() + } + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; @@ -778,6 +812,29 @@ fn hd_validator_creation() { }); } +#[test] +fn validator_exit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .test_sign_voluntary_exits(0, None) + .await + .test_sign_voluntary_exits(0, Some(Epoch::new(256))) + .await; + }); +} + #[test] fn validator_enabling() { let runtime = build_runtime(); diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 146d008a575..8a52a4d35e9 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -32,6 +32,7 @@ pub const PROPOSER_DUTIES_HTTP_GET: &str = "proposer_duties_http_get"; pub const VALIDATOR_ID_HTTP_GET: &str = "validator_id_http_get"; pub const SUBSCRIPTIONS_HTTP_POST: &str = "subscriptions_http_post"; pub const UPDATE_PROPOSERS: &str = "update_proposers"; +pub const ATTESTATION_SELECTION_PROOFS: &str = "attestation_selection_proofs"; pub const SUBSCRIPTIONS: &str = "subscriptions"; pub const LOCAL_KEYSTORE: &str = "local_keystore"; pub const WEB3SIGNER: &str = "web3signer"; @@ -87,6 +88,11 @@ lazy_static::lazy_static! 
{ "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_voluntary_exits_total", + "Total count of VoluntaryExit signings", + &["status"] + ); pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( "builder_validator_registrations_total", "Total count of ValidatorRegistrationData signings", @@ -172,6 +178,28 @@ lazy_static::lazy_static! { "Duration to obtain a signature", &["type"] ); + pub static ref BLOCK_SIGNING_TIMES: Result = try_create_histogram( + "vc_block_signing_times_seconds", + "Duration to obtain a signature for a block", + ); + + pub static ref ATTESTATION_DUTY: Result = try_create_int_gauge_vec( + "vc_attestation_duty_slot", + "Attestation duty slot for all managed validators", + &["validator"] + ); + /* + * BN latency + */ + pub static ref VC_BEACON_NODE_LATENCY: Result = try_create_histogram_vec( + "vc_beacon_node_latency", + "Round-trip latency for a simple API endpoint on each BN", + &["endpoint"] + ); + pub static ref VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT: Result = try_create_histogram( + "vc_beacon_node_latency_primary_endpoint", + "Round-trip latency for the primary BN endpoint", + ); } pub fn gather_prometheus_metrics( diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index c30d6034471..31337491e88 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -121,7 +121,13 @@ pub fn serve( .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( metrics::gather_prometheus_metrics(&ctx) - .map(|body| Response::builder().status(200).body(body).unwrap()) + .map(|body| { + Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(body) + .unwrap() + }) .unwrap_or_else(|e| { Response::builder() .status(500) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 7fe2f5f8ecd..468fc2b06b2 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -989,7 +989,23 @@ impl InitializedValidators { let cache = KeyCache::open_or_create(&self.validators_dir).map_err(Error::UnableToOpenKeyCache)?; - let mut key_cache = self.decrypt_key_cache(cache, &mut key_stores).await?; + + // Check if there is at least one local definition. + let has_local_definitions = self.definitions.as_slice().iter().any(|def| { + matches!( + def.signing_definition, + SigningDefinition::LocalKeystore { .. } + ) + }); + + // Only decrypt cache when there is at least one local definition. + // Decrypting cache is a very expensive operation which is never used for web3signer. + let mut key_cache = if has_local_definitions { + self.decrypt_key_cache(cache, &mut key_stores).await? + } else { + // Assign an empty KeyCache if all definitions are of the Web3Signer type. 
+ KeyCache::new() + }; let mut disabled_uuids = HashSet::new(); for def in self.definitions.as_slice() { @@ -1115,13 +1131,16 @@ impl InitializedValidators { ); } } - for uuid in disabled_uuids { - key_cache.remove(&uuid); + + if has_local_definitions { + for uuid in disabled_uuids { + key_cache.remove(&uuid); + } } let validators_dir = self.validators_dir.clone(); let log = self.log.clone(); - if key_cache.is_modified() { + if has_local_definitions && key_cache.is_modified() { tokio::task::spawn_blocking(move || { match key_cache.save(validators_dir) { Err(e) => warn!( diff --git a/validator_client/src/latency.rs b/validator_client/src/latency.rs new file mode 100644 index 00000000000..7e752f29235 --- /dev/null +++ b/validator_client/src/latency.rs @@ -0,0 +1,64 @@ +use crate::{http_metrics::metrics, BeaconNodeFallback}; +use environment::RuntimeContext; +use slog::debug; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::time::sleep; +use types::EthSpec; + +/// The latency service will run 11/12ths of the way through the slot. +pub const SLOT_DELAY_MULTIPLIER: u32 = 11; +pub const SLOT_DELAY_DENOMINATOR: u32 = 12; + +/// Starts a service that periodically checks the latency between the VC and the +/// candidate BNs. +pub fn start_latency_service( + context: RuntimeContext, + slot_clock: T, + beacon_nodes: Arc>, +) { + let log = context.log().clone(); + + let future = async move { + loop { + let sleep_time = slot_clock + .duration_to_next_slot() + .map(|next_slot| { + // This is 11/12ths through the next slot. On mainnet this + // will happen in the 11th second of each slot, one second + // before the next slot. + next_slot + (slot_clock.slot_duration() / SLOT_DELAY_DENOMINATOR) * SLOT_DELAY_MULTIPLIER + }) + // If we can't read the slot clock, just wait one slot. Running + // the measurement at a non-exact time is not a big issue. + .unwrap_or_else(|| slot_clock.slot_duration()); + + // Sleep until it's time to perform the measurement.
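+            // Worked example (assuming mainnet's 12 second slots): if the next
+            // slot starts in 4s, sleep_time = 4s + (12s / 12) * 11 = 15s, i.e.
+            // we wake 11s into that slot, one second before the following slot.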
+ sleep(sleep_time).await; + + for (i, measurement) in beacon_nodes.measure_latency().await.iter().enumerate() { + if let Some(latency) = measurement.latency { + debug!( + log, + "Measured BN latency"; + "node" => &measurement.beacon_node_id, + "latency" => latency.as_millis(), + ); + metrics::observe_timer_vec( + &metrics::VC_BEACON_NODE_LATENCY, + &[&measurement.beacon_node_id], + latency, + ); + if i == 0 { + metrics::observe_duration( + &metrics::VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT, + latency, + ); + } + } + } + } + }; + + context.executor.spawn(future, "latency"); +} diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 4db9804054a..556fdef26b3 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -8,6 +8,7 @@ mod duties_service; mod graffiti_file; mod http_metrics; mod key_cache; +mod latency; mod notifier; mod preparation_service; mod signing_method; @@ -31,6 +32,7 @@ use crate::beacon_node_fallback::{ }; use crate::doppelganger_service::DoppelgangerService; use crate::graffiti_file::GraffitiFile; +use crate::initialized_validators::Error::UnableToOpenVotingKeystore; use account_utils::validator_definitions::ValidatorDefinitions; use attestation_service::{AttestationService, AttestationServiceBuilder}; use block_service::{BlockService, BlockServiceBuilder}; @@ -92,6 +94,7 @@ pub struct ProductionValidatorClient { doppelganger_service: Option>, preparation_service: PreparationService, validator_store: Arc>, + slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option, config: Config, } @@ -184,7 +187,16 @@ impl ProductionValidatorClient { log.clone(), ) .await - .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; + .map_err(|e| { + match e { + UnableToOpenVotingKeystore(err) => { + format!("Unable to initialize validators: {:?}. If you have recently moved the location of your data directory \ + make sure to update the location of voting_keystore_path in your validator_definitions.yml", err) + }, + err => { + format!("Unable to initialize validators: {:?}", err)} + } + })?; let voting_pubkeys: Vec<_> = validators.iter_voting_pubkeys().collect(); @@ -412,6 +424,7 @@ impl ProductionValidatorClient { }, spec: context.eth2_config.spec.clone(), context: duties_context, + enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, }); // Update the metrics server. 
@@ -449,7 +462,7 @@ impl ProductionValidatorClient { let sync_committee_service = SyncCommitteeService::new( duties_service.clone(), validator_store.clone(), - slot_clock, + slot_clock.clone(), beacon_nodes.clone(), context.service_context("sync_committee".into()), ); @@ -470,6 +483,7 @@ impl ProductionValidatorClient { preparation_service, validator_store, config, + slot_clock, http_api_listen_addr: None, }) } @@ -532,6 +546,7 @@ impl ProductionValidatorClient { graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), config: self.config.http_api.clone(), + slot_clock: self.slot_clock.clone(), log: log.clone(), _phantom: PhantomData, }); @@ -552,6 +567,14 @@ impl ProductionValidatorClient { None }; + if self.config.enable_latency_measurement_service { + latency::start_latency_service( + self.context.clone(), + self.duties_service.slot_clock.clone(), + self.duties_service.beacon_nodes.clone(), + ); + } + Ok(()) } } diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index de69d990033..0de2f2f54fa 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -34,7 +34,7 @@ pub enum Error { } /// Enumerates all messages that can be signed by a validator. -pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> { +pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), AttestationData(&'a AttestationData), @@ -47,9 +47,10 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload), ValidatorRegistration(&'a ValidatorRegistrationData), + VoluntaryExit(&'a VoluntaryExit), } -impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Payload> { /// Returns the `SignedRoot` for the contained message. /// /// The actual `SignedRoot` trait is not used since it also requires a `TreeHash` impl, which is @@ -67,6 +68,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { } => beacon_block_root.signing_root(domain), SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), + SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain), } } } @@ -116,7 +118,7 @@ impl SigningContext { impl SigningMethod { /// Return the signature of `signable_message`, with respect to the `signing_context`. - pub async fn get_signature>( + pub async fn get_signature>( &self, signable_message: SignableMessage<'_, T, Payload>, signing_context: SigningContext, @@ -141,7 +143,7 @@ impl SigningMethod { .await } - pub async fn get_signature_from_root>( + pub async fn get_signature_from_root>( &self, signable_message: SignableMessage<'_, T, Payload>, signing_root: Hash256, @@ -203,6 +205,7 @@ impl SigningMethod { SignableMessage::ValidatorRegistration(v) => { Web3SignerObject::ValidatorRegistration(v) } + SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e), }; // Determine the Web3Signer message type. 
diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index cf02ae0c323..e907126faf4 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -26,6 +26,7 @@ pub enum ForkName { Phase0, Altair, Bellatrix, + Capella, } #[derive(Debug, PartialEq, Serialize)] @@ -36,7 +37,7 @@ pub struct ForkInfo { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec", rename_all = "snake_case")] -pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { +pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { AggregationSlot { slot: Slot, }, @@ -61,7 +62,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { RandaoReveal { epoch: Epoch, }, - #[allow(dead_code)] VoluntaryExit(&'a VoluntaryExit), SyncCommitteeMessage { beacon_block_root: Hash256, @@ -72,7 +72,7 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { ValidatorRegistration(&'a ValidatorRegistrationData), } -impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Payload> { pub fn beacon_block(block: &'a BeaconBlock) -> Result { match block { BeaconBlock::Base(_) => Ok(Web3SignerObject::BeaconBlock { @@ -90,6 +90,11 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Capella(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Capella, + block: None, + block_header: Some(block.block_header()), + }), } } @@ -116,7 +121,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec")] -pub struct SigningRequest<'a, T: EthSpec, Payload: ExecPayload> { +pub struct SigningRequest<'a, T: EthSpec, Payload: AbstractExecPayload> { #[serde(rename = "type")] pub message_type: MessageType, #[serde(skip_serializing_if = "Option::is_none")] diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 292b49ac3a5..73843579a2b 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -18,12 +18,13 @@ use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; use types::{ - attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, - Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, - EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, + attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, + AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, + Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, - SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + ValidatorRegistrationData, VoluntaryExit, }; use validator_dir::ValidatorDir; @@ -155,6 +156,14 @@ impl 
ValidatorStore { self.validators.clone() } + /// Indicates if the `voting_public_key` exists in self and is enabled. + pub fn has_validator(&self, voting_public_key: &PublicKeyBytes) -> bool { + self.validators + .read() + .validator(voting_public_key) + .is_some() + } + /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. #[allow(clippy::too_many_arguments)] @@ -454,7 +463,7 @@ impl ValidatorStore { .unwrap_or(self.builder_proposals) } - pub async fn sign_block>( + pub async fn sign_block>( &self, validator_pubkey: PublicKeyBytes, block: BeaconBlock, @@ -616,6 +625,32 @@ impl ValidatorStore { } } + pub async fn sign_voluntary_exit( + &self, + validator_pubkey: PublicKeyBytes, + voluntary_exit: VoluntaryExit, + ) -> Result { + let signing_epoch = voluntary_exit.epoch; + let signing_context = self.signing_context(Domain::VoluntaryExit, signing_epoch); + let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::VoluntaryExit(&voluntary_exit), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + + metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]); + + Ok(SignedVoluntaryExit { + message: voluntary_exit, + signature, + }) + } + pub async fn sign_validator_registration_data( &self, validator_registration_data: ValidatorRegistrationData, diff --git a/watch/.gitignore b/watch/.gitignore new file mode 100644 index 00000000000..5b6b0720c9e --- /dev/null +++ b/watch/.gitignore @@ -0,0 +1 @@ +config.yaml diff --git a/watch/Cargo.toml b/watch/Cargo.toml new file mode 100644 index 00000000000..d1793a9d068 --- /dev/null +++ b/watch/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "watch" +version = "0.1.0" +edition = "2018" + +[lib] +name = "watch" +path = "src/lib.rs" + +[[bin]] +name = "watch" +path = "src/main.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.3" +log = "0.4.14" +env_logger = "0.9.0" +types = { path = "../consensus/types" } +eth2 = { path = "../common/eth2" } +beacon_node = { path = "../beacon_node"} +tokio = { version = "1.14.0", features = ["time"] } +axum = "0.5.15" +hyper = "0.14.20" +serde = "1.0.116" +serde_json = "1.0.58" +reqwest = { version = "0.11.0", features = ["json","stream"] } +url = "2.2.2" +rand = "0.7.3" +diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } +diesel_migrations = { version = "2.0.0", features = ["postgres"] } +byteorder = "1.4.3" +bls = { path = "../crypto/bls" } +hex = "0.4.2" +r2d2 = "0.8.9" +serde_yaml = "0.8.24" + +[dev-dependencies] +tokio-postgres = "0.7.5" +http_api = { path = "../beacon_node/http_api" } +beacon_chain = { path = "../beacon_node/beacon_chain" } +network = { path = "../beacon_node/network" } +testcontainers = "0.14.0" +unused_port = { path = "../common/unused_port" } diff --git a/watch/README.md b/watch/README.md new file mode 100644 index 00000000000..18bf393946d --- /dev/null +++ b/watch/README.md @@ -0,0 +1,460 @@ +## beacon.watch + +>beacon.watch is pre-MVP and still under active development and subject to change. + +beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to +data which is: +1. Not already stored natively in the Beacon Chain +2. Too specialized for Block Explorers +3. 
Too sensitive for public Block Explorers + + +### Requirements +- `git` +- `rust` : https://rustup.rs/ +- `libpq` : https://www.postgresql.org/download/ +- `diesel_cli` : +``` +cargo install diesel_cli --no-default-features --features postgres +``` +- `docker` : https://docs.docker.com/engine/install/ +- `docker-compose` : https://docs.docker.com/compose/install/ + +### Setup +1. Set up the database: +``` +cd postgres_docker_compose +docker-compose up +``` + +1. Ensure the tests pass: +``` +cargo test --release +``` + +1. Drop the database (if it already exists) and run the required migrations: +``` +diesel database reset --database-url postgres://postgres:postgres@localhost/dev +``` + +1. Ensure a synced Lighthouse beacon node with historical states is available +at `localhost:5052`. +The smaller the value of `--slots-per-restore-point`, the faster beacon.watch +will be able to sync to the beacon node. + +1. Run the updater daemon: +``` +cargo run --release -- run-updater +``` + +1. Start the HTTP API server: +``` +cargo run --release -- serve +``` + +1. Ensure connectivity: +``` +curl "http://localhost:5059/v1/slots/highest" +``` + +> Functionality on macOS has not been tested. Windows is not supported. + + +### Configuration +beacon.watch can be configured through a config file. +Available options can be seen in `config.yaml.default`. + +You can specify a config file at runtime: +``` +cargo run -- run-updater --config path/to/config.yaml +cargo run -- serve --config path/to/config.yaml +``` + +You can specify only the parts of the config file which you need changed. +Missing values will remain as their defaults. + +For example, if you wish to run with default settings but change only `log_level`, +your config file would be: +```yaml +# config.yaml +log_level: "info" +``` + +### Available Endpoints +As beacon.watch continues to develop, more endpoints will be added. + +> In these examples any data containing information from blockprint has either been redacted or fabricated.
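+The endpoints below are shown with `curl`, but they can just as easily be queried
+from code. The following is a minimal, illustrative sketch only; it assumes a
+beacon.watch server running on the default `127.0.0.1:5059`, and a consuming crate
+with `serde_json`, `reqwest` (with its `json` feature) and `tokio` (with its
+`macros`/`rt` features) as dependencies:
+```rust
+use serde_json::Value;
+
+#[tokio::main]
+async fn main() -> Result<(), reqwest::Error> {
+    // Ask beacon.watch for the highest canonical slot it has synced.
+    let highest: Value = reqwest::get("http://127.0.0.1:5059/v1/slots/highest")
+        .await?
+        .json()
+        .await?;
+
+    // Fetch the block packing statistics for that slot.
+    let slot = highest["slot"].as_str().unwrap_or("0");
+    let packing: Value =
+        reqwest::get(format!("http://127.0.0.1:5059/v1/blocks/{slot}/packing"))
+            .await?
+            .json()
+            .await?;
+
+    println!("slot {slot}: packing = {packing}");
+    Ok(())
+}
+```
+The same pattern applies to any of the endpoints documented below.
+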
+ +#### `/v1/slots/{slot}` +```bash +curl "http://localhost:5059/v1/slots/4635296" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/slots?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "skipped": false, + "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + } +] +``` + +#### `/v1/slots/lowest` +```bash +curl "http://localhost:5059/v1/slots/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/slots/highest` +```bash +curl "http://localhost:5059/v1/slots/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "skipped": false, + "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b" +} +``` + +#### `v1/slots/{slot}/block` +```bash +curl "http://localhost:5059/v1/slots/4635296/block" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}` +```bash +curl "http://localhost:5059/v1/blocks/4635296" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" + } +] +``` + +#### `/v1/blocks/{block_id}/previous` +```bash +curl "http://localhost:5059/v1/blocks/4635297/previous" +# OR +curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}/next` +```bash +curl "http://localhost:5059/v1/blocks/4635296/next" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next" +``` +```json +{ + "slot": "4635297", + "root": 
"0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/blocks/lowest` +```bash +curl "http://localhost:5059/v1/blocks/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/highest` +```bash +curl "http://localhost:5059/v1/blocks/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2" +} +``` + +#### `/v1/blocks/{block_id}/proposer` +```bash +curl "http://localhost:5059/v1/blocks/4635296/proposer" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer" + +``` +```json +{ + "slot": "4635296", + "proposer_index": 223126, + "graffiti": "" +} +``` + +#### `/v1/blocks/{block_id}/rewards` +```bash +curl "http://localhost:5059/v1/blocks/4635296/reward" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward" + +``` +```json +{ + "slot": "4635296", + "total": 25380059, + "attestation_reward": 24351867, + "sync_committee_reward": 1028192 +} +``` + +#### `/v1/blocks/{block_id}/packing` +```bash +curl "http://localhost:5059/v1/blocks/4635296/packing" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing" + +``` +```json +{ + "slot": "4635296", + "available": 16152, + "included": 13101, + "prior_skip_slots": 0 +} +``` + +#### `/v1/validators/{validator}` +```bash +curl "http://localhost:5059/v1/validators/1" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c" +``` +```json +{ + "index": 1, + "public_key": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", + "status": "active_ongoing", + "client": null, + "activation_epoch": 0, + "exit_epoch": null +} +``` + +#### `/v1/validators/{validator}/attestation/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/1/attestation/144853" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853" +``` +```json +{ + "index": 1, + "epoch": "144853", + "source": true, + "head": true, + "target": true +} +``` + +#### `/v1/validators/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853" +``` +```json +[ + 63, + 67, + 98, + ... +] +``` + +#### `/v1/validators/missed/{vote}/{epoch}/graffiti` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti" +``` +```json +{ + "Mr F was here": 3, + "Lighthouse/v3.1.0-aa022f4": 5, + ... +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/clients/missed/source/144853" +``` +```json +{ + "Lighthouse": 100, + "Lodestar": 100, + "Nimbus": 100, + "Prysm": 100, + "Teku": 100, + "Unknown": 100 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages` +Note that this endpoint expresses the following: +``` +What percentage of each client implementation missed this vote? 
+``` + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages" +``` +```json +{ + "Lighthouse": 0.51234567890, + "Lodestar": 0.51234567890, + "Nimbus": 0.51234567890, + "Prysm": 0.09876543210, + "Teku": 0.09876543210, + "Unknown": 0.05647382910 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative` +Note that this endpoint expresses the following: +``` +For the validators which did miss this vote, what percentage of them were from each client implementation? +``` +You can check these values against the output of `/v1/clients/percentages` to see any discrepancies. + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative" +``` +```json +{ + "Lighthouse": 11.11111111111111, + "Lodestar": 11.11111111111111, + "Nimbus": 11.11111111111111, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 33.33333333333333 +} + +``` + +#### `/v1/clients` +```bash +curl "http://localhost:5059/v1/clients" +``` +```json +{ + "Lighthouse": 5000, + "Lodestar": 5000, + "Nimbus": 5000, + "Prysm": 5000, + "Teku": 5000, + "Unknown": 5000 +} +``` + +#### `/v1/clients/percentages` +```bash +curl "http://localhost:5059/v1/clients/percentages" +``` +```json +{ + "Lighthouse": 16.66666666666667, + "Lodestar": 16.66666666666667, + "Nimbus": 16.66666666666667, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 16.66666666666667 +} +``` + +### Future work +- New tables + - `skip_slots`? + + +- More API endpoints + - `/v1/proposers?start_epoch={}&end_epoch={}` and similar + - `/v1/validators/{status}/count` + + +- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills. + + +- Better/prettier (async?) logging. + + +- Connect to a range of beacon_nodes to sync different components concurrently. +Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync. + + +### Architecture +Connection Pooling: +- 1 Pool for Updater (read and write) +- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this) diff --git a/watch/config.yaml.default b/watch/config.yaml.default new file mode 100644 index 00000000000..131609237cb --- /dev/null +++ b/watch/config.yaml.default @@ -0,0 +1,49 @@ +--- +database: + user: "postgres" + password: "postgres" + dbname: "dev" + default_dbname: "postgres" + host: "localhost" + port: 5432 + connect_timeout_millis: 2000 + +server: + listen_addr: "127.0.0.1" + listen_port: 5059 + +updater: + # The URL of the Beacon Node to perform sync tasks with. + # Cannot yet accept multiple beacon nodes. + beacon_node_url: "http://localhost:5052" + # The number of epochs to backfill. Must be below 100. + max_backfill_size_epochs: 2 + # The epoch at which to stop backfilling. + backfill_stop_epoch: 0 + # Whether to sync the attestations table. + attestations: true + # Whether to sync the proposer_info table. + proposer_info: true + # Whether to sync the block_rewards table. + block_rewards: true + # Whether to sync the block_packing table. + block_packing: true + +blockprint: + # Whether to sync client information from blockprint. + enabled: false + # The URL of the blockprint server. + url: "" + # The username used to authenticate to the blockprint server. + username: "" + # The password used to authenticate to the blockprint server. + password: "" + +# Log level. 
+# Valid options are: +# - "trace" +# - "debug" +# - "info" +# - "warn" +# - "error" +log_level: "debug" diff --git a/watch/diesel.toml b/watch/diesel.toml new file mode 100644 index 00000000000..bfb01bccf0f --- /dev/null +++ b/watch/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/database/schema.rs" diff --git a/watch/migrations/.gitkeep b/watch/migrations/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/watch/migrations/00000000000000_diesel_initial_setup/down.sql b/watch/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 00000000000..a9f52609119 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/watch/migrations/00000000000000_diesel_initial_setup/up.sql b/watch/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 00000000000..d68895b1a7b --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql new file mode 100644 index 00000000000..551ed6605c7 --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql @@ -0,0 +1 @@ +DROP TABLE canonical_slots diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql new file mode 100644 index 00000000000..2629f11a4c7 --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE canonical_slots ( + slot integer PRIMARY KEY, + root bytea NOT NULL, + skipped boolean NOT NULL, + beacon_block bytea UNIQUE +) diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql new file mode 100644 index 00000000000..8901956f47c --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql 
@@ -0,0 +1 @@ +DROP TABLE beacon_blocks diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql new file mode 100644 index 00000000000..250c667b232 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE beacon_blocks ( + slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE, + root bytea REFERENCES canonical_slots(beacon_block) NOT NULL, + parent_root bytea NOT NULL, + attestation_count integer NOT NULL, + transaction_count integer +) diff --git a/watch/migrations/2022-01-01-000002_validators/down.sql b/watch/migrations/2022-01-01-000002_validators/down.sql new file mode 100644 index 00000000000..17819fc3491 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/down.sql @@ -0,0 +1 @@ +DROP TABLE validators diff --git a/watch/migrations/2022-01-01-000002_validators/up.sql b/watch/migrations/2022-01-01-000002_validators/up.sql new file mode 100644 index 00000000000..69cfef6772b --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE validators ( + index integer PRIMARY KEY, + public_key bytea NOT NULL, + status text NOT NULL, + activation_epoch integer, + exit_epoch integer +) diff --git a/watch/migrations/2022-01-01-000003_proposer_info/down.sql b/watch/migrations/2022-01-01-000003_proposer_info/down.sql new file mode 100644 index 00000000000..d61330be5b2 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/down.sql @@ -0,0 +1 @@ +DROP TABLE proposer_info diff --git a/watch/migrations/2022-01-01-000003_proposer_info/up.sql b/watch/migrations/2022-01-01-000003_proposer_info/up.sql new file mode 100644 index 00000000000..488aedb2730 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE proposer_info ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL, + graffiti text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000004_active_config/down.sql b/watch/migrations/2022-01-01-000004_active_config/down.sql new file mode 100644 index 00000000000..b4304eb7b72 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/down.sql @@ -0,0 +1 @@ +DROP TABLE active_config diff --git a/watch/migrations/2022-01-01-000004_active_config/up.sql b/watch/migrations/2022-01-01-000004_active_config/up.sql new file mode 100644 index 00000000000..476a0911607 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE active_config ( + id integer PRIMARY KEY CHECK (id=1), + config_name text NOT NULL, + slots_per_epoch integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000010_blockprint/down.sql b/watch/migrations/2022-01-01-000010_blockprint/down.sql new file mode 100644 index 00000000000..fa53325dad1 --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/down.sql @@ -0,0 +1 @@ +DROP TABLE blockprint diff --git a/watch/migrations/2022-01-01-000010_blockprint/up.sql b/watch/migrations/2022-01-01-000010_blockprint/up.sql new file mode 100644 index 00000000000..2d5741f50b7 --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/up.sql @@ -0,0 +1,4 @@ +CREATE TABLE blockprint ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + best_guess text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000011_block_rewards/down.sql 
b/watch/migrations/2022-01-01-000011_block_rewards/down.sql new file mode 100644 index 00000000000..2dc87995c74 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/down.sql @@ -0,0 +1 @@ +DROP TABLE block_rewards diff --git a/watch/migrations/2022-01-01-000011_block_rewards/up.sql b/watch/migrations/2022-01-01-000011_block_rewards/up.sql new file mode 100644 index 00000000000..47cb4304f06 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_rewards ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + total integer NOT NULL, + attestation_reward integer NOT NULL, + sync_committee_reward integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000012_block_packing/down.sql b/watch/migrations/2022-01-01-000012_block_packing/down.sql new file mode 100644 index 00000000000..e9e7755e3e0 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/down.sql @@ -0,0 +1 @@ +DROP TABLE block_packing diff --git a/watch/migrations/2022-01-01-000012_block_packing/up.sql b/watch/migrations/2022-01-01-000012_block_packing/up.sql new file mode 100644 index 00000000000..63a9925f920 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_packing ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + available integer NOT NULL, + included integer NOT NULL, + prior_skip_slots integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql new file mode 100644 index 00000000000..0f32b6b4f33 --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql @@ -0,0 +1 @@ +DROP TABLE suboptimal_attestations diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql new file mode 100644 index 00000000000..5352afefc8d --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql @@ -0,0 +1,8 @@ +CREATE TABLE suboptimal_attestations ( + epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE, + index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE, + source boolean NOT NULL, + head boolean NOT NULL, + target boolean NOT NULL, + PRIMARY KEY(epoch_start_slot, index) +) diff --git a/watch/migrations/2022-01-01-000020_capella/down.sql b/watch/migrations/2022-01-01-000020_capella/down.sql new file mode 100644 index 00000000000..5903b351db9 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/down.sql @@ -0,0 +1,2 @@ +ALTER TABLE beacon_blocks +DROP COLUMN withdrawal_count; diff --git a/watch/migrations/2022-01-01-000020_capella/up.sql b/watch/migrations/2022-01-01-000020_capella/up.sql new file mode 100644 index 00000000000..b52b4b00998 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/up.sql @@ -0,0 +1,3 @@ +ALTER TABLE beacon_blocks +ADD COLUMN withdrawal_count integer; + diff --git a/watch/postgres_docker_compose/compose.yml b/watch/postgres_docker_compose/compose.yml new file mode 100644 index 00000000000..eae4de4a2ba --- /dev/null +++ b/watch/postgres_docker_compose/compose.yml @@ -0,0 +1,16 @@ +version: "3" + +services: + postgres: + image: postgres:12.3-alpine + restart: always + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + volumes: + - 
postgres:/var/lib/postgresql/data + ports: + - 127.0.0.1:5432:5432 + +volumes: + postgres: diff --git a/watch/src/block_packing/database.rs b/watch/src/block_packing/database.rs new file mode 100644 index 00000000000..f7375431cb3 --- /dev/null +++ b/watch/src/block_packing/database.rs @@ -0,0 +1,140 @@ +use crate::database::{ + schema::{beacon_blocks, block_packing}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_packing)] +pub struct WatchBlockPacking { + pub slot: WatchSlot, + pub available: i32, + pub included: i32, + pub prior_skip_slots: i32, +} + +/// Insert a batch of values into the `block_packing` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_packing( + conn: &mut PgConn, + packing: Vec, +) -> Result<(), Error> { + use self::block_packing::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_packing) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_packing` table where `slot` is minimum. +pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_packing` table where `slot` is maximum. +pub fn get_highest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `root_query`. +pub fn get_block_packing_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_packing); + + let result = join + .select((slot, available, included, prior_skip_slots)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`. 
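+/// Hypothetical usage sketch, assuming an established connection `conn: PgConn`:
+///
+/// ```ignore
+/// let packing = get_block_packing_by_slot(&mut conn, WatchSlot::from_slot(Slot::new(4635296)))?;
+/// ```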
+pub fn get_block_packing_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_packing`. +#[allow(dead_code)] +pub fn get_unknown_block_packing( + conn: &mut PgConn, + slots_per_epoch: u64, +) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_packing::dsl::block_packing; + + let join = beacon_blocks.left_join(block_packing); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block packing cannot be retrieved for epoch 0 so we need to exclude them. + .filter(slot.ge(slots_per_epoch as i32)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_packing/mod.rs b/watch/src/block_packing/mod.rs new file mode 100644 index 00000000000..5d74fc59799 --- /dev/null +++ b/watch/src/block_packing/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; +pub use server::block_packing_routes; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/block_packing`. +/// Formats the response into a vector of `WatchBlockPacking`. +/// +/// Will fail if `start_epoch == 0`. +pub async fn get_block_packing( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_packing(start_epoch, end_epoch) + .await? + .into_iter() + .map(|data| WatchBlockPacking { + slot: WatchSlot::from_slot(data.slot), + available: data.available_attestations as i32, + included: data.included_attestations as i32, + prior_skip_slots: data.prior_skip_slots as i32, + }) + .collect()) +} diff --git a/watch/src/block_packing/server.rs b/watch/src/block_packing/server.rs new file mode 100644 index 00000000000..819144562a5 --- /dev/null +++ b/watch/src/block_packing/server.rs @@ -0,0 +1,31 @@ +use crate::block_packing::database::{ + get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_packing( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_packing_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_packing_routes() -> Router { + Router::new().route("/v1/blocks/:block/packing", get(get_block_packing)) +} diff --git a/watch/src/block_packing/updater.rs b/watch/src/block_packing/updater.rs new file mode 100644 index 00000000000..215964901a6 --- /dev/null +++ b/watch/src/block_packing/updater.rs @@ -0,0 +1,211 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_packing::get_block_packing; + +use eth2::types::{Epoch, EthSpec}; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `block_packing` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_packing` API with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block) + /// `end_epoch` -> epoch of highest beacon block + /// + /// It will resync the latest epoch if it is not fully filled. + /// That is, `if highest_filled_slot % slots_per_epoch != 31` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn fill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_packing` table. + let highest_filled_slot_opt = if self.config.block_packing { + database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot.as_slot() % self.slots_per_epoch + == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? { + lowest_beacon_block + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not fill the `block_packing` table. + warn!("Refusing to fill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Block packing is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. 
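+            // Worked example (hypothetical numbers): with start_epoch == 100 and
+            // end_epoch == 300, 100 < 300 - 50, so end_epoch is clamped to 150 and
+            // the remaining epochs are picked up by later runs of the updater.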
+ if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Since we pull a full epoch of data but are not guaranteed to have all blocks of + // that epoch available, only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_packing` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_block_packing` function with: + /// `start_epoch` -> epoch of lowest_beacon_block + /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn backfill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_packing_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `block_packing` table. + let lowest_filled_slot_opt = if self.config.block_packing { + database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 { + lowest_filled_slot + .as_slot() + .epoch(self.slots_per_epoch) + .saturating_sub(Epoch::new(1)) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot().epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not backfill the `block_packing` table. 
+ warn!("Refusing to backfill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_epoch <= 1 { + debug!("Block packing backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch >= end_epoch { + debug!("Block packing is up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_packing_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) { + start_epoch = end_epoch.saturating_sub(max_block_packing_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) + } + + // The `block_packing` API cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/block_rewards/database.rs b/watch/src/block_rewards/database.rs new file mode 100644 index 00000000000..a2bf49f3e4d --- /dev/null +++ b/watch/src/block_rewards/database.rs @@ -0,0 +1,137 @@ +use crate::database::{ + schema::{beacon_blocks, block_rewards}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_rewards)] +pub struct WatchBlockRewards { + pub slot: WatchSlot, + pub total: i32, + pub attestation_reward: i32, + pub sync_committee_reward: i32, +} + +/// Insert a batch of values into the `block_rewards` table. +/// +/// On a conflict, it will do nothing, leaving the old value. 
+pub fn insert_batch_block_rewards( + conn: &mut PgConn, + rewards: Vec, +) -> Result<(), Error> { + use self::block_rewards::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_rewards) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_rewards` table where `slot` is minimum. +pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_rewards` table where `slot` is maximum. +pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`. +pub fn get_block_rewards_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_rewards); + + let result = join + .select((slot, total, attestation_reward, sync_committee_reward)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`. +pub fn get_block_rewards_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_rewards`. +#[allow(dead_code)] +pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_rewards::dsl::block_rewards; + + let join = beacon_blocks.left_join(block_rewards); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it. 
+ .filter(slot.ne(0)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_rewards/mod.rs b/watch/src/block_rewards/mod.rs new file mode 100644 index 00000000000..0dac88ea58d --- /dev/null +++ b/watch/src/block_rewards/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +mod server; +mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; +pub use server::block_rewards_routes; + +use eth2::BeaconNodeHttpClient; +use types::Slot; + +/// Sends a request to `lighthouse/analysis/block_rewards`. +/// Formats the response into a vector of `WatchBlockRewards`. +/// +/// Will fail if `start_slot == 0`. +pub async fn get_block_rewards( + bn: &BeaconNodeHttpClient, + start_slot: Slot, + end_slot: Slot, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_rewards(start_slot, end_slot) + .await? + .into_iter() + .map(|data| WatchBlockRewards { + slot: WatchSlot::from_slot(data.meta.slot), + total: data.total as i32, + attestation_reward: data.attestation_rewards.total as i32, + sync_committee_reward: data.sync_committee_rewards as i32, + }) + .collect()) +} diff --git a/watch/src/block_rewards/server.rs b/watch/src/block_rewards/server.rs new file mode 100644 index 00000000000..480346e25b3 --- /dev/null +++ b/watch/src/block_rewards/server.rs @@ -0,0 +1,31 @@ +use crate::block_rewards::database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_rewards( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_block_rewards_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_rewards_routes() -> Router { + Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards)) +} diff --git a/watch/src/block_rewards/updater.rs b/watch/src/block_rewards/updater.rs new file mode 100644 index 00000000000..ad34b1f0785 --- /dev/null +++ b/watch/src/block_rewards/updater.rs @@ -0,0 +1,157 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_rewards::get_block_rewards; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `block_rewards` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. 
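+    // Worked example of the request window: if the highest filled `block_rewards` row is at
+    // slot 1599 and the head of `beacon_blocks` is at slot 10_000, then `start_slot == 1600`
+    // and, since the span exceeds `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS` (1600), `end_slot`
+    // is clamped to `start_slot + 1600 == 3200`; later runs pick up the remaining slots.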
+ pub async fn fill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_rewards` table. + let highest_filled_slot_opt = if self.config.block_rewards { + database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `block_rewards` table. + warn!("Refusing to fill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Block rewards are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + database::insert_batch_block_rewards(&mut conn, rewards)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_rewards` tables starting from the entry with the + /// lowest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `block_rewards` table. + let lowest_filled_slot_opt = if self.config.block_rewards { + database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `block_rewards` table. 
+ warn!("Refusing to backfill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Block rewards backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Block rewards are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_reward_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + if start_slot < end_slot.saturating_sub(max_block_reward_backfill) { + start_slot = end_slot.saturating_sub(max_block_reward_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) + } + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + + if self.config.block_rewards { + database::insert_batch_block_rewards(&mut conn, rewards)?; + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/blockprint/config.rs b/watch/src/blockprint/config.rs new file mode 100644 index 00000000000..721fa7cb197 --- /dev/null +++ b/watch/src/blockprint/config.rs @@ -0,0 +1,40 @@ +use serde::{Deserialize, Serialize}; + +pub const fn enabled() -> bool { + false +} + +pub const fn url() -> Option { + None +} + +pub const fn username() -> Option { + None +} + +pub const fn password() -> Option { + None +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "enabled")] + pub enabled: bool, + #[serde(default = "url")] + pub url: Option, + #[serde(default = "username")] + pub username: Option, + #[serde(default = "password")] + pub password: Option, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: enabled(), + url: url(), + username: username(), + password: password(), + } + } +} diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs new file mode 100644 index 00000000000..afa35c81b63 --- /dev/null +++ b/watch/src/blockprint/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + self, + schema::{beacon_blocks, blockprint}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::sql_types::{Integer, Text}; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Instant; + +type WatchConsensusClient = String; +pub fn list_consensus_clients() -> Vec { + vec![ + "Lighthouse".to_string(), + "Lodestar".to_string(), + "Nimbus".to_string(), + "Prysm".to_string(), + "Teku".to_string(), + "Unknown".to_string(), + ] +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = blockprint)] +pub struct WatchBlockprint { + pub slot: WatchSlot, + pub best_guess: WatchConsensusClient, +} + +#[derive(Debug, QueryableByName, 
diesel::FromSqlRow)]
+pub struct WatchValidatorBlockprint {
+    #[diesel(sql_type = Integer)]
+    pub proposer_index: i32,
+    #[diesel(sql_type = Text)]
+    pub best_guess: WatchConsensusClient,
+    #[diesel(sql_type = Integer)]
+    pub slot: WatchSlot,
+}
+
+/// Insert a batch of values into the `blockprint` table.
+///
+/// On a conflict, it will do nothing, leaving the old value.
+pub fn insert_batch_blockprint(
+    conn: &mut PgConn,
+    prints: Vec<WatchBlockprint>,
+) -> Result<(), Error> {
+    use self::blockprint::dsl::*;
+
+    let mut count = 0;
+    let timer = Instant::now();
+
+    for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) {
+        count += diesel::insert_into(blockprint)
+            .values(chunk)
+            .on_conflict_do_nothing()
+            .execute(conn)?;
+    }
+
+    let time_taken = timer.elapsed();
+    debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}");
+    Ok(())
+}
+
+/// Selects the row from the `blockprint` table where `slot` is minimum.
+pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> {
+    use self::blockprint::dsl::*;
+    let timer = Instant::now();
+
+    let result = blockprint
+        .order_by(slot.asc())
+        .limit(1)
+        .first::<WatchBlockprint>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Blockprint requested: lowest, time_taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects the row from the `blockprint` table where `slot` is maximum.
+pub fn get_highest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> {
+    use self::blockprint::dsl::*;
+    let timer = Instant::now();
+
+    let result = blockprint
+        .order_by(slot.desc())
+        .limit(1)
+        .first::<WatchBlockprint>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Blockprint requested: highest, time_taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `blockprint` table corresponding to a given `root_query`.
+pub fn get_blockprint_by_root(
+    conn: &mut PgConn,
+    root_query: WatchHash,
+) -> Result<Option<WatchBlockprint>, Error> {
+    use self::beacon_blocks::dsl::{beacon_blocks, root};
+    use self::blockprint::dsl::*;
+    let timer = Instant::now();
+
+    let join = beacon_blocks.inner_join(blockprint);
+
+    let result = join
+        .select((slot, best_guess))
+        .filter(root.eq(root_query))
+        .first::<WatchBlockprint>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`.
+pub fn get_blockprint_by_slot(
+    conn: &mut PgConn,
+    slot_query: WatchSlot,
+) -> Result<Option<WatchBlockprint>, Error> {
+    use self::blockprint::dsl::*;
+    let timer = Instant::now();
+
+    let result = blockprint
+        .filter(slot.eq(slot_query))
+        .first::<WatchBlockprint>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
+/// row in `blockprint`.
+#[allow(dead_code)]
+pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result<Vec<Option<WatchSlot>>, Error> {
+    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
+    use self::blockprint::dsl::blockprint;
+
+    let join = beacon_blocks.left_join(blockprint);
+
+    let result = join
+        .select(slot)
+        .filter(root.is_null())
+        .order_by(slot.desc())
+        .nullable()
+        .load::<Option<WatchSlot>>(conn)?;
+
+    Ok(result)
+}
+
+/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or before
+/// `target_slot`.
+/// Inserts `"Unknown"` if no prior proposals exist.
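+// For example, if validator 5's most recent proposal at or before `target_slot` was at slot 120
+// and the `blockprint` row for slot 120 reads "Lighthouse", the map contains `5 -> "Lighthouse"`;
+// a validator with no prior proposal (or whose latest proposal has no `blockprint` row) maps to
+// "Unknown".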
+pub fn construct_validator_blockprints_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + use self::blockprint::dsl::{blockprint, slot}; + + let total_validators = + database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)? + as usize; + + let mut blockprint_map = HashMap::with_capacity(total_validators); + + let latest_proposals = + database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?; + + let latest_proposal_slots: Vec = latest_proposals.clone().into_keys().collect(); + + let result = blockprint + .filter(slot.eq_any(latest_proposal_slots)) + .load::(conn)?; + + // Insert the validators which have available blockprints. + for print in result { + if let Some(proposer) = latest_proposals.get(&print.slot) { + blockprint_map.insert(*proposer, print.best_guess); + } + } + + // Insert the rest of the unknown validators. + for validator_index in 0..total_validators { + blockprint_map + .entry(validator_index as i32) + .or_insert_with(|| "Unknown".to_string()); + } + + Ok(blockprint_map) +} + +/// Counts the number of occurances of each `client` present in the `validators` table at or before some +/// `target_slot`. +pub fn get_validators_clients_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + let mut client_map: HashMap = HashMap::new(); + + // This includes all validators which were activated at or before `target_slot`. + let validator_blockprints = + construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?; + + for client in list_consensus_clients() { + let count = validator_blockprints + .iter() + .filter(|(_, v)| (*v).clone() == client) + .count(); + client_map.insert(client, count); + } + + Ok(client_map) +} diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs new file mode 100644 index 00000000000..b8107e5bf58 --- /dev/null +++ b/watch/src/blockprint/mod.rs @@ -0,0 +1,149 @@ +pub mod database; +pub mod server; +pub mod updater; + +mod config; + +use crate::database::WatchSlot; + +use eth2::SensitiveUrl; +use reqwest::{Client, Response, Url}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use types::Slot; + +pub use config::Config; +pub use database::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + list_consensus_clients, WatchBlockprint, +}; +pub use server::blockprint_routes; + +const TIMEOUT: Duration = Duration::from_secs(50); + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), + BlockprintNotSynced, + Other(String), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchBlockprintClient { + pub client: Client, + pub server: SensitiveUrl, + pub username: Option, + pub password: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintSyncingResponse { + pub greatest_block_slot: Slot, + pub synced: bool, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintResponse { + pub proposer_index: i32, + pub slot: Slot, + pub best_guess_single: String, +} + +impl WatchBlockprintClient { + async fn get(&self, url: Url) -> Result { + let mut builder = self.client.get(url).timeout(TIMEOUT); + if let 
Some(username) = &self.username { + builder = builder.basic_auth(username, self.password.as_ref()); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + + if !response.status().is_success() { + return Err(Error::Other(response.text().await?)); + } + + Ok(response) + } + + // Returns the `greatest_block_slot` as reported by the Blockprint server. + // Will error if the Blockprint server is not synced. + #[allow(dead_code)] + pub async fn ensure_synced(&self) -> Result { + let url = self.server.full.join("sync/")?.join("status")?; + + let response = self.get(url).await?; + + let result = response.json::().await?; + if !result.synced { + return Err(Error::BlockprintNotSynced); + } + + Ok(result.greatest_block_slot) + } + + // Pulls the latest blockprint for all validators. + #[allow(dead_code)] + pub async fn blockprint_all_validators( + &self, + highest_validator: i32, + ) -> Result, Error> { + let url = self + .server + .full + .join("validator/")? + .join("blocks/")? + .join("latest")?; + + let response = self.get(url).await?; + + let mut result = response.json::>().await?; + result.retain(|print| print.proposer_index <= highest_validator); + + let mut map: HashMap = HashMap::with_capacity(result.len()); + for print in result { + map.insert(print.proposer_index, print.best_guess_single); + } + + Ok(map) + } + + // Construct a request to the Blockprint server for a range of slots between `start_slot` and + // `end_slot`. + pub async fn get_blockprint( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let url = self + .server + .full + .join("blocks/")? + .join(&format!("{start_slot}/{end_slot}"))?; + + let response = self.get(url).await?; + + let result = response + .json::>() + .await? + .iter() + .map(|response| WatchBlockprint { + slot: WatchSlot::from_slot(response.slot), + best_guess: response.best_guess_single.clone(), + }) + .collect(); + Ok(result) + } +} diff --git a/watch/src/blockprint/server.rs b/watch/src/blockprint/server.rs new file mode 100644 index 00000000000..488af157174 --- /dev/null +++ b/watch/src/blockprint/server.rs @@ -0,0 +1,31 @@ +use crate::blockprint::database::{ + get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_blockprint( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_blockprint_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn blockprint_routes() -> Router { + Router::new().route("/v1/blocks/:block/blockprint", get(get_blockprint)) +} diff --git a/watch/src/blockprint/updater.rs b/watch/src/blockprint/updater.rs new file mode 100644 index 00000000000..28c3184556c --- /dev/null +++ b/watch/src/blockprint/updater.rs @@ -0,0 +1,172 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `blockprint` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn fill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `blockprint` table. + let mut start_slot = if let Some(highest_filled_slot) = + database::get_highest_blockprint(&mut conn)?.map(|print| print.slot) + { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `blockprint` table. + warn!("Refusing to fill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Blockprint is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in either + // `blockprint` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + + Ok(()) + } + + /// Backfill the `blockprint` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn backfill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + let max_blockprint_backfill = + self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `blockprint` table. + let end_slot = if let Some(lowest_filled_slot) = + database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot) + { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `blockprint` table. + warn!("Refusing to backfill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Blockprint backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Blockprint are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_blockprint_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + if start_slot < end_slot.saturating_sub(max_blockprint_backfill) { + start_slot = end_slot.saturating_sub(max_blockprint_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) + } + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the `blockprint` + // table. This is a critical failure. It usually means someone has manually tampered with the + // database tables and should not occur during normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + Ok(()) + } +} diff --git a/watch/src/cli.rs b/watch/src/cli.rs new file mode 100644 index 00000000000..a8e5f3716fe --- /dev/null +++ b/watch/src/cli.rs @@ -0,0 +1,55 @@ +use crate::{config::Config, logger, server, updater}; +use clap::{App, Arg}; +use tokio::sync::oneshot; + +pub const SERVE: &str = "serve"; +pub const RUN_UPDATER: &str = "run-updater"; +pub const CONFIG: &str = "config"; + +fn run_updater<'a, 'b>() -> App<'a, 'b> { + App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +} + +fn serve<'a, 'b>() -> App<'a, 'b> { + App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +} + +pub fn app<'a, 'b>() -> App<'a, 'b> { + App::new("beacon_watch_daemon") + .author("Sigma Prime ") + .setting(clap::AppSettings::ColoredHelp) + .arg( + Arg::with_name(CONFIG) + .long(CONFIG) + .value_name("PATH_TO_CONFIG") + .help("Path to configuration file") + .takes_value(true) + .global(true), + ) + .subcommand(run_updater()) + .subcommand(serve()) +} + +pub async fn run() -> Result<(), String> { + let matches = app().get_matches(); + + let config = match matches.value_of(CONFIG) { + Some(path) => Config::load_from_file(path.to_string())?, + None => Config::default(), + }; + + logger::init_logger(&config.log_level); + + match matches.subcommand() { + (RUN_UPDATER, Some(_)) => updater::run_updater(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), + (SERVE, Some(_)) => { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + server::serve(config, shutdown_rx) + .await + .map_err(|e| format!("Failure: {:?}", e)) + } + _ => Err("Unsupported subcommand. See --help".into()), + } +} diff --git a/watch/src/client.rs b/watch/src/client.rs new file mode 100644 index 00000000000..43aaccde343 --- /dev/null +++ b/watch/src/client.rs @@ -0,0 +1,178 @@ +use crate::block_packing::WatchBlockPacking; +use crate::block_rewards::WatchBlockRewards; +use crate::database::models::{ + WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator, +}; +use crate::suboptimal_attestations::WatchAttestation; + +use eth2::types::BlockId; +use reqwest::Client; +use serde::de::DeserializeOwned; +use types::Hash256; +use url::Url; + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchHttpClient { + pub client: Client, + pub server: Url, +} + +impl WatchHttpClient { + async fn get_opt(&self, url: Url) -> Result, Error> { + let response = self.client.get(url).send().await?; + + if response.status() == 404 { + Ok(None) + } else { + response + .error_for_status()? + .json() + .await + .map_err(Into::into) + } + } + + pub async fn get_beacon_blocks( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? 
+ .join(&block_id.to_string())?; + + self.get_opt(url).await + } + + pub async fn get_lowest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_lowest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_next_beacon_block( + &self, + parent: Hash256, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{parent:?}/"))? + .join("next")?; + + self.get_opt(url).await + } + + pub async fn get_validator_by_index( + &self, + index: i32, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join(&format!("{index}"))?; + + self.get_opt(url).await + } + + pub async fn get_proposer_info( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("proposer")?; + + self.get_opt(url).await + } + + pub async fn get_block_reward( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("rewards")?; + + self.get_opt(url).await + } + + pub async fn get_block_packing( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("packing")?; + + self.get_opt(url).await + } + + pub async fn get_all_validators(&self) -> Result>, Error> { + let url = self.server.join("v1/")?.join("validators/")?.join("all")?; + + self.get_opt(url).await + } + + pub async fn get_attestations( + &self, + epoch: i32, + ) -> Result>, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join("all/")? + .join("attestation/")? + .join(&format!("{epoch}"))?; + + self.get_opt(url).await + } +} diff --git a/watch/src/config.rs b/watch/src/config.rs new file mode 100644 index 00000000000..4e61f9df9ca --- /dev/null +++ b/watch/src/config.rs @@ -0,0 +1,50 @@ +use crate::blockprint::Config as BlockprintConfig; +use crate::database::Config as DatabaseConfig; +use crate::server::Config as ServerConfig; +use crate::updater::Config as UpdaterConfig; + +use serde::{Deserialize, Serialize}; +use std::fs::File; + +pub const LOG_LEVEL: &str = "debug"; + +fn log_level() -> String { + LOG_LEVEL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default)] + pub blockprint: BlockprintConfig, + #[serde(default)] + pub database: DatabaseConfig, + #[serde(default)] + pub server: ServerConfig, + #[serde(default)] + pub updater: UpdaterConfig, + /// The minimum severity for log messages. 
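+    // Every section above is optional in the YAML file; omitted fields fall back to their serde
+    // defaults (e.g. "debug" for `log_level`). An illustrative minimal config might be just:
+    //   log_level: "info"
+    //   database:
+    //     dbname: "watch"
+    // (both values are made-up overrides, not the defaults).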
+ #[serde(default = "log_level")] + pub log_level: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + blockprint: BlockprintConfig::default(), + database: DatabaseConfig::default(), + server: ServerConfig::default(), + updater: UpdaterConfig::default(), + log_level: log_level(), + } + } +} + +impl Config { + pub fn load_from_file(path_to_file: String) -> Result { + let file = + File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?; + let config: Config = serde_yaml::from_reader(file) + .map_err(|e| format!("Error parsing config file: {:?}", e))?; + Ok(config) + } +} diff --git a/watch/src/database/compat.rs b/watch/src/database/compat.rs new file mode 100644 index 00000000000..b8cda0b2168 --- /dev/null +++ b/watch/src/database/compat.rs @@ -0,0 +1,49 @@ +//! Implementations of PostgreSQL compatibility traits. +use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot}; +use diesel::deserialize::{self, FromSql}; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{self, Output, ToSql}; +use diesel::sql_types::{Binary, Integer}; + +use std::convert::TryFrom; + +macro_rules! impl_to_from_sql_int { + ($type:ty) => { + impl ToSql for $type + where + i32: ToSql, + { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let v = i32::try_from(self.as_u64()).map_err(|e| Box::new(e))?; + >::to_sql(&v, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Ok(Self::new(i32::from_sql(bytes)? as u64)) + } + } + }; +} + +macro_rules! impl_to_from_sql_binary { + ($type:ty) => { + impl ToSql for $type { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let b = self.as_bytes(); + <&[u8] as ToSql>::to_sql(&b, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Self::from_bytes(bytes.as_bytes()).map_err(|e| e.to_string().into()) + } + } + }; +} + +impl_to_from_sql_int!(WatchSlot); +impl_to_from_sql_binary!(WatchHash); +impl_to_from_sql_binary!(WatchPK); diff --git a/watch/src/database/config.rs b/watch/src/database/config.rs new file mode 100644 index 00000000000..dc0c70832f4 --- /dev/null +++ b/watch/src/database/config.rs @@ -0,0 +1,74 @@ +use serde::{Deserialize, Serialize}; + +pub const USER: &str = "postgres"; +pub const PASSWORD: &str = "postgres"; +pub const DBNAME: &str = "dev"; +pub const DEFAULT_DBNAME: &str = "postgres"; +pub const HOST: &str = "localhost"; +pub const fn port() -> u16 { + 5432 +} +pub const fn connect_timeout_millis() -> u64 { + 2_000 // 2s +} + +fn user() -> String { + USER.to_string() +} + +fn password() -> String { + PASSWORD.to_string() +} + +fn dbname() -> String { + DBNAME.to_string() +} + +fn default_dbname() -> String { + DEFAULT_DBNAME.to_string() +} + +fn host() -> String { + HOST.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "user")] + pub user: String, + #[serde(default = "password")] + pub password: String, + #[serde(default = "dbname")] + pub dbname: String, + #[serde(default = "default_dbname")] + pub default_dbname: String, + #[serde(default = "host")] + pub host: String, + #[serde(default = "port")] + pub port: u16, + #[serde(default = "connect_timeout_millis")] + pub connect_timeout_millis: u64, +} + +impl Default for Config { + fn default() -> Self { + Self { + user: user(), + password: password(), + dbname: dbname(), + default_dbname: 
default_dbname(), + host: host(), + port: port(), + connect_timeout_millis: connect_timeout_millis(), + } + } +} + +impl Config { + pub fn build_database_url(&self) -> String { + format!( + "postgres://{}:{}@{}:{}/{}", + self.user, self.password, self.host, self.port, self.dbname + ) + } +} diff --git a/watch/src/database/error.rs b/watch/src/database/error.rs new file mode 100644 index 00000000000..8c5088fa133 --- /dev/null +++ b/watch/src/database/error.rs @@ -0,0 +1,55 @@ +use bls::Error as BlsError; +use diesel::result::{ConnectionError, Error as PgError}; +use eth2::SensitiveError; +use r2d2::Error as PoolError; +use std::fmt; +use types::BeaconStateError; + +#[derive(Debug)] +pub enum Error { + BeaconState(BeaconStateError), + Database(PgError), + DatabaseCorrupted, + InvalidSig(BlsError), + PostgresConnection(ConnectionError), + Pool(PoolError), + SensitiveUrl(SensitiveError), + InvalidRoot, + Other(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconState(e) + } +} + +impl From for Error { + fn from(e: ConnectionError) -> Self { + Error::PostgresConnection(e) + } +} + +impl From for Error { + fn from(e: PgError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: PoolError) -> Self { + Error::Pool(e) + } +} + +impl From for Error { + fn from(e: BlsError) -> Self { + Error::InvalidSig(e) + } +} diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs new file mode 100644 index 00000000000..b9a7a900a59 --- /dev/null +++ b/watch/src/database/mod.rs @@ -0,0 +1,782 @@ +mod config; +mod error; + +pub mod compat; +pub mod models; +pub mod schema; +pub mod utils; +pub mod watch_types; + +use self::schema::{ + active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations, + validators, +}; + +use diesel::dsl::max; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel::r2d2::{Builder, ConnectionManager, Pool, PooledConnection}; +use diesel::upsert::excluded; +use log::{debug, info}; +use std::collections::HashMap; +use std::time::Instant; +use types::{EthSpec, SignedBeaconBlock}; + +pub use self::error::Error; +pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator}; +pub use self::watch_types::{WatchHash, WatchPK, WatchSlot}; + +pub use crate::block_rewards::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; + +pub use crate::block_packing::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; + +pub use crate::suboptimal_attestations::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use crate::blockprint::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + WatchBlockprint, +}; + +pub use config::Config; + +/// Batch inserts cannot exceed a certain size. +/// See https://github.com/diesel-rs/diesel/issues/2414. 
+/// For some reason, this seems to translate to 65535 / 5 (13107) records. +pub const MAX_SIZE_BATCH_INSERT: usize = 13107; + +pub type PgPool = Pool>; +pub type PgConn = PooledConnection>; + +/// Connect to a Postgresql database and build a connection pool. +pub fn build_connection_pool(config: &Config) -> Result { + let database_url = config.clone().build_database_url(); + info!("Building connection pool at: {database_url}"); + let pg = ConnectionManager::::new(&database_url); + Builder::new().build(pg).map_err(Error::Pool) +} + +/// Retrieve an idle connection from the pool. +pub fn get_connection(pool: &PgPool) -> Result { + pool.get().map_err(Error::Pool) +} + +/// Insert the active config into the database. This is used to check if the connected beacon node +/// is compatible with the database. These values will not change (except +/// `current_blockprint_checkpoint`). +pub fn insert_active_config( + conn: &mut PgConn, + new_config_name: String, + new_slots_per_epoch: u64, +) -> Result<(), Error> { + use self::active_config::dsl::*; + + diesel::insert_into(active_config) + .values(&vec![( + id.eq(1), + config_name.eq(new_config_name), + slots_per_epoch.eq(new_slots_per_epoch as i32), + )]) + .on_conflict_do_nothing() + .execute(conn)?; + + Ok(()) +} + +/// Get the active config from the database. +pub fn get_active_config(conn: &mut PgConn) -> Result, Error> { + use self::active_config::dsl::*; + Ok(active_config + .select((config_name, slots_per_epoch)) + .filter(id.eq(1)) + .first::<(String, i32)>(conn) + .optional()?) +} + +/// +/// INSERT statements +/// + +/// Inserts a single row into the `canonical_slots` table. +/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> { + diesel::insert_into(canonical_slots::table) + .values(&new_slot) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Canonical slot inserted: {}", new_slot.slot); + Ok(()) +} + +pub fn insert_beacon_block( + conn: &mut PgConn, + block: SignedBeaconBlock, + root: WatchHash, +) -> Result<(), Error> { + use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot}; + + let block_message = block.message(); + + // Pull out relevant values from the block. + let slot = WatchSlot::from_slot(block.slot()); + let parent_root = WatchHash::from_hash(block.parent_root()); + let proposer_index = block_message.proposer_index() as i32; + let graffiti = block_message.body().graffiti().as_utf8_lossy(); + let attestation_count = block_message.body().attestations().len() as i32; + + let full_payload = block_message.execution_payload().ok(); + + let transaction_count: Option = if let Some(bellatrix_payload) = + full_payload.and_then(|payload| payload.execution_payload_merge().ok()) + { + Some(bellatrix_payload.transactions.len() as i32) + } else { + full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.transactions.len() as i32) + }; + + let withdrawal_count: Option = full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.withdrawals.len() as i32); + + let block_to_add = WatchBeaconBlock { + slot, + root, + parent_root, + attestation_count, + transaction_count, + withdrawal_count, + }; + + let proposer_info_to_add = WatchProposerInfo { + slot, + proposer_index, + graffiti, + }; + + // Update the canonical slots table. 
+ diesel::update(canonical_slots::table) + .set(beacon_block.eq(root)) + .filter(canonical_slot.eq(slot)) + // Do not overwrite the value if it already exists. + .filter(beacon_block.is_null()) + .execute(conn)?; + + diesel::insert_into(beacon_blocks::table) + .values(block_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + diesel::insert_into(proposer_info::table) + .values(proposer_info_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}"); + Ok(()) +} + +/// Insert a validator into the `validators` table +/// +/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`. +pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> { + use self::validators::dsl::*; + let new_index = validator.index; + let new_public_key = validator.public_key; + + diesel::insert_into(validators) + .values(validator) + .on_conflict(index) + .do_update() + .set(( + status.eq(excluded(status)), + activation_epoch.eq(excluded(activation_epoch)), + exit_epoch.eq(excluded(exit_epoch)), + )) + .execute(conn)?; + + debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}"); + Ok(()) +} + +/// Insert a batch of values into the `validators` table. +/// +/// On a conflict, it will do nothing. +/// +/// Should not be used when updating validators. +/// Validators should be updated through the `insert_validator` function which contains the correct +/// `on_conflict` clauses. +pub fn insert_batch_validators( + conn: &mut PgConn, + all_validators: Vec, +) -> Result<(), Error> { + use self::validators::dsl::*; + + let mut count = 0; + + for chunk in all_validators.chunks(1000) { + count += diesel::insert_into(validators) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + debug!("Validators inserted, count: {count}"); + Ok(()) +} + +/// +/// SELECT statements +/// + +/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. +pub fn get_canonical_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`. +/// Only returns the non-skipped slot which matches `root`. +pub fn get_canonical_slot_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(root.eq(root_query)) + .filter(skipped.eq(false)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given +/// `slot_query`. 
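+// For example, `get_root_at_slot(&mut conn, WatchSlot::from_slot(Slot::new(100)))?` returns
+// `Some(root)` when slot 100 exists in `canonical_slots` (whether or not it was skipped) and
+// `None` otherwise.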
+#[allow(dead_code)] +pub fn get_root_at_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .select(root) + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot`. +pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot` and where `skipped == false`. +pub fn get_lowest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?})"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot`. +pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot` and where `skipped == false`. +pub fn get_highest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `canonical_slots` table where `slot >= `start_slot && slot <= +/// `end_slot`. 
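+// Both bounds are inclusive: e.g. requesting slots 10 through 12 returns the rows for slots
+// 10, 11 and 12, including any slots marked as skipped.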
+pub fn get_canonical_slots_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}", + start_slot.as_u64(), + end_slot.as_u64(), + time_taken + ); + Ok(result) +} + +/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null` +/// and `skipped == false` +pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + + let result = canonical_slots + .select(root) + .filter(beacon_block.is_null()) + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .load::(conn)?; + + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is minimum. +pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is maximum. +pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`. +pub fn get_beacon_block_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`. +pub fn get_beacon_block_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`. +/// This fetches the next block in the database. +/// +/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain). 
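+// Passing a block's own `root` as `parent` returns its child (if one has been stored), so
+// repeatedly calling this with the previous result's `root` walks the chain from oldest to
+// newest block.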
+pub fn get_beacon_block_with_parent(
+    conn: &mut PgConn,
+    parent: WatchHash,
+) -> Result<Option<WatchBeaconBlock>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .filter(parent_root.eq(parent))
+        .first::<WatchBeaconBlock>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Select all rows of the `beacon_blocks` table where `slot >= start_slot && slot <= end_slot`.
+pub fn get_beacon_blocks_by_range(
+    conn: &mut PgConn,
+    start_slot: WatchSlot,
+    end_slot: WatchSlot,
+) -> Result<Option<Vec<WatchBeaconBlock>>, Error> {
+    use self::beacon_blocks::dsl::*;
+    let timer = Instant::now();
+
+    let result = beacon_blocks
+        .filter(slot.ge(start_slot))
+        .filter(slot.le(end_slot))
+        .load::<WatchBeaconBlock>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`.
+pub fn get_proposer_info_by_root(
+    conn: &mut PgConn,
+    root_query: WatchHash,
+) -> Result<Option<WatchProposerInfo>, Error> {
+    use self::beacon_blocks::dsl::{beacon_blocks, root};
+    use self::proposer_info::dsl::*;
+    let timer = Instant::now();
+
+    let join = beacon_blocks.inner_join(proposer_info);
+
+    let result = join
+        .select((slot, proposer_index, graffiti))
+        .filter(root.eq(root_query))
+        .first::<WatchProposerInfo>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`.
+pub fn get_proposer_info_by_slot(
+    conn: &mut PgConn,
+    slot_query: WatchSlot,
+) -> Result<Option<WatchProposerInfo>, Error> {
+    use self::proposer_info::dsl::*;
+    let timer = Instant::now();
+
+    let result = proposer_info
+        .filter(slot.eq(slot_query))
+        .first::<WatchProposerInfo>(conn)
+        .optional()?;
+
+    let time_taken = timer.elapsed();
+    debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}");
+    Ok(result)
+}
+
+/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`.
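+// For example, with 32 slots per epoch, passing `start_slot == 0` and `end_slot == 31` returns
+// the proposer index and graffiti recorded for every non-skipped slot in epoch 0.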
+#[allow(dead_code)] +pub fn get_proposer_info_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}" + ); + Ok(result) +} + +pub fn get_validators_latest_proposer_info( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let proposers = proposer_info + .filter(proposer_index.eq_any(indices_query)) + .load::(conn)?; + + let mut result = HashMap::new(); + for proposer in proposers { + result + .entry(proposer.proposer_index) + .or_insert_with(|| proposer.clone()); + let entry = result + .get_mut(&proposer.proposer_index) + .ok_or_else(|| Error::Other("An internal error occured".to_string()))?; + if proposer.slot > entry.slot { + entry.slot = proposer.slot + } + } + + Ok(result) +} + +/// Selects the max(`slot`) and `proposer_index` of each unique index in the +/// `proposer_info` table and returns them formatted as a `HashMap`. +/// Only returns rows which have `slot <= target_slot`. +/// +/// Ideally, this would return the full row, but I have not found a way to do that without using +/// a much more expensive SQL query. +pub fn get_all_validators_latest_proposer_info_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let latest_proposals: Vec<(i32, Option)> = proposer_info + .group_by(proposer_index) + .select((proposer_index, max(slot))) + .filter(slot.le(target_slot)) + .load::<(i32, Option)>(conn)?; + + let mut result = HashMap::new(); + + for proposal in latest_proposals { + if let Some(latest_slot) = proposal.1 { + result.insert(latest_slot, proposal.0); + } + } + + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `validator_index_query`. +pub fn get_validator_by_index( + conn: &mut PgConn, + validator_index_query: i32, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(index.eq(validator_index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `public_key_query`. +pub fn get_validator_by_public_key( + conn: &mut PgConn, + public_key_query: WatchPK, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(public_key.eq(public_key_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects all rows from the `validators` table which have an `index` contained in +/// the `indices_query`. 
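+// For example, `get_validators_by_indices(&mut conn, vec![0, 1, 2])?` returns at most three
+// rows; indices with no matching row are simply absent from the result rather than causing an
+// error.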
+#[allow(dead_code)] +pub fn get_validators_by_indices( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let query_len = indices_query.len(); + let result = validators + .filter(index.eq_any(indices_query)) + .load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("{query_len} validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +// Selects all rows from the `validators` table. +pub fn get_all_validators(conn: &mut PgConn) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators.load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("All validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +/// Counts the number of rows in the `validators` table. +#[allow(dead_code)] +pub fn count_validators(conn: &mut PgConn) -> Result { + use self::validators::dsl::*; + + validators.count().get_result(conn).map_err(Error::Database) +} + +/// Counts the number of rows in the `validators` table where +/// `activation_epoch <= target_slot.epoch()`. +pub fn count_validators_activated_before_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result { + use self::validators::dsl::*; + + let target_epoch = target_slot.epoch(slots_per_epoch); + + validators + .count() + .filter(activation_epoch.le(target_epoch.as_u64() as i32)) + .get_result(conn) + .map_err(Error::Database) +} + +/// +/// DELETE statements. +/// + +/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. +/// +/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from +/// `canonical_slots` will delete all corresponding rows in `beacon_blocks, `block_rewards`, +/// `block_packing` and `proposer_info`. +pub fn delete_canonical_slots_above( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result { + use self::canonical_slots::dsl::*; + + let result = diesel::delete(canonical_slots) + .filter(slot.gt(slot_query)) + .execute(conn)?; + + debug!("Deleted canonical slots above {slot_query}: {result} rows deleted"); + Ok(result) +} + +/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot` greater +/// than `epoch_start_slot_query`. 
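The cascading behaviour described above comes from the migration SQL rather than from anything in `delete_canonical_slots_above` itself. The migrations are not part of this hunk; the snippet below is only an illustration, with assumed table and constraint names, of the kind of foreign key that produces it:

```rust
// Illustration only: a child table declaring ON DELETE CASCADE against `canonical_slots`.
// The real constraints live in the crate's SQL migrations and may differ in detail.
const EXAMPLE_CASCADING_FK: &str = "
    ALTER TABLE beacon_blocks
        ADD CONSTRAINT beacon_blocks_slot_fkey
        FOREIGN KEY (slot)
        REFERENCES canonical_slots (slot)
        ON DELETE CASCADE;
";
```

With constraints of this shape in place, the single `diesel::delete(canonical_slots)` above is enough to prune every dependent row in one round trip.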
+pub fn delete_suboptimal_attestations_above( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result { + use self::suboptimal_attestations::dsl::*; + + let result = diesel::delete(suboptimal_attestations) + .filter(epoch_start_slot.gt(epoch_start_slot_query)) + .execute(conn)?; + + debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}"); + Ok(result) +} diff --git a/watch/src/database/models.rs b/watch/src/database/models.rs new file mode 100644 index 00000000000..f42444d6612 --- /dev/null +++ b/watch/src/database/models.rs @@ -0,0 +1,67 @@ +use crate::database::{ + schema::{beacon_blocks, canonical_slots, proposer_info, validators}, + watch_types::{WatchHash, WatchPK, WatchSlot}, +}; +use diesel::{Insertable, Queryable}; +use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; + +pub type WatchEpoch = i32; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = canonical_slots)] +pub struct WatchCanonicalSlot { + pub slot: WatchSlot, + pub root: WatchHash, + pub skipped: bool, + pub beacon_block: Option, +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = beacon_blocks)] +pub struct WatchBeaconBlock { + pub slot: WatchSlot, + pub root: WatchHash, + pub parent_root: WatchHash, + pub attestation_count: i32, + pub transaction_count: Option, + pub withdrawal_count: Option, +} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = validators)] +pub struct WatchValidator { + pub index: i32, + pub public_key: WatchPK, + pub status: String, + pub activation_epoch: Option, + pub exit_epoch: Option, +} + +// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed. +impl Hash for WatchValidator { + fn hash(&self, state: &mut H) { + self.index.hash(state); + self.status.hash(state); + self.activation_epoch.hash(state); + self.exit_epoch.hash(state); + } +} + +impl PartialEq for WatchValidator { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + && self.status == other.status + && self.activation_epoch == other.activation_epoch + && self.exit_epoch == other.exit_epoch + } +} +impl Eq for WatchValidator {} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = proposer_info)] +pub struct WatchProposerInfo { + pub slot: WatchSlot, + pub proposer_index: i32, + pub graffiti: String, +} diff --git a/watch/src/database/schema.rs b/watch/src/database/schema.rs new file mode 100644 index 00000000000..32f22d506db --- /dev/null +++ b/watch/src/database/schema.rs @@ -0,0 +1,102 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + active_config (id) { + id -> Int4, + config_name -> Text, + slots_per_epoch -> Int4, + } +} + +diesel::table! { + beacon_blocks (slot) { + slot -> Int4, + root -> Bytea, + parent_root -> Bytea, + attestation_count -> Int4, + transaction_count -> Nullable, + withdrawal_count -> Nullable, + } +} + +diesel::table! { + block_packing (slot) { + slot -> Int4, + available -> Int4, + included -> Int4, + prior_skip_slots -> Int4, + } +} + +diesel::table! { + block_rewards (slot) { + slot -> Int4, + total -> Int4, + attestation_reward -> Int4, + sync_committee_reward -> Int4, + } +} + +diesel::table! { + blockprint (slot) { + slot -> Int4, + best_guess -> Text, + } +} + +diesel::table! 
{ + canonical_slots (slot) { + slot -> Int4, + root -> Bytea, + skipped -> Bool, + beacon_block -> Nullable, + } +} + +diesel::table! { + proposer_info (slot) { + slot -> Int4, + proposer_index -> Int4, + graffiti -> Text, + } +} + +diesel::table! { + suboptimal_attestations (epoch_start_slot, index) { + epoch_start_slot -> Int4, + index -> Int4, + source -> Bool, + head -> Bool, + target -> Bool, + } +} + +diesel::table! { + validators (index) { + index -> Int4, + public_key -> Bytea, + status -> Text, + activation_epoch -> Nullable, + exit_epoch -> Nullable, + } +} + +diesel::joinable!(block_packing -> beacon_blocks (slot)); +diesel::joinable!(block_rewards -> beacon_blocks (slot)); +diesel::joinable!(blockprint -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> validators (proposer_index)); +diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot)); +diesel::joinable!(suboptimal_attestations -> validators (index)); + +diesel::allow_tables_to_appear_in_same_query!( + active_config, + beacon_blocks, + block_packing, + block_rewards, + blockprint, + canonical_slots, + proposer_info, + suboptimal_attestations, + validators, +); diff --git a/watch/src/database/utils.rs b/watch/src/database/utils.rs new file mode 100644 index 00000000000..7e450f0cee7 --- /dev/null +++ b/watch/src/database/utils.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] +use crate::database::config::Config; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; + +/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`. +/// +/// This is useful for creating or dropping databases, since these actions must be done by +/// logging into another database. +pub fn get_config_using_default_db(config: &Config) -> (Config, String) { + let mut config = config.clone(); + let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone()); + (config, new_dbname) +} + +/// Runs the set of migrations as detected in the local directory. +/// Equivalent to `diesel migration run`. +/// +/// Contains `unwrap`s so is only suitable for test code. 
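`get_config_using_default_db` above leans on `std::mem::replace`, which writes the new value in place and hands back whatever was there before, so the second element of the returned tuple is the original database name (the doc comment calls it `old_dbname`, even though the local binding in the function is named `new_dbname`). A runnable, plain-`std` sketch of that behaviour:

```rust
fn main() {
    let mut dbname = String::from("watch");
    let default_dbname = String::from("postgres");

    // Swap in the default name and get the previous one back.
    let previous = std::mem::replace(&mut dbname, default_dbname);

    assert_eq!(dbname, "postgres"); // the config now targets the default database
    assert_eq!(previous, "watch");  // the caller keeps the name of the database to create or drop
}
```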
+/// TODO(mac) refactor to return Result +pub fn run_migrations(config: &Config) -> PgConnection { + let database_url = config.clone().build_database_url(); + let mut conn = PgConnection::establish(&database_url).unwrap(); + let migrations = FileBasedMigrations::find_migrations_directory().unwrap(); + conn.run_pending_migrations(migrations).unwrap(); + conn.begin_test_transaction().unwrap(); + conn +} diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs new file mode 100644 index 00000000000..0b3ba2c304d --- /dev/null +++ b/watch/src/database/watch_types.rs @@ -0,0 +1,119 @@ +use crate::database::error::Error; +use diesel::{ + sql_types::{Binary, Integer}, + AsExpression, FromSqlRow, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; +use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +#[derive( + Clone, + Copy, + Debug, + AsExpression, + FromSqlRow, + Deserialize, + Serialize, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[diesel(sql_type = Integer)] +pub struct WatchSlot(Slot); + +impl fmt::Display for WatchSlot { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl WatchSlot { + pub fn new(slot: u64) -> Self { + Self(Slot::new(slot)) + } + + pub fn from_slot(slot: Slot) -> Self { + Self(slot) + } + + pub fn as_slot(self) -> Slot { + self.0 + } + + pub fn as_u64(self) -> u64 { + self.0.as_u64() + } + + pub fn epoch(self, slots_per_epoch: u64) -> Epoch { + self.as_slot().epoch(slots_per_epoch) + } +} + +#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Deserialize, Serialize)] +#[diesel(sql_type = Binary)] +pub struct WatchHash(Hash256); + +impl fmt::Display for WatchHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchHash { + pub fn as_hash(&self) -> Hash256 { + self.0 + } + + pub fn from_hash(hash: Hash256) -> Self { + WatchHash(hash) + } + + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } + + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() == 32 { + Ok(WatchHash(Hash256::from_slice(src))) + } else { + Err(Error::InvalidRoot) + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)] +#[diesel(sql_type = Binary)] +pub struct WatchPK(PublicKeyBytes); + +impl fmt::Display for WatchPK { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchPK { + pub fn as_bytes(&self) -> &[u8] { + self.0.as_serialized() + } + + pub fn from_bytes(src: &[u8]) -> Result { + Ok(WatchPK(PublicKeyBytes::deserialize(src)?)) + } + + pub fn from_pubkey(key: PublicKeyBytes) -> Self { + WatchPK(key) + } +} + +impl FromStr for WatchPK { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(WatchPK( + PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?, + )) + } +} diff --git a/watch/src/lib.rs b/watch/src/lib.rs new file mode 100644 index 00000000000..664c9451655 --- /dev/null +++ b/watch/src/lib.rs @@ -0,0 +1,12 @@ +#![cfg(unix)] +pub mod block_packing; +pub mod block_rewards; +pub mod blockprint; +pub mod cli; +pub mod client; +pub mod config; +pub mod database; +pub mod logger; +pub mod server; +pub mod suboptimal_attestations; +pub mod updater; diff --git a/watch/src/logger.rs b/watch/src/logger.rs new file mode 100644 index 00000000000..49310b42aae --- /dev/null +++ b/watch/src/logger.rs @@ -0,0 +1,24 @@ +use env_logger::Builder; +use log::{info, LevelFilter}; +use 
std::process; + +pub fn init_logger(log_level: &str) { + let log_level = match log_level.to_lowercase().as_str() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => { + eprintln!("Unsupported log level"); + process::exit(1) + } + }; + + let mut builder = Builder::new(); + builder.filter(Some("watch"), log_level); + + builder.init(); + + info!("Logger initialized with log-level: {log_level}"); +} diff --git a/watch/src/main.rs b/watch/src/main.rs new file mode 100644 index 00000000000..f971747da42 --- /dev/null +++ b/watch/src/main.rs @@ -0,0 +1,41 @@ +#[cfg(unix)] +use std::process; + +#[cfg(unix)] +mod block_packing; +#[cfg(unix)] +mod block_rewards; +#[cfg(unix)] +mod blockprint; +#[cfg(unix)] +mod cli; +#[cfg(unix)] +mod config; +#[cfg(unix)] +mod database; +#[cfg(unix)] +mod logger; +#[cfg(unix)] +mod server; +#[cfg(unix)] +mod suboptimal_attestations; +#[cfg(unix)] +mod updater; + +#[cfg(unix)] +#[tokio::main] +async fn main() { + match cli::run().await { + Ok(()) => process::exit(0), + Err(e) => { + eprintln!("Command failed with: {}", e); + drop(e); + process::exit(1) + } + } +} + +#[cfg(windows)] +fn main() { + eprintln!("Windows is not supported. Exiting."); +} diff --git a/watch/src/server/config.rs b/watch/src/server/config.rs new file mode 100644 index 00000000000..a7d38e706f8 --- /dev/null +++ b/watch/src/server/config.rs @@ -0,0 +1,28 @@ +use serde::{Deserialize, Serialize}; +use std::net::IpAddr; + +pub const LISTEN_ADDR: &str = "127.0.0.1"; + +pub const fn listen_port() -> u16 { + 5059 +} +fn listen_addr() -> IpAddr { + LISTEN_ADDR.parse().expect("Server address is not valid") +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "listen_addr")] + pub listen_addr: IpAddr, + #[serde(default = "listen_port")] + pub listen_port: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + listen_addr: listen_addr(), + listen_port: listen_port(), + } + } +} diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs new file mode 100644 index 00000000000..d1542f78419 --- /dev/null +++ b/watch/src/server/error.rs @@ -0,0 +1,50 @@ +use crate::database::Error as DbError; +use axum::Error as AxumError; +use axum::{http::StatusCode, response::IntoResponse, Json}; +use hyper::Error as HyperError; +use serde_json::json; + +#[derive(Debug)] +pub enum Error { + Axum(AxumError), + Hyper(HyperError), + Database(DbError), + BadRequest, + NotFound, + Other(String), +} + +impl IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + let (status, error_message) = match self { + Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"), + Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"), + _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"), + }; + (status, Json(json!({ "error": error_message }))).into_response() + } +} + +impl From for Error { + fn from(e: HyperError) -> Self { + Error::Hyper(e) + } +} + +impl From for Error { + fn from(e: AxumError) -> Self { + Error::Axum(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} diff --git a/watch/src/server/handler.rs b/watch/src/server/handler.rs new file mode 100644 index 00000000000..6777026867e --- /dev/null +++ b/watch/src/server/handler.rs @@ -0,0 +1,266 @@ +use crate::database::{ + self, Error 
as DbError, PgPool, WatchBeaconBlock, WatchCanonicalSlot, WatchHash, WatchPK, + WatchProposerInfo, WatchSlot, WatchValidator, +}; +use crate::server::Error; +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use eth2::types::BlockId; +use std::collections::HashMap; +use std::str::FromStr; + +pub async fn get_slot( + Path(slot): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_canonical_slot( + &mut conn, + WatchSlot::new(slot), + )?)) +} + +pub async fn get_slot_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slot_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slots_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_canonical_slots_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?; + match block_id { + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_previous( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => { + if let Some(block) = + database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))? 
+ .map(|block| block.parent_root) + { + Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?)) + } else { + Err(Error::NotFound) + } + } + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_next( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot + 1_u64), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_blocks_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_beacon_blocks_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block_proposer( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validator( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_public_key( + &mut conn, pubkey, + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_index(&mut conn, index)?)) + } +} + +pub async fn get_all_validators( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_all_validators(&mut conn)?)) +} + +pub async fn get_validator_latest_proposal( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + let validator = + database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![validator.index], + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![index], + )?)) + } +} + +pub async fn 
get_client_breakdown( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + Ok(Json(database::get_validators_clients_at_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?)) + } else { + Err(Error::Database(DbError::Other( + "No slots found in database.".to_string(), + ))) + } +} + +pub async fn get_client_breakdown_percentages( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + let mut result = HashMap::new(); + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + let total = database::count_validators_activated_before_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?; + let clients = + database::get_validators_clients_at_slot(&mut conn, target_slot.slot, slots_per_epoch)?; + for (client, number) in clients.iter() { + let percentage: f64 = *number as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs new file mode 100644 index 00000000000..09d5ec6aac5 --- /dev/null +++ b/watch/src/server/mod.rs @@ -0,0 +1,134 @@ +use crate::block_packing::block_packing_routes; +use crate::block_rewards::block_rewards_routes; +use crate::blockprint::blockprint_routes; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool}; +use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; +use axum::{ + handler::Handler, + http::{StatusCode, Uri}, + routing::get, + Extension, Json, Router, +}; +use eth2::types::ErrorMessage; +use log::info; +use std::future::Future; +use std::net::SocketAddr; +use tokio::sync::oneshot; + +pub use config::Config; +pub use error::Error; + +mod config; +mod error; +mod handler; + +pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { + let db = database::build_connection_pool(&config.database)?; + let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? + .ok_or_else(|| { + Error::Other( + "Database not found. Please run the updater prior to starting the server" + .to_string(), + ) + })?; + + let server = start_server(&config, slots_per_epoch as u64, db, async { + let _ = shutdown.await; + })?; + + server.await?; + + Ok(()) +} + +/// Creates a server that will serve requests using information from `config`. +/// +/// The server will create its own connection pool to serve connections to the database. +/// This is separate to the connection pool that is used for the `updater`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the address specified in the config and then return a +/// Future representing the actual server that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
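A hedged sketch of how `serve` above might be driven, wiring its shutdown receiver to Ctrl-C. None of this is part of the diff: it assumes the caller already has a `crate::config::Config` (the `FullConfig` alias) and that tokio's `signal` feature is enabled.

```rust
use tokio::sync::oneshot;

async fn run_http_api(config: crate::config::Config) -> Result<(), crate::server::Error> {
    let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>();

    // Resolve the shutdown future on Ctrl-C; `serve` then shuts the server down gracefully.
    tokio::spawn(async move {
        let _ = tokio::signal::ctrl_c().await;
        let _ = shutdown_tx.send(());
    });

    crate::server::serve(config, shutdown_rx).await
}
```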
+pub fn start_server( + config: &FullConfig, + slots_per_epoch: u64, + pool: PgPool, + shutdown: impl Future + Send + Sync + 'static, +) -> Result> + 'static, Error> { + let mut routes = Router::new() + .route("/v1/slots", get(handler::get_slots_by_range)) + .route("/v1/slots/:slot", get(handler::get_slot)) + .route("/v1/slots/lowest", get(handler::get_slot_lowest)) + .route("/v1/slots/highest", get(handler::get_slot_highest)) + .route("/v1/slots/:slot/block", get(handler::get_block)) + .route("/v1/blocks", get(handler::get_blocks_by_range)) + .route("/v1/blocks/:block", get(handler::get_block)) + .route("/v1/blocks/lowest", get(handler::get_block_lowest)) + .route("/v1/blocks/highest", get(handler::get_block_highest)) + .route( + "/v1/blocks/:block/previous", + get(handler::get_block_previous), + ) + .route("/v1/blocks/:block/next", get(handler::get_block_next)) + .route( + "/v1/blocks/:block/proposer", + get(handler::get_block_proposer), + ) + .route("/v1/validators/:validator", get(handler::get_validator)) + .route("/v1/validators/all", get(handler::get_all_validators)) + .route( + "/v1/validators/:validator/latest_proposal", + get(handler::get_validator_latest_proposal), + ) + .route("/v1/clients", get(handler::get_client_breakdown)) + .route( + "/v1/clients/percentages", + get(handler::get_client_breakdown_percentages), + ) + .merge(attestation_routes()) + .merge(blockprint_routes()) + .merge(block_packing_routes()) + .merge(block_rewards_routes()); + + if config.blockprint.enabled && config.updater.attestations { + routes = routes.merge(blockprint_attestation_routes()) + } + + let app = routes + .fallback(route_not_found.into_service()) + .layer(Extension(pool)) + .layer(Extension(slots_per_epoch)); + + let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); + + let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); + + let server = server.with_graceful_shutdown(async { + shutdown.await; + }); + + info!("HTTP server listening on {}", addr); + + Ok(server) +} + +// The default route indicating that no available routes matched the request. 
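Once `start_server` is bound to the default `127.0.0.1:5059` from `server/config.rs`, the routes above can be exercised with any HTTP client. A hedged sketch using `reqwest` (with its `json` feature) and `serde_json`, neither of which is implied by this diff; the JSON shape mirrors `WatchBeaconBlock` but depends on what has been synced:

```rust
// Sketch only: fetch the highest synced block from the HTTP API.
async fn highest_block() -> Result<serde_json::Value, reqwest::Error> {
    reqwest::get("http://127.0.0.1:5059/v1/blocks/highest")
        .await?
        .json::<serde_json::Value>()
        .await
}
```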
+async fn route_not_found(uri: Uri) -> (StatusCode, Json) { + ( + StatusCode::METHOD_NOT_ALLOWED, + Json(ErrorMessage { + code: StatusCode::METHOD_NOT_ALLOWED.as_u16(), + message: format!("No route for {uri}"), + stacktraces: vec![], + }), + ) +} diff --git a/watch/src/suboptimal_attestations/database.rs b/watch/src/suboptimal_attestations/database.rs new file mode 100644 index 00000000000..cb947d250a2 --- /dev/null +++ b/watch/src/suboptimal_attestations/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + schema::{suboptimal_attestations, validators}, + watch_types::{WatchPK, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +use types::Epoch; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct WatchAttestation { + pub index: i32, + pub epoch: Epoch, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchAttestation { + pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation { + WatchAttestation { + index, + epoch, + source: true, + head: true, + target: true, + } + } +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = suboptimal_attestations)] +pub struct WatchSuboptimalAttestation { + pub epoch_start_slot: WatchSlot, + pub index: i32, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchSuboptimalAttestation { + pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation { + WatchAttestation { + index: self.index, + epoch: self.epoch_start_slot.epoch(slots_per_epoch), + source: self.source, + head: self.head, + target: self.target, + } + } +} + +/// Insert a batch of values into the `suboptimal_attestations` table +/// +/// Since attestations technically occur per-slot but we only store them per-epoch (via its +/// `start_slot`) so if any slot in the epoch changes, we need to resync the whole epoch as a +/// 'suboptimal' attestation could now be 'optimal'. +/// +/// This is handled in the update code, where in the case of a re-org, the affected epoch is +/// deleted completely. +/// +/// On a conflict, it will do nothing. +pub fn insert_batch_suboptimal_attestations( + conn: &mut PgConn, + attestations: Vec, +) -> Result<(), Error> { + use self::suboptimal_attestations::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(suboptimal_attestations) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum. +pub fn get_lowest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.asc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum. +pub fn get_highest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.desc()) + .limit(1) + .first::(conn) + .optional()?) 
+} + +/// Selects a single row from the `suboptimal_attestations` table corresponding to a given +/// `index_query` and `epoch_query`. +pub fn get_attestation_by_index( + conn: &mut PgConn, + index_query: i32, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + let timer = Instant::now(); + + let result = suboptimal_attestations + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(index.eq(index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding +/// to a given `pubkey_query` and `epoch_query`. +#[allow(dead_code)] +pub fn get_attestation_by_pubkey( + conn: &mut PgConn, + pubkey_query: WatchPK, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + use self::validators::dsl::{public_key, validators}; + let timer = Instant::now(); + + let join = validators.inner_join(suboptimal_attestations); + + let result = join + .select((epoch_start_slot, index, source, head, target)) + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(public_key.eq(pubkey_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `source == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_source( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(source.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `head == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_head( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(head.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `target == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_target( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(target.eq(false)) + .load::(conn)?) +} + +/// Selects all rows from the `suboptimal_attestations` table for the given +/// `epoch_start_slot_query`. +pub fn get_all_suboptimal_attestations_for_epoch( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .load::(conn)?) 
+} diff --git a/watch/src/suboptimal_attestations/mod.rs b/watch/src/suboptimal_attestations/mod.rs new file mode 100644 index 00000000000..a94532e8ab2 --- /dev/null +++ b/watch/src/suboptimal_attestations/mod.rs @@ -0,0 +1,56 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use server::{attestation_routes, blockprint_attestation_routes}; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/attestation_performance`. +/// Formats the response into a vector of `WatchSuboptimalAttestation`. +/// +/// Any attestations with `source == true && head == true && target == true` are ignored. +pub async fn get_attestation_performances( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + let mut output = Vec::new(); + let result = bn + .get_lighthouse_analysis_attestation_performance( + start_epoch, + end_epoch, + "global".to_string(), + ) + .await?; + for index in result { + for epoch in index.epochs { + if epoch.1.active { + // Check if the attestation is suboptimal. + if !epoch.1.source || !epoch.1.head || !epoch.1.target { + output.push(WatchSuboptimalAttestation { + epoch_start_slot: WatchSlot::from_slot( + Epoch::new(epoch.0).start_slot(slots_per_epoch), + ), + index: index.index as i32, + source: epoch.1.source, + head: epoch.1.head, + target: epoch.1.target, + }) + } + } + } + } + Ok(output) +} diff --git a/watch/src/suboptimal_attestations/server.rs b/watch/src/suboptimal_attestations/server.rs new file mode 100644 index 00000000000..391db9a41b5 --- /dev/null +++ b/watch/src/suboptimal_attestations/server.rs @@ -0,0 +1,299 @@ +use crate::database::{ + get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key, + get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK, + WatchSlot, +}; + +use crate::blockprint::database::construct_validator_blockprints_at_slot; +use crate::server::Error; +use crate::suboptimal_attestations::database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, + get_validators_missed_head, get_validators_missed_source, get_validators_missed_target, + WatchAttestation, WatchSuboptimalAttestation, +}; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use std::collections::{HashMap, HashSet}; +use std::str::FromStr; +use types::Epoch; + +// Will return Ok(None) if the epoch is not synced or if the validator does not exist. +// In the future it might be worth differentiating these events. +pub async fn get_validator_attestation( + Path((validator_query, epoch_query)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + let epoch = Epoch::new(epoch_query); + + // Ensure the database has synced the target epoch. + if get_canonical_slot( + &mut conn, + WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)), + )? + .is_none() + { + // Epoch is not fully synced. 
+ return Ok(Json(None)); + } + + let index = if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + get_validator_by_public_key(&mut conn, pubkey)? + .ok_or(Error::NotFound)? + .index + } else { + i32::from_str(&validator_query).map_err(|_| Error::BadRequest)? + }; + let attestation = if let Some(suboptimal_attestation) = + get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)? + { + Some(suboptimal_attestation.to_attestation(slots_per_epoch)) + } else { + // Attestation was not in database. Check if the validator was active. + match get_validator_by_index(&mut conn, index)? { + Some(validator) => { + if let Some(activation_epoch) = validator.activation_epoch { + if activation_epoch <= epoch.as_u64() as i32 { + if let Some(exit_epoch) = validator.exit_epoch { + if exit_epoch > epoch.as_u64() as i32 { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } else { + // Validator has exited. + None + } + } else { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } + } else { + // Validator is not yet active. + None + } + } else { + // Validator is not yet active. + None + } + } + None => return Err(Error::Other("Validator index does not exist".to_string())), + } + }; + Ok(Json(attestation)) +} + +pub async fn get_all_validators_attestations( + Path(epoch): Path, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + Ok(Json(get_all_suboptimal_attestations_for_epoch( + &mut conn, + epoch_start_slot, + )?)) +} + +pub async fn get_validators_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + match vote.to_lowercase().as_str() { + "source" => Ok(Json(get_validators_missed_source( + &mut conn, + epoch_start_slot, + )?)), + "head" => Ok(Json(get_validators_missed_head( + &mut conn, + epoch_start_slot, + )?)), + "target" => Ok(Json(get_validators_missed_target( + &mut conn, + epoch_start_slot, + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validators_missed_vote_graffiti( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let graffitis = get_validators_latest_proposer_info(&mut conn, indices)? + .values() + .map(|info| info.graffiti.clone()) + .collect::>(); + + let mut result = HashMap::new(); + for graffiti in graffitis { + if !result.contains_key(&graffiti) { + result.insert(graffiti.clone(), 0); + } + *result + .get_mut(&graffiti) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? 
+= 1; + } + + Ok(Json(result)) +} + +pub fn attestation_routes() -> Router { + Router::new() + .route( + "/v1/validators/:validator/attestation/:epoch", + get(get_validator_attestation), + ) + .route( + "/v1/validators/all/attestation/:epoch", + get(get_all_validators_attestations), + ) + .route( + "/v1/validators/missed/:vote/:epoch", + get(get_validators_missed_vote), + ) + .route( + "/v1/validators/missed/:vote/:epoch/graffiti", + get(get_validators_missed_vote_graffiti), + ) +} + +/// The functions below are dependent on Blockprint and if it is disabled, the endpoints will be +/// disabled. +pub async fn get_clients_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + // All validators which missed the vote. + let indices_map = indices.into_iter().collect::>(); + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + // All validators. + let client_map = + construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + + for index in indices_map { + if let Some(print) = client_map.get(&index) { + if !result.contains_key(print) { + result.insert(print.clone(), 0); + } + *result + .get_mut(print) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? += 1; + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool.clone()), + Extension(slots_per_epoch), + ) + .await?; + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + let mut conn = get_connection(&pool)?; + let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + let client_total: f64 = *totals + .get(client) + .ok_or_else(|| Error::Other("Client type mismatch".to_string()))? + as f64; + // `client_total` should never be `0`, but if it is, return `0` instead of `inf`. + if client_total == 0.0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / client_total * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages_relative( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let mut total: u64 = 0; + for (_, count) in clients_counts.iter() { + total += *count + } + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + // `total` should never be 0, but if it is, return `-` instead of `inf`. 
+ if total == 0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub fn blockprint_attestation_routes() -> Router { + Router::new() + .route( + "/v1/clients/missed/:vote/:epoch", + get(get_clients_missed_vote), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages", + get(get_clients_missed_vote_percentages), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages/relative", + get(get_clients_missed_vote_percentages_relative), + ) +} diff --git a/watch/src/suboptimal_attestations/updater.rs b/watch/src/suboptimal_attestations/updater.rs new file mode 100644 index 00000000000..aeabff2035c --- /dev/null +++ b/watch/src/suboptimal_attestations/updater.rs @@ -0,0 +1,236 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::suboptimal_attestations::get_attestation_performances; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `suboptimal_attestations` table starting from the entry with the highest + /// slot. + /// + /// It construts a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot) + /// `end_epoch` -> epoch of highest canonical slot + /// + /// It will resync the latest epoch if it is not fully filled but will not overwrite existing + /// values unless there is a re-org. + /// That is, `if highest_filled_slot % slots_per_epoch != 31`. + /// + /// In the event the most recent epoch has no suboptimal attestations, it will attempt to + /// resync that epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let highest_filled_slot_opt = if self.config.attestations { + database::get_highest_attestation(&mut conn)? + .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No rows present in the `suboptimal_attestations` table. Use `canonical_slots` + // instead. + if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? { + lowest_canonical_slot + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no slots in the database, do not fill the `suboptimal_attestations` + // table. 
+ warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database"); + return Ok(()); + } + }; + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch); + + // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations + // which are more than 1 epoch old. + // We assume that `highest_canonical_slot` is near the head of the chain. + end_epoch = end_epoch.saturating_sub(2_u64); + + // If end_epoch == 0 then the chain just started so we need to wait until + // `current_epoch >= 2`. + if end_epoch == 0 { + debug!("Chain just begun, refusing to sync attestations"); + return Ok(()); + } + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert attestations with corresponding `canonical_slot`s. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest canonical slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slots` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> epoch of the lowest `canonical_slot`. + /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest + /// canonical slot) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// + /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to + /// resync the epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_attestation_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `suboptimal_attestations` table. + let lowest_filled_slot_opt = if self.config.attestations { + database::get_lowest_attestation(&mut conn)? 
+ .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot % self.slots_per_epoch == 0 { + lowest_filled_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead. + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + // Subtract 2 since `end_epoch` must be less than the current epoch - 1. + // We assume that `highest_canonical_slot` is near the head of the chain. + highest_canonical_slot + .epoch(self.slots_per_epoch) + .saturating_sub(2_u64) + } else { + // There are no slots in the database, do not backfill the + // `suboptimal_attestations` table. + warn!("Refusing to backfill attestations as there are no slots in the database"); + return Ok(()); + } + }; + + if end_epoch == 0 { + debug!("Attestations backfill is complete"); + return Ok(()); + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the base of the database"); + return Ok(()); + } + + // Ensure the request range does not exceed `max_attestation_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) { + start_epoch = end_epoch.saturating_sub(max_attestation_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) + } + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert `suboptimal_attestations` with corresponding `canonical_slots`. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slot` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/updater/config.rs b/watch/src/updater/config.rs new file mode 100644 index 00000000000..0179be73db6 --- /dev/null +++ b/watch/src/updater/config.rs @@ -0,0 +1,65 @@ +use serde::{Deserialize, Serialize}; + +pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052"; + +pub const fn max_backfill_size_epochs() -> u64 { + 2 +} +pub const fn backfill_stop_epoch() -> u64 { + 0 +} +pub const fn attestations() -> bool { + true +} +pub const fn proposer_info() -> bool { + true +} +pub const fn block_rewards() -> bool { + true +} +pub const fn block_packing() -> bool { + true +} + +fn beacon_node_url() -> String { + BEACON_NODE_URL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// The URL of the beacon you wish to sync from. + #[serde(default = "beacon_node_url")] + pub beacon_node_url: String, + /// The maximum size each backfill iteration will allow per request (in epochs). + #[serde(default = "max_backfill_size_epochs")] + pub max_backfill_size_epochs: u64, + /// The epoch at which to never backfill past. + #[serde(default = "backfill_stop_epoch")] + pub backfill_stop_epoch: u64, + /// Whether to sync the suboptimal_attestations table. + #[serde(default = "attestations")] + pub attestations: bool, + /// Whether to sync the proposer_info table. + #[serde(default = "proposer_info")] + pub proposer_info: bool, + /// Whether to sync the block_rewards table. + #[serde(default = "block_rewards")] + pub block_rewards: bool, + /// Whether to sync the block_packing table. + #[serde(default = "block_packing")] + pub block_packing: bool, +} + +impl Default for Config { + fn default() -> Self { + Self { + beacon_node_url: beacon_node_url(), + max_backfill_size_epochs: max_backfill_size_epochs(), + backfill_stop_epoch: backfill_stop_epoch(), + attestations: attestations(), + proposer_info: proposer_info(), + block_rewards: block_rewards(), + block_packing: block_packing(), + } + } +} diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs new file mode 100644 index 00000000000..74091c8f217 --- /dev/null +++ b/watch/src/updater/error.rs @@ -0,0 +1,56 @@ +use crate::blockprint::Error as BlockprintError; +use crate::database::Error as DbError; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{Error as Eth2Error, SensitiveError}; +use std::fmt; + +#[derive(Debug)] +pub enum Error { + BeaconChain(BeaconChainError), + Eth2(Eth2Error), + SensitiveUrl(SensitiveError), + Database(DbError), + Blockprint(BlockprintError), + UnableToGetRemoteHead, + BeaconNodeSyncing, + NotEnabled(String), + NoValidatorsFound, + BeaconNodeNotCompatible(String), + InvalidConfig(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChain(e) + } +} + +impl From for Error { + fn from(e: Eth2Error) -> Self { + Error::Eth2(e) + } +} + +impl From for Error { + fn from(e: SensitiveError) -> Self { + Error::SensitiveUrl(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: BlockprintError) -> Self { + Error::Blockprint(e) + } +} diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs new file mode 100644 index 00000000000..1e1662bf749 --- /dev/null +++ b/watch/src/updater/handler.rs @@ -0,0 
+1,471 @@ +use crate::blockprint::WatchBlockprintClient; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot}; +use crate::updater::{Config, Error, WatchSpec}; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{ + types::{BlockId, SyncingData}, + BeaconNodeHttpClient, SensitiveUrl, +}; +use log::{debug, error, info, warn}; +use std::collections::HashSet; +use std::iter::FromIterator; +use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +use crate::updater::{get_beacon_block, get_header, get_validators}; + +const MAX_EXPECTED_REORG_LENGTH: u64 = 32; + +/// Ensure the existing database is valid for this run. +pub async fn ensure_valid_database<T: EthSpec>( + spec: &WatchSpec<T>, + pool: &mut PgPool, +) -> Result<(), Error> { + let mut conn = database::get_connection(pool)?; + + let bn_slots_per_epoch = spec.slots_per_epoch(); + let bn_config_name = spec.network.clone(); + + if let Some((db_config_name, db_slots_per_epoch)) = database::get_active_config(&mut conn)? { + if db_config_name != bn_config_name || db_slots_per_epoch != bn_slots_per_epoch as i32 { + Err(Error::InvalidConfig( + "The config stored in the database does not match the beacon node.".to_string(), + )) + } else { + // Configs match. + Ok(()) + } + } else { + // No config exists in the DB. + database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?; + Ok(()) + } +} + +pub struct UpdateHandler<T: EthSpec> { + pub pool: PgPool, + pub bn: BeaconNodeHttpClient, + pub blockprint: Option<WatchBlockprintClient>, + pub config: Config, + pub slots_per_epoch: u64, + pub spec: WatchSpec<T>, +} + +impl<T: EthSpec> UpdateHandler<T> { + pub async fn new( + bn: BeaconNodeHttpClient, + spec: WatchSpec<T>, + config: FullConfig, + ) -> Result<UpdateHandler<T>, Error> { + let blockprint = if config.blockprint.enabled { + if let Some(server) = config.blockprint.url { + let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?; + Some(WatchBlockprintClient { + client: reqwest::Client::new(), + server: blockprint_url, + username: config.blockprint.username, + password: config.blockprint.password, + }) + } else { + return Err(Error::NotEnabled( + "blockprint was enabled but url was not set".to_string(), + )); + } + } else { + None + }; + + let mut pool = database::build_connection_pool(&config.database)?; + + ensure_valid_database(&spec, &mut pool).await?; + + Ok(Self { + pool, + bn, + blockprint, + config: config.updater, + slots_per_epoch: spec.slots_per_epoch(), + spec, + }) + } + + /// Gets the syncing status of the connected beacon node. + pub async fn get_bn_syncing_status(&mut self) -> Result<SyncingData, Error> { + Ok(self.bn.get_node_syncing().await?.data) + } + + /// Gets a list of block roots from the database which do not yet contain a corresponding + /// entry in the `beacon_blocks` table and inserts them. + pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let roots = database::get_unknown_canonical_blocks(&mut conn)?; + for root in roots { + let block_opt: Option<SignedBeaconBlock<T>> = + get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?; + if let Some(block) = block_opt { + database::insert_beacon_block(&mut conn, block, root)?; + } + } + + Ok(()) + } + + /// Performs a head update with the following steps: + /// 1. Pull the latest header from the beacon node and the latest canonical slot from the + /// database. + /// 2. Loop back through the beacon node and database to find the first matching slot -> root + /// pair.
+ /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is + /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. + /// 4. Remove any invalid slots from the database. + /// 5. Sync all blocks between the first valid block of the database and the head of the beacon + /// chain. + /// + /// In the event there are no slots present in the database, it will sync from the head block + /// back to the first slot of the epoch. + /// This will ensure backfills are always done in full epochs (which helps keep certain syncing + /// tasks efficient). + pub async fn perform_head_update(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + // Load the head from the beacon node. + let bn_header = get_header(&self.bn, BlockId::Head) + .await? + .ok_or(Error::UnableToGetRemoteHead)?; + let header_root = bn_header.canonical_root(); + + if let Some(latest_matching_canonical_slot) = + self.get_first_matching_block(bn_header.clone()).await? + { + // Check for reorgs. + let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?; + + // Remove all slots above `latest_db_slot` from the database. + let result = database::delete_canonical_slots_above( + &mut conn, + WatchSlot::from_slot(latest_db_slot), + )?; + info!("{result} old records removed during head update"); + + if result > 0 { + // If slots were removed, we need to resync the suboptimal_attestations table for + // the epoch since they will have changed and cannot be fixed by a simple update. + let epoch = latest_db_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64); + debug!("Preparing to resync attestations above epoch {epoch}"); + database::delete_suboptimal_attestations_above( + &mut conn, + WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)), + )?; + } + + // Since we are syncing backwards, `start_slot` > `end_slot`. + let start_slot = bn_header.slot; + let end_slot = latest_db_slot + 1; + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + + // Attempt to sync new blocks with blockprint. + //self.sync_blockprint_until(start_slot).await?; + } else { + // There are no matching parent blocks. Sync from the head block back until the first + // block of the epoch. + let start_slot = bn_header.slot; + let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch); + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + } + + Ok(()) + } + + /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root` of + /// the block header as reported by the beacon node. + /// + /// Any blocks above this value are not canonical according to the beacon node. + /// + /// Note: In the event that there are skip slots above the slot returned by the function, + /// they will not be returned, so may be pruned or re-synced by other code despite being + /// canonical. + pub async fn get_first_matching_block( + &mut self, + mut bn_header: BeaconBlockHeader, + ) -> Result<Option<WatchCanonicalSlot>, Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Load latest non-skipped canonical slot from database. + if let Some(db_canonical_slot) = + database::get_highest_non_skipped_canonical_slot(&mut conn)?
+ { + // Check if the header or parent root matches the entry in the database. + if bn_header.parent_root == db_canonical_slot.root.as_hash() + || bn_header.canonical_root() == db_canonical_slot.root.as_hash() + { + Ok(Some(db_canonical_slot)) + } else { + // Header is not the child of the highest entry in the database. + // From here we need to iterate backwards through the database until we find + // a slot -> root pair that matches the beacon node. + loop { + // Store working `parent_root`. + let parent_root = bn_header.parent_root; + + // Try the next header. + let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?; + if let Some(header) = next_header { + bn_header = header.clone(); + if let Some(db_canonical_slot) = database::get_canonical_slot_by_root( + &mut conn, + WatchHash::from_hash(header.parent_root), + )? { + // Check if the entry in the database matches the parent of + // the header. + if header.parent_root == db_canonical_slot.root.as_hash() { + return Ok(Some(db_canonical_slot)); + } else { + // Move on to the next header. + continue; + } + } else { + // Database does not have the referenced root. Try the next header. + continue; + } + } else { + // If we get this error it means that the `parent_root` of the header + // did not reference a canonical block. + return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock( + parent_root, + ))); + } + } + } + } else { + // There are no non-skipped blocks present in the database. + Ok(None) + } + } + + /// Given the latest slot in the database which matches a root in the beacon node, + /// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the tip + /// of the database is consistent with the beacon node (in the case that reorgs have occured). + /// + /// Returns the slot before the oldest canonical_slot which has an invalid child. + pub async fn check_for_reorg( + &mut self, + latest_canonical_slot: WatchCanonicalSlot, + ) -> Result { + let mut conn = database::get_connection(&self.pool)?; + + let end_slot = latest_canonical_slot.slot.as_u64(); + let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH); + + for i in start_slot..end_slot { + let slot = Slot::new(i); + let db_canonical_slot_opt = + database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?; + if let Some(db_canonical_slot) = db_canonical_slot_opt { + let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?; + if let Some(header) = header_opt { + if header.canonical_root() == db_canonical_slot.root.as_hash() { + // The roots match (or are both skip slots). + continue; + } else { + // The block roots do not match. We need to re-sync from here. + warn!("Block {slot} does not match the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else if !db_canonical_slot.skipped { + // The block exists in the database, but does not exist on the beacon node. + // We need to re-sync from here. + warn!("Block {slot} does not exist on the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else { + // This slot does not exist in the database. + let lowest_slot = database::get_lowest_canonical_slot(&mut conn)? + .map(|canonical_slot| canonical_slot.slot.as_slot()); + if lowest_slot > Some(slot) { + // The database has not back-filled this slot yet, so skip it. + continue; + } else { + // The database does not contain this block, but has back-filled past it. + // We need to resync from here. + warn!("Slot {slot} missing from database. 
Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } + } + + // The database is consistent with the beacon node, so return the head of the database. + Ok(latest_canonical_slot.slot.as_slot()) + } + + /// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`. + /// It fills in reverse order, that is, `start_slot` is higher than `end_slot`. + /// + /// Skip slots set `root` to the root of the previous non-skipped slot and also sets + /// `skipped == true`. + /// + /// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite + /// existing rows. This means that any part of the chain within `end_slot..=start_slot` that + /// needs to be resynced, must first be deleted from the database. + pub async fn reverse_fill_canonical_slots( + &mut self, + mut header: BeaconBlockHeader, + mut header_root: Hash256, + mut skipped: bool, + start_slot: Slot, + end_slot: Slot, + ) -> Result { + let mut count = 0; + + let mut conn = database::get_connection(&self.pool)?; + + // Iterate, descending from `start_slot` (higher) to `end_slot` (lower). + for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() { + // Insert header. + database::insert_canonical_slot( + &mut conn, + WatchCanonicalSlot { + slot: WatchSlot::new(slot), + root: WatchHash::from_hash(header_root), + skipped, + beacon_block: None, + }, + )?; + count += 1; + + // Load the next header: + // We must use BlockId::Slot since we want to include skip slots. + header = if let Some(new_header) = get_header( + &self.bn, + BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))), + ) + .await? + { + header_root = new_header.canonical_root(); + skipped = false; + new_header + } else { + if header.slot == 0 { + info!("Reverse fill exhausted at slot 0"); + break; + } + // Slot was skipped, so use the parent_root (most recent non-skipped block). + skipped = true; + header_root = header.parent_root; + header + }; + } + + Ok(count) + } + + /// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and + /// stopping after `max_backfill_size_epochs` epochs. + pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch; + // Check to see if we have finished backfilling. + if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? { + if lowest_slot.slot.as_slot() == backfill_stop_slot { + debug!("Backfill sync complete, all slots filled"); + return Ok(()); + } + } + + let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + if let Some(lowest_non_skipped_canonical_slot) = + database::get_lowest_non_skipped_canonical_slot(&mut conn)? + { + // Set `start_slot` equal to the lowest non-skipped slot in the database. + // While this will attempt to resync some parts of the bottom of the chain, it reduces + // complexity when dealing with skip slots. 
+ let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot(); + let mut end_slot = lowest_non_skipped_canonical_slot + .slot + .as_slot() + .saturating_sub(backfill_slot_count); + + // Ensure end_slot doesn't go below `backfill_stop_epoch` + if end_slot <= backfill_stop_slot { + end_slot = Slot::new(backfill_stop_slot); + } + + let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?; + + if let Some(header) = header_opt { + let header_root = header.canonical_root(); + let count = self + .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot) + .await?; + + info!("Backfill completed to slot: {end_slot}, records added: {count}"); + } else { + // The lowest slot of the database is inconsistent with the beacon node. + // Currently we have no way to recover from this. The entire database will need to + // be re-synced. + error!( + "Database is inconsistent with the beacon node. \ + Please ensure your beacon node is set to the right network, \ + otherwise you may need to resync" + ); + } + } else { + // There are no blocks in the database. Forward sync needs to happen first. + info!("Backfill was not performed since there are no blocks in the database"); + return Ok(()); + }; + + Ok(()) + } + + // Attempt to update the validator set. + // This downloads the latest validator set from the beacon node, and pulls the known validator + // set from the database. + // We then take any new or updated validators and insert them into the database (overwriting + // exiting validators). + // + // In the event there are no validators in the database, it will initialize the validator set. + pub async fn update_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let current_validators = database::get_all_validators(&mut conn)?; + + if !current_validators.is_empty() { + let old_validators = HashSet::from_iter(current_validators); + + // Pull the new validator set from the beacon node. + let new_validators = get_validators(&self.bn).await?; + + // The difference should only contain validators that contain either a new `exit_epoch` (implying an + // exit) or a new `index` (implying a validator activation). + let val_diff = new_validators.difference(&old_validators); + + for diff in val_diff { + database::insert_validator(&mut conn, diff.clone())?; + } + } else { + info!("No validators present in database. Initializing the validator set"); + self.initialize_validator_set().await?; + } + + Ok(()) + } + + // Initialize the validator set by downloading it from the beacon node, inserting blockprint + // data (if required) and writing it to the database. + pub async fn initialize_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Pull all validators from the beacon node. 
+ let validators = Vec::from_iter(get_validators(&self.bn).await?); + + database::insert_batch_validators(&mut conn, validators)?; + + Ok(()) + } +} diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs new file mode 100644 index 00000000000..1fbb0107aef --- /dev/null +++ b/watch/src/updater/mod.rs @@ -0,0 +1,234 @@ +use crate::config::Config as FullConfig; +use crate::database::{WatchPK, WatchValidator}; +use eth2::{ + types::{BlockId, StateId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; +use log::{debug, error, info}; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; +use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock}; + +pub use config::Config; +pub use error::Error; +pub use handler::UpdateHandler; + +mod config; +pub mod error; +pub mod handler; + +const FAR_FUTURE_EPOCH: u64 = u64::MAX; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +const MAINNET: &str = "mainnet"; +const GNOSIS: &str = "gnosis"; + +pub struct WatchSpec<T: EthSpec> { + network: String, + spec: PhantomData<T>, +} + +impl<T: EthSpec> WatchSpec<T> { + fn slots_per_epoch(&self) -> u64 { + T::slots_per_epoch() + } +} + +impl WatchSpec<MainnetEthSpec> { + pub fn mainnet(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +impl WatchSpec<GnosisEthSpec> { + fn gnosis(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +pub async fn run_updater(config: FullConfig) -> Result<(), Error> { + let beacon_node_url = + SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?; + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + + let config_map = bn.get_config_spec::<HashMap<String, String>>().await?.data; + + let config_name = config_map + .get("CONFIG_NAME") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string()) + })? + .clone(); + + match config_map + .get("PRESET_BASE") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string()) + })?
+ .to_lowercase() + .as_str() + { + MAINNET => { + let spec = WatchSpec::mainnet(config_name); + run_once(bn, spec, config).await + } + GNOSIS => { + let spec = WatchSpec::gnosis(config_name); + run_once(bn, spec, config).await + } + _ => unimplemented!("unsupported PRESET_BASE"), + } +} + +pub async fn run_once( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, +) -> Result<(), Error> { + let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?; + + let sync_data = watch.get_bn_syncing_status().await?; + if sync_data.is_syncing { + error!( + "Connected beacon node is still syncing: head_slot => {:?}, distance => {}", + sync_data.head_slot, sync_data.sync_distance + ); + return Err(Error::BeaconNodeSyncing); + } + + info!("Performing head update"); + let head_timer = Instant::now(); + watch.perform_head_update().await?; + let head_timer_elapsed = head_timer.elapsed(); + debug!("Head update complete, time taken: {head_timer_elapsed:?}"); + + info!("Performing block backfill"); + let block_backfill_timer = Instant::now(); + watch.backfill_canonical_slots().await?; + let block_backfill_timer_elapsed = block_backfill_timer.elapsed(); + debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}"); + + info!("Updating validator set"); + let validator_timer = Instant::now(); + watch.update_validator_set().await?; + let validator_timer_elapsed = validator_timer.elapsed(); + debug!("Validator update complete, time taken: {validator_timer_elapsed:?}"); + + // Update blocks after updating the validator set since the `proposer_index` must exist in the + // `validators` table. + info!("Updating unknown blocks"); + let unknown_block_timer = Instant::now(); + watch.update_unknown_blocks().await?; + let unknown_block_timer_elapsed = unknown_block_timer.elapsed(); + debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}"); + + // Run additional modules + if config.updater.attestations { + info!("Updating suboptimal attestations"); + let attestation_timer = Instant::now(); + watch.fill_suboptimal_attestations().await?; + watch.backfill_suboptimal_attestations().await?; + let attestation_timer_elapsed = attestation_timer.elapsed(); + debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}"); + } + + if config.updater.block_rewards { + info!("Updating block rewards"); + let rewards_timer = Instant::now(); + watch.fill_block_rewards().await?; + watch.backfill_block_rewards().await?; + let rewards_timer_elapsed = rewards_timer.elapsed(); + debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}"); + } + + if config.updater.block_packing { + info!("Updating block packing statistics"); + let packing_timer = Instant::now(); + watch.fill_block_packing().await?; + watch.backfill_block_packing().await?; + let packing_timer_elapsed = packing_timer.elapsed(); + debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}"); + } + + if config.blockprint.enabled { + info!("Updating blockprint"); + let blockprint_timer = Instant::now(); + watch.fill_blockprint().await?; + watch.backfill_blockprint().await?; + let blockprint_timer_elapsed = blockprint_timer.elapsed(); + debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}"); + } + + Ok(()) +} + +/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it exists. 
+pub async fn get_header( + bn: &BeaconNodeHttpClient, + block_id: BlockId, +) -> Result<Option<BeaconBlockHeader>, Error> { + let resp = bn + .get_beacon_headers_block_id(block_id) + .await? + .map(|resp| (resp.data.root, resp.data.header.message)); + // When querying with root == 0x000... , slot 0 will be returned with parent_root == 0x0000... + // This check escapes the loop. + if let Some((root, header)) = resp { + if root == header.parent_root { + return Ok(None); + } else { + return Ok(Some(header)); + } + } + Ok(None) +} + +pub async fn get_beacon_block<T: EthSpec>( + bn: &BeaconNodeHttpClient, + block_id: BlockId, +) -> Result<Option<SignedBeaconBlock<T>>, Error> { + let block = bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data); + + Ok(block) +} + +/// Queries the beacon node for the current validator set. +pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result<HashSet<WatchValidator>, Error> { + let mut validator_map = HashSet::new(); + + let validators = bn + .get_beacon_states_validators(StateId::Head, None, None) + .await? + .ok_or(Error::NoValidatorsFound)? + .data; + + for val in validators { + // Only store `activation_epoch` if it is not the `FAR_FUTURE_EPOCH`. + let activation_epoch = if val.validator.activation_epoch.as_u64() == FAR_FUTURE_EPOCH { + None + } else { + Some(val.validator.activation_epoch.as_u64() as i32) + }; + // Only store `exit_epoch` if it is not the `FAR_FUTURE_EPOCH`. + let exit_epoch = if val.validator.exit_epoch.as_u64() == FAR_FUTURE_EPOCH { + None + } else { + Some(val.validator.exit_epoch.as_u64() as i32) + }; + validator_map.insert(WatchValidator { + index: val.index as i32, + public_key: WatchPK::from_pubkey(val.validator.pubkey), + status: val.status.to_string(), + activation_epoch, + exit_epoch, + }); + } + Ok(validator_map) +} diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs new file mode 100644 index 00000000000..acdda8c306a --- /dev/null +++ b/watch/tests/tests.rs @@ -0,0 +1,1254 @@ +#![recursion_limit = "256"] +#![cfg(unix)] + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +}; +use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use http_api::test_utils::{create_api_server, ApiServer}; +use network::NetworkReceivers; + +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use tokio::sync::oneshot; +use types::{Hash256, MainnetEthSpec, Slot}; +use url::Url; +use watch::{ + client::WatchHttpClient, + config::Config, + database::{self, Config as DatabaseConfig, PgPool, WatchSlot}, + server::{start_server, Config as ServerConfig}, + updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, +}; + +use log::error; +use std::net::SocketAddr; +use std::time::Duration; +use tokio::{runtime, task::JoinHandle}; +use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; +use unused_port::unused_tcp4_port; + +use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; + +type E = MainnetEthSpec; + +const VALIDATOR_COUNT: usize = 32; +const SLOTS_PER_EPOCH: u64 = 32; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +fn build_test_config(config: &DatabaseConfig) -> PostgresConfig { + let mut postgres_config = PostgresConfig::new(); + postgres_config + .user(&config.user) + .password(&config.password) + .dbname(&config.default_dbname) + .host(&config.host) + .port(config.port) + .connect_timeout(Duration::from_millis(config.connect_timeout_millis)); + postgres_config +} + +async fn connect(config: &DatabaseConfig) -> (Client, JoinHandle<()>) { + let
db_config = build_test_config(config); + let (client, conn) = db_config + .connect(NoTls) + .await + .expect("Could not connect to db"); + let connection = runtime::Handle::current().spawn(async move { + if let Err(e) = conn.await { + error!("Connection error {:?}", e); + } + }); + + (client, connection) +} + +pub async fn create_test_database(config: &DatabaseConfig) { + let (db, _) = connect(config).await; + + db.execute(&format!("CREATE DATABASE {};", config.dbname), &[]) + .await + .expect("Database creation failed"); +} + +struct TesterBuilder { + pub harness: BeaconChainHarness>, + pub config: Config, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, +} + +impl TesterBuilder { + pub async fn new() -> TesterBuilder { + let harness = BeaconChainHarness::builder(E::default()) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(); + + /* + * Spawn a Beacon Node HTTP API. + */ + let ApiServer { + server, + listening_socket: bn_api_listening_socket, + shutdown_tx: _bn_api_shutdown_tx, + network_rx: _bn_network_rx, + .. + } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; + tokio::spawn(server); + + /* + * Create a watch configuration + */ + let database_port = unused_tcp4_port().expect("Unable to find unused port."); + let server_port = unused_tcp4_port().expect("Unable to find unused port."); + let config = Config { + database: DatabaseConfig { + dbname: random_dbname(), + port: database_port, + ..Default::default() + }, + server: ServerConfig { + listen_port: server_port, + ..Default::default() + }, + updater: UpdaterConfig { + beacon_node_url: format!( + "http://{}:{}", + bn_api_listening_socket.ip(), + bn_api_listening_socket.port() + ), + ..Default::default() + }, + ..Default::default() + }; + + Self { + harness, + config, + _bn_network_rx, + _bn_api_shutdown_tx, + } + } + pub async fn build(self, pool: PgPool) -> Tester { + /* + * Spawn a Watch HTTP API. + */ + let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { + let _ = watch_shutdown_rx.await; + }) + .unwrap(); + tokio::spawn(watch_server); + + let addr = SocketAddr::new( + self.config.server.listen_addr, + self.config.server.listen_port, + ); + + /* + * Create a HTTP client to talk to the watch HTTP API. + */ + let client = WatchHttpClient { + client: reqwest::Client::new(), + server: Url::parse(&format!("http://{}:{}", addr.ip(), addr.port())).unwrap(), + }; + + /* + * Create a HTTP client to talk to the Beacon Node API. 
+ */ + let beacon_node_url = SensitiveUrl::parse(&self.config.updater.beacon_node_url).unwrap(); + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + let spec = WatchSpec::mainnet("mainnet".to_string()); + + /* + * Build update service + */ + let updater = UpdateHandler::new(bn, spec, self.config.clone()) + .await + .unwrap(); + + Tester { + harness: self.harness, + client, + config: self.config, + updater, + _bn_network_rx: self._bn_network_rx, + _bn_api_shutdown_tx: self._bn_api_shutdown_tx, + _watch_shutdown_tx, + } + } + async fn initialize_database(&self) -> PgPool { + create_test_database(&self.config.database).await; + database::utils::run_migrations(&self.config.database); + database::build_connection_pool(&self.config.database) + .expect("Could not build connection pool") + } +} + +struct Tester { + pub harness: BeaconChainHarness>, + pub client: WatchHttpClient, + pub config: Config, + pub updater: UpdateHandler, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, + _watch_shutdown_tx: oneshot::Sender<()>, +} + +impl Tester { + /// Extend the chain on the beacon chain harness. Do not update the beacon watch database. + pub async fn extend_chain(&mut self, num_blocks: u64) -> &mut Self { + self.harness.advance_slot(); + self.harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + // Advance the slot clock without a block. This results in a skipped slot. + pub fn skip_slot(&mut self) -> &mut Self { + self.harness.advance_slot(); + self + } + + // Perform a single slot re-org. + pub async fn reorg_chain(&mut self) -> &mut Self { + let previous_slot = self.harness.get_current_slot(); + self.harness.advance_slot(); + let first_slot = self.harness.get_current_slot(); + self.harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot, + first_slot, + }, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + /// Run the watch updater service. 
+ pub async fn run_update_service(&mut self, num_runs: usize) -> &mut Self { + for _ in 0..num_runs { + run_updater(self.config.clone()).await.unwrap(); + } + self + } + + pub async fn perform_head_update(&mut self) -> &mut Self { + self.updater.perform_head_update().await.unwrap(); + self + } + + pub async fn perform_backfill(&mut self) -> &mut Self { + self.updater.backfill_canonical_slots().await.unwrap(); + self + } + + pub async fn update_unknown_blocks(&mut self) -> &mut Self { + self.updater.update_unknown_blocks().await.unwrap(); + self + } + + pub async fn update_validator_set(&mut self) -> &mut Self { + self.updater.update_validator_set().await.unwrap(); + self + } + + pub async fn fill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater.fill_suboptimal_attestations().await.unwrap(); + + self + } + + pub async fn backfill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater + .backfill_suboptimal_attestations() + .await + .unwrap(); + + self + } + + pub async fn fill_block_rewards(&mut self) -> &mut Self { + self.updater.fill_block_rewards().await.unwrap(); + + self + } + + pub async fn backfill_block_rewards(&mut self) -> &mut Self { + self.updater.backfill_block_rewards().await.unwrap(); + + self + } + + pub async fn fill_block_packing(&mut self) -> &mut Self { + self.updater.fill_block_packing().await.unwrap(); + + self + } + + pub async fn backfill_block_packing(&mut self) -> &mut Self { + self.updater.backfill_block_packing().await.unwrap(); + + self + } + + pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self { + let lowest_slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .map(|slot| slot.slot.as_slot()); + + assert_eq!(lowest_slot, None); + + self + } + + pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_highest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self { + self.client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self { + assert!(self + .client + .get_beacon_blocks(BlockId::Slot(Slot::new(slot))) + .await + .unwrap() + .is_none()); + self + } + + pub async fn assert_all_validators_exist(&mut self) -> &mut Self { + assert_eq!( + self.client + .get_all_validators() + .await + .unwrap() + .unwrap() + .len(), + VALIDATOR_COUNT + ); + self + } + + pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + 
.unwrap() + .unwrap(); + + self + } + + pub async fn assert_lowest_block_has_block_rewards(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_rewards(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_lowest_block_has_block_packing(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + while block.slot.as_slot() <= SLOTS_PER_EPOCH { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_packing(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + /// Check that the canonical chain in watch matches that of the harness. Also check that all + /// canonical blocks can be retrieved. + pub async fn assert_canonical_chain_consistent(&mut self, last_slot: u64) -> &mut Self { + let head_root = self.harness.chain.head_beacon_block_root(); + let mut chain: Vec<(Hash256, Slot)> = self + .harness + .chain + .rev_iter_block_roots_from(head_root) + .unwrap() + .map(Result::unwrap) + .collect(); + + // `chain` contains skip slots, but the `watch` API will not return blocks that do not + // exist. + // We need to filter them out. + chain.reverse(); + chain.dedup_by(|(hash1, _), (hash2, _)| hash1 == hash2); + + // Remove any slots below `last_slot` since it is known that the database has not + // backfilled past it. + chain.retain(|(_, slot)| slot.as_u64() >= last_slot); + + for (root, slot) in &chain { + let block = self + .client + .get_beacon_blocks(BlockId::Root(*root)) + .await + .unwrap() + .unwrap(); + assert_eq!(block.slot.as_slot(), *slot); + } + + self + } + + /// Check that every block in the `beacon_blocks` table has corresponding entries in the + /// `proposer_info`, `block_rewards` and `block_packing` tables. 
+ pub async fn assert_all_blocks_have_metadata(&mut self) -> &mut Self { + let pool = database::build_connection_pool(&self.config.database).unwrap(); + + let mut conn = database::get_connection(&pool).unwrap(); + let highest_block_slot = database::get_highest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + let lowest_block_slot = database::get_lowest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + for slot in lowest_block_slot.as_u64()..=highest_block_slot.as_u64() { + let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + if !canonical_slot.skipped { + database::get_block_rewards_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_proposer_info_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_block_packing_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + } + } + + self + } +} + +pub fn random_dbname() -> String { + let mut s: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + // Postgres gets weird about capitals in database names. + s.make_ascii_lowercase(); + format!("test_{}", s) +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(16) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_sync_starts_on_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .skip_slot() + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .extend_chain(1) + .await + .run_update_service(1) + .await + 
.assert_all_validators_exist() + .await + .assert_highest_canonical_slot(7) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_reorg() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .reorg_chain() + .await + .extend_chain(1) + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_highest_canonical_slot(8) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. + tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + // Apply four blocks to the chain. + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. 
+ .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata_and_multiple_skip_slots() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. + tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + // And also backfill to the epoch boundary. + .await + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Add multiple skip slots. + .skip_slot() + .skip_slot() + .skip_slot() + // Apply one block to the chain. 
+ .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(8) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(10) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_to_second_epoch() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(40) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(40) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(32) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(40) + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(43) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Update new block_packing + // Backfill before forward fill to ensure order is arbitrary + .backfill_block_packing() + .await + .fill_block_packing() + .await + // All packings should be present. 
+ .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn large_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(400) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(400) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(384) + .await + // Backfill 2 epochs as per default config. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(384) + .await + // Get block rewards and proposer info. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // Should have backfilled 2 more epochs. + .assert_lowest_canonical_slot(320) + .await + .assert_highest_canonical_slot(400) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + .perform_backfill() + .await + // Should have backfilled 2 more epochs + .assert_lowest_canonical_slot(256) + .await + .assert_highest_canonical_slot(403) + .await + // Update validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Get suboptimal attestations. + .fill_suboptimal_attestations() + .await + .backfill_suboptimal_attestations() + .await + // Get block rewards and proposer info. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // Get block packing. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_packing() + .await + .fill_block_packing() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(256) + .await + // Check every block has rewards, proposer info and packing statistics. + .assert_all_blocks_have_metadata() + .await; +}
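As a usage illustration only, and not part of the patch above, the following minimal sketch shows how the new `watch::updater::run_updater` entry point and the serde-defaulted `watch::config::Config` might be driven in a simple polling loop. The `#[tokio::main]` runtime attribute, the 30-second interval, and the use of `eprintln!` for error reporting are assumptions made for this sketch; the crate's own binary may wire things up differently.

use std::time::Duration;
use watch::config::Config as FullConfig;
use watch::updater::run_updater;

#[tokio::main]
async fn main() {
    // Every field of the config has a serde default (e.g. beacon_node_url falls back to
    // http://127.0.0.1:5052), so a default config is enough for a local beacon node.
    let config = FullConfig::default();

    loop {
        // One full pass: head update, backfill, validator set, then per-block metadata.
        // It returns an error if, for example, the beacon node is still syncing; in that
        // case we simply retry on the next tick.
        if let Err(e) = run_updater(config.clone()).await {
            eprintln!("watch updater pass failed: {e}");
        }
        tokio::time::sleep(Duration::from_secs(30)).await;
    }
}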