diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index e6b75ea7b17..e3342ac3705 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -58,8 +58,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in release run: make test-release release-tests-windows: @@ -78,9 +78,8 @@ jobs: run: | choco install python protoc visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - - name: Install anvil - # Extra feature to work around https://github.com/foundry-rs/foundry/issues/5115 - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil --features ethers/ipc + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 @@ -141,8 +140,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in debug run: make test-debug state-transition-vectors-ubuntu: @@ -197,8 +196,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim merge-transition-ubuntu: @@ -213,8 +212,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: @@ -243,8 +242,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: diff --git a/Cargo.lock b/Cargo.lock index fc73c845caa..f8895db6db4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,6 +245,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "anvil-rpc" +version = "0.1.0" +source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "anyhow" version = "1.0.71" @@ -503,9 +512,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", @@ -521,22 +530,23 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite 0.2.9", + "rustversion", "serde", "serde_json", + "serde_path_to_error", "serde_urlencoded", "sync_wrapper", "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -544,6 +554,7 @@ dependencies = [ "http", "http-body", "mime", + "rustversion", "tower-layer", "tower-service", ] @@ -602,7 +613,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9" dependencies = [ "ethereum-consensus", "http", @@ -629,6 +640,7 @@ dependencies = [ "eth1", "eth2", "ethereum_hashing", + "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", @@ -710,6 +722,30 @@ dependencies = [ "unused_port", ] +[[package]] +name = "beacon_processor" +version = "0.1.0" +dependencies = [ + "derivative", + "ethereum_ssz", + "fnv", + "futures", + "hex", + "itertools", + "lazy_static", + "lighthouse_metrics", + "lighthouse_network", + "logging", + "parking_lot 0.12.1", + "slog", + "slot_clock", + "strum", + "task_executor", + "tokio", + "tokio-util 0.6.10", + "types", +] + [[package]] name = "bincode" version = "1.3.3" @@ -1191,6 +1227,7 @@ name = "client" version = "0.2.0" dependencies = [ "beacon_chain", + "beacon_processor", "directory", "dirs", "environment", @@ -1208,6 +1245,7 @@ dependencies = [ "logging", "monitoring_api", "network", + "num_cpus", "operation_pool", "parking_lot 0.12.1", "sensitive_url", @@ -2521,7 +2559,7 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" +source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2" dependencies = [ "async-stream", "blst", @@ -2534,8 +2572,9 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", + "serde_yaml", "sha2 0.9.9", - "ssz-rs", + "ssz_rs", "thiserror", "tokio", "tokio-stream", @@ -2808,7 +2847,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", - "ssz-rs", + "ssz_rs", "ssz_types", "state_processing", "strum", @@ -3538,12 +3577,6 @@ dependencies = [ "pin-project-lite 0.2.9", ] -[[package]] -name = "http-range-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - [[package]] name = "http_api" version = "0.1.0" @@ -5023,9 +5056,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "md-5" @@ -5114,16 +5147,20 @@ dependencies = [ [[package]] name = "mev-rs" -version = "0.2.1" -source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" +version = "0.3.0" +source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af" dependencies = [ + "anvil-rpc", "async-trait", "axum", "beacon-api-client", "ethereum-consensus", "hyper", + "parking_lot 0.12.1", + "reqwest", "serde", - "ssz-rs", + "serde_json", + "ssz_rs", "thiserror", "tokio", "tracing", @@ -5507,6 +5544,7 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", + "beacon_processor", "delay_map", "derivative", "environment", @@ -5530,6 +5568,7 @@ dependencies = [ "matches", "num_cpus", "operation_pool", + "parking_lot 0.12.1", "rand 0.8.5", "rlp", "slog", @@ -5595,6 +5634,7 @@ dependencies = [ "execution_layer", "sensitive_url", "tempfile", + "tokio", "types", "validator_client", "validator_dir", @@ -6407,9 +6447,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.58" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" dependencies = [ "unicode-ident", ] @@ -7576,6 +7616,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_repr" version = "0.1.12" @@ -8073,23 +8123,24 @@ dependencies = [ ] [[package]] -name = "ssz-rs" -version = "0.8.0" -source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" +name = "ssz_rs" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" dependencies = [ "bitvec 1.0.1", "hex", "num-bigint", "serde", "sha2 0.9.9", - "ssz-rs-derive", - "thiserror", + "ssz_rs_derive", ] [[package]] -name = "ssz-rs-derive" -version = "0.8.0" -source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" +name = "ssz_rs_derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ "proc-macro2", "quote", @@ -8864,25 +8915,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags 1.3.2", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite 0.2.9", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" diff --git a/Cargo.toml b/Cargo.toml index 
717577c83f6..3ef47d77c3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "beacon_node", "beacon_node/beacon_chain", + "beacon_node/beacon_processor", "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", @@ -93,13 +94,6 @@ arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b9998 # FIXME(sproul): restore upstream xdelta3 = { git = "http://github.com/michaelsproul/xdelta3-rs", rev="cb3be8d445c0ed2adf815c62b14c197ca19bd94a" } -[patch."https://github.com/ralexstokes/mev-rs"] -mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } -[patch."https://github.com/ralexstokes/ethereum-consensus"] -ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } -[patch."https://github.com/ralexstokes/ssz-rs"] -ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } - [profile.maxperf] inherits = "release" lto = "fat" diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9fd70ece96b..ff126eb6dfa 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -41,4 +41,4 @@ monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } http_api = { path = "http_api" } unused_port = { path = "../common/unused_port" } -strum = "0.24.1" +strum = "0.24.1" \ No newline at end of file diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 613e3926be9..eb4b1f6f9ad 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -27,6 +27,7 @@ operation_pool = { path = "../operation_pool" } rayon = "1.4.1" serde = "1.0.116" serde_derive = "1.0.116" +ethereum_serde_utils = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } slot_clock = { path = "../../common/slot_clock" } diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index e5302cf54b2..4f72523e3de 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -3,7 +3,8 @@ use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttest use eth2::lighthouse::StandardAttestationRewards; use participation_cache::ParticipationCache; use safe_arith::SafeArith; -use slog::{debug, Logger}; +use serde_utils::quoted_u64::Quoted; +use slog::debug; use state_processing::{ common::altair::BaseRewardPerIncrement, per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, @@ -15,32 +16,111 @@ use store::consts::altair::{ }; use types::consts::altair::WEIGHT_DENOMINATOR; -use types::{Epoch, EthSpec}; +use types::{BeaconState, Epoch, EthSpec}; use eth2::types::ValidatorId; +use state_processing::common::base::get_base_reward_from_effective_balance; +use state_processing::per_epoch_processing::base::rewards_and_penalties::{ + get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset, + get_inactivity_penalty_delta, get_inclusion_delay_delta, +}; +use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; +use state_processing::per_epoch_processing::base::{ + TotalBalances, ValidatorStatus, ValidatorStatuses, +}; impl BeaconChain { pub fn compute_attestation_rewards( &self, epoch: Epoch, validators: Vec, 
- log: Logger, ) -> Result { - debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + debug!(self.log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); // Get state - let spec = &self.spec; - let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); let state_root = self .state_root_at_slot(state_slot)? .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; - let mut state = self + let state = self .get_state(&state_root, Some(state_slot))? .ok_or(BeaconChainError::MissingBeaconState(state_root))?; + match state { + BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + self.compute_attestation_rewards_altair(state, validators) + } + } + } + + fn compute_attestation_rewards_base( + &self, + mut state: BeaconState, + validators: Vec, + ) -> Result { + let spec = &self.spec; + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state)?; + + let ideal_rewards = + self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; + + let indices_to_attestation_delta = if validators.is_empty() { + get_attestation_deltas_all(&state, &validator_statuses, spec)? + .into_iter() + .enumerate() + .collect() + } else { + let validator_indices = Self::validators_ids_to_indices(&mut state, validators)?; + get_attestation_deltas_subset(&state, &validator_statuses, &validator_indices, spec)? + }; + + let mut total_rewards = vec![]; + + for (index, delta) in indices_to_attestation_delta.into_iter() { + let head_delta = delta.head_delta; + let head = (head_delta.rewards as i64).safe_sub(head_delta.penalties as i64)?; + + let target_delta = delta.target_delta; + let target = (target_delta.rewards as i64).safe_sub(target_delta.penalties as i64)?; + + let source_delta = delta.source_delta; + let source = (source_delta.rewards as i64).safe_sub(source_delta.penalties as i64)?; + + // No penalties associated with inclusion delay + let inclusion_delay = delta.inclusion_delay_delta.rewards; + let inactivity = delta.inactivity_penalty_delta.penalties.wrapping_neg() as i64; + + let rewards = TotalAttestationRewards { + validator_index: index as u64, + head, + target, + source, + inclusion_delay: Some(Quoted { + value: inclusion_delay, + }), + inactivity, + }; + + total_rewards.push(rewards); + } + + Ok(StandardAttestationRewards { + ideal_rewards, + total_rewards, + }) + } + + fn compute_attestation_rewards_altair( + &self, + mut state: BeaconState, + validators: Vec, + ) -> Result { + let spec = &self.spec; + // Calculate ideal_rewards let participation_cache = ParticipationCache::new(&state, spec) .map_err(|_| BeaconChainError::AttestationRewardsError)?; @@ -68,7 +148,7 @@ impl BeaconChain { let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; - for effective_balance_eth in 0..=32 { + for effective_balance_eth in 1..=self.max_effective_balance_increment_steps()? { let effective_balance = effective_balance_eth.safe_mul(spec.effective_balance_increment)?; let base_reward = @@ -83,7 +163,7 @@ impl BeaconChain { let ideal_reward = reward_numerator .safe_div(active_increments)? .safe_div(WEIGHT_DENOMINATOR)?; - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? 
{ ideal_rewards_hashmap .insert((flag_index, effective_balance), (ideal_reward, penalty)); } else { @@ -98,15 +178,7 @@ impl BeaconChain { let validators = if validators.is_empty() { participation_cache.eligible_validator_indices().to_vec() } else { - validators - .into_iter() - .map(|validator| match validator { - ValidatorId::Index(i) => Ok(i as usize), - ValidatorId::PublicKey(pubkey) => state - .get_validator_index(&pubkey)? - .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), - }) - .collect::, _>>()? + Self::validators_ids_to_indices(&mut state, validators)? }; for &validator_index in &validators { @@ -114,7 +186,7 @@ impl BeaconChain { .get_validator(validator_index) .map_err(|_| BeaconChainError::AttestationRewardsError)?; let eligible = validator.is_eligible; - let mut head_reward = 0u64; + let mut head_reward = 0i64; let mut target_reward = 0i64; let mut source_reward = 0i64; @@ -130,7 +202,7 @@ impl BeaconChain { .map_err(|_| BeaconChainError::AttestationRewardsError)?; if voted_correctly { if flag_index == TIMELY_HEAD_FLAG_INDEX { - head_reward += ideal_reward; + head_reward += *ideal_reward as i64; } else if flag_index == TIMELY_TARGET_FLAG_INDEX { target_reward += *ideal_reward as i64; } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { @@ -150,6 +222,9 @@ impl BeaconChain { head: head_reward, target: target_reward, source: source_reward, + inclusion_delay: None, + // TODO: altair calculation logic needs to be updated to include inactivity penalty + inactivity: 0, }); } @@ -171,6 +246,9 @@ impl BeaconChain { head: 0, target: 0, source: 0, + inclusion_delay: None, + // TODO: altair calculation logic needs to be updated to include inactivity penalty + inactivity: 0, }); match *flag_index { TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, @@ -190,4 +268,126 @@ impl BeaconChain { total_rewards, }) } + + fn max_effective_balance_increment_steps(&self) -> Result { + let spec = &self.spec; + let max_steps = spec + .max_effective_balance + .safe_div(spec.effective_balance_increment)?; + Ok(max_steps) + } + + fn validators_ids_to_indices( + state: &mut BeaconState, + validators: Vec, + ) -> Result, BeaconChainError> { + let indices = validators + .into_iter() + .map(|validator| match validator { + ValidatorId::Index(i) => Ok(i as usize), + ValidatorId::PublicKey(pubkey) => state + .get_validator_index(&pubkey)? + .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), + }) + .collect::, _>>()?; + Ok(indices) + } + + fn compute_ideal_rewards_base( + &self, + state: &BeaconState, + total_balances: &TotalBalances, + ) -> Result, BeaconChainError> { + let spec = &self.spec; + let previous_epoch = state.previous_epoch(); + let finality_delay = previous_epoch + .safe_sub(state.finalized_checkpoint().epoch)? + .as_u64(); + + let ideal_validator_status = ValidatorStatus { + is_previous_epoch_attester: true, + is_slashed: false, + inclusion_info: Some(InclusionInfo { + delay: 1, + ..Default::default() + }), + ..Default::default() + }; + + let mut ideal_attestation_rewards_list = Vec::new(); + + for effective_balance_step in 1..=self.max_effective_balance_increment_steps()? 
{ + let effective_balance = + effective_balance_step.safe_mul(spec.effective_balance_increment)?; + let base_reward = get_base_reward_from_effective_balance::( + effective_balance, + total_balances.current_epoch(), + spec, + )?; + + // compute ideal head rewards + let head = get_attestation_component_delta( + true, + total_balances.previous_epoch_attesters(), + total_balances, + base_reward, + finality_delay, + spec, + )? + .rewards; + + // compute ideal target rewards + let target = get_attestation_component_delta( + true, + total_balances.previous_epoch_target_attesters(), + total_balances, + base_reward, + finality_delay, + spec, + )? + .rewards; + + // compute ideal source rewards + let source = get_attestation_component_delta( + true, + total_balances.previous_epoch_head_attesters(), + total_balances, + base_reward, + finality_delay, + spec, + )? + .rewards; + + // compute ideal inclusion delay rewards + let inclusion_delay = + get_inclusion_delay_delta(&ideal_validator_status, base_reward, spec)? + .0 + .rewards; + + // compute inactivity penalty + let inactivity = get_inactivity_penalty_delta( + &ideal_validator_status, + base_reward, + finality_delay, + spec, + )? + .penalties + .wrapping_neg() as i64; + + let ideal_attestation_rewards = IdealAttestationRewards { + effective_balance, + head, + target, + source, + inclusion_delay: Some(Quoted { + value: inclusion_delay, + }), + inactivity, + }; + + ideal_attestation_rewards_list.push(ideal_attestation_rewards); + } + + Ok(ideal_attestation_rewards_list) + } } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3db45e9d636..26db90d4fb9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -193,6 +193,17 @@ pub struct PrePayloadAttributes { pub parent_block_number: u64, } +/// Information about a state/block at a specific slot. +#[derive(Debug, Clone, Copy)] +pub struct FinalizationAndCanonicity { + /// True if the slot of the state or block is finalized. + /// + /// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`. + pub slot_is_finalized: bool, + /// True if the state or block is canonical at its slot. + pub canonical: bool, +} + /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] @@ -420,6 +431,12 @@ pub struct BeaconChain { type BeaconBlockAndState = (BeaconBlock, BeaconState); +impl FinalizationAndCanonicity { + pub fn is_finalized(self) -> bool { + self.slot_is_finalized && self.canonical + } +} + impl BeaconChain { /// Checks if a block is finalized. /// The finalization check is done with the block slot. The block root is used to verify that @@ -449,16 +466,30 @@ impl BeaconChain { state_root: &Hash256, state_slot: Slot, ) -> Result { + self.state_finalization_and_canonicity(state_root, state_slot) + .map(FinalizationAndCanonicity::is_finalized) + } + + /// Fetch the finalization and canonicity status of the state with `state_root`. 
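For context on the `beacon_chain.rs` hunk above: `is_state_finalized` previously fused two independent checks, and the refactor surfaces both facts so callers can tell a state at a finalized slot on an abandoned fork apart from a genuinely finalized one. A minimal standalone sketch of the combination rule (mirroring the `FinalizationAndCanonicity` struct introduced above, with illustrative values):

```rust
/// Standalone mirror of the struct added in the diff above.
#[derive(Debug, Clone, Copy)]
struct FinalizationAndCanonicity {
    slot_is_finalized: bool,
    canonical: bool,
}

impl FinalizationAndCanonicity {
    /// Finality requires BOTH conditions: an old slot alone is not enough.
    fn is_finalized(self) -> bool {
        self.slot_is_finalized && self.canonical
    }
}

fn main() {
    // A state at a finalized slot, but sitting on a pruned fork:
    let stale_fork = FinalizationAndCanonicity { slot_is_finalized: true, canonical: false };
    assert!(!stale_fork.is_finalized());

    // The canonical state at that same slot IS finalized:
    let canonical = FinalizationAndCanonicity { slot_is_finalized: true, canonical: true };
    assert!(canonical.is_finalized());
}
```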
+ pub fn state_finalization_and_canonicity( + &self, + state_root: &Hash256, + state_slot: Slot, + ) -> Result { let finalized_slot = self .canonical_head .cached_head() .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); - let is_canonical = self + let slot_is_finalized = state_slot <= finalized_slot; + let canonical = self .state_root_at_slot(state_slot)? .map_or(false, |canonical_root| state_root == &canonical_root); - Ok(state_slot <= finalized_slot && is_canonical) + Ok(FinalizationAndCanonicity { + slot_is_finalized, + canonical, + }) } /// Persists the head tracker and fork choice. diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index cc7a957ecc8..efbc9905b7a 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -83,6 +83,8 @@ pub struct ChainConfig { pub enable_backfill_rate_limiting: bool, /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. pub progressive_balances_mode: ProgressiveBalancesMode, + /// Number of epochs between each migration of data from the hot database to the freezer. + pub epochs_per_migration: u64, } impl Default for ChainConfig { @@ -114,6 +116,7 @@ impl Default for ChainConfig { always_prepare_payload: false, enable_backfill_rate_limiting: true, progressive_balances_mode: ProgressiveBalancesMode::Checked, + epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 37f31ae6c1e..d400f66c22c 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -24,7 +24,7 @@ use state_processing::{ }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, - BlockProcessingError, BlockReplayError, SlotProcessingError, + BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError, }; use std::time::Duration; use task_executor::ShutdownReason; @@ -61,6 +61,7 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + EpochProcessingError(EpochProcessingError), StateAdvanceError(StateAdvanceError), UnableToAdvanceState(String), NoStateForAttestation { @@ -215,6 +216,7 @@ pub enum BeaconChainError { } easy_from_to!(SlotProcessingError, BeaconChainError); +easy_from_to!(EpochProcessingError, BeaconChainError); easy_from_to!(AttestationValidationError, BeaconChainError); easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 1a67bb6b4b5..b1dfad1b6c4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -71,6 +71,7 @@ pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; +pub use migrate::MigratorConfig; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 36be181289c..50a39ac3839 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -3,7 +3,6 @@ use crate::errors::BeaconChainError; use 
crate::head_tracker::{HeadTracker, SszHeadTracker}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use parking_lot::Mutex; -use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::mem; @@ -28,13 +27,13 @@ const COMPACTION_FINALITY_DISTANCE: u64 = 1024; const BLOCKS_PER_RECONSTRUCTION: usize = 8192 * 4; /// Default number of epochs to wait between finalization migrations. -pub const DEFAULT_EPOCHS_PER_RUN: u64 = 1; +pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; /// The background migrator runs a thread to perform pruning and migrate state from the hot /// to the cold database. pub struct BackgroundMigrator, Cold: ItemStore> { db: Arc>, - /// Record of when the last migration ran, for enforcing `epochs_per_run`. + /// Record of when the last migration ran, for enforcing `epochs_per_migration`. prev_migration: Arc>, tx_thread: Option< Mutex<( @@ -47,41 +46,45 @@ pub struct BackgroundMigrator, Cold: ItemStore> log: Logger, } -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct MigratorConfig { pub blocking: bool, - /// Run migrations at most once per `epochs_per_run`. + /// Run migrations at most once per `epochs_per_migration`. /// - /// If set to 0, then run every finalization. - pub epochs_per_run: u64, + /// If set to 0 or 1, then run every finalization. + pub epochs_per_migration: u64, } impl Default for MigratorConfig { fn default() -> Self { Self { blocking: false, - epochs_per_run: DEFAULT_EPOCHS_PER_RUN, + epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION, } } } -pub struct PrevMigration { - epoch: Option, - epochs_per_run: u64, -} - impl MigratorConfig { pub fn blocking(mut self) -> Self { self.blocking = true; self } - pub fn epochs_per_run(mut self, epochs_per_run: u64) -> Self { - self.epochs_per_run = epochs_per_run; + pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self { + self.epochs_per_migration = epochs_per_migration; self } } +/// Record of when the last migration ran. +#[derive(Debug)] +pub struct PrevMigration { + /// The epoch at which the last finalization migration ran. + epoch: Epoch, + /// The number of epochs to wait between runs. + epochs_per_migration: u64, +} + /// Pruning can be successful, or in rare cases deferred to a later point. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PruningOutcome { @@ -128,6 +131,7 @@ pub struct FinalizationNotification { finalized_state_root: BeaconStateHash, finalized_checkpoint: Checkpoint, head_tracker: Arc, + prev_migration: Arc>, genesis_block_root: Hash256, } @@ -151,9 +155,10 @@ impl, Cold: ItemStore> BackgroundMigrator Self { + // Estimate last migration run from DB split slot. let prev_migration = Arc::new(Mutex::new(PrevMigration { - epoch: None, - epochs_per_run: config.epochs_per_run, + epoch: db.get_split_slot().epoch(E::slots_per_epoch()), + epochs_per_migration: config.epochs_per_migration, })); let tx_thread = if config.blocking { None @@ -188,6 +193,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator prev_migration.epoch, + "new_finalized_epoch" => epoch, + "epochs_per_migration" => prev_migration.epochs_per_migration, + ); + return; + } + + // Update the previous migration epoch immediately to avoid holding the lock. If the + // migration doesn't succeed then the next migration will be retried at the next scheduled + // run. 
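The deferral check introduced in this `migrate.rs` rework (here in `run_migration`, with a twin in the blocking path further down) is worth a worked example. A hedged sketch of the same comparison, with made-up epoch numbers and simplified field types:

```rust
/// Sketch of the deferral rule, not the actual BackgroundMigrator fields.
struct PrevMigration {
    epoch: u64,                // epoch at which the last migration ran
    epochs_per_migration: u64, // configured spacing; 0 or 1 = every finalization
}

fn is_deferred(prev: &PrevMigration, new_finalized_epoch: u64) -> bool {
    new_finalized_epoch < prev.epoch + prev.epochs_per_migration
}

fn main() {
    let prev = PrevMigration { epoch: 10, epochs_per_migration: 4 };
    // Finalizations at epochs 11-13 are skipped...
    assert!(is_deferred(&prev, 12));
    // ...and the run at epoch 14 (10 + 4) proceeds.
    assert!(!is_deferred(&prev, 14));
}
```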
+ prev_migration.epoch = epoch; + drop(prev_migration); + debug!(log, "Database consolidation started"); let finalized_state_root = notif.finalized_state_root; @@ -438,23 +464,21 @@ impl, Cold: ItemStore> BackgroundMigrator prev_epoch, - "new_finalized_epoch" => epoch, - "epochs_per_run" => prev_migration.epochs_per_run, - ); - continue; - } + if epoch < prev_migration.epoch + prev_migration.epochs_per_migration { + debug!( + log, + "Finalization migration deferred"; + "last_finalized_epoch" => prev_migration.epoch, + "new_finalized_epoch" => epoch, + "epochs_per_migration" => prev_migration.epochs_per_migration, + ); + continue; } // We intend to run at this epoch, update the in-memory record of the last epoch // at which we ran. This value isn't tracked on disk so we will always migrate // on the first finalization after startup. - prev_migration.epoch = Some(epoch); + prev_migration.epoch = epoch; Self::run_migration(db.clone(), notif.to_owned(), &log); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index abcaedceace..c95af4c46f4 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -513,18 +513,23 @@ where let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); + let chain_config = self.chain_config.unwrap_or_default(); let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) .logger(log.clone()) .custom_spec(spec) .store(self.store.expect("cannot build without store")) - .store_migrator_config(MigratorConfig::default().blocking()) + .store_migrator_config( + MigratorConfig::default() + .blocking() + .epochs_per_migration(chain_config.epochs_per_migration), + ) .task_executor(self.runtime.task_executor.clone()) .execution_layer(self.execution_layer) .dummy_eth1_backend() .expect("should build dummy backend") .shutdown_sender(shutdown_tx) - .chain_config(self.chain_config.unwrap_or_default()) + .chain_config(chain_config) .event_handler(Some(ServerSentEventHandler::new_with_capacity( log.clone(), 5, diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 8cc1b69962c..96810055663 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -9,19 +9,22 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, }; +use eth2::lighthouse::attestation_rewards::TotalAttestationRewards; +use eth2::lighthouse::StandardAttestationRewards; +use eth2::types::ValidatorId; use lazy_static::lazy_static; +use types::beacon_state::Error as BeaconStateError; +use types::{BeaconState, ChainSpec}; pub const VALIDATOR_COUNT: usize = 64; +type E = MinimalEthSpec; + lazy_static! 
{ static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness() -> BeaconChainHarness> { - let mut spec = E::default_spec(); - - spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests - +fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .keypairs(KEYPAIRS.to_vec()) @@ -35,8 +38,11 @@ fn get_harness() -> BeaconChainHarness> { #[tokio::test] async fn test_sync_committee_rewards() { - let num_block_produced = MinimalEthSpec::slots_per_epoch(); - let harness = get_harness::(); + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + + let harness = get_harness(spec); + let num_block_produced = E::slots_per_epoch(); let latest_block_root = harness .extend_chain( @@ -119,3 +125,175 @@ async fn test_sync_committee_rewards() { mismatches.join(",") ); } + +#[tokio::test] +async fn test_verify_attestation_rewards_base() { + let harness = get_harness(E::default_spec()); + + // epoch 0 (N), only two thirds of validators vote. + let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let two_thirds_validators: Vec = (0..two_thirds).collect(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(two_thirds_validators), + ) + .await; + + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + + // extend slots to beginning of epoch N + 2 + harness.extend_slots(E::slots_per_epoch() as usize).await; + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(0), vec![]) + .unwrap(); + + // assert no inactivity penalty for both ideal rewards and individual validators + assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); + assert!(total_rewards.iter().all(|reward| reward.inactivity == 0)); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().to_vec(); + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_base_inactivity_leak() { + let spec = E::default_spec(); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; + + // advance until beginning of epoch N + 1 and get balances + harness + .extend_chain( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(half_validators.clone()), + ) + .await; + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + + // extend slots to beginning of epoch N + 2 + harness.advance_slot(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(half_validators), + ) + .await; + let _slot = harness.get_current_slot(); + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert 
inactivity penalty for both ideal rewards and individual validators + assert!(ideal_rewards.iter().all(|reward| reward.inactivity < 0)); + assert!(total_rewards.iter().all(|reward| reward.inactivity < 0)); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().to_vec(); + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_base_subset_only() { + let harness = get_harness(E::default_spec()); + + // epoch 0 (N), only two thirds of validators vote. + let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let two_thirds_validators: Vec = (0..two_thirds).collect(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(two_thirds_validators), + ) + .await; + + // a small subset of validators to compute attestation rewards for + let validators_subset = [0, VALIDATOR_COUNT / 2, VALIDATOR_COUNT - 1]; + + // capture balances before transitioning to N + 2 + let initial_balances = get_validator_balances(harness.get_current_state(), &validators_subset); + + // extend slots to beginning of epoch N + 2 + harness.extend_slots(E::slots_per_epoch() as usize).await; + + let validators_subset_ids: Vec = validators_subset + .into_iter() + .map(|idx| ValidatorId::Index(idx as u64)) + .collect(); + + // compute reward deltas for the subset of validators in epoch N + let StandardAttestationRewards { + ideal_rewards: _, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(0), validators_subset_ids) + .unwrap(); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances = get_validator_balances(harness.get_current_state(), &validators_subset); + assert_eq!(expected_balances, balances); +} + +/// Apply a vec of `TotalAttestationRewards` to initial balances, and return +fn apply_attestation_rewards( + initial_balances: &[u64], + attestation_rewards: Vec, +) -> Vec { + initial_balances + .iter() + .zip(attestation_rewards) + .map(|(&initial_balance, rewards)| { + let expected_balance = initial_balance as i64 + + rewards.head + + rewards.source + + rewards.target + + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 + + rewards.inactivity; + expected_balance as u64 + }) + .collect::>() +} + +fn get_validator_balances(state: BeaconState, validators: &[usize]) -> Vec { + validators + .iter() + .flat_map(|&id| { + state + .balances() + .get(id) + .cloned() + .ok_or(BeaconStateError::BalancesOutOfBounds(id)) + }) + .collect() +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 6566ab06247..b5e444e809d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -62,9 +62,6 @@ fn get_harness( store: Arc, LevelDB>>, validator_count: usize, ) -> TestHarness { - // Most tests were written expecting instant migration on finalization. 
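Stepping back to the `apply_attestation_rewards` helper in the rewards tests above: it reconciles signed per-component deltas against unsigned Gwei balances. The same arithmetic with illustrative numbers (real deltas come from the rewards API):

```rust
fn main() {
    let initial_balance: u64 = 32_000_000_000; // 32 ETH in Gwei
    let (head, target, source): (i64, i64, i64) = (3_000, 5_500, 2_900);
    let inclusion_delay: u64 = 1_200; // reward-only component, never negative
    let inactivity: i64 = -700;       // negative during an inactivity leak

    let expected = initial_balance as i64
        + head + target + source
        + inclusion_delay as i64
        + inactivity;
    assert_eq!(expected as u64, 32_000_011_900);
}
```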
- let migrator_config = MigratorConfig::default().blocking().epochs_per_run(0); - let log = store.log.clone(); let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -74,9 +71,6 @@ fn get_harness( .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() - .initial_mutator(Box::new(|builder: BeaconChainBuilder<_>| { - builder.store_migrator_config(migrator_config) - })) .build(); harness.advance_slot(); harness diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml new file mode 100644 index 00000000000..5c5200e101e --- /dev/null +++ b/beacon_node/beacon_processor/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "beacon_processor" +version = "0.1.0" +edition = "2021" + +[dependencies] +slog = { version = "2.5.2", features = ["max_level_trace"] } +itertools = "0.10.0" +logging = { path = "../../common/logging" } +tokio = { version = "1.14.0", features = ["full"] } +tokio-util = { version = "0.6.3", features = ["time"] } +futures = "0.3.7" +fnv = "1.0.7" +strum = "0.24.0" +task_executor = { path = "../../common/task_executor" } +slot_clock = { path = "../../common/slot_clock" } +lighthouse_network = { path = "../lighthouse_network" } +hex = "0.4.2" +derivative = "2.2.0" +types = { path = "../../consensus/types" } +ethereum_ssz = "0.5.0" +lazy_static = "1.4.0" +lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +parking_lot = "0.12.0" \ No newline at end of file diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/beacon_processor/src/lib.rs similarity index 58% rename from beacon_node/network/src/beacon_processor/mod.rs rename to beacon_node/beacon_processor/src/lib.rs index 84d8e1b07a8..88066f2a305 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -38,51 +38,34 @@ //! checks the queues to see if there are more parcels of work that can be spawned in a new worker //! task. 
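The module docs above summarise the scheduler the remainder of this file implements: one manager task owns all queues, spawns at most `max_workers` workers, and each worker reports back on an idle channel. A stripped-down sketch of that shape using plain tokio primitives (the crate itself replaces `tokio::select!` with a custom `Stream` to control event ordering, a nuance this sketch ignores):

```rust
use std::collections::VecDeque;
use tokio::sync::mpsc;

type Job = Box<dyn FnOnce() + Send + 'static>;

async fn manager(mut work_rx: mpsc::Receiver<Job>, max_workers: usize) {
    let (idle_tx, mut idle_rx) = mpsc::channel::<()>(max_workers);
    let mut current_workers = 0usize;
    let mut queue: VecDeque<Job> = VecDeque::new();

    loop {
        tokio::select! {
            // A worker finished: free up a slot.
            Some(()) = idle_rx.recv() => current_workers -= 1,
            // New work arrives: queue it.
            Some(job) = work_rx.recv() => queue.push_back(job),
            else => break,
        }
        // Drain the queue while worker slots remain.
        while current_workers < max_workers {
            let Some(job) = queue.pop_front() else { break };
            current_workers += 1;
            let idle_tx = idle_tx.clone();
            tokio::task::spawn_blocking(move || {
                job();
                // Tell the manager this slot is free again.
                let _ = idle_tx.blocking_send(());
            });
        }
    }
}
```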
-use crate::sync::manager::BlockProcessType; -use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; -use beacon_chain::parking_lot::Mutex; -use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; -use derivative::Derivative; +use crate::work_reprocessing_queue::{ + spawn_reprocess_scheduler, QueuedAggregate, QueuedBackfillBatch, QueuedGossipBlock, + QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, ReprocessQueueMessage, +}; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; -use lighthouse_network::rpc::LightClientBootstrapRequest; -use lighthouse_network::{ - rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, - Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, -}; +use lighthouse_network::NetworkGlobals; +use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; +use parking_lot::Mutex; use slog::{crit, debug, error, trace, warn, Logger}; -use std::collections::VecDeque; +use slot_clock::SlotClock; +use std::cmp; +use std::collections::{HashSet, VecDeque}; +use std::fmt; use std::future::Future; -use std::path::PathBuf; use std::pin::Pin; -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::task::Context; use std::time::Duration; -use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::{ - Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, -}; -use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, - QueuedUnaggregate, ReadyWork, -}; - -use worker::{Toolbox, Worker}; - -mod tests; -mod work_reprocessing_queue; -mod worker; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, Slot, SubnetId}; +use work_reprocessing_queue::IgnoredRpcBlock; -use crate::beacon_processor::work_reprocessing_queue::{ - QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, -}; -pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; +mod metrics; +pub mod work_reprocessing_queue; /// The maximum size of the channel for work events to the `BeaconProcessor`. /// @@ -96,7 +79,7 @@ pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. -const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; +pub const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; /// The maximum number of queued `Attestation` objects that will be stored before we start dropping /// them. @@ -221,6 +204,7 @@ pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; +pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -366,394 +350,104 @@ impl DuplicateCache { } /// An event to be processed by the manager task. 
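One sizing note on the constants above: the re-processing channel is capped at three quarters of the main event channel, presumably so re-queued work cannot crowd out fresh events entirely. The arithmetic, spelled out:

```rust
const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384;
// 3 * 16_384 / 4 = 12_288 slots for re-processed work.
const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4;

fn main() {
    assert_eq!(MAX_SCHEDULED_WORK_QUEUE_LEN, 12_288);
}
```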
-#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] -pub struct WorkEvent { - drop_during_sync: bool, - work: Work, +#[derive(Debug)] +pub struct WorkEvent { + pub drop_during_sync: bool, + pub work: Work, } -impl WorkEvent { - /// Create a new `Work` event for some unaggregated attestation. - pub fn unaggregated_attestation( - message_id: MessageId, - peer_id: PeerId, - attestation: Attestation, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipAttestation { - message_id, - peer_id, - attestation: Box::new(attestation), - subnet_id, - should_import, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some aggregated attestation. - pub fn aggregated_attestation( - message_id: MessageId, - peer_id: PeerId, - aggregate: SignedAggregateAndProof, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipAggregate { - message_id, - peer_id, - aggregate: Box::new(aggregate), - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some block. - pub fn gossip_beacon_block( - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block: Arc>, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipBlock { - message_id, - peer_id, - peer_client, - block, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some sync committee signature. - pub fn gossip_sync_signature( - message_id: MessageId, - peer_id: PeerId, - sync_signature: SyncCommitteeMessage, - subnet_id: SyncSubnetId, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature: Box::new(sync_signature), - subnet_id, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some sync committee contribution. - pub fn gossip_sync_contribution( - message_id: MessageId, - peer_id: PeerId, - sync_contribution: SignedContributionAndProof, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution: Box::new(sync_contribution), - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some exit. - pub fn gossip_voluntary_exit( - message_id: MessageId, - peer_id: PeerId, - voluntary_exit: Box, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, - }, - } - } - - /// Create a new `Work` event for some proposer slashing. - pub fn gossip_proposer_slashing( - message_id: MessageId, - peer_id: PeerId, - proposer_slashing: Box, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - }, - } - } - - /// Create a new `Work` event for some light client finality update. - pub fn gossip_light_client_finality_update( - message_id: MessageId, - peer_id: PeerId, - light_client_finality_update: Box>, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipLightClientFinalityUpdate { - message_id, - peer_id, - light_client_finality_update, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some light client optimistic update. 
- pub fn gossip_light_client_optimistic_update( - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - seen_timestamp: Duration, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::GossipLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, - }, - } - } - - /// Create a new `Work` event for some attester slashing. - pub fn gossip_attester_slashing( - message_id: MessageId, - peer_id: PeerId, - attester_slashing: Box>, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - }, - } - } - - /// Create a new `Work` event for some BLS to execution change. - pub fn gossip_bls_to_execution_change( - message_id: MessageId, - peer_id: PeerId, - bls_to_execution_change: Box, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::GossipBlsToExecutionChange { - message_id, - peer_id, - bls_to_execution_change, - }, - } - } - - /// Create a new `Work` event for some block, where the result from computation (if any) is - /// sent to the other side of `result_tx`. - pub fn rpc_beacon_block( - block_root: Hash256, - block: Arc>, - seen_timestamp: Duration, - process_type: BlockProcessType, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::RpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process: true, - }, - } - } - - /// Create a new work event to import `blocks` as a beacon chain segment. - pub fn chain_segment( - process_id: ChainSegmentProcessId, - blocks: Vec>>, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::ChainSegment { process_id, blocks }, - } - } - - /// Create a new work event to process `StatusMessage`s from the RPC network. - pub fn status_message(peer_id: PeerId, message: StatusMessage) -> Self { - Self { - drop_during_sync: false, - work: Work::Status { peer_id, message }, - } - } - - /// Create a new work event to process `BlocksByRangeRequest`s from the RPC network. - pub fn blocks_by_range_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRangeRequest, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - }, - } - } - - /// Create a new work event to process `BlocksByRootRequest`s from the RPC network. - pub fn blocks_by_roots_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRootRequest, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - }, - } - } - - /// Create a new work event to process `LightClientBootstrap`s from the RPC network. - pub fn lightclient_bootstrap_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: LightClientBootstrapRequest, - ) -> Self { - Self { - drop_during_sync: true, - work: Work::LightClientBootstrapRequest { - peer_id, - request_id, - request, - }, - } - } - +impl WorkEvent { /// Get a `str` representation of the type of work this `WorkEvent` contains. 
pub fn work_type(&self) -> &'static str { self.work.str_id() } } -impl std::convert::From> for WorkEvent { - fn from(ready_work: ReadyWork) -> Self { +impl std::convert::From for WorkEvent { + fn from(ready_work: ReadyWork) -> Self { match ready_work { ReadyWork::Block(QueuedGossipBlock { - peer_id, - block, - seen_timestamp, + beacon_block_slot, + beacon_block_root, + process_fn, }) => Self { drop_during_sync: false, work: Work::DelayedImportBlock { - peer_id, - block, - seen_timestamp, + beacon_block_slot, + beacon_block_root, + process_fn, }, }, ReadyWork::RpcBlock(QueuedRpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process, + beacon_block_root: _, + process_fn, + ignore_fn: _, }) => Self { drop_during_sync: false, - work: Work::RpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process, - }, + work: Work::RpcBlock { process_fn }, + }, + ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { process_fn }) => Self { + drop_during_sync: false, + work: Work::IgnoredRpcBlock { process_fn }, }, ReadyWork::Unaggregate(QueuedUnaggregate { - peer_id, - message_id, - attestation, - subnet_id, - should_import, - seen_timestamp, + beacon_block_root: _, + process_fn, }) => Self { drop_during_sync: true, - work: Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - }, + work: Work::UnknownBlockAttestation { process_fn }, }, ReadyWork::Aggregate(QueuedAggregate { - peer_id, - message_id, - attestation, - seen_timestamp, + process_fn, + beacon_block_root: _, }) => Self { drop_during_sync: true, - work: Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate: attestation, - seen_timestamp, - }, + work: Work::UnknownBlockAggregate { process_fn }, }, ReadyWork::LightClientUpdate(QueuedLightClientUpdate { - peer_id, - message_id, - light_client_optimistic_update, - seen_timestamp, - .. + parent_root, + process_fn, }) => Self { drop_during_sync: true, work: Work::UnknownLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, + parent_root, + process_fn, }, }, - ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => { - WorkEvent::chain_segment(process_id, blocks) - } + ReadyWork::BackfillSync(QueuedBackfillBatch(process_fn)) => Self { + drop_during_sync: false, + work: Work::ChainSegmentBackfill(process_fn), + }, } } } -pub struct BeaconProcessorSend(pub mpsc::Sender>); +/// Items required to verify a batch of unaggregated gossip attestations. +#[derive(Debug)] +pub struct GossipAttestationPackage { + pub message_id: MessageId, + pub peer_id: PeerId, + pub attestation: Box>, + pub subnet_id: SubnetId, + pub should_import: bool, + pub seen_timestamp: Duration, +} + +/// Items required to verify a batch of aggregated gossip attestations. 
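The `From<ReadyWork>` conversion above captures the central refactor of this file: queued items now carry opaque `process_fn` closures rather than typed network payloads, which is what lets the processor drop its dependency on `BeaconChainTypes`. A minimal sketch of that inversion (types here are illustrative, not the crate's):

```rust
use std::future::Future;
use std::pin::Pin;

// Work is reduced to "something runnable"; the payload type is erased.
type AsyncFn = Pin<Box<dyn Future<Output = ()> + Send + Sync>>;

struct QueuedRpcBlock {
    beacon_block_root: [u8; 32], // stand-in for Hash256
    process_fn: AsyncFn,
}

// The producer (e.g. the network crate) captures everything the task
// needs -- chain handle, block, metadata -- inside the closure:
fn queue_rpc_block(root: [u8; 32]) -> QueuedRpcBlock {
    QueuedRpcBlock {
        beacon_block_root: root,
        process_fn: Box::pin(async move {
            // ... verify and import the block using the captured state ...
        }),
    }
}
```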
+#[derive(Debug)] +pub struct GossipAggregatePackage { + pub message_id: MessageId, + pub peer_id: PeerId, + pub aggregate: Box>, + pub beacon_block_root: Hash256, + pub seen_timestamp: Duration, +} -impl BeaconProcessorSend { - pub fn try_send(&self, message: WorkEvent) -> Result<(), Box>>> { +#[derive(Clone)] +pub struct BeaconProcessorSend(pub mpsc::Sender>); + +impl BeaconProcessorSend { + pub fn try_send(&self, message: WorkEvent) -> Result<(), TrySendError>> { let work_type = message.work_type(); match self.0.try_send(message) { Ok(res) => Ok(res), @@ -762,146 +456,82 @@ impl BeaconProcessorSend { &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, &[work_type], ); - Err(Box::new(e)) + Err(e) } } } } -/// A consensus message (or multiple) from the network that requires processing. -#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] -pub enum Work { +pub type AsyncFn = Pin + Send + Sync>>; +pub type BlockingFn = Box; +pub type BlockingFnWithManualSendOnIdle = Box; + +/// Indicates the type of work to be performed and therefore its priority and +/// queuing specifics. +pub enum Work { GossipAttestation { - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, + attestation: GossipAttestationPackage, + process_individual: Box) + Send + Sync>, + process_batch: Box>) + Send + Sync>, }, UnknownBlockAttestation { - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, + process_fn: BlockingFn, }, GossipAttestationBatch { - packages: Vec>, + attestations: Vec>, + process_batch: Box>) + Send + Sync>, }, GossipAggregate { - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - seen_timestamp: Duration, + aggregate: GossipAggregatePackage, + process_individual: Box) + Send + Sync>, + process_batch: Box>) + Send + Sync>, }, UnknownBlockAggregate { - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - seen_timestamp: Duration, + process_fn: BlockingFn, }, UnknownLightClientOptimisticUpdate { - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - seen_timestamp: Duration, + parent_root: Hash256, + process_fn: BlockingFn, }, GossipAggregateBatch { - packages: Vec>, - }, - GossipBlock { - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block: Arc>, - seen_timestamp: Duration, + aggregates: Vec>, + process_batch: Box>) + Send + Sync>, }, + GossipBlock(AsyncFn), DelayedImportBlock { - peer_id: PeerId, - block: Box>, - seen_timestamp: Duration, - }, - GossipVoluntaryExit { - message_id: MessageId, - peer_id: PeerId, - voluntary_exit: Box, - }, - GossipProposerSlashing { - message_id: MessageId, - peer_id: PeerId, - proposer_slashing: Box, - }, - GossipAttesterSlashing { - message_id: MessageId, - peer_id: PeerId, - attester_slashing: Box>, - }, - GossipSyncSignature { - message_id: MessageId, - peer_id: PeerId, - sync_signature: Box, - subnet_id: SyncSubnetId, - seen_timestamp: Duration, - }, - GossipSyncContribution { - message_id: MessageId, - peer_id: PeerId, - sync_contribution: Box>, - seen_timestamp: Duration, - }, - GossipLightClientFinalityUpdate { - message_id: MessageId, - peer_id: PeerId, - light_client_finality_update: Box>, - seen_timestamp: Duration, - }, - GossipLightClientOptimisticUpdate { - message_id: MessageId, - peer_id: PeerId, - light_client_optimistic_update: Box>, - seen_timestamp: Duration, + beacon_block_slot: Slot, + 
beacon_block_root: Hash256, + process_fn: AsyncFn, }, + GossipVoluntaryExit(BlockingFn), + GossipProposerSlashing(BlockingFn), + GossipAttesterSlashing(BlockingFn), + GossipSyncSignature(BlockingFn), + GossipSyncContribution(BlockingFn), + GossipLightClientFinalityUpdate(BlockingFn), + GossipLightClientOptimisticUpdate(BlockingFn), RpcBlock { - block_root: Hash256, - block: Arc>, - seen_timestamp: Duration, - process_type: BlockProcessType, - should_process: bool, + process_fn: AsyncFn, }, - ChainSegment { - process_id: ChainSegmentProcessId, - blocks: Vec>>, - }, - Status { - peer_id: PeerId, - message: StatusMessage, - }, - BlocksByRangeRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRangeRequest, - }, - BlocksByRootsRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: BlocksByRootRequest, - }, - GossipBlsToExecutionChange { - message_id: MessageId, - peer_id: PeerId, - bls_to_execution_change: Box, - }, - LightClientBootstrapRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: LightClientBootstrapRequest, + IgnoredRpcBlock { + process_fn: BlockingFn, }, + ChainSegment(AsyncFn), + ChainSegmentBackfill(AsyncFn), + Status(BlockingFn), + BlocksByRangeRequest(BlockingFnWithManualSendOnIdle), + BlocksByRootsRequest(BlockingFnWithManualSendOnIdle), + GossipBlsToExecutionChange(BlockingFn), + LightClientBootstrapRequest(BlockingFn), } -impl Work { +impl fmt::Debug for Work { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.str_id()) + } +} + +impl Work { /// Provides a `&str` that uniquely identifies each enum variant. fn str_id(&self) -> &'static str { match self { @@ -909,58 +539,56 @@ impl Work { Work::GossipAttestationBatch { .. } => GOSSIP_ATTESTATION_BATCH, Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, - Work::GossipBlock { .. } => GOSSIP_BLOCK, + Work::GossipBlock(_) => GOSSIP_BLOCK, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, - Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, - Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, - Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING, - Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE, - Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION, - Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, - Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, + Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, + Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, + Work::GossipAttesterSlashing(_) => GOSSIP_ATTESTER_SLASHING, + Work::GossipSyncSignature(_) => GOSSIP_SYNC_SIGNATURE, + Work::GossipSyncContribution(_) => GOSSIP_SYNC_CONTRIBUTION, + Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, + Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, - Work::ChainSegment { - process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, - .. - } => CHAIN_SEGMENT_BACKFILL, + Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, - Work::Status { .. } => STATUS_PROCESSING, - Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, - Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, - Work::LightClientBootstrapRequest { .. 
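Since `Work` now stores boxed closures, `#[derive(Debug)]` is no longer possible (closures do not implement `Debug`), which is why the manual `fmt::Debug` impl above simply prints the static string id. The same pattern in miniature, with a hypothetical `Task` enum:

use std::fmt;

enum Task {
    Status(Box<dyn FnOnce() + Send + Sync>),
    ChainSegment(Box<dyn FnOnce() + Send + Sync>),
}

impl Task {
    // One static identifier per variant; also handy as a metrics label.
    fn str_id(&self) -> &'static str {
        match self {
            Task::Status(_) => "status_processing",
            Task::ChainSegment(_) => "chain_segment",
        }
    }
}

impl fmt::Debug for Task {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.str_id())
    }
}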
} => LIGHT_CLIENT_BOOTSTRAP_REQUEST, + Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, + Work::Status(_) => STATUS_PROCESSING, + Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST, + Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, + Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, - Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE, + Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, } } } /// Unifies all the messages processed by the `BeaconProcessor`. -enum InboundEvent { +enum InboundEvent { /// A worker has completed a task and is free. WorkerIdle, /// There is new work to be done. - WorkEvent(WorkEvent), + WorkEvent(WorkEvent), /// A work event that was queued for re-processing has become ready. - ReprocessingWork(WorkEvent), + ReprocessingWork(WorkEvent), } /// Combines the various incoming event streams for the `BeaconProcessor` into a single stream. /// /// This struct has a similar purpose to `tokio::select!`, however it allows for more fine-grained /// control (specifically in the ordering of event processing). -struct InboundEvents { +struct InboundEvents { /// Used by workers when they finish a task. idle_rx: mpsc::Receiver<()>, /// Used by upstream processes to send new work to the `BeaconProcessor`. - event_rx: mpsc::Receiver>, + event_rx: mpsc::Receiver>, /// Used internally for queuing work ready to be re-processed. - reprocess_work_rx: mpsc::Receiver>, + reprocess_work_rx: mpsc::Receiver, } -impl Stream for InboundEvents { - type Item = InboundEvent; +impl Stream for InboundEvents { + type Item = InboundEvent; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // Always check for idle workers before anything else. This allows us to ensure that a big @@ -1001,31 +629,20 @@ impl Stream for InboundEvents { } } -/// Defines if and where we will store the SSZ files of invalid blocks. -#[derive(Clone)] -pub enum InvalidBlockStorage { - Enabled(PathBuf), - Disabled, -} - /// A mutli-threaded processor for messages received on the network /// that need to be processed by the `BeaconChain` /// /// See module level documentation for more information. -pub struct BeaconProcessor { - pub beacon_chain: Weak>, - pub network_tx: mpsc::UnboundedSender>, - pub sync_tx: mpsc::UnboundedSender>, - pub network_globals: Arc>, +pub struct BeaconProcessor { + pub network_globals: Arc>, pub executor: TaskExecutor, pub max_workers: usize, pub current_workers: usize, - pub importing_blocks: DuplicateCache, - pub invalid_block_storage: InvalidBlockStorage, + pub enable_backfill_rate_limiting: bool, pub log: Logger, } -impl BeaconProcessor { +impl BeaconProcessor { /// Spawns the "manager" task which checks the receiver end of the returned `Sender` for /// messages which contain some new work which will be: /// @@ -1037,10 +654,13 @@ impl BeaconProcessor { /// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work /// events processed by `self`. This should only be used during testing. 
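`InboundEvents` exists so that worker-idle notifications are always observed before new work; a bare `tokio::select!` picks ready branches pseudo-randomly. A stripped-down version of such a hand-rolled `Stream` (two channels instead of three, stand-in payload types):

use std::pin::Pin;
use std::task::{Context, Poll};

use futures::Stream;
use tokio::sync::mpsc;

struct Inbound {
    idle_rx: mpsc::Receiver<()>,
    work_rx: mpsc::Receiver<u32>, // stand-in for `WorkEvent<E>`
}

enum Event {
    WorkerIdle,
    Work(u32),
}

impl Stream for Inbound {
    type Item = Event;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Drain idle messages first so a backlog of finished workers is
        // registered before more work is dequeued.
        if let Poll::Ready(Some(())) = self.idle_rx.poll_recv(cx) {
            return Poll::Ready(Some(Event::WorkerIdle));
        }
        match self.work_rx.poll_recv(cx) {
            Poll::Ready(Some(w)) => Poll::Ready(Some(Event::Work(w))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}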
- pub fn spawn_manager( + pub fn spawn_manager( mut self, - event_rx: mpsc::Receiver>, + event_rx: mpsc::Receiver>, + work_reprocessing_tx: mpsc::Sender, + work_reprocessing_rx: mpsc::Receiver, work_journal_tx: Option>, + slot_clock: S, ) { // Used by workers to communicate that they are finished a task. let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); @@ -1093,20 +713,15 @@ impl BeaconProcessor { let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); - let chain = match self.beacon_chain.upgrade() { - Some(chain) => chain, - // No need to proceed any further if the beacon chain has been dropped, the client - // is shutting down. - None => return, - }; - // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). - let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - let work_reprocessing_tx = spawn_reprocess_scheduler( + let (ready_work_tx, ready_work_rx) = + mpsc::channel::(MAX_SCHEDULED_WORK_QUEUE_LEN); + spawn_reprocess_scheduler( ready_work_tx, + work_reprocessing_rx, &self.executor, - chain.slot_clock.clone(), + slot_clock, self.log.clone(), ); @@ -1121,7 +736,7 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; - let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting; + let enable_backfill_rate_limiting = self.enable_backfill_rate_limiting; loop { let work_event = match inbound_events.next().await { @@ -1209,33 +824,29 @@ impl BeaconProcessor { .as_ref() .map_or(false, |event| event.drop_during_sync); + let idle_tx = idle_tx.clone(); match work_event { // There is no new work event, but we are able to spawn a new worker. // // We don't check the `work.drop_during_sync` here. We assume that if it made // it into the queue at any point then we should process it. None if can_spawn => { - let toolbox = Toolbox { - idle_tx: idle_tx.clone(), - work_reprocessing_tx: work_reprocessing_tx.clone(), - }; - // Check for chain segments first, they're the most efficient way to get // blocks into the system. if let Some(item) = chain_segment_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check sync blocks before gossip blocks, since we've already explicitly // requested these blocks. } else if let Some(item) = rpc_block_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check gossip blocks before gossip attestations, since a block might be // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. @@ -1246,7 +857,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single aggregate is in the queue, process it individually. 
if let Some(item) = aggregate_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } } else { // Collect two or more aggregates into a batch, so they can take @@ -1254,32 +865,45 @@ impl BeaconProcessor { // // Note: this will convert the `Work::GossipAggregate` item into a // `Work::GossipAggregateBatch` item. - let mut packages = Vec::with_capacity(batch_size); + let mut aggregates = Vec::with_capacity(batch_size); + let mut process_batch_opt = None; for _ in 0..batch_size { if let Some(item) = aggregate_queue.pop() { match item { Work::GossipAggregate { - message_id, - peer_id, aggregate, - seen_timestamp, + process_individual: _, + process_batch, } => { - packages.push(GossipAggregatePackage::new( - message_id, - peer_id, - aggregate, - seen_timestamp, - )); + aggregates.push(aggregate); + if process_batch_opt.is_none() { + process_batch_opt = Some(process_batch); + } } _ => { - error!(self.log, "Invalid item in aggregate queue") + error!(self.log, "Invalid item in aggregate queue"); } } } } - // Process all aggregates with a single worker. - self.spawn_worker(Work::GossipAggregateBatch { packages }, toolbox) + if let Some(process_batch) = process_batch_opt { + // Process all aggregates with a single worker. + self.spawn_worker( + Work::GossipAggregateBatch { + aggregates, + process_batch, + }, + idle_tx, + ) + } else { + // There is no good reason for this to + // happen, it is a serious logic error. + // Since we only form batches when multiple + // work items exist, we should always have a + // work closure at this point. + crit!(self.log, "Missing aggregate work"); + } } // Check the unaggregated attestation queue. // @@ -1293,7 +917,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single attestation is in the queue, process it individually. if let Some(item) = attestation_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } } else { // Collect two or more attestations into a batch, so they can take @@ -1301,26 +925,20 @@ impl BeaconProcessor { // // Note: this will convert the `Work::GossipAttestation` item into a // `Work::GossipAttestationBatch` item. - let mut packages = Vec::with_capacity(batch_size); + let mut attestations = Vec::with_capacity(batch_size); + let mut process_batch_opt = None; for _ in 0..batch_size { if let Some(item) = attestation_queue.pop() { match item { Work::GossipAttestation { - message_id, - peer_id, attestation, - subnet_id, - should_import, - seen_timestamp, + process_individual: _, + process_batch, } => { - packages.push(GossipAttestationPackage::new( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - )); + attestations.push(attestation); + if process_batch_opt.is_none() { + process_batch_opt = Some(process_batch); + } } _ => error!( self.log, @@ -1330,54 +948,66 @@ impl BeaconProcessor { } } - // Process all attestations with a single worker. - self.spawn_worker( - Work::GossipAttestationBatch { packages }, - toolbox, - ) + if let Some(process_batch) = process_batch_opt { + // Process all attestations with a single worker. + self.spawn_worker( + Work::GossipAttestationBatch { + attestations, + process_batch, + }, + idle_tx, + ) + } else { + // There is no good reason for this to + // happen, it is a serious logic error. + // Since we only form batches when multiple + // work items exist, we should always have a + // work closure at this point. 
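This aggregate branch, and the attestation branch below it, follow one batching pattern: drain up to `batch_size` items, collect their payloads, and reuse the first item's `process_batch` closure for the whole batch. A generic sketch under that assumption (`Queued` and `form_batch` are illustrative names; the `Vec` acts as a LIFO stack, like the attestation queues):

type BatchFn<T> = Box<dyn FnOnce(Vec<T>) + Send + Sync>;

enum Queued<T> {
    Single { payload: T, process_batch: BatchFn<T> },
}

fn form_batch<T>(
    queue: &mut Vec<Queued<T>>,
    batch_size: usize,
) -> Option<(Vec<T>, BatchFn<T>)> {
    let mut payloads = Vec::with_capacity(batch_size);
    let mut process_batch_opt = None;
    for _ in 0..batch_size {
        match queue.pop() {
            Some(Queued::Single { payload, process_batch }) => {
                payloads.push(payload);
                // The first closure popped is kept for the whole batch.
                process_batch_opt.get_or_insert(process_batch);
            }
            None => break,
        }
    }
    process_batch_opt.map(|f| (payloads, f))
}

Reusing the first closure is sound only because every queued item of a given kind is built with an equivalent batch handler, which is what the `crit!` branches above are guarding.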
+ crit!(self.log, "Missing attestations work"); + } } // Check sync committee messages after attestations as their rewards are lesser // and they don't influence fork choice. } else if let Some(item) = sync_contribution_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = sync_message_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Aggregates and unaggregates queued for re-processing are older and we // care about fresher ones, so check those first. } else if let Some(item) = unknown_block_aggregate_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = unknown_block_attestation_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check RPC methods next. Status messages are needed for sync so // prioritize them over syncing requests from other peers (BlocksByRange // and BlocksByRoot) } else if let Some(item) = status_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = bbrange_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = bbroots_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // // Check attester slashings before proposer slashings since they have the // potential to slash multiple validators at once. } else if let Some(item) = gossip_attester_slashing_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = gossip_proposer_slashing_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Check exits and address changes late since our validators don't get // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); // This statement should always be the final else statement. } else if let Some(item) = lcbootstrap_queue.pop() { - self.spawn_worker(item, toolbox); + self.spawn_worker(item, idle_tx); } else { // Let the journal know that a worker is freed and there's nothing else // for it to do. @@ -1419,13 +1049,9 @@ impl BeaconProcessor { // it. Some(WorkEvent { work, .. }) => { let work_id = work.str_id(); - let toolbox = Toolbox { - idle_tx: idle_tx.clone(), - work_reprocessing_tx: work_reprocessing_tx.clone(), - }; match work { - _ if can_spawn => self.spawn_worker(work, toolbox), + _ if can_spawn => self.spawn_worker(work, idle_tx), Work::GossipAttestation { .. } => attestation_queue.push(work), // Attestation batches are formed internally within the // `BeaconProcessor`, they are not sent from external services. @@ -1467,16 +1093,15 @@ impl BeaconProcessor { Work::GossipLightClientOptimisticUpdate { .. } => { optimistic_update_queue.push(work, work_id, &self.log) } - Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), - Work::ChainSegment { ref process_id, .. } => match process_id { - ChainSegmentProcessId::RangeBatchId { .. 
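The long `else if` ladder above is a static priority policy: an idle worker always takes from the highest-priority non-empty queue, and the inline comments document the ordering rationale. Condensed to three queues, the shape is:

use std::collections::VecDeque;

struct Queues {
    chain_segments: VecDeque<&'static str>,
    gossip_blocks: VecDeque<&'static str>,
    attestations: VecDeque<&'static str>,
}

impl Queues {
    // The order of the `or_else` chain *is* the policy: segments first,
    // then blocks, then attestations.
    fn next_work(&mut self) -> Option<&'static str> {
        self.chain_segments
            .pop_front()
            .or_else(|| self.gossip_blocks.pop_front())
            .or_else(|| self.attestations.pop_front())
    }
}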
} - | ChainSegmentProcessId::ParentLookup { .. } => { - chain_segment_queue.push(work, work_id, &self.log) - } - ChainSegmentProcessId::BackSyncBatchId { .. } => { - backfill_chain_segment.push(work, work_id, &self.log) - } - }, + Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => { + rpc_block_queue.push(work, work_id, &self.log) + } + Work::ChainSegment { .. } => { + chain_segment_queue.push(work, work_id, &self.log) + } + Work::ChainSegmentBackfill { .. } => { + backfill_chain_segment.push(work, work_id, &self.log) + } Work::Status { .. } => status_queue.push(work, work_id, &self.log), Work::BlocksByRangeRequest { .. } => { bbrange_queue.push(work, work_id, &self.log) @@ -1583,10 +1208,7 @@ impl BeaconProcessor { /// Spawns a blocking worker thread to process some `Work`. /// /// Sends an message on `idle_tx` when the work is complete and the task is stopping. - fn spawn_worker(&mut self, work: Work, toolbox: Toolbox) { - let idle_tx = toolbox.idle_tx; - let work_reprocessing_tx = toolbox.work_reprocessing_tx; - + fn spawn_worker(&mut self, work: Work, idle_tx: mpsc::Sender<()>) { let work_id = work.str_id(); let worker_timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); @@ -1609,27 +1231,8 @@ impl BeaconProcessor { let worker_id = self.current_workers; self.current_workers = self.current_workers.saturating_add(1); - let chain = if let Some(chain) = self.beacon_chain.upgrade() { - chain - } else { - debug!( - self.log, - "Beacon chain dropped, shutting down"; - ); - return; - }; - let executor = self.executor.clone(); - let worker = Worker { - chain, - network_tx: self.network_tx.clone(), - sync_tx: self.sync_tx.clone(), - log: self.log.clone(), - }; - - let duplicate_cache = self.importing_blocks.clone(); - trace!( self.log, "Spawning beacon processor worker"; @@ -1638,349 +1241,72 @@ impl BeaconProcessor { ); let task_spawner = TaskSpawner { - executor: executor.clone(), + executor, send_idle_on_drop, }; - let sub_executor = executor; match work { - /* - * Individual unaggregated attestation verification. - */ Work::GossipAttestation { - message_id, - peer_id, attestation, - subnet_id, - should_import, - seen_timestamp, + process_individual, + process_batch: _, } => task_spawner.spawn_blocking(move || { - worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - Some(work_reprocessing_tx), - seen_timestamp, - ) + process_individual(attestation); }), - /* - * Batched unaggregated attestation verification. - */ - Work::GossipAttestationBatch { packages } => task_spawner.spawn_blocking(|| { - worker.process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)) + Work::GossipAttestationBatch { + attestations, + process_batch, + } => task_spawner.spawn_blocking(move || { + process_batch(attestations); }), - /* - * Individual aggregated attestation verification. - */ Work::GossipAggregate { - message_id, - peer_id, aggregate, - seen_timestamp, + process_individual, + process_batch: _, } => task_spawner.spawn_blocking(move || { - worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - Some(work_reprocessing_tx), - seen_timestamp, - ) - }), - /* - * Batched aggregated attestation verification. - */ - Work::GossipAggregateBatch { packages } => task_spawner.spawn_blocking(|| { - worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) + process_individual(aggregate); }), - /* - * Verification for beacon blocks received on gossip. 
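The `send_idle_on_drop` value threaded into `TaskSpawner` above acts as an RAII guard: dropping it, even on panic or early return, tells the manager a worker slot is free again. A minimal sketch of such a guard (names assumed, not copied from the codebase):

use tokio::sync::mpsc;

struct SendOnDrop {
    tx: mpsc::Sender<()>,
}

impl Drop for SendOnDrop {
    fn drop(&mut self) {
        // A full or closed channel is ignorable in a sketch; a real
        // implementation would log the failure.
        let _ = self.tx.try_send(());
    }
}

fn run_worker(work: impl FnOnce(), idle_tx: mpsc::Sender<()>) {
    let _guard = SendOnDrop { tx: idle_tx };
    work();
    // `_guard` drops here, signalling idle on every exit path.
}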
- */ - Work::GossipBlock { - message_id, - peer_id, - peer_client, - block, - seen_timestamp, - } => { - let invalid_block_storage = self.invalid_block_storage.clone(); - task_spawner.spawn_async(async move { - worker - .process_gossip_block( - message_id, - peer_id, - peer_client, - block, - work_reprocessing_tx, - duplicate_cache, - invalid_block_storage, - seen_timestamp, - ) - .await - }) - } - /* - * Import for blocks that we received earlier than their intended slot. - */ - Work::DelayedImportBlock { - peer_id, - block, - seen_timestamp, - } => { - let invalid_block_storage = self.invalid_block_storage.clone(); - task_spawner.spawn_async(worker.process_gossip_verified_block( - peer_id, - *block, - work_reprocessing_tx, - invalid_block_storage, - seen_timestamp, - )) - } - /* - * Voluntary exits received on gossip. - */ - Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, + Work::GossipAggregateBatch { + aggregates, + process_batch, } => task_spawner.spawn_blocking(move || { - worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit) + process_batch(aggregates); }), - /* - * Proposer slashings received on gossip. - */ - Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) - }), - /* - * Attester slashings received on gossip. - */ - Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + Work::ChainSegment(process_fn) => task_spawner.spawn_async(async move { + process_fn.await; }), - /* - * Sync committee message verification. - */ - Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature, - subnet_id, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_sync_committee_signature( - message_id, - peer_id, - *sync_signature, - subnet_id, - seen_timestamp, - ) - }), - /* - * Sync contribution verification. - */ - Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_sync_committee_contribution( - message_id, - peer_id, - *sync_contribution, - seen_timestamp, - ) - }), - /* - * BLS to execution change verification. - */ - Work::GossipBlsToExecutionChange { - message_id, - peer_id, - bls_to_execution_change, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_bls_to_execution_change( - message_id, - peer_id, - *bls_to_execution_change, - ) - }), - /* - * Light client finality update verification. - */ - Work::GossipLightClientFinalityUpdate { - message_id, - peer_id, - light_client_finality_update, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_finality_update( - message_id, - peer_id, - *light_client_finality_update, - seen_timestamp, - ) - }), - /* - * Light client optimistic update verification. 
- */ - Work::GossipLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_optimistic_update( - message_id, - peer_id, - *light_client_optimistic_update, - Some(work_reprocessing_tx), - seen_timestamp, - ) + Work::UnknownBlockAttestation { process_fn } => task_spawner.spawn_blocking(process_fn), + Work::UnknownBlockAggregate { process_fn } => task_spawner.spawn_blocking(process_fn), + Work::UnknownLightClientOptimisticUpdate { + parent_root: _, + process_fn, + } => task_spawner.spawn_blocking(process_fn), + Work::DelayedImportBlock { + beacon_block_slot: _, + beacon_block_root: _, + process_fn, + } => task_spawner.spawn_async(process_fn), + Work::RpcBlock { process_fn } => task_spawner.spawn_async(process_fn), + Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), + Work::GossipBlock(work) => task_spawner.spawn_async(async move { + work.await; }), - /* - * Verification for beacon blocks received during syncing via RPC. - */ - Work::RpcBlock { - block_root, - block, - seen_timestamp, - process_type, - should_process, - } => task_spawner.spawn_async(worker.process_rpc_block( - block_root, - block, - seen_timestamp, - process_type, - work_reprocessing_tx, - duplicate_cache, - should_process, - )), - /* - * Verification for a chain segment (multiple blocks). - */ - Work::ChainSegment { process_id, blocks } => { - let notify_execution_layer = if self - .network_globals - .sync_state - .read() - .is_syncing_finalized() - { - NotifyExecutionLayer::No - } else { - NotifyExecutionLayer::Yes - }; - - task_spawner.spawn_async(async move { - worker - .process_chain_segment(process_id, blocks, notify_execution_layer) - .await - }) + Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { + task_spawner.spawn_blocking_with_manual_send_idle(work) } - /* - * Processing of Status Messages. - */ - Work::Status { peer_id, message } => { - task_spawner.spawn_blocking(move || worker.process_status(peer_id, message)) + Work::ChainSegmentBackfill(process_fn) => task_spawner.spawn_async(process_fn), + Work::GossipVoluntaryExit(process_fn) + | Work::GossipProposerSlashing(process_fn) + | Work::GossipAttesterSlashing(process_fn) + | Work::GossipSyncSignature(process_fn) + | Work::GossipSyncContribution(process_fn) + | Work::GossipLightClientFinalityUpdate(process_fn) + | Work::GossipLightClientOptimisticUpdate(process_fn) + | Work::Status(process_fn) + | Work::GossipBlsToExecutionChange(process_fn) + | Work::LightClientBootstrapRequest(process_fn) => { + task_spawner.spawn_blocking(process_fn) } - /* - * Processing of range syncing requests from other peers. - */ - Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blocks_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), - /* - * Processing of blocks by roots requests from other peers. - */ - Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blocks_by_root_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), - /* - * Processing of lightclient bootstrap requests from other peers. 
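With work items reduced to closures, this dispatch collapses into a routing decision: blocking closures go to a blocking thread, futures go to the async executor, and many gossip variants share a single arm via `|` patterns. Roughly, under those assumptions:

use std::future::Future;
use std::pin::Pin;

type AsyncFn = Pin<Box<dyn Future<Output = ()> + Send + Sync>>;
type BlockingFn = Box<dyn FnOnce() + Send + Sync>;

enum Job {
    Blocking(BlockingFn),
    Async(AsyncFn),
}

// The spawner decides *how* to run a job, never *what* it does.
fn dispatch(job: Job, rt: &tokio::runtime::Handle) {
    match job {
        // CPU-bound or lock-holding work goes to the blocking pool.
        Job::Blocking(f) => {
            let _ = rt.spawn_blocking(f);
        }
        // I/O-bound work is polled on the async executor.
        Job::Async(fut) => {
            let _ = rt.spawn(fut);
        }
    }
}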
- */ - Work::LightClientBootstrapRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking(move || { - worker.handle_light_client_bootstrap(peer_id, request_id, request) - }), - Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - None, // Do not allow this attestation to be re-processed beyond this point. - seen_timestamp, - ) - }), - Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - None, - seen_timestamp, - ) - }), - Work::UnknownLightClientOptimisticUpdate { - message_id, - peer_id, - light_client_optimistic_update, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_optimistic_update( - message_id, - peer_id, - *light_client_optimistic_update, - None, - seen_timestamp, - ) - }), }; } } diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs new file mode 100644 index 00000000000..65ab0bd8fc5 --- /dev/null +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -0,0 +1,141 @@ +pub use lighthouse_metrics::*; + +lazy_static::lazy_static! { + + /* + * Gossip processor + */ + pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_rx_count", + "Count of work events received (but not necessarily processed)", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_ignored_count", + "Count of work events purposefully ignored", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_started_count", + "Count of work events which have been started by a worker", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORKER_TIME: Result = try_create_histogram_vec( + "beacon_processor_worker_time", + "Time taken for a worker to fully process some parcel of work.", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result = try_create_int_counter( + "beacon_processor_workers_spawned_total", + "The number of workers ever spawned by the gossip processing pool." + ); + pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_workers_active_total", + "Count of active workers in the gossip processing pool." + ); + pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result = try_create_int_counter( + "beacon_processor_idle_events_total", + "Count of idle events processed by the gossip processor manager." + ); + pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result = try_create_histogram( + "beacon_processor_event_handling_seconds", + "Time spent handling a new message and allocating it to a queue or worker." + ); + // Gossip blocks. + pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_gossip_block_queue_total", + "Count of blocks from gossip waiting to be verified." + ); + // Gossip Exits. 
+ pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_exit_queue_total", + "Count of exits from gossip waiting to be verified." + ); + // Gossip proposer slashings. + pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_proposer_slashing_queue_total", + "Count of proposer slashings from gossip waiting to be verified." + ); + // Gossip attester slashings. + pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_attester_slashing_queue_total", + "Count of attester slashings from gossip waiting to be verified." + ); + // Gossip BLS to execution changes. + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_bls_to_execution_change_queue_total", + "Count of address changes from gossip waiting to be verified." + ); + // Rpc blocks. + pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_rpc_block_queue_total", + "Count of blocks from the rpc waiting to be verified." + ); + // Chain segments. + pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_chain_segment_queue_total", + "Count of chain segments from the rpc waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_backfill_chain_segment_queue_total", + "Count of backfill chain segments from the rpc waiting to be verified." + ); + // Unaggregated attestations. + pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_unaggregated_attestation_queue_total", + "Count of unagg. attestations waiting to be processed." + ); + // Aggregated attestations. + pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_aggregated_attestation_queue_total", + "Count of agg. attestations waiting to be processed." + ); + // Sync committee messages. + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_sync_message_queue_total", + "Count of sync committee messages waiting to be processed." + ); + // Sync contribution. + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( + "beacon_processor_sync_contribution_queue_total", + "Count of sync committee contributions waiting to be processed." + ); + + /* + * Attestation reprocessing queue metrics. + */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result<IntGaugeVec> = + try_create_int_gauge_vec( + "beacon_processor_reprocessing_queue_total", + "Count of items in a reprocessing queue.", + &["type"] + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_attestations", + "Number of queued attestations which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_attestations", + "Number of queued attestations where a matching block has been imported." + ); + + /* + * Light client update reprocessing queue metrics. 
+ */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_optimistic_updates", + "Number of queued light client optimistic updates which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_optimistic_updates", + "Number of queued light client optimistic updates where a matching block has been imported." + ); + + /// Errors and Debugging Stats + pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result<IntCounterVec> = + try_create_int_counter_vec( + "beacon_processor_send_error_per_work_type", + "Total number of beacon processor send errors per work type", + &["type"] + ); +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs similarity index 85% rename from beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs rename to beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 427be6d5138..608f634d537 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -10,23 +10,18 @@ //! //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! block will be re-queued until their block is imported, or until they expire. -use super::MAX_SCHEDULED_WORK_QUEUE_LEN; -use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; -use crate::sync::manager::BlockProcessType; -use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use crate::{AsyncFn, BlockingFn, Work, WorkEvent}; use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; use itertools::Itertools; -use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::future::Future; use std::pin::Pin; -use std::sync::Arc; use std::task::Context; use std::time::Duration; use strum::AsRefStr; @@ -34,10 +29,7 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{ - Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, - SignedBeaconBlock, SubnetId, -}; +use types::{EthSpec, Hash256, Slot}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; @@ -47,7 +39,7 @@ const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. /// This is to account for any slight drift in the system clock. -const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); +pub const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); @@ -84,12 +76,12 @@ pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ /// Messages that the scheduler can receive. 
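The delays these constants configure are implemented with `tokio_util`'s `DelayQueue`, which the scheduler below builds on: items go in with a deadline and surface once it passes. A self-contained sketch (API as in tokio-util 0.7; the 0.6 series pinned here additionally wraps expired items in a `Result`):

use std::time::Duration;

use futures::future::poll_fn;
use tokio_util::time::DelayQueue;

#[tokio::main]
async fn main() {
    let mut queue: DelayQueue<&'static str> = DelayQueue::new();

    // Deadlines, not insertion order, decide when items come back out.
    queue.insert("early-block", Duration::from_millis(50));
    queue.insert("attestation", Duration::from_millis(10));

    while let Some(expired) = poll_fn(|cx| queue.poll_expired(cx)).await {
        println!("ready: {}", expired.into_inner()); // "attestation" first
    }
}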
#[derive(AsRefStr)] -pub enum ReprocessQueueMessage { +pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. - EarlyBlock(QueuedGossipBlock), + EarlyBlock(QueuedGossipBlock), /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. - RpcBlock(QueuedRpcBlock), + RpcBlock(QueuedRpcBlock), /// A block that was successfully processed. We use this to handle attestations and light client updates /// for unknown blocks. BlockImported { @@ -97,139 +89,127 @@ pub enum ReprocessQueueMessage { parent_root: Hash256, }, /// An unaggregated attestation that references an unknown block. - UnknownBlockUnaggregate(QueuedUnaggregate), + UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. - UnknownBlockAggregate(QueuedAggregate), + UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. - UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), /// A new backfill batch that needs to be scheduled for processing. - BackfillSync(QueuedBackfillBatch), + BackfillSync(QueuedBackfillBatch), } /// Events sent by the scheduler once they are ready for re-processing. -pub enum ReadyWork { - Block(QueuedGossipBlock), - RpcBlock(QueuedRpcBlock), - Unaggregate(QueuedUnaggregate), - Aggregate(QueuedAggregate), - LightClientUpdate(QueuedLightClientUpdate), - BackfillSync(QueuedBackfillBatch), +pub enum ReadyWork { + Block(QueuedGossipBlock), + RpcBlock(QueuedRpcBlock), + IgnoredRpcBlock(IgnoredRpcBlock), + Unaggregate(QueuedUnaggregate), + Aggregate(QueuedAggregate), + LightClientUpdate(QueuedLightClientUpdate), + BackfillSync(QueuedBackfillBatch), } /// An Attestation for which the corresponding block was not seen while processing, queued for /// later. -pub struct QueuedUnaggregate { - pub peer_id: PeerId, - pub message_id: MessageId, - pub attestation: Box>, - pub subnet_id: SubnetId, - pub should_import: bool, - pub seen_timestamp: Duration, +pub struct QueuedUnaggregate { + pub beacon_block_root: Hash256, + pub process_fn: BlockingFn, } /// An aggregated attestation for which the corresponding block was not seen while processing, queued for /// later. -pub struct QueuedAggregate { - pub peer_id: PeerId, - pub message_id: MessageId, - pub attestation: Box>, - pub seen_timestamp: Duration, +pub struct QueuedAggregate { + pub beacon_block_root: Hash256, + pub process_fn: BlockingFn, } /// A light client update for which the corresponding parent block was not seen while processing, /// queued for later. -pub struct QueuedLightClientUpdate { - pub peer_id: PeerId, - pub message_id: MessageId, - pub light_client_optimistic_update: Box>, +pub struct QueuedLightClientUpdate { pub parent_root: Hash256, - pub seen_timestamp: Duration, + pub process_fn: BlockingFn, } /// A block that arrived early and has been queued for later import. -pub struct QueuedGossipBlock { - pub peer_id: PeerId, - pub block: Box>, - pub seen_timestamp: Duration, +pub struct QueuedGossipBlock { + pub beacon_block_slot: Slot, + pub beacon_block_root: Hash256, + pub process_fn: AsyncFn, } /// A block that arrived for processing when the same block was being imported over gossip. /// It is queued for later import. 
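Each queued type now keeps only what the scheduler indexes on (a block root or a slot) beside an opaque closure, so the queue never inspects attestations or blocks again. Stripped to its essentials, with `Hash256` reduced to a byte array:

type BlockingFn = Box<dyn FnOnce() + Send + Sync>;

struct QueuedItem {
    beacon_block_root: [u8; 32], // stand-in for `Hash256`
    process_fn: BlockingFn,
}

impl QueuedItem {
    // The scheduler groups items per root without touching the payload.
    fn beacon_block_root(&self) -> &[u8; 32] {
        &self.beacon_block_root
    }
}

fn run(item: QueuedItem) {
    (item.process_fn)();
}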
-pub struct QueuedRpcBlock { - pub block_root: Hash256, - pub block: Arc>, - pub process_type: BlockProcessType, - pub seen_timestamp: Duration, - /// Indicates if the beacon chain should process this block or not. - /// We use this to ignore block processing when rpc block queues are full. - pub should_process: bool, +pub struct QueuedRpcBlock { + pub beacon_block_root: Hash256, + /// Processes/imports the block. + pub process_fn: AsyncFn, + /// Ignores the block. + pub ignore_fn: BlockingFn, } -/// A backfill batch work that has been queued for processing later. -#[derive(Clone)] -pub struct QueuedBackfillBatch { - pub process_id: ChainSegmentProcessId, - pub blocks: Vec>>, +/// A block that arrived for processing when the same block was being imported over gossip. +/// It is queued for later import. +pub struct IgnoredRpcBlock { + pub process_fn: BlockingFn, } -impl TryFrom> for QueuedBackfillBatch { +/// A backfill batch work that has been queued for processing later. +pub struct QueuedBackfillBatch(pub AsyncFn); + +impl TryFrom> for QueuedBackfillBatch { type Error = WorkEvent; fn try_from(event: WorkEvent) -> Result> { match event { WorkEvent { - work: - Work::ChainSegment { - process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), - blocks, - }, + work: Work::ChainSegmentBackfill(process_fn), .. - } => Ok(QueuedBackfillBatch { process_id, blocks }), + } => Ok(QueuedBackfillBatch(process_fn)), _ => Err(event), } } } -impl From> for WorkEvent { - fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { - WorkEvent::chain_segment( - queued_backfill_batch.process_id, - queued_backfill_batch.blocks, - ) +impl From for WorkEvent { + fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { + WorkEvent { + drop_during_sync: false, + work: Work::ChainSegmentBackfill(queued_backfill_batch.0), + } } } /// Unifies the different messages processed by the block delay queue. -enum InboundEvent { +enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. - ReadyGossipBlock(QueuedGossipBlock), + ReadyGossipBlock(QueuedGossipBlock), /// A rpc block that was queued because the same gossip block was being imported /// will now be retried for import. - ReadyRpcBlock(QueuedRpcBlock), + ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. ReadyAttestation(QueuedAttestationId), /// A light client update that is ready for re-processing. ReadyLightClientUpdate(QueuedLightClientUpdateId), /// A backfill batch that was queued is ready for processing. - ReadyBackfillSync(QueuedBackfillBatch), + ReadyBackfillSync(QueuedBackfillBatch), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` - Msg(ReprocessQueueMessage), + Msg(ReprocessQueueMessage), } /// Manages scheduling works that need to be later re-processed. -struct ReprocessQueue { +struct ReprocessQueue { /// Receiver of messages relevant to schedule works for reprocessing. - work_reprocessing_rx: Receiver>, + work_reprocessing_rx: Receiver, /// Sender of works once they become ready - ready_work_tx: Sender>, + ready_work_tx: Sender, /* Queues */ /// Queue to manage scheduled early blocks. - gossip_block_delay_queue: DelayQueue>, + gossip_block_delay_queue: DelayQueue, /// Queue to manage scheduled early blocks. - rpc_block_delay_queue: DelayQueue>, + rpc_block_delay_queue: DelayQueue, /// Queue to manage scheduled attestations. 
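Note the error type of the `TryFrom<WorkEvent>` impl above: a non-matching event is returned intact as the `Err` value, so nothing is lost on a failed downcast. The same shape in a self-contained toy:

enum Work {
    Backfill(String),
    Other(u32),
}

struct WorkEvent {
    work: Work,
}

struct QueuedBackfillBatch(String);

impl TryFrom<WorkEvent> for QueuedBackfillBatch {
    // The rejected event itself is the error, so the caller keeps ownership.
    type Error = WorkEvent;

    fn try_from(event: WorkEvent) -> Result<Self, WorkEvent> {
        match event {
            WorkEvent {
                work: Work::Backfill(payload),
            } => Ok(QueuedBackfillBatch(payload)),
            other => Err(other),
        }
    }
}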
attestations_delay_queue: DelayQueue, /// Queue to manage scheduled light client updates. @@ -239,17 +219,17 @@ struct ReprocessQueue { /// Queued blocks. queued_gossip_block_roots: HashSet, /// Queued aggregated attestations. - queued_aggregates: FnvHashMap, DelayKey)>, + queued_aggregates: FnvHashMap, /// Queued attestations. - queued_unaggregates: FnvHashMap, DelayKey)>, + queued_unaggregates: FnvHashMap, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, /// Queued Light Client Updates. - queued_lc_updates: FnvHashMap, DelayKey)>, + queued_lc_updates: FnvHashMap, /// Light Client Updates per parent_root. awaiting_lc_updates_per_parent_root: HashMap>, /// Queued backfill batches - queued_backfill_batches: Vec>, + queued_backfill_batches: Vec, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations @@ -260,7 +240,7 @@ struct ReprocessQueue { attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, next_backfill_batch_event: Option>>, - slot_clock: Pin>, + slot_clock: Pin>, } pub type QueuedLightClientUpdateId = usize; @@ -271,20 +251,20 @@ enum QueuedAttestationId { Unaggregate(usize), } -impl QueuedAggregate { +impl QueuedAggregate { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation.message.aggregate.data.beacon_block_root + &self.beacon_block_root } } -impl QueuedUnaggregate { +impl QueuedUnaggregate { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation.data.beacon_block_root + &self.beacon_block_root } } -impl Stream for ReprocessQueue { - type Item = InboundEvent; +impl Stream for ReprocessQueue { + type Item = InboundEvent; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // NOTE: implementing `Stream` is not necessary but allows to maintain the future selection @@ -375,16 +355,13 @@ impl Stream for ReprocessQueue { /// Starts the job that manages scheduling works that need re-processing. The returned `Sender` /// gives the communicating channel to receive those works. Once a work is ready, it is sent back /// via `ready_work_tx`. -pub fn spawn_reprocess_scheduler( - ready_work_tx: Sender>, +pub fn spawn_reprocess_scheduler( + ready_work_tx: Sender, + work_reprocessing_rx: Receiver, executor: &TaskExecutor, - slot_clock: T::SlotClock, + slot_clock: S, log: Logger, -) -> Sender> { - let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - // Basic sanity check. - assert!(ADDITIONAL_QUEUED_BLOCK_DELAY < MAXIMUM_GOSSIP_CLOCK_DISPARITY); - +) { let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, @@ -423,19 +400,17 @@ pub fn spawn_reprocess_scheduler( }, TASK_NAME, ); - - work_reprocessing_tx } -impl ReprocessQueue { - fn handle_message(&mut self, msg: InboundEvent, slot_clock: &T::SlotClock, log: &Logger) { +impl ReprocessQueue { + fn handle_message(&mut self, msg: InboundEvent, slot_clock: &S, log: &Logger) { use ReprocessQueueMessage::*; match msg { // Some block has been indicated as "early" and should be processed when the // appropriate slot arrives. InboundEvent::Msg(EarlyBlock(early_block)) => { - let block_slot = early_block.block.block.slot(); - let block_root = early_block.block.block_root; + let block_slot = early_block.beacon_block_slot; + let block_root = early_block.beacon_block_root; // Don't add the same block to the queue twice. This prevents DoS attacks. 
if self.queued_gossip_block_roots.contains(&block_root) { @@ -494,7 +469,7 @@ impl ReprocessQueue { // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` // and then send the rpc block back for processing assuming the gossip import // has completed by then. - InboundEvent::Msg(RpcBlock(mut rpc_block)) => { + InboundEvent::Msg(RpcBlock(rpc_block)) => { // Check to ensure this won't over-fill the queue. if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { if self.rpc_block_debounce.elapsed() { @@ -507,10 +482,11 @@ impl ReprocessQueue { } // Return the block to the beacon processor signalling to // ignore processing for this block - rpc_block.should_process = false; if self .ready_work_tx - .try_send(ReadyWork::RpcBlock(rpc_block)) + .try_send(ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { + process_fn: rpc_block.ignore_fn, + })) .is_err() { error!( @@ -529,7 +505,7 @@ impl ReprocessQueue { debug!( log, "Sending rpc block for reprocessing"; - "block_root" => %queued_rpc_block.block.canonical_root() + "block_root" => %queued_rpc_block.beacon_block_root ); if self .ready_work_tx @@ -767,7 +743,7 @@ impl ReprocessQueue { } // A block that was queued for later processing is now ready to be processed. InboundEvent::ReadyGossipBlock(ready_block) => { - let block_root = ready_block.block.block_root; + let block_root = ready_block.beacon_block_root; if !self.queued_gossip_block_roots.remove(&block_root) { // Log an error to alert that we've made a bad assumption about how this @@ -885,18 +861,28 @@ impl ReprocessQueue { "millis_from_slot_start" => millis_from_slot_start ); - if self + match self .ready_work_tx - .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) - .is_err() + .try_send(ReadyWork::BackfillSync(queued_backfill_batch)) { - error!( + // The message was sent successfully. + Ok(()) => (), + // The message was not sent, recover it from the returned `Err`. + Err(mpsc::error::TrySendError::Full(ReadyWork::BackfillSync(batch))) + | Err(mpsc::error::TrySendError::Closed(ReadyWork::BackfillSync(batch))) => { + error!( + log, + "Failed to send scheduled backfill work"; + "info" => "sending work back to queue" + ); + self.queued_backfill_batches.insert(0, batch) + } + // The message was not sent and we didn't get the correct + // return result. This is a logic error. + _ => crit!( log, - "Failed to send scheduled backfill work"; - "info" => "sending work back to queue" - ); - self.queued_backfill_batches - .insert(0, queued_backfill_batch); + "Unexpected return from try_send error"; + ), } } } @@ -927,7 +913,7 @@ impl ReprocessQueue { // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue if !self.queued_backfill_batches.is_empty() { self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( - ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), + ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), ))); } else { self.next_backfill_batch_event = None @@ -936,7 +922,7 @@ impl ReprocessQueue { /// Returns duration until the next scheduled processing time. 
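The backfill branch above exploits a detail of `tokio::sync::mpsc::Sender::try_send`: both error variants hand the rejected message back by value, which is what lets this diff drop the old `Clone` on `QueuedBackfillBatch` and still re-queue the batch. Distilled:

use tokio::sync::mpsc::{self, error::TrySendError};

fn send_or_requeue(tx: &mpsc::Sender<String>, msg: String, queue: &mut Vec<String>) {
    match tx.try_send(msg) {
        Ok(()) => (),
        // Both variants return the rejected message by value, so it can be
        // put back at the front of the local queue.
        Err(TrySendError::Full(msg)) | Err(TrySendError::Closed(msg)) => {
            queue.insert(0, msg);
        }
    }
}

The diff's match is wordier only because its channel carries the whole `ReadyWork` enum, so the batch must be re-extracted from the returned variant; anything else hitting the wildcard arm is a logic error, hence the `crit!`.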
The schedule ensure that backfill /// processing is done in windows of time that aren't critical - fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { + fn duration_until_next_backfill_batch_event(slot_clock: &S) -> Duration { let slot_duration = slot_clock.slot_duration(); slot_clock .millis_from_current_slot_start() @@ -966,16 +952,9 @@ impl ReprocessQueue { #[cfg(test)] mod tests { use super::*; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; use slot_clock::TestingSlotClock; - use store::MemoryStore; - use types::MainnetEthSpec as E; use types::Slot; - type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; - #[test] fn backfill_processing_schedule_calculation() { let slot_duration = Duration::from_secs(12); @@ -988,7 +967,7 @@ mod tests { for &event_duration_from_slot_start in event_times.iter() { let duration_to_next_event = - ReprocessQueue::::duration_until_next_backfill_batch_event( + ReprocessQueue::::duration_until_next_backfill_batch_event( &slot_clock, ); @@ -1005,7 +984,7 @@ mod tests { // check for next event beyond the current slot let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); let duration_to_next_event = - ReprocessQueue::::duration_until_next_backfill_batch_event( + ReprocessQueue::::duration_until_next_backfill_batch_event( &slot_clock, ); assert_eq!( diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 64c79ea668b..72b6a6c7d47 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -43,3 +43,5 @@ slasher = { path = "../../slasher" } slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } +beacon_processor = { path = "../beacon_processor" } +num_cpus = "1.13.0" diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index e556f9ab598..b1a507eaa59 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -11,7 +11,11 @@ use beacon_chain::{ slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, - BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler, + BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, +}; +use beacon_processor::{ + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend, + WorkEvent, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, }; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; @@ -27,12 +31,13 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; +use std::cmp; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; -use tokio::sync::oneshot; +use tokio::sync::{mpsc, oneshot}; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, @@ -72,6 +77,10 @@ pub struct ClientBuilder { http_metrics_config: http_metrics::Config, slasher: Option>>, eth_spec_instance: T::EthSpec, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_receive: mpsc::Receiver>, + work_reprocessing_tx: mpsc::Sender, + work_reprocessing_rx: mpsc::Receiver, } impl @@ 
-87,6 +96,10 @@ where /// /// The `eth_spec_instance` parameter is used to concretize `TEthSpec`. pub fn new(eth_spec_instance: TEthSpec) -> Self { + let (beacon_processor_send, beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); + let (work_reprocessing_tx, work_reprocessing_rx) = + mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); Self { slot_clock: None, store: None, @@ -104,6 +117,10 @@ where http_metrics_config: <_>::default(), slasher: None, eth_spec_instance, + beacon_processor_send: BeaconProcessorSend(beacon_processor_send), + beacon_processor_receive, + work_reprocessing_tx, + work_reprocessing_rx, } } @@ -167,8 +184,10 @@ where .store(store) .task_executor(context.executor.clone()) .custom_spec(spec.clone()) + .store_migrator_config( + MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration), + ) .chain_config(chain_config) - .store_migrator_config(config.store_migrator.clone()) .graffiti(graffiti) .event_handler(event_handler) .execution_layer(execution_layer) @@ -569,6 +588,8 @@ where gossipsub_registry .as_mut() .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + self.beacon_processor_send.clone(), + self.work_reprocessing_tx.clone(), ) .await .map_err(|e| format!("Failed to start network: {:?}", e))?; @@ -756,6 +777,27 @@ where } if let Some(beacon_chain) = self.beacon_chain.as_ref() { + if let Some(network_globals) = &self.network_globals { + let beacon_processor_context = runtime_context.service_context("bproc".into()); + BeaconProcessor { + network_globals: network_globals.clone(), + executor: beacon_processor_context.executor.clone(), + max_workers: cmp::max(1, num_cpus::get()), + current_workers: 0, + enable_backfill_rate_limiting: beacon_chain + .config + .enable_backfill_rate_limiting, + log: beacon_processor_context.log().clone(), + } + .spawn_manager( + self.beacon_processor_receive, + self.work_reprocessing_tx, + self.work_reprocessing_rx, + None, + beacon_chain.slot_clock.clone(), + ); + } + let state_advance_context = runtime_context.service_context("state_advance".into()); let state_advance_log = state_advance_context.log().clone(); spawn_state_advance_timer( diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 5701d428abd..95a00b37492 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,4 +1,3 @@ -use beacon_chain::migrate::MigratorConfig; use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; @@ -71,7 +70,6 @@ pub struct Config { /// via the CLI at runtime, instead of from a configuration file saved to disk. 
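The builder now owns the processor's channels from construction, so senders can be handed to the network service before the manager task is spawned. The wiring reduces to roughly the following (queue bounds and payload types are illustrative stand-ins; the worker-count rule `max(1, num_cpus)` is taken from the diff):

use tokio::sync::mpsc;

struct WorkEvent; // stand-in for `WorkEvent<E>`
struct ReprocessQueueMessage; // stand-in

// Illustrative bounds; the real constants live in `beacon_processor`.
const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384;
const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN;

fn main() {
    let (beacon_processor_tx, beacon_processor_rx) =
        mpsc::channel::<WorkEvent>(MAX_WORK_EVENT_QUEUE_LEN);
    let (work_reprocessing_tx, work_reprocessing_rx) =
        mpsc::channel::<ReprocessQueueMessage>(MAX_SCHEDULED_WORK_QUEUE_LEN);

    // One worker per CPU, but never zero.
    let max_workers = std::cmp::max(1, num_cpus::get());

    // Senders are cloned into producers (network, HTTP API); receivers are
    // moved into `spawn_manager` once the chain is built.
    let _ = (
        beacon_processor_tx,
        beacon_processor_rx,
        work_reprocessing_tx,
        work_reprocessing_rx,
        max_workers,
    );
}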
pub genesis: ClientGenesis, pub store: store::StoreConfig, - pub store_migrator: MigratorConfig, pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, pub eth1: eth1::Config, @@ -93,7 +91,6 @@ impl Default for Config { log_file: PathBuf::from(""), genesis: <_>::default(), store: <_>::default(), - store_migrator: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), dummy_eth1_backend: false, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 49f97bf15f4..0a6a621c225 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -41,9 +41,9 @@ lazy_static = "1.4.0" ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } +mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } +ssz_rs = "0.9.0" tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2720569c8d8..d72686baf55 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -163,7 +163,7 @@ impl> BlockProposalContents ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }, )), Err(reason) if !reason.payload_invalid() => { @@ -913,7 +913,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }, )), // If the payload is valid then use it. The local EE failed @@ -922,7 +922,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }, )), Err(reason) => { @@ -1129,7 +1129,7 @@ impl ExecutionLayer { Ok(BlockProposalContents::Payload { payload: execution_payload.into(), block_value, - _phantom: PhantomData::default(), + _phantom: PhantomData, }) }) .await @@ -2018,6 +2018,22 @@ async fn timed_future, T>(metric: &str, future: F) -> (T, (result, duration) } +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { + None +} + +#[cfg(test)] +/// Returns the duration since the unix epoch. +fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} + #[cfg(test)] mod test { use super::*; @@ -2164,19 +2180,3 @@ mod test { .await; } } - -fn noop( - _: &ExecutionLayer, - _: ExecutionPayloadRef, -) -> Option> { - None -} - -#[cfg(test)] -/// Returns the duration since the unix epoch. 
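A small cleanup threaded through this diff (the `execution_layer` hunks above) replaces `PhantomData::default()` with the plain `PhantomData` literal. A minimal illustration with a hypothetical stand-in struct: `PhantomData` is a unit struct, so it can be named directly, and recent clippy flags the `default()` form (`default_constructed_unit_structs`).

```rust
use std::marker::PhantomData;

/// Simplified stand-in for a struct that is generic over a type it never
/// stores at runtime.
struct ProposalContents<E> {
    block_value: u64,
    /// Zero-sized marker tying the struct to `E`.
    _phantom: PhantomData<E>,
}

fn main() {
    // Naming the unit struct is enough; `PhantomData::default()` also
    // compiles but triggers the clippy lint fixed by this diff.
    let contents: ProposalContents<u32> = ProposalContents {
        block_value: 42,
        _phantom: PhantomData,
    };
    println!("block value: {}", contents.block_value);
}
```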
-fn timestamp_now() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_secs() -} diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 668d1fb3b1c..9471be636dc 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -11,11 +11,17 @@ use ethereum_consensus::{ }; use fork_choice::ForkchoiceUpdateParameters; use mev_rs::{ - bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, - capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, - sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, - BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, - SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, + blinded_block_provider::Server as BlindedBlockProviderServer, + signing::{sign_builder_message, verify_signed_builder_message}, + types::{ + bellatrix::{ + BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, + }, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, + BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, + SignedBuilderBid, SignedValidatorRegistration, + }, + Error as MevError, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -47,7 +53,7 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), MevError> { match self { Operation::FeeRecipient(fee_recipient) => { *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? 
@@ -73,7 +79,7 @@ pub trait BidStuff { fn prev_randao_mut(&mut self) -> &mut Hash32; fn block_number_mut(&mut self) -> &mut u64; fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; fn sign_builder_message( &mut self, @@ -134,11 +140,9 @@ impl BidStuff for BuilderBid { } } - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { match self { - Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( - "withdrawals_root called on bellatrix bid".to_string(), - )), + Self::Bellatrix(_) => Err(MevError::InvalidFork), Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), } } @@ -274,7 +278,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; @@ -288,7 +292,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), BlindedBlockProviderError> { + ) -> Result<(), MevError> { for registration in registrations { let pubkey = registration.message.public_key.clone(); let message = &mut registration.message; @@ -307,10 +311,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { Ok(()) } - async fn fetch_best_bid( - &self, - bid_request: &BidRequest, - ) -> Result { + async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { let slot = Slot::new(bid_request.slot); let fork = self.spec.fork_name_at_slot::(slot); let signed_cached_data = self @@ -336,7 +337,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { - return Err(BlindedBlockProviderError::Custom(format!( + return Err(custom_err(format!( "head mismatch: {} {}", head_execution_hash, bid_request.parent_hash ))); @@ -396,7 +397,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { .get_debug_beacon_states(StateId::Head) .await .map_err(convert_err)? - .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? + .ok_or_else(|| custom_err("missing head state".to_string()))? 
.data; let prev_randao = head_state .get_randao_mix(head_state.current_epoch()) @@ -409,10 +410,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) } ForkName::Base | ForkName::Altair => { - return Err(BlindedBlockProviderError::Custom(format!( - "Unsupported fork: {}", - fork - ))); + return Err(MevError::InvalidFork); } }; @@ -452,12 +450,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, public_key: self.builder_sk.public_key(), }), - ForkName::Base | ForkName::Altair => { - return Err(BlindedBlockProviderError::Custom(format!( - "Unsupported fork: {}", - fork - ))) - } + ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), }; *message.gas_limit_mut() = cached_data.gas_limit; @@ -475,7 +468,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let node = match signed_block { SignedBlindedBeaconBlock::Bellatrix(block) => { block.message.body.execution_payload_header.hash_tree_root() @@ -496,9 +489,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { } } -pub fn from_ssz_rs( - ssz_rs_data: &T, -) -> Result { +pub fn from_ssz_rs(ssz_rs_data: &T) -> Result { U::from_ssz_bytes( ssz_rs::serialize(ssz_rs_data) .map_err(convert_err)? @@ -507,12 +498,17 @@ pub fn from_ssz_rs( .map_err(convert_err) } -pub fn to_ssz_rs( - ssz_data: &T, -) -> Result { +pub fn to_ssz_rs(ssz_data: &T) -> Result { ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) } -fn convert_err(e: E) -> BlindedBlockProviderError { - BlindedBlockProviderError::Custom(format!("{e:?}")) +fn convert_err(e: E) -> MevError { + custom_err(format!("{e:?}")) +} + +// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`. 
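The `custom_err` helper defined just below threads arbitrary strings through the variants that remain on `mev_rs::Error`. A self-contained sketch of the same workaround, using a stand-in error enum rather than the real `mev_rs`/`ethereum_consensus` types:

```rust
use std::io;

/// Stand-in for an upstream error enum whose `Custom` variant was removed.
#[derive(Debug)]
enum UpstreamError {
    Io(io::Error),
    InvalidFork,
}

/// Smuggle an arbitrary message through the remaining `Io` variant,
/// mirroring the `custom_err` workaround (names here are illustrative).
fn custom_err(s: String) -> UpstreamError {
    UpstreamError::Io(io::Error::new(io::ErrorKind::Other, s))
}

fn convert_err<E: std::fmt::Debug>(e: E) -> UpstreamError {
    custom_err(format!("{e:?}"))
}

fn main() {
    println!("{:?}", convert_err("head mismatch"));
    println!("{:?}", UpstreamError::InvalidFork);
}
```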
+fn custom_err(s: String) -> MevError { + MevError::Consensus(ethereum_consensus::state_transition::Error::Io( + std::io::Error::new(std::io::ErrorKind::Other, s), + )) } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index e2295fc2022..7e5e30ad932 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -75,7 +75,7 @@ impl PackingEfficiencyHandler { available_attestations: HashSet::new(), included_attestations: HashMap::new(), committee_store: CommitteeStore::new(), - _phantom: PhantomData::default(), + _phantom: PhantomData, }; handler.compute_epoch(start_epoch, &starting_state, spec)?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 380acfc2a34..759c5035f98 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2052,15 +2052,11 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::end()) .and(warp::body::json()) - .and(log_filter.clone()) .and_then( - |chain: Arc>, - epoch: Epoch, - validators: Vec, - log: Logger| { + |chain: Arc>, epoch: Epoch, validators: Vec| { blocking_json_task(move || { let attestation_rewards = chain - .compute_attestation_rewards(epoch, validators, log) + .compute_attestation_rewards(epoch, validators) .map_err(|e| match e { BeaconChainError::MissingBeaconState(root) => { warp_utils::reject::custom_not_found(format!( diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index d5e7d3df0f9..a387983bc3f 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -70,15 +70,32 @@ impl StateId { .map_err(BeaconChainError::DBError) .map_err(warp_utils::reject::beacon_chain_error)? { - let execution_optimistic = chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) - .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; - let finalized = chain - .is_finalized_state(root, hot_summary.slot) + let finalization_status = chain + .state_finalization_and_canonicity(root, hot_summary.slot) .map_err(warp_utils::reject::beacon_chain_error)?; + let finalized = finalization_status.is_finalized(); + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let execution_optimistic = if finalization_status.slot_is_finalized + && !finalization_status.canonical + { + // This block is permanently orphaned and has likely been pruned from fork + // choice. If it isn't found in fork choice, mark it optimistic to be on the + // safe side. + fork_choice + .is_optimistic_or_invalid_block_no_fallback( + &hot_summary.latest_block_root, + ) + .unwrap_or(true) + } else { + // This block is either old and finalized, or recent and unfinalized, so + // it's safe to fallback to the optimistic status of the finalized block. + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)? 
+ }; return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 70a9ba77bac..0e184b9741b 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -327,11 +327,8 @@ async fn sync_committee_indices_across_fork() { /// Assert that an HTTP API error has the given status code and indexed errors for the given indices. fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec) { - let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { - code, - failures, - .. - }) = error else { + let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { code, failures, .. }) = error + else { panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") }; assert_eq!(code, status_code); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index da92419744e..d7ea7c26284 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -2,8 +2,9 @@ use beacon_chain::{ chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + ChainConfig, }; -use eth2::types::DepositContractData; +use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; @@ -17,7 +18,7 @@ use std::time::Duration; use tree_hash::TreeHash; use types::{ Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - MainnetEthSpec, ProposerPreparationData, Slot, + MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, }; type E = MainnetEthSpec; @@ -48,6 +49,76 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } +// Test that state lookups by root function correctly for states that are finalized but still +// present in the hot database, and have had their block pruned from fork choice. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn state_by_root_pruned_from_fork_choice() { + type E = MinimalEthSpec; + + let validator_count = 24; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(move |builder| { + builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + .chain_config(ChainConfig { + epochs_per_migration: 1024, + ..ChainConfig::default() + }) + })), + None, + ) + .await; + + let client = &tester.client; + let harness = &tester.harness; + + // Create some chain depth and finalize beyond fork choice's pruning depth. + let num_epochs = 8_u64; + let num_initial = num_epochs * E::slots_per_epoch(); + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + ) + .await; + + // Should now be finalized. + let finalized_epoch = harness.finalized_checkpoint().epoch; + assert_eq!(finalized_epoch, num_epochs - 2); + + // The split slot should still be at 0. + assert_eq!(harness.chain.store.get_split_slot(), 0); + + // States that are between the split and the finalized slot should be able to be looked up by + // state root. 
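The test loop that follows exercises exactly the `state_id.rs` branch added above. For reference, a condensed sketch of that two-branch optimistic-status decision, with simplified stand-in types (the real code consults fork choice under a read lock):

```rust
/// Simplified stand-ins for the types used by the state-root lookup above.
struct FinalizationStatus {
    slot_is_finalized: bool,
    canonical: bool,
}

/// Decide the `execution_optimistic` flag for a state. `fork_choice_lookup`
/// is `None` when the block is no longer known to fork choice (pruned).
fn execution_optimistic(status: &FinalizationStatus, fork_choice_lookup: Option<bool>) -> bool {
    if status.slot_is_finalized && !status.canonical {
        // Permanently orphaned: the block may have been pruned from fork
        // choice, so err on the safe side and report it as optimistic.
        fork_choice_lookup.unwrap_or(true)
    } else {
        // Old-and-finalized or recent-and-unfinalized: the real code falls
        // back to the finalized block's status here; `false` stands in for
        // a fully verified chain.
        fork_choice_lookup.unwrap_or(false)
    }
}

fn main() {
    let orphaned = FinalizationStatus { slot_is_finalized: true, canonical: false };
    assert!(execution_optimistic(&orphaned, None));

    let finalized = FinalizationStatus { slot_is_finalized: true, canonical: true };
    assert!(!execution_optimistic(&finalized, Some(false)));
    println!("optimistic-status fallback behaves as described");
}
```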
+    for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() {
+        let state_root = harness
+            .chain
+            .state_root_at_slot(Slot::new(slot))
+            .unwrap()
+            .unwrap();
+        let response = client
+            .get_debug_beacon_states::<E>(StateId::Root(state_root))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert!(response.finalized.unwrap());
+        assert!(!response.execution_optimistic.unwrap());
+
+        let mut state = response.data;
+        assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
+    }
+}
+
 /// Data structure for tracking fork choice updates received by the mock execution layer.
 #[derive(Debug, Default)]
 struct ForkChoiceUpdates {
diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs
index 3df7f7c16f3..5ce0c55cacd 100644
--- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs
+++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs
@@ -1,7 +1,10 @@
 //! ENR extension trait to support libp2p integration.
 use crate::{Enr, Multiaddr, PeerId};
 use discv5::enr::{CombinedKey, CombinedPublicKey};
-use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol};
+use libp2p::{
+    core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol},
+    identity::secp256k1,
+};
 use tiny_keccak::{Hasher, Keccak};

 /// Extend ENR for libp2p types.
@@ -36,6 +39,8 @@ pub trait CombinedKeyPublicExt {
 pub trait CombinedKeyExt {
     /// Converts a libp2p key into an ENR combined key.
     fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
+    /// Converts a [`secp256k1::Keypair`] into an ENR [`CombinedKey`].
+    fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey;
 }

 impl EnrExt for Enr {
@@ -220,12 +225,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
 impl CombinedKeyExt for CombinedKey {
     fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
         match key {
-            Keypair::Secp256k1(key) => {
-                let secret =
-                    discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
-                        .expect("libp2p key must be valid");
-                Ok(CombinedKey::Secp256k1(secret))
-            }
+            Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)),
             Keypair::Ed25519(key) => {
                 let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
                     &(key.encode()[..32])
@@ -237,6 +237,11 @@ impl CombinedKeyExt for CombinedKey {
             Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
         }
     }
+    fn from_secp256k1(key: &secp256k1::Keypair) -> Self {
+        let secret = discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
+            .expect("libp2p key must be valid");
+        CombinedKey::Secp256k1(secret)
+    }
 }

 // helper function to convert a peer_id to a node_id.
This is only possible for secp256k1/ed25519 libp2p diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3ee74ebf018..d4d0baef6b7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1101,6 +1101,7 @@ mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; + use libp2p::identity::secp256k1; use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; @@ -1119,10 +1120,10 @@ mod tests { } async fn build_discovery() -> Discovery { - let keypair = libp2p::identity::Keypair::generate_secp256k1(); + let keypair = secp256k1::Keypair::generate(); let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); + let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( @@ -1138,6 +1139,7 @@ mod tests { false, &log, ); + let keypair = Keypair::Secp256k1(keypair); Discovery::new(&keypair, &config, Arc::new(globals), &log) .await .unwrap() @@ -1184,8 +1186,8 @@ mod tests { fn make_enr(subnet_ids: Vec) -> Enr { let mut builder = EnrBuilder::new("v4"); - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); + let keypair = secp256k1::Keypair::generate(); + let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); // set the "attnets" field on our ENR let mut bitfield = BitVector::::new(); diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 295616f36ba..97eaaa0051b 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -134,9 +134,8 @@ impl NetworkGlobals { log: &slog::Logger, ) -> NetworkGlobals { use crate::CombinedKeyExt; - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: discv5::enr::CombinedKey = - discv5::enr::CombinedKey::from_libp2p(&keypair).unwrap(); + let keypair = libp2p::identity::secp256k1::Keypair::generate(); + let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); NetworkGlobals::new( enr, diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index aa1827787c6..a5cc12bbc55 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -11,7 +11,6 @@ matches = "0.1.8" exit-future = "0.2.0" slog-term = "2.6.0" slog-async = "2.5.0" -environment = { path = "../../lighthouse/environment" } [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -46,4 +45,7 @@ derivative = "2.2.0" delay_map = "0.3.0" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } -execution_layer = { path = "../execution_layer" } \ No newline at end of file +execution_layer = { path = "../execution_layer" } +beacon_processor = { path = "../beacon_processor" } +parking_lot = "0.12.0" +environment = { path = "../../lighthouse/environment" } \ No newline at end of file diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs 
b/beacon_node/network/src/beacon_processor/worker/mod.rs deleted file mode 100644 index 1cbc64b6329..00000000000 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::work_reprocessing_queue::ReprocessQueueMessage; -use crate::{service::NetworkMessage, sync::SyncMessage}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{debug, Logger}; -use std::sync::Arc; -use tokio::sync::mpsc; - -mod gossip_methods; -mod rpc_methods; -mod sync_methods; - -pub use gossip_methods::{GossipAggregatePackage, GossipAttestationPackage}; -pub use sync_methods::ChainSegmentProcessId; - -pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; - -/// Contains the context necessary to import blocks, attestations, etc to the beacon chain. -pub struct Worker { - pub chain: Arc>, - pub network_tx: mpsc::UnboundedSender>, - pub sync_tx: mpsc::UnboundedSender>, - pub log: Logger, -} - -impl Worker { - /// Send a message to `sync_tx`. - /// - /// Creates a log if there is an internal error. - fn send_sync_message(&self, message: SyncMessage) { - self.sync_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the sync service"; - "error" => %e) - }); - } - - /// Send a message to `network_tx`. - /// - /// Creates a log if there is an internal error. - fn send_network_message(&self, message: NetworkMessage) { - self.network_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the network service. Likely shutdown"; - "error" => %e) - }); - } -} - -/// Contains the necessary items for a worker to do their job. -pub struct Toolbox { - pub idle_tx: mpsc::Sender<()>, - pub work_reprocessing_tx: mpsc::Sender>, -} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 648c636acca..da64368b16d 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -6,10 +6,10 @@ pub mod error; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod service; -mod beacon_processor; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod metrics; mod nat; +mod network_beacon_processor; mod persisted_dht; mod router; mod status; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 27d7dc9625d..0144824861d 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -49,47 +49,8 @@ lazy_static! 
{ /* * Gossip processor */ - pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_rx_count", - "Count of work events received (but not necessarily processed)", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_ignored_count", - "Count of work events purposefully ignored", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_started_count", - "Count of work events which have been started by a worker", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORKER_TIME: Result = try_create_histogram_vec( - "beacon_processor_worker_time", - "Time taken for a worker to fully process some parcel of work.", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result = try_create_int_counter( - "beacon_processor_workers_spawned_total", - "The number of workers ever spawned by the gossip processing pool." - ); - pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_workers_active_total", - "Count of active workers in the gossip processing pool." - ); - pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result = try_create_int_counter( - "beacon_processor_idle_events_total", - "Count of idle events processed by the gossip processor manager." - ); - pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result = try_create_histogram( - "beacon_processor_event_handling_seconds", - "Time spent handling a new message and allocating it to a queue or worker." - ); + // Gossip blocks. - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_gossip_block_queue_total", - "Count of blocks from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_gossip_block_verified_total", "Total number of gossip blocks verified for propagation." @@ -107,10 +68,6 @@ lazy_static! { "Whenever a gossip block is received early this metrics is set to how early that block was." ); // Gossip Exits. - pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_exit_queue_total", - "Count of exits from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_exit_verified_total", "Total number of voluntary exits verified for propagation." @@ -120,10 +77,6 @@ lazy_static! { "Total number of voluntary exits imported to the op pool." ); // Gossip proposer slashings. - pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_proposer_slashing_queue_total", - "Count of proposer slashings from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_proposer_slashing_verified_total", "Total number of proposer slashings verified for propagation." @@ -133,10 +86,6 @@ lazy_static! { "Total number of proposer slashings imported to the op pool." ); // Gossip attester slashings. - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_attester_slashing_queue_total", - "Count of attester slashings from gossip waiting to be verified." 
- ); pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_attester_slashing_verified_total", "Total number of attester slashings verified for propagation." @@ -146,10 +95,6 @@ lazy_static! { "Total number of attester slashings imported to the op pool." ); // Gossip BLS to execution changes. - pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_bls_to_execution_change_queue_total", - "Count of address changes from gossip waiting to be verified." - ); pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_bls_to_execution_change_verified_total", "Total number of address changes verified for propagation." @@ -159,23 +104,11 @@ lazy_static! { "Total number of address changes imported to the op pool." ); // Rpc blocks. - pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_rpc_block_queue_total", - "Count of blocks from the rpc waiting to be verified." - ); pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_rpc_block_imported_total", "Total number of gossip blocks imported to fork choice, etc." ); // Chain segments. - pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_chain_segment_queue_total", - "Count of chain segments from the rpc waiting to be verified." - ); - pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_backfill_chain_segment_queue_total", - "Count of backfill chain segments from the rpc waiting to be verified." - ); pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( "beacon_processor_chain_segment_success_total", "Total number of chain segments successfully processed." @@ -193,10 +126,6 @@ lazy_static! { "Total number of backfill chain segments that failed processing." ); // Unaggregated attestations. - pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_unaggregated_attestation_queue_total", - "Count of unagg. attestations waiting to be processed." - ); pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_unaggregated_attestation_verified_total", "Total number of unaggregated attestations verified for gossip." @@ -210,10 +139,6 @@ lazy_static! { "Total number of unaggregated attestations that referenced an unknown block and were re-queued." ); // Aggregated attestations. - pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_aggregated_attestation_queue_total", - "Count of agg. attestations waiting to be processed." - ); pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_aggregated_attestation_verified_total", "Total number of aggregated attestations verified for gossip." @@ -227,10 +152,6 @@ lazy_static! { "Total number of aggregated attestations that referenced an unknown block and were re-queued." ); // Sync committee messages. - pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_sync_message_queue_total", - "Count of sync committee messages waiting to be processed." 
- ); pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_message_verified_total", "Total number of sync committee messages verified for gossip." @@ -240,10 +161,6 @@ lazy_static! { "Total number of sync committee messages imported to fork choice, etc." ); // Sync contribution. - pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_sync_contribution_queue_total", - "Count of sync committee contributions waiting to be processed." - ); pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_contribution_verified_total", "Total number of sync committee contributions verified for gossip." @@ -279,12 +196,6 @@ lazy_static! { "Gossipsub light_client_optimistic_update errors per error type", &["type"] ); - pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = - try_create_int_counter_vec( - "beacon_processor_send_error_per_work_type", - "Total number of beacon processor send error per work type", - &["type"] - ); /* @@ -371,35 +282,9 @@ lazy_static! { "Count of times when a gossip block arrived from the network later than the attestation deadline.", ); - /* - * Attestation reprocessing queue metrics. - */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result = - try_create_int_gauge_vec( - "beacon_processor_reprocessing_queue_total", - "Count of items in a reprocessing queue.", - &["type"] - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_expired_attestations", - "Number of queued attestations which have expired before a matching block has been found." - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_matched_attestations", - "Number of queued attestations where as matching block has been imported." - ); - /* * Light client update reprocessing queue metrics. */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_expired_optimistic_updates", - "Number of queued light client optimistic updates which have expired before a matching block has been found." - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_matched_optimistic_updates", - "Number of queued light client optimistic updates where as matching block has been imported." - ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_sent_optimistic_updates", "Number of queued light client optimistic updates where as matching block has been imported." 
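The gauges deleted above are queue-level metrics that move with the processor into the new `beacon_processor` crate, while the per-type verification counters remain here. For reference, a minimal sketch of the registration pattern this file uses throughout, assuming the `lazy_static` and `lighthouse_metrics` helpers seen above:

```rust
use lazy_static::lazy_static;
use lighthouse_metrics::{try_create_int_counter, IntCounter, Result};

lazy_static! {
    /// Same shape as the counters kept above; registration can fail, so the
    /// metric is stored as a `Result` and checked at use time.
    pub static ref EXAMPLE_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
        "example_verified_total",
        "Total number of example messages verified for propagation."
    );
}

fn main() {
    if let Ok(counter) = &*EXAMPLE_VERIFIED_TOTAL {
        counter.inc();
        println!("verified so far: {}", counter.get());
    }
}
```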
diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs similarity index 95% rename from beacon_node/network/src/beacon_processor/worker/gossip_methods.rs rename to beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 91ec81b18d3..cde4da9ffcc 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1,4 +1,9 @@ -use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use crate::{ + metrics, + network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}, + service::NetworkMessage, + sync::SyncMessage, +}; use beacon_chain::store::Error; use beacon_chain::{ @@ -30,14 +35,13 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; -use super::{ - super::work_reprocessing_queue::{ +use beacon_processor::{ + work_reprocessing_queue::{ QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, ReprocessQueueMessage, }, - Worker, + DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, }; -use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage}; /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus /// messages. @@ -144,65 +148,7 @@ impl FailedAtt { } } -/// Items required to verify a batch of unaggregated gossip attestations. -#[derive(Debug)] -pub struct GossipAttestationPackage { - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, -} - -impl GossipAttestationPackage { - pub fn new( - message_id: MessageId, - peer_id: PeerId, - attestation: Box>, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, - ) -> Self { - Self { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } - } -} - -/// Items required to verify a batch of aggregated gossip attestations. -#[derive(Debug)] -pub struct GossipAggregatePackage { - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - beacon_block_root: Hash256, - seen_timestamp: Duration, -} - -impl GossipAggregatePackage { - pub fn new( - message_id: MessageId, - peer_id: PeerId, - aggregate: Box>, - seen_timestamp: Duration, - ) -> Self { - Self { - message_id, - peer_id, - beacon_block_root: aggregate.message.aggregate.data.beacon_block_root, - aggregate, - seen_timestamp, - } - } -} - -impl Worker { +impl NetworkBeaconProcessor { /* Auxiliary functions */ /// Penalizes a peer for misbehaviour. @@ -245,13 +191,13 @@ impl Worker { /// Raises a log if there are errors. #[allow(clippy::too_many_arguments)] pub fn process_gossip_attestation( - self, + self: Arc, message_id: MessageId, peer_id: PeerId, attestation: Box>, subnet_id: SubnetId, should_import: bool, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { let result = match self @@ -277,9 +223,9 @@ impl Worker { } pub fn process_gossip_attestation_batch( - self, + self: Arc, packages: Vec>, - reprocess_tx: Option>>, + reprocess_tx: Option>, ) { let attestations_and_subnets = packages .iter() @@ -348,12 +294,12 @@ impl Worker { // cant' be mixed-up) and creating a struct would result in more complexity. 
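A recurring change throughout this renamed file, starting with `process_gossip_attestation_result` just below, is the method receiver moving from `self`/`&self` to `self: Arc<Self>` or `self: &Arc<Self>`, so a processor method can clone an owning handle into `'static` closures that get queued for later execution. A minimal sketch of the pattern:

```rust
use std::sync::Arc;

struct Processor {
    name: &'static str,
}

impl Processor {
    /// Taking `self: &Arc<Self>` lets the method clone an owning handle
    /// into a queued, `'static` closure.
    fn queue_for_later(self: &Arc<Self>, item: u64) -> Box<dyn FnOnce() + Send> {
        let this = Arc::clone(self); // clones the handle, not the processor
        Box::new(move || println!("{} reprocessing item {item}", this.name))
    }
}

fn main() {
    let processor = Arc::new(Processor { name: "network_beacon_processor" });
    let queued = processor.queue_for_later(7);
    drop(processor); // the closure still owns a handle
    queued();
}
```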
#[allow(clippy::too_many_arguments)] fn process_gossip_attestation_result( - &self, + self: &Arc, result: Result, RejectedUnaggregate>, message_id: MessageId, peer_id: PeerId, subnet_id: SubnetId, - reprocess_tx: Option>>, + reprocess_tx: Option>, should_import: bool, seen_timestamp: Duration, ) { @@ -456,11 +402,11 @@ impl Worker { /// /// Raises a log if there are errors. pub fn process_gossip_aggregate( - self, + self: Arc, message_id: MessageId, peer_id: PeerId, aggregate: Box>, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; @@ -490,9 +436,9 @@ impl Worker { } pub fn process_gossip_aggregate_batch( - self, + self: Arc, packages: Vec>, - reprocess_tx: Option>>, + reprocess_tx: Option>, ) { let aggregates = packages.iter().map(|package| package.aggregate.as_ref()); @@ -555,12 +501,12 @@ impl Worker { } fn process_gossip_aggregate_result( - &self, + self: &Arc, result: Result, RejectedAggregate>, beacon_block_root: Hash256, message_id: MessageId, peer_id: PeerId, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { match result { @@ -659,12 +605,12 @@ impl Worker { /// Raises a log if there are errors. #[allow(clippy::too_many_arguments)] pub async fn process_gossip_block( - self, + self: Arc, message_id: MessageId, peer_id: PeerId, peer_client: Client, block: Arc>, - reprocess_tx: mpsc::Sender>, + reprocess_tx: mpsc::Sender, duplicate_cache: DuplicateCache, invalid_block_storage: InvalidBlockStorage, seen_duration: Duration, @@ -708,12 +654,12 @@ impl Worker { /// /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. pub async fn process_gossip_unverified_block( - &self, + self: &Arc, message_id: MessageId, peer_id: PeerId, peer_client: Client, block: Arc>, - reprocess_tx: mpsc::Sender>, + reprocess_tx: mpsc::Sender, seen_duration: Duration, ) -> Option> { let block_delay = @@ -911,11 +857,25 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL); + let inner_self = self.clone(); + let process_fn = Box::pin(async move { + let reprocess_tx = inner_self.reprocess_tx.clone(); + let invalid_block_storage = inner_self.invalid_block_storage.clone(); + inner_self + .process_gossip_verified_block( + peer_id, + verified_block, + reprocess_tx, + invalid_block_storage, + seen_duration, + ) + .await; + }); if reprocess_tx .try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { - peer_id, - block: Box::new(verified_block), - seen_timestamp: seen_duration, + beacon_block_slot: block_slot, + beacon_block_root: block_root, + process_fn, })) .is_err() { @@ -948,10 +908,10 @@ impl Worker { /// /// Raises a log if there are errors. pub async fn process_gossip_verified_block( - self, + self: Arc, peer_id: PeerId, verified_block: GossipVerifiedBlock, - reprocess_tx: mpsc::Sender>, + reprocess_tx: mpsc::Sender, invalid_block_storage: InvalidBlockStorage, // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, @@ -1051,7 +1011,7 @@ impl Worker { } pub fn process_gossip_voluntary_exit( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, voluntary_exit: SignedVoluntaryExit, @@ -1109,7 +1069,7 @@ impl Worker { } pub fn process_gossip_proposer_slashing( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, proposer_slashing: ProposerSlashing, @@ -1171,7 +1131,7 @@ impl Worker { } pub fn process_gossip_attester_slashing( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, attester_slashing: AttesterSlashing, @@ -1225,7 +1185,7 @@ impl Worker { } pub fn process_gossip_bls_to_execution_change( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, bls_to_execution_change: SignedBlsToExecutionChange, @@ -1308,7 +1268,7 @@ impl Worker { /// /// Raises a log if there are errors. pub fn process_gossip_sync_committee_signature( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, sync_signature: SyncCommitteeMessage, @@ -1371,7 +1331,7 @@ impl Worker { /// /// Raises a log if there are errors. pub fn process_sync_committee_contribution( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, sync_contribution: SignedContributionAndProof, @@ -1426,7 +1386,7 @@ impl Worker { } pub fn process_gossip_finality_update( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, light_client_finality_update: LightClientFinalityUpdate, @@ -1492,11 +1452,11 @@ impl Worker { } pub fn process_gossip_optimistic_update( - self, + self: &Arc, message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, - reprocess_tx: Option>>, + reprocess_tx: Option>, seen_timestamp: Duration, ) { match self.chain.verify_optimistic_update_for_gossip( @@ -1527,15 +1487,19 @@ impl Worker { ); if let Some(sender) = reprocess_tx { + let processor = self.clone(); let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( QueuedLightClientUpdate { - peer_id, - message_id, - light_client_optimistic_update: Box::new( - light_client_optimistic_update, - ), parent_root, - seen_timestamp, + process_fn: Box::new(move || { + processor.process_gossip_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + None, // Do not reprocess this message again. + seen_timestamp, + ) + }), }, ); @@ -1624,11 +1588,11 @@ impl Worker { /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// network. fn handle_attestation_verification_failure( - &self, + self: &Arc, peer_id: PeerId, message_id: MessageId, failed_att: FailedAtt, - reprocess_tx: Option>>, + reprocess_tx: Option>, error: AttnError, seen_timestamp: Duration, ) { @@ -1860,11 +1824,18 @@ impl Worker { metrics::inc_counter( &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_REQUEUED_TOTAL, ); + let processor = self.clone(); ReprocessQueueMessage::UnknownBlockAggregate(QueuedAggregate { - peer_id, - message_id, - attestation, - seen_timestamp, + beacon_block_root: *beacon_block_root, + process_fn: Box::new(move || { + processor.process_gossip_aggregate( + message_id, + peer_id, + attestation, + None, // Do not allow this attestation to be re-processed beyond this point. 
+ seen_timestamp, + ) + }), }) } FailedAtt::Unaggregate { @@ -1876,13 +1847,20 @@ impl Worker { metrics::inc_counter( &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL, ); + let processor = self.clone(); ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { - peer_id, - message_id, - attestation, - subnet_id, - should_import, - seen_timestamp, + beacon_block_root: *beacon_block_root, + process_fn: Box::new(move || { + processor.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + None, // Do not allow this attestation to be re-processed beyond this point. + seen_timestamp, + ) + }), }) } }; diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs new file mode 100644 index 00000000000..7f0ef1fb817 --- /dev/null +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -0,0 +1,590 @@ +use crate::{ + service::NetworkMessage, + sync::{manager::BlockProcessType, SyncMessage}, +}; +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, test_utils::BeaconChainHarness, BeaconChain, +}; +use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; +use beacon_processor::{ + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, + GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, + MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN, +}; +use environment::null_logger; +use lighthouse_network::{ + rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, + Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, +}; +use slog::{debug, Logger}; +use slot_clock::ManualSlotClock; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; +use store::MemoryStore; +use task_executor::test_utils::TestRuntime; +use task_executor::TaskExecutor; +use tokio::sync::mpsc::{self, error::TrySendError}; +use types::*; + +pub use sync_methods::ChainSegmentProcessId; + +pub type Error = TrySendError>; + +mod gossip_methods; +mod rpc_methods; +mod sync_methods; +mod tests; + +pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; + +/// Defines if and where we will store the SSZ files of invalid blocks. +#[derive(Clone)] +pub enum InvalidBlockStorage { + Enabled(PathBuf), + Disabled, +} + +/// Provides an interface to a `BeaconProcessor` running in some other thread. +/// The wider `networking` crate should use this struct to interface with the +/// beacon processor. +pub struct NetworkBeaconProcessor { + pub beacon_processor_send: BeaconProcessorSend, + pub duplicate_cache: DuplicateCache, + pub chain: Arc>, + pub network_tx: mpsc::UnboundedSender>, + pub sync_tx: mpsc::UnboundedSender>, + pub reprocess_tx: mpsc::Sender, + pub network_globals: Arc>, + pub invalid_block_storage: InvalidBlockStorage, + pub executor: TaskExecutor, + pub log: Logger, +} + +impl NetworkBeaconProcessor { + fn try_send(&self, event: BeaconWorkEvent) -> Result<(), Error> { + self.beacon_processor_send + .try_send(event) + .map_err(Into::into) + } + + /// Create a new `Work` event for some unaggregated attestation. + pub fn send_unaggregated_attestation( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + attestation: Attestation, + subnet_id: SubnetId, + should_import: bool, + seen_timestamp: Duration, + ) -> Result<(), Error> { + // Define a closure for processing individual attestations. 
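These requeued messages now carry `process_fn` closures instead of raw network types, which is what lets the reprocess queue live in a crate with no knowledge of the network; the `process_individual`/`process_batch` closures defined immediately below follow the same shape. A simplified, self-contained sketch (names are stand-ins for the real `Work`/`WorkEvent`):

```rust
/// Stand-in for the items a work event carries.
struct Package(u64);

enum Work {
    GossipAttestation {
        package: Package,
        process_individual: Box<dyn FnOnce(Package) + Send>,
        process_batch: Box<dyn FnOnce(Vec<Package>) + Send>,
    },
}

/// The processor picks batching or individual processing without knowing
/// anything about attestations, peers, or the network.
fn dispatch(work: Work, batched: Option<Vec<Package>>) {
    match work {
        Work::GossipAttestation { package, process_individual, process_batch } => match batched {
            Some(mut packages) => {
                packages.push(package);
                process_batch(packages);
            }
            None => process_individual(package),
        },
    }
}

fn main() {
    let work = Work::GossipAttestation {
        package: Package(1),
        process_individual: Box::new(|p| println!("processed attestation {}", p.0)),
        process_batch: Box::new(|ps| println!("processed batch of {}", ps.len())),
    };
    dispatch(work, None);
}
```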
+ let processor = self.clone(); + let process_individual = move |package: GossipAttestationPackage| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; + + // Define a closure for processing batches of attestations. + let processor = self.clone(); + let process_batch = move |attestations| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation_batch(attestations, Some(reprocess_tx)) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipAttestation { + attestation: GossipAttestationPackage { + message_id, + peer_id, + attestation: Box::new(attestation), + subnet_id, + should_import, + seen_timestamp, + }, + process_individual: Box::new(process_individual), + process_batch: Box::new(process_batch), + }, + }) + } + + /// Create a new `Work` event for some aggregated attestation. + pub fn send_aggregated_attestation( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + aggregate: SignedAggregateAndProof, + seen_timestamp: Duration, + ) -> Result<(), Error> { + // Define a closure for processing individual attestations. + let processor = self.clone(); + let process_individual = move |package: GossipAggregatePackage| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_aggregate( + package.message_id, + package.peer_id, + package.aggregate, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; + + // Define a closure for processing batches of attestations. + let processor = self.clone(); + let process_batch = move |aggregates| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_aggregate_batch(aggregates, Some(reprocess_tx)) + }; + + let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipAggregate { + aggregate: GossipAggregatePackage { + message_id, + peer_id, + aggregate: Box::new(aggregate), + beacon_block_root, + seen_timestamp, + }, + process_individual: Box::new(process_individual), + process_batch: Box::new(process_batch), + }, + }) + } + + /// Create a new `Work` event for some block. + pub fn send_gossip_beacon_block( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + block: Arc>, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + let reprocess_tx = processor.reprocess_tx.clone(); + let invalid_block_storage = processor.invalid_block_storage.clone(); + let duplicate_cache = processor.duplicate_cache.clone(); + processor + .process_gossip_block( + message_id, + peer_id, + peer_client, + block, + reprocess_tx, + duplicate_cache, + invalid_block_storage, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipBlock(Box::pin(process_fn)), + }) + } + + /// Create a new `Work` event for some sync committee signature. 
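Each of these `send_*` constructors, including `send_gossip_sync_signature` below, funnels through the non-blocking `try_send` helper above: when the processor's bounded queue is full, the event comes back with `TrySendError::Full` instead of stalling the network thread. A small demonstration with a plain tokio channel:

```rust
use tokio::sync::mpsc::{self, error::TrySendError};

#[derive(Debug)]
struct WorkEvent(&'static str);

#[tokio::main]
async fn main() {
    // Tiny capacity to force the full-queue path.
    let (tx, mut rx) = mpsc::channel::<WorkEvent>(1);

    tx.try_send(WorkEvent("sync_signature")).expect("first event fits");
    match tx.try_send(WorkEvent("sync_signature")) {
        // The sender gets the event back instead of blocking on the queue.
        Err(TrySendError::Full(event)) => println!("queue full, dropped {event:?}"),
        other => panic!("expected a full queue, got {other:?}"),
    }

    let event = rx.recv().await.expect("queued event");
    println!("processing {event:?}");
}
```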
+ pub fn send_gossip_sync_signature( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_sync_committee_signature( + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipSyncSignature(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some sync committee contribution. + pub fn send_gossip_sync_contribution( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_sync_committee_contribution( + message_id, + peer_id, + sync_contribution, + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipSyncContribution(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some exit. + pub fn send_gossip_voluntary_exit( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + voluntary_exit: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipVoluntaryExit(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some proposer slashing. + pub fn send_gossip_proposer_slashing( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + proposer_slashing: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipProposerSlashing(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some light client finality update. + pub fn send_gossip_light_client_finality_update( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_finality_update( + message_id, + peer_id, + light_client_finality_update, + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipLightClientFinalityUpdate(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some light client optimistic update. + pub fn send_gossip_light_client_optimistic_update( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + Some(reprocess_tx), + seen_timestamp, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipLightClientOptimisticUpdate(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some attester slashing. 
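As with `send_gossip_attester_slashing` below, the larger gossip payloads arrive pre-`Box`ed. One plausible motivation, sketched here with hypothetical sizes, is keeping the work-event enum small: an enum is as large as its biggest inline variant.

```rust
use std::mem;

enum WorkEvent {
    Tiny(u8),
    BigInline([u8; 1024]),
    BigBoxed(Box<[u8; 1024]>),
}

fn main() {
    // The enum must be able to hold its largest inline variant, so every
    // queued `Tiny` still occupies over 1024 bytes. Boxing the big payload
    // keeps the common case pointer-sized.
    println!("WorkEvent is {} bytes", mem::size_of::<WorkEvent>());
    assert!(mem::size_of::<WorkEvent>() > 1024);
    assert_eq!(mem::size_of::<Box<[u8; 1024]>>(), mem::size_of::<usize>());
    let _queued = WorkEvent::BigBoxed(Box::new([0u8; 1024]));
    let _tiny = WorkEvent::Tiny(1);
    let _big = WorkEvent::BigInline([0u8; 1024]);
}
```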
+ pub fn send_gossip_attester_slashing( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + attester_slashing: Box>, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipAttesterSlashing(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some BLS to execution change. + pub fn send_gossip_bls_to_execution_change( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.process_gossip_bls_to_execution_change( + message_id, + peer_id, + *bls_to_execution_change, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipBlsToExecutionChange(Box::new(process_fn)), + }) + } + + /// Create a new `Work` event for some block, where the result from computation (if any) is + /// sent to the other side of `result_tx`. + pub fn send_rpc_beacon_block( + self: &Arc, + block_root: Hash256, + block: Arc>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + block_root, + block, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcBlock { process_fn }, + }) + } + + /// Create a new work event to import `blocks` as a beacon chain segment. + pub fn send_chain_segment( + self: &Arc, + process_id: ChainSegmentProcessId, + blocks: Vec>>, + ) -> Result<(), Error> { + let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); + let processor = self.clone(); + let process_fn = async move { + let notify_execution_layer = if processor + .network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + processor + .process_chain_segment(process_id, blocks, notify_execution_layer) + .await; + }; + let process_fn = Box::pin(process_fn); + + // Back-sync batches are dispatched with a different `Work` variant so + // they can be rate-limited. + let work = if is_backfill { + Work::ChainSegmentBackfill(process_fn) + } else { + Work::ChainSegment(process_fn) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work, + }) + } + + /// Create a new work event to process `StatusMessage`s from the RPC network. + pub fn send_status_message( + self: &Arc, + peer_id: PeerId, + message: StatusMessage, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || processor.process_status(peer_id, message); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::Status(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `BlocksByRangeRequest`s from the RPC network. 
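The blocks-by-range handler below receives a `send_idle_on_drop` guard: a drop-based token telling the processor that a worker slot is free again even if the handler exits early. A simplified sketch of the idea using std channels (the real `SendOnDrop` lives in `beacon_processor`):

```rust
use std::sync::mpsc;

/// Simplified version of the send-on-drop idea: signal the processor that a
/// worker slot is free when the guard goes out of scope, even on an early
/// return or panic.
struct SendIdleOnDrop {
    idle_tx: mpsc::Sender<()>,
}

impl Drop for SendIdleOnDrop {
    fn drop(&mut self) {
        // Ignore errors: the manager may already have shut down.
        let _ = self.idle_tx.send(());
    }
}

fn main() {
    let (idle_tx, idle_rx) = mpsc::channel();
    {
        let _guard = SendIdleOnDrop { idle_tx };
        // ... stream BlocksByRange responses here ...
    } // guard dropped: idle signal sent
    idle_rx.recv().expect("worker signalled idle");
    println!("worker slot reclaimed");
}
```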
+    /// Create a new work event to process `BlocksByRangeRequest`s from the RPC network.
+    pub fn send_blocks_by_range_request(
+        self: &Arc<Self>,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: BlocksByRangeRequest,
+    ) -> Result<(), Error<T::EthSpec>> {
+        let processor = self.clone();
+        let process_fn = move |send_idle_on_drop| {
+            let executor = processor.executor.clone();
+            processor.handle_blocks_by_range_request(
+                executor,
+                send_idle_on_drop,
+                peer_id,
+                request_id,
+                request,
+            )
+        };
+
+        self.try_send(BeaconWorkEvent {
+            drop_during_sync: false,
+            work: Work::BlocksByRangeRequest(Box::new(process_fn)),
+        })
+    }
+
+    /// Create a new work event to process `BlocksByRootRequest`s from the RPC network.
+    pub fn send_blocks_by_roots_request(
+        self: &Arc<Self>,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: BlocksByRootRequest,
+    ) -> Result<(), Error<T::EthSpec>> {
+        let processor = self.clone();
+        let process_fn = move |send_idle_on_drop| {
+            let executor = processor.executor.clone();
+            processor.handle_blocks_by_root_request(
+                executor,
+                send_idle_on_drop,
+                peer_id,
+                request_id,
+                request,
+            )
+        };
+
+        self.try_send(BeaconWorkEvent {
+            drop_during_sync: false,
+            work: Work::BlocksByRootsRequest(Box::new(process_fn)),
+        })
+    }
+
+    /// Create a new work event to process `LightClientBootstrap`s from the RPC network.
+    pub fn send_lightclient_bootstrap_request(
+        self: &Arc<Self>,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) -> Result<(), Error<T::EthSpec>> {
+        let processor = self.clone();
+        let process_fn =
+            move || processor.handle_light_client_bootstrap(peer_id, request_id, request);
+
+        self.try_send(BeaconWorkEvent {
+            drop_during_sync: true,
+            work: Work::LightClientBootstrapRequest(Box::new(process_fn)),
+        })
+    }
+
+    /// Send a message to `sync_tx`.
+    ///
+    /// Creates a log if there is an internal error.
+    fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
+        self.sync_tx.send(message).unwrap_or_else(|e| {
+            debug!(self.log, "Could not send message to the sync service";
+                "error" => %e)
+        });
+    }
+
+    /// Send a message to `network_tx`.
+    ///
+    /// Creates a log if there is an internal error.
+    fn send_network_message(&self, message: NetworkMessage<T::EthSpec>) {
+        self.network_tx.send(message).unwrap_or_else(|e| {
+            debug!(self.log, "Could not send message to the network service. Likely shutdown";
+                "error" => %e)
+        });
+    }
+}
+
+type TestBeaconChainType<E> =
+    Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
+
+impl<E: EthSpec> NetworkBeaconProcessor<TestBeaconChainType<E>> {
+    // Instantiates a mostly non-functional version of `Self` and returns the
+    // event receiver that would normally go to the beacon processor. This is
+    // useful for testing that messages are actually being sent to the beacon
+    // processor (but not much else).
+    pub fn null_for_testing(
+        network_globals: Arc<NetworkGlobals<E>>,
+    ) -> (Self, mpsc::Receiver<BeaconWorkEvent<E>>) {
+        let (beacon_processor_send, beacon_processor_receive) =
+            mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
+        let (network_tx, _network_rx) = mpsc::unbounded_channel();
+        let (sync_tx, _sync_rx) = mpsc::unbounded_channel();
+        let (reprocess_tx, _reprocess_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
+        let log = null_logger().unwrap();
+        let harness: BeaconChainHarness<TestBeaconChainType<E>> =
+            BeaconChainHarness::builder(E::default())
+                .spec(E::default_spec())
+                .deterministic_keypairs(8)
+                .logger(log.clone())
+                .fresh_ephemeral_store()
+                .mock_execution_layer()
+                .build();
+        let runtime = TestRuntime::default();
+
+        let network_beacon_processor = Self {
+            beacon_processor_send: BeaconProcessorSend(beacon_processor_send),
+            duplicate_cache: DuplicateCache::default(),
+            chain: harness.chain,
+            network_tx,
+            sync_tx,
+            reprocess_tx,
+            network_globals,
+            invalid_block_storage: InvalidBlockStorage::Disabled,
+            executor: runtime.task_executor.clone(),
+            log,
+        };
+
+        (network_beacon_processor, beacon_processor_receive)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    #[test]
+    fn queued_block_delay_is_sane() {
+        assert!(
+            beacon_processor::work_reprocessing_queue::ADDITIONAL_QUEUED_BLOCK_DELAY
+                < beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY
+        );
+    }
+}
diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs
similarity index 98%
rename from beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
rename to beacon_node/network/src/network_beacon_processor/rpc_methods.rs
index 83baa0417bc..19b0a60a43e 100644
--- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs
@@ -1,21 +1,21 @@
-use crate::beacon_processor::{worker::FUTURE_SLOT_TOLERANCE, SendOnDrop};
+use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE};
 use crate::service::NetworkMessage;
 use crate::status::ToStatusMessage;
 use crate::sync::SyncMessage;
 use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped};
+use beacon_processor::SendOnDrop;
 use itertools::process_results;
 use lighthouse_network::rpc::StatusMessage;
 use lighthouse_network::rpc::*;
 use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo};
 use slog::{debug, error, warn};
 use slot_clock::SlotClock;
+use std::sync::Arc;
 use task_executor::TaskExecutor;
 use tokio_stream::StreamExt;
 use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot};
 
-use super::Worker;
-
-impl<T: BeaconChainTypes> Worker<T> {
+impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
     /* Auxiliary functions */
 
     /// Disconnects and ban's a peer, sending a Goodbye request with the associated reason.
@@ -124,7 +124,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 
     /// Handle a `BlocksByRoot` request from the peer.
     pub fn handle_blocks_by_root_request(
-        self,
+        self: Arc<Self>,
         executor: TaskExecutor,
         send_on_drop: SendOnDrop,
         peer_id: PeerId,
@@ -210,7 +210,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 
     /// Handle a `BlocksByRoot` request from the peer.
     pub fn handle_light_client_bootstrap(
-        self,
+        self: &Arc<Self>,
        peer_id: PeerId,
        request_id: PeerRequestId,
        request: LightClientBootstrapRequest,
@@ -283,7 +283,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 
     /// Handle a `BlocksByRange` request from the peer.
     pub fn handle_blocks_by_range_request(
-        self,
+        self: Arc<Self>,
         executor: TaskExecutor,
         send_on_drop: SendOnDrop,
         peer_id: PeerId,
diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
similarity index 90%
rename from beacon_node/network/src/beacon_processor/worker/sync_methods.rs
rename to beacon_node/network/src/network_beacon_processor/sync_methods.rs
index ac59b1daa93..c33e2acf542 100644
--- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
@@ -1,17 +1,21 @@
 use std::time::Duration;
 
-use super::{super::work_reprocessing_queue::ReprocessQueueMessage, Worker};
-use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock;
-use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE;
-use crate::beacon_processor::DuplicateCache;
 use crate::metrics;
-use crate::sync::manager::{BlockProcessType, SyncMessage};
-use crate::sync::{BatchProcessResult, ChainId};
+use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE};
+use crate::sync::BatchProcessResult;
+use crate::sync::{
+    manager::{BlockProcessType, SyncMessage},
+    ChainId,
+};
 use beacon_chain::{
     observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
     BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
     NotifyExecutionLayer,
 };
+use beacon_processor::{
+    work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage},
+    AsyncFn, BlockingFn, DuplicateCache,
+};
 use lighthouse_network::PeerAction;
 use slog::{debug, error, info, warn};
 use slot_clock::SlotClock;
@@ -39,27 +43,71 @@ struct ChainSegmentFailed {
     peer_action: Option<PeerAction>,
 }
 
-impl<T: BeaconChainTypes> Worker<T> {
-    /// Attempt to process a block received from a direct RPC request.
-    #[allow(clippy::too_many_arguments)]
-    pub async fn process_rpc_block(
-        self,
+impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
+    /// Returns an async closure which processes a beacon block received via RPC.
+    ///
+    /// This separate function was required to prevent a cycle during compiler
+    /// type checking.
+    pub fn generate_rpc_beacon_block_process_fn(
+        self: Arc<Self>,
         block_root: Hash256,
         block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
-        reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T::EthSpec>>,
-        duplicate_cache: DuplicateCache,
-        should_process: bool,
-    ) {
-        if !should_process {
+    ) -> AsyncFn {
+        let process_fn = async move {
+            let reprocess_tx = self.reprocess_tx.clone();
+            let duplicate_cache = self.duplicate_cache.clone();
+            self.process_rpc_block(
+                block_root,
+                block,
+                seen_timestamp,
+                process_type,
+                reprocess_tx,
+                duplicate_cache,
+            )
+            .await;
+        };
+        Box::pin(process_fn)
+    }
+
+    /// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block.
+    pub fn generate_rpc_beacon_block_fns(
+        self: Arc<Self>,
+        block_root: Hash256,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    ) -> (AsyncFn, BlockingFn) {
+        // An async closure which will import the block.
+        let process_fn = self.clone().generate_rpc_beacon_block_process_fn(
+            block_root,
+            block,
+            seen_timestamp,
+            process_type.clone(),
+        );
+        // A closure which will ignore the block.
+        let ignore_fn = move || {
             // Sync handles these results
             self.send_sync_message(SyncMessage::BlockProcessed {
                 process_type,
                 result: crate::sync::manager::BlockProcessResult::Ignored,
             });
-            return;
-        }
+        };
+        (process_fn, Box::new(ignore_fn))
+    }
+
+    /// Attempt to process a block received from a direct RPC request.
+    #[allow(clippy::too_many_arguments)]
+    pub async fn process_rpc_block(
+        self: Arc<NetworkBeaconProcessor<T>>,
+        block_root: Hash256,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        seen_timestamp: Duration,
+        process_type: BlockProcessType,
+        reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
+        duplicate_cache: DuplicateCache,
+    ) {
         // Check if the block is already being imported through another source
         let handle = match duplicate_cache.check_and_insert(block_root) {
             Some(handle) => handle,
@@ -70,13 +118,18 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "action" => "sending rpc block to reprocessing queue",
                     "block_root" => %block_root,
                 );
+
                 // Send message to work reprocess queue to retry the block
-                let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
+                let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns(
                     block_root,
-                    block: block.clone(),
-                    process_type,
+                    block,
                     seen_timestamp,
-                    should_process: true,
+                    process_type,
+                );
+                let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
+                    beacon_block_root: block_root,
+                    process_fn,
+                    ignore_fn,
                 });
 
                 if reprocess_tx.try_send(reprocess_msg).is_err() {
@@ -130,12 +183,16 @@ impl<T: BeaconChainTypes> Worker<T> {
             );
 
             // Send message to work reprocess queue to retry the block
-            let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
+            let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns(
                 block_root,
-                block: block.clone(),
-                process_type,
+                block,
                 seen_timestamp,
-                should_process: true,
+                process_type,
+            );
+            let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
+                beacon_block_root: block_root,
+                process_fn,
+                ignore_fn,
             });
 
             if reprocess_tx.try_send(reprocess_msg).is_err() {
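The net effect of the two hunks above is that a requeued block no longer carries the block itself plus a `should_process` flag; it carries two ready-made callbacks. A sketch of the shape this gives `QueuedRpcBlock`, using hypothetical simplified types rather than the real definitions in `beacon_processor::work_reprocessing_queue`:

```rust
use std::future::Future;
use std::pin::Pin;

// Hypothetical simplified aliases mirroring `AsyncFn` / `BlockingFn`.
type AsyncFn = Pin<Box<dyn Future<Output = ()> + Send>>;
type BlockingFn = Box<dyn FnOnce() + Send>;

struct QueuedRpcBlock {
    beacon_block_root: u64, // `Hash256` in the real code
    process_fn: AsyncFn,    // runs the import once the delay elapses
    ignore_fn: BlockingFn,  // tells sync the block was ignored instead
}

fn main() {
    let queued = QueuedRpcBlock {
        beacon_block_root: 0xb10c,
        process_fn: Box::pin(async { println!("importing block") }),
        ignore_fn: Box::new(|| println!("ignoring block")),
    };
    // A reprocess queue would either poll `process_fn` on its runtime or,
    // if the block became redundant, call `ignore_fn`; it no longer needs
    // the generic block types that the old `should_process` flag implied.
    println!("queued block 0x{:x}", queued.beacon_block_root);
    (queued.ignore_fn)();
}
```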
diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs
similarity index 89%
rename from beacon_node/network/src/beacon_processor/tests.rs
rename to beacon_node/network/src/network_beacon_processor/tests.rs
index b93e83ad785..b8d5db568ec 100644
--- a/beacon_node/network/src/beacon_processor/tests.rs
+++ b/beacon_node/network/src/network_beacon_processor/tests.rs
@@ -1,20 +1,23 @@
 #![cfg(not(debug_assertions))] // Tests are too slow in debug.
 #![cfg(test)]
 
-use crate::beacon_processor::work_reprocessing_queue::{
-    QUEUED_ATTESTATION_DELAY, QUEUED_RPC_BLOCK_DELAY,
+use crate::{
+    network_beacon_processor::{
+        ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor,
+    },
+    service::NetworkMessage,
+    sync::{manager::BlockProcessType, SyncMessage},
 };
-use crate::beacon_processor::*;
-use crate::{service::NetworkMessage, sync::SyncMessage};
 use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
 use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
+use beacon_processor::{work_reprocessing_queue::*, *};
 use lighthouse_network::{
     discv5::enr::{CombinedKey, EnrBuilder},
     rpc::methods::{MetaData, MetaDataV2},
     types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield},
-    MessageId, NetworkGlobals, PeerId,
+    Client, MessageId, NetworkGlobals, PeerId,
 };
 use slot_clock::SlotClock;
 use std::cmp;
@@ -23,8 +26,8 @@ use std::sync::Arc;
 use std::time::Duration;
 use tokio::sync::mpsc;
 use types::{
-    Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing,
-    SignedBeaconBlock, SignedVoluntaryExit, SubnetId,
+    Attestation, AttesterSlashing, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing,
+    SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SubnetId,
 };
 
 type E = MainnetEthSpec;
@@ -51,11 +54,12 @@ struct TestRig {
     attester_slashing: AttesterSlashing<E>,
     proposer_slashing: ProposerSlashing,
     voluntary_exit: SignedVoluntaryExit,
-    beacon_processor_tx: mpsc::Sender<WorkEvent<E>>,
+    beacon_processor_tx: BeaconProcessorSend<E>,
     work_journal_rx: mpsc::Receiver<&'static str>,
     _network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
     _sync_rx: mpsc::UnboundedReceiver<SyncMessage<E>>,
     duplicate_cache: DuplicateCache,
+    network_beacon_processor: Arc<NetworkBeaconProcessor<EphemeralHarnessType<E>>>,
     _harness: BeaconChainHarness<EphemeralHarnessType<E>>,
 }
 
@@ -64,7 +68,7 @@ struct TestRig {
 impl Drop for TestRig {
     fn drop(&mut self) {
         // Causes the beacon processor to shutdown.
-        self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0;
+        self.beacon_processor_tx = BeaconProcessorSend(mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0);
     }
 }
 
@@ -169,6 +173,7 @@ impl TestRig {
         let log = harness.logger().clone();
 
         let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
+        let beacon_processor_tx = BeaconProcessorSend(beacon_processor_tx);
         let (sync_tx, _sync_rx) = mpsc::unbounded_channel();
 
         // Default metadata
@@ -191,22 +196,40 @@ impl TestRig {
 
         let executor = harness.runtime.task_executor.clone();
 
+        let (work_reprocessing_tx, work_reprocessing_rx) =
+            mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
         let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364);
 
         let duplicate_cache = DuplicateCache::default();
-        BeaconProcessor {
-            beacon_chain: Arc::downgrade(&chain),
+        let network_beacon_processor = NetworkBeaconProcessor {
+            beacon_processor_send: beacon_processor_tx.clone(),
+            duplicate_cache: duplicate_cache.clone(),
+            chain: harness.chain.clone(),
             network_tx,
             sync_tx,
+            reprocess_tx: work_reprocessing_tx.clone(),
+            network_globals: network_globals.clone(),
+            invalid_block_storage: InvalidBlockStorage::Disabled,
+            executor: executor.clone(),
+            log: log.clone(),
+        };
+        let network_beacon_processor = Arc::new(network_beacon_processor);
+
+        BeaconProcessor {
             network_globals,
             executor,
             max_workers: cmp::max(1, num_cpus::get()),
             current_workers: 0,
-            importing_blocks: duplicate_cache.clone(),
-            invalid_block_storage: InvalidBlockStorage::Disabled,
+            enable_backfill_rate_limiting: harness.chain.config.enable_backfill_rate_limiting,
             log: log.clone(),
         }
-        .spawn_manager(beacon_processor_rx, Some(work_journal_tx));
+        .spawn_manager(
+            beacon_processor_rx,
+            work_reprocessing_tx,
+            work_reprocessing_rx,
+            Some(work_journal_tx),
+            harness.chain.slot_clock.clone(),
+        );
 
         Self {
             chain,
@@ -222,6 +245,7 @@ impl TestRig {
             _network_rx,
             _sync_rx,
             duplicate_cache,
+            network_beacon_processor,
             _harness: harness,
         }
     }
@@ -235,102 +259,105 @@ impl TestRig {
     }
 
     pub fn enqueue_gossip_block(&self) {
-        self.beacon_processor_tx
-            .try_send(WorkEvent::gossip_beacon_block(
+        self.network_beacon_processor
+            .send_gossip_beacon_block(
                 junk_message_id(),
                 junk_peer_id(),
                 Client::default(),
                 self.next_block.clone(),
                 Duration::from_secs(0),
-            ))
+            )
            .unwrap();
    }

    pub fn enqueue_rpc_block(&self) {
-        let event = WorkEvent::rpc_beacon_block(
-            self.next_block.canonical_root(),
-            self.next_block.clone(),
-            std::time::Duration::default(),
-            BlockProcessType::ParentLookup {
-                chain_hash: Hash256::random(),
-            },
-        );
-        self.beacon_processor_tx.try_send(event).unwrap();
+        self.network_beacon_processor
+            .send_rpc_beacon_block(
+                self.next_block.canonical_root(),
+                self.next_block.clone(),
+                std::time::Duration::default(),
+                BlockProcessType::ParentLookup {
+                    chain_hash: Hash256::random(),
+                },
+            )
+            .unwrap();
    }

    pub fn enqueue_single_lookup_rpc_block(&self) {
-        let event = WorkEvent::rpc_beacon_block(
-            self.next_block.canonical_root(),
-            self.next_block.clone(),
-            std::time::Duration::default(),
-            BlockProcessType::SingleBlock { id: 1 },
-        );
-        self.beacon_processor_tx.try_send(event).unwrap();
+        self.network_beacon_processor
+            .send_rpc_beacon_block(
+                self.next_block.canonical_root(),
+                self.next_block.clone(),
+                std::time::Duration::default(),
+                BlockProcessType::SingleBlock { id: 1 },
+            )
+            .unwrap();
    }

    pub fn enqueue_backfill_batch(&self) {
-        let event = WorkEvent::chain_segment(
-            ChainSegmentProcessId::BackSyncBatchId(Epoch::default()),
-            Vec::default(),
-        );
-        self.beacon_processor_tx.try_send(event).unwrap();
+        self.network_beacon_processor
+            .send_chain_segment(
+                ChainSegmentProcessId::BackSyncBatchId(Epoch::default()),
+                Vec::default(),
+            )
+            .unwrap();
    }

    pub fn enqueue_unaggregated_attestation(&self) {
        let (attestation, subnet_id) = self.attestations.first().unwrap().clone();
-        self.beacon_processor_tx
-            .try_send(WorkEvent::unaggregated_attestation(
+        self.network_beacon_processor
+            .send_unaggregated_attestation(
                junk_message_id(),
                junk_peer_id(),
                attestation,
                subnet_id,
                true,
                Duration::from_secs(0),
-            ))
+            )
            .unwrap();
    }

    pub fn enqueue_gossip_attester_slashing(&self) {
-        self.beacon_processor_tx
-            .try_send(WorkEvent::gossip_attester_slashing(
+        self.network_beacon_processor
+            .send_gossip_attester_slashing(
                junk_message_id(),
                junk_peer_id(),
                Box::new(self.attester_slashing.clone()),
-            ))
+            )
            .unwrap();
    }

    pub fn enqueue_gossip_proposer_slashing(&self) {
-        self.beacon_processor_tx
-            .try_send(WorkEvent::gossip_proposer_slashing(
+        self.network_beacon_processor
+            .send_gossip_proposer_slashing(
                junk_message_id(),
                junk_peer_id(),
                Box::new(self.proposer_slashing.clone()),
-            ))
+            )
            .unwrap();
    }

    pub fn enqueue_gossip_voluntary_exit(&self) {
-        self.beacon_processor_tx
-            .try_send(WorkEvent::gossip_voluntary_exit(
+        self.network_beacon_processor
+            .send_gossip_voluntary_exit(
                junk_message_id(),
                junk_peer_id(),
                Box::new(self.voluntary_exit.clone()),
-            ))
+            )
            .unwrap();
    }

    pub fn enqueue_next_block_unaggregated_attestation(&self) {
        let (attestation, subnet_id) = self.next_block_attestations.first().unwrap().clone();
-        self.beacon_processor_tx
-            .try_send(WorkEvent::unaggregated_attestation(
+        self.network_beacon_processor
+            .send_unaggregated_attestation(
                junk_message_id(),
                junk_peer_id(),
                attestation,
                subnet_id,
                true,
                Duration::from_secs(0),
-            ))
+            )
            .unwrap();
    }

@@ -340,13 +367,13 @@ impl TestRig {
            .first()
            .unwrap()
            .clone();
-        self.beacon_processor_tx
-            .try_send(WorkEvent::aggregated_attestation(
+        self.network_beacon_processor
+            .send_aggregated_attestation(
                junk_message_id(),
                junk_peer_id(),
                aggregate,
                Duration::from_secs(0),
-            ))
+            )
            .unwrap();
    }
diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs
index 7a91f2d0b1a..c8332705cfa 100644
--- a/beacon_node/network/src/router.rs
+++ b/beacon_node/network/src/router.rs
@@ -5,16 +5,16 @@
 //! syncing-related responses to the Sync manager.
 #![allow(clippy::unit_arg)]
 
-use crate::beacon_processor::{
-    BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent,
-    MAX_WORK_EVENT_QUEUE_LEN,
-};
 use crate::error;
+use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor};
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::status_message;
 use crate::sync::manager::RequestId as SyncId;
 use crate::sync::SyncMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes};
+use beacon_processor::{
+    work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache,
+};
 use futures::prelude::*;
 use lighthouse_network::rpc::*;
 use lighthouse_network::{
@@ -23,7 +23,6 @@ use lighthouse_network::{
 use logging::TimeLatch;
 use slog::{debug, o, trace};
 use slog::{error, warn};
-use std::cmp;
 use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use tokio::sync::mpsc;
@@ -41,7 +40,7 @@ pub struct Router<T: BeaconChainTypes> {
     /// A network context to return and handle RPC requests.
     network: HandlerNetworkContext<T::EthSpec>,
     /// A multi-threaded, non-blocking processor for applying messages to the beacon chain.
-    beacon_processor_send: BeaconProcessorSend<T>,
+    network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
     /// The `Router` logger.
     log: slog::Logger,
     /// Provides de-bounce functionality for logging.
@@ -80,12 +79,15 @@ pub enum RouterMessage<T: EthSpec> {
 impl<T: BeaconChainTypes> Router<T> {
     /// Initializes and runs the Router.
+    #[allow(clippy::too_many_arguments)]
     pub fn spawn(
         beacon_chain: Arc<BeaconChain<T>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
         executor: task_executor::TaskExecutor,
         invalid_block_storage: InvalidBlockStorage,
+        beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
+        beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
         log: slog::Logger,
     ) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> {
         let message_handler_log = log.new(o!("service"=> "router"));
@@ -93,42 +95,41 @@ impl<T: BeaconChainTypes> Router<T> {
 
         let (handler_send, handler_recv) = mpsc::unbounded_channel();
 
-        let (beacon_processor_send, beacon_processor_receive) =
-            mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
-
         let sync_logger = log.new(o!("service"=> "sync"));
+        // generate the message channel
+        let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
+
+        let network_beacon_processor = NetworkBeaconProcessor {
+            beacon_processor_send,
+            duplicate_cache: DuplicateCache::default(),
+            chain: beacon_chain.clone(),
+            network_tx: network_send.clone(),
+            sync_tx: sync_send.clone(),
+            reprocess_tx: beacon_processor_reprocess_tx,
+            network_globals: network_globals.clone(),
+            invalid_block_storage,
+            executor: executor.clone(),
+            log: log.clone(),
+        };
+        let network_beacon_processor = Arc::new(network_beacon_processor);
 
         // spawn the sync thread
-        let sync_send = crate::sync::manager::spawn(
+        crate::sync::manager::spawn(
             executor.clone(),
             beacon_chain.clone(),
-            network_globals.clone(),
             network_send.clone(),
-            BeaconProcessorSend(beacon_processor_send.clone()),
+            network_beacon_processor.clone(),
+            sync_recv,
             sync_logger,
         );
 
-        BeaconProcessor {
-            beacon_chain: Arc::downgrade(&beacon_chain),
-            network_tx: network_send.clone(),
-            sync_tx: sync_send.clone(),
-            network_globals: network_globals.clone(),
-            executor: executor.clone(),
-            max_workers: cmp::max(1, num_cpus::get()),
-            current_workers: 0,
-            importing_blocks: Default::default(),
-            invalid_block_storage,
-            log: log.clone(),
-        }
-        .spawn_manager(beacon_processor_receive, None);
-
         // generate the Message handler
         let mut handler = Router {
             network_globals,
             chain: beacon_chain,
             sync_send,
             network: HandlerNetworkContext::new(network_send, log.clone()),
-            beacon_processor_send: BeaconProcessorSend(beacon_processor_send),
+            network_beacon_processor,
             log: message_handler_log,
             logger_debounce: TimeLatch::default(),
         };
@@ -197,14 +198,17 @@ impl<T: BeaconChainTypes> Router<T> {
             Request::Status(status_message) => {
                 self.on_status_request(peer_id, request_id, status_message)
             }
-            Request::BlocksByRange(request) => self.send_beacon_processor_work(
-                BeaconWorkEvent::blocks_by_range_request(peer_id, request_id, request),
+            Request::BlocksByRange(request) => self.handle_beacon_processor_send_result(
+                self.network_beacon_processor
+                    .send_blocks_by_range_request(peer_id, request_id, request),
             ),
-            Request::BlocksByRoot(request) => self.send_beacon_processor_work(
-                BeaconWorkEvent::blocks_by_roots_request(peer_id, request_id, request),
+            Request::BlocksByRoot(request) => self.handle_beacon_processor_send_result(
+                self.network_beacon_processor
+                    .send_blocks_by_roots_request(peer_id, request_id, request),
             ),
-            Request::LightClientBootstrap(request) => self.send_beacon_processor_work(
-                BeaconWorkEvent::lightclient_bootstrap_request(peer_id, request_id, request),
+            Request::LightClientBootstrap(request) =>
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor
+                        .send_lightclient_bootstrap_request(peer_id, request_id, request),
                 ),
         }
     }
@@ -219,10 +223,10 @@ impl<T: BeaconChainTypes> Router<T> {
         match response {
             Response::Status(status_message) => {
                 debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message);
-                self.send_beacon_processor_work(BeaconWorkEvent::status_message(
-                    peer_id,
-                    status_message,
-                ))
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor
+                        .send_status_message(peer_id, status_message),
+                )
             }
             Response::BlocksByRange(beacon_block) => {
                 self.on_blocks_by_range_response(peer_id, request_id, beacon_block);
@@ -247,36 +251,40 @@ impl<T: BeaconChainTypes> Router<T> {
     ) {
         match gossip_message {
             PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self
-                .send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation(
-                    message_id,
-                    peer_id,
-                    *aggregate_and_proof,
-                    timestamp_now(),
-                )),
-            PubsubMessage::Attestation(subnet_attestation) => {
-                self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation(
-                    message_id,
-                    peer_id,
-                    subnet_attestation.1,
-                    subnet_attestation.0,
-                    should_process,
-                    timestamp_now(),
-                ))
-            }
-            PubsubMessage::BeaconBlock(block) => {
-                self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block(
+                .handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_aggregated_attestation(
+                        message_id,
+                        peer_id,
+                        *aggregate_and_proof,
+                        timestamp_now(),
+                    ),
+                ),
+            PubsubMessage::Attestation(subnet_attestation) => self
+                .handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_unaggregated_attestation(
+                        message_id,
+                        peer_id,
+                        subnet_attestation.1,
+                        subnet_attestation.0,
+                        should_process,
+                        timestamp_now(),
+                    ),
+                ),
+            PubsubMessage::BeaconBlock(block) => self.handle_beacon_processor_send_result(
+                self.network_beacon_processor.send_gossip_beacon_block(
                     message_id,
                     peer_id,
                     self.network_globals.client(&peer_id),
                     block,
                     timestamp_now(),
-                ))
-            }
+                ),
+            ),
             PubsubMessage::VoluntaryExit(exit) => {
                 debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id);
-                self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit(
-                    message_id, peer_id, exit,
-                ))
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor
+                        .send_gossip_voluntary_exit(message_id, peer_id, exit),
+                )
             }
             PubsubMessage::ProposerSlashing(proposer_slashing) => {
                 debug!(
@@ -284,11 +292,13 @@ impl<T: BeaconChainTypes> Router<T> {
                     "Received a proposer slashing";
                     "peer_id" => %peer_id
                 );
-                self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing(
-                    message_id,
-                    peer_id,
-                    proposer_slashing,
-                ))
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_gossip_proposer_slashing(
+                        message_id,
+                        peer_id,
+                        proposer_slashing,
+                    ),
+                )
             }
             PubsubMessage::AttesterSlashing(attester_slashing) => {
                 debug!(
@@ -296,11 +306,13 @@ impl<T: BeaconChainTypes> Router<T> {
                     "Received a attester slashing";
                     "peer_id" => %peer_id
                 );
-                self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing(
-                    message_id,
-                    peer_id,
-                    attester_slashing,
-                ))
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_gossip_attester_slashing(
+                        message_id,
+                        peer_id,
+                        attester_slashing,
+                    ),
+                )
             }
             PubsubMessage::SignedContributionAndProof(contribution_and_proof) => {
                 trace!(
@@ -308,12 +320,14 @@ impl<T: BeaconChainTypes> Router<T> {
                     "Received sync committee aggregate";
                     "peer_id" => %peer_id
                 );
-                self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution(
-                    message_id,
-                    peer_id,
-                    *contribution_and_proof,
-                    timestamp_now(),
-                ))
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_gossip_sync_contribution(
+                        message_id,
+                        peer_id,
+                        *contribution_and_proof,
+                        timestamp_now(),
+                    ),
+                )
             }
             PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => {
                 trace!(
@@ -321,13 +335,15 @@ impl<T: BeaconChainTypes> Router<T> {
                     "Received sync committee signature";
                     "peer_id" => %peer_id
                 );
-                self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature(
-                    message_id,
-                    peer_id,
-                    sync_committtee_msg.1,
-                    sync_committtee_msg.0,
-                    timestamp_now(),
-                ))
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_gossip_sync_signature(
+                        message_id,
+                        peer_id,
+                        sync_committtee_msg.1,
+                        sync_committtee_msg.0,
+                        timestamp_now(),
+                    ),
+                )
             }
             PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => {
                 trace!(
@@ -335,13 +351,14 @@ impl<T: BeaconChainTypes> Router<T> {
                     "Received light client finality update";
                     "peer_id" => %peer_id
                 );
-                self.send_beacon_processor_work(
-                    BeaconWorkEvent::gossip_light_client_finality_update(
-                        message_id,
-                        peer_id,
-                        light_client_finality_update,
-                        timestamp_now(),
-                    ),
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor
+                        .send_gossip_light_client_finality_update(
+                            message_id,
+                            peer_id,
+                            *light_client_finality_update,
+                            timestamp_now(),
+                        ),
                 )
             }
             PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => {
@@ -350,21 +367,25 @@ impl<T: BeaconChainTypes> Router<T> {
                     "Received light client optimistic update";
                     "peer_id" => %peer_id
                 );
-                self.send_beacon_processor_work(
-                    BeaconWorkEvent::gossip_light_client_optimistic_update(
-                        message_id,
-                        peer_id,
-                        light_client_optimistic_update,
-                        timestamp_now(),
-                    ),
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor
+                        .send_gossip_light_client_optimistic_update(
+                            message_id,
+                            peer_id,
+                            *light_client_optimistic_update,
+                            timestamp_now(),
+                        ),
                 )
             }
             PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => self
-                .send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change(
-                    message_id,
-                    peer_id,
-                    bls_to_execution_change,
-                )),
+                .handle_beacon_processor_send_result(
+                    self.network_beacon_processor
+                        .send_gossip_bls_to_execution_change(
+                            message_id,
+                            peer_id,
+                            bls_to_execution_change,
+                        ),
+                ),
         }
     }
 
@@ -415,7 +436,10 @@ impl<T: BeaconChainTypes> Router<T> {
             request_id,
         );
 
-        self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status))
+        self.handle_beacon_processor_send_result(
+            self.network_beacon_processor
+                .send_status_message(peer_id, status),
+        )
     }
 
     /// Handle a `BlocksByRange` response from the peer.
@@ -480,20 +504,22 @@ impl<T: BeaconChainTypes> Router<T> {
         });
     }
 
-    fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent<T::EthSpec>) {
-        self.beacon_processor_send
-            .try_send(work)
-            .unwrap_or_else(|e| {
-                let work_type = match &*e {
-                    mpsc::error::TrySendError::Closed(work)
-                    | mpsc::error::TrySendError::Full(work) => work.work_type(),
-                };
-
-                if self.logger_debounce.elapsed() {
-                    error!(&self.log, "Unable to send message to the beacon processor";
-                        "error" => %e, "type" => work_type)
                 }
-            })
+    fn handle_beacon_processor_send_result(
+        &mut self,
+        result: Result<(), crate::network_beacon_processor::Error<T::EthSpec>>,
+    ) {
+        if let Err(e) = result {
+            let work_type = match &e {
+                mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => {
+                    work.work_type()
+                }
+            };
+
+            if self.logger_debounce.elapsed() {
+                error!(&self.log, "Unable to send message to the beacon processor";
+                    "error" => %e, "type" => work_type)
+            }
+        }
     }
 }
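After this change the router no longer owns the beacon processor: the work queue and the reprocess queue are created by the caller and threaded through `NetworkService::start` and `Router::spawn` (see the service tests below). A standalone sketch of the resulting ownership, with hypothetical stand-in types:

```rust
use std::sync::mpsc::{channel, Receiver, Sender};

// Hypothetical stand-ins; only the ownership pattern matters here.
struct WorkEvent;
struct ReprocessQueueMessage;

struct NetworkService {
    work_tx: Sender<WorkEvent>,
    reprocess_tx: Sender<ReprocessQueueMessage>,
}

struct BeaconProcessor {
    work_rx: Receiver<WorkEvent>,
    reprocess_rx: Receiver<ReprocessQueueMessage>,
}

fn main() {
    // The client builder now creates both channel pairs up front...
    let (work_tx, work_rx) = channel();
    let (reprocess_tx, reprocess_rx) = channel();
    // ...hands the send sides to the network stack...
    let _network = NetworkService { work_tx, reprocess_tx };
    // ...and the receive sides to the beacon processor, so tests can
    // stub either half independently.
    let _processor = BeaconProcessor { work_rx, reprocess_rx };
}
```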
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 2c919233fca..c2719477f1f 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -1,5 +1,5 @@
 use super::sync::manager::RequestId as SyncId;
-use crate::beacon_processor::InvalidBlockStorage;
+use crate::network_beacon_processor::InvalidBlockStorage;
 use crate::persisted_dht::{clear_dht, load_dht, persist_dht};
 use crate::router::{Router, RouterMessage};
 use crate::subnet_service::SyncCommitteeService;
@@ -9,6 +9,7 @@ use crate::{
     NetworkConfig,
 };
 use beacon_chain::{BeaconChain, BeaconChainTypes};
+use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend};
 use futures::channel::mpsc::Sender;
 use futures::future::OptionFuture;
 use futures::prelude::*;
@@ -224,6 +225,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         config: &NetworkConfig,
         executor: task_executor::TaskExecutor,
         gossipsub_registry: Option<&'_ mut Registry>,
+        beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
+        beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
     ) -> error::Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>)> {
         let network_log = executor.log().clone();
         // build the channels for external comms
@@ -311,6 +314,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             network_senders.network_send(),
             executor.clone(),
             invalid_block_storage,
+            beacon_processor_send,
+            beacon_processor_reprocess_tx,
             network_log.clone(),
         )?;
diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs
index 83fcc8c9ac8..9943da15a0d 100644
--- a/beacon_node/network/src/service/tests.rs
+++ b/beacon_node/network/src/service/tests.rs
@@ -4,12 +4,15 @@ mod tests {
     use crate::persisted_dht::load_dht;
     use crate::{NetworkConfig, NetworkService};
     use beacon_chain::test_utils::BeaconChainHarness;
+    use beacon_processor::{
+        BeaconProcessorSend, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN,
+    };
     use lighthouse_network::Enr;
     use slog::{o, Drain, Level, Logger};
     use sloggers::{null::NullLoggerBuilder, Build};
     use std::str::FromStr;
     use std::sync::Arc;
-    use tokio::runtime::Runtime;
+    use tokio::{runtime::Runtime, sync::mpsc};
     use types::MinimalEthSpec;
 
     fn get_logger(actual_log: bool) -> Logger {
@@ -67,10 +70,20 @@ mod tests {
         // Create a new network service which implicitly gets dropped at the
         // end of the block.
 
-        let _network_service =
-            NetworkService::start(beacon_chain.clone(), &config, executor, None)
-                .await
-                .unwrap();
+        let (beacon_processor_send, _beacon_processor_receive) =
+            mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
+        let (beacon_processor_reprocess_tx, _beacon_processor_reprocess_rx) =
+            mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
+        let _network_service = NetworkService::start(
+            beacon_chain.clone(),
+            &config,
+            executor,
+            None,
+            BeaconProcessorSend(beacon_processor_send),
+            beacon_processor_reprocess_tx,
+        )
+        .await
+        .unwrap();
         drop(signal);
     });
diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs
index 460c8b1ee92..a1c2404e5ed 100644
--- a/beacon_node/network/src/sync/backfill_sync/mod.rs
+++ b/beacon_node/network/src/sync/backfill_sync/mod.rs
@@ -8,7 +8,7 @@
 //! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill
 //! sync as failed, log an error and attempt to retry once a new peer joins the node.
 
-use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
+use crate::network_beacon_processor::ChainSegmentProcessId;
 use crate::sync::manager::{BatchProcessResult, Id};
 use crate::sync::network_context::SyncNetworkContext;
 use crate::sync::range_sync::{
@@ -537,8 +537,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         self.current_processing_batch = Some(batch_id);
 
         if let Err(e) = network
-            .processor_channel()
-            .try_send(BeaconWorkEvent::chain_segment(process_id, blocks))
+            .beacon_processor()
+            .send_chain_segment(process_id, blocks)
         {
             crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch",
                 "error" => %e, "batch" => self.processing_target);
@@ -1098,12 +1098,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         match self.batches.entry(batch_id) {
             Entry::Occupied(_) => {
                 // this batch doesn't need downloading, let this same function decide the next batch
-                if batch_id
-                    == self
-                        .beacon_chain
-                        .genesis_backfill_slot
-                        .epoch(T::EthSpec::slots_per_epoch())
-                {
+                if self.would_complete(batch_id) {
                     self.last_batch_downloaded = true;
                 }
 
@@ -1114,12 +1109,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
             }
             Entry::Vacant(entry) => {
                 entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH));
-                if batch_id
-                    == self
-                        .beacon_chain
-                        .genesis_backfill_slot
-                        .epoch(T::EthSpec::slots_per_epoch())
-                {
+                if self.would_complete(batch_id) {
                     self.last_batch_downloaded = true;
                 }
                 self.to_be_downloaded = self
@@ -1151,14 +1141,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
 
     /// Checks with the beacon chain if backfill sync has completed.
     fn check_completed(&mut self) -> bool {
-        if self.current_start
-            == self
-                .beacon_chain
-                .genesis_backfill_slot
-                .epoch(T::EthSpec::slots_per_epoch())
-        {
+        if self.would_complete(self.current_start) {
             // Check that the beacon chain agrees
-
             if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() {
                 // Conditions that we have completed a backfill sync
                 if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) {
@@ -1171,6 +1155,15 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         false
     }
 
+    /// Checks if backfill would complete by syncing to `start_epoch`.
+    fn would_complete(&self, start_epoch: Epoch) -> bool {
+        start_epoch
+            <= self
+                .beacon_chain
+                .genesis_backfill_slot
+                .epoch(T::EthSpec::slots_per_epoch())
+    }
+
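Note that `would_complete` also loosens the old strict equality to `<=`, so a batch landing at or before the genesis backfill epoch counts as final. A tiny sketch of the predicate, with plain integers standing in for `Epoch`:

```rust
// Hypothetical simplification: epochs as plain u64s.
fn would_complete(start_epoch: u64, genesis_backfill_epoch: u64) -> bool {
    start_epoch <= genesis_backfill_epoch
}

fn main() {
    // Backfilling towards a genesis backfill epoch of 3 (checkpoint-synced
    // nodes need not backfill all the way to slot 0):
    assert!(!would_complete(4, 3)); // still batches to download
    assert!(would_complete(3, 3)); // exactly at the target epoch
    assert!(would_complete(2, 3)); // past the target also counts as done
    println!("ok");
}
```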
     /// Updates the global network state indicating the current state of a backfill sync.
     fn set_state(&self, state: BackFillState) {
         *self.network_globals.backfill_state.write() = state;
diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs
index aa2694769c2..4340aa41d8b 100644
--- a/beacon_node/network/src/sync/block_lookups/mod.rs
+++ b/beacon_node/network/src/sync/block_lookups/mod.rs
@@ -2,6 +2,7 @@ use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::time::Duration;
 
+use crate::network_beacon_processor::ChainSegmentProcessId;
 use beacon_chain::{BeaconChainTypes, BlockError};
 use fnv::FnvHashMap;
 use lighthouse_network::{PeerAction, PeerId};
@@ -11,7 +12,6 @@ use smallvec::SmallVec;
 use std::sync::Arc;
 use store::{Hash256, SignedBeaconBlock};
 
-use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
 use crate::metrics;
 
 use self::parent_lookup::PARENT_FAIL_TOLERANCE;
@@ -542,8 +542,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             BlockProcessResult::Ok
             | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
                 // Check if the beacon processor is available
-                let beacon_processor_send = match cx.processor_channel_if_enabled() {
-                    Some(channel) => channel,
+                let beacon_processor = match cx.beacon_processor_if_enabled() {
+                    Some(beacon_processor) => beacon_processor,
                     None => {
                         return trace!(
                             self.log,
@@ -555,7 +555,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                 let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing();
                 let process_id = ChainSegmentProcessId::ParentLookup(chain_hash);
 
-                match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) {
+                match beacon_processor.send_chain_segment(process_id, blocks) {
                     Ok(_) => {
                         self.processing_parent_lookups
                             .insert(chain_hash, (hashes, request));
@@ -664,11 +664,15 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         process_type: BlockProcessType,
         cx: &mut SyncNetworkContext<T>,
     ) -> Result<(), ()> {
-        match cx.processor_channel_if_enabled() {
-            Some(beacon_processor_send) => {
+        match cx.beacon_processor_if_enabled() {
+            Some(beacon_processor) => {
                 trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type);
-                let event = WorkEvent::rpc_beacon_block(block_root, block, duration, process_type);
-                if let Err(e) = beacon_processor_send.try_send(event) {
+                if let Err(e) = beacon_processor.send_rpc_beacon_block(
+                    block_root,
+                    block,
+                    duration,
+                    process_type,
+                ) {
                     error!(
                         self.log,
                         "Failed to send sync block to processor";
diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs
index 82334db0f8e..c588f867bd9 100644
--- a/beacon_node/network/src/sync/block_lookups/tests.rs
+++ b/beacon_node/network/src/sync/block_lookups/tests.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use crate::beacon_processor::BeaconProcessorSend;
+use crate::network_beacon_processor::NetworkBeaconProcessor;
 use crate::service::RequestId;
 use crate::sync::manager::RequestId as SyncId;
 use crate::NetworkMessage;
@@ -9,18 +9,19 @@ use super::*;
 
 use beacon_chain::builder::Witness;
 use beacon_chain::eth1_chain::CachingEth1Backend;
+use beacon_processor::WorkEvent;
 use lighthouse_network::{NetworkGlobals, Request};
 use slog::{Drain, Level};
-use slot_clock::SystemTimeSlotClock;
+use slot_clock::ManualSlotClock;
 use store::MemoryStore;
 use tokio::sync::mpsc;
 use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
 use types::MinimalEthSpec as E;
 
-type T = Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
+type T = Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
 
 struct TestRig {
-    beacon_processor_rx: mpsc::Receiver<WorkEvent<T>>,
+    beacon_processor_rx: mpsc::Receiver<WorkEvent<E>>,
     network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
     rng: XorShiftRng,
 }
@@ -41,8 +42,10 @@ impl TestRig {
             }
         };
 
-        let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(100);
         let (network_tx, network_rx) = mpsc::unbounded_channel();
+        let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
+        let (network_beacon_processor, beacon_processor_rx) =
+            NetworkBeaconProcessor::null_for_testing(globals);
         let rng = XorShiftRng::from_seed([42; 16]);
         let rig = TestRig {
             beacon_processor_rx,
@@ -51,11 +54,9 @@ impl TestRig {
         };
         let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups")));
         let cx = {
-            let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
             SyncNetworkContext::new(
                 network_tx,
-                globals,
-                BeaconProcessorSend(beacon_processor_tx),
+                Arc::new(network_beacon_processor),
                 log.new(slog::o!("component" => "network_context")),
             )
         };
@@ -102,7 +103,7 @@ impl TestRig {
     fn expect_block_process(&mut self) {
         match self.beacon_processor_rx.try_recv() {
             Ok(work) => {
-                assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOCK);
+                assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK);
             }
             other => panic!("Expected block process, found {:?}", other),
         }
@@ -112,7 +113,7 @@ impl TestRig {
     fn expect_parent_chain_process(&mut self) {
         match self.beacon_processor_rx.try_recv() {
             Ok(work) => {
-                assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT);
+                assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT);
             }
             other => panic!("Expected chain segment process, found {:?}", other),
         }
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index c24d4c192b1..72542752c51 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -38,7 +38,7 @@ use super::block_lookups::BlockLookups;
 use super::network_context::SyncNetworkContext;
 use super::peer_sync_info::{remote_sync_type, PeerSyncType};
 use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
-use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId};
+use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor};
 use crate::service::NetworkMessage;
 use crate::status::ToStatusMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
@@ -159,9 +159,6 @@ pub struct SyncManager<T: BeaconChainTypes> {
     /// A reference to the underlying beacon chain.
     chain: Arc<BeaconChain<T>>,
 
-    /// A reference to the network globals and peer-db.
-    network_globals: Arc<NetworkGlobals<T::EthSpec>>,
-
     /// A receiving channel sent by the message processor thread.
     input_channel: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
@@ -186,29 +183,22 @@ pub struct SyncManager<T: BeaconChainTypes> {
 pub fn spawn<T: BeaconChainTypes>(
     executor: task_executor::TaskExecutor,
     beacon_chain: Arc<BeaconChain<T>>,
-    network_globals: Arc<NetworkGlobals<T::EthSpec>>,
     network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
-    beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
+    beacon_processor: Arc<NetworkBeaconProcessor<T>>,
+    sync_recv: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
     log: slog::Logger,
-) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> {
+) {
     assert!(
         MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
         "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request"
     );
-    // generate the message channel
-    let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
 
     // create an instance of the SyncManager
+    let network_globals = beacon_processor.network_globals.clone();
     let mut sync_manager = SyncManager {
         chain: beacon_chain.clone(),
-        network_globals: network_globals.clone(),
         input_channel: sync_recv,
-        network: SyncNetworkContext::new(
-            network_send,
-            network_globals.clone(),
-            beacon_processor_send,
-            log.clone(),
-        ),
+        network: SyncNetworkContext::new(network_send, beacon_processor, log.clone()),
         range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
         backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()),
         block_lookups: BlockLookups::new(log.clone()),
@@ -218,10 +208,13 @@ pub fn spawn<T: BeaconChainTypes>(
     // spawn the sync manager thread
     debug!(log, "Sync Manager started");
     executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync");
-    sync_send
 }
 
 impl<T: BeaconChainTypes> SyncManager<T> {
+    fn network_globals(&self) -> &NetworkGlobals<T::EthSpec> {
+        self.network.network_globals()
+    }
+
     /* Input Handling Functions */
 
     /// A peer has connected which has blocks that are unknown to us.
@@ -322,12 +315,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         let rpr = new_state.as_str();
         // Drop the write lock
         let update_sync_status = self
-            .network_globals
+            .network_globals()
             .peers
             .write()
             .update_sync_status(peer_id, new_state.clone());
         if let Some(was_updated) = update_sync_status {
-            let is_connected = self.network_globals.peers.read().is_connected(peer_id);
+            let is_connected = self.network_globals().peers.read().is_connected(peer_id);
             if was_updated {
                 debug!(
                     self.log,
@@ -383,7 +376,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         let head = self.chain.best_slot();
         let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0));
-        let peers = self.network_globals.peers.read();
+        let peers = self.network_globals().peers.read();
         if current_slot >= head
             && current_slot.sub(head) <= (SLOT_IMPORT_TOLERANCE as u64)
             && head > 0
@@ -445,8 +438,8 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             },
         };
 
-        let old_state = self.network_globals.set_sync_state(new_state);
-        let new_state = self.network_globals.sync_state.read();
+        let old_state = self.network_globals().set_sync_state(new_state);
+        let new_state = self.network_globals().sync_state.read().clone();
         if !new_state.eq(&old_state) {
             info!(self.log, "Sync state updated"; "old_state" => %old_state, "new_state" => %new_state);
             // If we have become synced - Subscribe to all the core subnet topics
@@ -505,7 +498,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             }
             SyncMessage::UnknownBlock(peer_id, block, block_root) => {
                 // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore
-                if !self.network_globals.sync_state.read().is_synced() {
+                if !self.network_globals().sync_state.read().is_synced() {
                     let head_slot = self.chain.canonical_head.cached_head().head_slot();
                     let unknown_block_slot = block.slot();
 
@@ -519,7 +512,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         return;
                     }
                 }
-                if self.network_globals.peers.read().is_connected(&peer_id)
+                if self.network_globals().peers.read().is_connected(&peer_id)
                     && self.network.is_execution_engine_online()
                 {
                     self.block_lookups
@@ -528,8 +521,8 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             }
             SyncMessage::UnknownBlockHash(peer_id, block_hash) => {
                 // If we are not synced, ignore this block.
-                if self.network_globals.sync_state.read().is_synced()
-                    && self.network_globals.peers.read().is_connected(&peer_id)
+                if self.network_globals().sync_state.read().is_synced()
+                    && self.network_globals().peers.read().is_connected(&peer_id)
                     && self.network.is_execution_engine_online()
                 {
                     self.block_lookups
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs
index 03c466eecea..adc235130b0 100644
--- a/beacon_node/network/src/sync/network_context.rs
+++ b/beacon_node/network/src/sync/network_context.rs
@@ -3,7 +3,7 @@
 
 use super::manager::{Id, RequestId as SyncRequestId};
 use super::range_sync::{BatchId, ChainId};
-use crate::beacon_processor::BeaconProcessorSend;
+use crate::network_beacon_processor::NetworkBeaconProcessor;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
 use beacon_chain::{BeaconChainTypes, EngineState};
@@ -20,9 +20,6 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// The network channel to relay messages to the Network service.
     network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
 
-    /// Access to the network global vars.
-    network_globals: Arc<NetworkGlobals<T::EthSpec>>,
-
     /// A sequential ID for all RPC requests.
     request_id: Id,
 
@@ -36,8 +33,8 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// `beacon_processor_send`.
     execution_engine_state: EngineState,
 
-    /// Channel to send work to the beacon processor.
-    beacon_processor_send: BeaconProcessorSend<T>,
+    /// Sends work to the beacon processor via a channel.
+    network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
 
     /// Logger for the `SyncNetworkContext`.
     log: slog::Logger,
 }
 
@@ -46,25 +43,27 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
     pub fn new(
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
-        network_globals: Arc<NetworkGlobals<T::EthSpec>>,
-        beacon_processor_send: BeaconProcessorSend<T>,
+        network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
         log: slog::Logger,
     ) -> Self {
         Self {
             network_send,
             execution_engine_state: EngineState::Online, // always assume `Online` at the start
-            network_globals,
             request_id: 1,
             range_requests: FnvHashMap::default(),
             backfill_requests: FnvHashMap::default(),
-            beacon_processor_send,
+            network_beacon_processor,
             log,
         }
     }
 
+    pub fn network_globals(&self) -> &NetworkGlobals<T::EthSpec> {
+        &self.network_beacon_processor.network_globals
+    }
+
     /// Returns the Client type of the peer if known
     pub fn client_type(&self, peer_id: &PeerId) -> Client {
-        self.network_globals
+        self.network_globals()
             .peers
             .read()
             .peer_info(peer_id)
@@ -278,13 +277,13 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         })
     }
 
-    pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend<T>> {
+    pub fn beacon_processor_if_enabled(&self) -> Option<&Arc<NetworkBeaconProcessor<T>>> {
         self.is_execution_engine_online()
-            .then_some(&self.beacon_processor_send)
+            .then_some(&self.network_beacon_processor)
     }
 
-    pub fn processor_channel(&self) -> &BeaconProcessorSend<T> {
-        &self.beacon_processor_send
+    pub fn beacon_processor(&self) -> &Arc<NetworkBeaconProcessor<T>> {
+        &self.network_beacon_processor
     }
 
     fn next_id(&mut self) -> Id {
diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs
index 51ca9e2b071..af547885dca 100644
--- a/beacon_node/network/src/sync/range_sync/chain.rs
+++ b/beacon_node/network/src/sync/range_sync/chain.rs
@@ -1,5 +1,5 @@
 use super::batch::{BatchInfo, BatchProcessingResult, BatchState};
-use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
+use crate::network_beacon_processor::ChainSegmentProcessId;
 use crate::sync::{
     manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
 };
@@ -294,8 +294,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             return Ok(KeepChain);
         }
 
-        let beacon_processor_send = match network.processor_channel_if_enabled() {
-            Some(channel) => channel,
+        let beacon_processor = match network.beacon_processor_if_enabled() {
+            Some(beacon_processor) => beacon_processor,
             None => return Ok(KeepChain),
         };
 
@@ -317,9 +317,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id);
         self.current_processing_batch = Some(batch_id);
 
-        if let Err(e) =
-            beacon_processor_send.try_send(BeaconWorkEvent::chain_segment(process_id, blocks))
-        {
+        if let Err(e) = beacon_processor.send_chain_segment(process_id, blocks) {
             crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch",
                 "error" => %e, "batch" => self.processing_target);
             // This is unlikely to happen but it would stall syncing since the batch now has no
diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs
index 2c35c57d9e4..05ad5204b9e 100644
--- a/beacon_node/network/src/sync/range_sync/range.rs
+++ b/beacon_node/network/src/sync/range_sync/range.rs
@@ -371,22 +371,23 @@ where
 
 #[cfg(test)]
 mod tests {
+    use crate::network_beacon_processor::NetworkBeaconProcessor;
     use crate::service::RequestId;
     use crate::NetworkMessage;
 
     use super::*;
-    use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent};
     use beacon_chain::builder::Witness;
     use beacon_chain::eth1_chain::CachingEth1Backend;
     use beacon_chain::parking_lot::RwLock;
     use beacon_chain::EngineState;
+    use beacon_processor::WorkEvent as BeaconWorkEvent;
     use lighthouse_network::rpc::BlocksByRangeRequest;
     use lighthouse_network::Request;
     use lighthouse_network::{rpc::StatusMessage, NetworkGlobals};
     use slog::{o, Drain};
     use tokio::sync::mpsc;
 
-    use slot_clock::SystemTimeSlotClock;
+    use slot_clock::ManualSlotClock;
     use std::collections::HashSet;
     use std::sync::Arc;
     use store::MemoryStore;
@@ -437,7 +438,7 @@ mod tests {
     }
 
     type TestBeaconChainType =
-        Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
+        Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
 
     fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
         let decorator = slog_term::TermDecorator::new().build();
@@ -455,7 +456,7 @@ mod tests {
     struct TestRig {
         log: slog::Logger,
         /// To check what does sync send to the beacon processor.
-        beacon_processor_rx: mpsc::Receiver<WorkEvent<TestBeaconChainType>>,
+        beacon_processor_rx: mpsc::Receiver<WorkEvent<E>>,
         /// To set up different scenarios where sync is told about known/unkown blocks.
         chain: Arc<FakeStorage>,
         /// Needed by range to handle communication with the network.
@@ -583,7 +584,7 @@ mod tests {
     fn expect_chain_segment(&mut self) {
         match self.beacon_processor_rx.try_recv() {
             Ok(work) => {
-                assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT);
+                assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT);
             }
             other => panic!("Expected chain segment process, found {:?}", other),
         }
@@ -593,17 +594,17 @@ mod tests {
     fn range(log_enabled: bool) -> (TestRig, RangeSync<TestBeaconChainType, FakeStorage>) {
         let chain = Arc::new(FakeStorage::default());
         let log = build_log(slog::Level::Trace, log_enabled);
-        let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10);
         let range_sync = RangeSync::<TestBeaconChainType, FakeStorage>::new(
             chain.clone(),
             log.new(o!("component" => "range")),
         );
         let (network_tx, network_rx) = mpsc::unbounded_channel();
         let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
+        let (network_beacon_processor, beacon_processor_rx) =
+            NetworkBeaconProcessor::null_for_testing(globals.clone());
         let cx = SyncNetworkContext::new(
             network_tx,
-            globals.clone(),
-            BeaconProcessorSend(beacon_processor_tx),
+            Arc::new(network_beacon_processor),
             log.new(o!("component" => "network_context")),
         );
         let test_rig = TestRig {
diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs
index 0f954b26f8d..738680286b4 100644
--- a/beacon_node/src/cli.rs
+++ b/beacon_node/src/cli.rs
@@ -533,6 +533,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 [default: 8192 (mainnet) or 64 (minimal)]")
                 .takes_value(true)
         )
+        .arg(
+            Arg::with_name("epochs-per-migration")
+                .long("epochs-per-migration")
+                .value_name("N")
+                .help("The number of epochs to wait between running the migration of data from the \
+                    hot DB to the cold DB. Less frequent runs can be useful for minimizing disk \
+                    writes")
+                .default_value("1")
+                .takes_value(true)
+        )
         .arg(
             Arg::with_name("block-cache-size")
                 .long("block-cache-size")
@@ -691,16 +701,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(true)
                 .default_value("true")
         )
-        .arg(
-            Arg::with_name("db-migration-period")
-                .long("db-migration-period")
-                .value_name("EPOCHS")
-                .help("Specifies the number of epochs to wait between applying each finalization \
-                    migration to the database. Applying migrations less frequently can lead to \
-                    less total disk writes.")
-                .default_value("1")
-                .takes_value(true)
-        )
         .arg(
             Arg::with_name("epochs-per-state-diff")
                 .long("epochs-per-state-diff")
@@ -890,7 +890,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.")
                 .value_name("SECONDS")
                 .takes_value(true)
-                .default_value("60")
+                .default_value("180")
         )
         .arg(
             Arg::with_name("reconstruct-historic-states")
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index 51dd6ec57ee..48914b16a72 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -416,17 +416,18 @@ pub fn get_config(
         client_config.store.prune_payloads = prune_payloads;
     }
 
-    if let Some(epochs_per_migration) = clap_utils::parse_optional(cli_args, "db-migration-period")?
-    {
-        client_config.store_migrator.epochs_per_run = epochs_per_migration;
-    }
-
     if let Some(epochs_per_state_diff) =
         clap_utils::parse_optional(cli_args, "epochs-per-state-diff")?
     {
         client_config.store.epochs_per_state_diff = epochs_per_state_diff;
     }
 
+    if let Some(epochs_per_migration) =
+        clap_utils::parse_optional(cli_args, "epochs-per-migration")?
+    {
+        client_config.chain.epochs_per_migration = epochs_per_migration;
+    }
+
     /*
      * Zero-ports
      *
diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs
index 43e58732de2..9d7834e4fc2 100644
--- a/beacon_node/store/src/errors.rs
+++ b/beacon_node/store/src/errors.rs
@@ -67,6 +67,7 @@ pub enum Error {
     InvalidValidatorPubkeyBytes(bls::Error),
     ValidatorPubkeyCacheUninitialized,
     InvalidKey,
+    InvalidBytes,
     UnableToDowngrade,
     Hdiff(hdiff::Error),
     InconsistentFork(InconsistentFork),
diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs
index cd67d26a818..ac244040027 100644
--- a/beacon_node/store/src/forwards_iter.rs
+++ b/beacon_node/store/src/forwards_iter.rs
@@ -109,7 +109,7 @@ impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
             .next()?
             .and_then(|(slot_bytes, root_bytes)| {
                 if slot_bytes.len() != 8 || root_bytes.len() != 32 {
-                    panic!("FIXME(sproul): put an error here")
+                    Err(Error::InvalidBytes)
                 } else {
                     let slot = Slot::new(u64::from_be_bytes(slot_bytes.try_into().unwrap()));
                     let root = Hash256::from_slice(&root_bytes);
diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md
index d5c8b18e577..57883828947 100644
--- a/book/src/checkpoint-sync.md
+++ b/book/src/checkpoint-sync.md
@@ -48,6 +48,17 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g
 lighthouse bn --checkpoint-sync-url https://example.com/ ...
 ```
 
+### Adjusting the timeout
+
+If the beacon node fails to start due to a timeout from the checkpoint sync server, you can try
+running it again with a longer timeout by adding the flag `--checkpoint-sync-url-timeout`.
+
+```
+lighthouse bn --checkpoint-sync-url-timeout 300 --checkpoint-sync-url https://example.com/ ...
+```
+
+The flag takes a value in seconds. For more information see `lighthouse bn --help`.
+ ## Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index bebd1c661b3..fa3f93d06f8 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use serde_utils::quoted_u64::Quoted; // Details about the rewards paid for attestations // All rewards in GWei @@ -17,6 +18,12 @@ pub struct IdealAttestationRewards { // Ideal attester's reward for source vote in gwei #[serde(with = "serde_utils::quoted_u64")] pub source: u64, + // Ideal attester's inclusion_delay reward in gwei (phase0 only) + #[serde(skip_serializing_if = "Option::is_none")] + pub inclusion_delay: Option<Quoted<u64>>, + // Ideal attester's inactivity penalty in gwei + #[serde(with = "serde_utils::quoted_i64")] + pub inactivity: i64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] @@ -25,16 +32,20 @@ pub struct TotalAttestationRewards { #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // attester's reward for head vote in gwei - #[serde(with = "serde_utils::quoted_u64")] - pub head: u64, + #[serde(with = "serde_utils::quoted_i64")] + pub head: i64, // attester's reward for target vote in gwei #[serde(with = "serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei #[serde(with = "serde_utils::quoted_i64")] pub source: i64, - // TBD attester's inclusion_delay reward in gwei (phase0 only) - // pub inclusion_delay: u64, + // attester's inclusion_delay reward in gwei (phase0 only) + #[serde(skip_serializing_if = "Option::is_none")] + pub inclusion_delay: Option<Quoted<u64>>, + // attester's inactivity penalty in gwei + #[serde(with = "serde_utils::quoted_i64")] + pub inactivity: i64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 196629cb8d5..428a082cc0f 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -1,6 +1,8 @@ # Lighthouse Team (Sigma Prime) -- enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo -- enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo +- enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I +- enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I +- 
enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I +- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I # EF Team - enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg - enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg @@ -15,4 +17,4 @@ - enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg # Nimbus team - enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM +- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM \ No newline at end of file diff --git a/consensus/state_processing/src/common/base.rs b/consensus/state_processing/src/common/base.rs index 47b0de9ef1e..3b08b783228 100644 --- a/consensus/state_processing/src/common/base.rs +++ b/consensus/state_processing/src/common/base.rs @@ -28,3 +28,15 @@ pub fn get_base_reward( .safe_div(sqrt_total_active_balance.as_u64())? .safe_div(spec.base_rewards_per_epoch) } + +pub fn get_base_reward_from_effective_balance( + effective_balance: u64, + total_active_balance: u64, + spec: &ChainSpec, +) -> Result { + effective_balance + .safe_mul(spec.base_reward_factor)? + .safe_div(total_active_balance.integer_sqrt())? 
+ .safe_div(spec.base_rewards_per_epoch) + .map_err(Into::into) +} diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 92def937ddc..7d84c426e8c 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -2,7 +2,7 @@ #![cfg_attr( not(test), deny( - clippy::integer_arithmetic, + clippy::arithmetic_side_effects, clippy::disallowed_methods, clippy::indexing_slicing, clippy::unwrap_used, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 709302eec17..22309334fa3 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use super::signature_sets::{Error as SignatureSetError, *}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2992a49ab01..36db52aaa73 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -960,7 +960,7 @@ async fn fork_spanning_exit() { spec.bellatrix_fork_epoch = Some(Epoch::new(4)); spec.shard_committee_period = 0; - let harness = BeaconChainHarness::builder(MainnetEthSpec::default()) + let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index ae975aaba5c..3534ff67c2e 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -220,9 +220,12 @@ impl ParticipationCache { // Fast path for inactivity scores update when we are definitely not in an inactivity leak. // This breaks the dependence of `process_inactivity_updates` on the finalization // re-calculation. - let definitely_not_in_inactivity_leak = - state.finalized_checkpoint().epoch + spec.min_epochs_to_inactivity_penalty + 1 - >= state.current_epoch(); + let definitely_not_in_inactivity_leak = state + .finalized_checkpoint() + .epoch + .safe_add(spec.min_epochs_to_inactivity_penalty)? + .safe_add(1)? 
+ >= state.current_epoch(); let mut inactivity_score_updates = MaxMap::default(); // Iterate through all validators, updating: @@ -242,7 +245,7 @@ impl ParticipationCache { for (val_index, (((val, curr_epoch_flags), prev_epoch_flags), inactivity_score)) in iter { let is_active_current_epoch = val.is_active_at(current_epoch); let is_active_previous_epoch = val.is_active_at(previous_epoch); - let is_eligible = state.is_eligible_validator(previous_epoch, val); + let is_eligible = state.is_eligible_validator(previous_epoch, val)?; if is_active_current_epoch { current_epoch_participation.process_active_validator( diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index a4a7c8b97e6..9510f2eaec4 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -54,7 +54,7 @@ pub fn get_flag_index_deltas( let mut delta = Delta::default(); if validator.is_unslashed_participating_index(flag_index)? { - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? { let reward_numerator = base_reward .safe_mul(weight)? .safe_mul(unslashed_participating_increments)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index b1fb0ca22b0..796379ae756 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -39,7 +39,7 @@ pub fn process_epoch( justification_and_finalization_state.apply_changes_to_state(state); // Rewards and Penalties. - process_rewards_and_penalties(state, &mut validator_statuses, spec)?; + process_rewards_and_penalties(state, &validator_statuses, spec)?; // Registry Updates. process_registry_updates(state, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 411e0ee9c21..57694a2dc7f 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -48,7 +48,7 @@ impl AttestationDelta { /// Apply attester and proposer rewards. pub fn process_rewards_and_penalties( state: &mut BeaconState, - validator_statuses: &mut ValidatorStatuses, + validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result<(), Error> { if state.current_epoch() == T::genesis_epoch() { @@ -62,7 +62,7 @@ pub fn process_rewards_and_penalties( return Err(Error::ValidatorStatusesInconsistent); } - let deltas = get_attestation_deltas(state, validator_statuses, spec)?; + let deltas = get_attestation_deltas_all(state, validator_statuses, spec)?; // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). @@ -76,10 +76,41 @@ pub fn process_rewards_and_penalties( } /// Apply rewards for participation in attestations during the previous epoch. 
-pub fn get_attestation_deltas<T: EthSpec>( +pub fn get_attestation_deltas_all<T: EthSpec>( state: &BeaconState<T>, validator_statuses: &ValidatorStatuses, spec: &ChainSpec, +) -> Result<Vec<AttestationDelta>, Error> { + get_attestation_deltas(state, validator_statuses, None, spec) +} + +/// Apply rewards for participation in attestations during the previous epoch, computing +/// rewards only for the given subset of validators. +pub fn get_attestation_deltas_subset<T: EthSpec>( + state: &BeaconState<T>, + validator_statuses: &ValidatorStatuses, + validators_subset: &Vec<usize>, + spec: &ChainSpec, +) -> Result<Vec<(usize, AttestationDelta)>, Error> { + get_attestation_deltas(state, validator_statuses, Some(validators_subset), spec).map(|deltas| { + deltas + .into_iter() + .enumerate() + .filter(|(index, _)| validators_subset.contains(index)) + .collect() + }) +} + +/// Apply rewards for participation in attestations during the previous epoch. +/// If `maybe_validators_subset` is specified, only the deltas for the specified validator subset +/// are returned; otherwise deltas for all validators are returned. +/// +/// Returns a vec of `AttestationDelta` indexed by validator index. +fn get_attestation_deltas<T: EthSpec>( + state: &BeaconState<T>, + validator_statuses: &ValidatorStatuses, + maybe_validators_subset: Option<&Vec<usize>>, + spec: &ChainSpec, ) -> Result<Vec<AttestationDelta>, Error> { let finality_delay = state .previous_epoch() @@ -91,6 +122,13 @@ pub fn get_attestation_deltas( let total_balances = &validator_statuses.total_balances; let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_balances.current_epoch()); + // Ignore the validator if a subset is specified and the validator is not in it + let include_validator_delta = |idx| match maybe_validators_subset.as_ref() { + None => true, + Some(validators_subset) if validators_subset.contains(&idx) => true, + Some(_) => false, + }; + for (index, validator) in validator_statuses.statuses.iter().enumerate() { // Ignore ineligible validators. All sub-functions of the spec do this except for // `get_inclusion_delay_deltas`.
It's safe to do so here because any validator that is in @@ -106,41 +144,46 @@ pub fn get_attestation_deltas( spec, )?; - let source_delta = - get_source_delta(validator, base_reward, total_balances, finality_delay, spec)?; - let target_delta = - get_target_delta(validator, base_reward, total_balances, finality_delay, spec)?; - let head_delta = - get_head_delta(validator, base_reward, total_balances, finality_delay, spec)?; let (inclusion_delay_delta, proposer_delta) = get_inclusion_delay_delta(validator, base_reward, spec)?; - let inactivity_penalty_delta = - get_inactivity_penalty_delta(validator, base_reward, finality_delay, spec)?; - - let delta = deltas - .get_mut(index) - .ok_or(Error::DeltaOutOfBounds(index))?; - delta.source_delta.combine(source_delta)?; - delta.target_delta.combine(target_delta)?; - delta.head_delta.combine(head_delta)?; - delta.inclusion_delay_delta.combine(inclusion_delay_delta)?; - delta - .inactivity_penalty_delta - .combine(inactivity_penalty_delta)?; + + if include_validator_delta(index) { + let source_delta = + get_source_delta(validator, base_reward, total_balances, finality_delay, spec)?; + let target_delta = + get_target_delta(validator, base_reward, total_balances, finality_delay, spec)?; + let head_delta = + get_head_delta(validator, base_reward, total_balances, finality_delay, spec)?; + let inactivity_penalty_delta = + get_inactivity_penalty_delta(validator, base_reward, finality_delay, spec)?; + + let delta = deltas + .get_mut(index) + .ok_or(Error::DeltaOutOfBounds(index))?; + delta.source_delta.combine(source_delta)?; + delta.target_delta.combine(target_delta)?; + delta.head_delta.combine(head_delta)?; + delta.inclusion_delay_delta.combine(inclusion_delay_delta)?; + delta + .inactivity_penalty_delta + .combine(inactivity_penalty_delta)?; + } if let Some((proposer_index, proposer_delta)) = proposer_delta { - deltas - .get_mut(proposer_index) - .ok_or(Error::ValidatorStatusesInconsistent)? - .inclusion_delay_delta - .combine(proposer_delta)?; + if include_validator_delta(proposer_index) { + deltas + .get_mut(proposer_index) + .ok_or(Error::ValidatorStatusesInconsistent)? 
+ .inclusion_delay_delta + .combine(proposer_delta)?; + } } } Ok(deltas) } -fn get_attestation_component_delta( +pub fn get_attestation_component_delta( index_in_unslashed_attesting_indices: bool, attesting_balance: u64, total_balances: &TotalBalances, @@ -223,7 +266,7 @@ fn get_head_delta( ) } -fn get_inclusion_delay_delta( +pub fn get_inclusion_delay_delta( validator: &ValidatorStatus, base_reward: u64, spec: &ChainSpec, @@ -249,7 +292,7 @@ fn get_inclusion_delay_delta( } } -fn get_inactivity_penalty_delta( +pub fn get_inactivity_penalty_delta( validator: &ValidatorStatus, base_reward: u64, finality_delay: u64, diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index 38ece87cf63..ebdd5d83cb5 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -205,7 +205,7 @@ impl ValidatorStatuses { let effective_balance = validator.effective_balance(); let mut status = ValidatorStatus { is_slashed: validator.slashed(), - is_eligible: state.is_eligible_validator(previous_epoch, validator), + is_eligible: state.is_eligible_validator(previous_epoch, validator)?, is_withdrawable_in_current_epoch: validator.is_withdrawable_at(current_epoch), current_epoch_effective_balance: effective_balance, ..ValidatorStatus::default() diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index f4b4b77c08d..b1231333566 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -116,7 +116,7 @@ pub fn process_epoch_single_pass( let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); let next_epoch = state.next_epoch()?; - let is_in_inactivity_leak = state.is_in_inactivity_leak(previous_epoch, spec); + let is_in_inactivity_leak = state.is_in_inactivity_leak(previous_epoch, spec)?; let total_active_balance = state.get_total_active_balance()?; let churn_limit = state.get_churn_limit(spec)?; let finalized_checkpoint = state.finalized_checkpoint(); @@ -198,7 +198,7 @@ pub fn process_epoch_single_pass( let is_active_previous_epoch = validator.is_active_at(previous_epoch); let is_eligible = is_active_previous_epoch || (validator.slashed() - && previous_epoch + Epoch::new(1) < validator.withdrawable_epoch()); + && previous_epoch.safe_add(1)? < validator.withdrawable_epoch()); let base_reward = if is_eligible { epoch_cache.get_base_reward(index)? 
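The subset filtering above hinges on the `include_validator_delta` closure: with no subset, every validator index passes; with a subset, inclusion is a membership test against the supplied indices. A standalone sketch of that predicate follows (not the Lighthouse code itself; it swaps the diff's `Vec<usize>` for a `HashSet<usize>` to show the constant-time alternative to the linear `Vec::contains` scan):

```rust
use std::collections::HashSet;

// Sketch of the subset predicate from `get_attestation_deltas`: `None` means
// "include every validator"; `Some(subset)` means "include only members".
fn include_validator_delta(maybe_subset: Option<&HashSet<usize>>, idx: usize) -> bool {
    maybe_subset.map_or(true, |subset| subset.contains(&idx))
}

fn main() {
    let subset: HashSet<usize> = [1, 3].into_iter().collect();
    assert!(include_validator_delta(None, 7)); // no subset: everyone included
    assert!(include_validator_delta(Some(&subset), 3));
    assert!(!include_validator_delta(Some(&subset), 2));
}
```

For the small subsets the rewards API deals with, the `Vec::contains` scan in the diff is reasonable; a set only pays off once subsets grow large, since the predicate runs once per validator in the state.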
diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 864844080fb..b3924cd9732 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -138,7 +138,7 @@ impl VerifyOperation for SignedVoluntaryExit { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![self.message.epoch] } @@ -156,7 +156,7 @@ impl VerifyOperation for AttesterSlashing { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ self.attestation_1.data.target.epoch, @@ -177,7 +177,7 @@ impl VerifyOperation for ProposerSlashing { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. smallvec![self @@ -200,7 +200,7 @@ impl VerifyOperation for SignedBlsToExecutionChange { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![] } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 92b2471696b..8b5603eb8e3 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1888,15 +1888,27 @@ impl BeaconState { /// Passing `previous_epoch` to this function rather than computing it internally provides /// a tangible speed improvement in state processing. - pub fn is_eligible_validator(&self, previous_epoch: Epoch, val: &Validator) -> bool { - val.is_active_at(previous_epoch) - || (val.slashed() && previous_epoch + Epoch::new(1) < val.withdrawable_epoch()) + pub fn is_eligible_validator( + &self, + previous_epoch: Epoch, + val: &Validator, + ) -> Result { + Ok(val.is_active_at(previous_epoch) + || (val.slashed() + && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch())) } /// Passing `previous_epoch` to this function rather than computing it internally provides /// a tangible speed improvement in state processing. - pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool { - (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty + pub fn is_in_inactivity_leak( + &self, + previous_epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + Ok( + (previous_epoch.safe_sub(self.finalized_checkpoint().epoch)?) + > spec.min_epochs_to_inactivity_penalty, + ) } /// Get the `SyncCommittee` associated with the next slot. Useful because sync committees @@ -1926,7 +1938,7 @@ impl BeaconState { } // FIXME(sproul): missing eth1 data votes, they would need a ResetListDiff - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { // Required for macros (which use type-hints internally). type GenericValidator = Validator; @@ -2031,7 +2043,7 @@ impl BeaconState::NUM_FIELDS.next_power_of_two(); /// Specialised deserialisation method that uses the `ChainSpec` as context. 
- #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); @@ -2054,7 +2066,7 @@ impl BeaconState Result<(), Error> { match self { Self::Base(inner) => { @@ -2104,7 +2116,7 @@ impl BeaconState { map_beacon_state_base_fields!(state, |_, field| { diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index b6781f46c2d..90459c20b73 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use super::BeaconState; use crate::*; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 95a45ce12f0..98640ac3077 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -260,7 +260,7 @@ impl ChainSpec { /// Return the name of the fork activated at `slot`, if any. pub fn fork_activated_at_slot(&self, slot: Slot) -> Option { - let prev_slot_fork = self.fork_name_at_slot::(slot - 1); + let prev_slot_fork = self.fork_name_at_slot::(slot.saturating_sub(Slot::new(1))); let slot_fork = self.fork_name_at_slot::(slot); (slot_fork != prev_slot_fork).then_some(slot_fork) } @@ -468,7 +468,7 @@ impl ChainSpec { epoch.safe_add(1)?.safe_add(self.max_seed_lookahead) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub const fn attestation_subnet_prefix_bits(&self) -> u32 { let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2(); self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 7761c432abc..25f52bc1208 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -109,7 +109,7 @@ impl ExecutionPayload { } } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. pub fn max_execution_payload_merge_size() -> usize { // Fixed part @@ -120,7 +120,7 @@ impl ExecutionPayload { + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. 
pub fn max_execution_payload_capella_size() -> usize { // Fixed part diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 5dc2535b48e..748e8234c48 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -3,7 +3,7 @@ #![cfg_attr( not(test), deny( - clippy::integer_arithmetic, + clippy::arithmetic_side_effects, clippy::disallowed_methods, clippy::indexing_slicing ) diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs index 89a56cb87d8..be119fbef26 100644 --- a/consensus/types/src/participation_list.rs +++ b/consensus/types/src/participation_list.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use crate::{Hash256, ParticipationFlags, Unsigned, VariableList}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 6793fe55740..eb25b57b0d7 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -72,7 +72,7 @@ impl SubnetId { .into()) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Computes the set of subnets the node should be subscribed to during the current epoch, /// along with the first epoch in which these subscriptions are no longer valid. pub fn compute_subnets_for_epoch( diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index c0333bcfd66..d172342ee64 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use std::fmt::Debug; diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 519387bd589..3176c4c4bf6 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -28,7 +28,7 @@ pub trait TestRandom { impl TestRandom for PhantomData { fn random_for_test(_rng: &mut impl RngCore) -> Self { - PhantomData::default() + PhantomData } } diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index a47b48a30ad..a4237d855b5 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -31,14 +31,19 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2_network_config::Eth2NetworkConfig; use std::path::PathBuf; use std::time::{Duration, Instant}; use types::{EthSpec, FullPayload, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(5); -pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { - let spec = &T::default_spec(); +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; let executor = env.core_context().executor; /* diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 8662a804761..0584cd65496 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use lighthouse_network::{ - discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME}, + discovery::{build_enr, CombinedKey, CombinedKeyExt, ENR_FILENAME}, + libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; use 
std::fs::File; @@ -29,8 +30,8 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { config.enr_udp4_port = Some(udp_port); config.enr_tcp6_port = Some(tcp_port); - let local_keypair = Keypair::generate_secp256k1(); - let enr_key = CombinedKey::from_libp2p(&local_keypair)?; + let secp256k1_keypair = secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair); let enr_fork_id = EnrForkId { fork_digest: ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero()), next_fork_version: genesis_fork_version, @@ -47,13 +48,10 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { .write_all(enr.to_base64().as_bytes()) .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?; - let secret_bytes = match local_keypair { - Keypair::Secp256k1(key) => key.secret().to_bytes(), - _ => return Err("Key is not a secp256k1 key".into()), - }; - let mut key_file = File::create(output_dir.join(NETWORK_KEY_FILENAME)) .map_err(|e| format!("Unable to create {}: {:?}", NETWORK_KEY_FILENAME, e))?; + + let secret_bytes = secp256k1_keypair.secret().to_bytes(); key_file .write_all(&secret_bytes) .map_err(|e| format!("Unable to write key to {}: {:?}", NETWORK_KEY_FILENAME, e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 76963f067ef..7520bd916ab 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -16,11 +16,13 @@ mod parse_ssz; mod replace_state_pubkeys; mod skip_slots; mod state_diff; +mod state_root; mod transition_blocks; use clap::{App, Arg, ArgMatches, SubCommand}; -use clap_utils::parse_path_with_default_in_home_dir; +use clap_utils::parse_optional; use environment::{EnvironmentBuilder, LoggerConfig}; +use eth2_network_config::Eth2NetworkConfig; use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; @@ -39,7 +41,6 @@ fn main() { .long("spec") .value_name("STRING") .takes_value(true) - .required(true) .possible_values(&["minimal", "mainnet", "gnosis"]) .default_value("mainnet") .global(true), @@ -51,7 +52,16 @@ fn main() { .value_name("PATH") .takes_value(true) .global(true) - .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), + .help("The testnet dir."), + ) + .arg( + Arg::with_name("network") + .long("network") + .value_name("NAME") + .takes_value(true) + .global(true) + .help("The network to use. Defaults to mainnet.") + .conflicts_with("testnet-dir") ) .subcommand( SubCommand::with_name("skip-slots") @@ -127,7 +137,7 @@ fn main() { .takes_value(true) .conflicts_with("beacon-url") .requires("block-path") - .help("Path to load a BeaconState from file as SSZ."), + .help("Path to load a BeaconState from as SSZ."), ) .arg( Arg::with_name("block-path") @@ -136,7 +146,7 @@ fn main() { .takes_value(true) .conflicts_with("beacon-url") .requires("pre-state-path") - .help("Path to load a SignedBeaconBlock from file as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ."), ) .arg( Arg::with_name("post-state-output-path") @@ -362,7 +372,6 @@ fn main() { .index(2) .value_name("BIP39_MNENMONIC") .takes_value(true) - .required(true) .default_value( "replace nephew blur decorate waste convince soup column \ orient excite play baby", @@ -383,7 +392,6 @@ fn main() { .help("The block hash used when generating an execution payload. 
This \ value is used for `execution_payload_header.block_hash` as well as \ `execution_payload_header.random`") - .required(true) .default_value( "0x0000000000000000000000000000000000000000000000000000000000000000", ), @@ -401,7 +409,6 @@ fn main() { .value_name("INTEGER") .takes_value(true) .help("The base fee per gas field in the execution payload generated.") - .required(true) .default_value("1000000000"), ) .arg( @@ -410,7 +417,6 @@ fn main() { .value_name("INTEGER") .takes_value(true) .help("The gas limit field in the execution payload generated.") - .required(true) .default_value("30000000"), ) .arg( @@ -809,14 +815,14 @@ fn main() { ) .subcommand( SubCommand::with_name("block-root") - .about("Computes the block root of some block") + .about("Computes the block root of some block.") .arg( Arg::with_name("block-path") .long("block-path") .value_name("PATH") .takes_value(true) .conflicts_with("beacon-url") - .help("Path to load a SignedBeaconBlock from file as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ."), ) .arg( Arg::with_name("beacon-url") @@ -858,6 +864,41 @@ fn main() { .help("Path to second SSZ state"), ) ) + .subcommand( + SubCommand::with_name("state-root") + .about("Computes the state root of some state.") + .arg( + Arg::with_name("state-path") + .long("state-path") + .value_name("PATH") + .takes_value(true) + .conflicts_with("beacon-url") + .help("Path to load a BeaconState from as SSZ."), + ) + .arg( + Arg::with_name("beacon-url") + .long("beacon-url") + .value_name("URL") + .takes_value(true) + .help("URL to a beacon-API provider."), + ) + .arg( + Arg::with_name("state-id") + .long("state-id") + .value_name("BLOCK_ID") + .takes_value(true) + .requires("beacon-url") + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .value_name("INTEGER") + .takes_value(true) + .default_value("1") + .help("Number of repeat runs, useful for benchmarking."), + ) + ) .get_matches(); let result = matches @@ -904,17 +945,44 @@ fn run( .build() .map_err(|e| format!("should build env: {:?}", e))?; - let testnet_dir = parse_path_with_default_in_home_dir( - matches, - "testnet-dir", - PathBuf::from(directory::DEFAULT_ROOT_DIR).join("testnet"), - )?; + // Determine testnet-dir path or network name depending on CLI flags. + let (testnet_dir, network_name) = + if let Some(testnet_dir) = parse_optional::(matches, "testnet-dir")? { + (Some(testnet_dir), None) + } else { + let network_name = + parse_optional(matches, "network")?.unwrap_or_else(|| "mainnet".to_string()); + (None, Some(network_name)) + }; + + // Lazily load either the testnet dir or the network config, as required. + // Some subcommands like new-testnet need the testnet dir but not the network config. 
+ let get_testnet_dir = || testnet_dir.clone().ok_or("testnet-dir is required"); + let get_network_config = || { + if let Some(testnet_dir) = &testnet_dir { + Eth2NetworkConfig::load(testnet_dir.clone()).map_err(|e| { + format!( + "Unable to open testnet dir at {}: {}", + testnet_dir.display(), + e + ) + }) + } else { + let network_name = network_name.ok_or("no network name or testnet-dir provided")?; + Eth2NetworkConfig::constant(&network_name)?.ok_or("invalid network name".into()) + } + }; match matches.subcommand() { - ("transition-blocks", Some(matches)) => transition_blocks::run::(env, matches) - .map_err(|e| format!("Failed to transition blocks: {}", e)), + ("transition-blocks", Some(matches)) => { + let network_config = get_network_config()?; + transition_blocks::run::(env, network_config, matches) + .map_err(|e| format!("Failed to transition blocks: {}", e)) + } ("skip-slots", Some(matches)) => { - skip_slots::run::(env, matches).map_err(|e| format!("Failed to skip slots: {}", e)) + let network_config = get_network_config()?; + skip_slots::run::(env, network_config, matches) + .map_err(|e| format!("Failed to skip slots: {}", e)) } ("pretty-ssz", Some(matches)) => { run_parse_ssz::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) @@ -923,22 +991,33 @@ fn run( deploy_deposit_contract::run::(env, matches) .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) } - ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, testnet_dir, matches) - .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)), - ("interop-genesis", Some(matches)) => interop_genesis::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run interop-genesis command: {}", e)), + ("eth1-genesis", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; + eth1_genesis::run::(env, testnet_dir, matches) + .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)) + } + ("interop-genesis", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; + interop_genesis::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to run interop-genesis command: {}", e)) + } ("change-genesis-time", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; change_genesis_time::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)) } ("create-payload-header", Some(matches)) => create_payload_header::run::(matches) .map_err(|e| format!("Failed to run create-payload-header command: {}", e)), ("replace-state-pubkeys", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; replace_state_pubkeys::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) } - ("new-testnet", Some(matches)) => new_testnet::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run new_testnet command: {}", e)), + ("new-testnet", Some(matches)) => { + let testnet_dir = get_testnet_dir()?; + new_testnet::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to run new_testnet command: {}", e)) + } ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) @@ -949,10 +1028,18 @@ fn run( .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) .map_err(|e| format!("Failed to run indexed-attestations command: 
{}", e)), - ("block-root", Some(matches)) => block_root::run::(env, matches) - .map_err(|e| format!("Failed to run block-root command: {}", e)), ("state-diff", Some(matches)) => state_diff::run::(env, matches) .map_err(|e| format!("Failed to run state-diff command: {}", e)), + ("block-root", Some(matches)) => { + let network_config = get_network_config()?; + block_root::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run block-root command: {}", e)) + } + ("state-root", Some(matches)) => { + let network_config = get_network_config()?; + state_root::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run state-root command: {}", e)) + } (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index bd0fdbf823c..c6880f3c810 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -49,6 +49,7 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use state_processing::AllCaches; @@ -60,8 +61,12 @@ use types::{BeaconState, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); -pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { - let spec = &T::default_spec(); +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; let executor = env.core_context().executor; let output_path: Option = parse_optional(matches, "output-path")?; diff --git a/lcli/src/state_root.rs b/lcli/src/state_root.rs new file mode 100644 index 00000000000..efcee2827ab --- /dev/null +++ b/lcli/src/state_root.rs @@ -0,0 +1,76 @@ +use crate::transition_blocks::load_from_ssz_with; +use clap::ArgMatches; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use eth2_network_config::Eth2NetworkConfig; +use std::path::PathBuf; +use std::time::{Duration, Instant}; +use types::{BeaconState, EthSpec}; + +const HTTP_TIMEOUT: Duration = Duration::from_secs(10); + +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let executor = env.core_context().executor; + + let spec = &network_config.chain_spec::()?; + + let state_path: Option = parse_optional(matches, "state-path")?; + let beacon_url: Option = parse_optional(matches, "beacon-url")?; + let runs: usize = parse_required(matches, "runs")?; + + info!( + "Using {} network ({} spec)", + spec.config_name.as_deref().unwrap_or("unknown"), + T::spec_name() + ); + info!("Doing {} runs", runs); + + let state = match (state_path, beacon_url) { + (Some(state_path), None) => { + info!("State path: {:?}", state_path); + load_from_ssz_with(&state_path, spec, BeaconState::from_ssz_bytes)? + } + (None, Some(beacon_url)) => { + let state_id: StateId = parse_required(matches, "state-id")?; + let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + executor + .handle() + .ok_or("shutdown in progress")? 
+ .block_on(async move { + client + .get_debug_beacon_states::(state_id) + .await + .map_err(|e| format!("Failed to download state: {:?}", e)) + }) + .map_err(|e| format!("Failed to complete task: {:?}", e))? + .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? + .data + } + _ => return Err("must supply either --state-path or --beacon-url".into()), + }; + + /* + * Perform the core "runs". + */ + let mut state_root = None; + for i in 0..runs { + let mut state = state.clone(); + let timer = Instant::now(); + state_root = Some( + state + .update_tree_hash_cache() + .map_err(|e| format!("error computing state root: {e:?}"))?, + ); + info!("Run {}: {:?}", i, timer.elapsed()); + } + + if let Some(state_root) = state_root { + info!("State root is {:?}", state_root); + } + Ok(()) +} diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 92df22e0fc6..0db2763aa7f 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -71,6 +71,7 @@ use eth2::{ types::{BlockId, StateId}, BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; +use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, @@ -94,8 +95,12 @@ struct Config { exclude_post_block_thc: bool, } -pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { - let spec = &T::default_spec(); +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; let executor = env.core_context().executor; /* diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 91284d58104..6cba35b5edf 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -178,7 +178,7 @@ fn checkpoint_sync_url_timeout_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.chain.checkpoint_sync_url_timeout, 60); + assert_eq!(config.chain.checkpoint_sync_url_timeout, 180); }); } @@ -1134,48 +1134,13 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let mainnet = vec![ - // Lighthouse Team (Sigma Prime) - "enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo", - "enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo", - // EF Team - "enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg", - "enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg", - "enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg", - 
"enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg", - // Teku team (Consensys) - "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", - "enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA", - // Prysm team (Prysmatic Labs) - "enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg", - "enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA", - "enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg", - // Nimbus team - "enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM", - "enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM" - ]; + let number_of_boot_nodes = 15; CommandLineTest::new() .run_with_zero_port() .with_config(|config| { // Lighthouse Team (Sigma Prime) - assert_eq!(config.network.boot_nodes_enr[0].to_base64(), mainnet[0]); - assert_eq!(config.network.boot_nodes_enr[1].to_base64(), mainnet[1]); - // EF Team - assert_eq!(config.network.boot_nodes_enr[2].to_base64(), mainnet[2]); - assert_eq!(config.network.boot_nodes_enr[3].to_base64(), mainnet[3]); - assert_eq!(config.network.boot_nodes_enr[4].to_base64(), mainnet[4]); - assert_eq!(config.network.boot_nodes_enr[5].to_base64(), mainnet[5]); - // Teku team (Consensys) - assert_eq!(config.network.boot_nodes_enr[6].to_base64(), mainnet[6]); - assert_eq!(config.network.boot_nodes_enr[7].to_base64(), mainnet[7]); - // Prysm team (Prysmatic Labs) - assert_eq!(config.network.boot_nodes_enr[8].to_base64(), mainnet[8]); - assert_eq!(config.network.boot_nodes_enr[9].to_base64(), mainnet[9]); - assert_eq!(config.network.boot_nodes_enr[10].to_base64(), mainnet[10]); - // Nimbus team - assert_eq!(config.network.boot_nodes_enr[11].to_base64(), mainnet[11]); - assert_eq!(config.network.boot_nodes_enr[12].to_base64(), mainnet[12]); + assert_eq!(config.network.boot_nodes_enr.len(), number_of_boot_nodes); }); } #[test] @@ -1738,23 +1703,24 @@ fn no_reconstruct_historic_states_flag() { .with_config(|config| assert!(!config.chain.reconstruct_historic_states)); } #[test] -fn db_migration_period_default() { +fn epochs_per_migration_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { 
assert_eq!( - config.store_migrator.epochs_per_run, - beacon_node::beacon_chain::migrate::DEFAULT_EPOCHS_PER_RUN + config.chain.epochs_per_migration, + beacon_node::beacon_chain::migrate::DEFAULT_EPOCHS_PER_MIGRATION ) }); } #[test] -fn db_migration_period_override() { +fn epochs_per_migration_override() { CommandLineTest::new() - .flag("db-migration-period", Some("128")) + .flag("epochs-per-migration", Some("128")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store_migrator.epochs_per_run, 128)); + .with_config(|config| assert_eq!(config.chain.epochs_per_migration, 128)); } + #[test] fn epochs_per_state_diff_default() { CommandLineTest::new() diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh index 6db8753d02b..cd915e470d6 100755 --- a/scripts/local_testnet/clean.sh +++ b/scripts/local_testnet/clean.sh @@ -9,5 +9,5 @@ set -Eeuo pipefail source ./vars.env if [ -d $DATADIR ]; then - rm -r $DATADIR + rm -rf $DATADIR fi diff --git a/slasher/tests/backend.rs b/slasher/tests/backend.rs index 9e68107de74..fd1a6ae14f6 100644 --- a/slasher/tests/backend.rs +++ b/slasher/tests/backend.rs @@ -1,4 +1,4 @@ -#![cfg(all(feature = "lmdb"))] +#![cfg(feature = "lmdb")] use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride}; use std::fs::File; diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index f7562f477a2..81a1739eb12 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -13,7 +13,7 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) -WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget) +CURL := $(if $(LIGHTHOUSE_GITHUB_TOKEN),curl -L --header "Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",curl -L) all: make $(OUTPUT_DIR) @@ -27,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS) $(BLS_OUTPUT_DIR): mkdir $(BLS_OUTPUT_DIR) - $(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) + $(CURL) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -o $(BLS_TARBALL) tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) %-$(TESTS_TAG).tar.gz: - $(WGET) $(BASE_URL)/$*.tar.gz -O $@ + $(CURL) $(BASE_URL)/$*.tar.gz -o $@ clean-test-files: rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 1b15ae075ec..a81eda94c92 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -123,7 +123,7 @@ impl EpochTransition for RewardsAndPenalties { BeaconState::Base(_) => { let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; - base::process_rewards_and_penalties(state, &mut validator_statuses, spec) + base::process_rewards_and_penalties(state, &validator_statuses, spec) } BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_rewards_and_penalties_slow(state, spec) diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index c59ceabe0b1..ee0fc265e11 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -118,7 +118,7 @@ impl Case for RewardsTest { let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state)?; - let deltas = 
base::rewards_and_penalties::get_attestation_deltas( + let deltas = base::rewards_and_penalties::get_attestation_deltas_all( &state, &validator_statuses, spec, diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index ea5d005c16a..ac77349c58e 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -14,3 +14,4 @@ validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } sensitive_url = { path = "../../common/sensitive_url" } execution_layer = { path = "../../beacon_node/execution_layer" } +tokio = { version = "1.14.0", features = ["time"] } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index d4fd115bec3..62db67b8c57 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -10,6 +10,7 @@ use std::path::PathBuf; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempfile::{Builder as TempBuilder, TempDir}; +use tokio::time::timeout; use types::EthSpec; use validator_client::ProductionValidatorClient; use validator_dir::insecure_keys::build_deterministic_validator_dirs; @@ -24,6 +25,8 @@ pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. const HTTP_TIMEOUT: Duration = Duration::from_secs(4); +/// The timeout for a beacon node to start up. +const STARTUP_TIMEOUT: Duration = Duration::from_secs(60); /// Provides a beacon node that is running in the current process on a given tokio executor (it /// is _local_ to this process). @@ -51,12 +54,16 @@ impl LocalBeaconNode { client_config.set_data_dir(datadir.path().into()); client_config.network.network_dir = PathBuf::from(datadir.path()).join("network"); - ProductionBeaconNode::new(context, client_config) - .await - .map(move |client| Self { - client: client.into_inner(), - datadir, - }) + timeout( + STARTUP_TIMEOUT, + ProductionBeaconNode::new(context, client_config), + ) + .await + .map_err(|_| format!("Beacon node startup timed out after {:?}", STARTUP_TIMEOUT))? 
+ .map(move |client| Self { + client: client.into_inner(), + datadir, + }) } } diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 5dc2d5ec84a..ff80201051f 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -25,8 +25,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("4") .help("Number of beacon nodes")) .arg(Arg::with_name("proposer-nodes") - .short("n") - .long("nodes") + .short("p") + .long("proposer_nodes") .takes_value(true) .default_value("2") .help("Number of proposer-only beacon nodes")) @@ -64,8 +64,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("4") .help("Number of beacon nodes")) .arg(Arg::with_name("proposer-nodes") - .short("n") - .long("nodes") + .short("p") + .long("proposer_nodes") .takes_value(true) .default_value("2") .help("Number of proposer-only beacon nodes")) diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 3e764d27d02..57c944cf1a7 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,14 +1,16 @@ use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; -use crate::{checks, LocalNetwork, E}; +use crate::{checks, LocalNetwork}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::AnvilEth1Instance; +use crate::retry::with_retry; use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; +use node_test_rig::environment::RuntimeContext; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, - testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles, + testing_client_config, testing_validator_config, ClientConfig, ClientGenesis, ValidatorFiles, }; use rayon::prelude::*; use sensitive_url::SensitiveUrl; @@ -107,71 +109,24 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let context = env.core_context(); let main_future = async { - /* - * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit - * validators. - */ - let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; - let deposit_contract = anvil_eth1_instance.deposit_contract; - let chain_id = anvil_eth1_instance.anvil.chain_id(); - let anvil = anvil_eth1_instance.anvil; - let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str()) - .expect("Unable to parse anvil endpoint."); - let deposit_contract_address = deposit_contract.address(); - - // Start a timer that produces eth1 blocks on an interval. - tokio::spawn(async move { - let mut interval = tokio::time::interval(eth1_block_time); - loop { - interval.tick().await; - let _ = anvil.evm_mine().await; - } - }); - - // Submit deposits to the deposit contract. 
diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs
index 3e764d27d02..57c944cf1a7 100644
--- a/testing/simulator/src/eth1_sim.rs
+++ b/testing/simulator/src/eth1_sim.rs
@@ -1,14 +1,16 @@
 use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY};
-use crate::{checks, LocalNetwork, E};
+use crate::{checks, LocalNetwork};
 use clap::ArgMatches;
 use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
 use eth1_test_rig::AnvilEth1Instance;
 
+use crate::retry::with_retry;
 use execution_layer::http::deposit_methods::Eth1Id;
 use futures::prelude::*;
+use node_test_rig::environment::RuntimeContext;
 use node_test_rig::{
     environment::{EnvironmentBuilder, LoggerConfig},
-    testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles,
+    testing_client_config, testing_validator_config, ClientConfig, ClientGenesis, ValidatorFiles,
 };
 use rayon::prelude::*;
 use sensitive_url::SensitiveUrl;
@@ -107,71 +109,24 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     let context = env.core_context();
 
     let main_future = async {
-        /*
-         * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
-         * validators.
-         */
-        let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
-        let deposit_contract = anvil_eth1_instance.deposit_contract;
-        let chain_id = anvil_eth1_instance.anvil.chain_id();
-        let anvil = anvil_eth1_instance.anvil;
-        let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str())
-            .expect("Unable to parse anvil endpoint.");
-        let deposit_contract_address = deposit_contract.address();
-
-        // Start a timer that produces eth1 blocks on an interval.
-        tokio::spawn(async move {
-            let mut interval = tokio::time::interval(eth1_block_time);
-            loop {
-                interval.tick().await;
-                let _ = anvil.evm_mine().await;
-            }
-        });
-
-        // Submit deposits to the deposit contract.
-        tokio::spawn(async move {
-            for i in 0..total_validator_count {
-                println!("Submitting deposit for validator {}...", i);
-                let _ = deposit_contract
-                    .deposit_deterministic_async::<E>(i, deposit_amount)
-                    .await;
-            }
-        });
-
-        let mut beacon_config = testing_client_config();
-
-        beacon_config.genesis = ClientGenesis::DepositContract;
-        beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint);
-        beacon_config.eth1.deposit_contract_address = deposit_contract_address;
-        beacon_config.eth1.deposit_contract_deploy_block = 0;
-        beacon_config.eth1.lowest_cached_block_number = 0;
-        beacon_config.eth1.follow_distance = 1;
-        beacon_config.eth1.node_far_behind_seconds = 20;
-        beacon_config.dummy_eth1_backend = false;
-        beacon_config.sync_eth1_chain = true;
-        beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
-        beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
-        beacon_config.network.target_peers = node_count + proposer_nodes - 1;
-
-        beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);
-
-        if post_merge_sim {
-            let el_config = execution_layer::Config {
-                execution_endpoints: vec![SensitiveUrl::parse(&format!(
-                    "http://localhost:{}",
-                    EXECUTION_PORT
-                ))
-                .unwrap()],
-                ..Default::default()
-            };
-
-            beacon_config.execution_layer = Some(el_config);
-        }
-
         /*
          * Create a new `LocalNetwork` with one beacon node.
          */
-        let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?;
+        let max_retries = 3;
+        let (network, beacon_config) = with_retry(max_retries, || {
+            Box::pin(create_local_network(
+                LocalNetworkParams {
+                    eth1_block_time,
+                    total_validator_count,
+                    deposit_amount,
+                    node_count,
+                    proposer_nodes,
+                    post_merge_sim,
+                },
+                context.clone(),
+            ))
+        })
+        .await?;
 
         /*
          * One by one, add beacon nodes to the network.
@@ -341,3 +296,88 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
 
     Ok(())
 }
+
+struct LocalNetworkParams {
+    eth1_block_time: Duration,
+    total_validator_count: usize,
+    deposit_amount: u64,
+    node_count: usize,
+    proposer_nodes: usize,
+    post_merge_sim: bool,
+}
+
+async fn create_local_network<E: EthSpec>(
+    LocalNetworkParams {
+        eth1_block_time,
+        total_validator_count,
+        deposit_amount,
+        node_count,
+        proposer_nodes,
+        post_merge_sim,
+    }: LocalNetworkParams,
+    context: RuntimeContext<E>,
+) -> Result<(LocalNetwork<E>, ClientConfig), String> {
+    /*
+     * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
+     * validators.
+     */
+    let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
+    let deposit_contract = anvil_eth1_instance.deposit_contract;
+    let chain_id = anvil_eth1_instance.anvil.chain_id();
+    let anvil = anvil_eth1_instance.anvil;
+    let eth1_endpoint =
+        SensitiveUrl::parse(anvil.endpoint().as_str()).expect("Unable to parse anvil endpoint.");
+    let deposit_contract_address = deposit_contract.address();
+
+    // Start a timer that produces eth1 blocks on an interval.
+    tokio::spawn(async move {
+        let mut interval = tokio::time::interval(eth1_block_time);
+        loop {
+            interval.tick().await;
+            let _ = anvil.evm_mine().await;
+        }
+    });
+
+    // Submit deposits to the deposit contract.
+    tokio::spawn(async move {
+        for i in 0..total_validator_count {
+            println!("Submitting deposit for validator {}...", i);
+            let _ = deposit_contract
+                .deposit_deterministic_async::<E>(i, deposit_amount)
+                .await;
+        }
+    });
+
+    let mut beacon_config = testing_client_config();
+
+    beacon_config.genesis = ClientGenesis::DepositContract;
+    beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint);
+    beacon_config.eth1.deposit_contract_address = deposit_contract_address;
+    beacon_config.eth1.deposit_contract_deploy_block = 0;
+    beacon_config.eth1.lowest_cached_block_number = 0;
+    beacon_config.eth1.follow_distance = 1;
+    beacon_config.eth1.node_far_behind_seconds = 20;
+    beacon_config.dummy_eth1_backend = false;
+    beacon_config.sync_eth1_chain = true;
+    beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
+    beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
+    beacon_config.network.target_peers = node_count + proposer_nodes - 1;
+
+    beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);
+
+    if post_merge_sim {
+        let el_config = execution_layer::Config {
+            execution_endpoints: vec![SensitiveUrl::parse(&format!(
+                "http://localhost:{}",
+                EXECUTION_PORT
+            ))
+            .unwrap()],
+            ..Default::default()
+        };
+
+        beacon_config.execution_layer = Some(el_config);
+    }
+
+    let network = LocalNetwork::new(context, beacon_config.clone()).await?;
+    Ok((network, beacon_config))
+}
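The `with_retry` helper used at the new call site is added in `testing/simulator/src/retry.rs` (see the new file below). Its closure argument is `FnMut` and must hand back a fresh `Pin<Box<dyn Future<...>>>` on every invocation, because a future can only be awaited once; `Box::pin` does the boxing and the unsized coercion in one step. A minimal sketch of the call-site pattern, with `fetch_chain_id` as a hypothetical flaky step standing in for `create_local_network`:

    use crate::retry::with_retry;

    // Hypothetical flaky operation; real implementation elided.
    async fn fetch_chain_id(endpoint: String) -> Result<u64, String> {
        let _ = endpoint;
        Ok(31337)
    }

    async fn demo(endpoint: String) -> Result<(), String> {
        // The closure runs once per attempt; each call builds a brand-new future.
        let chain_id = with_retry(3, || Box::pin(fetch_chain_id(endpoint.clone()))).await?;
        println!("chain id: {}", chain_id);
        Ok(())
    }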
diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs
index a19777c5abc..e8af9c18067 100644
--- a/testing/simulator/src/main.rs
+++ b/testing/simulator/src/main.rs
@@ -21,6 +21,7 @@ mod cli;
 mod eth1_sim;
 mod local_network;
 mod no_eth1_sim;
+mod retry;
 mod sync_sim;
 
 use cli::cli_app;
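`mod retry;` wires the new module in; the file itself follows. One design note on the signature below: boxing every attempt's future keeps the trait bound simple at the cost of one allocation per attempt, which is irrelevant for a test simulator. A hypothetical unboxed variant, generic over the future type, is shown here purely for contrast (it is not what the diff implements):

    use std::fmt::Debug;
    use std::future::Future;

    // Hypothetical alternative: no boxing, but every call of `func` must
    // return the same concrete future type (true for an `async fn` factory).
    pub async fn with_retry_unboxed<T, E, F, Fut>(max_retries: usize, mut func: F) -> Result<T, E>
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = Result<T, E>>,
        E: Debug,
    {
        let mut retry_count = 0;
        loop {
            match func().await {
                Ok(value) => return Ok(value),
                Err(e) if retry_count < max_retries => {
                    retry_count += 1;
                    eprintln!("failed with {:?}, retry {} of {}", e, retry_count, max_retries);
                }
                Err(e) => return Err(e),
            }
        }
    }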
diff --git a/testing/simulator/src/retry.rs b/testing/simulator/src/retry.rs
new file mode 100644
index 00000000000..a4eb52cea1f
--- /dev/null
+++ b/testing/simulator/src/retry.rs
@@ -0,0 +1,63 @@
+use std::fmt::Debug;
+use std::future::Future;
+use std::pin::Pin;
+
+/// Executes the function with a specified number of retries if the function returns an error.
+/// Once it exceeds `max_retries` and still fails, the error is returned.
+pub async fn with_retry<T, E, F>(max_retries: usize, mut func: F) -> Result<T, E>
+where
+    F: FnMut() -> Pin<Box<dyn Future<Output = Result<T, E>>>>,
+    E: Debug,
+{
+    let mut retry_count = 0;
+    loop {
+        let result = Box::pin(func()).await;
+        if result.is_ok() || retry_count >= max_retries {
+            break result;
+        }
+        retry_count += 1;
+
+        if let Err(e) = result {
+            eprintln!(
+                "Operation failed with error {:?}, retrying {} of {}",
+                e, retry_count, max_retries
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::collections::VecDeque;
+
+    async fn my_async_func(is_ok: bool) -> Result<(), ()> {
+        if is_ok {
+            Ok(())
+        } else {
+            Err(())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_ok() {
+        let res = with_retry(3, || Box::pin(my_async_func(true))).await;
+        assert!(res.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_2nd_ok() {
+        let mut mock_results = VecDeque::from([false, true]);
+        let res = with_retry(3, || {
+            Box::pin(my_async_func(mock_results.pop_front().unwrap()))
+        })
+        .await;
+        assert!(res.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_fail() {
+        let res = with_retry(3, || Box::pin(my_async_func(false))).await;
+        assert!(res.is_err());
+    }
+}
diff --git a/watch/Cargo.toml b/watch/Cargo.toml
index d1793a9d068..1a003167dc8 100644
--- a/watch/Cargo.toml
+++ b/watch/Cargo.toml
@@ -21,7 +21,7 @@ types = { path = "../consensus/types" }
 eth2 = { path = "../common/eth2" }
 beacon_node = { path = "../beacon_node"}
 tokio = { version = "1.14.0", features = ["time"] }
-axum = "0.5.15"
+axum = "0.6.18"
 hyper = "0.14.20"
 serde = "1.0.116"
 serde_json = "1.0.58"
diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs
index 09d5ec6aac5..d8ae0eb6c62 100644
--- a/watch/src/server/mod.rs
+++ b/watch/src/server/mod.rs
@@ -5,7 +5,6 @@ use crate::config::Config as FullConfig;
 use crate::database::{self, PgPool};
 use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes};
 use axum::{
-    handler::Handler,
     http::{StatusCode, Uri},
     routing::get,
     Extension, Json, Router,
@@ -104,7 +103,7 @@ pub fn start_server(
     }
 
     let app = routes
-        .fallback(route_not_found.into_service())
+        .fallback(route_not_found)
        .layer(Extension(pool))
        .layer(Extension(slots_per_epoch));
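The `watch` hunks track the axum 0.5 to 0.6 upgrade: in 0.6, `Router::fallback` accepts a handler directly, so the `Handler` trait import and the `.into_service()` adapter both go away. A minimal sketch of the 0.6 shape (route and handler names here are illustrative, not the watch server's actual API surface):

    use axum::{
        http::{StatusCode, Uri},
        routing::get,
        Router,
    };

    async fn route_not_found(uri: Uri) -> (StatusCode, String) {
        (StatusCode::NOT_FOUND, format!("no route for {uri}"))
    }

    fn app() -> Router {
        Router::new()
            .route("/health", get(|| async { "ok" }))
            // axum 0.6: pass the handler itself; 0.5 required `.into_service()`.
            .fallback(route_not_found)
    }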