From 91b877fb1efeaf488764896456dac27df73b7e33 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 28 Jun 2022 13:35:46 +1000 Subject: [PATCH] Squash-merge `nfd-2` --- Cargo.lock | 5 + .../src/attestation_verification.rs | 9 +- .../src/attestation_verification/batch.rs | 6 +- beacon_node/beacon_chain/src/beacon_chain.rs | 1936 +++++++---------- .../src/beacon_fork_choice_store.rs | 6 +- .../beacon_chain/src/beacon_proposer_cache.rs | 31 +- .../beacon_chain/src/beacon_snapshot.rs | 7 +- .../beacon_chain/src/block_verification.rs | 333 +-- beacon_node/beacon_chain/src/builder.rs | 72 +- .../beacon_chain/src/canonical_head.rs | 1307 +++++++++++ .../beacon_chain/src/early_attester_cache.rs | 7 +- beacon_node/beacon_chain/src/errors.rs | 19 +- .../beacon_chain/src/execution_payload.rs | 292 ++- beacon_node/beacon_chain/src/fork_revert.rs | 8 +- .../beacon_chain/src/historical_blocks.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 7 +- .../beacon_chain/src/proposer_prep_service.rs | 4 +- beacon_node/beacon_chain/src/schema_change.rs | 7 +- .../src/schema_change/migration_schema_v7.rs | 10 +- .../beacon_chain/src/shuffling_cache.rs | 6 + .../beacon_chain/src/snapshot_cache.rs | 15 +- .../beacon_chain/src/state_advance_timer.rs | 43 +- beacon_node/beacon_chain/src/test_utils.rs | 154 +- .../tests/attestation_production.rs | 53 +- .../tests/attestation_verification.rs | 104 +- .../beacon_chain/tests/block_verification.rs | 491 +++-- beacon_node/beacon_chain/tests/fork_choice.rs | 75 +- beacon_node/beacon_chain/tests/merge.rs | 33 +- .../beacon_chain/tests/op_verification.rs | 16 +- .../tests/payload_invalidation.rs | 398 ++-- beacon_node/beacon_chain/tests/store_tests.rs | 870 ++++---- .../tests/sync_committee_verification.rs | 64 +- beacon_node/beacon_chain/tests/tests.rs | 374 ++-- beacon_node/client/src/builder.rs | 28 +- beacon_node/client/src/notifier.rs | 143 +- beacon_node/execution_layer/src/lib.rs | 39 +- beacon_node/http_api/src/attester_duties.rs | 8 +- beacon_node/http_api/src/block_id.rs | 41 +- beacon_node/http_api/src/database.rs | 2 +- beacon_node/http_api/src/lib.rs | 602 +++-- beacon_node/http_api/src/proposer_duties.rs | 26 +- beacon_node/http_api/src/state_id.rs | 40 +- beacon_node/http_api/tests/fork_tests.rs | 4 + .../http_api/tests/interactive_tests.rs | 29 +- beacon_node/http_api/tests/tests.rs | 204 +- .../lighthouse_network/src/behaviour/mod.rs | 4 +- .../src/rpc/codec/ssz_snappy.rs | 72 +- .../lighthouse_network/src/rpc/methods.rs | 5 +- .../lighthouse_network/src/types/pubsub.rs | 5 +- .../lighthouse_network/tests/rpc_tests.rs | 20 +- .../network/src/beacon_processor/mod.rs | 561 ++--- .../network/src/beacon_processor/tests.rs | 339 ++- .../beacon_processor/worker/gossip_methods.rs | 72 +- .../beacon_processor/worker/rpc_methods.rs | 7 +- .../beacon_processor/worker/sync_methods.rs | 52 +- beacon_node/network/src/router/processor.rs | 46 +- beacon_node/network/src/service.rs | 41 +- beacon_node/network/src/status.rs | 38 +- .../network/src/subnet_service/tests/mod.rs | 10 +- .../network/src/sync/backfill_sync/mod.rs | 4 +- .../network/src/sync/block_lookups/mod.rs | 13 +- .../src/sync/block_lookups/parent_lookup.rs | 15 +- .../sync/block_lookups/single_block_lookup.rs | 7 +- .../network/src/sync/block_lookups/tests.rs | 38 +- beacon_node/network/src/sync/manager.rs | 35 +- .../network/src/sync/network_context.rs | 39 +- .../network/src/sync/peer_sync_info.rs | 4 +- .../network/src/sync/range_sync/batch.rs | 18 +- .../src/sync/range_sync/block_storage.rs | 2 +- 
.../network/src/sync/range_sync/chain.rs | 3 +- .../network/src/sync/range_sync/range.rs | 25 +- beacon_node/operation_pool/Cargo.toml | 1 + beacon_node/operation_pool/src/lib.rs | 40 +- beacon_node/store/src/hot_cold_store.rs | 6 +- beacon_node/store/src/lib.rs | 3 +- beacon_node/timer/src/lib.rs | 30 +- common/task_executor/Cargo.toml | 2 +- common/task_executor/src/lib.rs | 58 + common/task_executor/src/metrics.rs | 10 + consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 135 +- .../fork_choice/src/fork_choice_store.rs | 4 +- consensus/fork_choice/src/lib.rs | 5 +- consensus/fork_choice/tests/tests.rs | 403 ++-- consensus/proto_array/src/proto_array.rs | 4 +- .../src/proto_array_fork_choice.rs | 62 +- consensus/state_processing/Cargo.toml | 1 + .../src/per_block_processing/tests.rs | 322 ++- .../altair/justification_and_finalization.rs | 2 +- .../base/justification_and_finalization.rs | 2 +- .../src/per_epoch_processing/tests.rs | 60 +- .../examples/flamegraph_beacon_state.rs | 2 +- consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_block.rs | 46 + consensus/types/src/beacon_block_body.rs | 91 + .../src/beacon_state/committee_cache/tests.rs | 40 +- consensus/types/src/beacon_state/tests.rs | 99 +- .../types/src/justifiable_beacon_state.rs | 4 +- consensus/types/src/payload.rs | 2 + consensus/types/src/signed_beacon_block.rs | 8 + database_manager/src/lib.rs | 5 +- slasher/service/src/service.rs | 9 +- testing/ef_tests/src/cases.rs | 24 +- testing/ef_tests/src/cases/fork_choice.rs | 160 +- testing/ef_tests/src/handler.rs | 11 +- .../src/test_rig.rs | 7 +- testing/state_transition_vectors/Cargo.toml | 1 + testing/state_transition_vectors/src/exit.rs | 33 +- .../state_transition_vectors/src/macros.rs | 10 +- testing/state_transition_vectors/src/main.rs | 31 +- 110 files changed, 6592 insertions(+), 4549 deletions(-) create mode 100644 beacon_node/beacon_chain/src/canonical_head.rs diff --git a/Cargo.lock b/Cargo.lock index 3199d1425aa..b7d550fdd90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2049,6 +2049,7 @@ dependencies = [ "proto_array", "state_processing", "store", + "tokio", "types", ] @@ -4148,6 +4149,7 @@ dependencies = [ "serde_derive", "state_processing", "store", + "tokio", "types", ] @@ -5933,6 +5935,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", + "tokio", "tree_hash", "types", ] @@ -5945,6 +5948,7 @@ dependencies = [ "eth2_ssz", "lazy_static", "state_processing", + "tokio", "types", ] @@ -6658,6 +6662,7 @@ dependencies = [ "swap_or_not_shuffle", "tempfile", "test_random_derive", + "tokio", "tree_hash", "tree_hash_derive", ] diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 406c0049aaa..63af6ab9e11 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -976,8 +976,8 @@ fn verify_head_block_is_known( max_skip_slots: Option, ) -> Result { let block_opt = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&attestation.data.beacon_block_root) .or_else(|| { chain @@ -1245,7 +1245,10 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. 
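[Reviewer note, not part of the diff] The hunks above and below replace direct `chain.fork_choice.read()` access with the new `CanonicalHead` interface. A minimal sketch of the read-side pattern, assuming `chain: &BeaconChain<T>`; the function name is illustrative:

    fn block_known_to_fork_choice<T: BeaconChainTypes>(
        chain: &BeaconChain<T>,
        block_root: Hash256,
    ) -> bool {
        // Keep the scope of the read lock tight; the canonical head locks should not
        // be held across `.await` points.
        chain
            .canonical_head
            .fork_choice_read_lock()
            .contains_block(&block_root)
    }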
- if !chain.fork_choice.read().contains_block(&target.root) + if !chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&target.root) && !chain.early_attester_cache.contains_block(target.root) { return Err(Error::UnknownTargetRoot(target.root)); diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 30f1ae7e5be..6f76cce0246 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -65,7 +65,7 @@ where .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; + let fork = chain.canonical_head.cached_head().head_fork(); let mut signature_sets = Vec::with_capacity(num_indexed * 3); @@ -169,13 +169,13 @@ where &metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES, ); + let fork = chain.canonical_head.cached_head().head_fork(); + let pubkey_cache = chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; - let mut signature_sets = Vec::with_capacity(num_partially_verified); // Iterate, flattening to get only the `Ok` values. diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 56a308880b8..54f8de3a851 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -9,15 +9,15 @@ use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, - signature_verify_chain_segment, BlockError, FullyVerifiedBlock, GossipVerifiedBlock, - IntoFullyVerifiedBlock, + signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::get_execution_payload; +use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; @@ -52,17 +52,17 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; -use eth2::types::{ - EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, -}; +use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; -use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation}; +use fork_choice::{ + AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, + InvalidationOperation, PayloadVerificationStatus, +}; use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use 
operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use proto_array::ExecutionStatus; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -71,7 +71,7 @@ use ssz::Encode; use state_processing::{ common::get_indexed_attestation, per_block_processing, - per_block_processing::{errors::AttestationValidationError, is_merge_transition_complete}, + per_block_processing::errors::AttestationValidationError, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, @@ -87,16 +87,17 @@ use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterato use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use task_executor::ShutdownReason; +use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::*; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; + pub type ForkChoiceError = fork_choice::Error; -/// The time-out before failure during an operation to take a read/write RwLock on the canonical -/// head. -pub const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// Alias to appease clippy. +type HashBlockTuple = (Hash256, Arc>); /// The time-out before failure during an operation to take a read/write RwLock on the block /// processing cache. @@ -216,22 +217,6 @@ pub enum StateSkipConfig { WithoutStateRoots, } -#[derive(Debug, PartialEq)] -pub struct HeadInfo { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub current_justified_checkpoint: types::Checkpoint, - pub finalized_checkpoint: types::Checkpoint, - pub fork: Fork, - pub genesis_time: u64, - pub genesis_validators_root: Hash256, - pub proposer_shuffling_decision_root: Hash256, - pub is_merge_transition_complete: bool, - pub execution_payload_block_hash: Option, - pub random: Hash256, -} - pub trait BeaconChainTypes: Send + Sync + 'static { type HotStore: store::ItemStore; type ColdStore: store::ItemStore; @@ -240,23 +225,22 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type EthSpec: types::EthSpec; } -/// Indicates the EL payload verification status of the head beacon block. -#[derive(Debug, PartialEq)] -pub enum HeadSafetyStatus { - /// The head block has either been verified by an EL or is does not require EL verification - /// (e.g., it is pre-merge or pre-terminal-block). - /// - /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with - /// the variant. - Safe(Option), - /// The head block execution payload has not yet been verified by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Unsafe(ExecutionBlockHash), - /// The head block execution payload was deemed to be invalid by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Invalid(ExecutionBlockHash), +/// Used internally to split block production into discrete functions.
+struct PartialBeaconBlock { + state: BeaconState, + slot: Slot, + proposer_index: u64, + parent_root: Hash256, + randao_reveal: Signature, + eth1_data: Eth1Data, + graffiti: Graffiti, + proposer_slashings: Vec, + attester_slashings: Vec>, + attestations: Vec>, + deposits: Vec, + voluntary_exits: Vec, + sync_aggregate: Option>, + prepare_payload_handle: Option>, } pub type BeaconForkChoice = ForkChoice< @@ -284,6 +268,8 @@ pub struct BeaconChain { pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. pub store: BeaconStore, + /// Used for spawning async and blocking tasks. + pub task_executor: TaskExecutor, /// Database migrator for running background maintenance on the store. pub store_migrator: BackgroundMigrator, /// Reports the current slot, typically based upon the system clock. @@ -335,21 +321,21 @@ pub struct BeaconChain { pub eth1_chain: Option>, /// Interfaces with the execution client. pub execution_layer: Option, - /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. - pub(crate) canonical_head: TimeoutRwLock>, + /// Stores information about the canonical head and finalized/justified checkpoints of the + /// chain. Also contains the fork choice struct, for computing the canonical head. + pub canonical_head: CanonicalHead, /// The root of the genesis block. pub genesis_block_root: Hash256, /// The root of the genesis state. pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, - /// A state-machine that is updated with information from the network and chooses a canonical - /// head block. - pub fork_choice: RwLock>, /// Transmitter used to indicate that slot-start fork choice has completed running. pub fork_choice_signal_tx: Option, /// Receiver used by block production to wait on slot-start fork choice. pub fork_choice_signal_rx: Option, + /// The genesis time of this `BeaconChain` (seconds since UNIX epoch). + pub genesis_time: u64, /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, @@ -358,7 +344,7 @@ pub struct BeaconChain { /// A cache dedicated to block processing. pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. - pub(crate) shuffling_cache: TimeoutRwLock, + pub shuffling_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -430,25 +416,11 @@ impl BeaconChain { .as_kv_store_op(BEACON_CHAIN_DB_KEY) } - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { - let fork_choice = self.fork_choice.read(); - Self::persist_fork_choice_in_batch_standalone(&fork_choice) - } - - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch_standalone( - fork_choice: &BeaconForkChoice, - ) -> KeyValueStoreOp { - let persisted_fork_choice = PersistedForkChoice { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) - } - /// Load fork choice from disk, returning `None` if it isn't found. 
- pub fn load_fork_choice(store: BeaconStore) -> Result>, Error> { + pub fn load_fork_choice( + store: BeaconStore, + spec: &ChainSpec, + ) -> Result>, Error> { let persisted_fork_choice = match store.get_item::(&FORK_CHOICE_DB_KEY)? { Some(fc) => fc, @@ -461,6 +433,7 @@ impl BeaconChain { Ok(Some(ForkChoice::from_persisted( persisted_fork_choice.fork_choice, fc_store, + spec, )?)) } @@ -538,11 +511,11 @@ impl BeaconChain { )); } - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state, + local_head.beacon_state.clone_with(CloneConfig::none()), local_head.beacon_block_root, &self.spec, )?; @@ -612,77 +585,6 @@ impl BeaconChain { .map(|result| result.map_err(|e| e.into()))) } - /// Iterate through the current chain to find the slot intersecting with the given beacon state. - /// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached - /// and no intersection is found, the finalized slot will be returned. - pub fn find_reorg_slot( - &self, - new_state: &BeaconState, - new_block_root: Hash256, - ) -> Result { - self.with_head(|snapshot| { - let old_state = &snapshot.beacon_state; - let old_block_root = snapshot.beacon_block_root; - - // The earliest slot for which the two chains may have a common history. - let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); - - // Create an iterator across `$state`, assuming that the block at `$state.slot` has the - // block root of `$block_root`. - // - // The iterator will be skipped until the next value returns `lowest_slot`. - // - // This is a macro instead of a function or closure due to the complex types invloved - // in all the iterator wrapping. - macro_rules! aligned_roots_iter { - ($state: ident, $block_root: ident) => { - std::iter::once(Ok(($state.slot(), $block_root))) - .chain($state.rev_iter_block_roots(&self.spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) - }; - } - - // Create iterators across old/new roots where iterators both start at the same slot. - let mut new_roots = aligned_roots_iter!(new_state, new_block_root); - let mut old_roots = aligned_roots_iter!(old_state, old_block_root); - - // Whilst *both* of the iterators are still returning values, try and find a common - // ancestor between them. - while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { - let (old_slot, old_root) = old?; - let (new_slot, new_root) = new?; - - // Sanity check to detect programming errors. - if old_slot != new_slot { - return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); - } - - if old_root == new_root { - // A common ancestor has been found. - return Ok(old_slot); - } - } - - // If no common ancestor is found, declare that the re-org happened at the previous - // finalized slot. - // - // Sometimes this will result in the return slot being *lower* than the actual reorg - // slot. However, assuming we don't re-org through a finalized slot, it will never be - // *higher*. - // - // We provide this potentially-inaccurate-but-safe information to avoid onerous - // database reads during times of deep reorgs. - Ok(old_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch())) - }) - } - /// Iterates backwards across all `(state_root, slot)` pairs starting from /// an arbitrary `BeaconState` to the earliest reachable ancestor (may or may not be genesis). 
/// @@ -713,12 +615,12 @@ impl BeaconChain { &self, start_slot: Slot, ) -> Result> + '_, Error> { - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state, + local_head.beacon_state.clone_with(CloneConfig::none()), &self.spec, )?; @@ -978,11 +880,11 @@ impl BeaconChain { pub async fn get_block_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result>>, Error> { if let Some(block) = self.early_attester_cache.get_block(*block_root) { return Ok(Some(block)); } - self.get_block(block_root).await + Ok(self.get_block(block_root).await?.map(Arc::new)) } /// Returns the block at the given root, if any. @@ -1068,55 +970,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Returns a `Checkpoint` representing the head block and state. Contains the "best block"; - /// the head of the canonical `BeaconChain`. - /// - /// It is important to note that the `beacon_state` returned may not match the present slot. It - /// is the state as it was when the head block was received, which could be some slots prior to - /// now. - pub fn head(&self) -> Result, Error> { - self.with_head(|head| Ok(head.clone_with(CloneConfig::committee_caches_only()))) - } - - /// Apply a function to the canonical head without cloning it. - pub fn with_head( - &self, - f: impl FnOnce(&BeaconSnapshot) -> Result, - ) -> Result - where - E: From, - { - let head_lock = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - f(&head_lock) - } - - /// Returns the beacon block root at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_block_root(&self) -> Result { - self.with_head(|s| Ok(s.beacon_block_root)) - } - - /// Returns the beacon block at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_block(&self) -> Result, Error> { - self.with_head(|s| Ok(s.beacon_block.clone())) - } - - /// Returns the beacon state at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_state(&self) -> Result, Error> { - self.with_head(|s| { - Ok(s.beacon_state - .clone_with(CloneConfig::committee_caches_only())) - }) - } - /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed @@ -1191,42 +1044,6 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } - /// Returns info representing the head block and state. - /// - /// A summarized version of `Self::head` that involves less cloning. - pub fn head_info(&self) -> Result { - self.with_head(|head| { - let proposer_shuffling_decision_root = head - .beacon_state - .proposer_shuffling_decision_root(head.beacon_block_root)?; - - // The `random` value is used whilst producing an `ExecutionPayload` atop the head. 
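[Reviewer note, not part of the diff] With `head()`, `with_head()` and `head_info()` removed, callers read pre-computed head data from `canonical_head.cached_head()` or take a full snapshot via `head_snapshot()`. A rough sketch; the function name and return tuple are illustrative:

    fn head_summary<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> (Slot, Hash256, Checkpoint) {
        // `cached_head()` is a cheap, pre-computed view of the head; drop it promptly
        // rather than holding the underlying `Arc` longer than necessary.
        let cached_head = chain.canonical_head.cached_head();
        (
            cached_head.head_slot(),
            cached_head.head_block_root(),
            cached_head.finalized_checkpoint(),
        )
    }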
- let current_epoch = head.beacon_state.current_epoch(); - let random = *head.beacon_state.get_randao_mix(current_epoch)?; - - Ok(HeadInfo { - slot: head.beacon_block.slot(), - block_root: head.beacon_block_root, - state_root: head.beacon_state_root(), - current_justified_checkpoint: head.beacon_state.current_justified_checkpoint(), - finalized_checkpoint: head.beacon_state.finalized_checkpoint(), - fork: head.beacon_state.fork(), - genesis_time: head.beacon_state.genesis_time(), - genesis_validators_root: head.beacon_state.genesis_validators_root(), - proposer_shuffling_decision_root, - is_merge_transition_complete: is_merge_transition_complete(&head.beacon_state), - execution_payload_block_hash: head - .beacon_block - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()), - random, - }) - }) - } - /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. @@ -1247,7 +1064,7 @@ impl BeaconChain { slot: Slot, config: StateSkipConfig, ) -> Result, Error> { - let head_state = self.head()?.beacon_state; + let head_state = self.head_beacon_state_cloned(); match slot.cmp(&head_state.slot()) { Ordering::Equal => Ok(head_state), @@ -1332,14 +1149,6 @@ impl BeaconChain { self.state_at_slot(self.slot()?, StateSkipConfig::WithStateRoots) } - /// Returns the slot of the highest block in the canonical chain. - pub fn best_slot(&self) -> Result { - self.canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .map(|head| head.beacon_block.slot()) - .ok_or(Error::CanonicalHeadLockTimeout) - } - /// Returns the validator index (if any) for the given public key. /// /// ## Notes @@ -1479,7 +1288,7 @@ impl BeaconChain { validator_indices: &[u64], epoch: Epoch, head_block_root: Hash256, - ) -> Result<(Vec>, Hash256), Error> { + ) -> Result<(Vec>, Hash256, ExecutionStatus), Error> { self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| { let duties = validator_indices .iter() @@ -1489,7 +1298,13 @@ impl BeaconChain { }) .collect(); - Ok((duties, dependent_root)) + let execution_status = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; + + Ok((duties, dependent_root, execution_status)) }) } @@ -1537,8 +1352,8 @@ impl BeaconChain { ) -> Result, Error> { let beacon_block_root = attestation.data.beacon_block_root; match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { // The attestation references a block that is not in fork choice, it must be @@ -1626,7 +1441,10 @@ impl BeaconChain { let current_epoch_attesting_info: Option<(Checkpoint, usize)>; let attester_cache_key; let head_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS); - if let Some(head) = self.canonical_head.try_read_for(HEAD_LOCK_TIMEOUT) { + // The following braces are to prevent the `cached_head` Arc from being held for longer than + // required. It also helps reduce the diff for a very large PR (#3244). + { + let head = self.head_snapshot(); let head_state = &head.beacon_state; head_state_slot = head_state.slot(); @@ -1701,15 +1519,13 @@ impl BeaconChain { // routine. attester_cache_key = AttesterCacheKey::new(request_epoch, head_state, beacon_block_root)?; - } else { - return Err(Error::CanonicalHeadLockTimeout); } drop(head_timer); // Only attest to a block if it is fully verified (i.e. 
not optimistic or invalid). match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { Some(execution_status) if execution_status.is_valid_or_irrelevant() => (), @@ -1913,8 +1729,8 @@ impl BeaconChain { ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - self.fork_choice - .write() + self.canonical_head + .fork_choice_write_lock() .on_attestation( self.slot()?, verified.indexed_attestation(), @@ -2050,8 +1866,7 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { - let fork = - self.with_head(|head| Ok::<_, AttestationError>(head.beacon_state.fork()))?; + let fork = self.canonical_head.cached_head().head_fork(); self.op_pool .insert_attestation( @@ -2156,7 +1971,7 @@ impl BeaconChain { // pivot block is the same as the current state's pivot block. If it is, then the // attestation's shuffling is the same as the current state's. // To account for skipped slots, find the first block at *or before* the pivot slot. - let fork_choice_lock = self.fork_choice.read(); + let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); let pivot_block_root = fork_choice_lock .proto_array() .core_proto_array() @@ -2247,12 +2062,13 @@ impl BeaconChain { pub fn import_attester_slashing( &self, attester_slashing: SigVerifiedOp>, - ) -> Result<(), Error> { + ) { if self.eth1_chain.is_some() { - self.op_pool - .insert_attester_slashing(attester_slashing, self.head_info()?.fork) + self.op_pool.insert_attester_slashing( + attester_slashing, + self.canonical_head.cached_head().head_fork(), + ) } - Ok(()) } /// Attempt to obtain sync committee duties from the head. @@ -2268,22 +2084,37 @@ impl BeaconChain { }) } - /// Attempt to verify and import a chain of blocks to `self`. - /// - /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., - /// be a chain). An error will be returned if this is not the case. - /// - /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior - /// blocks might be imported. + /// A convenience method for spawning a blocking task. It maps an `Option` and + /// `tokio::JoinError` into a single `BeaconChainError`. + pub(crate) async fn spawn_blocking_handle( + &self, + task: F, + name: &'static str, + ) -> Result + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let handle = self + .task_executor + .clone() + .spawn_blocking_handle(task, name) + .ok_or(Error::RuntimeShutdown)?; + + handle.await.map_err(Error::TokioJoin) + } + + /// Accepts a `chain_segment` and filters out any uninteresting blocks (e.g., pre-finalization + /// or already-known). /// - /// This method is generally much more efficient than importing each block using - /// `Self::process_block`. - pub fn process_chain_segment( + /// This method is potentially long-running and should not run on the core executor. + pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, - ) -> ChainSegmentResult { + chain_segment: Vec>>, + ) -> Result>, ChainSegmentResult> { + // This function will never import any blocks. + let imported_blocks = 0; let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); - let mut imported_blocks = 0; // Produce a list of the parent root and slot of the child of each block. 
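[Reviewer note, not part of the diff] `spawn_blocking_handle` above is the convenience wrapper this patch uses to keep lock-heavy work off the core tokio executor. A sketch of a typical (crate-internal) call site; the task body and name are hypothetical:

    async fn head_slot_blocking<T: BeaconChainTypes>(
        chain: Arc<BeaconChain<T>>,
    ) -> Result<Slot, BeaconChainError> {
        let inner = chain.clone();
        chain
            .spawn_blocking_handle(
                // The closure runs on a blocking thread; shutdown and join failures are
                // mapped to `RuntimeShutdown` / `TokioJoin` errors by the wrapper.
                move || inner.canonical_head.cached_head().head_slot(),
                "example_head_slot",
            )
            .await
    }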
// @@ -2297,10 +2128,10 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. if let Err(e) = block.fork_name(&self.spec) { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), - }; + }); } let block_root = get_block_root(&block); @@ -2312,18 +2143,18 @@ impl BeaconChain { // Without this check it would be possible to have a block verified using the // incorrect shuffling. That would be bad, mmkay. if block_root != *child_parent_root { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearParentRoots, - }; + }); } // Ensure that the slots are strictly increasing throughout the chain segment. if *child_slot <= block.slot() { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearSlots, - }; + }); } } @@ -2351,18 +2182,18 @@ impl BeaconChain { // The block has a known parent that does not descend from the finalized block. // There is no need to process this block or any children. Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NotFinalizedDescendant { block_parent_root }, - }; + }); } // If there was an error whilst determining if the block was invalid, return that // error. Err(BlockError::BeaconChainError(e)) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::BeaconChainError(e), - }; + }); } // If the block was decided to be irrelevant for any other reason, don't include // this block or any of it's children in the filtered chain segment. @@ -2370,6 +2201,42 @@ impl BeaconChain { } } + Ok(filtered_chain_segment) + } + + /// Attempt to verify and import a chain of blocks to `self`. + /// + /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., + /// be a chain). An error will be returned if this is not the case. + /// + /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior + /// blocks might be imported. + /// + /// This method is generally much more efficient than importing each block using + /// `Self::process_block`. + pub async fn process_chain_segment( + self: &Arc, + chain_segment: Vec>>, + ) -> ChainSegmentResult { + let mut imported_blocks = 0; + + // Filter uninteresting blocks from the chain segment in a blocking task. + let chain = self.clone(); + let filtered_chain_segment_future = self.spawn_blocking_handle( + move || chain.filter_chain_segment(chain_segment), + "filter_chain_segment", + ); + let mut filtered_chain_segment = match filtered_chain_segment_future.await { + Ok(Ok(filtered_segment)) => filtered_segment, + Ok(Err(segment_result)) => return segment_result, + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + } + } + }; + while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. 
let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -2389,20 +2256,32 @@ impl BeaconChain { let mut blocks = filtered_chain_segment.split_off(last_index); std::mem::swap(&mut blocks, &mut filtered_chain_segment); + let chain = self.clone(); + let signature_verification_future = self.spawn_blocking_handle( + move || signature_verify_chain_segment(blocks, &chain), + "signature_verify_chain_segment", + ); + // Verify the signature of the blocks, returning early if the signature is invalid. - let signature_verified_blocks = match signature_verify_chain_segment(blocks, self) { - Ok(blocks) => blocks, - Err(error) => { + let signature_verified_blocks = match signature_verification_future.await { + Ok(Ok(blocks)) => blocks, + Ok(Err(error)) => { return ChainSegmentResult::Failed { imported_blocks, error, }; } + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + }; + } }; // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { - match self.process_block(signature_verified_block) { + match self.process_block(signature_verified_block).await { Ok(_) => imported_blocks += 1, Err(error) => { return ChainSegmentResult::Failed { @@ -2427,43 +2306,54 @@ impl BeaconChain { /// ## Errors /// /// Returns an `Err` if the given block was invalid, or an error was encountered during - pub fn verify_block_for_gossip( - &self, - block: SignedBeaconBlock, + pub async fn verify_block_for_gossip( + self: &Arc, + block: Arc>, ) -> Result, BlockError> { - let slot = block.slot(); - let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); - - match GossipVerifiedBlock::new(block, self) { - Ok(verified) => { - debug!( - self.log, - "Successfully processed gossip block"; - "graffiti" => graffiti_string, - "slot" => slot, - "root" => ?verified.block_root(), - ); + let chain = self.clone(); + self.task_executor + .clone() + .spawn_blocking_handle( + move || { + let slot = block.slot(); + let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); + + match GossipVerifiedBlock::new(block, &chain) { + Ok(verified) => { + debug!( + chain.log, + "Successfully processed gossip block"; + "graffiti" => graffiti_string, + "slot" => slot, + "root" => ?verified.block_root(), + ); - Ok(verified) - } - Err(e) => { - debug!( - self.log, - "Rejected gossip block"; - "error" => e.to_string(), - "graffiti" => graffiti_string, - "slot" => slot, - ); + Ok(verified) + } + Err(e) => { + debug!( + chain.log, + "Rejected gossip block"; + "error" => e.to_string(), + "graffiti" => graffiti_string, + "slot" => slot, + ); - Err(e) - } - } + Err(e) + } + } + }, + "payload_verification_handle", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)? } /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// - /// Items that implement `IntoFullyVerifiedBlock` include: + /// Items that implement `IntoExecutionPendingBlock` include: /// /// - `SignedBeaconBlock` /// - `GossipVerifiedBlock` @@ -2472,7 +2362,7 @@ impl BeaconChain { /// /// Returns an `Err` if the given block was invalid, or an error was encountered during /// verification. - pub fn process_block>( + pub async fn process_block>( self: &Arc, unverified_block: B, ) -> Result> { @@ -2486,13 +2376,16 @@ impl BeaconChain { let block = unverified_block.block().clone(); // A small closure to group the verification and import errors. 
- let import_block = |unverified_block: B| -> Result> { - let fully_verified = unverified_block.into_fully_verified_block(self)?; - self.import_block(fully_verified) + let chain = self.clone(); + let import_block = async move { + let execution_pending = unverified_block.into_execution_pending_block(&chain)?; + chain + .import_execution_pending_block(execution_pending) + .await }; // Verify and import the block. - match import_block(unverified_block) { + match import_block.await { // The block was successfully verified and imported. Yay. Ok(block_root) => { trace!( @@ -2507,6 +2400,14 @@ impl BeaconChain { Ok(block_root) } + Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { + debug!( + self.log, + "Beacon block processing cancelled"; + "error" => ?e, + ); + Err(e) + } // There was an error whilst attempting to verify and import the block. The block might // be partially verified or partially imported. Err(BlockError::BeaconChainError(e)) => { @@ -2529,6 +2430,81 @@ impl BeaconChain { } } + /// Accepts a fully-verified block and imports it into the chain without performing any + /// additional verification. + /// + /// An error is returned if the block was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). + async fn import_execution_pending_block( + self: Arc, + execution_pending_block: ExecutionPendingBlock, + ) -> Result> { + let ExecutionPendingBlock { + block, + block_root, + state, + parent_block: _, + confirmed_state_roots, + payload_verification_handle, + } = execution_pending_block; + + let PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + } = payload_verification_handle + .await + .map_err(BeaconChainError::TokioJoin)? + .ok_or(BeaconChainError::RuntimeShutdown)??; + + // Log the PoS pandas if a merge transition just occurred. + if is_valid_merge_transition_block { + info!(self.log, "{}", POS_PANDA_BANNER); + info!( + self.log, + "Proof of Stake Activated"; + "slot" => block.slot() + ); + info!( + self.log, ""; + "Terminal POW Block Hash" => ?block + .message() + .execution_payload()? + .parent_hash() + .into_root() + ); + info!( + self.log, ""; + "Merge Transition Block Root" => ?block.message().tree_hash_root() + ); + info!( + self.log, ""; + "Merge Transition Execution Hash" => ?block + .message() + .execution_payload()? + .block_hash() + .into_root() + ); + } + + let chain = self.clone(); + let block_hash = self + .spawn_blocking_handle( + move || { + chain.import_block( + block, + block_root, + state, + confirmed_state_roots, + payload_verification_status, + ) + }, + "payload_verification_handle", + ) + .await??; + + Ok(block_hash) + } + /// Accepts a fully-verified block and imports it into the chain without performing any /// additional verification. /// @@ -2536,15 +2512,14 @@ impl BeaconChain { /// (i.e., this function is not atomic). 
fn import_block( &self, - fully_verified_block: FullyVerifiedBlock, + signed_block: Arc>, + block_root: Hash256, + mut state: BeaconState, + confirmed_state_roots: Vec, + payload_verification_status: PayloadVerificationStatus, ) -> Result> { - let signed_block = fully_verified_block.block; - let block_root = fully_verified_block.block_root; - let mut state = fully_verified_block.state; let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let mut ops = fully_verified_block.confirmation_db_batch; - let payload_verification_status = fully_verified_block.payload_verification_status; let attestation_observation_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); @@ -2617,21 +2592,29 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } - let mut fork_choice = self.fork_choice.write(); + let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - let signed_block = - check_block_is_finalized_descendant::(signed_block, &fork_choice, &self.store)?; - let (block, block_signature) = signed_block.clone().deconstruct(); + check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?; - // compare the existing finalized checkpoint with the incoming block's finalized checkpoint - let old_finalized_checkpoint = fork_choice.finalized_checkpoint(); + // Note: we're using the finalized checkpoint from the head state, rather than fork + // choice. + // + // We are doing this to ensure that we detect changes in finalization. It's possible + // that fork choice has already been updated to the finalized checkpoint in the block + // we're importing. + let current_head_finalized_checkpoint = + self.canonical_head.cached_head().finalized_checkpoint(); + // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. let new_finalized_checkpoint = state.finalized_checkpoint(); + // Alias for readability. + let block = signed_block.message(); + // Only perform the weak subjectivity check if it was configured. if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { // This ensures we only perform the check once. - if (old_finalized_checkpoint.epoch < wss_checkpoint.epoch) + if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch) && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch) { if let Err(e) = @@ -2643,7 +2626,7 @@ impl BeaconChain { "Weak subjectivity checkpoint verification failed while importing block!"; "block_root" => ?block_root, "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?old_finalized_checkpoint.epoch, + "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, "error" => ?e, @@ -2671,7 +2654,7 @@ impl BeaconChain { fork_choice .on_block( current_slot, - &block, + block, block_root, block_delay, &mut state, @@ -2847,7 +2830,11 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. 
// See https://github.com/sigp/lighthouse/issues/2028 - ops.push(StoreOp::PutBlock(block_root, Box::new(signed_block))); + let mut ops: Vec<_> = confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag) + .collect(); + ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); @@ -2858,18 +2845,23 @@ impl BeaconChain { "msg" => "Restoring fork choice from disk", "error" => ?e, ); - match Self::load_fork_choice(self.store.clone())? { - Some(persisted_fork_choice) => { - *fork_choice = persisted_fork_choice; - } - None => { - crit!( - self.log, - "No stored fork choice found to restore from"; - "warning" => "The database is likely corrupt now, consider --purge-db" - ); - } + + // Since the write failed, try to revert the canonical head back to what was stored + // in the database. This attempts to prevent inconsistency between the database and + // fork choice. + if let Err(e) = + self.canonical_head + .restore_from_store(fork_choice, &self.store, &self.spec) + { + crit!( + self.log, + "No stored fork choice found to restore from"; + "error" => ?e, + "warning" => "The database is likely corrupt now, consider --purge-db" + ); + return Err(BlockError::BeaconChainError(e)); } + return Err(e.into()); } drop(txn_lock); @@ -2884,7 +2876,6 @@ impl BeaconChain { let parent_root = block.parent_root(); let slot = block.slot(); - let signed_block = SignedBeaconBlock::from_block(block, block_signature); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) @@ -3021,7 +3012,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. - pub fn produce_block>( + pub async fn produce_block>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3033,16 +3024,50 @@ impl BeaconChain { validator_graffiti, ProduceBlockVerification::VerifyRandao, ) + .await } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. - pub fn produce_block_with_verification>( + pub async fn produce_block_with_verification>( self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/2 (blocking) + // + // Load the parent state from disk. + let chain = self.clone(); + let (state, state_root_opt) = self + .task_executor + .spawn_blocking_handle( + move || chain.load_state_for_block_production::(slot), + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/2 (async, with some blocking components) + // + // Produce the block upon the state + self.produce_block_on_state::( + state, + state_root_opt, + slot, + randao_reveal, + validator_graffiti, + verification, + ) + .await + } + + /// Load a `BeaconState` upon which a block at `slot` can be produced, preferring the snapshot cache and falling back to reading a state from the store. + fn load_state_for_block_production>( + self: &Arc, + slot: Slot, + ) -> Result<(BeaconState, Option), BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); @@ -3056,16 +3081,19 @@ impl BeaconChain { // signed.
If we miss the cache or we're producing a block that conflicts with the head, // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); - let head_info = self - .head_info() - .map_err(BlockProductionError::UnableToGetHeadInfo)?; - let (state, state_root_opt) = if head_info.slot < slot { + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any + // longer than necessary. + let (head_slot, head_block_root) = { + let head = self.canonical_head.cached_head(); + (head.head_slot(), head.head_block_root()) + }; + let (state, state_root_opt) = if head_slot < slot { // Normal case: proposing a block atop the current head. Use the snapshot cache. if let Some(pre_state) = self .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(head_info.block_root) + snapshot_cache.get_state_for_block_production(head_block_root) }) { (pre_state.pre_state, pre_state.state_root) @@ -3095,16 +3123,10 @@ impl BeaconChain { (state, None) }; + drop(state_load_timer); - self.produce_block_on_state::( - state, - state_root_opt, - slot, - randao_reveal, - validator_graffiti, - verification, - ) + Ok((state, state_root_opt)) } /// Produce a block for some `slot` upon the given `state`. @@ -3119,18 +3141,82 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub fn produce_block_on_state>( - &self, - mut state: BeaconState, + pub async fn produce_block_on_state>( + self: &Arc, + state: BeaconState, state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { - let eth1_chain = self - .eth1_chain - .as_ref() + // Part 1/3 (blocking) + // + // Perform the state advance and block-packing functions. + let chain = self.clone(); + let mut partial_beacon_block = self + .task_executor + .spawn_blocking_handle( + move || { + chain.produce_partial_beacon_block( + state, + state_root_opt, + produce_at_slot, + randao_reveal, + validator_graffiti, + ) + }, + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/3 (async) + // + // Wait for the execution layer to return an execution payload (if one is required). + let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); + let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { + let execution_payload = prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??; + Some(execution_payload) + } else { + None + }; + + // Part 3/3 (blocking) + // + // Perform the final steps of combining all the parts and computing the state root. + let chain = self.clone(); + self.task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + execution_payload, + verification, + ) + }, + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)? 
+ } + + fn produce_partial_beacon_block>( + self: &Arc, + mut state: BeaconState, + state_root_opt: Option, + produce_at_slot: Slot, + randao_reveal: Signature, + validator_graffiti: Option, + ) -> Result, BlockProductionError> { + let eth1_chain = self + .eth1_chain + .as_ref() .ok_or(BlockProductionError::NoEth1ChainConnection)?; // It is invalid to try to produce a block using a state from a future slot. @@ -3158,13 +3244,30 @@ impl BeaconChain { state.latest_block_header().canonical_root() }; + let slot = state.slot(); + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + + // If required, start the process of loading an execution payload from the EL early. This + // allows it to run concurrently with things like attestation packing. + let prepare_payload_handle = match &state { + BeaconState::Base(_) | BeaconState::Altair(_) => None, + BeaconState::Merge(_) => { + let finalized_checkpoint = self.canonical_head.cached_head().finalized_checkpoint(); + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + finalized_checkpoint, + proposer_index, + )?; + Some(prepare_payload_handle) + } + }; + let (proposer_slashings, attester_slashings, voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; - let deposits = eth1_chain - .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? - .into(); + let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. @@ -3213,29 +3316,72 @@ impl BeaconChain { curr_attestation_filter, &self.spec, ) - .map_err(BlockProductionError::OpPoolError)? - .into(); + .map_err(BlockProductionError::OpPoolError)?; drop(attestation_packing_timer); - let slot = state.slot(); - let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; - - // Closure to fetch a sync aggregate in cases where it is required. - let get_sync_aggregate = || -> Result, BlockProductionError> { - Ok(self - .op_pool - .get_sync_aggregate(&state) - .map_err(BlockProductionError::OpPoolError)? - .unwrap_or_else(|| { - warn!( - self.log, - "Producing block with no sync contributions"; - "slot" => state.slot(), - ); - SyncAggregate::new() - })) + let sync_aggregate = match &state { + BeaconState::Base(_) => None, + BeaconState::Altair(_) | BeaconState::Merge(_) => { + let sync_aggregate = self + .op_pool + .get_sync_aggregate(&state) + .map_err(BlockProductionError::OpPoolError)? 
+ .unwrap_or_else(|| { + warn!( + self.log, + "Producing block with no sync contributions"; + "slot" => state.slot(), + ); + SyncAggregate::new() + }); + Some(sync_aggregate) + } }; + Ok(PartialBeaconBlock { + state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + prepare_payload_handle, + }) + } + + fn complete_partial_beacon_block>( + &self, + partial_beacon_block: PartialBeaconBlock, + execution_payload: Option, + verification: ProduceBlockVerification, + ) -> Result, BlockProductionError> { + let PartialBeaconBlock { + mut state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + // We don't need the prepare payload handle since the `execution_payload` is passed into + // this function. We can assume that the handle has already been consumed in order to + // produce said `execution_payload`. + prepare_payload_handle: _, + } = partial_beacon_block; + let inner_block = match &state { BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { slot, @@ -3248,56 +3394,51 @@ impl BeaconChain { graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), - attestations, - deposits, + attestations: attestations.into(), + deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), _phantom: PhantomData, }, }), - BeaconState::Altair(_) => { - let sync_aggregate = get_sync_aggregate()?; - BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - _phantom: PhantomData, - }, - }) - } - BeaconState::Merge(_) => { - let sync_aggregate = get_sync_aggregate()?; - let execution_payload = - get_execution_payload::(self, &state, proposer_index)?; - BeaconBlock::Merge(BeaconBlockMerge { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - execution_payload, - }, - }) - } + BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + _phantom: PhantomData, + }, + }), + BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + 
voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: execution_payload + .ok_or(BlockProductionError::MissingExecutionPayload)?, + }, + }), }; let block = SignedBeaconBlock::from_block( @@ -3361,7 +3502,7 @@ impl BeaconChain { /// results in the justified checkpoint being invalidated. /// /// See the documentation of `InvalidationOperation` for information about defining `op`. - pub fn process_invalid_execution_payload( + pub async fn process_invalid_execution_payload( self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -3372,8 +3513,26 @@ impl BeaconChain { "block_root" => ?op.block_root(), ); + // Update the execution status in fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let inner_op = op.clone(); + let fork_choice_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_invalid_execution_payload(&inner_op) + }, + "invalid_payload_fork_choice_update", + ) + .await?; + // Update fork choice. - if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) { + if let Err(e) = fork_choice_result { crit!( self.log, "Failed to process invalid payload"; @@ -3388,7 +3547,7 @@ impl BeaconChain { // // Don't return early though, since invalidating the justified checkpoint might cause an // error here. - if let Err(e) = self.fork_choice() { + if let Err(e) = self.recompute_head_at_current_slot().await { crit!( self.log, "Failed to run fork choice routine"; @@ -3396,8 +3555,22 @@ impl BeaconChain { ); } - // Atomically obtain the justified root from fork choice. - let justified_block = self.fork_choice.read().get_justified_block()?; + // Obtain the justified root from fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let justified_block = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_read_lock() + .get_justified_block() + }, + "invalid_payload_fork_choice_get_justified", + ) + .await??; if justified_block.execution_status.is_invalid() { crit!( @@ -3429,452 +3602,10 @@ impl BeaconChain { Ok(()) } - /// Execute the fork choice algorithm and enthrone the result as the canonical head. - pub fn fork_choice(self: &Arc) -> Result<(), Error> { - self.fork_choice_at_slot(self.slot()?) - } - - /// Execute fork choice at `slot`, processing queued attestations from `slot - 1` and earlier. - /// - /// The `slot` is not verified in any way, callers should ensure it corresponds to at most - /// one slot ahead of the current wall-clock slot. - pub fn fork_choice_at_slot(self: &Arc, slot: Slot) -> Result<(), Error> { - metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); - let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); - - let result = self.fork_choice_internal(slot); - - if result.is_err() { - metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); - } - - result - } - - fn fork_choice_internal(self: &Arc, slot: Slot) -> Result<(), Error> { - // Atomically obtain the head block root and the finalized block. - let (beacon_block_root, finalized_block) = { - let mut fork_choice = self.fork_choice.write(); - - // Determine the root of the block that is the head of the chain. 
- let beacon_block_root = fork_choice.get_head(slot, &self.spec)?; - - (beacon_block_root, fork_choice.get_finalized_block()?) - }; - - let current_head = self.head_info()?; - let old_finalized_checkpoint = current_head.finalized_checkpoint; - - // Exit early if the head hasn't changed. - if beacon_block_root == current_head.block_root { - return Ok(()); - } - - // Check to ensure that this finalized block hasn't been marked as invalid. - if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { - crit!( - self.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block_hash - ); - let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Finalized block has an invalid execution payload.", - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; - - // Exit now, the node is in an invalid state. - return Err(Error::InvalidFinalizedPayload { - finalized_root: finalized_block.root, - execution_block_hash: block_hash, - }); - } - - let lag_timer = metrics::start_timer(&metrics::FORK_CHOICE_SET_HEAD_LAG_TIMES); - - // At this point we know that the new head block is not the same as the previous one - metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - - // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling - // back to a database read if that fails. - let new_head = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned(beacon_block_root, CloneConfig::committee_caches_only()) - }) - .map::, _>(Ok) - .unwrap_or_else(|| { - let beacon_block = self - .store - .get_full_block(&beacon_block_root)? - .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; - - Ok(BeaconSnapshot { - beacon_block, - beacon_block_root, - beacon_state, - }) - }) - .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. - snapshot - .beacon_state - .build_all_committee_caches(&self.spec) - .map_err(Into::into) - .map(|()| snapshot) - })?; - - // Attempt to detect if the new head is not on the same chain as the previous block - // (i.e., a re-org). - // - // Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks - // between calls to fork choice without swapping between chains. This seems like an - // extreme-enough scenario that a warning is fine. 
- let is_reorg = new_head - .beacon_state - .get_block_root(current_head.slot) - .map_or(true, |root| *root != current_head.block_root); - - let mut reorg_distance = Slot::new(0); - - if is_reorg { - match self.find_reorg_slot(&new_head.beacon_state, new_head.beacon_block_root) { - Ok(slot) => reorg_distance = current_head.slot.saturating_sub(slot), - Err(e) => { - warn!( - self.log, - "Could not find re-org depth"; - "error" => format!("{:?}", e), - ); - } - } - - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); - warn!( - self.log, - "Beacon chain re-org"; - "previous_head" => ?current_head.block_root, - "previous_slot" => current_head.slot, - "new_head_parent" => ?new_head.beacon_block.parent_root(), - "new_head" => ?beacon_block_root, - "new_slot" => new_head.beacon_block.slot(), - "reorg_distance" => reorg_distance, - ); - } else { - debug!( - self.log, - "Head beacon block"; - "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, - "justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, - "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => ?beacon_block_root, - "slot" => new_head.beacon_block.slot(), - ); - }; - - let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint(); - - // It is an error to try to update to a head with a lesser finalized epoch. - if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch { - return Err(Error::RevertedFinalizedEpoch { - previous_epoch: old_finalized_checkpoint.epoch, - new_epoch: new_finalized_checkpoint.epoch, - }); - } - - let is_epoch_transition = current_head.slot.epoch(T::EthSpec::slots_per_epoch()) - < new_head - .beacon_state - .slot() - .epoch(T::EthSpec::slots_per_epoch()); - - let update_head_timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); - - // These fields are used for server-sent events. - let state_root = new_head.beacon_state_root(); - let head_slot = new_head.beacon_state.slot(); - let head_proposer_index = new_head.beacon_block.message().proposer_index(); - let proposer_graffiti = new_head - .beacon_block - .message() - .body() - .graffiti() - .as_utf8_lossy(); - - // Find the dependent roots associated with this head before updating the snapshot. This - // is to ensure consistency when sending server sent events later in this method. - let dependent_root = new_head - .beacon_state - .proposer_shuffling_decision_root(self.genesis_block_root); - let prev_dependent_root = new_head - .beacon_state - .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - - drop(lag_timer); - - // Clear the early attester cache in case it conflicts with `self.canonical_head`. - self.early_attester_cache.clear(); - - // Update the snapshot that stores the head of the chain at the time it received the - // block. - *self - .canonical_head - .try_write_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)? = new_head; - - // The block has now been set as head so we can record times and delays. - metrics::stop_timer(update_head_timer); - - let block_time_set_as_head = timestamp_now(); - - // Calculate the total delay between the start of the slot and when it was set as head. 
- let block_delay_total = - get_slot_delay_ms(block_time_set_as_head, head_slot, &self.slot_clock); - - // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to - // the cache during sync. - if block_delay_total < self.slot_clock.slot_duration() * 64 { - self.block_times_cache.write().set_time_set_as_head( - beacon_block_root, - current_head.slot, - block_time_set_as_head, - ); - } - - // If a block comes in from over 4 slots ago, it is most likely a block from sync. - let block_from_sync = block_delay_total > self.slot_clock.slot_duration() * 4; - - // Determine whether the block has been set as head too late for proper attestation - // production. - let late_head = block_delay_total >= self.slot_clock.unagg_attestation_production_delay(); - - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during - // sync. - if !block_from_sync { - // Observe the total block delay. This is the delay between the time the slot started - // and when the block was set as head. - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, - block_delay_total, - ); - - // Observe the delay between when we imported the block and when we set the block as - // head. - let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, - block_delays - .observed - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, - block_delays - .set_as_head - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - // If the block was enshrined as head too late for attestations to be created for it, - // log a debug warning and increment a metric. - if late_head { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); - debug!( - self.log, - "Delayed head block"; - "block_root" => ?beacon_block_root, - "proposer_index" => head_proposer_index, - "slot" => head_slot, - "block_delay" => ?block_delay_total, - "observed_delay" => ?block_delays.observed, - "imported_delay" => ?block_delays.imported, - "set_as_head_delay" => ?block_delays.set_as_head, - ); - } - } - - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.update_head(beacon_block_root); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "update head" - ); - }); - - if is_epoch_transition || is_reorg { - self.persist_head_and_fork_choice()?; - self.op_pool.prune_attestations(self.epoch()?); - } - - if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { - // Due to race conditions, it's technically possible that the head we load here is - // different to the one earlier in this function. - // - // Since the head can't move backwards in terms of finalized epoch, we can only load a - // head with a *later* finalized state. There is no harm in this. - let head = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - - // State root of the finalized state on the epoch boundary, NOT the state - // of the finalized block. We need to use an iterator in case the state is beyond - // the reach of the new head's `state_roots` array. 
- let new_finalized_slot = head - .beacon_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let new_finalized_state_root = process_results( - StateRootsIterator::new(&self.store, &head.beacon_state), - |mut iter| { - iter.find_map(|(state_root, slot)| { - if slot == new_finalized_slot { - Some(state_root) - } else { - None - } - }) - }, - )? - .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; - - self.after_finalization(&head.beacon_state, new_finalized_state_root)?; - } - - // Register a server-sent event if necessary - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_head_subscribers() { - match (dependent_root, prev_dependent_root) { - (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { - event_handler.register(EventKind::Head(SseHead { - slot: head_slot, - block: beacon_block_root, - state: state_root, - current_duty_dependent_root, - previous_duty_dependent_root, - epoch_transition: is_epoch_transition, - })); - } - (Err(e), _) | (_, Err(e)) => { - warn!( - self.log, - "Unable to find dependent roots, cannot register head event"; - "error" => ?e - ); - } - } - } - - if is_reorg && event_handler.has_reorg_subscribers() { - event_handler.register(EventKind::ChainReorg(SseChainReorg { - slot: head_slot, - depth: reorg_distance.as_u64(), - old_head_block: current_head.block_root, - old_head_state: current_head.state_root, - new_head_block: beacon_block_root, - new_head_state: state_root, - epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), - })); - } - - if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { - let peer_info = self - .block_times_cache - .read() - .get_peer_info(beacon_block_root); - let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - event_handler.register(EventKind::LateHead(SseLateHead { - slot: head_slot, - block: beacon_block_root, - peer_id: peer_info.id, - peer_client: peer_info.client, - proposer_index: head_proposer_index, - proposer_graffiti, - block_delay: block_delay_total, - observed_delay: block_delays.observed, - imported_delay: block_delays.imported, - set_as_head_delay: block_delays.set_as_head, - })); - } - } - - // Update the execution layer. - // Always use the wall-clock slot to update the execution engine rather than the `slot` - // passed in. - if let Err(e) = self.update_execution_engine_forkchoice_blocking(self.slot()?) { - crit!( - self.log, - "Failed to update execution head"; - "error" => ?e - ); - } - - // Performing this call immediately after - // `update_execution_engine_forkchoice_blocking` might result in two calls to fork - // choice updated, one *without* payload attributes and then a second *with* - // payload attributes. - // - // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as - // far as I know. - if let Err(e) = self.prepare_beacon_proposer_blocking() { - crit!( - self.log, - "Failed to prepare proposers after fork choice"; - "error" => ?e - ); - } - - Ok(()) - } - - pub fn prepare_beacon_proposer_blocking(self: &Arc) -> Result<(), Error> { - let current_slot = self.slot()?; - - // Avoids raising an error before Bellatrix. - // - // See `Self::prepare_beacon_proposer_async` for more detail. 
- if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.prepare_beacon_proposer_async(current_slot)) - .map_err(Error::PrepareProposerBlockingFailed)? + pub fn block_is_known_to_fork_choice(&self, root: &Hash256) -> bool { + self.canonical_head + .fork_choice_read_lock() + .contains_block(root) } /// Determines the beacon proposer for the next slot. If that proposer is registered in the @@ -3889,7 +3620,7 @@ impl BeaconChain { /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for /// the next slot and the block at the current slot is already known). - pub async fn prepare_beacon_proposer_async( + pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, ) -> Result<(), Error> { @@ -3912,20 +3643,45 @@ impl BeaconChain { return Ok(()); } - let head = self.head_info()?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + // Atomically read some values from the canonical head, whilst avoiding holding the cached + // head `Arc` any longer than necessary. + // + // Use a blocking task since blocking the core executor on the canonical head read lock can + // block the core tokio executor. + let chain = self.clone(); + let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) = + self.spawn_blocking_handle( + move || { + let cached_head = chain.canonical_head.cached_head(); + let head_block_root = cached_head.head_block_root(); + let decision_root = cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root)?; + Ok::<_, Error>(( + cached_head.head_slot(), + head_block_root, + decision_root, + cached_head.head_random()?, + cached_head.forkchoice_update_parameters(), + )) + }, + "prepare_beacon_proposer_fork_choice_read", + ) + .await??; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); // Don't bother with proposer prep if the head is more than // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. // // This prevents the routine from running during sync. - if head.slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS + if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS < current_slot { debug!( self.log, "Head too old for proposer prep"; - "head_slot" => head.slot, + "head_slot" => head_slot, "current_slot" => current_slot, ); return Ok(()); @@ -3934,9 +3690,9 @@ impl BeaconChain { // Ensure that the shuffling decision root is correct relative to the epoch we wish to // query. let shuffling_decision_root = if head_epoch == prepare_epoch { - head.proposer_shuffling_decision_root + head_decision_root } else { - head.block_root + head_root }; // Read the proposer from the proposer cache. @@ -3966,7 +3722,7 @@ impl BeaconChain { return Ok(()); } - let (proposers, decision_root, fork) = + let (proposers, decision_root, _, fork) = compute_proposer_duties_from_head(prepare_epoch, self)?; let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize); @@ -4012,7 +3768,7 @@ impl BeaconChain { .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), - prev_randao: head.random, + prev_randao: head_random, suggested_fee_recipient: execution_layer .get_suggested_fee_recipient(proposer as u64) .await, @@ -4022,18 +3778,13 @@ impl BeaconChain { self.log, "Preparing beacon proposer"; "payload_attributes" => ?payload_attributes, - "head_root" => ?head.block_root, + "head_root" => ?head_root, "prepare_slot" => prepare_slot, "validator" => proposer, ); let already_known = execution_layer - .insert_proposer( - prepare_slot, - head.block_root, - proposer as u64, - payload_attributes, - ) + .insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes) .await; // Only push a log to the user if this is the first time we've seen this proposer for this // slot. @@ -4075,7 +3826,7 @@ impl BeaconChain { // known). if till_prepare_slot <= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR - || head.slot + 1 >= prepare_slot + || head_slot + 1 >= prepare_slot { debug!( self.log, @@ -4084,37 +3835,19 @@ impl BeaconChain { "prepare_slot" => prepare_slot ); - self.update_execution_engine_forkchoice_async(current_slot) + // Use the blocking method here so that we don't form a queue of these functions when + // routinely calling them. + self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params) .await?; } Ok(()) } - pub fn update_execution_engine_forkchoice_blocking( - self: &Arc, - current_slot: Slot, - ) -> Result<(), Error> { - // Avoids raising an error before Bellatrix. - // - // See `Self::update_execution_engine_forkchoice_async` for more detail. - if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.update_execution_engine_forkchoice_async(current_slot)) - .map_err(Error::ForkchoiceUpdate)? - } - - pub async fn update_execution_engine_forkchoice_async( + pub async fn update_execution_engine_forkchoice( self: &Arc, current_slot: Slot, + params: ForkchoiceUpdateParameters, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4152,73 +3885,56 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - // Deadlock warning: - // - // We are taking the `self.fork_choice` lock whilst holding the `forkchoice_lock`. This - // is intentional, since it allows us to ensure a consistent ordering of messages to the - // execution layer. - let forkchoice_update_parameters = - self.fork_choice.read().get_forkchoice_update_parameters(); - let (head_block_root, head_hash, finalized_hash) = if let Some(params) = - forkchoice_update_parameters + let (head_block_root, head_hash, finalized_hash) = if let Some(head_hash) = params.head_hash { - if let Some(head_hash) = params.head_hash { - ( - params.head_root, - head_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. 
- ForkName::Base | ForkName::Altair => return Ok(()), - _ => { - // We are post-bellatrix - if execution_layer - .payload_attributes(next_slot, params.head_root) + ( + params.head_root, + head_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. + match self.spec.fork_name_at_slot::(next_slot) { + // We are pre-bellatrix; no need to update the EL. + ForkName::Base | ForkName::Altair => return Ok(()), + _ => { + // We are post-bellatrix + if execution_layer + .payload_attributes(next_slot, params.head_root) + .await + .is_some() + { + // We are a proposer, check for terminal_pow_block_hash + if let Some(terminal_pow_block_hash) = execution_layer + .get_terminal_pow_block_hash(&self.spec) .await - .is_some() + .map_err(Error::ForkchoiceUpdate)? { - // We are a proposer, check for terminal_pow_block_hash - if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec) - .await - .map_err(Error::ForkchoiceUpdate)? - { - info!( - self.log, - "Prepared POS transition block proposer"; "slot" => next_slot - ); - ( - params.head_root, - terminal_pow_block_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // TTD hasn't been reached yet, no need to update the EL. - return Ok(()); - } + info!( + self.log, + "Prepared POS transition block proposer"; "slot" => next_slot + ); + ( + params.head_root, + terminal_pow_block_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) } else { - // We are not a proposer, no need to update the EL. + // TTD hasn't been reached yet, no need to update the EL. return Ok(()); } + } else { + // We are not a proposer, no need to update the EL. + return Ok(()); } } } - } else { - warn!( - self.log, - "Missing forkchoice params"; - "msg" => "please report this non-critical bug" - ); - return Ok(()); }; let forkchoice_updated_response = execution_layer @@ -4234,11 +3950,19 @@ impl BeaconChain { Ok(status) => match status { PayloadStatus::Valid => { // Ensure that fork choice knows that the block is no longer optimistic. - if let Err(e) = self - .fork_choice - .write() - .on_valid_execution_payload(head_block_root) - { + let chain = self.clone(); + let fork_choice_update_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_valid_execution_payload(head_block_root) + }, + "update_execution_engine_invalid_payload", + ) + .await?; + if let Err(e) = fork_choice_update_result { error!( self.log, "Failed to validate payload"; @@ -4274,24 +3998,14 @@ impl BeaconChain { ); // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. - let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: latest_valid_hash, - }, - ) - }, - "process_invalid_execution_payload_many", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? 
- .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4307,22 +4021,10 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). - let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - ) - }, - "process_invalid_execution_payload_single", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? - .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4332,30 +4034,85 @@ impl BeaconChain { } /// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`. - fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { + pub fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| { slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix }) } - /// Returns the status of the current head block, regarding the validity of the execution - /// payload. - pub fn head_safety_status(&self) -> Result { - let head = self.head_info()?; - let head_block = self - .fork_choice - .read() - .get_block(&head.block_root) - .ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?; - - let status = match head_block.execution_status { - ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)), - ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash), - ExecutionStatus::Optimistic(block_hash) => HeadSafetyStatus::Unsafe(block_hash), - ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None), - }; + /// Returns the value of `execution_optimistic` for `block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + pub fn is_optimistic_block( + &self, + block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block(&block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } - Ok(status) + /// Returns the value of `execution_optimistic` for `head_block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + /// + /// This function will return an error if `head_block` is not present in the fork choice store + /// and so should only be used on the head block or when the block *should* be present in the + /// fork choice store. + /// + /// There is a potential race condition when syncing where the block_root of `head_block` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_head_block( + &self, + head_block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. 
+ if self.slot_is_prior_to_bellatrix(head_block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(&head_block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } + + /// Returns the value of `execution_optimistic` for the current head block. + /// You can optionally provide `head_info` if it was computed previously. + /// + /// Returns `Ok(false)` if the head block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic`. + /// + /// There is a potential race condition when syncing where the block root of `head_info` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_head(&self) -> Result { + self.canonical_head + .head_execution_status() + .map(|status| status.is_optimistic()) + } + + pub fn is_optimistic_block_root( + &self, + block_slot: Slot, + block_root: &Hash256, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(block_slot) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(block_root) + .map_err(BeaconChainError::ForkChoiceError) + } } /// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`. @@ -4417,7 +4174,7 @@ impl BeaconChain { /// Note: this function **MUST** be called from a non-async context since /// it contains a call to `fork_choice` which may eventually call /// `tokio::runtime::block_on` in certain cases. - pub fn per_slot_task(self: &Arc) { + pub async fn per_slot_task(self: &Arc) { trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { // Always run the light-weight pruning tasks (these structures should be empty during @@ -4426,14 +4183,12 @@ impl BeaconChain { self.block_times_cache.write().prune(slot); // Don't run heavy-weight tasks during sync. - if self.best_slot().map_or(true, |head_slot| { - head_slot + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot - }) { + if self.best_slot() + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot { return; } // Run fork choice and signal to any waiting task that it has completed. - if let Err(e) = self.fork_choice() { + if let Err(e) = self.recompute_head_at_current_slot().await { error!( self.log, "Fork choice error at slot start"; @@ -4457,67 +4212,6 @@ impl BeaconChain { } } - /// Called after `self` has had a new block finalized. - /// - /// Performs pruning and finality-based optimizations. 
- fn after_finalization( - &self, - head_state: &BeaconState, - new_finalized_state_root: Hash256, - ) -> Result<(), Error> { - self.fork_choice.write().prune()?; - let new_finalized_checkpoint = head_state.finalized_checkpoint(); - - self.observed_block_producers.write().prune( - new_finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - ); - - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_finalized_checkpoint.epoch); - debug!( - self.log, - "Snapshot cache pruned"; - "new_len" => snapshot_cache.len(), - "remaining_roots" => ?snapshot_cache.beacon_block_roots(), - ); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); - - self.op_pool.prune_all(head_state, self.epoch()?); - - self.store_migrator.process_finalization( - new_finalized_state_root.into(), - new_finalized_checkpoint, - self.head_tracker.clone(), - )?; - - self.attester_cache - .prune_below(new_finalized_checkpoint.epoch); - - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_finalized_subscribers() { - event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - epoch: new_finalized_checkpoint.epoch, - block: new_finalized_checkpoint.root, - state: new_finalized_state_root, - })); - } - } - - Ok(()) - } - /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head /// `head_block_root`. The `map_fn` will be supplied two values: /// @@ -4556,8 +4250,8 @@ impl BeaconChain { F: Fn(&CommitteeCache, Hash256) -> Result, { let head_block = self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head_block_root) .ok_or(Error::MissingBeaconBlock(head_block_root))?; @@ -4702,10 +4396,13 @@ impl BeaconChain { ) -> Result>>, Error> { let mut dump = vec![]; - let mut last_slot = BeaconSnapshot { - beacon_block: self.head()?.beacon_block.into(), - beacon_block_root: self.head()?.beacon_block_root, - beacon_state: self.head()?.beacon_state, + let mut last_slot = { + let head = self.canonical_head.cached_head(); + BeaconSnapshot { + beacon_block: Arc::new(head.snapshot.beacon_block.clone_as_blinded()), + beacon_block_root: head.snapshot.beacon_block_root, + beacon_state: head.snapshot.beacon_state.clone(), + } }; dump.push(last_slot.clone()); @@ -4732,7 +4429,7 @@ impl BeaconChain { })?; let slot = BeaconSnapshot { - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_block_root, beacon_state, }; @@ -4770,12 +4467,7 @@ impl BeaconChain { } pub fn dump_as_dot(&self, output: &mut W) { - let canonical_head_hash = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout) - .unwrap() - .beacon_block_root; + let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet = HashSet::new(); let mut finalized_blocks: HashSet = HashSet::new(); let mut justified_blocks: HashSet = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 38a25a57661..0d65b8aa62e 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -13,8 +13,8 @@ use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlock, BeaconState, 
BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, Hash256, - Slot, + BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, + Hash256, Slot, }; #[derive(Debug)] @@ -265,7 +265,7 @@ where fn on_verified_block>( &mut self, - _block: &BeaconBlock, + _block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error> { diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index d645201a580..e76a5a80588 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -9,12 +9,14 @@ //! values it stores are very small, so this should not be an issue. use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use fork_choice::ExecutionStatus; use lru::LruCache; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; use std::cmp::Ordering; use types::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, + BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot, + Unsigned, }; /// The number of sets of proposer indices that should be cached. @@ -135,11 +137,26 @@ impl BeaconProposerCache { pub fn compute_proposer_duties_from_head( current_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, Fork), BeaconChainError> { - // Take a copy of the head of the chain. - let head = chain.head()?; - let mut state = head.beacon_state; - let head_state_root = head.beacon_block.state_root(); +) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { + // Atomically collect information about the head whilst holding the canonical head `Arc` as + // short as possible. + let (mut state, head_state_root, head_block_root) = { + let head = chain.canonical_head.cached_head(); + // Take a copy of the head state. + let head_state = head + .snapshot + .beacon_state + .clone_with(CloneConfig::committee_caches_only()); + let head_state_root = head.head_state_root(); + let head_block_root = head.head_block_root(); + (head_state, head_state_root, head_block_root) + }; + + let execution_status = chain + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?; // Advance the state into the requested epoch. ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?; @@ -153,7 +170,7 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root(chain.genesis_block_root) .map_err(BeaconChainError::from)?; - Ok((indices, dependent_root, state.fork())) + Ok((indices, dependent_root, execution_status, state.fork())) } /// If required, advance `state` to `target_epoch`. diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 94adb479c84..8491622cb09 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,4 +1,5 @@ use serde_derive::Serialize; +use std::sync::Arc; use types::{ beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, SignedBeaconBlock, @@ -8,7 +9,7 @@ use types::{ /// head, justified head and finalized head. 
#[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot = FullPayload> { - pub beacon_block: SignedBeaconBlock, + pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -16,7 +17,7 @@ pub struct BeaconSnapshot = FullPayload> impl> BeaconSnapshot { /// Create a new checkpoint. pub fn new( - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { @@ -39,7 +40,7 @@ impl> BeaconSnapshot { /// Update all fields of the checkpoint. pub fn update( &mut self, - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index afdbaf13ee2..197ce03e314 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -31,24 +31,27 @@ //! |--------------- //! | //! â–¼ -//! SignatureVerifiedBlock +//! SignatureVerifiedBlock //! | //! â–¼ -//! FullyVerifiedBlock +//! ExecutionPendingBlock +//! | +//! await //! | //! â–¼ //! END //! //! ``` use crate::execution_payload::{ - notify_new_payload, validate_execution_payload_for_gossip, validate_merge_block, + is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, + PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, @@ -56,11 +59,11 @@ use crate::{ use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; +use fork_choice::PayloadVerificationStatus; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::per_block_processing::is_merge_transition_block; @@ -75,16 +78,16 @@ use std::fs; use std::io::Write; use std::sync::Arc; use std::time::Duration; -use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use task_executor::JoinHandle; use tree_hash::TreeHash; -use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -const POS_PANDA_BANNER: &str = r#" +pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, ;" ^; ;' ", ;" ^; ;' ", ; s$$$$$$$s ; ; s$$$$$$$s ; @@ -129,7 +132,7 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. - ParentUnknown(Box>), + ParentUnknown(Arc>), /// The block skips too many slots and is a DoS risk. TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. 
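// --- Illustrative sketch (not part of the patch): why blocks move behind `Arc` ---
// The hunks around this point change `SignedBeaconBlock<E>` to `Arc<SignedBeaconBlock<E>>`
// in `BeaconSnapshot`, `BlockError::ParentUnknown` and the verification structs, so one
// block allocation can be shared between error paths, caches and async tasks without
// deep clones. The toy `Block`/`VerifyError` types below are assumptions used only to
// demonstrate that pattern; they are not Lighthouse APIs.
use std::sync::Arc;

struct Block {
    slot: u64,
}

enum VerifyError {
    // Returning the `Arc` lets the caller queue or cache the block without cloning it.
    ParentUnknown(Arc<Block>),
}

fn verify(block: Arc<Block>, parent_known: bool) -> Result<Arc<Block>, VerifyError> {
    if parent_known {
        Ok(block)
    } else {
        // A cheap pointer copy is handed back, not a structural clone of the block.
        Err(VerifyError::ParentUnknown(block))
    }
}

fn main() {
    let block = Arc::new(Block { slot: 42 });
    match verify(Arc::clone(&block), false) {
        Ok(b) => println!("imported block at slot {}", b.slot),
        Err(VerifyError::ParentUnknown(b)) => println!("queue slot {} for parent lookup", b.slot),
    }
}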
@@ -419,6 +422,12 @@ impl From for BlockError { } } +/// Stores information about verifying a payload against an execution engine. +pub struct PayloadVerificationOutcome { + pub payload_verification_status: PayloadVerificationStatus, + pub is_valid_merge_transition_block: bool, +} + /// Information about invalid blocks which might still be slashable despite being invalid. #[allow(clippy::enum_variant_names)] pub enum BlockSlashInfo { @@ -474,7 +483,7 @@ fn process_block_slash_info( /// Verify all signatures (except deposit signatures) on all blocks in the `chain_segment`. If all /// signatures are valid, the `chain_segment` is mapped to a `Vec` that can -/// later be transformed into a `FullyVerifiedBlock` without re-checking the signatures. If any +/// later be transformed into a `ExecutionPendingBlock` without re-checking the signatures. If any /// signature in the block is invalid, an `Err` is returned (it is not possible to known _which_ /// signature was invalid). /// @@ -483,7 +492,7 @@ fn process_block_slash_info( /// The given `chain_segment` must span no more than two epochs, otherwise an error will be /// returned. pub fn signature_verify_chain_segment( - mut chain_segment: Vec<(Hash256, SignedBeaconBlock)>, + mut chain_segment: Vec<(Hash256, Arc>)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -541,7 +550,7 @@ pub fn signature_verify_chain_segment( #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct GossipVerifiedBlock { - pub block: SignedBeaconBlock, + pub block: Arc>, pub block_root: Hash256, parent: Option>, } @@ -549,11 +558,15 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: Option>, } +/// Used to await the result of executing payload with a remote EE. +type PayloadVerificationHandle = + JoinHandle>>>; + /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and /// ready to import into the `BeaconChain`. The validation includes: /// @@ -562,42 +575,42 @@ pub struct SignatureVerifiedBlock { /// - State root check /// - Per block processing /// -/// Note: a `FullyVerifiedBlock` is not _forever_ valid to be imported, it may later become invalid -/// due to finality or some other event. A `FullyVerifiedBlock` should be imported into the +/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid +/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. -pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { - pub block: SignedBeaconBlock, +pub struct ExecutionPendingBlock { + pub block: Arc>, pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, - pub confirmation_db_batch: Vec>, - pub payload_verification_status: PayloadVerificationStatus, + pub confirmed_state_roots: Vec, + pub payload_verification_handle: PayloadVerificationHandle, } -/// Implemented on types that can be converted into a `FullyVerifiedBlock`. +/// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. 
-pub trait IntoFullyVerifiedBlock: Sized { - fn into_fully_verified_block( +pub trait IntoExecutionPendingBlock: Sized { + fn into_execution_pending_block( self, chain: &Arc>, - ) -> Result, BlockError> { - self.into_fully_verified_block_slashable(chain) - .map(|fully_verified| { + ) -> Result, BlockError> { + self.into_execution_pending_block_slashable(chain) + .map(|execution_pending| { // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_block_header(fully_verified.block.signed_block_header()); + slasher.accept_block_header(execution_pending.block.signed_block_header()); } - fully_verified + execution_pending }) .map_err(|slash_info| process_block_slash_info(chain, slash_info)) } /// Convert the block to fully-verified form while producing data to aid checking slashability. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>>; + ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; } @@ -608,7 +621,7 @@ impl GossipVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // If the block is valid for gossip we don't supply it to the slasher here because @@ -623,7 +636,7 @@ impl GossipVerifiedBlock { /// As for new, but doesn't pass the block to the slasher. fn new_without_slasher_checks( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -658,7 +671,11 @@ impl GossipVerifiedBlock { // reboot if the `observed_block_producers` cache is empty. In that case, without this // check, we will load the parent and state from disk only to find out later that we // already know this block. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -678,10 +695,10 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - let block = check_block_is_finalized_descendant::( - block, - &chain.fork_choice.read(), - &chain.store, + check_block_is_finalized_descendant( + chain, + &chain.canonical_head.fork_choice_write_lock(), + &block, )?; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -827,15 +844,15 @@ impl GossipVerifiedBlock { } } -impl IntoFullyVerifiedBlock for GossipVerifiedBlock { +impl IntoExecutionPendingBlock for GossipVerifiedBlock { /// Completes verification of the wrapped `block`. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { - let fully_verified = + ) -> Result, BlockSlashInfo>> { + let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - fully_verified.into_fully_verified_block_slashable(chain) + execution_pending.into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -849,7 +866,7 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. 
pub fn new( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result> { @@ -892,7 +909,7 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { @@ -947,12 +964,12 @@ impl SignatureVerifiedBlock { } } -impl IntoFullyVerifiedBlock for SignatureVerifiedBlock { +impl IntoExecutionPendingBlock for SignatureVerifiedBlock { /// Completes verification of the wrapped `block`. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) @@ -961,7 +978,7 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignedBeaconBlock { +impl IntoExecutionPendingBlock for Arc> { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` - /// and then using that implementation of `IntoFullyVerifiedBlock` to complete verification. - fn into_fully_verified_block_slashable( + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, None, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? - .into_fully_verified_block_slashable(chain) + .into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -995,7 +1012,7 @@ impl IntoFullyVerifiedBlock for SignedBeaconBlock FullyVerifiedBlock<'a, T> { +impl ExecutionPendingBlock { /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See /// the struct-level documentation for more information. /// @@ -1004,12 +1021,16 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: PreProcessingSnapshot, chain: &Arc>, ) -> Result> { - if let Some(parent) = chain.fork_choice.read().get_block(&block.parent_root()) { + if let Some(parent) = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block.parent_root()) + { // Reject any block where the parent has an invalid payload. It's impossible for a valid // block to descend from an invalid parent. if parent.execution_status.is_invalid() { @@ -1028,7 +1049,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } // Reject any block that exceeds our limit on skipped slots. 
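// --- Illustrative sketch (not part of the patch): the "spawn now, await later" pattern ---
// The following hunks replace the old synchronous `notify_new_payload` call with a
// `payload_verification_future` that is spawned immediately and only awaited after the
// state transition completes. This minimal tokio program shows the same shape; the names
// `verify_payload` and `run_state_transition` are invented for the example and do not
// exist in Lighthouse.
use std::time::Duration;
use tokio::task::JoinHandle;

async fn verify_payload() -> Result<bool, String> {
    // Stand-in for the round-trip to the execution engine.
    tokio::time::sleep(Duration::from_millis(50)).await;
    Ok(true)
}

fn run_state_transition() -> u64 {
    // Stand-in for per-slot/per-block processing that can proceed concurrently.
    (0..1_000u64).sum()
}

#[tokio::main]
async fn main() -> Result<(), String> {
    // Kick off payload verification without blocking on the result.
    let handle: JoinHandle<Result<bool, String>> = tokio::spawn(verify_payload());

    // Do the consensus-side work while the engine call is in flight.
    let state_summary = run_state_transition();

    // Only now require the verification outcome, mirroring how the
    // `payload_verification_handle` is consumed after block processing.
    let payload_valid = handle.await.map_err(|e| e.to_string())??;
    println!("state={state_summary}, payload_valid={payload_valid}");
    Ok(())
}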
@@ -1048,7 +1069,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // Stage a batch of operations to be completed atomically if this block is imported // successfully. - let mut confirmation_db_batch = vec![]; + let mut confirmed_state_roots = vec![]; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { @@ -1121,7 +1142,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { chain.store.do_atomically(state_batch)?; drop(txn_lock); - confirmation_db_batch.push(StoreOp::DeleteStateTemporaryFlag(state_root)); + confirmed_state_roots.push(state_root); state_root }; @@ -1140,59 +1161,82 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } } - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - let valid_merge_transition_block = - if is_merge_transition_block(&state, block.message().body()) { - validate_merge_block(chain, block.message())?; - true - } else { - false + let block_slot = block.slot(); + let state_current_epoch = state.current_epoch(); + + // Define a future that will verify the execution payload with an execution engine (but + // don't execute it yet). + let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; + let is_valid_merge_transition_block = + is_merge_transition_block(&state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message()).await?; }; - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = notify_new_payload(chain, &state, block.message())?; - - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; - - if !chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? 
- { - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + // + // It is important that this function is called *after* `per_slot_processing`, since the + // `randao` may change. + let payload_verification_status = payload_notifier.notify_new_payload().await?; + + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.execution_payload.block_hash); + + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } } - } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + if block_slot.epoch(T::EthSpec::slots_per_epoch()) + VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 >= epoch { @@ -1201,7 +1245,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // the `validator_monitor` lock from being bounced or held for a long time whilst // performing `per_slot_processing`. for (i, summary) in summaries.iter().enumerate() { - let epoch = state.current_epoch() - Epoch::from(summaries.len() - i); + let epoch = state_current_epoch - Epoch::from(summaries.len() - i); if let Err(e) = validator_monitor.process_validator_statuses(epoch, summary, &chain.spec) { @@ -1300,21 +1344,13 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { }); } - if valid_merge_transition_block { - info!(chain.log, "{}", POS_PANDA_BANNER); - info!(chain.log, "Proof of Stake Activated"; "slot" => block.slot()); - info!(chain.log, ""; "Terminal POW Block Hash" => ?block.message().execution_payload()?.parent_hash().into_root()); - info!(chain.log, ""; "Merge Transition Block Root" => ?block.message().tree_hash_root()); - info!(chain.log, ""; "Merge Transition Execution Hash" => ?block.message().execution_payload()?.block_hash().into_root()); - } - Ok(Self { block, block_root, state, parent_block: parent.beacon_block, - confirmation_db_batch, - payload_verification_status, + confirmed_state_roots, + payload_verification_handle, }) } } @@ -1366,8 +1402,9 @@ fn check_block_against_finalized_slot( chain: &BeaconChain, ) -> Result<(), BlockError> { let finalized_slot = chain - .head_info()? 
- .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -1383,13 +1420,17 @@ fn check_block_against_finalized_slot( } /// Returns `Ok(block)` if the block descends from the finalized root. -pub fn check_block_is_finalized_descendant>( - block: SignedBeaconBlock, - fork_choice: &ForkChoice, - store: &HotColdDB, -) -> Result, BlockError> { +/// +/// ## Warning +/// +/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. +pub fn check_block_is_finalized_descendant( + chain: &BeaconChain, + fork_choice: &BeaconForkChoice, + block: &Arc>, +) -> Result<(), BlockError> { if fork_choice.is_descendant_of_finalized(block.parent_root()) { - Ok(block) + Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, // then there are two more cases: @@ -1399,7 +1440,8 @@ pub fn check_block_is_finalized_descendant( // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -1477,16 +1523,16 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( chain: &BeaconChain, - block: SignedBeaconBlock, -) -> Result<(ProtoBlock, SignedBeaconBlock), BlockError> { + block: Arc>, +) -> Result<(ProtoBlock, Arc>), BlockError> { if let Some(proto_block) = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block.message().parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(Box::new(block))) + Err(BlockError::ParentUnknown(block)) } } @@ -1496,12 +1542,12 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result< ( PreProcessingSnapshot, - SignedBeaconBlock, + Arc>, ), BlockError, > { @@ -1518,11 +1564,11 @@ fn load_parent( // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). 
if !chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } let block_delay = chain @@ -1717,19 +1763,12 @@ fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = chain - .with_head(|head| { - Ok(( - head.beacon_state.fork(), - head.beacon_state.genesis_validators_root(), - )) - }) - .map_err(|e: BlockError| e)?; + let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( &proposer_pubkey, - &fork, - genesis_validators_root, + &head_fork, + chain.genesis_validators_root, &chain.spec, ) { Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 361246b4d38..d8138b69d58 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; @@ -245,6 +245,7 @@ where let fork_choice = BeaconChain::>::load_fork_choice( store.clone(), + &self.spec, ) .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? .ok_or("Fork choice not found in store")?; @@ -337,7 +338,7 @@ where Ok(( BeaconSnapshot { beacon_block_root, - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_state, }, self, @@ -352,12 +353,15 @@ where self = updated_builder; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); + let current_slot = None; let fork_choice = ForkChoice::from_anchor( fc_store, genesis.beacon_block_root, &genesis.beacon_block, &genesis.beacon_state, + current_slot, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -455,17 +459,20 @@ where let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, - beacon_block: weak_subj_block, + beacon_block: Arc::new(weak_subj_block), beacon_state: weak_subj_state, }; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot); + let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + current_slot, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -638,17 +645,18 @@ where head_block_root, &head_state, store.clone(), + Some(current_slot), &self.spec, )?; } - let mut canonical_head = BeaconSnapshot { + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, - beacon_block: head_block, + beacon_block: Arc::new(head_block), beacon_state: head_state, }; - canonical_head + head_snapshot .beacon_state .build_all_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; @@ -658,25 +666,17 @@ where // // This is a sanity check to detect database corruption. 
let fc_finalized = fork_choice.finalized_checkpoint(); - let head_finalized = canonical_head.beacon_state.finalized_checkpoint(); - if fc_finalized != head_finalized { - let is_genesis = head_finalized.root.is_zero() - && head_finalized.epoch == fc_finalized.epoch - && fc_finalized.root == genesis_block_root; - let is_wss = store.get_anchor_slot().map_or(false, |anchor_slot| { - fc_finalized.epoch == anchor_slot.epoch(TEthSpec::slots_per_epoch()) - }); - if !is_genesis && !is_wss { - return Err(format!( - "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ + let head_finalized = head_snapshot.beacon_state.finalized_checkpoint(); + if fc_finalized.epoch < head_finalized.epoch { + return Err(format!( + "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ {:?}", - fc_finalized, head_finalized - )); - } + fc_finalized, head_finalized + )); } let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| { - ValidatorPubkeyCache::new(&canonical_head.beacon_state, store.clone()) + ValidatorPubkeyCache::new(&head_snapshot.beacon_state, store.clone()) .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e)) })?; @@ -691,7 +691,7 @@ where if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), - &canonical_head.beacon_state, + &head_snapshot.beacon_state, ); } @@ -725,10 +725,18 @@ where .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; + let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); + let genesis_time = head_snapshot.beacon_state.genesis_time(); + let head_for_snapshot_cache = head_snapshot.clone(); + let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let beacon_chain = BeaconChain { spec: self.spec, config: self.chain_config, store, + task_executor: self + .task_executor + .ok_or("Cannot build without task executor")?, store_migrator, slot_clock, op_pool: self.op_pool.ok_or("Cannot build without op pool")?, @@ -758,18 +766,18 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, - genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(), - canonical_head: TimeoutRwLock::new(canonical_head.clone()), + genesis_validators_root, + genesis_time, + canonical_head, genesis_block_root, genesis_state_root, - fork_choice: RwLock::new(fork_choice), fork_choice_signal_tx, fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, - canonical_head, + head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), beacon_proposer_cache: <_>::default(), @@ -787,9 +795,7 @@ where validator_monitor: RwLock::new(validator_monitor), }; - let head = beacon_chain - .head() - .map_err(|e| format!("Failed to get head: {:?}", e))?; + let head = beacon_chain.head_snapshot(); // Prime the attester cache with the head state. 
    beacon_chain
@@ -992,10 +998,10 @@ mod test {
            .build()
            .expect("should build");
-        let head = chain.head().expect("should get head");
+        let head = chain.head_snapshot();
-        let state = head.beacon_state;
-        let block = head.beacon_block;
+        let state = &head.beacon_state;
+        let block = &head.beacon_block;
         assert_eq!(state.slot(), Slot::new(0), "should start from genesis");
         assert_eq!(
@@ -1014,7 +1020,7 @@ mod test {
                .get_blinded_block(&Hash256::zero())
                .expect("should read db")
                .expect("should find genesis block"),
-            block.clone().into(),
+            block.clone_as_blinded(),
            "should store genesis block under zero hash alias"
        );
        assert_eq!(
diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs
new file mode 100644
index 00000000000..78a974eb3e9
--- /dev/null
+++ b/beacon_node/beacon_chain/src/canonical_head.rs
@@ -0,0 +1,1307 @@
+//! This module provides all functionality for finding the canonical head, updating all necessary
+//! components (e.g. caches) and maintaining a cached head block and state.
+//!
+//! For practically all applications, the "canonical head" can be read using
+//! `beacon_chain.canonical_head.cached_head()`.
+//!
+//! The canonical head can be updated using `beacon_chain.recompute_head()`.
+//!
+//! ## Deadlock safety
+//!
+//! This module contains three locks:
+//!
+//! 1. `RwLock<BeaconForkChoice>`: Contains `proto_array` fork choice.
+//! 2. `RwLock<CachedHead>`: Contains a cached block/state from the last run of `proto_array`.
+//! 3. `Mutex<()>`: Is used to prevent concurrent execution of `BeaconChain::recompute_head`.
+//!
+//! This module has to take great care to avoid causing a deadlock with these three locks. Any
+//! developers working in this module should tread carefully and seek a detailed review.
+//!
+//! To encourage safe use of this module, it should **only ever return a read or write lock for the
+//! fork choice lock (lock 1)**. Whilst public functions might indirectly utilise locks (2) and (3),
+//! the fundamental `RwLockWriteGuard` or `RwLockReadGuard` should never be exposed. This prevents
+//! external functions from acquiring these locks in conflicting orders and causing a deadlock.
+//!
+//! ## Design Considerations
+//!
+//! We separate the `BeaconForkChoice` and `CachedHead` into two `RwLocks` because we want to ensure
+//! fast access to the `CachedHead`. If we were to put them both under the same lock, we would need
+//! to take an exclusive write-lock on it in order to run `ForkChoice::get_head`. This can take tens
+//! of milliseconds and would block all downstream functions that want to know simple things like
+//! the head block root. This is unacceptable for fast-responding functions like the networking
+//! stack.
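+//!
+//! As a rough usage sketch (illustrative only; it assumes a `chain: Arc<BeaconChain<T>>` handle),
+//! readers take the cheap cached clone and only hold the fork choice read-lock for short,
+//! self-contained queries:
+//!
+//!     // Sketch: cheap read of the cached head, no long-lived lock is held.
+//!     let head = chain.canonical_head.cached_head();
+//!     let head_root = head.head_block_root();
+//!     let finalized = head.finalized_checkpoint();
+//!
+//!     // Short-lived read-lock on fork choice for a single query, dropped straight away.
+//!     let head_in_fork_choice = chain
+//!         .canonical_head
+//!         .fork_choice_read_lock()
+//!         .contains_block(&head_root);
+//!
+//!     // Re-running fork choice and refreshing the cached head:
+//!     // chain.recompute_head_at_current_slot().await?;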
+ +use crate::persisted_fork_choice::PersistedForkChoice; +use crate::{ + beacon_chain::{ + BeaconForkChoice, BeaconStore, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, + }, + block_times_cache::BlockTimesCache, + events::ServerSentEventHandler, + metrics, + validator_monitor::{get_slot_delay_ms, timestamp_now}, + BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot, +}; +use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; +use fork_choice::{ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock}; +use itertools::process_results; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use slog::{crit, debug, error, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; +use task_executor::{JoinHandle, ShutdownReason}; +use types::*; + +/// Simple wrapper around `RwLock` that uses private visibility to prevent any other modules from +/// accessing the contained lock without it being explicitly noted in this module. +pub struct CanonicalHeadRwLock(RwLock); + +impl From> for CanonicalHeadRwLock { + fn from(rw_lock: RwLock) -> Self { + Self(rw_lock) + } +} + +impl CanonicalHeadRwLock { + fn new(item: T) -> Self { + Self::from(RwLock::new(item)) + } + + fn read(&self) -> RwLockReadGuard { + self.0.read() + } + + fn write(&self) -> RwLockWriteGuard { + self.0.write() + } +} + +/// Provides a series of cached values from the last time `BeaconChain::recompute_head` was run. +/// +/// This struct is designed to be cheap-to-clone, any large fields should be wrapped in an `Arc` (or +/// similar). +#[derive(Clone)] +pub struct CachedHead { + /// Provides the head block and state from the last time the head was updated. + pub snapshot: Arc>, + /// The justified checkpoint as per `self.fork_choice`. + /// + /// This value may be distinct to the `self.head_snapshot.beacon_state.justified_checkpoint`. + /// This value should be used over the beacon state value in practically all circumstances. + justified_checkpoint: Checkpoint, + /// The finalized checkpoint as per `self.fork_choice`. + /// + /// This value may be distinct to the `self.head_snapshot.beacon_state.finalized_checkpoint`. + /// This value should be used over the beacon state value in practically all circumstances. + finalized_checkpoint: Checkpoint, + /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` + /// before Bellatrix. + head_hash: Option, + /// The `execution_payload.block_hash` of the finalized block. Set to `None` before Bellatrix. + finalized_hash: Option, +} + +impl CachedHead { + /// Returns root of the block at the head of the beacon chain. + pub fn head_block_root(&self) -> Hash256 { + self.snapshot.beacon_block_root + } + + /// Returns root of the `BeaconState` at the head of the beacon chain. + /// + /// ## Note + /// + /// This `BeaconState` has *not* been advanced to the current slot, it has the same slot as the + /// head block. + pub fn head_state_root(&self) -> Hash256 { + self.snapshot.beacon_state_root() + } + + /// Returns slot of the block at the head of the beacon chain. + /// + /// ## Notes + /// + /// This is *not* the current slot as per the system clock. Use `BeaconChain::slot` for the + /// system clock (aka "wall clock") slot. 
+ pub fn head_slot(&self) -> Slot { + self.snapshot.beacon_block.slot() + } + + /// Returns the `Fork` from the `BeaconState` at the head of the chain. + pub fn head_fork(&self) -> Fork { + self.snapshot.beacon_state.fork() + } + + /// Returns the randao mix for the block at the head of the chain. + pub fn head_random(&self) -> Result { + let state = &self.snapshot.beacon_state; + let root = *state.get_randao_mix(state.current_epoch())?; + Ok(root) + } + + /// Returns the active validator count for the current epoch of the head state. + /// + /// Should only return `None` if the caches have not been built on the head state (this should + /// never happen). + pub fn active_validator_count(&self) -> Option { + self.snapshot + .beacon_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map(|indices| indices.len()) + .ok() + } + + /// Returns the finalized checkpoint, as determined by fork choice. + /// + /// ## Note + /// + /// This is *not* the finalized checkpoint of the `head_snapshot.beacon_state`, rather it is the + /// best finalized checkpoint that has been observed by `self.fork_choice`. It is possible that + /// the `head_snapshot.beacon_state` finalized value is earlier than the one returned here. + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.finalized_checkpoint + } + + /// Returns the justified checkpoint, as determined by fork choice. + /// + /// ## Note + /// + /// This is *not* the "current justified checkpoint" of the `head_snapshot.beacon_state`, rather + /// it is the justified checkpoint in the view of `self.fork_choice`. It is possible that the + /// `head_snapshot.beacon_state` justified value is different to, but not conflicting with, the + /// one returned here. + pub fn justified_checkpoint(&self) -> Checkpoint { + self.justified_checkpoint + } + + /// Returns the cached values of `ForkChoice::forkchoice_update_parameters`. + /// + /// Useful for supplying to the execution layer. + pub fn forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { + ForkchoiceUpdateParameters { + head_root: self.snapshot.beacon_block_root, + head_hash: self.head_hash, + finalized_hash: self.finalized_hash, + } + } +} + +/// Represents the "canonical head" of the beacon chain. +/// +/// The `cached_head` is elected by the `fork_choice` algorithm contained in this struct. +/// +/// There is no guarantee that the state of the `fork_choice` struct will always represent the +/// `cached_head` (i.e. we may call `fork_choice` *without* updating the cached values), however +/// there is a guarantee that the `cached_head` represents some past state of `fork_choice` (i.e. +/// `fork_choice` never lags *behind* the `cached_head`). +pub struct CanonicalHead { + /// Provides an in-memory representation of the non-finalized block tree and is used to run the + /// fork choice algorithm and determine the canonical head. + pub fork_choice: CanonicalHeadRwLock>, + /// Provides values cached from a previous execution of `self.fork_choice.get_head`. + /// + /// Although `self.fork_choice` might be slightly more advanced that this value, it is safe to + /// consider that these values represent the "canonical head" of the beacon chain. + pub cached_head: CanonicalHeadRwLock>, + /// A lock used to prevent concurrent runs of `BeaconChain::recompute_head`. + /// + /// This lock **should not be made public**, it should only be used inside this module. + recompute_head_lock: Mutex<()>, +} + +impl CanonicalHead { + /// Instantiate `Self`. 
+ pub fn new( + fork_choice: BeaconForkChoice, + snapshot: Arc>, + ) -> Self { + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); + let cached_head = CachedHead { + snapshot, + justified_checkpoint: fork_choice_view.justified_checkpoint, + finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_hash: forkchoice_update_params.head_hash, + finalized_hash: forkchoice_update_params.finalized_hash, + }; + + Self { + fork_choice: CanonicalHeadRwLock::new(fork_choice), + cached_head: CanonicalHeadRwLock::new(cached_head), + recompute_head_lock: Mutex::new(()), + } + } + + /// Load a persisted version of `BeaconForkChoice` from the `store` and restore `self` to that + /// state. + /// + /// This is useful if some database corruption is expected and we wish to go back to our last + /// save-point. + pub(crate) fn restore_from_store( + &self, + // We don't actually need this value, however it's always present when we call this function + // and it needs to be dropped to prevent a dead-lock. Requiring it to be passed here is + // defensive programming. + mut fork_choice_write_lock: RwLockWriteGuard>, + store: &BeaconStore, + spec: &ChainSpec, + ) -> Result<(), Error> { + let fork_choice = >::load_fork_choice(store.clone(), spec)? + .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let beacon_block_root = fork_choice_view.head_block_root; + let beacon_block = store + .get_full_block(&beacon_block_root)? + .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; + let beacon_state_root = beacon_block.state_root(); + let beacon_state = store + .get_state(&beacon_state_root, Some(beacon_block.slot()))? + .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + let snapshot = BeaconSnapshot { + beacon_block_root, + beacon_block: Arc::new(beacon_block), + beacon_state, + }; + + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); + let cached_head = CachedHead { + snapshot: Arc::new(snapshot), + justified_checkpoint: fork_choice_view.justified_checkpoint, + finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_hash: forkchoice_update_params.head_hash, + finalized_hash: forkchoice_update_params.finalized_hash, + }; + + *fork_choice_write_lock = fork_choice; + // Avoid interleaving the fork choice and cached head locks. + drop(fork_choice_write_lock); + *self.cached_head.write() = cached_head; + + Ok(()) + } + + /// Returns the execution status of the block at the head of the beacon chain. + /// + /// This will only return `Err` in the scenario where `self.fork_choice` has advanced + /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to + /// run `BeaconChain::recompute_head` to update the cached values. + pub fn head_execution_status(&self) -> Result { + let head_block_root = self.cached_head().head_block_root(); + self.fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::HeadMissingFromForkChoice(head_block_root)) + } + + /// Returns a clone of `self.cached_head`. + /// + /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). + /// The `CachedHead` is designed to be fast-to-clone so this is preferred to passing back a + /// `RwLockReadGuard`, which may cause deadlock issues (see module-level documentation). 
+ /// + /// This function is safe to be public since it does not expose any locks. + pub fn cached_head(&self) -> CachedHead { + self.cached_head_read_lock().clone() + } + + /// Access a read-lock for the cached head. + /// + /// This function is **not safe** to be public. See the module-level documentation for more + /// information about protecting from deadlocks. + fn cached_head_read_lock(&self) -> RwLockReadGuard> { + self.cached_head.read() + } + + /// Access a write-lock for the cached head. + /// + /// This function is **not safe** to be public. See the module-level documentation for more + /// information about protecting from deadlocks. + fn cached_head_write_lock(&self) -> RwLockWriteGuard> { + self.cached_head.write() + } + + /// Access a read-lock for fork choice. + pub fn fork_choice_read_lock(&self) -> RwLockReadGuard> { + self.fork_choice.read() + } + + /// Access a write-lock for fork choice. + pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard> { + self.fork_choice.write() + } +} + +impl BeaconChain { + /// Contains the "best block"; the head of the canonical `BeaconChain`. + /// + /// It is important to note that the `snapshot.beacon_state` returned may not match the present slot. It + /// is the state as it was when the head block was received, which could be some slots prior to + /// now. + pub fn head(&self) -> CachedHead { + self.canonical_head.cached_head() + } + + /// Apply a function to an `Arc`-clone of the canonical head snapshot. + /// + /// This method is a relic from an old implementation where the canonical head was not behind + /// an `Arc` and the canonical head lock had to be held whenever it was read. This method is + /// fine to be left here, it just seems a bit weird. + pub fn with_head( + &self, + f: impl FnOnce(&BeaconSnapshot) -> Result, + ) -> Result + where + E: From, + { + let head_snapshot = self.head_snapshot(); + f(&head_snapshot) + } + + /// Returns the beacon block root at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block_root(&self) -> Hash256 { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block_root + } + + /// Returns the slot of the highest block in the canonical chain. + pub fn best_slot(&self) -> Slot { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block + .slot() + } + + /// Returns a `Arc` of the `BeaconSnapshot` at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_snapshot(&self) -> Arc> { + self.canonical_head.cached_head_read_lock().snapshot.clone() + } + + /// Returns the beacon block at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block(&self) -> Arc> { + self.head_snapshot().beacon_block.clone() + } + + /// Returns a clone of the beacon state at the head of the canonical chain. + /// + /// Cloning the head state is expensive and should generally be avoided outside of tests. + /// + /// See `Self::head` for more information. + pub fn head_beacon_state_cloned(&self) -> BeaconState { + // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention. + let snapshot: Arc<_> = self.head_snapshot(); + snapshot + .beacon_state + .clone_with(CloneConfig::committee_caches_only()) + } + + /// Execute the fork choice algorithm and enthrone the result as the canonical head. + /// + /// This method replaces the old `BeaconChain::fork_choice` method. 
+ pub async fn recompute_head_at_current_slot(self: &Arc) -> Result<(), Error> { + let current_slot = self.slot()?; + self.recompute_head_at_slot(current_slot).await + } + + /// Execute the fork choice algorithm and enthrone the result as the canonical head. + /// + /// The `current_slot` is specified rather than relying on the wall-clock slot. Using a + /// different slot to the wall-clock can be useful for pushing fork choice into the next slot + /// *just* before the start of the slot. This ensures that block production can use the correct + /// head value without being delayed. + pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) -> Result<(), Error> { + metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); + + let chain = self.clone(); + match self + .task_executor + .spawn_blocking_handle( + move || chain.recompute_head_at_slot_internal(current_slot), + "recompute_head_internal", + ) + .ok_or(Error::RuntimeShutdown)? + .await + .map_err(Error::TokioJoin)? + { + // Fork choice returned successfully and did not need to update the EL. + Ok(None) => Ok(()), + // Fork choice returned successfully and needed to update the EL. It has returned a + // join-handle from when it spawned some async tasks. We should await those tasks. + Ok(Some(join_handle)) => match join_handle.await { + // The async task completed successfully. + Ok(Some(())) => Ok(()), + // The async task did not complete successfully since the runtime is shutting down. + Ok(None) => { + debug!( + self.log, + "Did not update EL fork choice"; + "info" => "shutting down" + ); + Err(Error::RuntimeShutdown) + } + // The async task did not complete successfully, tokio returned an error. + Err(e) => { + error!( + self.log, + "Did not update EL fork choice"; + "error" => ?e + ); + Err(Error::TokioJoin(e)) + } + }, + // There was an error recomputing the head. + Err(e) => { + metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); + Err(e) + } + } + } + + /// A non-async (blocking) function which recomputes the canonical head and spawns async tasks. + /// + /// This function performs long-running, heavy-lifting tasks which should not be performed on + /// the core `tokio` executor. + fn recompute_head_at_slot_internal( + self: &Arc, + current_slot: Slot, + ) -> Result>>, Error> { + let recompute_head_lock = self.canonical_head.recompute_head_lock.lock(); + + // Take a clone of the current ("old") head. + let old_cached_head = self.canonical_head.cached_head(); + + // Determine the current ("old") fork choice parameters. + // + // It is important to read the `fork_choice_view` from the cached head rather than from fork + // choice, since the fork choice value might have changed between calls to this function. We + // are interested in the changes since we last cached the head values, not since fork choice + // was last run. + let old_view = ForkChoiceView { + head_block_root: old_cached_head.head_block_root(), + justified_checkpoint: old_cached_head.justified_checkpoint(), + finalized_checkpoint: old_cached_head.finalized_checkpoint(), + }; + + let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock(); + + // Recompute the current head via the fork choice algorithm. + fork_choice_write_lock.get_head(current_slot, &self.spec)?; + + // Read the current head value from the fork choice algorithm. 
+ let new_view = fork_choice_write_lock.cached_fork_choice_view(); + + // Downgrade the fork choice write-lock to a read lock, without allowing access to any + // other writers. + let fork_choice_read_lock = RwLockWriteGuard::downgrade(fork_choice_write_lock); + + // Check to ensure that the finalized block hasn't been marked as invalid. If it has, + // shut down Lighthouse. + let finalized_proto_block = fork_choice_read_lock.get_finalized_block()?; + check_finalized_payload_validity(self, &finalized_proto_block)?; + + // Sanity check the finalized checkpoint. + // + // The new finalized checkpoint must be either equal to or better than the previous + // finalized checkpoint. + check_against_finality_reversion(&old_view, &new_view)?; + + let new_head_proto_block = fork_choice_read_lock + .get_block(&new_view.head_block_root) + .ok_or(Error::HeadBlockMissingFromForkChoice( + new_view.head_block_root, + ))?; + + // Do not allow an invalid block to become the head. + // + // This check avoids the following infinite loop: + // + // 1. A new block is set as the head. + // 2. The EL is updated with the new head, and returns INVALID. + // 3. We call `process_invalid_execution_payload` and it calls this function. + // 4. This function elects an invalid block as the head. + // 5. GOTO 2 + // + // In theory, fork choice should never select an invalid head (i.e., step #3 is impossible). + // However, this check is cheap. + if new_head_proto_block.execution_status.is_invalid() { + return Err(Error::HeadHasInvalidPayload { + block_root: new_head_proto_block.root, + execution_status: new_head_proto_block.execution_status, + }); + } + + // Exit early if the head or justified/finalized checkpoints have not changed, there's + // nothing to do. + if new_view == old_view { + debug!( + self.log, + "No change in canonical head"; + "head" => ?new_view.head_block_root + ); + return Ok(None); + } + + // Get the parameters to update the execution layer since either the head or some finality + // parameters have changed. + let new_forkchoice_update_parameters = + fork_choice_read_lock.get_forkchoice_update_parameters(); + + perform_debug_logging::(&old_view, &new_view, &fork_choice_read_lock, &self.log); + + // Drop the read lock, it's no longer required and holding it any longer than necessary + // will just cause lock contention. + drop(fork_choice_read_lock); + + // If the head has changed, update `self.canonical_head`. + let new_cached_head = if new_view.head_block_root != old_view.head_block_root { + metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); + + // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling + // back to a database read if that fails. + let new_snapshot = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_cloned( + new_view.head_block_root, + CloneConfig::committee_caches_only(), + ) + }) + .map::, _>(Ok) + .unwrap_or_else(|| { + let beacon_block = self + .store + .get_full_block(&new_view.head_block_root)? + .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + + let beacon_state_root = beacon_block.state_root(); + let beacon_state: BeaconState = self + .get_state(&beacon_state_root, Some(beacon_block.slot()))? 
+ .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + Ok(BeaconSnapshot { + beacon_block: Arc::new(beacon_block), + beacon_block_root: new_view.head_block_root, + beacon_state, + }) + }) + .and_then(|mut snapshot| { + // Regardless of where we got the state from, attempt to build the committee + // caches. + snapshot + .beacon_state + .build_all_committee_caches(&self.spec) + .map_err(Into::into) + .map(|()| snapshot) + })?; + + let new_cached_head = CachedHead { + snapshot: Arc::new(new_snapshot), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let new_head = { + // Now the new snapshot has been obtained, take a write-lock on the cached head so + // we can update it quickly. + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + // Enshrine the new head as the canonical cached head. + *cached_head_write_lock = new_cached_head; + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Clear the early attester cache in case it conflicts with `self.canonical_head`. + self.early_attester_cache.clear(); + + new_head + } else { + let new_cached_head = CachedHead { + // The head hasn't changed, take a relatively cheap `Arc`-clone of the existing + // head. + snapshot: old_cached_head.snapshot.clone(), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + + // Enshrine the new head as the canonical cached head. Whilst the head block hasn't + // changed, the FFG checkpoints must have changed. + *cached_head_write_lock = new_cached_head; + + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Alias for readability. + let new_snapshot = &new_cached_head.snapshot; + let old_snapshot = &old_cached_head.snapshot; + + // If the head changed, perform some updates. + if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root { + if let Err(e) = + self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) + { + crit!( + self.log, + "Error updating canonical head"; + "error" => ?e + ); + } + } + + // Drop the old cache head nice and early to try and free the memory as soon as possible. + drop(old_cached_head); + + // If the finalized checkpoint changed, perform some updates. + if new_view.finalized_checkpoint != old_view.finalized_checkpoint { + if let Err(e) = + self.after_finalization(&new_cached_head, new_view, finalized_proto_block) + { + crit!( + self.log, + "Error updating finalization"; + "error" => ?e + ); + } + } + + // The execution layer updates might attempt to take a write-lock on fork choice, so it's + // important to ensure the fork-choice lock isn't being held. + let el_update_handle = + spawn_execution_layer_updates(self.clone(), new_forkchoice_update_parameters)?; + + // We have completed recomputing the head and it's now valid for another process to do the + // same. 
+ drop(recompute_head_lock); + + Ok(Some(el_update_handle)) + } + + /// Perform updates to caches and other components after the canonical head has been changed. + fn after_new_head( + self: &Arc, + old_cached_head: &CachedHead, + new_cached_head: &CachedHead, + new_head_proto_block: ProtoBlock, + ) -> Result<(), Error> { + let old_snapshot = &old_cached_head.snapshot; + let new_snapshot = &new_cached_head.snapshot; + + // Detect and potentially report any re-orgs. + let reorg_distance = detect_reorg( + &old_snapshot.beacon_state, + old_snapshot.beacon_block_root, + &new_snapshot.beacon_state, + new_snapshot.beacon_block_root, + &self.spec, + &self.log, + ); + + // Determine if the new head is in a later epoch to the previous head. + let is_epoch_transition = old_snapshot + .beacon_block + .slot() + .epoch(T::EthSpec::slots_per_epoch()) + < new_snapshot + .beacon_state + .slot() + .epoch(T::EthSpec::slots_per_epoch()); + + // These fields are used for server-sent events. + let state_root = new_snapshot.beacon_state_root(); + let head_slot = new_snapshot.beacon_state.slot(); + let dependent_root = new_snapshot + .beacon_state + .proposer_shuffling_decision_root(self.genesis_block_root); + let prev_dependent_root = new_snapshot + .beacon_state + .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); + + // Update the snapshot cache with the latest head value. + // + // This *could* be done inside `recompute_head`, however updating the head on the snapshot + // cache is not critical so we avoid placing it on a critical path. Note that this function + // will not return an error if the update fails, it will just log an error. + self.snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .map(|mut snapshot_cache| { + snapshot_cache.update_head(new_snapshot.beacon_block_root); + }) + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "snapshot_cache", + "task" => "update head" + ); + }); + + observe_head_block_delays( + &mut self.block_times_cache.write(), + &new_head_proto_block, + new_snapshot.beacon_block.message().proposer_index(), + new_snapshot + .beacon_block + .message() + .body() + .graffiti() + .as_utf8_lossy(), + &self.slot_clock, + self.event_handler.as_ref(), + &self.log, + ); + + if is_epoch_transition || reorg_distance.is_some() { + self.persist_head_and_fork_choice()?; + self.op_pool.prune_attestations(self.epoch()?); + } + + // Register server-sent-events for a new head. + if let Some(event_handler) = self + .event_handler + .as_ref() + .filter(|handler| handler.has_head_subscribers()) + { + match (dependent_root, prev_dependent_root) { + (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { + event_handler.register(EventKind::Head(SseHead { + slot: head_slot, + block: new_snapshot.beacon_block_root, + state: state_root, + current_duty_dependent_root, + previous_duty_dependent_root, + epoch_transition: is_epoch_transition, + })); + } + (Err(e), _) | (_, Err(e)) => { + warn!( + self.log, + "Unable to find dependent roots, cannot register head event"; + "error" => ?e + ); + } + } + } + + // Register a server-sent-event for a reorg (if necessary). 
+ if let Some(depth) = reorg_distance { + if let Some(event_handler) = self + .event_handler + .as_ref() + .filter(|handler| handler.has_reorg_subscribers()) + { + event_handler.register(EventKind::ChainReorg(SseChainReorg { + slot: head_slot, + depth: depth.as_u64(), + old_head_block: old_snapshot.beacon_block_root, + old_head_state: old_snapshot.beacon_state_root(), + new_head_block: new_snapshot.beacon_block_root, + new_head_state: new_snapshot.beacon_state_root(), + epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), + })); + } + } + + Ok(()) + } + + /// Perform updates to caches and other components after the finalized checkpoint has been + /// changed. + fn after_finalization( + self: &Arc, + new_cached_head: &CachedHead, + new_view: ForkChoiceView, + finalized_proto_block: ProtoBlock, + ) -> Result<(), Error> { + let new_snapshot = &new_cached_head.snapshot; + + self.op_pool + .prune_all(&new_snapshot.beacon_state, self.epoch()?); + + self.observed_block_producers.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + + self.snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .map(|mut snapshot_cache| { + snapshot_cache.prune(new_view.finalized_checkpoint.epoch); + debug!( + self.log, + "Snapshot cache pruned"; + "new_len" => snapshot_cache.len(), + "remaining_roots" => ?snapshot_cache.beacon_block_roots(), + ); + }) + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "snapshot_cache", + "task" => "prune" + ); + }); + + self.attester_cache + .prune_below(new_view.finalized_checkpoint.epoch); + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_finalized_subscribers() { + event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { + epoch: new_view.finalized_checkpoint.epoch, + block: new_view.finalized_checkpoint.root, + // Provide the state root of the latest finalized block, rather than the + // specific state root at the first slot of the finalized epoch (which + // might be a skip slot). + state: finalized_proto_block.state_root, + })); + } + } + + // The store migration task requires the *state at the slot of the finalized epoch*, + // rather than the state of the latest finalized block. These two values will only + // differ when the first slot of the finalized epoch is a skip slot. + // + // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` + // to ensure we use the same state that we just set as the head. + let new_finalized_slot = new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let new_finalized_state_root = process_results( + StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), + |mut iter| { + iter.find_map(|(state_root, slot)| { + if slot == new_finalized_slot { + Some(state_root) + } else { + None + } + }) + }, + )? + .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; + + self.store_migrator.process_finalization( + new_finalized_state_root.into(), + new_view.finalized_checkpoint, + self.head_tracker.clone(), + )?; + + Ok(()) + } + + /// Return a database operation for writing fork choice to disk. + pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { + Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock()) + } + + /// Return a database operation for writing fork choice to disk. 
+ pub fn persist_fork_choice_in_batch_standalone( + fork_choice: &BeaconForkChoice, + ) -> KeyValueStoreOp { + let persisted_fork_choice = PersistedForkChoice { + fork_choice: fork_choice.to_persisted(), + fork_choice_store: fork_choice.fc_store().to_persisted(), + }; + persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) + } +} + +/// Check to see if the `finalized_proto_block` has an invalid execution payload. If so, shut down +/// Lighthouse. +/// +/// ## Notes +/// +/// This function is called whilst holding a write-lock on the `canonical_head`. To ensure dead-lock +/// safety, **do not take any other locks inside this function**. +fn check_finalized_payload_validity( + chain: &BeaconChain, + finalized_proto_block: &ProtoBlock, +) -> Result<(), Error> { + if let ExecutionStatus::Invalid(block_hash) = finalized_proto_block.execution_status { + crit!( + chain.log, + "Finalized block has an invalid payload"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block_hash + ); + let mut shutdown_sender = chain.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Finalized block has an invalid execution payload.", + )) + .map_err(Error::InvalidFinalizedPayloadShutdownError)?; + + // Exit now, the node is in an invalid state. + return Err(Error::InvalidFinalizedPayload { + finalized_root: finalized_proto_block.root, + execution_block_hash: block_hash, + }); + } + + Ok(()) +} + +/// Check to ensure that the transition from `old_view` to `new_view` will not revert finality. +fn check_against_finality_reversion( + old_view: &ForkChoiceView, + new_view: &ForkChoiceView, +) -> Result<(), Error> { + let finalization_equal = new_view.finalized_checkpoint == old_view.finalized_checkpoint; + let finalization_advanced = + new_view.finalized_checkpoint.epoch > old_view.finalized_checkpoint.epoch; + + if finalization_equal || finalization_advanced { + Ok(()) + } else { + Err(Error::RevertedFinalizedEpoch { + old: old_view.finalized_checkpoint, + new: new_view.finalized_checkpoint, + }) + } +} + +fn perform_debug_logging( + old_view: &ForkChoiceView, + new_view: &ForkChoiceView, + fork_choice: &BeaconForkChoice, + log: &Logger, +) { + if new_view.head_block_root != old_view.head_block_root { + debug!( + log, + "Fork choice updated head"; + "new_head_weight" => ?fork_choice + .get_block_weight(&new_view.head_block_root), + "new_head" => ?new_view.head_block_root, + "old_head_weight" => ?fork_choice + .get_block_weight(&old_view.head_block_root), + "old_head" => ?old_view.head_block_root, + ) + } + if new_view.justified_checkpoint != old_view.justified_checkpoint { + debug!( + log, + "Fork choice justified"; + "new_root" => ?new_view.justified_checkpoint.root, + "new_epoch" => new_view.justified_checkpoint.epoch, + "old_root" => ?old_view.justified_checkpoint.root, + "old_epoch" => old_view.justified_checkpoint.epoch, + ) + } + if new_view.finalized_checkpoint != old_view.finalized_checkpoint { + debug!( + log, + "Fork choice finalized"; + "new_root" => ?new_view.finalized_checkpoint.root, + "new_epoch" => new_view.finalized_checkpoint.epoch, + "old_root" => ?old_view.finalized_checkpoint.root, + "old_epoch" => old_view.finalized_checkpoint.epoch, + ) + } +} + +fn spawn_execution_layer_updates( + chain: Arc>, + forkchoice_update_params: ForkchoiceUpdateParameters, +) -> Result>, Error> { + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(Error::UnableToReadSlot)?; + + 
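+    // The `JoinHandle` is returned to the caller rather than awaited here;
+    // `recompute_head_at_slot` awaits it only after the fork choice and cached-head
+    // locks have been released (see `recompute_head_at_slot_internal` above).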
chain + .task_executor + .clone() + .spawn_handle( + async move { + // Avoids raising an error before Bellatrix. + // + // See `Self::prepare_beacon_proposer` for more detail. + if chain.slot_is_prior_to_bellatrix(current_slot + 1) { + return; + } + + if let Err(e) = chain + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await + { + crit!( + chain.log, + "Failed to update execution head"; + "error" => ?e + ); + } + + // Update the mechanism for preparing for block production on the execution layer. + // + // Performing this call immediately after `update_execution_engine_forkchoice_blocking` + // might result in two calls to fork choice updated, one *without* payload attributes and + // then a second *with* payload attributes. + // + // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as far as I + // know. + if let Err(e) = chain.prepare_beacon_proposer(current_slot).await { + crit!( + chain.log, + "Failed to prepare proposers after fork choice"; + "error" => ?e + ); + } + }, + "update_el_forkchoice", + ) + .ok_or(Error::RuntimeShutdown) +} + +/// Attempt to detect if the new head is not on the same chain as the previous block +/// (i.e., a re-org). +/// +/// Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks +/// between calls to fork choice without swapping between chains. This seems like an +/// extreme-enough scenario that a warning is fine. +fn detect_reorg( + old_state: &BeaconState, + old_block_root: Hash256, + new_state: &BeaconState, + new_block_root: Hash256, + spec: &ChainSpec, + log: &Logger, +) -> Option { + let is_reorg = new_state + .get_block_root(old_state.slot()) + .map_or(true, |root| *root != old_block_root); + + if is_reorg { + let reorg_distance = + match find_reorg_slot(old_state, old_block_root, new_state, new_block_root, spec) { + Ok(slot) => old_state.slot().saturating_sub(slot), + Err(e) => { + warn!( + log, + "Could not find re-org depth"; + "error" => format!("{:?}", e), + ); + return None; + } + }; + + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); + warn!( + log, + "Beacon chain re-org"; + "previous_head" => ?old_block_root, + "previous_slot" => old_state.slot(), + "new_head" => ?new_block_root, + "new_slot" => new_state.slot(), + "reorg_distance" => reorg_distance, + ); + + Some(reorg_distance) + } else { + None + } +} + +/// Iterate through the current chain to find the slot intersecting with the given beacon state. +/// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached +/// and no intersection is found, the finalized slot will be returned. +pub fn find_reorg_slot( + old_state: &BeaconState, + old_block_root: Hash256, + new_state: &BeaconState, + new_block_root: Hash256, + spec: &ChainSpec, +) -> Result { + // The earliest slot for which the two chains may have a common history. + let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); + + // Create an iterator across `$state`, assuming that the block at `$state.slot` has the + // block root of `$block_root`. + // + // The iterator will be skipped until the next value returns `lowest_slot`. + // + // This is a macro instead of a function or closure due to the complex types invloved + // in all the iterator wrapping. + macro_rules! 
aligned_roots_iter { + ($state: ident, $block_root: ident) => { + std::iter::once(Ok(($state.slot(), $block_root))) + .chain($state.rev_iter_block_roots(spec)) + .skip_while(|result| { + result + .as_ref() + .map_or(false, |(slot, _)| *slot > lowest_slot) + }) + }; + } + + // Create iterators across old/new roots where iterators both start at the same slot. + let mut new_roots = aligned_roots_iter!(new_state, new_block_root); + let mut old_roots = aligned_roots_iter!(old_state, old_block_root); + + // Whilst *both* of the iterators are still returning values, try and find a common + // ancestor between them. + while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { + let (old_slot, old_root) = old?; + let (new_slot, new_root) = new?; + + // Sanity check to detect programming errors. + if old_slot != new_slot { + return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); + } + + if old_root == new_root { + // A common ancestor has been found. + return Ok(old_slot); + } + } + + // If no common ancestor is found, declare that the re-org happened at the previous + // finalized slot. + // + // Sometimes this will result in the return slot being *lower* than the actual reorg + // slot. However, assuming we don't re-org through a finalized slot, it will never be + // *higher*. + // + // We provide this potentially-inaccurate-but-safe information to avoid onerous + // database reads during times of deep reorgs. + Ok(old_state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch())) +} + +fn observe_head_block_delays( + block_times_cache: &mut BlockTimesCache, + head_block: &ProtoBlock, + head_block_proposer_index: u64, + head_block_graffiti: String, + slot_clock: &S, + event_handler: Option<&ServerSentEventHandler>, + log: &Logger, +) { + let block_time_set_as_head = timestamp_now(); + let head_block_root = head_block.root; + let head_block_slot = head_block.slot; + + // Calculate the total delay between the start of the slot and when it was set as head. + let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); + + // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to + // the cache during sync. + if block_delay_total < slot_clock.slot_duration() * 64 { + block_times_cache.set_time_set_as_head( + head_block_root, + head_block_slot, + block_time_set_as_head, + ); + } + + // If a block comes in from over 4 slots ago, it is most likely a block from sync. + let block_from_sync = block_delay_total > slot_clock.slot_duration() * 4; + + // Determine whether the block has been set as head too late for proper attestation + // production. + let late_head = block_delay_total >= slot_clock.unagg_attestation_production_delay(); + + // Do not store metrics if the block was > 4 slots old, this helps prevent noise during + // sync. + if !block_from_sync { + // Observe the total block delay. This is the delay between the time the slot started + // and when the block was set as head. + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, + block_delay_total, + ); + + // Observe the delay between when we imported the block and when we set the block as + // head. 
+ let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, + block_delays + .observed + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, + block_delays + .set_as_head + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + // If the block was enshrined as head too late for attestations to be created for it, + // log a debug warning and increment a metric. + if late_head { + metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); + debug!( + log, + "Delayed head block"; + "block_root" => ?head_block_root, + "proposer_index" => head_block_proposer_index, + "slot" => head_block_slot, + "block_delay" => ?block_delay_total, + "observed_delay" => ?block_delays.observed, + "imported_delay" => ?block_delays.imported, + "set_as_head_delay" => ?block_delays.set_as_head, + ); + } + } + + if let Some(event_handler) = event_handler { + if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { + let peer_info = block_times_cache.get_peer_info(head_block_root); + let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + event_handler.register(EventKind::LateHead(SseLateHead { + slot: head_block_slot, + block: head_block_root, + peer_id: peer_info.id, + peer_client: peer_info.client, + proposer_index: head_block_proposer_index, + proposer_graffiti: head_block_graffiti, + block_delay: block_delay_total, + observed_delay: block_delays.observed, + imported_delay: block_delays.imported, + set_as_head_delay: block_delays.set_as_head, + })); + } + } +} diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index f589585f8a6..62b584968fc 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -4,6 +4,7 @@ use crate::{ }; use parking_lot::RwLock; use proto_array::Block as ProtoBlock; +use std::sync::Arc; use types::*; pub struct CacheItem { @@ -18,7 +19,7 @@ pub struct CacheItem { /* * Values used to make the block available. */ - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, } @@ -48,7 +49,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -146,7 +147,7 @@ impl EarlyAttesterCache { } /// Returns the block, if `block_root` matches the cached item. 
- pub fn get_block(&self, block_root: Hash256) -> Option> { + pub fn get_block(&self, block_root: Hash256) -> Option>> { self.item .read() .as_ref() diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 834823992ac..d3337dfafe2 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -45,8 +45,8 @@ pub enum BeaconChainError { UnableToReadSlot, UnableToComputeTimeAtSlot, RevertedFinalizedEpoch { - previous_epoch: Epoch, - new_epoch: Epoch, + old: Checkpoint, + new: Checkpoint, }, SlotClockDidNotStart, NoStateForSlot(Slot), @@ -161,6 +161,7 @@ pub enum BeaconChainError { BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), + HeadBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayload { finalized_root: Hash256, execution_block_hash: ExecutionBlockHash, @@ -184,11 +185,19 @@ pub enum BeaconChainError { beacon_block_root: Hash256, }, RuntimeShutdown, + TokioJoin(tokio::task::JoinError), ProcessInvalidExecutionPayload(JoinError), ForkChoiceSignalOutOfOrder { current: Slot, latest: Slot, }, + ForkchoiceUpdateParamsMissing, + HeadHasInvalidPayload { + block_root: Hash256, + execution_status: ExecutionStatus, + }, + AttestationHeadNotInForkChoice(Hash256), + MissingPersistedForkChoice, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -214,7 +223,6 @@ easy_from_to!(BlockReplayError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { - UnableToGetHeadInfo(BeaconChainError), UnableToGetBlockRootFromState, UnableToReadSlot, UnableToProduceAtSlot(Slot), @@ -239,6 +247,11 @@ pub enum BlockProductionError { MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ForkChoiceError(BeaconChainError), + ShuttingDown, + MissingSyncAggregate, + MissingExecutionPayload, + TokioJoin(tokio::task::JoinError), + BeaconChain(BeaconChainError), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 08e4cd41efd..1ac42610f20 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -21,8 +21,59 @@ use state_processing::per_block_processing::{ partially_verify_execution_payload, }; use std::sync::Arc; +use tokio::task::JoinHandle; use types::*; +pub type PreparePayloadResult = Result; +pub type PreparePayloadHandle = JoinHandle>>; + +/// Used to await the result of executing payload with a remote EE. +pub struct PayloadNotifier { + pub chain: Arc>, + pub block: Arc>, + payload_verification_status: Option, +} + +impl PayloadNotifier { + pub fn new( + chain: Arc>, + block: Arc>, + state: &BeaconState, + ) -> Result> { + let payload_verification_status = if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. 
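+            // (These are the lightweight consistency checks, e.g. parent hash, `prev_randao` and
+            // timestamp; actually executing the payload is deferred until `notify_new_payload` is
+            // called on the returned `PayloadNotifier`.)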
+ partially_verify_execution_payload( + state, + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + }; + + Ok(Self { + chain, + block, + payload_verification_status, + }) + } + + pub async fn notify_new_payload( + self, + ) -> Result> { + if let Some(precomputed_status) = self.payload_verification_status { + Ok(precomputed_status) + } else { + notify_new_payload(&self.chain, self.block.message()).await + } + } +} + /// Verify that `execution_payload` contained by `block` is considered valid by an execution /// engine. /// @@ -32,31 +83,20 @@ use types::*; /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -pub fn notify_new_payload( +async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, - state: &BeaconState, - block: BeaconBlockRef, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result> { - if !is_execution_enabled(state, block.body()) { - return Ok(PayloadVerificationStatus::Irrelevant); - } - let execution_payload = block.execution_payload()?; - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution payload from junk. - partially_verify_execution_payload(state, execution_payload, &chain.spec) - .map_err(BlockError::PerBlockProcessingError)?; - let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let new_payload_response = execution_layer.block_on(|execution_layer| { - execution_layer.notify_new_payload(&execution_payload.execution_payload) - }); + + let new_payload_response = execution_layer + .notify_new_payload(&execution_payload.execution_payload) + .await; match new_payload_response { Ok(status) => match status { @@ -70,13 +110,13 @@ pub fn notify_new_payload( // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. 
let latest_root = block.parent_root(); - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { + chain + .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { head_block_root: latest_root, always_invalidate_head: false, latest_valid_ancestor: latest_valid_hash, - }, - )?; + }) + .await?; Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } @@ -103,9 +143,9 @@ pub fn notify_new_payload( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub fn validate_merge_block( - chain: &BeaconChain, - block: BeaconBlockRef, +pub async fn validate_merge_block<'a, T: BeaconChainTypes>( + chain: &Arc>, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result<(), BlockError> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -137,9 +177,8 @@ pub fn validate_merge_block( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let is_valid_terminal_pow_block = execution_layer - .block_on(|execution_layer| { - execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) - }) + .is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) + .await .map_err(ExecutionPayloadError::from)?; match is_valid_terminal_pow_block { @@ -149,23 +188,7 @@ pub fn validate_merge_block( } .into()), None => { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; - - // Ensure the block is a candidate for optimistic import. - if chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? - { + if is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? { debug!( chain.log, "Optimistically accepting terminal block"; @@ -180,6 +203,36 @@ pub fn validate_merge_block( } } +/// Check to see if a block with the given parameters is valid to be imported optimistically. +pub async fn is_optimistic_candidate_block( + chain: &Arc>, + block_slot: Slot, + block_parent_root: Hash256, +) -> Result { + let current_slot = chain.slot()?; + let inner_chain = chain.clone(); + + // Use a blocking task to check if the block is an optimistic candidate. Interacting + // with the `fork_choice` lock in an async task can block the core executor. + chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_candidate_block( + current_slot, + block_slot, + &block_parent_root, + &inner_chain.spec, + ) + }, + "validate_merge_block_optimistic_candidate", + ) + .await? + .map_err(BeaconChainError::from) +} + /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( @@ -243,33 +296,49 @@ pub fn validate_execution_payload_for_gossip( /// Equivalent to the `get_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub fn get_execution_payload>( - chain: &BeaconChain, - state: &BeaconState, - proposer_index: u64, -) -> Result { - Ok( - prepare_execution_payload_blocking::(chain, state, proposer_index)? 
- .unwrap_or_default(), - ) -} - -/// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, +pub fn get_execution_payload< + T: BeaconChainTypes, + Payload: ExecPayload + Default + Send + 'static, +>( + chain: Arc>, state: &BeaconState, + finalized_checkpoint: Checkpoint, proposer_index: u64, -) -> Result, BlockProductionError> { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BlockProductionError::ExecutionLayerMissing)?; +) -> Result, BlockProductionError> { + // Compute all required values from the `state` now to avoid needing to pass it into a spawned + // task. + let spec = &chain.spec; + let current_epoch = state.current_epoch(); + let is_merge_transition_complete = is_merge_transition_complete(state); + let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(state.current_epoch())?; + let latest_execution_payload_header_block_hash = + state.latest_execution_payload_header()?.block_hash; + + // Spawn a task to obtain the execution payload from the EL via a series of async calls. The + // `join_handle` can be used to await the result of the function. + let join_handle = chain + .task_executor + .clone() + .spawn_handle( + async move { + prepare_execution_payload::( + &chain, + current_epoch, + is_merge_transition_complete, + timestamp, + random, + finalized_checkpoint, + proposer_index, + latest_execution_payload_header_block_hash, + ) + .await + }, + "get_execution_payload", + ) + .ok_or(BlockProductionError::ShuttingDown)?; - execution_layer - .block_on_generic(|_| async { - prepare_execution_payload::(chain, state, proposer_index).await - }) - .map_err(BlockProductionError::BlockingFailed)? + Ok(join_handle) } /// Prepares an execution payload for inclusion in a block. @@ -286,24 +355,36 @@ pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, - state: &BeaconState, +#[allow(clippy::too_many_arguments)] +pub async fn prepare_execution_payload( + chain: &Arc>, + current_epoch: Epoch, + is_merge_transition_complete: bool, + timestamp: u64, + random: Hash256, + finalized_checkpoint: Checkpoint, proposer_index: u64, -) -> Result, BlockProductionError> { + latest_execution_payload_header_block_hash: ExecutionBlockHash, +) -> Result +where + T: BeaconChainTypes, + Payload: ExecPayload + Default, +{ let spec = &chain.spec; let execution_layer = chain .execution_layer .as_ref() .ok_or(BlockProductionError::ExecutionLayerMissing)?; - let parent_hash = if !is_merge_transition_complete(state) { + let parent_hash = if !is_merge_transition_complete { let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); let is_activation_epoch_reached = - state.current_epoch() >= spec.terminal_block_hash_activation_epoch; + current_epoch >= spec.terminal_block_hash_activation_epoch; if is_terminal_block_hash_set && !is_activation_epoch_reached { - return Ok(None); + // Use the "empty" payload if there's a terminal block hash, but we haven't reached the + // terminal block epoch yet. 
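Note: `get_execution_payload` no longer blocks on the execution layer; it spawns the async work and returns a `PreparePayloadHandle` immediately, so block production can overlap the EL round-trip with the rest of state processing. A rough sketch of the caller side, assuming (per the `PreparePayloadHandle` alias above) that the spawned task resolves to `None` when the executor is shutting down; the function name is hypothetical:

// Illustrative sketch, not part of the patch.
async fn produce_payload_example<T, Payload>(
    chain: Arc<BeaconChain<T>>,
    state: &BeaconState<T::EthSpec>,
    finalized_checkpoint: Checkpoint,
    proposer_index: u64,
) -> Result<Payload, BlockProductionError>
where
    T: BeaconChainTypes,
    Payload: ExecPayload<T::EthSpec> + Default + Send + 'static,
{
    // Kick off the EL interaction on the executor and keep the handle.
    let handle: PreparePayloadHandle<Payload> =
        get_execution_payload(chain, state, finalized_checkpoint, proposer_index)?;

    // ...other block-production work can proceed here...

    // Await the payload; `None` indicates the node is shutting down.
    handle
        .await
        .map_err(BlockProductionError::TokioJoin)?
        .ok_or(BlockProductionError::ShuttingDown)?
}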
+ return Ok(<_>::default()); } let terminal_pow_block_hash = execution_layer @@ -314,36 +395,55 @@ pub async fn prepare_execution_payload::default()); } } else { - state.latest_execution_payload_header()?.block_hash + latest_execution_payload_header_block_hash }; - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; - let random = *state.get_randao_mix(state.current_epoch())?; - let finalized_root = state.finalized_checkpoint().root; + // Try to obtain the finalized proto block from fork choice. + // + // Use a blocking task to interact with the `fork_choice` lock otherwise we risk blocking the + // core `tokio` executor. + let inner_chain = chain.clone(); + let finalized_proto_block = chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .get_block(&finalized_checkpoint.root) + }, + "prepare_execution_payload_finalized_hash", + ) + .await + .map_err(BlockProductionError::BeaconChain)?; // The finalized block hash is not included in the specification, however we provide this // parameter so that the execution layer can produce a payload id if one is not already known // (e.g., due to a recent reorg). - let finalized_block_hash = - if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { - block.execution_status.block_hash() - } else { - chain - .store - .get_blinded_block(&finalized_root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()) - }; + let finalized_block_hash = if let Some(block) = finalized_proto_block { + block.execution_status.block_hash() + } else { + chain + .store + .get_blinded_block(&finalized_checkpoint.root) + .map_err(BlockProductionError::FailedToReadFinalizedBlock)? + .ok_or(BlockProductionError::MissingFinalizedBlock( + finalized_checkpoint.root, + ))? + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash()) + }; // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. + // + // This future is not executed here, it's up to the caller to await it. let execution_payload = execution_layer .get_payload::( parent_hash, @@ -355,5 +455,5 @@ pub async fn prepare_execution_payload, Cold: It head_block_root: Hash256, head_state: &BeaconState, store: Arc>, + current_slot: Option, spec: &ChainSpec, ) -> Result, E>, String> { // Fetch finalized block. @@ -138,7 +139,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It })?; let finalized_snapshot = BeaconSnapshot { beacon_block_root: finalized_block_root, - beacon_block: finalized_block, + beacon_block: Arc::new(finalized_block), beacon_state: finalized_state, }; @@ -149,6 +150,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It finalized_block_root, &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, + current_slot, + spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; @@ -180,11 +183,10 @@ pub fn reset_fork_choice_to_finalization, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. let payload_verification_status = PayloadVerificationStatus::Optimistic; - let (block, _) = block.deconstruct(); fork_choice .on_block( block.slot(), - &block, + block.message(), block.canonical_root(), // Reward proposer boost. We are reinforcing the canonical chain. 
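Note: both `is_optimistic_candidate_block` and the finalized-block lookup above use the same pattern: any read of the fork-choice lock is pushed onto a blocking thread via `spawn_blocking_handle`, so the core tokio workers are never parked on a lock. The pattern in isolation, as a sketch (the function name and closure body are illustrative; the helper names are the ones in this diff):

// Illustrative sketch, not part of the patch.
async fn finalized_block_hash_example<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    finalized_root: Hash256,
) -> Result<Option<ExecutionBlockHash>, BlockProductionError> {
    let inner_chain = chain.clone();
    chain
        .spawn_blocking_handle(
            move || {
                // Holding the fork-choice read lock is fine here: this closure runs on a
                // dedicated blocking thread, not on an async worker.
                inner_chain
                    .canonical_head
                    .fork_choice_read_lock()
                    .get_block(&finalized_root)
                    .and_then(|block| block.execution_status.block_hash())
            },
            "example_fork_choice_read",
        )
        .await
        .map_err(BlockProductionError::BeaconChain)
}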
Duration::from_secs(0), diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1891362ebbd..cc45a6bb9a9 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -7,6 +7,7 @@ use state_processing::{ }; use std::borrow::Cow; use std::iter; +use std::sync::Arc; use std::time::Duration; use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; use types::{Hash256, SignedBlindedBeaconBlock, Slot}; @@ -58,7 +59,7 @@ impl BeaconChain { /// Return the number of blocks successfully imported. pub fn import_historical_block_batch( &self, - blocks: Vec>, + blocks: Vec>>, ) -> Result { let anchor_info = self .store diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 579020b1d1e..b82b690d20c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,6 +9,7 @@ pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; +pub mod canonical_head; pub mod chain_config; mod early_attester_cache; mod errors; @@ -42,8 +43,8 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, HeadInfo, HeadSafetyStatus, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; @@ -52,8 +53,10 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; +pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; +pub use fork_choice::ExecutionStatus; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 18abbc8c5bf..9cd177b3409 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -51,9 +51,7 @@ async fn proposer_prep_service( executor.spawn( async move { if let Ok(current_slot) = inner_chain.slot() { - if let Err(e) = inner_chain - .prepare_beacon_proposer_async(current_slot) - .await + if let Err(e) = inner_chain.prepare_beacon_proposer(current_slot).await { error!( inner_chain.log, diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index b29943bfb93..ff98e25c1d8 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -10,6 +10,7 @@ use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{ PersistedForkChoiceV1, PersistedForkChoiceV7, PersistedForkChoiceV8, }; +use crate::types::ChainSpec; use slog::{warn, Logger}; use std::path::Path; use std::sync::Arc; @@ -24,6 +25,7 @@ pub fn migrate_schema( from: SchemaVersion, to: 
SchemaVersion, log: Logger, + spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to iself is always OK, a no-op. @@ -31,8 +33,8 @@ pub fn migrate_schema( // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone())?; - migrate_schema::(db, datadir, next, to, log) + migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; + migrate_schema::(db, datadir, next, to, log, spec) } // @@ -92,6 +94,7 @@ pub fn migrate_schema( migration_schema_v7::update_with_reinitialized_fork_choice::( &mut persisted_fork_choice_v7, db.clone(), + spec, ) .map_err(StoreError::SchemaMigrationError)?; } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index e31e72b588f..81147b8af67 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -3,8 +3,7 @@ use crate::beacon_chain::BeaconChainTypes; use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; -use crate::types::{Checkpoint, Epoch, Hash256}; -use crate::types::{EthSpec, Slot}; +use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::ForkChoice; use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice}; @@ -25,6 +24,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); pub(crate) fn update_with_reinitialized_fork_choice( persisted_fork_choice: &mut PersistedForkChoiceV7, db: Arc>, + spec: &ChainSpec, ) -> Result<(), String> { let anchor_block_root = persisted_fork_choice .fork_choice_store @@ -39,7 +39,7 @@ pub(crate) fn update_with_reinitialized_fork_choice( .map_err(|e| format!("{:?}", e))? .ok_or_else(|| "Missing anchor beacon state".to_string())?; let snapshot = BeaconSnapshot { - beacon_block: anchor_block, + beacon_block: Arc::new(anchor_block), beacon_block_root: anchor_block_root, beacon_state: anchor_state, }; @@ -49,6 +49,10 @@ pub(crate) fn update_with_reinitialized_fork_choice( anchor_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + // Don't provide the current slot here, just use what's in the store. We don't need to know + // the head here, plus it's nice to avoid mutating fork choice during this process. + None, + spec, ) .map_err(|e| format!("{:?}", e))?; persisted_fork_choice.fork_choice = fork_choice.to_persisted(); diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 5a287daf0f1..0bbd4419b9d 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -47,6 +47,12 @@ impl ShufflingCache { } } +impl Default for ShufflingCache { + fn default() -> Self { + Self::new() + } +} + /// Contains the shuffling IDs for a beacon block. 
pub struct BlockShufflingIds { pub current: AttestationShufflingId, diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 55855813629..f4a0a79dc75 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -1,6 +1,7 @@ use crate::BeaconSnapshot; use itertools::process_results; use std::cmp; +use std::sync::Arc; use std::time::Duration; use types::{ beacon_state::CloneConfig, BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, @@ -33,7 +34,7 @@ impl From> for PreProcessingSnapshot { Self { pre_state: snapshot.beacon_state, beacon_state_root, - beacon_block: snapshot.beacon_block.into(), + beacon_block: snapshot.beacon_block.clone_as_blinded(), beacon_block_root: snapshot.beacon_block_root, } } @@ -63,7 +64,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.into(), + beacon_block: self.beacon_block.clone_as_blinded(), beacon_block_root: self.beacon_block_root, pre_state: self.pre_state.unwrap_or(self.beacon_state), beacon_state_root, @@ -76,7 +77,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.clone().into(), + beacon_block: self.beacon_block.clone_as_blinded(), beacon_block_root: self.beacon_block_root, pre_state: self .pre_state @@ -116,7 +117,7 @@ pub enum StateAdvance { /// The item stored in the `SnapshotCache`. pub struct CacheItem { - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, /// This state is equivalent to `self.beacon_block.state_root()`. beacon_state: BeaconState, @@ -185,7 +186,7 @@ impl SnapshotCache { ) { let parent_root = snapshot.beacon_block.message().parent_root(); let item = CacheItem { - beacon_block: snapshot.beacon_block, + beacon_block: snapshot.beacon_block.clone(), beacon_block_root: snapshot.beacon_block_root, beacon_state: snapshot.beacon_state, pre_state, @@ -385,7 +386,7 @@ mod test { fn get_snapshot(i: u64) -> BeaconSnapshot { let spec = MainnetEthSpec::default_spec(); - let beacon_state = get_harness().chain.head_beacon_state().unwrap(); + let beacon_state = get_harness().chain.head_beacon_state_cloned(); let signed_beacon_block = SignedBeaconBlock::from_block( BeaconBlock::empty(&spec), @@ -396,7 +397,7 @@ mod test { BeaconSnapshot { beacon_state, - beacon_block: signed_beacon_block, + beacon_block: Arc::new(signed_beacon_block), beacon_block_root: Hash256::from_low_u64_be(i), } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 030507a83a0..5abec988775 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -213,16 +213,14 @@ async fn state_advance_timer( let log = log.clone(); let beacon_chain = beacon_chain.clone(); let next_slot = current_slot + 1; - executor.spawn_blocking( - move || { + executor.spawn( + async move { // Don't run fork choice during sync. 
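Note: with the snapshot cache's `CacheItem` now holding an `Arc`-wrapped block, inserting a head block into the cache only bumps a reference count; the only real copy is the blinded block produced by `clone_as_blinded()` for the pre-processing snapshot. A small sketch of that cost distinction (the function name is illustrative, and the `SignedBlindedBeaconBlock` return type is assumed from the helper added in `consensus/types` by this patch):

// Illustrative sketch, not part of the patch.
fn share_block_example<E: EthSpec>(
    block: Arc<SignedBeaconBlock<E>>,
) -> (Arc<SignedBeaconBlock<E>>, SignedBlindedBeaconBlock<E>) {
    // Cheap: clones the `Arc`, not the block body.
    let for_cache = block.clone();
    // A real copy, but of the blinded form (full payload replaced by its header).
    let blinded = block.clone_as_blinded();
    (for_cache, blinded)
}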
- if beacon_chain.best_slot().map_or(true, |head_slot| { - head_slot + MAX_FORK_CHOICE_DISTANCE < current_slot - }) { + if beacon_chain.best_slot() + MAX_FORK_CHOICE_DISTANCE < current_slot { return; } - if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { + if let Err(e) = beacon_chain.recompute_head_at_slot(next_slot).await { warn!( log, "Error updating fork choice for next slot"; @@ -231,17 +229,24 @@ async fn state_advance_timer( ); } - // Signal block proposal for the next slot (if it happens to be waiting). - if let Some(tx) = &beacon_chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(next_slot) { - warn!( - log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => next_slot, - ); - } - } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + beacon_chain.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance_signal_tx", + ); }, "fork_choice_advance", ); @@ -264,7 +269,7 @@ fn advance_head( // // Fork-choice is not run *before* this function to avoid unnecessary calls whilst syncing. { - let head_slot = beacon_chain.head_info()?.slot; + let head_slot = beacon_chain.best_slot(); // Don't run this when syncing or if lagging too far behind. if head_slot + MAX_ADVANCE_DISTANCE < current_slot { @@ -275,7 +280,7 @@ fn advance_head( } } - let head_root = beacon_chain.head_info()?.block_root; + let head_root = beacon_chain.head_beacon_block_root(); let (head_slot, head_state_root, mut state) = match beacon_chain .snapshot_cache diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a47c41edccd..9ca1ceec451 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -515,13 +515,38 @@ where } pub fn get_current_state(&self) -> BeaconState { - self.chain.head().unwrap().beacon_state + self.chain.head_beacon_state_cloned() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { - let head = self.chain.head().unwrap(); + let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - (head.beacon_state, state_root) + ( + head.beacon_state.clone_with_only_committee_caches(), + state_root, + ) + } + + pub fn head_slot(&self) -> Slot { + self.chain.canonical_head.cached_head().head_slot() + } + + pub fn head_block_root(&self) -> Hash256 { + self.chain.canonical_head.cached_head().head_block_root() + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + } + + pub fn justified_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .justified_checkpoint() } pub fn get_current_slot(&self) -> Slot { @@ -565,7 +590,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } - pub fn make_block( + pub async fn make_block( &self, mut state: BeaconState, slot: Slot, @@ -599,6 +624,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -613,7 +639,7 @@ where /// Useful for the `per_block_processing` tests. 
Creates a block, and returns the state after /// caches are built but before the generated block is processed. - pub fn make_block_return_pre_state( + pub async fn make_block_return_pre_state( &self, mut state: BeaconState, slot: Slot, @@ -649,6 +675,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -1098,11 +1125,11 @@ where let mut attestation_2 = attestation_1.clone(); attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1156,11 +1183,11 @@ where attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1182,19 +1209,14 @@ where } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { - let mut block_header_1 = self - .chain - .head_beacon_block() - .unwrap() - .message() - .block_header(); + let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = Hash256::zero(); let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; let mut signed_block_headers = vec![block_header_1, block_header_2] @@ -1212,7 +1234,7 @@ where pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { @@ -1235,7 +1257,7 @@ where /// Create a new block, apply `block_modifier` to it, sign it and return it. /// /// The state returned is a pre-block state at the same slot as the produced block. 
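Note: the harness helpers above read head data through the new `canonical_head.cached_head()` handle, a cheap snapshot that avoids taking the fork-choice or head locks on hot paths. The read pattern in isolation (method names are exactly those visible in this diff; the function itself is illustrative):

// Illustrative sketch, not part of the patch.
fn head_summary_example<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> (Slot, Hash256, Checkpoint) {
    let cached_head = chain.canonical_head.cached_head();
    // All of these are simple reads of already-computed values.
    let slot = cached_head.head_slot();
    let root = cached_head.head_block_root();
    let finalized = cached_head.finalized_checkpoint();
    let _fork = cached_head.head_fork();
    (slot, root, finalized)
}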
- pub fn make_block_with_modifier( + pub async fn make_block_with_modifier( &self, state: BeaconState, slot: Slot, @@ -1244,7 +1266,7 @@ where assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); - let (block, pre_state) = self.make_block_return_pre_state(state, slot); + let (block, pre_state) = self.make_block_return_pre_state(state, slot).await; let (mut block, _) = block.deconstruct(); let mut state = pre_state.clone(); @@ -1354,23 +1376,25 @@ where (deposits, state) } - pub fn process_block( + pub async fn process_block( &self, slot: Slot, block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice()?; + let block_hash: SignedBeaconBlockHash = + self.chain.process_block(Arc::new(block)).await?.into(); + self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } - pub fn process_block_result( + pub async fn process_block_result( &self, block: SignedBeaconBlock, ) -> Result> { - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice().unwrap(); + let block_hash: SignedBeaconBlockHash = + self.chain.process_block(Arc::new(block)).await?.into(); + self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } @@ -1425,14 +1449,14 @@ where self.chain.slot_clock.set_slot(slot.into()); } - pub fn add_block_at_slot( + pub async fn add_block_at_slot( &self, slot: Slot, state: BeaconState, ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { self.set_current_slot(slot); - let (block, new_state) = self.make_block(state, slot); - let block_hash = self.process_block(slot, block.clone())?; + let (block, new_state) = self.make_block(state, slot).await; + let block_hash = self.process_block(slot, block.clone()).await?; Ok((block_hash, block, new_state)) } @@ -1449,19 +1473,19 @@ where self.process_attestations(attestations); } - pub fn add_attested_block_at_slot( + pub async fn add_attested_block_at_slot( &self, slot: Slot, state: BeaconState, state_root: Hash256, validators: &[usize], ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { - let (block_hash, block, state) = self.add_block_at_slot(slot, state)?; + let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block, validators); Ok((block_hash, state)) } - pub fn add_attested_blocks_at_slots( + pub async fn add_attested_blocks_at_slots( &self, state: BeaconState, state_root: Hash256, @@ -1470,9 +1494,10 @@ where ) -> AddBlocksResult { assert!(!slots.is_empty()); self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None) + .await } - fn add_attested_blocks_at_slots_given_lbh( + async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, state_root: Hash256, @@ -1489,6 +1514,7 @@ where for slot in slots { let (block_hash, new_state) = self .add_attested_block_at_slot(*slot, state, state_root, validators) + .await .unwrap(); state = new_state; block_hash_from_slot.insert(*slot, block_hash); @@ -1510,7 +1536,7 @@ where /// epoch at a time. /// /// Chains is a vec of `(state, slots, validators)` tuples. 
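Note: because block production, import and head recomputation are now async, the harness-based tests below run under `#[tokio::test]` and `.await` each step. A condensed sketch of the new import flow, using only calls that appear in this diff (`get_harness` and `VALIDATOR_COUNT` are the helpers defined in these test files):

// Illustrative sketch, not part of the patch.
#[tokio::test]
async fn import_one_block_example() {
    let harness = get_harness(VALIDATOR_COUNT);
    harness.advance_slot();

    let state = harness.get_current_state();
    let slot = harness.chain.slot().unwrap();

    // Producing and importing a block are both async now.
    let (block, _post_state) = harness.make_block(state, slot).await;
    harness.chain.process_block(Arc::new(block)).await.unwrap();

    // Updating the head is an explicit, awaited step rather than a side effect.
    harness.chain.recompute_head_at_current_slot().await.unwrap();
}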
- pub fn add_blocks_on_multiple_chains( + pub async fn add_blocks_on_multiple_chains( &self, chains: Vec<(BeaconState, Vec, Vec)>, ) -> Vec> { @@ -1569,7 +1595,8 @@ where &epoch_slots, &validators, Some(head_block), - ); + ) + .await; block_hashes.extend(new_block_hashes); state_hashes.extend(new_state_hashes); @@ -1618,18 +1645,18 @@ where /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. - pub fn build_block( + pub async fn build_block( &self, state: BeaconState, slot: Slot, _block_strategy: BlockStrategy, ) -> (SignedBeaconBlock, BeaconState) { - self.make_block(state, slot) + self.make_block(state, slot).await } /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. - pub fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1640,7 +1667,7 @@ where .checked_add(1) .unwrap(); - self.extend_slots(num_slots) + self.extend_slots(num_slots).await } /// Uses `Self::extend_chain` to `num_slots` blocks. @@ -1649,8 +1676,8 @@ where /// /// - BlockStrategy::OnCanonicalHead, /// - AttestationStrategy::AllValidators, - pub fn extend_slots(&self, num_slots: usize) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_slots(&self, num_slots: usize) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1659,6 +1686,7 @@ where BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) + .await } /// Deprecated: Use add_attested_blocks_at_slots() instead @@ -1672,7 +1700,7 @@ where /// /// The `attestation_strategy` dictates which validators will attest to the newly created /// blocks. - pub fn extend_chain( + pub async fn extend_chain( &self, num_blocks: usize, block_strategy: BlockStrategy, @@ -1707,8 +1735,9 @@ where AttestationStrategy::SomeValidators(vals) => vals, }; let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, last_produced_block_hash, _) = - self.add_attested_blocks_at_slots(state, state_root, &slots, &validators); + let (_, _, last_produced_block_hash, _) = self + .add_attested_blocks_at_slots(state, state_root, &slots, &validators) + .await; last_produced_block_hash.into() } @@ -1722,41 +1751,40 @@ where /// then built `faulty_fork_blocks`. /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. - pub fn generate_two_forks_by_skipping_a_block( + pub async fn generate_two_forks_by_skipping_a_block( &self, honest_validators: &[usize], faulty_validators: &[usize], honest_fork_blocks: usize, faulty_fork_blocks: usize, ) -> (Hash256, Hash256) { - let initial_head_slot = self - .chain - .head() - .expect("should get head") - .beacon_block - .slot(); + let initial_head_slot = self.chain.head_snapshot().beacon_block.slot(); // Move to the next slot so we may produce some more blocks on the head. self.advance_slot(); // Extend the chain with blocks where only honest validators agree. 
- let honest_head = self.extend_chain( - honest_fork_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(honest_validators.to_vec()), - ); + let honest_head = self + .extend_chain( + honest_fork_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(honest_validators.to_vec()), + ) + .await; // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes // agree. - let faulty_head = self.extend_chain( - faulty_fork_blocks, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: initial_head_slot, - // `initial_head_slot + 2` means one slot is skipped. - first_slot: initial_head_slot + 2, - }, - AttestationStrategy::SomeValidators(faulty_validators.to_vec()), - ); + let faulty_head = self + .extend_chain( + faulty_fork_blocks, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: initial_head_slot, + // `initial_head_slot + 2` means one slot is skipped. + first_slot: initial_head_slot + 2, + }, + AttestationStrategy::SomeValidators(faulty_validators.to_vec()), + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index b1d1f71d6cd..85e4f1f093a 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -3,6 +3,7 @@ use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; +use std::sync::Arc; use tree_hash::TreeHash; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; @@ -17,8 +18,8 @@ lazy_static! { /// attestation at each slot from genesis through to three epochs past the head. /// /// It checks the produced attestation against some locally computed values. -#[test] -fn produces_attestations() { +#[tokio::test] +async fn produces_attestations() { let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4; let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3; @@ -37,11 +38,13 @@ fn produces_attestations() { if slot > 0 && slot <= num_blocks_produced { harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } let slot = Slot::from(slot); @@ -129,10 +132,20 @@ fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let early_attestation = { - let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + let proto_block = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root) + .unwrap(); chain .early_attester_cache - .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .add_head_block( + block_root, + Arc::new(block.clone()), + proto_block, + &state, + &chain.spec, + ) .unwrap(); chain .early_attester_cache @@ -151,8 +164,8 @@ fn produces_attestations() { /// Ensures that the early attester cache wont create an attestation to a block in a later slot than /// the one requested. 
-#[test] -fn early_attester_cache_old_request() { +#[tokio::test] +async fn early_attester_cache_old_request() { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[..].to_vec()) @@ -162,18 +175,20 @@ fn early_attester_cache_old_request() { harness.advance_slot(); - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), 2); let head_proto_block = harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head.beacon_block_root) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 2fe8818a9aa..6a9e6047938 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -56,7 +56,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness( chain: &BeaconChain, ) -> (Attestation, usize, usize, SecretKey, SubnetId) { - let head = chain.head().expect("should get head"); + let head = chain.head_snapshot(); let current_slot = chain.slot().expect("should get slot"); let mut valid_attestation = chain @@ -106,7 +106,8 @@ fn get_valid_aggregated_attestation( chain: &BeaconChain, aggregate: Attestation, ) -> (SignedAggregateAndProof, usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -155,7 +156,8 @@ fn get_non_aggregator( chain: &BeaconChain, aggregate: &Attestation, ) -> (usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -213,15 +215,17 @@ struct GossipTester { } impl GossipTester { - pub fn new() -> Self { + pub async fn new() -> Self { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -395,9 +399,10 @@ impl GossipTester { } } /// Tests verification of `SignedAggregateAndProof` from the gossip network. 
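Note: the tests in this file fetch fork-choice block summaries through `canonical_head.fork_choice_read_lock()` instead of the old `chain.fork_choice.read()`, and `GossipTester::new()` is now async, so it is `.await`ed before the chained assertion helpers. The lookup pattern on its own, as a sketch (`ProtoBlock` is assumed to be the summary type returned by fork choice's `get_block`, as used elsewhere in the patch):

// Illustrative sketch, not part of the patch.
fn proto_block_example<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    block_root: Hash256,
) -> Option<ProtoBlock> {
    chain
        .canonical_head
        .fork_choice_read_lock()
        .get_block(&block_root)
}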
-#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { GossipTester::new() + .await /* * The following two tests ensure: * @@ -511,8 +516,7 @@ fn aggregated_gossip_verification() { let committee_len = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_beacon_committee(tester.slot(), a.message.aggregate.data.index) .expect("should get committees") @@ -612,7 +616,7 @@ fn aggregated_gossip_verification() { tester.valid_aggregate.message.aggregate.clone(), None, &sk, - &chain.head_info().unwrap().fork, + &chain.canonical_head.cached_head().head_fork(), chain.genesis_validators_root, &chain.spec, ) @@ -669,9 +673,10 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for an unaggregated attestation on the gossip network. -#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { GossipTester::new() + .await /* * The following test ensures: * @@ -684,8 +689,7 @@ fn unaggregated_gossip_verification() { a.data.index = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_committee_count_at_slot(a.data.slot) .unwrap() @@ -924,16 +928,18 @@ fn unaggregated_gossip_verification() { /// Ensures that an attestation that skips epochs can still be processed. /// /// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache. -#[test] -fn attestation_that_skips_epochs() { +#[tokio::test] +async fn attestation_that_skips_epochs() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); let current_epoch = harness.chain.epoch().expect("should get epoch"); @@ -992,16 +998,18 @@ fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } -#[test] -fn attestation_to_finalized_block() { +#[tokio::test] +async fn attestation_to_finalized_block() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let finalized_checkpoint = harness .chain @@ -1067,16 +1075,18 @@ fn attestation_to_finalized_block() { .contains(earlier_block_root)); } -#[test] -fn verify_aggregate_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_aggregate_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. 
- harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -1124,16 +1134,18 @@ fn verify_aggregate_for_gossip_doppelganger_detection() { .expect("should check if gossip aggregator was observed")); } -#[test] -fn verify_attestation_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_attestation_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index ca65b05fd8b..4b3e1e72fe1 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -27,19 +27,18 @@ const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGT lazy_static! { /// A cached set of keys. static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); - - /// A cached set of valid blocks - static ref CHAIN_SEGMENT: Vec> = get_chain_segment(); } -fn get_chain_segment() -> Vec> { +async fn get_chain_segment() -> Vec> { let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - CHAIN_SEGMENT_LENGTH, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness .chain @@ -50,11 +49,14 @@ fn get_chain_segment() -> Vec> { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, snapshot.beacon_block) + .make_full_block( + &snapshot.beacon_block_root, + snapshot.beacon_block.as_ref().clone(), + ) .unwrap(); BeaconSnapshot { beacon_block_root: snapshot.beacon_block_root, - beacon_block: full_block, + beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, } }) @@ -75,8 +77,8 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Vec> { - CHAIN_SEGMENT +fn chain_segment_blocks(chain_segment: &[BeaconSnapshot]) -> Vec>> { + chain_segment .iter() .map(|snapshot| snapshot.beacon_block.clone()) .collect() @@ -110,13 +112,13 @@ fn update_proposal_signatures( .get(proposer_index) .expect("proposer keypair should be available"); - let (block, _) = snapshot.beacon_block.clone().deconstruct(); - snapshot.beacon_block = block.sign( + let (block, _) = snapshot.beacon_block.as_ref().clone().deconstruct(); + snapshot.beacon_block = Arc::new(block.sign( &keypair.sk, &state.fork(), state.genesis_validators_root(), spec, - ); + )); } } @@ -124,17 +126,18 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { for i in 0..snapshots.len() { let root = 
snapshots[i].beacon_block.canonical_root(); if let Some(child) = snapshots.get_mut(i + 1) { - let (mut block, signature) = child.beacon_block.clone().deconstruct(); + let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; - child.beacon_block = SignedBeaconBlock::from_block(block, signature) + child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)) } } } -#[test] -fn chain_segment_full_segment() { +#[tokio::test] +async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -145,33 +148,36 @@ fn chain_segment_full_segment() { harness .chain .process_chain_segment(vec![]) + .await .into_block_error() .expect("should import empty chain segment"); harness .chain .process_chain_segment(blocks.clone()) + .await .into_block_error() .expect("should import chain segment"); - harness.chain.fork_choice().expect("should run fork choice"); + harness + .chain + .recompute_head_at_current_slot() + .await + .expect("should run fork choice"); assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } -#[test] -fn chain_segment_varying_chunk_size() { +#[tokio::test] +async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -182,36 +188,39 @@ fn chain_segment_varying_chunk_size() { harness .chain .process_chain_segment(chunk.to_vec()) + .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } - harness.chain.fork_choice().expect("should run fork choice"); + harness + .chain + .recompute_head_at_current_slot() + .await + .expect("should run fork choice"); assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } } -#[test] -fn chain_segment_non_linear_parent_roots() { +#[tokio::test] +async fn chain_segment_non_linear_parent_roots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; + harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test with a block removed. */ - let mut blocks = chain_segment_blocks(); + let mut blocks = chain_segment_blocks(&chain_segment); blocks.remove(2); assert!( @@ -219,6 +228,7 @@ fn chain_segment_non_linear_parent_roots() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -228,16 +238,17 @@ fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. 
*/ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -245,28 +256,30 @@ fn chain_segment_non_linear_parent_roots() { ); } -#[test] -fn chain_segment_non_linear_slots() { +#[tokio::test] +async fn chain_segment_non_linear_slots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test where a child is lower than the parent. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -277,16 +290,17 @@ fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -294,7 +308,8 @@ fn chain_segment_non_linear_slots() { ); } -fn assert_invalid_signature( +async fn assert_invalid_signature( + chain_segment: &[BeaconSnapshot], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], @@ -311,6 +326,7 @@ fn assert_invalid_signature( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -319,19 +335,20 @@ fn assert_invalid_signature( ); // Ensure the block will be rejected if imported on its own (without gossip checking). - let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. - let _ = harness.chain.process_chain_segment(ancestor_blocks); + let _ = harness.chain.process_chain_segment(ancestor_blocks).await; assert!( matches!( harness .chain - .process_block(snapshots[block_index].beacon_block.clone()), + .process_block(snapshots[block_index].beacon_block.clone()) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid {} signature", @@ -346,25 +363,34 @@ fn assert_invalid_signature( // slot) tuple. 
} -fn get_invalid_sigs_harness() -> BeaconChainHarness> { +async fn get_invalid_sigs_harness( + chain_segment: &[BeaconSnapshot], +) -> BeaconChainHarness> { let harness = get_harness(VALIDATOR_COUNT); harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); harness } -#[test] -fn invalid_signature_gossip_block() { +#[tokio::test] +async fn invalid_signature_gossip_block() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Ensure the block will be rejected if imported on its own (without gossip checking). - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); // Import all the ancestors before the `block_index` block. - let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) @@ -372,13 +398,18 @@ fn invalid_signature_gossip_block() { harness .chain .process_chain_segment(ancestor_blocks) + .await .into_block_error() .expect("should import all blocks prior to the one being tested"); assert!( matches!( harness .chain - .process_block(SignedBeaconBlock::from_block(block, junk_signature())), + .process_block(Arc::new(SignedBeaconBlock::from_block( + block, + junk_signature() + ))) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid gossip signature", @@ -386,14 +417,21 @@ fn invalid_signature_gossip_block() { } } -#[test] -fn invalid_signature_block_proposal() { +#[tokio::test] +async fn invalid_signature_block_proposal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); let blocks = snapshots .iter() .map(|snapshot| snapshot.beacon_block.clone()) @@ -404,6 +442,7 @@ fn invalid_signature_block_proposal() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -412,26 +451,37 @@ fn invalid_signature_block_proposal() { } } -#[test] -fn invalid_signature_randao_reveal() { +#[tokio::test] +async fn invalid_signature_randao_reveal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = 
snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); *block.body_mut().randao_reveal_mut() = junk_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "randao"); + assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await; } } -#[test] -fn invalid_signature_proposer_slashing() { +#[tokio::test] +async fn invalid_signature_proposer_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); let proposer_slashing = ProposerSlashing { signed_header_1: SignedBeaconBlockHeader { message: block.block_header(), @@ -447,18 +497,27 @@ fn invalid_signature_proposer_slashing() { .proposer_slashings_mut() .push(proposer_slashing) .expect("should update proposer slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "proposer slashing", + ) + .await; } } -#[test] -fn invalid_signature_attester_slashing() { +#[tokio::test] +async fn invalid_signature_attester_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let indexed_attestation = IndexedAttestation { attesting_indices: vec![0].into(), data: AttestationData { @@ -480,33 +539,58 @@ fn invalid_signature_attester_slashing() { attestation_1: indexed_attestation.clone(), attestation_2: indexed_attestation, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .attester_slashings_mut() .push(attester_slashing) .expect("should update attester slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing"); + assert_invalid_signature( + &chain_segment, + 
&harness, + block_index, + &snapshots, + "attester slashing", + ) + .await; } } -#[test] -fn invalid_signature_attestation() { +#[tokio::test] +async fn invalid_signature_attestation() { + let chain_segment = get_chain_segment().await; let mut checked_attestation = false; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) { attestation.signature = junk_aggregate_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attestation"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "attestation", + ) + .await; checked_attestation = true; } } @@ -517,12 +601,13 @@ fn invalid_signature_attestation() { ) } -#[test] -fn invalid_signature_deposit() { +#[tokio::test] +async fn invalid_signature_deposit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Note: an invalid deposit signature is permitted! - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let deposit = Deposit { proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), data: DepositData { @@ -532,13 +617,18 @@ fn invalid_signature_deposit() { signature: junk_signature().into(), }, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .deposits_mut() .push(deposit) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); let blocks = snapshots @@ -550,6 +640,7 @@ fn invalid_signature_deposit() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -558,13 +649,18 @@ fn invalid_signature_deposit() { } } -#[test] -fn invalid_signature_exit() { +#[tokio::test] +async fn invalid_signature_exit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let epoch = snapshots[block_index].beacon_state.current_epoch(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .voluntary_exits_mut() @@ -576,10 
+672,18 @@ fn invalid_signature_exit() { signature: junk_signature(), }) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "voluntary exit", + ) + .await; } } @@ -590,27 +694,30 @@ fn unwrap_err(result: Result) -> E { } } -#[test] -fn block_gossip_verification() { +#[tokio::test] +async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT[block_index].beacon_block.slot().as_u64()); + .set_slot(chain_segment[block_index].beacon_block.slot().as_u64()); // Import the ancestors prior to the block we're testing. - for snapshot in &CHAIN_SEGMENT[0..block_index] { + for snapshot in &chain_segment[0..block_index] { let gossip_verified = harness .chain .verify_block_for_gossip(snapshot.beacon_block.clone()) + .await .expect("should obtain gossip verified block"); harness .chain .process_block(gossip_verified) + .await .expect("should import valid gossip verified block"); } @@ -624,15 +731,16 @@ fn block_gossip_verification() { * future blocks for processing at the appropriate slot). */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_block_slot = block.slot() + 1; *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -654,21 +762,19 @@ fn block_gossip_verification() { * nodes, etc). */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_finalized_slot = harness - .chain - .head_info() - .expect("should get head info") - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -687,8 +793,9 @@ fn block_gossip_verification() { * proposer_index pubkey. */ - let block = CHAIN_SEGMENT[block_index] + let block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -697,10 +804,11 @@ fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip(SignedBeaconBlock::from_block( + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( block, junk_signature() - )) + ))) + .await ), BlockError::ProposalSignatureInvalid ), @@ -715,15 +823,16 @@ fn block_gossip_verification() { * The block's parent (defined by block.parent_root) passes validation. 
*/ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let parent_root = Hash256::from_low_u64_be(42); *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -740,15 +849,16 @@ fn block_gossip_verification() { * store.finalized_checkpoint.root */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); - let parent_root = CHAIN_SEGMENT[0].beacon_block_root; + let parent_root = chain_segment[0].beacon_block_root; *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -766,8 +876,9 @@ fn block_gossip_verification() { * processing while proposers for the block's branch are calculated. */ - let mut block = CHAIN_SEGMENT[block_index] + let mut block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -779,13 +890,13 @@ fn block_gossip_verification() { *block.proposer_index_mut() = other_proposer; let block = block.sign( &generate_deterministic_keypair(other_proposer as usize).sk, - &harness.chain.head_info().unwrap().fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -797,7 +908,7 @@ fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::RepeatProposal { proposer, slot, @@ -807,9 +918,9 @@ fn block_gossip_verification() { "should register any valid signature against the proposer, even if the block failed later verification" ); - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( - harness.chain.verify_block_for_gossip(block).is_ok(), + harness.chain.verify_block_for_gossip(block).await.is_ok(), "the valid block should be processed" ); @@ -822,12 +933,13 @@ fn block_gossip_verification() { * signed_beacon_block.message.slot. 
*/ - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( matches!( harness .chain .verify_block_for_gossip(block.clone()) + .await .err() .expect("should error when processing known block"), BlockError::RepeatProposal { @@ -840,8 +952,8 @@ fn block_gossip_verification() { ); } -#[test] -fn verify_block_for_gossip_slashing_detection() { +#[tokio::test] +async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); let slasher = Arc::new( Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(), @@ -858,12 +970,21 @@ fn verify_block_for_gossip_slashing_detection() { harness.advance_slot(); let state = harness.get_current_state(); - let (block1, _) = harness.make_block(state.clone(), Slot::new(1)); - let (block2, _) = harness.make_block(state, Slot::new(1)); + let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await; + let (block2, _) = harness.make_block(state, Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block1).unwrap(); - harness.chain.process_block(verified_block).unwrap(); - unwrap_err(harness.chain.verify_block_for_gossip(block2)); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block1)) + .await + .unwrap(); + harness.chain.process_block(verified_block).await.unwrap(); + unwrap_err( + harness + .chain + .verify_block_for_gossip(Arc::new(block2)) + .await, + ); // Slasher should have been handed the two conflicting blocks and crafted a slashing. slasher.process_queued(Epoch::new(0)).unwrap(); @@ -875,16 +996,20 @@ fn verify_block_for_gossip_slashing_detection() { slasher_dir.close().unwrap(); } -#[test] -fn verify_block_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_block_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - let (block, _) = harness.make_block(state.clone(), Slot::new(1)); + let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block).unwrap(); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block)) + .await + .unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); - harness.chain.process_block(verified_block).unwrap(); + harness.chain.process_block(verified_block).await.unwrap(); for att in attestations.iter() { let epoch = att.data.target.epoch; @@ -921,8 +1046,8 @@ fn verify_block_for_gossip_doppelganger_detection() { } } -#[test] -fn add_base_block_to_altair_chain() { +#[tokio::test] +async fn add_base_block_to_altair_chain() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); @@ -940,11 +1065,13 @@ fn add_base_block_to_altair_chain() { harness.advance_slot(); // Build out all the blocks in epoch 0. - harness.extend_chain( - slots_per_epoch as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + slots_per_epoch as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -952,7 +1079,7 @@ fn add_base_block_to_altair_chain() { // Produce an Altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (altair_signed_block, _) = harness.make_block(state.clone(), slot); + let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await; let altair_block = &altair_signed_block .as_altair() .expect("test expects an altair block") @@ -1007,7 +1134,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(base_block.clone()) + .verify_block_for_gossip(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1020,7 +1148,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(base_block.clone()) + .process_block(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1031,7 +1160,10 @@ fn add_base_block_to_altair_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. assert!(matches!( - harness.chain.process_chain_segment(vec![base_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(base_block)]) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { @@ -1042,8 +1174,8 @@ fn add_base_block_to_altair_chain() { )); } -#[test] -fn add_altair_block_to_base_chain() { +#[tokio::test] +async fn add_altair_block_to_base_chain() { let mut spec = MainnetEthSpec::default_spec(); // Altair never happens. @@ -1060,11 +1192,13 @@ fn add_altair_block_to_base_chain() { harness.advance_slot(); // Build one block. - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -1072,7 +1206,7 @@ fn add_altair_block_to_base_chain() { // Produce an altair block. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (base_signed_block, _) = harness.make_block(state.clone(), slot); + let (base_signed_block, _) = harness.make_block(state.clone(), slot).await; let base_block = &base_signed_block .as_base() .expect("test expects a base block") @@ -1128,7 +1262,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(altair_block.clone()) + .verify_block_for_gossip(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1141,7 +1276,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(altair_block.clone()) + .process_block(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1152,7 +1288,10 @@ fn add_altair_block_to_base_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. 
assert!(matches!( - harness.chain.process_chain_segment(vec![altair_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(altair_block)]) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { diff --git a/beacon_node/beacon_chain/tests/fork_choice.rs b/beacon_node/beacon_chain/tests/fork_choice.rs index 533024528ab..de74a2a9e18 100644 --- a/beacon_node/beacon_chain/tests/fork_choice.rs +++ b/beacon_node/beacon_chain/tests/fork_choice.rs @@ -7,8 +7,8 @@ use types::*; const VALIDATOR_COUNT: usize = 24; -#[test] -fn chooses_highest_justified_checkpoint() { +#[tokio::test] +async fn chooses_highest_justified_checkpoint() { let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); let mut spec = MainnetEthSpec::default_spec(); spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -21,7 +21,7 @@ fn chooses_highest_justified_checkpoint() { harness.advance_slot(); - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), 0, "the chain head is at genesis"); assert_eq!( head.beacon_state.finalized_checkpoint().epoch, @@ -30,13 +30,15 @@ fn chooses_highest_justified_checkpoint() { ); let slot_a = Slot::from(slots_per_epoch * 4 + slots_per_epoch - 1); - harness.extend_chain( - slot_a.as_usize(), - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + slot_a.as_usize(), + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), slot_a); assert_eq!( head.beacon_block.slot() % slots_per_epoch, @@ -67,7 +69,7 @@ fn chooses_highest_justified_checkpoint() { .get_state(&fork_parent_block.state_root(), Some(fork_parent_slot)) .unwrap() .unwrap(); - let (fork_block, fork_state) = harness.make_block(fork_parent_state, slot_a + 1); + let (fork_block, fork_state) = harness.make_block(fork_parent_state, slot_a + 1).await; assert_eq!( fork_state.current_justified_checkpoint().epoch, @@ -85,12 +87,13 @@ fn chooses_highest_justified_checkpoint() { fork_block_root, harness .process_block(fork_block.slot(), fork_block) + .await .unwrap() .into() ); { - let fork_choice = harness.chain.fork_choice.read(); + let fork_choice = harness.chain.canonical_head.fork_choice_read_lock(); let proto_array = fork_choice.proto_array(); assert_eq!( proto_array.get_weight(&fork_block_root).unwrap(), @@ -103,15 +106,15 @@ fn chooses_highest_justified_checkpoint() { ); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( head.beacon_block_root, slot_a_root, "the fork block has not become the head" ); } -#[test] -fn chooses_highest_justified_checkpoint_n_plus_2() { +#[tokio::test] +async fn chooses_highest_justified_checkpoint_n_plus_2() { let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); let mut spec = MainnetEthSpec::default_spec(); spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -124,7 +127,7 @@ fn chooses_highest_justified_checkpoint_n_plus_2() { harness.advance_slot(); - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), 0, "the chain head is at genesis"); assert_eq!( head.beacon_state.finalized_checkpoint().epoch, @@ -135,27 +138,34 @@ fn chooses_highest_justified_checkpoint_n_plus_2() { let slot_a = Slot::from(slots_per_epoch * 4 + slots_per_epoch - 1); // Extend the chain to 
the slot before `slot_a` - harness.extend_chain( - slot_a.as_usize() - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + slot_a.as_usize() - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Make slashings to include in the block at `slot_a`. - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let mut slashings = vec![]; for i in 0..15 { slashings.push(harness.make_proposer_slashing(i as u64)); } - let (block, _pre_state) = - harness.make_block_with_modifier(head.beacon_state, slot_a, |block| { - block.body_altair_mut().unwrap().proposer_slashings = - VariableList::::new(slashings).unwrap(); - }); + let (block, _pre_state) = harness + .make_block_with_modifier( + head.beacon_state.clone_with_only_committee_caches(), + slot_a, + |block| { + block.body_altair_mut().unwrap().proposer_slashings = + VariableList::::new(slashings).unwrap(); + }, + ) + .await; // Process the block containing the slashings at the slot before the epoch transition and attest to it. - harness.process_block(slot_a, block).unwrap(); - let head = harness.chain.head().unwrap(); + harness.process_block(slot_a, block).await.unwrap(); + let head = harness.chain.head_snapshot(); let vals = (15..VALIDATOR_COUNT).collect::>(); harness.attest_block( &head.beacon_state, @@ -201,7 +211,9 @@ fn chooses_highest_justified_checkpoint_n_plus_2() { .get_state(&fork_parent_block.state_root(), Some(fork_parent_slot)) .unwrap() .unwrap(); - let (fork_block, fork_state) = harness.make_block(fork_parent_state, slot_a + slots_per_epoch); + let (fork_block, fork_state) = harness + .make_block(fork_parent_state, slot_a + slots_per_epoch) + .await; assert_eq!( fork_state.current_justified_checkpoint().epoch, @@ -219,12 +231,13 @@ fn chooses_highest_justified_checkpoint_n_plus_2() { fork_block_root, harness .process_block(fork_block.slot(), fork_block) + .await .unwrap() .into() ); { - let fork_choice = harness.chain.fork_choice.read(); + let fork_choice = harness.chain.canonical_head.fork_choice_read_lock(); let proto_array = fork_choice.proto_array(); assert_eq!( proto_array.get_weight(&fork_block_root).unwrap(), @@ -237,7 +250,7 @@ fn chooses_highest_justified_checkpoint_n_plus_2() { ); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( head.beacon_block_root, slot_a_root, "the fork block has not become the head" diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index d67ed35f9cc..91d5eb21cae 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -27,11 +27,11 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { } } -#[test] +#[tokio::test] // TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1` // are causing failed lookups to the execution node. I need to come back to this. 
#[should_panic] -fn merge_with_terminal_block_hash_override() { +async fn merge_with_terminal_block_hash_override() { let altair_fork_epoch = Epoch::new(0); let bellatrix_fork_epoch = Epoch::new(0); @@ -70,8 +70,7 @@ fn merge_with_terminal_block_hash_override() { assert!( harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_block .as_merge() .is_ok(), @@ -80,9 +79,9 @@ fn merge_with_terminal_block_hash_override() { let mut execution_payloads = vec![]; for i in 0..E::slots_per_epoch() * 3 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; let execution_payload = block.message().body().execution_payload().unwrap().clone(); if i == 0 { @@ -94,8 +93,8 @@ fn merge_with_terminal_block_hash_override() { verify_execution_payload_chain(execution_payloads.as_slice()); } -#[test] -fn base_altair_merge_with_terminal_block_after_fork() { +#[tokio::test] +async fn base_altair_merge_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); let bellatrix_fork_epoch = Epoch::new(8); @@ -118,15 +117,15 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Start with the base fork. */ - assert!(harness.chain.head().unwrap().beacon_block.as_base().is_ok()); + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot); + harness.extend_to_slot(altair_fork_slot).await; - let altair_head = harness.chain.head().unwrap().beacon_block; + let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); assert_eq!(altair_head.slot(), altair_fork_slot); @@ -134,9 +133,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Do the merge fork, without a terminal PoW block. */ - harness.extend_to_slot(merge_fork_slot); + harness.extend_to_slot(merge_fork_slot).await; - let merge_head = harness.chain.head().unwrap().beacon_block; + let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); assert_eq!( @@ -148,9 +147,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Next merge block shouldn't include an exec payload. 
*/ - harness.extend_slots(1); + harness.extend_slots(1).await; - let one_after_merge_head = harness.chain.head().unwrap().beacon_block; + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; assert_eq!( *one_after_merge_head .message() @@ -175,9 +174,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { */ for _ in 0..4 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); } diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c9df6aa31db..535fe080a7f 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -46,18 +46,20 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { harness } -#[test] -fn voluntary_exit() { +#[tokio::test] +async fn voluntary_exit() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), VALIDATOR_COUNT); let spec = &harness.chain.spec.clone(); - harness.extend_chain( - (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let validator_index1 = VALIDATOR_COUNT - 1; let validator_index2 = VALIDATOR_COUNT - 2; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 879f223e967..b00036f4b4c 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,8 +2,8 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, @@ -12,6 +12,7 @@ use execution_layer::{ use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; use tree_hash::TreeHash; @@ -84,19 +85,19 @@ impl InvalidPayloadRig { fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { self.harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block_root) .unwrap() .execution_status } - fn fork_choice(&self) { - self.harness.chain.fork_choice().unwrap(); - } - - fn head_info(&self) -> HeadInfo { - self.harness.chain.head_info().unwrap() + async fn recompute_head(&self) { + self.harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); } fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { @@ -142,22 +143,24 @@ impl InvalidPayloadRig { .block_hash } - fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { - (0..num_blocks) - .map(|_| 
self.import_block(is_valid.clone())) - .collect() + async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { + let mut roots = Vec::with_capacity(num_blocks as usize); + for _ in 0..num_blocks { + roots.push(self.import_block(is_valid.clone()).await); + } + roots } - fn move_to_first_justification(&mut self, is_valid: Payload) { + async fn move_to_first_justification(&mut self, is_valid: Payload) { let slots_till_justification = E::slots_per_epoch() * 3; - self.build_blocks(slots_till_justification, is_valid); + self.build_blocks(slots_till_justification, is_valid).await; - let justified_checkpoint = self.head_info().current_justified_checkpoint; + let justified_checkpoint = self.harness.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 2); } /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`. - fn import_block(&mut self, is_valid: Payload) -> Hash256 { + async fn import_block(&mut self, is_valid: Payload) -> Hash256 { self.import_block_parametric(is_valid, is_valid, |error| { matches!( error, @@ -166,6 +169,7 @@ impl InvalidPayloadRig { ) ) }) + .await } fn block_root_at_slot(&self, slot: Slot) -> Option { @@ -178,13 +182,13 @@ impl InvalidPayloadRig { fn validate_manually(&self, block_root: Hash256) { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_valid_execution_payload(block_root) .unwrap(); } - fn import_block_parametric) -> bool>( + async fn import_block_parametric) -> bool>( &mut self, new_payload_response: Payload, forkchoice_response: Payload, @@ -192,10 +196,10 @@ impl InvalidPayloadRig { ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); - let head = self.harness.chain.head().unwrap(); - let state = head.beacon_state; + let head = self.harness.chain.head_snapshot(); + let state = head.beacon_state.clone_with_only_committee_caches(); let slot = state.slot() + 1; - let (block, post_state) = self.harness.make_block(state, slot); + let (block, post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); let set_new_payload = |payload: Payload| match payload { @@ -249,7 +253,11 @@ impl InvalidPayloadRig { } else { mock_execution_layer.server.full_payload_verification(); } - let root = self.harness.process_block(slot, block.clone()).unwrap(); + let root = self + .harness + .process_block(slot, block.clone()) + .await + .unwrap(); if self.enable_attestations { let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); @@ -294,7 +302,7 @@ impl InvalidPayloadRig { set_new_payload(new_payload_response); set_forkchoice_updated(forkchoice_response); - match self.harness.process_block(slot, block) { + match self.harness.process_block(slot, block).await { Err(error) if evaluate_error(&error) => (), Err(other) => { panic!("evaluate_error returned false with {:?}", other) @@ -309,8 +317,12 @@ impl InvalidPayloadRig { } }; - let block_in_forkchoice = - self.harness.chain.fork_choice.read().get_block(&block_root); + let block_in_forkchoice = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root); if let Payload::Invalid { .. } = new_payload_response { // A block found to be immediately invalid should not end up in fork choice. 
assert_eq!(block_in_forkchoice, None); @@ -333,106 +345,111 @@ impl InvalidPayloadRig { block_root } - fn invalidate_manually(&self, block_root: Hash256) { + async fn invalidate_manually(&self, block_root: Hash256) { self.harness .chain .process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { block_root }) + .await .unwrap(); } } /// Simple test of the different import types. -#[test] -fn valid_invalid_syncing() { +#[tokio::test] +async fn valid_invalid_syncing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; rig.import_block(Payload::Invalid { latest_valid_hash: None, - }); - rig.import_block(Payload::Syncing); + }) + .await; + rig.import_block(Payload::Syncing).await; } /// Ensure that an invalid payload can invalidate its parent too (given the right /// `latest_valid_hash`. -#[test] -fn invalid_payload_invalidates_parent() { +#[tokio::test] +async fn invalid_payload_invalidates_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; let roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; let latest_valid_hash = rig.block_hash(roots[0]); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); - assert_eq!(rig.head_info().block_root, roots[0]); + assert_eq!(rig.harness.head_block_root(), roots[0]); } /// Test invalidation of a payload via the fork choice updated message. /// /// The `invalid_payload` argument determines the type of invalid payload: `Invalid`, /// `InvalidBlockHash`, etc, taking the `latest_valid_hash` as an argument. -fn immediate_forkchoice_update_invalid_test( +async fn immediate_forkchoice_update_invalid_test( invalid_payload: impl FnOnce(Option) -> Payload, ) { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; - let valid_head_root = rig.import_block(Payload::Valid); + let valid_head_root = rig.import_block(Payload::Valid).await; let latest_valid_hash = Some(rig.block_hash(valid_head_root)); // Import a block which returns syncing when supplied via newPayload, and then // invalid when the forkchoice update is sent. rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| { false - }); + }) + .await; // The head should be the latest valid block. 
- assert_eq!(rig.head_info().block_root, valid_head_root); + assert_eq!(rig.harness.head_block_root(), valid_head_root); } -#[test] -fn immediate_forkchoice_update_payload_invalid() { +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid() { immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { latest_valid_hash, }) + .await } -#[test] -fn immediate_forkchoice_update_payload_invalid_block_hash() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_block_hash() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await } -#[test] -fn immediate_forkchoice_update_payload_invalid_terminal_block() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_terminal_block() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock).await } /// Ensure the client tries to exit when the justified checkpoint is invalidated. -#[test] -fn justified_checkpoint_becomes_invalid() { +#[tokio::test] +async fn justified_checkpoint_becomes_invalid() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; - let justified_checkpoint = rig.head_info().current_justified_checkpoint; + let justified_checkpoint = rig.harness.justified_checkpoint(); let parent_root_of_justified = rig .harness .chain @@ -456,7 +473,8 @@ fn justified_checkpoint_becomes_invalid() { // is invalid. BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) ) - }); + }) + .await; // The beacon chain should have triggered a shutdown. assert_eq!( @@ -468,18 +486,18 @@ fn justified_checkpoint_becomes_invalid() { } /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. -#[test] -fn pre_finalized_latest_valid_hash() { +#[tokio::test] +async fn pre_finalized_latest_valid_hash() { let num_blocks = E::slots_per_epoch() * 4; let finalized_epoch = 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root); @@ -490,10 +508,11 @@ fn pre_finalized_latest_valid_hash() { // Import a pre-finalized block. rig.import_block(Payload::Invalid { latest_valid_hash: Some(pre_finalized_block_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. 
assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -514,16 +533,16 @@ fn pre_finalized_latest_valid_hash() { /// /// - Invalidate descendants of `latest_valid_root`. /// - Validate `latest_valid_root` and its ancestors. -#[test] -fn latest_valid_hash_will_validate() { +#[tokio::test] +async fn latest_valid_hash_will_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(4, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(4, Payload::Syncing).await); let latest_valid_root = rig .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) @@ -532,9 +551,10 @@ fn latest_valid_hash_will_validate() { rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; - assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); + assert_eq!(rig.harness.head_slot(), LATEST_VALID_SLOT); for slot in 0..=5 { let slot = Slot::new(slot); @@ -558,18 +578,18 @@ fn latest_valid_hash_will_validate() { } /// Check behaviour when the `latest_valid_hash` is a junk value. -#[test] -fn latest_valid_hash_is_junk() { +#[tokio::test] +async fn latest_valid_hash_is_junk() { let num_blocks = E::slots_per_epoch() * 5; let finalized_epoch = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); @@ -577,10 +597,11 @@ fn latest_valid_hash_is_junk() { let junk_hash = ExecutionBlockHash::repeat_byte(42); rig.import_block(Payload::Invalid { latest_valid_hash: Some(junk_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -598,19 +619,19 @@ fn latest_valid_hash_is_junk() { } /// Check that descendants of invalid blocks are also invalidated. -#[test] -fn invalidates_all_descendants() { +#[tokio::test] +async fn invalidates_all_descendants() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
+ let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -621,9 +642,14 @@ fn invalidates_all_descendants() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block)) + .await + .unwrap(); + rig.recompute_head().await; // The latest valid hash will be set to the grandparent of the fork block. This means that the // parent of the fork block will become invalid. @@ -638,14 +664,15 @@ fn invalidates_all_descendants() { let latest_valid_hash = rig.block_hash(latest_valid_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The block before the fork should become the head. - assert_eq!(rig.head_info().block_root, latest_valid_root); + assert_eq!(rig.harness.head_block_root(), latest_valid_root); // The fork block should be invalidated, even though it's not an ancestor of the block that // triggered the INVALID response from the EL. @@ -677,19 +704,19 @@ fn invalidates_all_descendants() { } /// Check that the head will switch after the canonical branch is invalidated. -#[test] -fn switches_heads() { +#[tokio::test] +async fn switches_heads() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. 
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -700,23 +727,29 @@ fn switches_heads() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block)) + .await + .unwrap(); + rig.recompute_head().await; let latest_valid_slot = fork_parent_slot; let latest_valid_hash = rig.block_hash(fork_parent_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The fork block should become the head. - assert_eq!(rig.head_info().block_root, fork_block_root); + assert_eq!(rig.harness.head_block_root(), fork_block_root); // The fork block has not yet been validated. assert!(rig.execution_status(fork_block_root).is_optimistic()); @@ -746,17 +779,18 @@ fn switches_heads() { } } -#[test] -fn invalid_during_processing() { +#[tokio::test] +async fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); let roots = &[ - rig.import_block(Payload::Valid), + rig.import_block(Payload::Valid).await, rig.import_block(Payload::Invalid { latest_valid_hash: None, - }), - rig.import_block(Payload::Valid), + }) + .await, + rig.import_block(Payload::Valid).await, ]; // 0 should be present in the chain. @@ -772,20 +806,20 @@ fn invalid_during_processing() { None ); // 2 should be the head. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head_block_root = rig.harness.head_block_root(); + assert_eq!(head_block_root, roots[2]); } -#[test] -fn invalid_after_optimistic_sync() { +#[tokio::test] +async fn invalid_after_optimistic_sync() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. let mut roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; for root in &roots { @@ -793,29 +827,32 @@ fn invalid_after_optimistic_sync() { } // 2 should be the head. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[2]); - roots.push(rig.import_block(Payload::Invalid { - latest_valid_hash: Some(rig.block_hash(roots[1])), - })); + roots.push( + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(rig.block_hash(roots[1])), + }) + .await, + ); // Running fork choice is necessary since a block has been invalidated. - rig.fork_choice(); + rig.recompute_head().await; // 1 should be the head, since 2 was invalidated. 
- let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[1]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[1]); } -#[test] -fn manually_validate_child() { +#[tokio::test] +async fn manually_validate_child() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; assert!(rig.execution_status(parent).is_optimistic()); assert!(rig.execution_status(child).is_optimistic()); @@ -826,14 +863,14 @@ fn manually_validate_child() { assert!(rig.execution_status(child).is_valid_and_post_bellatrix()); } -#[test] -fn manually_validate_parent() { +#[tokio::test] +async fn manually_validate_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; assert!(rig.execution_status(parent).is_optimistic()); assert!(rig.execution_status(child).is_optimistic()); @@ -844,14 +881,14 @@ fn manually_validate_parent() { assert!(rig.execution_status(child).is_optimistic()); } -#[test] -fn payload_preparation() { +#[tokio::test] +async fn payload_preparation() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let current_slot = rig.harness.chain.slot().unwrap(); assert_eq!(head.beacon_state.slot(), 1); assert_eq!(current_slot, 1); @@ -865,18 +902,19 @@ fn payload_preparation() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. - el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(1), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(rig.harness.chain.slot().unwrap()) + .await .unwrap(); let payload_attributes = PayloadAttributes { @@ -896,15 +934,15 @@ fn payload_preparation() { assert_eq!(rig.previous_payload_attributes(), payload_attributes); } -#[test] -fn invalid_parent() { +#[tokio::test] +async fn invalid_parent() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. // Import a syncing block atop the transition block (we'll call this the "parent block" since we // build another block on it later). 
- let parent_root = rig.import_block(Payload::Syncing); + let parent_root = rig.import_block(Payload::Syncing).await; let parent_block = rig.harness.get_block(parent_root.into()).unwrap(); let parent_state = rig .harness @@ -914,34 +952,34 @@ fn invalid_parent() { // Produce another block atop the parent, but don't import yet. let slot = parent_block.slot() + 1; rig.harness.set_current_slot(slot); - let (block, mut state) = rig.harness.make_block(parent_state, slot); + let (block, mut state) = rig.harness.make_block(parent_state, slot).await; + let block = Arc::new(block); let block_root = block.canonical_root(); assert_eq!(block.parent_root(), parent_root); // Invalidate the parent block. - rig.invalidate_manually(parent_root); + rig.invalidate_manually(parent_root).await; assert!(rig.execution_status(parent_root).is_invalid()); // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.verify_block_for_gossip(block.clone()), + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.clone()), + rig.harness.chain.process_block(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload cannot be imported to fork choice. - let (block, _block_signature) = block.deconstruct(); assert!(matches!( - rig.harness.chain.fork_choice.write().on_block( + rig.harness.chain.canonical_head.fork_choice_write_lock().on_block( slot, - &block, + block.message(), block_root, Duration::from_secs(0), &mut state, @@ -960,21 +998,21 @@ fn invalid_parent() { } /// Tests to ensure that we will still send a proposer preparation -#[test] -fn payload_preparation_before_transition_block() { +#[tokio::test] +async fn payload_preparation_before_transition_block() { let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); - let head_info = rig.head_info(); - assert!( - !head_info.is_merge_transition_complete, - "the head block is pre-transition" - ); + let head = rig.harness.chain.head_snapshot(); assert_eq!( - head_info.execution_payload_block_hash, - Some(ExecutionBlockHash::zero()), - "the head block is post-bellatrix" + head.beacon_block + .message() + .body() + .execution_payload() + .unwrap() + .block_hash(), + ExecutionBlockHash::zero(), + "the head block is post-bellatrix but pre-transition" ); let current_slot = rig.harness.chain.slot().unwrap(); @@ -986,24 +1024,32 @@ fn payload_preparation_before_transition_block() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. 
- el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(0), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.move_to_terminal_block(); rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(current_slot) + .await .unwrap(); + let forkchoice_update_params = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_forkchoice_update_parameters(); rig.harness .chain - .update_execution_engine_forkchoice_blocking(current_slot) + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await .unwrap(); let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); @@ -1012,15 +1058,15 @@ fn payload_preparation_before_transition_block() { assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } -#[test] -fn attesting_to_optimistic_head() { +#[tokio::test] +async fn attesting_to_optimistic_head() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let root = rig.import_block(Payload::Syncing); + let root = rig.import_block(Payload::Syncing).await; - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let slot = head.beacon_block.slot(); assert_eq!( head.beacon_block_root, root, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 771295c415e..560e865a8f2 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -72,18 +72,20 @@ fn get_harness( harness } -#[test] -fn full_participation_no_skips() { +#[tokio::test] +async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store); @@ -91,8 +93,8 @@ fn full_participation_no_skips() { check_iterators(&harness); } -#[test] -fn randomised_skips() { +#[tokio::test] +async fn randomised_skips() { let num_slots = E::slots_per_epoch() * 5; let mut num_blocks_produced = 0; let db_path = tempdir().unwrap(); @@ -104,14 +106,16 @@ fn randomised_skips() { for slot in 1..=num_slots { if rng.gen_bool(0.8) { - harness.extend_chain( - 1, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(head_slot), - first_slot: Slot::new(slot), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(head_slot), + first_slot: Slot::new(slot), + }, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); num_blocks_produced += 1; head_slot = slot; @@ -120,7 +124,7 @@ fn randomised_skips() { } } - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -133,8 +137,8 @@ fn randomised_skips() { check_iterators(&harness); } 
-#[test] -fn long_skip() { +#[tokio::test] +async fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -148,11 +152,13 @@ fn long_skip() { // Having this set lower ensures that we start justifying and finalizing quickly after a skip. let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2; - harness.extend_chain( - initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks); @@ -162,14 +168,16 @@ fn long_skip() { } // 3. Produce more blocks, establish a new finalized epoch - harness.extend_chain( - final_blocks as usize, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + final_blocks as usize, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(initial_blocks), + first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + }, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks + skip_slots + final_blocks); check_split_slot(&harness, store); @@ -183,8 +191,8 @@ fn long_skip() { /// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value /// 2. We correctly load the genesis value for all required slots /// NOTE: this test takes about a minute to run -#[test] -fn randao_genesis_storage() { +#[tokio::test] +async fn randao_genesis_storage() { let validator_count = 8; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -195,24 +203,24 @@ fn randao_genesis_storage() { // Check we have a non-trivial genesis value let genesis_value = *harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .get_randao_mix(Epoch::new(0)) .expect("randao mix ok"); assert!(!genesis_value.is_zero()); - harness.extend_chain( - num_slots as usize - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_slots as usize - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Check that genesis value is still present assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -221,15 +229,16 @@ fn randao_genesis_storage() { // Then upon adding one more block, it isn't harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -243,8 +252,8 @@ fn randao_genesis_storage() { } // Check that closing and reopening a freezer DB restores the split slot to its correct value. 
-#[test] -fn split_slot_restore() { +#[tokio::test] +async fn split_slot_restore() { let db_path = tempdir().unwrap(); let split_slot = { @@ -253,11 +262,13 @@ fn split_slot_restore() { let num_blocks = 4 * E::slots_per_epoch(); - harness.extend_chain( - num_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; store.get_split_slot() }; @@ -272,8 +283,8 @@ fn split_slot_restore() { // Check attestation processing and `load_epoch_boundary_state` in the presence of a split DB. // This is a bit of a monster test in that it tests lots of different things, but until they're // tested elsewhere, this is as good a place as any. -#[test] -fn epoch_boundary_state_attestation_processing() { +#[tokio::test] +async fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -285,13 +296,15 @@ fn epoch_boundary_state_attestation_processing() { let mut late_attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(timely_validators.clone()), - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(timely_validators.clone()), + ) + .await; - let head = harness.chain.head().expect("head ok"); + let head = harness.chain.head_snapshot(); late_attestations.extend(harness.get_unaggregated_attestations( &AttestationStrategy::SomeValidators(late_validators.clone()), &head.beacon_state, @@ -328,12 +341,7 @@ fn epoch_boundary_state_attestation_processing() { assert_eq!(epoch_boundary_state, ebs_of_ebs); // If the attestation is pre-finalization it should be rejected. - let finalized_epoch = harness - .chain - .head_info() - .expect("should get head") - .finalized_checkpoint - .epoch; + let finalized_epoch = harness.finalized_checkpoint().epoch; let res = harness .chain @@ -364,8 +372,8 @@ fn epoch_boundary_state_attestation_processing() { } // Test that the `end_slot` for forwards block and state root iterators works correctly. 
-#[test] -fn forwards_iter_block_and_state_roots_until() { +#[tokio::test] +async fn forwards_iter_block_and_state_roots_until() { let num_blocks_produced = E::slots_per_epoch() * 17; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -373,13 +381,14 @@ fn forwards_iter_block_and_state_roots_until() { let all_validators = &harness.get_all_validators(); let (mut head_state, mut head_state_root) = harness.get_current_state_and_root(); - let head_block_root = harness.chain.head_info().unwrap().block_root; + let head_block_root = harness.head_block_root(); let mut block_roots = vec![head_block_root]; let mut state_roots = vec![head_state_root]; for slot in (1..=num_blocks_produced).map(Slot::from) { let (block_root, mut state) = harness .add_attested_block_at_slot(slot, head_state, head_state_root, all_validators) + .await .unwrap(); head_state_root = state.update_tree_hash_cache().unwrap(); head_state = state; @@ -429,19 +438,21 @@ fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[test] -fn block_replay_with_inaccurate_state_roots() { +#[tokio::test] +async fn block_replay_with_inaccurate_state_roots() { let num_blocks_produced = E::slots_per_epoch() * 3 + 31; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let chain = &harness.chain; - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Slot must not be 0 mod 32 or else no blocks will be replayed. let (mut head_state, head_root) = harness.get_current_state_and_root(); @@ -471,8 +482,8 @@ fn block_replay_with_inaccurate_state_roots() { ); } -#[test] -fn block_replayer_hooks() { +#[tokio::test] +async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -487,12 +498,9 @@ fn block_replayer_hooks() { let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - let (_, _, end_block_root, mut end_state) = harness.add_attested_blocks_at_slots( - state.clone(), - state_root, - &block_slots, - &all_validators, - ); + let (_, _, end_block_root, mut end_state) = harness + .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) + .await; let blocks = store .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) @@ -548,8 +556,8 @@ fn block_replayer_hooks() { assert_eq!(end_state, replay_state); } -#[test] -fn delete_blocks_and_states() { +#[tokio::test] +async fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let validators_keypairs = @@ -567,7 +575,9 @@ fn delete_blocks_and_states() { let initial_slots: Vec = (1..=unforked_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; // Create a fork post-finalization. 
let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; @@ -587,20 +597,21 @@ fn delete_blocks_and_states() { let fork1_state = harness.get_current_state(); let fork2_state = fork1_state.clone(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, honest_validators), - (fork2_state, fork2_slots, faulty_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, honest_validators), + (fork2_state, fork2_slots, faulty_validators), + ]) + .await; let honest_head = results[0].2; let faulty_head = results[1].2; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let head_info = harness.chain.head_info().expect("should get head"); - assert_eq!(head_info.slot, unforked_blocks + fork_blocks); + assert_eq!(harness.head_slot(), unforked_blocks + fork_blocks); assert_eq!( - head_info.block_root, + harness.head_block_root(), honest_head.into(), "the honest chain should be the canonical chain", ); @@ -671,7 +682,7 @@ fn delete_blocks_and_states() { // Check that we never produce invalid blocks when there is deep forking that changes the shuffling. // See https://github.com/sigp/lighthouse/issues/845 -fn multi_epoch_fork_valid_blocks_test( +async fn multi_epoch_fork_valid_blocks_test( initial_blocks: usize, num_fork1_blocks_: usize, num_fork2_blocks_: usize, @@ -696,7 +707,9 @@ fn multi_epoch_fork_valid_blocks_test( let initial_slots: Vec = (1..=initial_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; } assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); @@ -714,10 +727,12 @@ fn multi_epoch_fork_valid_blocks_test( .map(Into::into) .collect(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, fork1_validators), - (fork2_state, fork2_slots, fork2_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, fork1_validators), + (fork2_state, fork2_slots, fork2_validators), + ]) + .await; let head1 = results[0].2; let head2 = results[1].2; @@ -726,43 +741,47 @@ fn multi_epoch_fork_valid_blocks_test( } // This is the minimal test of block production with different shufflings. -#[test] -fn block_production_different_shuffling_early() { +#[tokio::test] +async fn block_production_different_shuffling_early() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( slots_per_epoch - 2, slots_per_epoch + 3, slots_per_epoch + 3, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } -#[test] -fn block_production_different_shuffling_long() { +#[tokio::test] +async fn block_production_different_shuffling_long() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } // Check that the op pool safely includes multiple attestations per block when necessary. // This checks the correctness of the shuffling compatibility memoization. 
-#[test] -fn multiple_attestations_per_block() { +#[tokio::test] +async fn multiple_attestations_per_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store, HIGH_VALIDATOR_COUNT); - harness.extend_chain( - E::slots_per_epoch() as usize * 3, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize * 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot()) @@ -774,6 +793,8 @@ fn multiple_attestations_per_block() { assert_eq!( snapshot .beacon_block + .as_ref() + .clone() .deconstruct() .0 .body() @@ -784,18 +805,20 @@ fn multiple_attestations_per_block() { } } -#[test] -fn shuffling_compatible_linear_chain() { +#[tokio::test] +async fn shuffling_compatible_linear_chain() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. - let head_block_root = harness.extend_chain( - 4 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 4 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -808,25 +831,29 @@ fn shuffling_compatible_linear_chain() { ); } -#[test] -fn shuffling_compatible_missing_pivot_block() { +#[tokio::test] +async fn shuffling_compatible_missing_pivot_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. 
- harness.extend_chain( - E::slots_per_epoch() as usize - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize - 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); harness.advance_slot(); - let head_block_root = harness.extend_chain( - 2 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 2 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -839,15 +866,16 @@ fn shuffling_compatible_missing_pivot_block() { ); } -#[test] -fn shuffling_compatible_simple_fork() { +#[tokio::test] +async fn shuffling_compatible_simple_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -860,15 +888,16 @@ fn shuffling_compatible_simple_fork() { drop(db_path); } -#[test] -fn shuffling_compatible_short_fork() { +#[tokio::test] +async fn shuffling_compatible_short_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, slots_per_epoch + 2, slots_per_epoch + 2, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -973,8 +1002,8 @@ fn check_shuffling_compatible( } // Ensure blocks from abandoned forks are pruned from the Hot DB -#[test] -fn prunes_abandoned_fork_between_two_finalized_checkpoints() { +#[tokio::test] +async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -997,7 +1026,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { state_root, &canonical_chain_slots, &honest_validators, - ); + ) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1005,12 +1035,14 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let (current_state, current_state_root) = rig.get_current_state_and_root(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - current_state, - current_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + current_state, + current_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known for &block_hash in stray_blocks.values() { @@ -1040,12 +1072,9 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks_post_finalization, _, _, _) = rig + 
.add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postcondition: New blocks got finalized assert_eq!( @@ -1083,8 +1112,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { assert!(!rig.chain.knows_head(&stray_head)); } -#[test] -fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { +#[tokio::test] +async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1103,12 +1132,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { // Fill up 0th epoch let canonical_chain_slots_zeroth_epoch: Vec = (1..rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (_, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &canonical_chain_slots_zeroth_epoch, - &honest_validators, - ); + let (_, _, _, mut state) = rig + .add_attested_blocks_at_slots( + state, + state_root, + &canonical_chain_slots_zeroth_epoch, + &honest_validators, + ) + .await; // Fill up 1st epoch let canonical_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) @@ -1122,7 +1153,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { state_root, &canonical_chain_slots_first_epoch, &honest_validators, - ); + ) + .await; let canonical_chain_slot: u64 = rig.get_current_slot().into(); let stray_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 2 @@ -1130,12 +1162,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &stray_chain_slots_first_epoch, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &stray_chain_slots_first_epoch, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1169,12 +1203,9 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1213,8 +1244,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(get_blocks(&chain_dump).contains(&shared_head)); } -#[test] -fn pruning_does_not_touch_blocks_prior_to_finalization() { +#[tokio::test] +async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 16; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1232,12 +1263,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_chain_blocks, _, _, new_state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - 
&honest_validators, - ); + let (canonical_chain_blocks, _, _, new_state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1246,12 +1274,14 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &first_epoch_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &first_epoch_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1279,8 +1309,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, _, _) = - rig.add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators); + let (_, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1308,8 +1339,8 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { assert!(rig.chain.knows_head(&stray_head)); } -#[test] -fn prunes_fork_growing_past_youngest_finalized_checkpoint() { +#[tokio::test] +async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1326,12 +1357,9 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - &honest_validators, - ); + let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; // Fill up 1st epoch. Contains a fork. let slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2)) @@ -1344,9 +1372,11 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { state_root, &slots_first_epoch, &adversarial_validators, - ); - let (canonical_blocks_first_epoch, _, _, mut canonical_state) = - rig.add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators); + ) + .await; + let (canonical_blocks_first_epoch, _, _, mut canonical_state) = rig + .add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators) + .await; // Fill up 2nd epoch. Extends both the canonical chain and the fork. 
let stray_slots_second_epoch: Vec = (rig.epoch_start_slot(2) @@ -1360,7 +1390,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { stray_state_root, &stray_slots_second_epoch, &adversarial_validators, - ); + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known let stray_blocks: HashMap = stray_blocks_first_epoch @@ -1400,12 +1431,14 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1451,8 +1484,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { } // This is to check if state outside of normal block processing are pruned correctly. -#[test] -fn prunes_skipped_slots_states() { +#[tokio::test] +async fn prunes_skipped_slots_states() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1475,7 +1508,8 @@ fn prunes_skipped_slots_states() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into(); @@ -1483,12 +1517,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1526,12 +1562,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1575,8 +1613,8 @@ fn prunes_skipped_slots_states() { } // This is to check if state outside of normal block processing are pruned correctly. 
-#[test] -fn finalizes_non_epoch_start_slot() { +#[tokio::test] +async fn finalizes_non_epoch_start_slot() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1599,7 +1637,8 @@ fn finalizes_non_epoch_start_slot() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = rig.epoch_start_slot(1).into(); @@ -1607,12 +1646,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1650,12 +1691,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1759,14 +1802,14 @@ fn check_no_blocks_exist<'a>( } } -#[test] -fn prune_single_block_fork() { +#[tokio::test] +async fn prune_single_block_fork() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1); + pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1).await; } -#[test] -fn prune_single_block_long_skip() { +#[tokio::test] +async fn prune_single_block_long_skip() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( 2 * slots_per_epoch, @@ -1774,11 +1817,12 @@ fn prune_single_block_long_skip() { 2 * slots_per_epoch, 2 * slots_per_epoch as u64, 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_mid_epoch() { +#[tokio::test] +async fn prune_shared_skip_states_mid_epoch() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( slots_per_epoch + slots_per_epoch / 2, @@ -1786,39 +1830,43 @@ fn prune_shared_skip_states_mid_epoch() { slots_per_epoch, 2, slots_per_epoch - 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_epoch_boundaries() { +#[tokio::test] +async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch); - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch); + pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; + pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; 
pruning_test( 2 * slots_per_epoch - 1, slots_per_epoch as u64, 1, 0, 2 * slots_per_epoch, - ); + ) + .await; } /// Generic harness for pruning tests. -fn pruning_test( +async fn pruning_test( // Number of blocks to start the chain with before forking. num_initial_blocks: u64, // Number of skip slots on the main chain after the initial blocks. @@ -1850,30 +1898,34 @@ fn pruning_test( let start_slot = Slot::new(1); let divergence_slot = start_slot + num_initial_blocks; let (state, state_root) = harness.get_current_state_and_root(); - let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots( - state, - state_root, - &slots(start_slot, num_initial_blocks)[..], - &honest_validators, - ); - - let mut chains = harness.add_blocks_on_multiple_chains(vec![ - // Canonical chain - ( - divergence_state.clone(), - slots( - divergence_slot + num_canonical_skips, - num_canonical_middle_blocks, + let (_, _, _, divergence_state) = harness + .add_attested_blocks_at_slots( + state, + state_root, + &slots(start_slot, num_initial_blocks)[..], + &honest_validators, + ) + .await; + + let mut chains = harness + .add_blocks_on_multiple_chains(vec![ + // Canonical chain + ( + divergence_state.clone(), + slots( + divergence_slot + num_canonical_skips, + num_canonical_middle_blocks, + ), + honest_validators.clone(), + ), + // Fork chain + ( + divergence_state.clone(), + slots(divergence_slot + num_fork_skips, num_fork_blocks), + faulty_validators, ), - honest_validators.clone(), - ), - // Fork chain - ( - divergence_state.clone(), - slots(divergence_slot + num_fork_skips, num_fork_blocks), - faulty_validators, - ), - ]); + ]) + .await; let (_, _, _, mut canonical_state) = chains.remove(0); let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0); @@ -1899,20 +1951,19 @@ fn pruning_test( let num_finalization_blocks = 4 * E::slots_per_epoch(); let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks; let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - harness.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &slots(canonical_slot, num_finalization_blocks), - &honest_validators, - ); + harness + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &slots(canonical_slot, num_finalization_blocks), + &honest_validators, + ) + .await; // Check that finalization has advanced past the divergence slot. assert!( harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()) > divergence_slot @@ -1940,43 +1991,48 @@ fn garbage_collect_temp_states_from_failed_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let slots_per_epoch = E::slots_per_epoch(); - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot); - - let (mut block, _) = signed_block.deconstruct(); - - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, - ); + // Use a `block_on_dangerous` rather than an async test to stop spawned processes from holding + // a reference to the store. 
+ harness.chain.task_executor.clone().block_on_dangerous( + async move { + let slots_per_epoch = E::slots_per_epoch(); + + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; + + let (mut block, _) = signed_block.deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + ); - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness.process_block_result(block).unwrap_err(); + // The block should be rejected, but should store a bunch of temporary states. + harness.set_current_slot(block_slot); + harness.process_block_result(block).await.unwrap_err(); - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + }, + "test", ); - drop(harness); - drop(store); - // On startup, the store should garbage collect all the temporary states. let store = get_store(&db_path); assert_eq!(store.iter_temporary_state_roots().count(), 0); } -#[test] -fn weak_subjectivity_sync() { +#[tokio::test] +async fn weak_subjectivity_sync() { // Build an initial chain on one harness, representing a synced node with full history. let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; @@ -1985,17 +2041,19 @@ fn weak_subjectivity_sync() { let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let genesis_state = full_store .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) .unwrap() .unwrap(); - let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint; + let wss_checkpoint = harness.finalized_checkpoint(); let wss_block = harness .chain .store @@ -2010,11 +2068,13 @@ fn weak_subjectivity_sync() { // Add more blocks that advance finalization further. 
harness.advance_slot(); - harness.extend_chain( - num_final_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); let log = test_logger(); @@ -2028,6 +2088,7 @@ fn weak_subjectivity_sync() { BeaconChainBuilder::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) .unwrap() .logger(log.clone()) @@ -2058,12 +2119,15 @@ fn weak_subjectivity_sync() { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, block.clone()) + .make_full_block(&snapshot.beacon_block_root, block.as_ref().clone()) .unwrap(); beacon_chain.slot_clock.set_slot(block.slot().as_u64()); - beacon_chain.process_block(full_block).unwrap(); - beacon_chain.fork_choice().unwrap(); + beacon_chain + .process_block(Arc::new(full_block)) + .await + .unwrap(); + beacon_chain.recompute_head_at_current_slot().await.unwrap(); // Check that the new block's state can be loaded correctly. let state_root = block.state_root(); @@ -2157,8 +2221,8 @@ fn weak_subjectivity_sync() { assert_eq!(store.get_anchor_slot(), None); } -#[test] -fn finalizes_after_resuming_from_db() { +#[tokio::test] +async fn finalizes_after_resuming_from_db() { let validator_count = 16; let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; let first_half = num_blocks_produced / 2; @@ -2175,17 +2239,18 @@ fn finalizes_after_resuming_from_db() { harness.advance_slot(); - harness.extend_chain( - first_half as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + first_half as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!( harness .chain - .head() - .expect("should read head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2227,17 +2292,15 @@ fn finalizes_after_resuming_from_db() { .slot_clock .set_slot(latest_slot.as_u64() + 1); - resumed_harness.extend_chain( - (num_blocks_produced - first_half) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + resumed_harness + .extend_chain( + (num_blocks_produced - first_half) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &resumed_harness - .chain - .head() - .expect("should read head") - .beacon_state; + let state = &resumed_harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), num_blocks_produced, @@ -2260,8 +2323,8 @@ fn finalizes_after_resuming_from_db() { ); } -#[test] -fn revert_minority_fork_on_resume() { +#[tokio::test] +async fn revert_minority_fork_on_resume() { let validator_count = 16; let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); @@ -2317,17 +2380,17 @@ fn revert_minority_fork_on_resume() { harness1.process_attestations(attestations.clone()); harness2.process_attestations(attestations); - let (block, new_state) = harness1.make_block(state, slot); + let (block, new_state) = harness1.make_block(state, slot).await; - harness1.process_block(slot, block.clone()).unwrap(); - harness2.process_block(slot, block.clone()).unwrap(); + harness1.process_block(slot, block.clone()).await.unwrap(); + harness2.process_block(slot, 
block.clone()).await.unwrap(); state = new_state; block_root = block.canonical_root(); } - assert_eq!(harness1.chain.head_info().unwrap().slot, fork_slot - 1); - assert_eq!(harness2.chain.head_info().unwrap().slot, fork_slot - 1); + assert_eq!(harness1.head_slot(), fork_slot - 1); + assert_eq!(harness2.head_slot(), fork_slot - 1); // Fork the two chains. let mut state1 = state.clone(); @@ -2352,13 +2415,13 @@ fn revert_minority_fork_on_resume() { harness2.process_attestations(attestations); // Minority chain block (no attesters). - let (block1, new_state1) = harness1.make_block(state1, slot); - harness1.process_block(slot, block1).unwrap(); + let (block1, new_state1) = harness1.make_block(state1, slot).await; + harness1.process_block(slot, block1).await.unwrap(); state1 = new_state1; // Majority chain block (all attesters). - let (block2, new_state2) = harness2.make_block(state2, slot); - harness2.process_block(slot, block2.clone()).unwrap(); + let (block2, new_state2) = harness2.make_block(state2, slot).await; + harness2.process_block(slot, block2.clone()).await.unwrap(); state2 = new_state2; block_root = block2.canonical_root(); @@ -2367,8 +2430,8 @@ fn revert_minority_fork_on_resume() { } let end_slot = fork_slot + post_fork_blocks - 1; - assert_eq!(harness1.chain.head_info().unwrap().slot, end_slot); - assert_eq!(harness2.chain.head_info().unwrap().slot, end_slot); + assert_eq!(harness1.head_slot(), end_slot); + assert_eq!(harness2.head_slot(), end_slot); // Resume from disk with the hard-fork activated: this should revert the post-fork blocks. // We have to do some hackery with the `slot_clock` so that the correct slot is set when @@ -2396,24 +2459,35 @@ fn revert_minority_fork_on_resume() { .build(); // Head should now be just before the fork. - resumed_harness.chain.fork_choice().unwrap(); - let head = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head.slot, fork_slot - 1); + resumed_harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + assert_eq!(resumed_harness.head_slot(), fork_slot - 1); // Head track should know the canonical head and the rogue head. assert_eq!(resumed_harness.chain.heads().len(), 2); - assert!(resumed_harness.chain.knows_head(&head.block_root.into())); + assert!(resumed_harness + .chain + .knows_head(&resumed_harness.head_block_root().into())); // Apply blocks from the majority chain and trigger finalization. let initial_split_slot = resumed_harness.chain.store.get_split_slot(); for block in &majority_blocks { - resumed_harness.process_block_result(block.clone()).unwrap(); + resumed_harness + .process_block_result(block.clone()) + .await + .unwrap(); // The canonical head should be the block from the majority chain. 
- resumed_harness.chain.fork_choice().unwrap(); - let head_info = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head_info.slot, block.slot()); - assert_eq!(head_info.block_root, block.canonical_root()); + resumed_harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + assert_eq!(resumed_harness.head_slot(), block.slot()); + assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); } let advanced_split_slot = resumed_harness.chain.store.get_split_slot(); @@ -2432,10 +2506,22 @@ fn revert_minority_fork_on_resume() { fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { assert_eq!(a.spec, b.spec, "spec should be equal"); assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); + let a_head = a.head_snapshot(); + let b_head = b.head_snapshot(); assert_eq!( - a.head().unwrap(), - b.head().unwrap(), - "head() should be equal" + a_head.beacon_block_root, b_head.beacon_block_root, + "head block roots should be equal" + ); + assert_eq!( + a_head.beacon_block, b_head.beacon_block, + "head blocks should be equal" + ); + // Clone with committee caches only to prevent other caches from messing with the equality + // check. + assert_eq!( + a_head.beacon_state.clone_with_only_committee_caches(), + b_head.beacon_state.clone_with_only_committee_caches(), + "head states should be equal" ); assert_eq!(a.heads(), b.heads(), "heads() should be equal"); assert_eq!( @@ -2446,15 +2532,21 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b let slot = a.slot().unwrap(); let spec = T::EthSpec::default_spec(); assert!( - a.fork_choice.write().get_head(slot, &spec).unwrap() - == b.fork_choice.write().get_head(slot, &spec).unwrap(), + a.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap() + == b.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap(), "fork_choice heads should be equal" ); } /// Check that the head state's slot matches `expected_slot`. fn check_slot(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -2465,7 +2557,7 @@ fn check_slot(harness: &TestHarness, expected_slot: u64) { /// Check that the chain has finalized under best-case assumptions, and check the head slot. fn check_finalization(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; check_slot(harness, expected_slot); @@ -2487,8 +2579,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L assert_eq!( harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2575,10 +2666,7 @@ fn check_iterators(harness: &TestHarness) { max_slot = Some(slot); } // Assert that we reached the head. - assert_eq!( - max_slot, - Some(harness.chain.head_info().expect("should get head").slot) - ); + assert_eq!(max_slot, Some(harness.head_slot())); // Assert that the block root iterator reaches the head. 
assert_eq!( harness @@ -2588,7 +2676,7 @@ fn check_iterators(harness: &TestHarness) { .last() .map(Result::unwrap) .map(|(_, slot)| slot), - Some(harness.chain.head_info().expect("should get head").slot) + Some(harness.head_slot()) ); } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 626c132d69e..1e51b0ffb9b 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -46,15 +46,8 @@ fn get_valid_sync_committee_message( slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_state = harness.chain.head_beacon_state_cloned(); + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let (signature, _) = harness .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) @@ -77,16 +70,9 @@ fn get_valid_sync_contribution( harness: &BeaconChainHarness>, relative_sync_committee: RelativeSyncCommittee, ) -> (SignedContributionAndProof, usize, SecretKey) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); + let head_state = harness.chain.head_beacon_state_cloned(); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let sync_contributions = harness.make_sync_contributions( &head_state, head_block_root, @@ -116,7 +102,7 @@ fn get_non_aggregator( harness: &BeaconChainHarness>, slot: Slot, ) -> (usize, SecretKey) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; let sync_subcommittee_size = E::sync_committee_size() .safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize) .expect("should determine sync subcommittee size"); @@ -162,17 +148,19 @@ fn get_non_aggregator( } /// Tests verification of `SignedContributionAndProof` from the gossip network. 
-#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1), Slot::new(2)], - (0..VALIDATOR_COUNT).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); @@ -406,7 +394,7 @@ fn aggregated_gossip_verification() { valid_aggregate.message.contribution.clone(), None, &non_aggregator_sk, - &harness.chain.head_info().expect("should get head info").fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ) @@ -474,6 +462,7 @@ fn aggregated_gossip_verification() { harness .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .await .expect("should add block"); // **Incorrectly** create a sync contribution using the current sync committee @@ -488,17 +477,19 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for sync committee messages on the gossip network. -#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1), Slot::new(2)], - (0..VALIDATOR_COUNT).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); @@ -648,6 +639,7 @@ fn unaggregated_gossip_verification() { harness .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .await .expect("should add block"); // **Incorrectly** create a sync message using the current sync committee diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index b079baf2ff0..547419ebf4e 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,14 +6,16 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - StateSkipConfig, WhenSlotSkipped, + BeaconChain, StateSkipConfig, WhenSlotSkipped, }; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError, }; -use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; +use types::{ + BeaconState, BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot, +}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -40,7 +42,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness = harness .chain @@ -122,7 +126,7 @@ fn iterators() { ) }); - let head = &harness.chain.head().expect("should get head"); + let head = harness.chain.head_snapshot(); assert_eq!( *block_roots.last().expect("should have some block roots"), @@ -137,20 +141,44 @@ fn iterators() { ); } -#[test] -fn find_reorgs() { +fn find_reorg_slot( + chain: &BeaconChain>, + new_state: &BeaconState, + new_block_root: Hash256, +) -> Slot { + let (old_state, old_block_root) = { + let head = chain.canonical_head.cached_head(); + let old_state = head.snapshot.beacon_state.clone(); + let old_block_root = head.head_block_root(); + (old_state, old_block_root) + }; + beacon_chain::canonical_head::find_reorg_slot( + &old_state, + old_block_root, + new_state, + new_block_root, + &chain.spec, + ) + .unwrap() +} + +#[tokio::test] +async fn find_reorgs() { let num_blocks_produced = MinimalEthSpec::slots_per_historical_root() + 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - // No need to produce attestations for this test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + // No need to produce attestations for this test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head_state = harness.chain.head_beacon_state().unwrap(); + let head = harness.chain.head_snapshot(); + let head_state = &head.beacon_state; let head_slot = head_state.slot(); let genesis_state = harness .chain @@ -160,10 +188,11 @@ fn find_reorgs() { // because genesis is more than `SLOTS_PER_HISTORICAL_ROOT` away, this should return with the // finalized slot. assert_eq!( - harness - .chain - .find_reorg_slot(&genesis_state, harness.chain.genesis_block_root) - .unwrap(), + find_reorg_slot( + &harness.chain, + &genesis_state, + harness.chain.genesis_block_root + ), head_state .finalized_checkpoint() .epoch @@ -172,13 +201,11 @@ fn find_reorgs() { // test head assert_eq!( - harness - .chain - .find_reorg_slot( - &head_state, - harness.chain.head_beacon_block().unwrap().canonical_root() - ) - .unwrap(), + find_reorg_slot( + &harness.chain, + &head_state, + harness.chain.head_beacon_block().canonical_root() + ), head_slot ); @@ -194,16 +221,13 @@ fn find_reorgs() { .unwrap() .unwrap(); assert_eq!( - harness - .chain - .find_reorg_slot(&prev_state, prev_block_root) - .unwrap(), + find_reorg_slot(&harness.chain, &prev_state, prev_block_root), prev_slot ); } -#[test] -fn chooses_fork() { +#[tokio::test] +async fn chooses_fork() { let harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; @@ -217,22 +241,27 @@ fn chooses_fork() { let faulty_fork_blocks = delay + 2; // Build an initial chain where all validators agree. 
- harness.extend_chain( - initial_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); - - let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( - &honest_validators, - &faulty_validators, - honest_fork_blocks, - faulty_fork_blocks, - ); + harness + .extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let (honest_head, faulty_head) = harness + .generate_two_forks_by_skipping_a_block( + &honest_validators, + &faulty_validators, + honest_fork_blocks, + faulty_fork_blocks, + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -241,29 +270,28 @@ fn chooses_fork() { ); assert_eq!( - harness - .chain - .head() - .expect("should get head") - .beacon_block_root, + harness.chain.head_snapshot().beacon_block_root, honest_head, "the honest chain should be the canonical chain" ); } -#[test] -fn finalizes_with_full_participation() { +#[tokio::test] +async fn finalizes_with_full_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -287,8 +315,8 @@ fn finalizes_with_full_participation() { ); } -#[test] -fn finalizes_with_two_thirds_participation() { +#[tokio::test] +async fn finalizes_with_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -296,13 +324,16 @@ fn finalizes_with_two_thirds_participation() { let two_thirds = (VALIDATOR_COUNT / 3) * 2; let attesters = (0..two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -331,8 +362,8 @@ fn finalizes_with_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_with_less_than_two_thirds_participation() { +#[tokio::test] +async fn does_not_finalize_with_less_than_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -341,13 +372,16 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { let less_than_two_thirds = two_thirds - 1; let attesters = (0..less_than_two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + 
AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -371,19 +405,22 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_without_attestation() { +#[tokio::test] +async fn does_not_finalize_without_attestation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -407,18 +444,20 @@ fn does_not_finalize_without_attestation() { ); } -#[test] -fn roundtrip_operation_pool() { +#[tokio::test] +async fn roundtrip_operation_pool() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); // Add some attestations - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness.chain.op_pool.num_attestations() > 0); // TODO: could add some other operations @@ -439,20 +478,23 @@ fn roundtrip_operation_pool() { assert_eq!(harness.chain.op_pool, restored_op_pool); } -#[test] -fn unaggregated_attestations_added_to_fork_choice_some_none() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_some_none() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); @@ -493,8 +535,8 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() { } } -#[test] -fn attestations_with_increasing_slots() { +#[tokio::test] +async fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -502,14 +544,16 @@ fn attestations_with_increasing_slots() { let mut attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - // Don't produce & include any attestations (we'll collect them later). - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + // Don't produce & include any attestations (we'll collect them later). 
+ AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let head_state_root = head.beacon_state_root(); attestations.extend(harness.get_unaggregated_attestations( @@ -548,20 +592,23 @@ fn attestations_with_increasing_slots() { } } -#[test] -fn unaggregated_attestations_added_to_fork_choice_all_updated() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); @@ -605,7 +652,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() { } } -fn run_skip_slot_test(skip_slots: u64) { +async fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; let harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); @@ -615,83 +662,60 @@ fn run_skip_slot_test(skip_slots: u64) { harness_b.advance_slot(); } - harness_a.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - // No attestation required for test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness_a + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; assert_eq!( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_a.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(0) ); assert_eq!( harness_b .chain - .process_block( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .clone(), - ) + .process_block(harness_a.chain.head_snapshot().beacon_block.clone()) + .await .unwrap(), - harness_a - .chain - .head() - .expect("should get head") - .beacon_block_root + harness_a.chain.head_snapshot().beacon_block_root ); harness_b .chain - .fork_choice() + .recompute_head_at_current_slot() + .await .expect("should run fork choice"); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); } -#[test] -fn produces_and_processes_with_genesis_skip_slots() { +#[tokio::test] +async fn produces_and_processes_with_genesis_skip_slots() { for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { - run_skip_slot_test(i) + run_skip_slot_test(i).await } } -#[test] -fn block_roots_skip_slot_behaviour() { +#[tokio::test] +async fn block_roots_skip_slot_behaviour() { let harness = get_harness(VALIDATOR_COUNT); // Test should be longer than the block roots to ensure a DB lookup is triggered. 
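// A minimal sketch of the two-step flow that `run_skip_slot_test` above now
// exercises: `process_block(..).await` imports a block, but the head only
// moves once `recompute_head_at_current_slot().await` runs (this replaces
// the old blocking `fork_choice()` call). All calls mirror those already in
// this file; the test name is illustrative only.
#[tokio::test]
async fn example_import_then_recompute_head() {
    let producer = get_harness(VALIDATOR_COUNT);
    let importer = get_harness(VALIDATOR_COUNT);

    producer
        .extend_chain(
            1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(vec![]),
        )
        .await;

    // Import the producer's head block into the other chain.
    importer
        .chain
        .process_block(producer.chain.head_snapshot().beacon_block.clone())
        .await
        .expect("block should import");

    // The importer's head is unchanged until fork choice is re-run explicitly.
    importer
        .chain
        .recompute_head_at_current_slot()
        .await
        .expect("should run fork choice");

    assert_eq!(
        importer.chain.head_snapshot().beacon_block_root,
        producer.chain.head_snapshot().beacon_block_root
    );
}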
let chain_length = harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .block_roots() .len() as u64 @@ -708,11 +732,13 @@ fn block_roots_skip_slot_behaviour() { let slot = harness.chain.slot().unwrap().as_u64(); if !skipped_slots.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } } @@ -820,7 +846,7 @@ fn block_roots_skip_slot_behaviour() { let future_slot = harness.chain.slot().unwrap() + 1; assert_eq!( - harness.chain.head().unwrap().beacon_block.slot(), + harness.chain.head_snapshot().beacon_block.slot(), future_slot - 2, "test precondition" ); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 1f02ec7b3c3..cb5cb520440 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -660,15 +660,13 @@ where if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { // Only send a head update *after* genesis. if let Ok(current_slot) = beacon_chain.slot() { - let head = beacon_chain - .head_info() - .map_err(|e| format!("Unable to read beacon chain head: {:?}", e))?; - - // Issue the head to the execution engine on startup. This ensures it can start - // syncing. - if head - .execution_payload_block_hash - .map_or(false, |h| h != ExecutionBlockHash::zero()) + let params = beacon_chain + .canonical_head + .cached_head() + .forkchoice_update_parameters(); + if params + .head_hash + .map_or(false, |hash| hash != ExecutionBlockHash::zero()) { // Spawn a new task using the "async" fork choice update method, rather than // using the "blocking" method. @@ -679,7 +677,7 @@ where runtime_context.executor.spawn( async move { let result = inner_chain - .update_execution_engine_forkchoice_async(current_slot) + .update_execution_engine_forkchoice(current_slot, params) .await; // No need to exit early if setting the head fails. 
It will be set again if/when the @@ -787,8 +785,16 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); + let inner_spec = spec.clone(); let schema_upgrade = |db, from, to| { - migrate_schema::>(db, datadir, from, to, log) + migrate_schema::>( + db, + datadir, + from, + to, + log, + &inner_spec, + ) }; let store = HotColdDB::open( diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 22c3bfcb3a8..9476819a4b3 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,5 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus}; +use beacon_chain::{BeaconChain, BeaconChainTypes, ExecutionStatus}; use lighthouse_network::{types::SyncState, NetworkGlobals}; use parking_lot::Mutex; use slog::{crit, debug, error, info, warn, Logger}; @@ -100,15 +100,10 @@ pub fn spawn_notifier( current_sync_state = sync_state; } - let head_info = match beacon_chain.head_info() { - Ok(head_info) => head_info, - Err(e) => { - error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e)); - break; - } - }; - - let head_slot = head_info.slot; + let cached_head = beacon_chain.canonical_head.cached_head(); + let head_slot = cached_head.head_slot(); + let head_root = cached_head.head_block_root(); + let finalized_checkpoint = cached_head.finalized_checkpoint(); metrics::set_gauge(&metrics::NOTIFIER_HEAD_SLOT, head_slot.as_u64() as i64); @@ -125,9 +120,6 @@ pub fn spawn_notifier( }; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let finalized_epoch = head_info.finalized_checkpoint.epoch; - let finalized_root = head_info.finalized_checkpoint.root; - let head_root = head_info.block_root; // The default is for regular sync but this gets modified if backfill sync is in // progress. 
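// A minimal sketch of the cached-head reads the notifier now performs: the
// cached head is an in-memory snapshot with infallible accessors, so the old
// `head_info()` error handling disappears. The `Checkpoint` return type is an
// assumption based on the `.root`/`.epoch` fields used above; the function
// name is illustrative only.
use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::{Checkpoint, Hash256, Slot};

fn head_summary<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> (Slot, Hash256, Checkpoint) {
    let cached_head = chain.canonical_head.cached_head();
    (
        cached_head.head_slot(),
        cached_head.head_block_root(),
        cached_head.finalized_checkpoint(),
    )
}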
@@ -177,8 +169,8 @@ pub fn spawn_notifier( log, "Slot timer"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_block" => format!("{}", head_root), "head_slot" => head_slot, "current_slot" => current_slot, @@ -264,35 +256,29 @@ pub fn spawn_notifier( head_root.to_string() }; - let block_hash = match beacon_chain.head_safety_status() { - Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt - .map(|hash| format!("{} (verified)", hash)) - .unwrap_or_else(|| "n/a".to_string()), - Ok(HeadSafetyStatus::Unsafe(block_hash)) => { + let block_hash = match beacon_chain.canonical_head.head_execution_status() { + Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(), + Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), + Ok(ExecutionStatus::Optimistic(hash)) => { warn!( log, - "Head execution payload is unverified"; - "execution_block_hash" => ?block_hash, + "Head is optimistic"; + "info" => "chain not fully verified, \ + block and attestation production disabled until execution engine syncs", + "execution_block_hash" => ?hash, ); - format!("{} (unverified)", block_hash) + format!("{} (unverified)", hash) } - Ok(HeadSafetyStatus::Invalid(block_hash)) => { + Ok(ExecutionStatus::Invalid(hash)) => { crit!( log, "Head execution payload is invalid"; "msg" => "this scenario may be unrecoverable", - "execution_block_hash" => ?block_hash, - ); - format!("{} (invalid)", block_hash) - } - Err(e) => { - error!( - log, - "Failed to read head safety status"; - "error" => ?e + "execution_block_hash" => ?hash, ); - "n/a".to_string() + format!("{} (invalid)", hash) } + Err(_) => "unknown".to_string(), }; info!( @@ -300,8 +286,8 @@ pub fn spawn_notifier( "Synced"; "peers" => peer_count_pretty(connected_peer_count), "exec_hash" => block_hash, - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "epoch" => current_epoch, "block" => block_info, "slot" => current_slot, @@ -312,8 +298,8 @@ pub fn spawn_notifier( log, "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_slot" => head_slot, "current_slot" => current_slot, ); @@ -332,57 +318,52 @@ pub fn spawn_notifier( fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); - if let Ok(head_info) = beacon_chain.head_info() { - // Perform some logging about the eth1 chain - if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { - // No need to do logging if using the dummy backend. - if eth1_chain.is_dummy_backend() { - return; - } + // Perform some logging about the eth1 chain + if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { + // No need to do logging if using the dummy backend. 
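// A minimal sketch of the `ExecutionStatus` mapping above, reduced to the
// string the notifier logs for each variant. The variants and their block
// hash payloads are exactly those matched in the diff; the function name is
// illustrative only.
use beacon_chain::ExecutionStatus;

fn describe_execution_status(status: ExecutionStatus) -> String {
    match status {
        // Pre-merge (or otherwise merge-irrelevant) blocks carry no payload.
        ExecutionStatus::Irrelevant(_) => "n/a".to_string(),
        ExecutionStatus::Valid(hash) => format!("{} (verified)", hash),
        // Optimistic: imported before the execution engine verified the payload.
        ExecutionStatus::Optimistic(hash) => format!("{} (unverified)", hash),
        ExecutionStatus::Invalid(hash) => format!("{} (invalid)", hash),
    }
}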
+ if eth1_chain.is_dummy_backend() { + return; + } - if let Some(status) = - eth1_chain.sync_status(head_info.genesis_time, current_slot_opt, &beacon_chain.spec) - { - debug!( - log, - "Eth1 cache sync status"; - "eth1_head_block" => status.head_block_number, - "latest_cached_block_number" => status.latest_cached_block_number, - "latest_cached_timestamp" => status.latest_cached_block_timestamp, - "voting_target_timestamp" => status.voting_target_timestamp, - "ready" => status.lighthouse_is_cached_and_ready - ); + if let Some(status) = eth1_chain.sync_status( + beacon_chain.genesis_time, + current_slot_opt, + &beacon_chain.spec, + ) { + debug!( + log, + "Eth1 cache sync status"; + "eth1_head_block" => status.head_block_number, + "latest_cached_block_number" => status.latest_cached_block_number, + "latest_cached_timestamp" => status.latest_cached_block_timestamp, + "voting_target_timestamp" => status.voting_target_timestamp, + "ready" => status.lighthouse_is_cached_and_ready + ); - if !status.lighthouse_is_cached_and_ready { - let voting_target_timestamp = status.voting_target_timestamp; + if !status.lighthouse_is_cached_and_ready { + let voting_target_timestamp = status.voting_target_timestamp; - let distance = status - .latest_cached_block_timestamp - .map(|latest| { - voting_target_timestamp.saturating_sub(latest) - / beacon_chain.spec.seconds_per_eth1_block - }) - .map(|distance| distance.to_string()) - .unwrap_or_else(|| "initializing deposits".to_string()); + let distance = status + .latest_cached_block_timestamp + .map(|latest| { + voting_target_timestamp.saturating_sub(latest) + / beacon_chain.spec.seconds_per_eth1_block + }) + .map(|distance| distance.to_string()) + .unwrap_or_else(|| "initializing deposits".to_string()); - warn!( - log, - "Syncing eth1 block cache"; - "est_blocks_remaining" => distance, - ); - } - } else { - error!( + warn!( log, - "Unable to determine eth1 sync status"; + "Syncing eth1 block cache"; + "est_blocks_remaining" => distance, ); } + } else { + error!( + log, + "Unable to determine eth1 sync status"; + ); } - } else { - error!( - log, - "Unable to get head info"; - ); } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d6acd5fe54c..730f4c588f5 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -298,31 +298,6 @@ impl ExecutionLayer { self.inner.execution_engine_forkchoice_lock.lock().await } - /// Convenience function to allow calling async functions in a non-async context. - pub fn block_on<'a, T, U, V>(&'a self, generate_future: T) -> Result - where - T: Fn(&'a Self) -> U, - U: Future>, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - runtime.block_on(generate_future(self)) - } - - /// Convenience function to allow calling async functions in a non-async context. - /// - /// The function is "generic" since it does not enforce a particular return type on - /// `generate_future`. - pub fn block_on_generic<'a, T, U, V>(&'a self, generate_future: T) -> Result - where - T: Fn(&'a Self) -> U, - U: Future, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - Ok(runtime.block_on(generate_future(self))) - } - /// Convenience function to allow spawning a task without waiting for the result. 
pub fn spawn(&self, generate_future: T, name: &'static str) where @@ -459,19 +434,7 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - pub fn update_proposer_preparation_blocking( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) -> Result<(), Error> { - self.block_on_generic(|_| async move { - self.update_proposer_preparation(update_epoch, preparation_data) - .await - }) - } - - /// Updates the proposer preparation data provided by validators - async fn update_proposer_preparation( + pub async fn update_proposer_preparation( &self, update_epoch: Epoch, preparation_data: &[ProposerPreparationData], diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9207067e33d..35a35bcb74f 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -58,12 +58,10 @@ fn cached_attestation_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let head = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_block_root = chain.canonical_head.cached_head().head_block_root(); - let (duties, dependent_root) = chain - .validator_attestation_duties(request_indices, request_epoch, head.block_root) + let (duties, dependent_root, _execution_status) = chain + .validator_attestation_duties(request_indices, request_epoch, head_block_root) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(duties, request_indices, dependent_root, chain) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 727215bfcad..73f50985bdf 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,6 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; use std::str::FromStr; +use std::sync::Arc; use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given @@ -23,19 +24,18 @@ impl BlockId { chain: &BeaconChain, ) -> Result { match &self.0 { - CoreBlockId::Head => chain - .head_info() - .map(|head| head.block_root) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.canonical_head.cached_head().head_block_root()), CoreBlockId::Genesis => Ok(chain.genesis_block_root), - CoreBlockId::Finalized => chain - .head_info() - .map(|head| head.finalized_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), - CoreBlockId::Justified => chain - .head_info() - .map(|head| head.current_justified_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Finalized => Ok(chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .root), + CoreBlockId::Justified => Ok(chain + .canonical_head + .cached_head() + .justified_checkpoint() + .root), CoreBlockId::Slot(slot) => chain .block_root_at_slot(*slot, WhenSlotSkipped::None) .map_err(warp_utils::reject::beacon_chain_error) @@ -57,10 +57,7 @@ impl BlockId { chain: &BeaconChain, ) -> Result>, warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map(Into::into) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.head_beacon_block().clone_as_blinded()), CoreBlockId::Slot(slot) => { let root = self.root(chain)?; chain @@ -103,11 +100,9 @@ impl BlockId { pub async fn full_block( &self, 
chain: &BeaconChain, - ) -> Result, warp::Rejection> { + ) -> Result>, warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.head_beacon_block()), CoreBlockId::Slot(slot) => { let root = self.root(chain)?; chain @@ -122,7 +117,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok(Arc::new(block)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -136,8 +131,8 @@ impl BlockId { .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { + .and_then(|block_opt| { + block_opt.map(Arc::new).ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", root diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 014db8a6027..645c19c40e5 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -22,7 +22,7 @@ pub fn info( pub fn historical_blocks( chain: Arc>, - blocks: Vec>, + blocks: Vec>>, ) -> Result { chain .import_historical_block_batch(blocks) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index fa3b6a9d953..95222b7b65e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -23,7 +23,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, validator_monitor::{get_block_delay_ms, timestamp_now}, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - HeadSafetyStatus, ProduceBlockVerification, WhenSlotSkipped, + ProduceBlockVerification, WhenSlotSkipped, }; use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -358,9 +358,7 @@ pub fn serve( chain: Arc>| async move { match *network_globals.sync_state.read() { SyncState::SyncingFinalized { .. } => { - let head_slot = chain - .best_slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { @@ -393,35 +391,6 @@ pub fn serve( ) .untuple_one(); - // Create a `warp` filter that rejects requests unless the head has been verified by the - // execution layer. - let only_with_safe_head = warp::any() - .and(chain_filter.clone()) - .and_then(move |chain: Arc>| async move { - let status = chain.head_safety_status().map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to read head safety status: {:?}", - e - )) - })?; - match status { - HeadSafetyStatus::Safe(_) => Ok(()), - HeadSafetyStatus::Unsafe(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "optimistic head hash {:?} has not been verified by the execution layer", - hash - ))) - } - HeadSafetyStatus::Invalid(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "the head block has an invalid payload {:?}, this may be unrecoverable", - hash - ))) - } - } - }) - .untuple_one(); - // Create a `warp` filter that provides access to the logger. 
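// A minimal sketch of the head-slot read inside the syncing filter above,
// using the imports already present in this file: the head slot now comes
// straight from the cached head rather than a fallible `best_slot()` call.
// The `tolerance_slots` parameter is an illustrative stand-in for the
// tolerance the real filter applies.
fn head_is_recent<T: BeaconChainTypes>(chain: &BeaconChain<T>, tolerance_slots: u64) -> bool {
    let head_slot = chain.canonical_head.cached_head().head_slot();
    chain
        .slot_clock
        .now_or_genesis()
        .map_or(false, |current_slot| head_slot + tolerance_slots >= current_slot)
}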
let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); @@ -440,15 +409,12 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|head| api_types::GenesisData { - genesis_time: head.genesis_time, - genesis_validators_root: head.genesis_validators_root, - genesis_fork_version: chain.spec.genesis_fork_version, - }) - .map(api_types::GenericResponse::from) + let genesis_data = api_types::GenesisData { + genesis_time: chain.genesis_time, + genesis_validators_root: chain.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }; + Ok(api_types::GenericResponse::from(genesis_data)) }) }); @@ -824,10 +790,10 @@ pub fn serve( blocking_json_task(move || { let (root, block) = match (query.slot, query.parent_root) { // No query parameters, return the canonical head block. - (None, None) => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|block| (block.canonical_root(), block.into()))?, + (None, None) => { + let block = chain.head_beacon_block(); + (block.canonical_root(), block.clone_as_blinded()) + } // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; @@ -934,93 +900,85 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock, + |block: Arc>, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| { - blocking_json_task(move || { - let seen_timestamp = timestamp_now(); + log: Logger| async move { + let seen_timestamp = timestamp_now(); + + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message(&network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + + match chain.process_block(block.clone()).await { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(block.clone())), - )?; + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); - // Determine the delay after the start of the slot, register it with metrics. - let delay = - get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration( - &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, - delay, - ); + // Update the head since it's likely this block will become the new + // head. + chain + .recompute_head_at_current_slot() + .await + .map_err(warp_utils::reject::beacon_chain_error)?; - match chain.process_block(block.clone()) { - Ok(root) => { - info!( + // Perform some logging to inform users if their blocks are being produced + // late. 
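// A minimal sketch of the async block-publication flow above, using the
// imports and helpers already present in this file (`publish_pubsub_message`,
// `PubsubMessage`, the `warp_utils` rejections): the block arrives as an
// `Arc`, is broadcast regardless of validity (as the API spec requires), then
// imported, and only afterwards is the head recomputed. Logging and metrics
// from the real handler are omitted; the function name is illustrative only.
async fn publish_and_import<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
    block: Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<(), warp::Rejection> {
    // Broadcast first: the API requires gossip even if import later fails.
    publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?;

    chain
        .process_block(block)
        .await
        .map_err(|e| warp_utils::reject::broadcast_without_import(format!("{:?}", e)))?;

    // Importing no longer moves the head implicitly; recompute it explicitly.
    chain
        .recompute_head_at_current_slot()
        .await
        .map_err(warp_utils::reject::beacon_chain_error)?;

    Ok(())
}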
+ // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; - - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); + "root" => ?root, + ) + } else if delay >= error_threshold { error!( log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) } + + Ok(warp::reply::json(&())) } - }) + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } }, ); @@ -1038,99 +996,90 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock>, + |block: Arc>>, chain: Arc>, network_tx: UnboundedSender>, - _log: Logger| { - blocking_json_task(move || { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). - - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. 
This flow should happen if the - // execution layer has no payload builders or if we have not yet finalized post-merge transition. - let payload = el - .block_on(|el| el.propose_blinded_beacon_block(&block)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "proposal failed: {:?}", - e - )) - })?; - let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { - message: BeaconBlockMerge { - slot: block.message().slot(), - proposer_index: block.message().proposer_index(), - parent_root: block.message().parent_root(), - state_root: block.message().state_root(), - body: BeaconBlockBodyMerge { - randao_reveal: block.message().body().randao_reveal().clone(), - eth1_data: block.message().body().eth1_data().clone(), - graffiti: *block.message().body().graffiti(), - proposer_slashings: block - .message() - .body() - .proposer_slashings() - .clone(), - attester_slashings: block - .message() - .body() - .attester_slashings() - .clone(), - attestations: block.message().body().attestations().clone(), - deposits: block.message().body().deposits().clone(), - voluntary_exits: block - .message() - .body() - .voluntary_exits() - .clone(), - sync_aggregate: block - .message() - .body() - .sync_aggregate() - .unwrap() - .clone(), - execution_payload: payload.into(), - }, + _log: Logger| async move { + if let Some(el) = chain.execution_layer.as_ref() { + //FIXME(sean): we may not always receive the payload in this response because it + // should be the relay's job to propogate the block. However, since this block is + // already signed and sent this might be ok (so long as the relay validates + // the block before revealing the payload). + + //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should + // be able to support the normal block proposal flow, because at some point full block endpoints + // will be deprecated from the beacon API. This will entail creating full blocks in + // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded + // blocks. We will access the payload of those blocks here. This flow should happen if the + // execution layer has no payload builders or if we have not yet finalized post-merge transition. 
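// A minimal sketch of the first step of the blinded-block flow below, using
// the imports already present in this file: the execution layer is asked,
// asynchronously, for the full payload behind the blinded block before the
// full `SignedBeaconBlock` is reconstructed and broadcast. The
// `ExecutionPayload` return type is inferred from the `payload.into()`
// conversion below; the function name is illustrative only.
async fn fetch_payload_for_blinded_block<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    block: &SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
) -> Result<ExecutionPayload<T::EthSpec>, warp::Rejection> {
    let el = chain.execution_layer.as_ref().ok_or_else(|| {
        warp_utils::reject::custom_server_error("no execution layer found".to_string())
    })?;

    el.propose_blinded_beacon_block(block)
        .await
        .map_err(|e| {
            warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e))
        })
}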
+ let payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e)) + })?; + let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { + message: BeaconBlockMerge { + slot: block.message().slot(), + proposer_index: block.message().proposer_index(), + parent_root: block.message().parent_root(), + state_root: block.message().state_root(), + body: BeaconBlockBodyMerge { + randao_reveal: block.message().body().randao_reveal().clone(), + eth1_data: block.message().body().eth1_data().clone(), + graffiti: *block.message().body().graffiti(), + proposer_slashings: block + .message() + .body() + .proposer_slashings() + .clone(), + attester_slashings: block + .message() + .body() + .attester_slashings() + .clone(), + attestations: block.message().body().attestations().clone(), + deposits: block.message().body().deposits().clone(), + voluntary_exits: block.message().body().voluntary_exits().clone(), + sync_aggregate: block + .message() + .body() + .sync_aggregate() + .unwrap() + .clone(), + execution_payload: payload.into(), }, - signature: block.signature().clone(), - }); + }, + signature: block.signature().clone(), + }); + let new_block = Arc::new(new_block); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(new_block.clone())), - )?; + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(new_block.clone()), + )?; - match chain.process_block(new_block) { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; + match chain.process_block(new_block).await { + Ok(_) => { + // Update the head since it's likely this block will become the new + // head. 
+ chain + .recompute_head_at_current_slot() + .await + .map_err(warp_utils::reject::beacon_chain_error)?; - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); + Ok(warp::reply::json(&())) + } + Err(e) => { + let msg = format!("{:?}", e); - Err(warp_utils::reject::broadcast_without_import(msg)) - } + Err(warp_utils::reject::broadcast_without_import(msg)) } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) } - }) + } else { + Err(warp_utils::reject::custom_server_error( + "no execution layer found".to_string(), + )) + } }, ); @@ -1390,9 +1339,7 @@ pub fn serve( )), )?; - chain - .import_attester_slashing(slashing) - .map_err(warp_utils::reject::beacon_chain_error)?; + chain.import_attester_slashing(slashing); } Ok(()) @@ -1733,10 +1680,7 @@ pub fn serve( .and_then( |network_globals: Arc>, chain: Arc>| { blocking_json_task(move || { - let head_slot = chain - .head_info() - .map(|info| info.slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to read slot clock".into()) })?; @@ -1982,48 +1926,49 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); @@ -2044,48 +1989,48 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: 
api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); @@ -2096,7 +2041,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { @@ -2129,7 +2073,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { @@ -2206,7 +2149,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head) .and(chain_filter.clone()) .and_then( |sync_committee_data: SyncContributionData, chain: Arc>| { @@ -2404,43 +2346,42 @@ pub fn serve( |chain: Arc>, client_addr: Option, log: Logger, - preparation_data: Vec| { - blocking_json_task(move || { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + preparation_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; - debug!( - log, - "Received proposer preparation data"; - "count" 
=> preparation_data.len(), - "client" => client_addr - .map(|a| a.to_string()) - .unwrap_or_else(|| "unknown".to_string()), - ); + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - execution_layer - .update_proposer_preparation_blocking(current_epoch, &preparation_data) - .map_err(|_e| { - warp_utils::reject::custom_bad_request( - "error processing proposer preparations".to_string(), - ) - })?; + debug!( + log, + "Received proposer preparation data"; + "count" => preparation_data.len(), + "client" => client_addr + .map(|a| a.to_string()) + .unwrap_or_else(|| "unknown".to_string()), + ); + + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; - chain.prepare_beacon_proposer_blocking().map_err(|e| { + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { warp_utils::reject::custom_bad_request(format!( "error updating proposer preparations: {:?}", e )) })?; - Ok(()) - }) + Ok::<_, warp::reject::Rejection>(warp::reply::json(&())) }, ); @@ -2607,7 +2548,11 @@ pub fn serve( .and_then(|chain: Arc>| { blocking_task(move || { Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( - chain.fork_choice.read().proto_array().core_proto_array(), + chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .core_proto_array(), ))) }) }); @@ -2650,9 +2595,6 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - let head_info = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; let current_slot_opt = chain.slot().ok(); chain @@ -2664,7 +2606,7 @@ pub fn serve( ) }) .and_then(|eth1| { - eth1.sync_status(head_info.genesis_time, current_slot_opt, &chain.spec) + eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) .ok_or_else(|| { warp_utils::reject::custom_server_error( "Unable to determine Eth1 sync status".to_string(), @@ -2787,7 +2729,7 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( - |blocks: Vec>, + |blocks: Vec>>, chain: Arc>, log: Logger| { info!( diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index b040eec779b..bddae555499 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -55,7 +55,7 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, dependent_root, _) = + let (proposers, dependent_root, _execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(chain, request_epoch, dependent_root, proposers) @@ -88,16 +88,23 @@ fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { - let head = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + let (head_slot, head_block_root, head_decision_root) = { + let head = chain.canonical_head.cached_head(); + let head_block_root = head.head_block_root(); + let decision_root = head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; + (head.head_slot(), head_block_root, decision_root) + }; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch - Ordering::Equal => head.proposer_shuffling_decision_root, + Ordering::Equal => head_decision_root, // head_epoch < request_epoch - Ordering::Less => head.block_root, + Ordering::Less => head_block_root, // head_epoch > request_epoch Ordering::Greater => { return Err(warp_utils::reject::custom_server_error(format!( @@ -132,8 +139,9 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, fork) = compute_proposer_duties_from_head(current_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + let (indices, dependent_root, _execution_status, fork) = + compute_proposer_duties_from_head(current_epoch, chain) + .map_err(warp_utils::reject::beacon_chain_error)?; // Prime the proposer shuffling cache with the newly-learned value. 
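// A minimal sketch of the dependent-root selection above, using the imports
// already present in this file: for the head epoch the proposer shuffling
// decision root is read from the cached head state, while for a later epoch
// the head block root itself is the dependent root. The function name is
// illustrative only.
fn dependent_root_from_head<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    request_epoch: Epoch,
) -> Result<Hash256, warp::reject::Rejection> {
    let head = chain.canonical_head.cached_head();
    let head_block_root = head.head_block_root();
    let head_epoch = head.head_slot().epoch(T::EthSpec::slots_per_epoch());

    if head_epoch == request_epoch {
        head.snapshot
            .beacon_state
            .proposer_shuffling_decision_root(head_block_root)
            .map_err(warp_utils::reject::beacon_state_error)
    } else {
        // Assumes `request_epoch > head_epoch`; as above, requests for an
        // earlier epoch are not served from this cache path.
        Ok(head_block_root)
    }
}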
chain diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 95c049d9979..8604c918991 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -18,27 +18,23 @@ impl StateId { chain: &BeaconChain, ) -> Result { let slot = match &self.0 { - CoreStateId::Head => { - return chain - .head_info() - .map(|head| head.state_root) - .map_err(warp_utils::reject::beacon_chain_error) - } + CoreStateId::Head => return Ok(chain.canonical_head.cached_head().head_state_root()), CoreStateId::Genesis => return Ok(chain.genesis_state_root), - CoreStateId::Finalized => chain.head_info().map(|head| { - head.finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Justified => chain.head_info().map(|head| { - head.current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Slot(slot) => Ok(*slot), + CoreStateId::Finalized => chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + CoreStateId::Justified => chain + .canonical_head + .cached_head() + .justified_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + CoreStateId::Slot(slot) => *slot, CoreStateId::Root(root) => return Ok(*root), - } - .map_err(warp_utils::reject::beacon_chain_error)?; + }; chain .state_root_at_slot(slot) @@ -62,11 +58,7 @@ impl StateId { chain: &BeaconChain, ) -> Result, warp::Rejection> { let (state_root, slot_opt) = match &self.0 { - CoreStateId::Head => { - return chain - .head_beacon_state() - .map_err(warp_utils::reject::beacon_chain_error) - } + CoreStateId::Head => return Ok(chain.head_beacon_state_cloned()), CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), _ => (self.root(chain)?, None), }; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6b4f79fa5d5..942a1167c2f 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -45,6 +45,7 @@ async fn sync_committee_duties_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -61,6 +62,7 @@ async fn sync_committee_duties_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators) + .await .unwrap(); assert_eq!( @@ -244,6 +246,7 @@ async fn sync_committee_indices_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -277,6 +280,7 @@ async fn sync_committee_indices_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators) + .await .unwrap(); let current_period = fork_epoch.sync_committee_period(&spec).unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 8b12aa4a5b2..3327093d097 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -47,11 +47,13 @@ pub async fn fork_choice_before_proposal() { // Create some chain depth. 
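// A minimal sketch of the checkpoint-to-slot mapping `StateId` now uses in
// the state_id.rs hunks above, with the imports already present in that
// file: finalized (and, likewise, justified) state lookups resolve to the
// first slot of the checkpoint epoch, read from the cached head.
fn finalized_state_slot<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> Slot {
    chain
        .canonical_head
        .cached_head()
        .finalized_checkpoint()
        .epoch
        .start_slot(T::EthSpec::slots_per_epoch())
}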
harness.advance_slot(); - harness.extend_chain( - num_initial as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // We set up the following block graph, where B is a block that is temporarily orphaned by C, // but is then reinstated and built upon by D. @@ -64,8 +66,8 @@ pub async fn fork_choice_before_proposal() { let slot_d = slot_a + 3; let state_a = harness.get_current_state(); - let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b); - let block_root_b = harness.process_block(slot_b, block_b).unwrap(); + let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; + let block_root_b = harness.process_block(slot_b, block_b).await.unwrap(); // Create attestations to B but keep them in reserve until after C has been processed. let attestations_b = harness.make_attestations( @@ -76,8 +78,11 @@ pub async fn fork_choice_before_proposal() { slot_b, ); - let (block_c, state_c) = harness.make_block(state_a, slot_c); - let block_root_c = harness.process_block(slot_c, block_c.clone()).unwrap(); + let (block_c, state_c) = harness.make_block(state_a, slot_c).await; + let block_root_c = harness + .process_block(slot_c, block_c.clone()) + .await + .unwrap(); // Create attestations to C from a small number of validators and process them immediately. let attestations_c = harness.make_attestations( @@ -94,7 +99,7 @@ pub async fn fork_choice_before_proposal() { // Due to proposer boost, the head should be C during slot C. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_c.into() ); @@ -102,7 +107,7 @@ pub async fn fork_choice_before_proposal() { // Manually prod the per-slot task, because the slot timer doesn't run in the background in // these tests. harness.advance_slot(); - harness.chain.per_slot_task(); + harness.chain.per_slot_task().await; let proposer_index = state_b .get_beacon_proposer_index(slot_d, &harness.chain.spec) @@ -119,7 +124,7 @@ pub async fn fork_choice_before_proposal() { // Head is now B. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_b.into() ); // D's parent is B. 
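// A minimal sketch of the manual slot-advance pattern used above, assuming
// the `BeaconChainHarness` generic bound matches how the harness is used in
// these tests: the background slot timer does not run here, so after
// `advance_slot()` the per-slot task (which re-runs fork choice) must be
// awaited before asserting on the cached head.
async fn advance_one_slot_and_get_head<T: BeaconChainTypes>(
    harness: &BeaconChainHarness<T>,
) -> Hash256 {
    harness.advance_slot();
    harness.chain.per_slot_task().await;
    harness.chain.canonical_head.cached_head().head_block_root()
}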
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5f53a961560..1d1197583f2 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -20,7 +20,6 @@ use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; -use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; @@ -50,6 +49,7 @@ const SKIPPED_SLOTS: &[u64] = &[ ]; struct ApiTester { + harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, next_block: SignedBeaconBlock, @@ -60,11 +60,9 @@ struct ApiTester { proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, _server_shutdown: oneshot::Sender<()>, - validator_keypairs: Vec, network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, - _runtime: TestRuntime, } impl ApiTester { @@ -76,11 +74,13 @@ impl ApiTester { } pub async fn new_from_spec(spec: ChainSpec) -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(), + ); harness.advance_slot(); @@ -88,17 +88,19 @@ impl ApiTester { let slot = harness.chain.slot().unwrap().as_u64(); if !SKIPPED_SLOTS.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -106,12 +108,14 @@ impl ApiTester { "precondition: current slot is one after head" ); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -162,15 +166,19 @@ impl ApiTester { let chain = harness.chain.clone(); assert_eq!( - chain.head_info().unwrap().finalized_checkpoint.epoch, + chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, 2, "precondition: finality" ); assert_eq!( chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch, 3, "precondition: justification" @@ -200,6 +208,7 @@ impl ApiTester { ); Self { + harness, chain, client, next_block, @@ -210,31 +219,33 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - _runtime: harness.runtime, } } pub async fn new_from_genesis() -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - 
.fresh_ephemeral_store() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(), + ); harness.advance_slot(); - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -279,6 +290,7 @@ impl ApiTester { ); Self { + harness, chain, client, next_block, @@ -289,14 +301,16 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - _runtime: harness.runtime, } } + fn validator_keypairs(&self) -> &[Keypair] { + &self.harness.validator_keypairs + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -321,7 +335,9 @@ impl ApiTester { StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), StateId::Root(Hash256::zero()), ]; - ids.push(StateId::Root(self.chain.head_info().unwrap().state_root)); + ids.push(StateId::Root( + self.chain.canonical_head.cached_head().head_state_root(), + )); ids } @@ -339,13 +355,20 @@ impl ApiTester { BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), BlockId::Root(Hash256::zero()), ]; - ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root)); + ids.push(BlockId::Root( + self.chain.canonical_head.cached_head().head_block_root(), + )); ids } fn get_state(&self, state_id: StateId) -> Option> { match state_id { - StateId::Head => Some(self.chain.head().unwrap().beacon_state), + StateId::Head => Some( + self.chain + .head_snapshot() + .beacon_state + .clone_with_only_committee_caches(), + ), StateId::Genesis => self .chain .get_state(&self.chain.genesis_state_root, None) @@ -353,9 +376,9 @@ impl ApiTester { StateId::Finalized => { let finalized_slot = self .chain - .head_info() - .unwrap() - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -370,9 +393,9 @@ impl ApiTester { StateId::Justified => { let justified_slot = self .chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -396,7 +419,7 @@ impl ApiTester { pub async fn test_beacon_genesis(self) -> Self { let result = self.client.get_beacon_genesis().await.unwrap().data; - let state = self.chain.head().unwrap().beacon_state; + let state = &self.chain.head_snapshot().beacon_state; let expected = GenesisData { genesis_time: state.genesis_time(), genesis_validators_root: state.genesis_validators_root(), @@ -418,14 +441,14 @@ impl ApiTester { .map(|res| res.data.root); let expected = match state_id { - StateId::Head => Some(self.chain.head_info().unwrap().state_root), + StateId::Head => Some(self.chain.canonical_head.cached_head().head_state_root()), StateId::Genesis => Some(self.chain.genesis_state_root), 
StateId::Finalized => { let finalized_slot = self .chain - .head_info() - .unwrap() - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -434,9 +457,9 @@ impl ApiTester { StateId::Justified => { let justified_slot = self .chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -746,14 +769,20 @@ impl ApiTester { fn get_block_root(&self, block_id: BlockId) -> Option { match block_id { - BlockId::Head => Some(self.chain.head_info().unwrap().block_root), + BlockId::Head => Some(self.chain.canonical_head.cached_head().head_block_root()), BlockId::Genesis => Some(self.chain.genesis_block_root), - BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root), + BlockId::Finalized => Some( + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .root, + ), BlockId::Justified => Some( self.chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .root, ), BlockId::Slot(slot) => self @@ -1314,7 +1343,7 @@ impl ApiTester { pub async fn test_get_node_syncing(self) -> Self { let result = self.client.get_node_syncing().await.unwrap().data; - let head_slot = self.chain.head_info().unwrap().slot; + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let sync_distance = self.chain.slot().unwrap() - head_slot; let expected = SyncingData { @@ -1528,7 +1557,7 @@ impl ApiTester { } fn validator_count(&self) -> usize { - self.chain.head().unwrap().beacon_state.validators().len() + self.chain.head_snapshot().beacon_state.validators().len() } fn interesting_validator_indices(&self) -> Vec> { @@ -1613,7 +1642,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); assert_eq!(results.dependent_root, dependent_root); @@ -1688,7 +1717,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); // Presently, the beacon chain harness never runs the code that primes the proposer // cache. 
If this changes in the future then we'll need some smarter logic here, but @@ -1816,7 +1845,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); self.client .get_validator_duties_proposer(current_epoch) @@ -1870,7 +1899,7 @@ impl ApiTester { } pub async fn test_block_production(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() * 3 { @@ -1890,7 +1919,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -1918,7 +1947,7 @@ impl ApiTester { self.client.post_beacon_blocks(&signed_block).await.unwrap(); - assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block); + assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -1949,7 +1978,7 @@ impl ApiTester { } pub async fn test_block_production_verify_randao_invalid(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() { @@ -1969,7 +1998,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -2032,7 +2061,7 @@ impl ApiTester { } pub async fn test_get_validator_attestation_data(self) -> Self { - let mut state = self.chain.head_beacon_state().unwrap(); + let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); state .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) @@ -2062,7 +2091,6 @@ impl ApiTester { let attestation = self .chain .head_beacon_block() - .unwrap() .message() .body() .attestations()[0] @@ -2090,7 +2118,7 @@ impl ApiTester { let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); - let mut head = self.chain.head().unwrap(); + let mut head = self.chain.head_snapshot().as_ref().clone(); while head.beacon_state.current_epoch() < epoch { per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap(); } @@ -2106,7 +2134,7 @@ impl ApiTester { .client .post_validator_duties_attester( epoch, - (0..self.validator_keypairs.len() as u64) + (0..self.validator_keypairs().len() as u64) .collect::>() .as_slice(), ) @@ -2115,7 +2143,7 @@ impl ApiTester { .data; let (i, kp, duty, proof) = self - .validator_keypairs + .validator_keypairs() .iter() .enumerate() .find_map(|(i, kp)| { @@ -2348,7 +2376,7 @@ impl ApiTester { pub async fn test_post_lighthouse_liveness(self) -> Self { let epoch = self.chain.epoch().unwrap(); - let head_state = self.chain.head_beacon_state().unwrap(); + let head_state = self.chain.head_beacon_state_cloned(); let indices = (0..head_state.validators().len()) .map(|i| i as u64) .collect::>(); @@ -2465,7 +2493,7 @@ impl ApiTester { let block_root = self.next_block.canonical_root(); // current_duty_dependent_root = block root because this is the first slot of the epoch - let current_duty_dependent_root = self.chain.head_beacon_block_root().unwrap(); + let current_duty_dependent_root = 
self.chain.head_beacon_block_root(); let current_slot = self.chain.slot().unwrap(); let next_slot = self.next_block.slot(); let finalization_distance = E::slots_per_epoch() * 2; @@ -2488,17 +2516,21 @@ impl ApiTester { epoch_transition: true, }); + let finalized_block_root = self + .chain + .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let finalized_block = self + .chain + .get_blinded_block(&finalized_block_root) + .unwrap() + .unwrap(); + let finalized_state_root = finalized_block.state_root(); + let expected_finalized = EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - block: self - .chain - .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(), - state: self - .chain - .state_root_at_slot(next_slot - finalization_distance) - .unwrap() - .unwrap(), + block: finalized_block_root, + state: finalized_state_root, epoch: Epoch::new(3), }); @@ -2510,7 +2542,7 @@ impl ApiTester { let block_events = poll_events(&mut events_future, 3, Duration::from_millis(10000)).await; assert_eq!( block_events.as_slice(), - &[expected_block, expected_finalized, expected_head] + &[expected_block, expected_head, expected_finalized] ); // Test a reorg event diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 81de3f015ad..f208f3b16bc 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1330,9 +1330,9 @@ pub enum Response { /// A Status message. Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), + BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. 
- BlocksByRoot(Option>>), + BlocksByRoot(Option>>), } impl std::convert::From> for RPCCodedResponse { diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 6bd4a96fb5f..5d960c32557 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -529,10 +529,10 @@ fn handle_v1_response( Protocol::Goodbye => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new( + Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { @@ -569,31 +569,31 @@ fn handle_v2_response( })?; match protocol { Protocol::BlocksByRange => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), )))), }, Protocol::BlocksByRoot => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), @@ -832,10 +832,10 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -845,7 +845,7 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -858,11 +858,11 @@ mod tests { encode_then_decode( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), 
Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); @@ -871,7 +871,7 @@ mod tests { encode_then_decode( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -947,10 +947,10 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -962,10 +962,10 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -974,10 +974,10 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); @@ -987,12 +987,12 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() )))) ); @@ -1019,11 +1019,11 @@ mod tests { encode_then_decode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))), ); @@ -1034,11 +1034,11 @@ mod tests { encode_then_decode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); @@ -1046,22 +1046,22 @@ mod tests { encode_then_decode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) ); assert_eq!( encode_then_decode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - 
Ok(Some(RPCResponse::BlocksByRoot(Box::new(merge_block_small)))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) ); let mut encoded = @@ -1113,7 +1113,7 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1134,7 +1134,7 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1156,7 +1156,7 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1181,7 +1181,7 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap(); @@ -1226,7 +1226,7 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1250,7 +1250,7 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 1ac9c9b2c0a..386c0ea42e2 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -9,6 +9,7 @@ use ssz_types::{ VariableList, }; use std::ops::Deref; +use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -227,10 +228,10 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the /// batch. - BlocksByRange(Box>), + BlocksByRange(Arc>), /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Box>), + BlocksByRoot(Arc>), /// A PONG response to a PING request. 
Pong(Ping), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index af2656a2759..a01072f8e4e 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,6 +7,7 @@ use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; +use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, @@ -17,7 +18,7 @@ use types::{ #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - BeaconBlock(Box>), + BeaconBlock(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -173,7 +174,7 @@ impl PubsubMessage { )) } }; - Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) + Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 5895d32d5dc..ebc5626a609 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -175,15 +175,15 @@ fn test_blocks_by_range_chunked_rpc() { // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -313,7 +313,7 @@ fn test_blocks_by_range_over_limit() { // BlocksByRange Response let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_large = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let request_id = messages_to_send as usize; // build the sender future @@ -412,7 +412,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = 
Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); // keep count of the number of messages received let mut messages_received: u64 = 0; @@ -544,7 +544,7 @@ fn test_blocks_by_range_single_empty_rpc() { let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); let messages_to_send = 1; @@ -664,15 +664,15 @@ fn test_blocks_by_root_chunked_rpc() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -807,7 +807,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 4aa7c769244..2a7400f1ed7 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -52,6 +52,7 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; +use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -384,7 +385,7 @@ impl WorkEvent { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, ) -> Self { Self { @@ -488,7 +489,7 @@ impl WorkEvent { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Self { @@ -505,7 +506,7 @@ impl WorkEvent { /// Create a new work event to import `blocks` as a beacon chain segment. 
pub fn chain_segment( process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, ) -> Self { Self { drop_during_sync: false, @@ -652,7 +653,7 @@ pub enum Work { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, }, DelayedImportBlock { @@ -689,13 +690,13 @@ pub enum Work { seen_timestamp: Duration, }, RpcBlock { - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, }, ChainSegment { process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, }, Status { peer_id: PeerId, @@ -1305,15 +1306,6 @@ impl BeaconProcessor { let idle_tx = toolbox.idle_tx; let work_reprocessing_tx = toolbox.work_reprocessing_tx; - // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. - // - // This helps ensure that the worker is always freed in the case of an early exit or panic. - // As such, this instantiation should happen as early in the function as possible. - let send_idle_on_drop = SendOnDrop { - tx: idle_tx, - log: self.log.clone(), - }; - let work_id = work.str_id(); let worker_timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); @@ -1323,6 +1315,16 @@ impl BeaconProcessor { &[work.str_id()], ); + // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. + // + // This helps ensure that the worker is always freed in the case of an early exit or panic. + // As such, this instantiation should happen as early in the function as possible. + let send_idle_on_drop = SendOnDrop { + tx: idle_tx, + _worker_timer: worker_timer, + log: self.log.clone(), + }; + let worker_id = self.current_workers; self.current_workers = self.current_workers.saturating_add(1); @@ -1336,7 +1338,6 @@ impl BeaconProcessor { return; }; - let log = self.log.clone(); let executor = self.executor.clone(); let worker = Worker { @@ -1355,252 +1356,308 @@ impl BeaconProcessor { "worker" => worker_id, ); - let sub_executor = executor.clone(); - executor.spawn_blocking( - move || { - let _worker_timer = worker_timer; + let task_spawner = TaskSpawner { + executor: executor.clone(), + send_idle_on_drop, + }; - match work { - /* - * Individual unaggregated attestation verification. - */ - Work::GossipAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched unaggregated attestation verification. - */ - Work::GossipAttestationBatch { packages } => worker - .process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)), - /* - * Individual aggregated attestation verification. - */ - Work::GossipAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched aggregated attestation verification. - */ - Work::GossipAggregateBatch { packages } => { - worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) - } - /* - * Verification for beacon blocks received on gossip. - */ - Work::GossipBlock { - message_id, - peer_id, - peer_client, - block, - seen_timestamp, - } => worker.process_gossip_block( + let sub_executor = executor; + match work { + /* + * Individual unaggregated attestation verification. 
+ */ + Work::GossipAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched unaggregated attestation verification. + */ + Work::GossipAttestationBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Individual aggregated attestation verification. + */ + Work::GossipAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched aggregated attestation verification. + */ + Work::GossipAggregateBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Verification for beacon blocks received on gossip. + */ + Work::GossipBlock { + message_id, + peer_id, + peer_client, + block, + seen_timestamp, + } => task_spawner.spawn_async(async move { + worker + .process_gossip_block( message_id, peer_id, peer_client, - *block, - work_reprocessing_tx.clone(), - duplicate_cache, - seen_timestamp, - ), - /* - * Import for blocks that we received earlier than their intended slot. - */ - Work::DelayedImportBlock { - peer_id, block, - seen_timestamp, - } => worker.process_gossip_verified_block( - peer_id, - *block, work_reprocessing_tx, + duplicate_cache, seen_timestamp, - ), - /* - * Voluntary exits received on gossip. - */ - Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, - } => worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit), - /* - * Proposer slashings received on gossip. - */ - Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - } => worker.process_gossip_proposer_slashing( - message_id, - peer_id, - *proposer_slashing, - ), - /* - * Attester slashings received on gossip. - */ - Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - } => worker.process_gossip_attester_slashing( - message_id, - peer_id, - *attester_slashing, - ), - /* - * Sync committee message verification. - */ - Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature, - subnet_id, - seen_timestamp, - } => worker.process_gossip_sync_committee_signature( - message_id, - peer_id, - *sync_signature, - subnet_id, - seen_timestamp, - ), - /* - * Syn contribution verification. - */ - Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution, - seen_timestamp, - } => worker.process_sync_committee_contribution( - message_id, - peer_id, - *sync_contribution, - seen_timestamp, - ), - /* - * Verification for beacon blocks received during syncing via RPC. - */ - Work::RpcBlock { - block, - seen_timestamp, - process_type, - } => { - worker.process_rpc_block( - *block, - seen_timestamp, - process_type, - work_reprocessing_tx.clone(), - duplicate_cache, - ); - } - /* - * Verification for a chain segment (multiple blocks). - */ - Work::ChainSegment { process_id, blocks } => { - worker.process_chain_segment(process_id, blocks) - } - /* - * Processing of Status Messages. 
- */ - Work::Status { peer_id, message } => worker.process_status(peer_id, message), - /* - * Processing of range syncing requests from other peers. - */ - Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - /* - * Processing of blocks by roots requests from other peers. - */ - Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_root_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - None, // Do not allow this attestation to be re-processed beyond this point. - seen_timestamp, - ), - Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - None, - seen_timestamp, - ), - }; + ) + .await + }), + /* + * Import for blocks that we received earlier than their intended slot. + */ + Work::DelayedImportBlock { + peer_id, + block, + seen_timestamp, + } => task_spawner.spawn_async(worker.process_gossip_verified_block( + peer_id, + *block, + work_reprocessing_tx, + seen_timestamp, + )), + /* + * Voluntary exits received on gossip. + */ + Work::GossipVoluntaryExit { + message_id, + peer_id, + voluntary_exit, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit) + }), + /* + * Proposer slashings received on gossip. + */ + Work::GossipProposerSlashing { + message_id, + peer_id, + proposer_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) + }), + /* + * Attester slashings received on gossip. + */ + Work::GossipAttesterSlashing { + message_id, + peer_id, + attester_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + }), + /* + * Sync committee message verification. + */ + Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_sync_committee_signature( + message_id, + peer_id, + *sync_signature, + subnet_id, + seen_timestamp, + ) + }), + /* + * Syn contribution verification. + */ + Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_sync_committee_contribution( + message_id, + peer_id, + *sync_contribution, + seen_timestamp, + ) + }), + /* + * Verification for beacon blocks received during syncing via RPC. + */ + Work::RpcBlock { + block, + seen_timestamp, + process_type, + } => task_spawner.spawn_async(worker.process_rpc_block( + block, + seen_timestamp, + process_type, + work_reprocessing_tx, + duplicate_cache, + )), + /* + * Verification for a chain segment (multiple blocks). + */ + Work::ChainSegment { process_id, blocks } => task_spawner + .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + /* + * Processing of Status Messages. 
+ */ + Work::Status { peer_id, message } => { + task_spawner.spawn_blocking(move || worker.process_status(peer_id, message)) + } + /* + * Processing of range syncing requests from other peers. + */ + Work::BlocksByRangeRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + /* + * Processing of blocks by roots requests from other peers. + */ + Work::BlocksByRootsRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_root_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + Work::UnknownBlockAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + None, // Do not allow this attestation to be re-processed beyond this point. + seen_timestamp, + ) + }), + Work::UnknownBlockAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + None, + seen_timestamp, + ) + }), + }; + } +} - trace!( - log, - "Beacon processor worker done"; - "work" => work_id, - "worker" => worker_id, - ); +/// Spawns tasks that are either: +/// +/// - Blocking (i.e. intensive methods that shouldn't run on the core `tokio` executor) +/// - Async (i.e. `async` methods) +/// +/// Takes a `SendOnDrop` and ensures it is dropped after the task completes. This frees the beacon +/// processor worker so a new task can be started. +struct TaskSpawner { + executor: TaskExecutor, + send_idle_on_drop: SendOnDrop, +} - // This explicit `drop` is used to remind the programmer that this variable must - // not be dropped until the worker is complete. Dropping it early will cause the - // worker to be marked as "free" and cause an over-spawning of workers. - drop(send_idle_on_drop); +impl TaskSpawner { + /// Spawn an async task, dropping the `SendOnDrop` after the task has completed. + fn spawn_async(self, task: impl Future + Send + 'static) { + self.executor.spawn( + async { + task.await; + drop(self.send_idle_on_drop) }, WORKER_TASK_NAME, - ); + ) + } + + /// Spawn a blocking task, dropping the `SendOnDrop` after the task has completed. + fn spawn_blocking(self, task: F) + where + F: FnOnce() + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(); + drop(self.send_idle_on_drop) + }, + WORKER_TASK_NAME, + ) + } + + /// Spawn a blocking task, passing the `SendOnDrop` into the task. + /// + /// ## Notes + /// + /// Users must ensure the `SendOnDrop` is dropped at the appropriate time! + pub fn spawn_blocking_with_manual_send_idle(self, task: F) + where + F: FnOnce(SendOnDrop) + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(self.send_idle_on_drop); + }, + WORKER_TASK_NAME, + ) } } @@ -1616,6 +1673,8 @@ impl BeaconProcessor { /// https://doc.rust-lang.org/std/ops/trait.Drop.html#panics pub struct SendOnDrop { tx: mpsc::Sender<()>, + // The field is unused, but it's here to ensure the timer is dropped once the task has finished. 
+ _worker_timer: Option, log: Logger, } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 1c9d323576d..a39ca2ec33e 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -8,7 +8,6 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use environment::{null_logger, Environment, EnvironmentBuilder}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -20,7 +19,6 @@ use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; -use tokio::runtime::Handle; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, @@ -45,7 +43,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10); /// Provides utilities for testing the `BeaconProcessor`. struct TestRig { chain: Arc>, - next_block: SignedBeaconBlock, + next_block: Arc>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -56,7 +54,7 @@ struct TestRig { work_journal_rx: mpsc::Receiver<&'static str>, _network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, - environment: Option>, + _harness: BeaconChainHarness, } /// This custom drop implementation ensures that we shut down the tokio runtime gracefully. Without @@ -65,12 +63,11 @@ impl Drop for TestRig { fn drop(&mut self) { // Causes the beacon processor to shutdown. self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0; - self.environment.take().unwrap().shutdown_on_idle(); } } impl TestRig { - pub fn new(chain_length: u64) -> Self { + pub async fn new(chain_length: u64) -> Self { // This allows for testing voluntary exits without building out a massive chain. 
let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -84,16 +81,18 @@ impl TestRig { harness.advance_slot(); for _ in 0..chain_length { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -101,8 +100,9 @@ impl TestRig { "precondition: current slot is one after head" ); - let (next_block, next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -155,11 +155,11 @@ impl TestRig { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - let chain = harness.chain; + let chain = harness.chain.clone(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let log = null_logger().unwrap(); + let log = harness.logger().clone(); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); @@ -181,15 +181,7 @@ impl TestRig { &log, )); - let mut environment = EnvironmentBuilder::mainnet() - .null_logger() - .unwrap() - .multi_threaded_tokio_runtime() - .unwrap() - .build() - .unwrap(); - - let executor = environment.core_context().executor; + let executor = harness.runtime.task_executor.clone(); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); @@ -208,7 +200,7 @@ impl TestRig { Self { chain, - next_block, + next_block: Arc::new(next_block), attestations, next_block_attestations, next_block_aggregate_attestations, @@ -219,12 +211,16 @@ impl TestRig { work_journal_rx, _network_rx, _sync_rx, - environment: Some(environment), + _harness: harness, } } + pub async fn recompute_head(&self) { + self.chain.recompute_head_at_current_slot().await.unwrap() + } + pub fn head_root(&self) -> Hash256 { - self.chain.head().unwrap().beacon_block_root + self.chain.head_snapshot().beacon_block_root } pub fn enqueue_gossip_block(&self) { @@ -233,7 +229,7 @@ impl TestRig { junk_message_id(), junk_peer_id(), Client::default(), - Box::new(self.next_block.clone()), + self.next_block.clone(), Duration::from_secs(0), )) .unwrap(); @@ -241,7 +237,7 @@ impl TestRig { pub fn enqueue_rpc_block(&self) { let event = WorkEvent::rpc_beacon_block( - Box::new(self.next_block.clone()), + self.next_block.clone(), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -324,28 +320,16 @@ impl TestRig { .unwrap(); } - fn handle(&mut self) -> Handle { - self.environment - .as_mut() - .unwrap() - .core_context() - .executor - .handle() - .unwrap() - } - /// Assert that the `BeaconProcessor` doesn't produce any events in the given `duration`. - pub fn assert_no_events_for(&mut self, duration: Duration) { - self.handle().block_on(async { - tokio::select! { - _ = tokio::time::sleep(duration) => (), - event = self.work_journal_rx.recv() => panic!( - "received {:?} within {:?} when expecting no events", - event, - duration - ), - } - }) + pub async fn assert_no_events_for(&mut self, duration: Duration) { + tokio::select! 
{ + _ = tokio::time::sleep(duration) => (), + event = self.work_journal_rx.recv() => panic!( + "received {:?} within {:?} when expecting no events", + event, + duration + ), + } } /// Checks that the `BeaconProcessor` event journal contains the `expected` events in the given @@ -354,57 +338,54 @@ impl TestRig { /// /// Given the described logic, `expected` must not contain `WORKER_FREED` or `NOTHING_TO_DO` /// events. - pub fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { + pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { assert!(expected .iter() .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); - let (events, worker_freed_remaining) = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); - let mut worker_freed_remaining = expected.len(); - - let drain_future = async { - loop { - match self.work_journal_rx.recv().await { - Some(event) if event == WORKER_FREED => { - worker_freed_remaining -= 1; - if worker_freed_remaining == 0 { - // Break when all expected events are finished. - break; - } - } - Some(event) if event == NOTHING_TO_DO => { - // Ignore these. - } - Some(event) => { - events.push(event); + let mut events = Vec::with_capacity(expected.len()); + let mut worker_freed_remaining = expected.len(); + + let drain_future = async { + loop { + match self.work_journal_rx.recv().await { + Some(event) if event == WORKER_FREED => { + worker_freed_remaining -= 1; + if worker_freed_remaining == 0 { + // Break when all expected events are finished. + break; } - None => break, } + Some(event) if event == NOTHING_TO_DO => { + // Ignore these. + } + Some(event) => { + events.push(event); + } + None => break, } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", - STANDARD_TIMEOUT, - expected, - events, - worker_freed_remaining, - ), - _ = drain_future => {}, } - - (events, worker_freed_remaining) - }); + }; + + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", + STANDARD_TIMEOUT, + expected, + events, + worker_freed_remaining, + ), + _ = drain_future => {}, + } assert_eq!(events, expected); assert_eq!(worker_freed_remaining, 0); } - pub fn assert_event_journal(&mut self, expected: &[&str]) { - self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT); + pub async fn assert_event_journal(&mut self, expected: &[&str]) { + self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT) + .await } /// Assert that the `BeaconProcessor` event journal is as `expected`. @@ -413,34 +394,34 @@ impl TestRig { /// /// We won't attempt to listen for any more than `expected.len()` events. As such, it makes sense /// to use the `NOTHING_TO_DO` event to ensure that execution has completed. - pub fn assert_event_journal_with_timeout(&mut self, expected: &[&str], timeout: Duration) { - let events = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); - - let drain_future = async { - while let Some(event) = self.work_journal_rx.recv().await { - events.push(event); - - // Break as soon as we collect the desired number of events. 
- if events.len() >= expected.len() { - break; - } + pub async fn assert_event_journal_with_timeout( + &mut self, + expected: &[&str], + timeout: Duration, + ) { + let mut events = Vec::with_capacity(expected.len()); + + let drain_future = async { + while let Some(event) = self.work_journal_rx.recv().await { + events.push(event); + + // Break as soon as we collect the desired number of events. + if events.len() >= expected.len() { + break; } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(timeout) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", - timeout, - expected, - events - ), - _ = drain_future => {}, } - - events - }); + }; + + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(timeout) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", + timeout, + expected, + events + ), + _ = drain_future => {}, + } assert_eq!(events, expected); } @@ -455,9 +436,9 @@ fn junk_message_id() -> MessageId { } /// Blocks that arrive early should be queued for later processing. -#[test] -fn import_gossip_block_acceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_acceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -477,7 +458,8 @@ fn import_gossip_block_acceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for @@ -492,7 +474,8 @@ fn import_gossip_block_acceptably_early() { "block not yet imported" ); - rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -502,9 +485,9 @@ fn import_gossip_block_acceptably_early() { } /// Blocks that are *too* early shouldn't get into the delay queue. -#[test] -fn import_gossip_block_unacceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_unacceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -524,11 +507,12 @@ fn import_gossip_block_unacceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Waiting for 5 seconds is a bit arbitrary, however it *should* be long enough to ensure the // block isn't imported. - rig.assert_no_events_for(Duration::from_secs(5)); + rig.assert_no_events_for(Duration::from_secs(5)).await; assert!( rig.head_root() != rig.next_block.canonical_root(), @@ -537,9 +521,9 @@ fn import_gossip_block_unacceptably_early() { } /// Blocks that arrive on-time should be processed normally. 
-#[test] -fn import_gossip_block_at_current_slot() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_at_current_slot() { + let mut rig = TestRig::new(SMALL_CHAIN).await; assert_eq!( rig.chain.slot().unwrap(), @@ -549,7 +533,8 @@ fn import_gossip_block_at_current_slot() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -559,15 +544,16 @@ fn import_gossip_block_at_current_slot() { } /// Ensure a valid attestation can be imported. -#[test] -fn import_gossip_attestation() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_attestation() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -583,8 +569,8 @@ enum BlockImportMethod { /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. -fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -592,7 +578,8 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -613,11 +600,12 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. - rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -632,20 +620,20 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { ); } -#[test] -fn attestation_to_unknown_block_processed_after_gossip_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Gossip) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_gossip_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Gossip).await } -#[test] -fn attestation_to_unknown_block_processed_after_rpc_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Rpc) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_rpc_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. 
-fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Empty the op pool. rig.chain @@ -659,7 +647,8 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -680,11 +669,12 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. - rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -699,21 +689,21 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { ); } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip).await } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. -#[test] -fn requeue_unknown_block_gossip_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -721,7 +711,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -734,7 +725,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_ATTESTATION, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -745,9 +737,9 @@ fn requeue_unknown_block_gossip_attestation_without_import() { /// Ensure that aggregate that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. 
-#[test] -fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -755,7 +747,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -768,7 +761,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_AGGREGATE, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -778,10 +772,10 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { } /// Ensure a bunch of valid operations can be imported. -#[test] -fn import_misc_gossip_ops() { +#[tokio::test] +async fn import_misc_gossip_ops() { // Exits need the long chain so validators aren't too young to exit. - let mut rig = TestRig::new(LONG_CHAIN); + let mut rig = TestRig::new(LONG_CHAIN).await; /* * Attester slashing @@ -791,7 +785,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_attester_slashing(); - rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attester_slashings(), @@ -807,7 +802,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_proposer_slashing(); - rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_proposer_slashings(), @@ -823,7 +819,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_voluntary_exit(); - rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_voluntary_exits(), diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index b367f7f6d2e..bb3565e885a 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -13,6 +13,7 @@ use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerI use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; +use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; @@ -636,24 +637,27 @@ impl Worker { /// /// Raises a log if there are errors. 
#[allow(clippy::too_many_arguments)] - pub fn process_gossip_block( + pub async fn process_gossip_block( self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, seen_duration: Duration, ) { - if let Some(gossip_verified_block) = self.process_gossip_unverified_block( - message_id, - peer_id, - peer_client, - block, - reprocess_tx.clone(), - seen_duration, - ) { + if let Some(gossip_verified_block) = self + .process_gossip_unverified_block( + message_id, + peer_id, + peer_client, + block, + reprocess_tx.clone(), + seen_duration, + ) + .await + { let block_root = gossip_verified_block.block_root; if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( @@ -661,7 +665,8 @@ impl Worker { gossip_verified_block, reprocess_tx, seen_duration, - ); + ) + .await; // Drop the handle to remove the entry from the cache drop(handle); } else { @@ -678,12 +683,12 @@ impl Worker { /// if it passes gossip propagation criteria, tell the network thread to forward it. /// /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. - pub fn process_gossip_unverified_block( + pub async fn process_gossip_unverified_block( &self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, seen_duration: Duration, ) -> Option> { @@ -704,7 +709,7 @@ impl Worker { Some(peer_client.to_string()), ); - let verified_block = match self.chain.verify_block_for_gossip(block) { + let verified_block = match self.chain.clone().verify_block_for_gossip(block).await { Ok(verified_block) => { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); @@ -887,7 +892,7 @@ impl Worker { /// Process the beacon block that has already passed gossip verification. /// /// Raises a log if there are errors. - pub fn process_gossip_verified_block( + pub async fn process_gossip_verified_block( self, peer_id: PeerId, verified_block: GossipVerifiedBlock, @@ -895,9 +900,9 @@ impl Worker { // This value is not used presently, but it might come in handy for debugging. _seen_duration: Duration, ) { - let block = Box::new(verified_block.block.clone()); + let block: Arc<_> = verified_block.block.clone(); - match self.chain.process_block(verified_block) { + match self.chain.process_block(verified_block).await { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -913,24 +918,27 @@ impl Worker { ) }; - trace!( + debug!( self.log, "Gossipsub block processed"; + "block" => ?block_root, "peer_id" => %peer_id ); - match self.chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "block gossip" - ), - Err(e) => error!( + if let Err(e) = self.chain.recompute_head_at_current_slot().await { + error!( self.log, "Fork choice failed"; "error" => ?e, - "location" => "block gossip" - ), + "location" => "block_gossip" + ) + } else { + debug!( + self.log, + "Fork choice success"; + "block" => ?block_root, + "location" => "block_gossip" + ) } } Err(BlockError::ParentUnknown { .. 
}) => { @@ -1134,13 +1142,9 @@ impl Worker { .read() .register_gossip_attester_slashing(slashing.as_inner()); - if let Err(e) = self.chain.import_attester_slashing(slashing) { - debug!(self.log, "Error importing attester slashing"; "error" => ?e); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL); - } else { - debug!(self.log, "Successfully imported attester slashing"); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); - } + self.chain.import_attester_slashing(slashing); + debug!(self.log, "Successfully imported attester slashing"); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } /// Process the sync committee signature received from the gossip network and: diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 2d2196b9e99..239f45462a4 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -9,6 +9,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; +use std::sync::Arc; use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -62,7 +63,7 @@ impl Worker { &self, remote: &StatusMessage, ) -> Result, BeaconChainError> { - let local = self.chain.status_message()?; + let local = self.chain.status_message(); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); let irrelevant_reason = if local.fork_digest != remote.fork_digest { @@ -143,7 +144,7 @@ impl Worker { Ok(Some(block)) => { self.send_response( peer_id, - Response::BlocksByRoot(Some(Box::new(block))), + Response::BlocksByRoot(Some(block)), request_id, ); send_block_count += 1; @@ -278,7 +279,7 @@ impl Worker { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(Some(Box::new(block))), + response: Response::BlocksByRange(Some(Arc::new(block))), id: request_id, }); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 943ee9cdaf7..24ad2d59c6e 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -10,7 +10,8 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, trace, warn}; +use slog::{debug, error, info, warn}; +use std::sync::Arc; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -35,9 +36,9 @@ struct ChainSegmentFailed { impl Worker { /// Attempt to process a block received from a direct RPC request. 
- pub fn process_rpc_block( + pub async fn process_rpc_block( self, - block: SignedBeaconBlock, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, reprocess_tx: mpsc::Sender>, @@ -56,7 +57,7 @@ impl Worker { } }; let slot = block.slot(); - let result = self.chain.process_block(block); + let result = self.chain.process_block(block).await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -77,7 +78,8 @@ impl Worker { None, None, ); - self.run_fork_choice() + + self.recompute_head("process_head").await; } } // Sync handles these results @@ -92,10 +94,10 @@ impl Worker { /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. - pub fn process_chain_segment( + pub async fn process_chain_segment( &self, sync_type: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, ) { let result = match sync_type { // this a request from the range sync @@ -104,7 +106,7 @@ impl Worker { let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match self.process_blocks(downloaded_blocks.iter()) { + match self.process_blocks(downloaded_blocks.iter()).await { (_, Ok(_)) => { debug!(self.log, "Batch processed"; "batch_epoch" => epoch, @@ -171,7 +173,7 @@ impl Worker { ); // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse - match self.process_blocks(downloaded_blocks.iter().rev()) { + match self.process_blocks(downloaded_blocks.iter().rev()).await { (imported_blocks, Err(e)) => { debug!(self.log, "Parent lookup failed"; "error" => %e.message); BatchProcessResult::Failed { @@ -191,19 +193,17 @@ impl Worker { } /// Helper function to process blocks batches which only consumes the chain and blocks to process. - fn process_blocks<'a>( + async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>, + downloaded_blocks: impl Iterator>>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blocks = downloaded_blocks.cloned().collect::>(); - match self.chain.process_chain_segment(blocks) { + let blocks: Vec> = downloaded_blocks.cloned().collect(); + match self.chain.process_chain_segment(blocks).await { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if imported_blocks > 0 { - // Batch completed successfully with at least one block, run fork choice. - self.run_fork_choice(); + self.recompute_head("process_blocks_ok").await; } - (imported_blocks, Ok(())) } ChainSegmentResult::Failed { @@ -213,7 +213,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); let r = self.handle_failed_chain_segment(error); if imported_blocks > 0 { - self.run_fork_choice(); + self.recompute_head("process_blocks_err").await; } (imported_blocks, r) } @@ -223,9 +223,13 @@ impl Worker { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. 
fn process_backfill_blocks( &self, - blocks: Vec>, + blocks: Vec>>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blinded_blocks = blocks.into_iter().map(Into::into).collect(); + let blinded_blocks = blocks + .iter() + .map(|full_block| full_block.clone_as_blinded()) + .map(Arc::new) + .collect(); match self.chain.import_historical_block_batch(blinded_blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -337,18 +341,18 @@ impl Worker { /// Runs fork-choice on a given chain. This is used during block processing after one successful /// block import. - fn run_fork_choice(&self) { - match self.chain.fork_choice() { - Ok(()) => trace!( + async fn recompute_head(&self, location: &str) { + match self.chain.recompute_head_at_current_slot().await { + Ok(()) => debug!( self.log, "Fork choice success"; - "location" => "batch processing" + "location" => location ), Err(e) => error!( self.log, "Fork choice failed"; "error" => ?e, - "location" => "batch import error" + "location" => location ), } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index b8db9c17f83..9d86c3e55a6 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -2,9 +2,10 @@ use crate::beacon_processor::{ BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, }; use crate::service::{NetworkMessage, RequestId}; +use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::*; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, @@ -114,11 +115,10 @@ impl Processor { /// Called when we first connect to a peer, or when the PeerManager determines we need to /// re-status. pub fn send_status(&mut self, peer_id: PeerId) { - if let Ok(status_message) = status_message(&self.chain) { - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); - self.network - .send_processor_request(peer_id, Request::Status(status_message)); - } + let status_message = status_message(&self.chain); + debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + self.network + .send_processor_request(peer_id, Request::Status(status_message)); } /// Handle a `Status` request. @@ -132,12 +132,12 @@ impl Processor { ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); - // ignore status responses if we are shutting down - if let Ok(status_message) = status_message(&self.chain) { - // Say status back. - self.network - .send_response(peer_id, Response::Status(status_message), request_id); - } + // Say status back. 
+ self.network.send_response( + peer_id, + Response::Status(status_message(&self.chain)), + request_id, + ); self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) } @@ -178,7 +178,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -209,7 +209,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -244,7 +244,7 @@ impl Processor { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, ) { self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( message_id, @@ -370,22 +370,6 @@ impl Processor { } } -/// Build a `StatusMessage` representing the state of the given `beacon_chain`. -pub(crate) fn status_message( - beacon_chain: &BeaconChain, -) -> Result { - let head_info = beacon_chain.head_info()?; - let fork_digest = beacon_chain.enr_fork_id().fork_digest; - - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) -} - /// Wraps a Network Channel to employ various RPC related network functionality for the /// processor. #[derive(Clone)] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8995de2e5f..c21183608aa 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,7 +7,7 @@ use crate::{ subnet_service::{AttestationService, SubnetServiceMessage}, NetworkConfig, }; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; @@ -30,8 +30,8 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, RelativeEpoch, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, Unsigned, ValidatorSubscription, + ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + Unsigned, ValidatorSubscription, }; mod tests; @@ -706,29 +706,12 @@ impl NetworkService { fn update_gossipsub_parameters(&mut self) { if let Ok(slot) = self.beacon_chain.slot() { - if let Some(active_validators) = self + let active_validators_opt = self .beacon_chain - .with_head(|head| { - Ok::<_, BeaconChainError>( - head.beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - self.beacon_chain.epoch().ok().map(|current_epoch| { - head.beacon_state - .validators() - .iter() - .filter(|validator| validator.is_active_at(current_epoch)) - .count() - }) - }), - ) - }) - .unwrap_or(None) - { + .canonical_head + .cached_head() + .active_validator_count(); + if let Some(active_validators) = active_validators_opt { if self .libp2p .swarm @@ -742,6 +725,14 @@ impl NetworkService { "active_validators" => active_validators ); } + } else { + // This scenario will only happen if the caches on the cached canonical head aren't + // built. That should never be the case. 
+ error!( + self.log, + "Active validator count unavailable"; + "info" => "please report this bug" + ); } } } diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ade490e00ec..865f8ee933f 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,4 +1,5 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -6,20 +7,33 @@ use lighthouse_network::rpc::StatusMessage; /// NOTE: The purpose of this is simply to obtain a `StatusMessage` from the `BeaconChain` without /// polluting/coupling the type with RPC concepts. pub trait ToStatusMessage { - fn status_message(&self) -> Result; + fn status_message(&self) -> StatusMessage; } impl ToStatusMessage for BeaconChain { - fn status_message(&self) -> Result { - let head_info = self.head_info()?; - let fork_digest = self.enr_fork_id().fork_digest; + fn status_message(&self) -> StatusMessage { + status_message(self) + } +} + +/// Build a `StatusMessage` representing the state of the given `beacon_chain`. +pub(crate) fn status_message(beacon_chain: &BeaconChain) -> StatusMessage { + let fork_digest = beacon_chain.enr_fork_id().fork_digest; + let cached_head = beacon_chain.canonical_head.cached_head(); + let mut finalized_checkpoint = cached_head.finalized_checkpoint(); + + // Alias the genesis checkpoint root to `0x00`. + let spec = &beacon_chain.spec; + let genesis_epoch = spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()); + if finalized_checkpoint.epoch == genesis_epoch { + finalized_checkpoint.root = Hash256::zero(); + } - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) + StatusMessage { + fork_digest, + finalized_root: finalized_checkpoint.root, + finalized_epoch: finalized_checkpoint.epoch, + head_root: cached_head.head_block_root(), + head_slot: cached_head.head_slot(), } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 581f6b32702..778eb63263c 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime}; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; +use task_executor::test_utils::TestRuntime; use types::{ CommitteeIndex, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, @@ -32,6 +33,7 @@ type TestBeaconChainType = Witness< pub struct TestBeaconChain { chain: Arc>, + _test_runtime: TestRuntime, } impl TestBeaconChain { @@ -46,11 +48,14 @@ impl TestBeaconChain { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let test_runtime = TestRuntime::default(); + let chain = Arc::new( BeaconChainBuilder::new(MainnetEthSpec) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) + .task_executor(test_runtime.task_executor.clone()) .genesis_state( interop_genesis_state::( &keypairs, @@ -74,7 +79,10 @@ impl TestBeaconChain { .build() .expect("should build"), ); - Self { chain } + Self { + chain, + _test_runtime: test_runtime, + } } } diff --git 
a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index be750e25f02..dc0b6c72027 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -53,7 +53,7 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let mut hasher = DefaultHasher::new(); @@ -392,7 +392,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index ece923ef591..8ac65df3d6b 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -7,6 +7,7 @@ use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; use slog::{crit, debug, error, trace, warn, Logger}; use smallvec::SmallVec; +use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; use tokio::sync::mpsc; @@ -105,7 +106,7 @@ impl BlockLookups { pub fn search_parent( &mut self, - block: Box>, + block: Arc>, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { @@ -129,7 +130,7 @@ impl BlockLookups { return; } - let parent_lookup = ParentLookup::new(*block, peer_id); + let parent_lookup = ParentLookup::new(block, peer_id); self.request_parent(parent_lookup, cx); } @@ -139,7 +140,7 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { @@ -203,7 +204,7 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { @@ -482,7 +483,7 @@ impl BlockLookups { Err(BlockError::ParentUnknown(block)) => { // need to keep looking for parents // add the block back to the queue and continue the search - parent_lookup.add_block(*block); + parent_lookup.add_block(block); self.request_parent(parent_lookup, cx); } Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => { @@ -581,7 +582,7 @@ impl BlockLookups { fn send_block_for_processing( &mut self, - block: Box>, + block: Arc>, duration: Duration, process_type: BlockProcessType, ) -> Result<(), ()> { diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index a9a3c34bc05..62503353ade 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,4 +1,5 @@ use lighthouse_network::PeerId; +use std::sync::Arc; use store::{EthSpec, Hash256, SignedBeaconBlock}; use strum::IntoStaticStr; @@ -21,7 +22,7 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, /// Request of the last parent. current_parent_request: SingleBlockRequest, /// Id of the last parent request. 
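The hunks above replace `Box<SignedBeaconBlock<_>>` with `Arc<SignedBeaconBlock<_>>` across the sync code. A minimal, self-contained sketch of the motivation, using a hypothetical `FakeBlock`/`FakeParentLookup` pair rather than the real types: cloning an `Arc` is only a reference-count bump, so a downloaded block can be retained by the parent-lookup state and handed on for processing without copying the block body.

// Illustrative sketch only; `FakeBlock` and `FakeParentLookup` are stand-ins, not Lighthouse types.
use std::sync::Arc;

struct FakeBlock {
    parent_root: u64,
    payload: Vec<u8>,
}

struct FakeParentLookup {
    downloaded_blocks: Vec<Arc<FakeBlock>>,
}

impl FakeParentLookup {
    fn add_block(&mut self, block: Arc<FakeBlock>) -> u64 {
        let next_parent = block.parent_root;
        // Storing the `Arc` shares ownership; the payload is not copied.
        self.downloaded_blocks.push(block);
        next_parent
    }
}

fn main() {
    let block = Arc::new(FakeBlock { parent_root: 42, payload: vec![0u8; 1024] });
    let mut lookup = FakeParentLookup { downloaded_blocks: vec![] };

    // One handle stays in the lookup, another is "sent for processing".
    let next_parent = lookup.add_block(Arc::clone(&block));
    let for_processing = block;

    assert_eq!(next_parent, 42);
    assert_eq!(for_processing.payload.len(), 1024);
    assert_eq!(Arc::strong_count(&for_processing), 2);
}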
@@ -48,10 +49,10 @@ impl ParentLookup { pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { self.downloaded_blocks .iter() - .any(|d_block| d_block == block) + .any(|d_block| d_block.as_ref() == block) } - pub fn new(block: SignedBeaconBlock, peer_id: PeerId) -> Self { + pub fn new(block: Arc>, peer_id: PeerId) -> Self { let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); Self { @@ -86,7 +87,7 @@ impl ParentLookup { self.current_parent_request.check_peer_disconnected(peer_id) } - pub fn add_block(&mut self, block: SignedBeaconBlock) { + pub fn add_block(&mut self, block: Arc>) { let next_parent = block.parent_root(); self.downloaded_blocks.push(block); self.current_parent_request.hash = next_parent; @@ -108,7 +109,7 @@ impl ParentLookup { self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec> { + pub fn chain_blocks(&mut self) -> Vec>> { std::mem::take(&mut self.downloaded_blocks) } @@ -116,9 +117,9 @@ impl ParentLookup { /// the processing result of the block. pub fn verify_block( &mut self, - block: Option>>, + block: Option>>, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>>, VerifyError> { + ) -> Result>>, VerifyError> { let block = self.current_parent_request.verify_block(block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 347a4ae4377..debf3de8dba 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::sync::Arc; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; @@ -82,8 +83,8 @@ impl SingleBlockRequest { /// Returns the block for processing if the response is what we expected. pub fn verify_block( &mut self, - block: Option>>, - ) -> Result>>, VerifyError> { + block: Option>>, + ) -> Result>>, VerifyError> { match self.state { State::AwaitingDownload => { self.register_failure(); @@ -195,7 +196,7 @@ mod tests { let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); sl.request_block().unwrap(); - sl.verify_block(Some(Box::new(block))).unwrap().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); } #[test] diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index dde7d49953a..e9c8ac8ca74 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -158,7 +158,7 @@ fn test_single_block_lookup_happy_path() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -204,7 +204,7 @@ fn test_single_block_lookup_wrong_response() { // Peer sends something else. It should be penalized. 
let bad_block = rig.rand_block(); - bl.single_block_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); rig.expect_block_request(); // should be retried @@ -243,7 +243,7 @@ fn test_single_block_lookup_becomes_parent_request() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -252,7 +252,7 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed(id, Err(BlockError::ParentUnknown(Box::new(block))), &mut cx); + bl.single_block_processed(id, Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx); assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); @@ -269,11 +269,11 @@ fn test_parent_lookup_happy_path() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id = rig.expect_parent_request(); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); rig.expect_empty_network(); @@ -294,12 +294,12 @@ fn test_parent_lookup_wrong_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends the wrong block, peer should be penalized and the block re-requested. let bad_block = rig.rand_block(); - bl.parent_lookup_response(id1, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); let id2 = rig.expect_parent_request(); @@ -308,7 +308,7 @@ fn test_parent_lookup_wrong_response() { rig.expect_empty_network(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -328,7 +328,7 @@ fn test_parent_lookup_empty_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends an empty response, peer should be penalized and the block re-requested. @@ -337,7 +337,7 @@ fn test_parent_lookup_empty_response() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. 
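The lookup tests above exercise a wrong-block/penalty/retry cycle. The contract they assume, sketched here with hypothetical stand-in types (not the real `SingleBlockRequest`), is that a response is only forwarded for processing when it matches the root that was originally requested; anything else counts as a peer fault, and an empty response simply ends the request.

// Illustrative sketch only; all types below are hypothetical stand-ins.
use std::sync::Arc;

struct FakeBlock {
    root: u64,
}

struct FakeSingleBlockRequest {
    requested_root: u64,
}

enum FakeVerifyError {
    RootMismatch,
}

impl FakeSingleBlockRequest {
    fn verify_block(
        &self,
        block: Option<Arc<FakeBlock>>,
    ) -> Result<Option<Arc<FakeBlock>>, FakeVerifyError> {
        match block {
            // Correct block: hand it on for processing.
            Some(block) if block.root == self.requested_root => Ok(Some(block)),
            // Wrong block: the caller penalizes the peer and retries.
            Some(_) => Err(FakeVerifyError::RootMismatch),
            // Stream termination without a block: nothing to process.
            None => Ok(None),
        }
    }
}

fn main() {
    let request = FakeSingleBlockRequest { requested_root: 7 };
    assert!(request.verify_block(Some(Arc::new(FakeBlock { root: 7 }))).is_ok());
    assert!(request.verify_block(Some(Arc::new(FakeBlock { root: 9 }))).is_err());
    assert!(matches!(request.verify_block(None), Ok(None)));
}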
@@ -357,7 +357,7 @@ fn test_parent_lookup_rpc_failure() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // The request fails. It should be tried again. @@ -365,7 +365,7 @@ fn test_parent_lookup_rpc_failure() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -385,7 +385,7 @@ fn test_parent_lookup_too_many_attempts() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE + 1 { let id = rig.expect_parent_request(); match i % 2 { @@ -397,7 +397,7 @@ fn test_parent_lookup_too_many_attempts() { _ => { // Send a bad block this time. It should be tried again. let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); } } @@ -427,12 +427,12 @@ fn test_parent_lookup_too_deep() { let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); for block in blocks.into_iter().rev() { let id = rig.expect_parent_request(); // the block - bl.parent_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); // the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); // the processing request @@ -440,7 +440,7 @@ fn test_parent_lookup_too_deep() { // the processing result bl.parent_block_processed( chain_hash, - Err(BlockError::ParentUnknown(Box::new(block))), + Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx, ) } @@ -454,7 +454,7 @@ fn test_parent_lookup_disconnection() { let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let peer_id = PeerId::random(); let trigger_block = rig.rand_block(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 0003db6ab08..0e1cd80a6f3 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -88,12 +88,12 @@ pub enum SyncMessage { RpcBlock { request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, }, /// A block with an unknown parent has been received. - UnknownBlock(PeerId, Box>), + UnknownBlock(PeerId, Arc>), /// A peer has sent an object that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. @@ -228,17 +228,12 @@ impl SyncManager { /// ours that we consider it fully sync'd with respect to our current chain. 
fn add_peer(&mut self, peer_id: PeerId, remote: SyncInfo) { // ensure the beacon chain still exists - let local = match self.chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; let sync_type = remote_sync_type(&local, &remote, &self.chain); @@ -378,7 +373,7 @@ impl SyncManager { // advanced and will produce a head chain on re-status. Otherwise it will shift // to being synced let mut sync_state = { - let head = self.chain.best_slot().unwrap_or_else(|_| Slot::new(0)); + let head = self.chain.best_slot(); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); let peers = self.network_globals.peers.read(); @@ -481,11 +476,7 @@ impl SyncManager { SyncMessage::UnknownBlock(peer_id, block) => { // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore if !self.network_globals.sync_state.read().is_synced() { - let head_slot = self - .chain - .head_info() - .map(|info| info.slot) - .unwrap_or_else(|_| Slot::from(0u64)); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let unknown_block_slot = block.slot(); // if the block is far in the future, ignore it. If its within the slot tolerance of @@ -570,7 +561,7 @@ impl SyncManager { &mut self, request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, ) { match request_id { @@ -598,7 +589,7 @@ impl SyncManager { batch_id, &peer_id, id, - beacon_block.map(|b| *b), + beacon_block, ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -620,7 +611,7 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block.map(|b| *b), + beacon_block, ); self.update_sync_state(); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 96bdc533f8d..ffbd1a64da0 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -65,27 +65,26 @@ impl SyncNetworkContext { chain: &C, peers: impl Iterator, ) { - if let Ok(status_message) = chain.status_message() { - for peer_id in peers { - debug!( - self.log, - "Sending Status Request"; - "peer" => %peer_id, - "fork_digest" => ?status_message.fork_digest, - "finalized_root" => ?status_message.finalized_root, - "finalized_epoch" => ?status_message.finalized_epoch, - "head_root" => %status_message.head_root, - "head_slot" => %status_message.head_slot, - ); + let status_message = chain.status_message(); + for peer_id in peers { + debug!( + self.log, + "Sending Status Request"; + "peer" => %peer_id, + "fork_digest" => ?status_message.fork_digest, + "finalized_root" => ?status_message.finalized_root, + "finalized_epoch" => ?status_message.finalized_epoch, + "head_root" => %status_message.head_root, + "head_slot" => %status_message.head_slot, + ); - let request = Request::Status(status_message.clone()); - let request_id = RequestId::Router; - let _ = self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - }); - } + let 
request = Request::Status(status_message.clone()); + let request_id = RequestId::Router; + let _ = self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + }); } } diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index ed3f07763cd..c01366f1be9 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -59,7 +59,7 @@ pub fn remote_sync_type( if remote.head_slot < near_range_start { PeerSyncType::Behind } else if remote.head_slot > near_range_end - && !chain.fork_choice.read().contains_block(&remote.head_root) + && !chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer has a head ahead enough of ours and we have no knowledge of their best // block. @@ -74,7 +74,7 @@ pub fn remote_sync_type( if (local.finalized_epoch + 1 == remote.finalized_epoch && near_range_start <= remote.head_slot && remote.head_slot <= near_range_end) - || chain.fork_choice.read().contains_block(&remote.head_root) + || chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer is near enough to us to be considered synced, or // we have already synced up to this peer's head diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index aaebe022c70..6dda70c1974 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -4,6 +4,7 @@ use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; +use std::sync::Arc; use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; /// The number of times to retry a batch before it is considered failed. @@ -46,7 +47,7 @@ pub trait BatchConfig { /// Note that simpler hashing functions considered in the past (hash of first block, hash of last /// block, number of received blocks) are not good enough to differentiate attempts. For this /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; + fn batch_attempt_hash(blocks: &[Arc>]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -58,7 +59,7 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); blocks.hash(&mut hasher); hasher.finish() @@ -100,9 +101,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>, Id), + Downloading(PeerId, Vec>>, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>), + AwaitingProcessing(PeerId, Vec>>), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -212,7 +213,7 @@ impl BatchInfo { } /// Adds a block to a downloading batch. 
- pub fn add_block(&mut self, block: SignedBeaconBlock) -> Result<(), WrongState> { + pub fn add_block(&mut self, block: Arc>) -> Result<(), WrongState> { match self.state.poison() { BatchState::Downloading(peer, mut blocks, req_id) => { blocks.push(block); @@ -338,7 +339,7 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>, WrongState> { + pub fn start_processing(&mut self) -> Result>>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); @@ -437,7 +438,10 @@ pub struct Attempt { } impl Attempt { - fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + fn new( + peer_id: PeerId, + blocks: &[Arc>], + ) -> Self { let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs index 5f8033bc51e..df49543a6b6 100644 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ b/beacon_node/network/src/sync/range_sync/block_storage.rs @@ -8,6 +8,6 @@ pub trait BlockStorage { impl BlockStorage for BeaconChain { fn is_block_known(&self, block_root: &Hash256) -> bool { - self.fork_choice.read().contains_block(block_root) + self.block_is_known_to_fork_choice(block_root) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 88837d0e127..f3130a7343c 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -9,6 +9,7 @@ use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; +use std::sync::Arc; use tokio::sync::mpsc::Sender; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -216,7 +217,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9953df81d09..f08f8eb82a5 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -53,7 +53,7 @@ use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, trace, warn}; +use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; @@ -221,7 +221,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { @@ -365,17 +365,12 @@ where network.status_peers(self.beacon_chain.as_ref(), chain.peers()); - let local = match self.beacon_chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.beacon_chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: 
status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; // update the state of the collection @@ -447,8 +442,8 @@ mod tests { } impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> Result { - Ok(self.status.read().clone()) + fn status_message(&self) -> StatusMessage { + self.status.read().clone() } } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 84d23a45626..6b8b8eb145b 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -21,3 +21,4 @@ store = { path = "../store" } [dev-dependencies] beacon_chain = { path = "../beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 70eb31cd0fb..771dca12f69 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -710,7 +710,7 @@ mod release_tests { } /// Test state for sync contribution-related tests. - fn sync_contribution_test_state( + async fn sync_contribution_test_state( num_committees: usize, ) -> (BeaconChainHarness>, ChainSpec) { let mut spec = E::default_spec(); @@ -722,12 +722,14 @@ mod release_tests { let harness = get_harness::(num_validators, Some(spec.clone())); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1)], - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1)], + (0..num_validators).collect::>().as_slice(), + ) + .await; (harness, spec) } @@ -1454,9 +1456,9 @@ mod release_tests { } /// End-to-end test of basic sync contribution handling. - #[test] - fn sync_contribution_aggregation_insert_get_prune() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_aggregation_insert_get_prune() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1514,9 +1516,9 @@ mod release_tests { } /// Adding a sync contribution already in the pool should not increase the size of the pool. - #[test] - fn sync_contribution_duplicate() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_duplicate() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1551,9 +1553,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with more bits set should increase the /// number of bits set in the aggregate. - #[test] - fn sync_contribution_with_more_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_more_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1631,9 +1633,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with fewer bits set should not increase the /// number of bits set in the aggregate. 
- #[test] - fn sync_contribution_with_fewer_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_fewer_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index fe66a176b67..e66cee6fdeb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -669,7 +669,11 @@ impl, Cold: ItemStore> HotColdDB for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - self.block_as_kv_store_ops(&block_root, *block, &mut key_value_batch)?; + self.block_as_kv_store_ops( + &block_root, + block.as_ref().clone(), + &mut key_value_batch, + )?; } StoreOp::PutState(state_root, state) => { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 613c2e416ca..364bda2cc40 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -39,6 +39,7 @@ pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; +use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; @@ -152,7 +153,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Reified key-value storage operation. Helps in modifying the storage atomically. /// See also https://github.com/sigp/lighthouse/issues/692 pub enum StoreOp<'a, E: EthSpec> { - PutBlock(Hash256, Box>), + PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index bf2acaf5bb5..944846c863b 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,7 +3,7 @@ //! This service allows task execution on the beacon node for various functionality. 
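The test changes above follow the same pattern used throughout this patch: synchronous `#[test]` functions become `#[tokio::test]` async functions so that the now-async harness calls can be awaited. A generic sketch of that conversion, assuming `tokio` is available as a dev-dependency with its `macros` and `rt` features enabled (the Cargo.toml hunks above add `rt-multi-thread`):

// Illustrative sketch only; `build_fixture` stands in for an async setup call such as
// `TestRig::new(..).await` or `sync_contribution_test_state(..).await`.
#[cfg(test)]
mod async_test_sketch {
    async fn build_fixture() -> u64 {
        42
    }

    #[tokio::test]
    async fn example_async_test() {
        // Async setup is awaited inside the test body instead of being run synchronously.
        let fixture = build_fixture().await;
        assert_eq!(fixture, 42);
    }
}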
use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{debug, info, warn}; +use slog::{info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::time::sleep; @@ -13,11 +13,8 @@ pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, ) -> Result<(), &'static str> { - let log = executor.log(); - let per_slot_executor = executor.clone(); - + let log = executor.log().clone(); let timer_future = async move { - let log = per_slot_executor.log().clone(); loop { let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { Some(duration) => duration, @@ -28,31 +25,12 @@ pub fn spawn_timer( }; sleep(duration_to_next_slot).await; - - let chain = beacon_chain.clone(); - if let Some(handle) = per_slot_executor - .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") - { - if let Err(e) = handle.await { - warn!( - log, - "Per slot task failed"; - "info" => ?e - ); - } - } else { - debug!( - log, - "Per slot task timer stopped"; - "info" => "shutting down" - ); - break; - } + beacon_chain.per_slot_task().await; } }; executor.spawn(timer_future, "timer"); - info!(log, "Timer service started"); + info!(executor.log(), "Timer service started"); Ok(()) } diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index f344dc47354..08bb565870d 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index dd525bea504..353157e44a4 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -7,6 +7,8 @@ use slog::{crit, debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; +pub use tokio::task::JoinHandle; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -312,6 +314,62 @@ impl TaskExecutor { Some(future) } + /// Block the current (non-async) thread on the completion of some future. + /// + /// ## Warning + /// + /// This method is "dangerous" since calling it from an async thread will result in a panic! Any + /// use of this outside of testing should be very deeply considered as Lighthouse has been + /// burned by this function in the past. + /// + /// Determining what is an "async thread" is rather challenging; just because a function isn't + /// marked as `async` doesn't mean it's not being called from an `async` function or there isn't + /// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to + /// @paulhauner if you plan to use this function in production. He has put metrics in here to + /// track any use of it, so don't think you can pull a sneaky one on him. + pub fn block_on_dangerous( + &self, + future: F, + name: &'static str, + ) -> Option +where { + let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); + metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + let log = self.log.clone(); + let handle = self.handle()?; + let exit = self.exit.clone(); + + debug!( + log, + "Starting block_on task"; + "name" => name + ); + + handle.block_on(async { + let output = tokio::select! 
{ + output = future => { + debug!( + log, + "Completed block_on task"; + "name" => name + ); + Some(output) + }, + _ = exit => { + debug!( + log, + "Cancelled block_on task"; + "name" => name, + ); + None + } + }; + metrics::dec_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + drop(timer); + output + }) + } + /// Returns a `Handle` to the current runtime. pub fn handle(&self) -> Option { self.handle_provider.handle() diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index ead5925b6e8..662225fbd76 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -18,6 +18,16 @@ lazy_static! { "Time taken by blocking tasks", &["blocking_task_hist"] ); + pub static ref BLOCK_ON_TASKS_COUNT: Result = try_create_int_gauge_vec( + "block_on_tasks_count", + "Total number of block_on_dangers tasks spawned", + &["name"] + ); + pub static ref BLOCK_ON_TASKS_HISTOGRAM: Result = try_create_histogram_vec( + "block_on_tasks_histogram", + "Time taken by block_on_dangerous tasks", + &["name"] + ); pub static ref TASKS_HISTOGRAM: Result = try_create_histogram_vec( "async_tasks_time_histogram", "Time taken by async tasks", diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index ed6b6ab557f..b2570092e6d 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -16,3 +16,4 @@ eth2_ssz_derive = "0.3.0" [dev-dependencies] beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 026c6097c9b..4256076cb95 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -4,9 +4,8 @@ use ssz_derive::{Decode, Encode}; use std::cmp::Ordering; use std::marker::PhantomData; use std::time::Duration; -use types::MainnetEthSpec; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; @@ -211,6 +210,7 @@ fn dequeue_attestations( /// Equivalent to the `is_from_block` `bool` in: /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation +#[derive(Clone, Copy)] pub enum AttestationFromBlock { True, False, @@ -224,6 +224,13 @@ pub struct ForkchoiceUpdateParameters { pub finalized_hash: Option, } +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct ForkChoiceView { + pub head_block_root: Hash256, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, +} + /// Provides an implementation of "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice @@ -242,7 +249,9 @@ pub struct ForkChoice { /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. - forkchoice_update_parameters: Option, + forkchoice_update_parameters: ForkchoiceUpdateParameters, + /// The most recent result of running `Self::get_head`. 
+ head_block_root: Hash256, _phantom: PhantomData, } @@ -269,6 +278,8 @@ where anchor_block_root: Hash256, anchor_block: &SignedBeaconBlock, anchor_state: &BeaconState, + current_slot: Option, + spec: &ChainSpec, ) -> Result> { // Sanity check: the anchor must lie on an epoch boundary. if anchor_block.slot() % E::slots_per_epoch() != 0 { @@ -303,6 +314,9 @@ where }, ); + // If the current slot is not provided, use the value that was last provided to the store. + let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); + let proto_array = ProtoArrayForkChoice::new::( finalized_block_slot, finalized_block_state_root, @@ -313,15 +327,28 @@ where execution_status, )?; - Ok(Self { + let mut fork_choice = Self { fc_store, proto_array, queued_attestations: vec![], - forkchoice_update_parameters: None, + // This will be updated during the next call to `Self::get_head`. + forkchoice_update_parameters: ForkchoiceUpdateParameters { + head_hash: None, + finalized_hash: None, + head_root: Hash256::zero(), + }, + // This will be updated during the next call to `Self::get_head`. + head_block_root: Hash256::zero(), _phantom: PhantomData, - }) + }; + + // Ensure that `fork_choice.head_block_root` is updated. + fork_choice.get_head(current_slot, spec)?; + + Ok(fork_choice) } + /* /// Instantiates `Self` from some existing components. /// /// This is useful if the existing components have been loaded from disk after a process @@ -339,13 +366,13 @@ where _phantom: PhantomData, } } + */ /// Returns cached information that can be used to issue a `forkchoiceUpdated` message to an /// execution engine. /// - /// These values are updated each time `Self::get_head` is called. May return `None` if - /// `Self::get_head` has not yet been called. - pub fn get_forkchoice_update_parameters(&self) -> Option { + /// These values are updated each time `Self::get_head` is called. + pub fn get_forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { self.forkchoice_update_parameters } @@ -419,6 +446,8 @@ where spec, )?; + self.head_block_root = head_root; + // Cache some values for the next forkchoiceUpdate call to the execution layer. let head_hash = self .get_block(&head_root) @@ -427,15 +456,35 @@ where let finalized_hash = self .get_block(&finalized_root) .and_then(|b| b.execution_status.block_hash()); - self.forkchoice_update_parameters = Some(ForkchoiceUpdateParameters { + self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, head_hash, finalized_hash, - }); + }; Ok(head_root) } + /// Return information about: + /// + /// - The LMD head of the chain. + /// - The FFG checkpoints. + /// + /// The information is "cached" since the last call to `Self::get_head`. + /// + /// ## Notes + /// + /// The finalized/justified checkpoints are determined from the fork choice store. Therefore, + /// it's possible that the state corresponding to `get_state(get_block(head_block_root))` will + /// have *differing* finalized and justified information. + pub fn cached_fork_choice_view(&self) -> ForkChoiceView { + ForkChoiceView { + head_block_root: self.head_block_root, + justified_checkpoint: self.justified_checkpoint(), + finalized_checkpoint: self.finalized_checkpoint(), + } + } + /// Returns `true` if the given `store` should be updated to set /// `state.current_justified_checkpoint` its `justified_checkpoint`. 
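A short sketch of how the cached view might be consumed. It assumes a `ForkChoice` value on which `get_head` has already run (which the constructors above now guarantee) and an illustrative `last_finalized_epoch` variable:

    // `ForkChoiceView` is `Copy`, so this is a cheap snapshot with no locking
    // and no re-computation of the head.
    let view = fork_choice.cached_fork_choice_view();
    if view.finalized_checkpoint.epoch > last_finalized_epoch {
        // e.g. prune per-block caches that are now behind finality
    }
    let head_root = view.head_block_root;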
/// @@ -529,7 +578,7 @@ where pub fn on_block>( &mut self, current_slot: Slot, - block: &BeaconBlock, + block: BeaconBlockRef, block_root: Hash256, block_delay: Duration, state: &mut BeaconState, @@ -604,13 +653,13 @@ where // Update unrealized justified/finalized checkpoints. let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = { let justifiable_beacon_state = match block { - BeaconBlock::Merge(_) | BeaconBlock::Altair(_) => { + BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { state_processing::per_epoch_processing::altair::process_justifiable( state, spec, )? .0 } - BeaconBlock::Base(_) => { + BeaconBlockRef::Base(_) => { state_processing::per_epoch_processing::base::process_justifiable(state, spec)? .0 } @@ -1065,6 +1114,11 @@ where } } + /// Returns the weight for the given block root. + pub fn get_block_weight(&self, block_root: &Hash256) -> Option { + self.proto_array.get_weight(block_root) + } + /// Returns the `ProtoBlock` for the justified checkpoint. /// /// ## Notes @@ -1094,6 +1148,39 @@ where .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } + /// Returns `Ok(true)` if `block_root` has been imported optimistically. That is, the + /// execution payload has not been verified. + /// + /// Returns `Ok(false)` if `block_root`'s execution payload has been verified, if it is a + /// pre-Bellatrix block or if it is before the PoW terminal block. + /// + /// In the case where the block could not be found in fork-choice, it returns the + /// `execution_status` of the current finalized block. + /// + /// This function assumes the `block_root` exists. + pub fn is_optimistic_block(&self, block_root: &Hash256) -> Result> { + if let Some(status) = self.get_block_execution_status(block_root) { + Ok(status.is_optimistic()) + } else { + Ok(self.get_finalized_block()?.execution_status.is_optimistic()) + } + } + + /// The same as `is_optimistic_block` but does not fall back to `self.get_finalized_block` + /// when the block cannot be found. + /// + /// Intended to be used when checking if the head has been imported optimistically. + pub fn is_optimistic_block_no_fallback( + &self, + block_root: &Hash256, + ) -> Result> { + if let Some(status) = self.get_block_execution_status(block_root) { + Ok(status.is_optimistic()) + } else { + Err(Error::MissingProtoArrayBlock(*block_root)) + } + } + /// Returns `Ok(false)` if a block is not viable to be imported optimistically. /// /// ## Notes @@ -1216,17 +1303,31 @@ where pub fn from_persisted( persisted: PersistedForkChoice, fc_store: T, + spec: &ChainSpec, ) -> Result> { let proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) .map_err(Error::InvalidProtoArrayBytes)?; - Ok(Self { + let current_slot = fc_store.get_current_slot(); + + let mut fork_choice = Self { fc_store, proto_array, queued_attestations: persisted.queued_attestations, - forkchoice_update_parameters: None, + // Will be updated in the following call to `Self::get_head`. + forkchoice_update_parameters: ForkchoiceUpdateParameters { + head_hash: None, + finalized_hash: None, + head_root: Hash256::zero(), + }, + // Will be updated in the following call to `Self::get_head`.
+ head_block_root: Hash256::zero(), _phantom: PhantomData, - }) + }; + + fork_choice.get_head(current_slot, spec)?; + + Ok(fork_choice) } /// Takes a snapshot of `Self` and stores it in `PersistedForkChoice`, allowing this struct to diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 6384df97b6e..a7085b024a5 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,4 +1,4 @@ -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; +use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -33,7 +33,7 @@ pub trait ForkChoiceStore: Sized { /// choice. Allows the implementer to performing caching or other housekeeping duties. fn on_verified_block>( &mut self, - block: &BeaconBlock, + block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error>; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 157306dd5f8..6f79b488dd6 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,8 +2,9 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - AttestationFromBlock, Error, ForkChoice, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation, + AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, + InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, + QueuedAttestation, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index a495f9584c6..3c869c59d8e 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -16,9 +16,8 @@ use fork_choice::{ }; use store::MemoryStore; use types::{ - test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState, - ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, Slot, - SubnetId, + test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, + Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; @@ -74,7 +73,14 @@ impl ForkChoiceTest { where T: Fn(&BeaconForkChoiceStore, MemoryStore>) -> U, { - func(&self.harness.chain.fork_choice.read().fc_store()) + func( + &self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .fc_store(), + ) } /// Assert the epochs match. @@ -109,15 +115,7 @@ impl ForkChoiceTest { /// Assert the given slot is greater than the head slot. 
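The test updates above show the new access pattern for fork choice: it now hangs off `canonical_head` and is reached through explicit read/write lock methods rather than `chain.fork_choice.read()`. A minimal sketch under the assumption that `chain: &BeaconChain<T>` and `block_root: Hash256` are in scope:

    // Read path: take a short-lived read lock and drop the guard promptly.
    let block_is_known = chain
        .canonical_head
        .fork_choice_read_lock()
        .get_block(&block_root)
        .is_some();

    // Write path, as the tests above use it to advance fork choice time.
    chain
        .canonical_head
        .fork_choice_write_lock()
        .update_time(chain.slot().unwrap(), &chain.spec)
        .unwrap();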
pub fn assert_finalized_epoch_is_less_than(self, epoch: Epoch) -> Self { - assert!( - self.harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .epoch - < epoch - ); + assert!(self.harness.finalized_checkpoint().epoch < epoch); self } @@ -150,11 +148,17 @@ impl ForkChoiceTest { { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec) .unwrap(); - func(self.harness.chain.fork_choice.read().queued_attestations()); + func( + self.harness + .chain + .canonical_head + .fork_choice_read_lock() + .queued_attestations(), + ); self } @@ -173,7 +177,7 @@ impl ForkChoiceTest { } /// Build the chain whilst `predicate` returns `true` and `process_block_result` does not error. - pub fn apply_blocks_while(self, mut predicate: F) -> Result + pub async fn apply_blocks_while(self, mut predicate: F) -> Result where F: FnMut(BeaconBlockRef<'_, E>, &BeaconState) -> bool, { @@ -182,12 +186,12 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); - let (block, state_) = self.harness.make_block(state, slot); + let (block, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block.message(), &state) { break; } - if let Ok(block_hash) = self.harness.process_block_result(block.clone()) { + if let Ok(block_hash) = self.harness.process_block_result(block.clone()).await { self.harness.attest_block( &state, block.state_root(), @@ -205,25 +209,29 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). - pub fn apply_blocks(self, count: usize) -> Self { + pub async fn apply_blocks(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; self } /// Apply `count` blocks to the chain (without attestations). - pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; self } @@ -256,9 +264,9 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. 
- pub fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self + pub async fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self where - F: FnMut(&mut BeaconBlock, &mut BeaconState), + F: FnMut(&mut SignedBeaconBlock, &mut BeaconState), { let state = self .harness @@ -269,18 +277,17 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -293,13 +300,13 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts that an error occurred and allows inspecting it via `comparison_func`. - pub fn apply_invalid_block_directly_to_fork_choice( + pub async fn apply_invalid_block_directly_to_fork_choice( self, mut mutation_func: F, mut comparison_func: G, ) -> Self where - F: FnMut(&mut BeaconBlock, &mut BeaconState), + F: FnMut(&mut SignedBeaconBlock, &mut BeaconState), G: FnMut(ForkChoiceError), { let state = self @@ -311,19 +318,18 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - mutation_func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + mutation_func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -339,7 +345,7 @@ impl ForkChoiceTest { /// database. fn check_justified_balances(&self) { let harness = &self.harness; - let fc = self.harness.chain.fork_choice.read(); + let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); let state_root = harness .chain @@ -377,7 +383,7 @@ impl ForkChoiceTest { /// Returns an attestation that is valid for some slot in the given `chain`. /// /// Also returns some info about who created it. 
- fn apply_attestation_to_chain( + async fn apply_attestation_to_chain( self, delay: MutationDelay, mut mutation_func: F, @@ -387,7 +393,7 @@ impl ForkChoiceTest { F: FnMut(&mut IndexedAttestation, &BeaconChain>), G: FnMut(Result<(), BeaconChainError>), { - let head = self.harness.chain.head().expect("should get head"); + let head = self.harness.chain.head_snapshot(); let current_slot = self.harness.chain.slot().expect("should get slot"); let mut attestation = self @@ -438,11 +444,13 @@ impl ForkChoiceTest { if let MutationDelay::Blocks(slots) = delay { self.harness.advance_slot(); - self.harness.extend_chain( - slots, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; } mutation_func( @@ -464,17 +472,9 @@ impl ForkChoiceTest { pub fn check_finalized_block_is_accessible(self) -> Self { self.harness .chain - .fork_choice - .write() - .get_block( - &self - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .root, - ) + .canonical_head + .fork_choice_read_lock() + .get_block(&self.harness.finalized_checkpoint().root) .unwrap(); self @@ -488,7 +488,7 @@ fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); - let fork_choice = tester.harness.chain.fork_choice.read(); + let fork_choice = tester.harness.chain.canonical_head.fork_choice_read_lock(); let justified_checkpoint = fork_choice.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 0); @@ -503,44 +503,50 @@ fn justified_and_finalized_blocks() { /// - The new justified checkpoint descends from the current. /// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -#[test] -fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis -#[test] -fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) + .await .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) .assert_best_justified_epoch(2) .apply_blocks(1) + .await .assert_justified_epoch(3); } /// - The new justified checkpoint descends from the current. 
/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is the first justification since genesis -#[test] -fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { +#[tokio::test] +async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) .assert_best_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(2); } @@ -548,12 +554,14 @@ fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has **not** increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -567,6 +575,7 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } @@ -574,12 +583,14 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -593,6 +604,7 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(3); } @@ -600,12 +612,14 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has increased. 
-#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -619,17 +633,20 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_fina .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } /// Check that the balances are obtained correctly. -#[test] -fn justified_balances() { +#[tokio::test] +async fn justified_balances() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_justified_epoch(2) .check_justified_balances() } @@ -648,15 +665,16 @@ macro_rules! assert_invalid_block { /// Specification v0.12.1 /// /// assert block.parent_root in store.block_states -#[test] -fn invalid_block_unknown_parent() { +#[tokio::test] +async fn invalid_block_unknown_parent() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.parent_root_mut() = junk; + *block.message_mut().parent_root_mut() = junk; }, |err| { assert_invalid_block!( @@ -665,36 +683,42 @@ fn invalid_block_unknown_parent() { if parent == junk ) }, - ); + ) + .await; } /// Specification v0.12.1 /// /// assert get_current_slot(store) >= block.slot -#[test] -fn invalid_block_future_slot() { +#[tokio::test] +async fn invalid_block_future_slot() { ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() += 1; + *block.message_mut().slot_mut() += 1; }, |err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. }), - ); + ) + .await; } /// Specification v0.12.1 /// /// assert block.slot > finalized_slot -#[test] -fn invalid_block_finalized_slot() { +#[tokio::test] +async fn invalid_block_finalized_slot() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; + *block.message_mut().slot_mut() = + Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; }, |err| { assert_invalid_block!( @@ -703,7 +727,8 @@ fn invalid_block_finalized_slot() { if finalized_slot == Epoch::new(2).start_slot(E::slots_per_epoch()) ) }, - ); + ) + .await; } /// Specification v0.12.1 @@ -714,18 +739,20 @@ fn invalid_block_finalized_slot() { /// Note: we technically don't do this exact check, but an equivalent check. 
Reference: /// /// https://github.com/ethereum/eth2.0-specs/pull/1884 -#[test] -fn invalid_block_finalized_descendant() { +#[tokio::test] +async fn invalid_block_finalized_descendant() { let invalid_ancestor = Mutex::new(Hash256::zero()); ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) .apply_invalid_block_directly_to_fork_choice( |block, state| { - *block.parent_root_mut() = *state + *block.message_mut().parent_root_mut() = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); *invalid_ancestor.lock().unwrap() = block.parent_root(); @@ -737,7 +764,8 @@ fn invalid_block_finalized_descendant() { if block_ancestor == Some(*invalid_ancestor.lock().unwrap()) ) }, - ); + ) + .await; } macro_rules! assert_invalid_attestation { @@ -754,23 +782,26 @@ macro_rules! assert_invalid_attestation { } /// Ensure we can process a valid attestation. -#[test] -fn valid_attestation() { +#[tokio::test] +async fn valid_attestation() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), - ); + ) + .await; } /// This test is not in the specification, however we reject an attestation with an empty /// aggregation bitfield since it has no purpose beyond wasting our time. -#[test] -fn invalid_attestation_empty_bitfield() { +#[tokio::test] +async fn invalid_attestation_empty_bitfield() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -779,7 +810,8 @@ fn invalid_attestation_empty_bitfield() { |result| { assert_invalid_attestation!(result, InvalidAttestation::EmptyAggregationBitfield) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -787,10 +819,11 @@ fn invalid_attestation_empty_bitfield() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch after current epoch) -#[test] -fn invalid_attestation_future_epoch() { +#[tokio::test] +async fn invalid_attestation_future_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -803,7 +836,8 @@ fn invalid_attestation_future_epoch() { if attestation_epoch == Epoch::new(2) && current_epoch == Epoch::new(0) ) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -811,10 +845,11 @@ fn invalid_attestation_future_epoch() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch prior to previous epoch) -#[test] -fn invalid_attestation_past_epoch() { +#[tokio::test] +async fn invalid_attestation_past_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize * 3 + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -827,16 +862,18 @@ fn invalid_attestation_past_epoch() { if attestation_epoch == Epoch::new(0) && current_epoch == Epoch::new(3) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.epoch == compute_epoch_at_slot(attestation.data.slot) -#[test] -fn invalid_attestation_target_epoch() { +#[tokio::test] +async fn invalid_attestation_target_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -849,18 
+886,20 @@ fn invalid_attestation_target_epoch() { if target == Epoch::new(1) && slot == Slot::new(1) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root in store.blocks -#[test] -fn invalid_attestation_unknown_target_root() { +#[tokio::test] +async fn invalid_attestation_unknown_target_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -873,18 +912,20 @@ fn invalid_attestation_unknown_target_root() { if root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert attestation.data.beacon_block_root in store.blocks -#[test] -fn invalid_attestation_unknown_beacon_block_root() { +#[tokio::test] +async fn invalid_attestation_unknown_beacon_block_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -897,16 +938,18 @@ fn invalid_attestation_unknown_beacon_block_root() { if beacon_block_root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot -#[test] -fn invalid_attestation_future_block() { +#[tokio::test] +async fn invalid_attestation_future_block() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::Blocks(1), |attestation, chain| { @@ -923,19 +966,21 @@ fn invalid_attestation_future_block() { if block == 2 && attestation == 1 ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) -#[test] -fn invalid_attestation_inconsistent_ffg_vote() { +#[tokio::test] +async fn invalid_attestation_inconsistent_ffg_vote() { let local_opt = Mutex::new(None); let attestation_opt = Mutex::new(None); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, chain| { @@ -962,22 +1007,25 @@ fn invalid_attestation_inconsistent_ffg_vote() { && local == local_opt.lock().unwrap().unwrap() ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert get_current_slot(store) >= attestation.data.slot + 1 -#[test] -fn invalid_attestation_delayed_slot() { +#[tokio::test] +async fn invalid_attestation_delayed_slot() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)) .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), ) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 1)) .skip_slot() .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)); @@ -985,10 +1033,11 @@ fn invalid_attestation_delayed_slot() { /// Tests that the correct target root is used when the attested-to block is in a prior epoch to /// the attestation. 
-#[test] -fn valid_attestation_skip_across_epoch() { +#[tokio::test] +async fn valid_attestation_skip_across_epoch() { ForkChoiceTest::new() .apply_blocks(E::slots_per_epoch() as usize - 1) + .await .skip_slots(2) .apply_attestation_to_chain( MutationDelay::NoDelay, @@ -999,15 +1048,18 @@ fn valid_attestation_skip_across_epoch() { ) }, |result| result.unwrap(), - ); + ) + .await; } -#[test] -fn can_read_finalized_block() { +#[tokio::test] +async fn can_read_finalized_block() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .check_finalized_block_is_accessible(); } @@ -1025,8 +1077,8 @@ fn weak_subjectivity_fail_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config); } -#[test] -fn weak_subjectivity_pass_on_startup() { +#[tokio::test] +async fn weak_subjectivity_pass_on_startup() { let epoch = Epoch::new(0); let root = Hash256::zero(); @@ -1037,23 +1089,21 @@ fn weak_subjectivity_pass_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config) .apply_blocks(E::slots_per_epoch() as usize) + .await .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_passes() { +#[tokio::test] +async fn weak_subjectivity_check_passes() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let checkpoint = setup_harness.harness.finalized_checkpoint(); let chain_config = ChainConfig { weak_subjectivity_checkpoint: Some(checkpoint), @@ -1062,26 +1112,25 @@ fn weak_subjectivity_check_passes() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_fails_early_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_early_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch - 1; @@ -1092,25 +1141,23 @@ fn weak_subjectivity_check_fails_early_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_late_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_late_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch + 1; @@ -1121,25 +1168,23 @@ fn weak_subjectivity_check_fails_late_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, 
state| state.finalized_checkpoint().epoch < 4) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_incorrect_root() { +#[tokio::test] +async fn weak_subjectivity_check_fails_incorrect_root() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.root = Hash256::zero(); @@ -1150,27 +1195,31 @@ fn weak_subjectivity_check_fails_incorrect_root() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // the checkpoint at epoch 4 should become the root of last block of epoch 2 @@ -1187,31 +1236,37 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5) .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // Invalid checkpoint (epoch too early) @@ -1228,9 +1283,11 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 6) + .await 
.unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 858f4627fe5..4c83c689e48 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -7,7 +7,7 @@ use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - MainnetEthSpec, Slot, + Slot, }; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union @@ -16,6 +16,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); /// Defines an operation which may invalidate the `execution_status` of some nodes. +#[derive(Clone)] pub enum InvalidationOperation { /// Invalidate only `block_root` and it's descendants. Don't invalidate any ancestors. InvalidateOne { block_root: Hash256 }, @@ -143,6 +144,7 @@ impl ProtoArray { /// - Compare the current node with the parents best-child, updating it if the current node /// should become the best child. /// - If required, update the parents best-descendant with the current node or its best-descendant. + #[allow(clippy::too_many_arguments)] pub fn apply_score_changes( &mut self, mut deltas: Vec, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index a52d28a2c1a..568cfa9640e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -536,38 +536,44 @@ mod test_compute_deltas { // Add block that is a finalized descendant. fc.proto_array - .on_block::(Block { - slot: genesis_slot + 1, - root: finalized_desc, - parent_root: Some(finalized_root), - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id.clone(), - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - unrealized_justified_checkpoint: Some(genesis_checkpoint), - unrealized_finalized_checkpoint: Some(genesis_checkpoint), - }) + .on_block::( + Block { + slot: genesis_slot + 1, + root: finalized_desc, + parent_root: Some(finalized_root), + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + genesis_slot + 1, + ) .unwrap(); // Add block that is *not* a finalized descendant. 
fc.proto_array - .on_block::(Block { - slot: genesis_slot + 1, - root: not_finalized_desc, - parent_root: None, - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - unrealized_justified_checkpoint: None, - unrealized_finalized_checkpoint: None, - }) + .on_block::( + Block { + slot: genesis_slot + 1, + root: not_finalized_desc, + parent_root: None, + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id, + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + }, + genesis_slot + 1, + ) .unwrap(); assert!(!fc.is_descendant(unknown, unknown)); diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index a0ce237481b..c7ed4b308df 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dev-dependencies] env_logger = "0.9.0" beacon_chain = { path = "../../beacon_node/beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } [dependencies] bls = { path = "../../crypto/bls" } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index b75a79c72e8..2daefdacadb 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -27,7 +27,7 @@ lazy_static! 
{ static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( epoch_offset: u64, num_validators: usize, ) -> BeaconChainHarness> { @@ -41,27 +41,31 @@ fn get_harness( .build(); let state = harness.get_current_state(); if last_slot_of_epoch > Slot::new(0) { - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..last_slot_of_epoch.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..last_slot_of_epoch.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..num_validators).collect::>().as_slice(), + ) + .await; } harness } -#[test] -fn valid_block_ok() { +#[tokio::test] +async fn valid_block_ok() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let result = per_block_processing( &mut state, @@ -75,15 +79,15 @@ fn valid_block_ok() { assert!(result.is_ok()); } -#[test] -fn invalid_block_header_state_slot() { +#[tokio::test] +async fn invalid_block_header_state_slot() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot); + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); @@ -104,15 +108,17 @@ fn invalid_block_header_state_slot() { ); } -#[test] -fn invalid_parent_block_root() { +#[tokio::test] +async fn invalid_parent_block_root() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (mut block, signature) = signed_block.deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); @@ -136,14 +142,16 @@ fn invalid_parent_block_root() { ); } -#[test] -fn invalid_block_signature() { +#[tokio::test] +async fn invalid_block_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (block, _) = signed_block.deconstruct(); let result = per_block_processing( @@ -164,17 +172,19 @@ fn invalid_block_signature() { ); } -#[test] -fn invalid_randao_reveal_signature() { +#[tokio::test] 
+async fn invalid_randao_reveal_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_with_modifier(state, slot + 1, |block| { - *block.body_mut().randao_reveal_mut() = Signature::empty(); - }); + let (signed_block, mut state) = harness + .make_block_with_modifier(state, slot + 1, |block| { + *block.body_mut().randao_reveal_mut() = Signature::empty(); + }) + .await; let result = per_block_processing( &mut state, @@ -189,16 +199,22 @@ fn invalid_randao_reveal_signature() { assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid)); } -#[test] -fn valid_4_deposits() { +#[tokio::test] +async fn valid_4_deposits() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -207,16 +223,22 @@ fn valid_4_deposits() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_deposit_count_too_big() { +#[tokio::test] +async fn invalid_deposit_deposit_count_too_big() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let big_deposit_count = NUM_DEPOSITS + 1; @@ -233,16 +255,22 @@ fn invalid_deposit_deposit_count_too_big() { ); } -#[test] -fn invalid_deposit_count_too_small() { +#[tokio::test] +async fn invalid_deposit_count_too_small() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let small_deposit_count = NUM_DEPOSITS - 1; @@ -259,16 +287,22 @@ fn invalid_deposit_count_too_small() { ); } -#[test] -fn invalid_deposit_bad_merkle_proof() { +#[tokio::test] +async fn invalid_deposit_bad_merkle_proof() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, 
VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let bad_index = state.eth1_deposit_index() as usize; @@ -287,17 +321,23 @@ fn invalid_deposit_bad_merkle_proof() { ); } -#[test] -fn invalid_deposit_wrong_sig() { +#[tokio::test] +async fn invalid_deposit_wrong_sig() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -305,17 +345,23 @@ fn invalid_deposit_wrong_sig() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_invalid_pub_key() { +#[tokio::test] +async fn invalid_deposit_invalid_pub_key() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -324,13 +370,19 @@ fn invalid_deposit_invalid_pub_key() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attestation_no_committee_for_index() { +#[tokio::test] +async fn invalid_attestation_no_committee_for_index() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .index += 1; @@ -352,13 +404,19 @@ fn invalid_attestation_no_committee_for_index() { ); } -#[test] -fn invalid_attestation_wrong_justified_checkpoint() { +#[tokio::test] +async fn invalid_attestation_wrong_justified_checkpoint() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + 
.head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let old_justified_checkpoint = head_block.body().attestations()[0].data.source; let mut new_justified_checkpoint = old_justified_checkpoint; new_justified_checkpoint.epoch += Epoch::new(1); @@ -389,13 +447,19 @@ fn invalid_attestation_wrong_justified_checkpoint() { ); } -#[test] -fn invalid_attestation_bad_aggregation_bitfield_len() { +#[tokio::test] +async fn invalid_attestation_bad_aggregation_bitfield_len() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = Bitfield::with_capacity(spec.target_committee_size).unwrap(); @@ -416,13 +480,19 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { ); } -#[test] -fn invalid_attestation_bad_signature() { +#[tokio::test] +async fn invalid_attestation_bad_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, 97); // minimal number of required validators for this test + let harness = get_harness::(EPOCH_OFFSET, 97).await; // minimal number of required validators for this test let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); let result = process_operations::process_attestations( @@ -444,13 +514,19 @@ fn invalid_attestation_bad_signature() { ); } -#[test] -fn invalid_attestation_included_too_early() { +#[tokio::test] +async fn invalid_attestation_included_too_early() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot + Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -479,14 +555,20 @@ fn invalid_attestation_included_too_early() { ); } -#[test] -fn invalid_attestation_included_too_late() { +#[tokio::test] +async fn invalid_attestation_included_too_late() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot - Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -512,14 +594,20 @@ fn 
invalid_attestation_included_too_late() { ); } -#[test] -fn invalid_attestation_target_epoch_slot_mismatch() { +#[tokio::test] +async fn invalid_attestation_target_epoch_slot_mismatch() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .target @@ -544,10 +632,10 @@ fn invalid_attestation_target_epoch_slot_mismatch() { ); } -#[test] -fn valid_insert_attester_slashing() { +#[tokio::test] +async fn valid_insert_attester_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let attester_slashing = harness.make_attester_slashing(vec![1, 2]); @@ -563,10 +651,10 @@ fn valid_insert_attester_slashing() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attester_slashing_not_slashable() { +#[tokio::test] +async fn invalid_attester_slashing_not_slashable() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); @@ -589,10 +677,10 @@ fn invalid_attester_slashing_not_slashable() { ); } -#[test] -fn invalid_attester_slashing_1_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_1_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); @@ -618,10 +706,10 @@ fn invalid_attester_slashing_1_invalid() { ); } -#[test] -fn invalid_attester_slashing_2_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_2_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); @@ -647,10 +735,10 @@ fn invalid_attester_slashing_2_invalid() { ); } -#[test] -fn valid_insert_proposer_slashing() { +#[tokio::test] +async fn valid_insert_proposer_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); let result = process_operations::process_proposer_slashings( @@ -663,10 +751,10 @@ fn valid_insert_proposer_slashing() { assert!(result.is_ok()); } -#[test] -fn invalid_proposer_slashing_proposals_identical() { +#[tokio::test] +async fn invalid_proposer_slashing_proposals_identical() { let spec = MainnetEthSpec::default_spec(); - let 
harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); @@ -689,10 +777,10 @@ fn invalid_proposer_slashing_proposals_identical() { ); } -#[test] -fn invalid_proposer_slashing_proposer_unknown() { +#[tokio::test] +async fn invalid_proposer_slashing_proposer_unknown() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.proposer_index = 3_141_592; @@ -716,10 +804,10 @@ fn invalid_proposer_slashing_proposer_unknown() { ); } -#[test] -fn invalid_proposer_slashing_duplicate_slashing() { +#[tokio::test] +async fn invalid_proposer_slashing_duplicate_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); @@ -747,10 +835,10 @@ fn invalid_proposer_slashing_duplicate_slashing() { ); } -#[test] -fn invalid_bad_proposal_1_signature() { +#[tokio::test] +async fn invalid_bad_proposal_1_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -771,10 +859,10 @@ fn invalid_bad_proposal_1_signature() { ); } -#[test] -fn invalid_bad_proposal_2_signature() { +#[tokio::test] +async fn invalid_bad_proposal_2_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -795,10 +883,10 @@ fn invalid_bad_proposal_2_signature() { ); } -#[test] -fn invalid_proposer_slashing_proposal_epoch_mismatch() { +#[tokio::test] +async fn invalid_proposer_slashing_proposal_epoch_mismatch() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index 07321b8ab8d..f5855ddc42e 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -12,7 +12,7 @@ pub fn process_justification_and_finalization( participation_cache: &ParticipationCache, ) -> Result, Error> { if state.current_epoch() <= 
T::genesis_epoch().safe_add(1)? { - return Ok(JustifiableBeaconState::from(state)); + return Ok(state.into()); } let previous_epoch = state.previous_epoch(); diff --git a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs index 3f4a565d74e..2f9412e8025 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs @@ -12,7 +12,7 @@ pub fn process_justification_and_finalization( _spec: &ChainSpec, ) -> Result, Error> { if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { - return Ok(JustifiableBeaconState::from(state)); + return Ok(JustifiableBeaconState::from(&*state)); } weigh_justification_and_finalization( diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 4379547bfe6..14bbfbc071d 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -6,8 +6,8 @@ use bls::Hash256; use env_logger::{Builder, Env}; use types::Slot; -#[test] -fn runs_without_error() { +#[tokio::test] +async fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -22,15 +22,17 @@ fn runs_without_error() { (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..target_slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..8).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..target_slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..8).collect::>().as_slice(), + ) + .await; let mut new_head_state = harness.get_current_state(); process_epoch(&mut new_head_state, &spec).unwrap(); @@ -45,8 +47,8 @@ mod release_tests { use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; - #[test] - fn altair_state_on_base_fork() { + #[tokio::test] + async fn altair_state_on_base_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork happens at epoch 1. @@ -61,12 +63,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get an Altair block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get an Altair block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; @@ -103,8 +107,8 @@ mod release_tests { ); } - #[test] - fn base_state_on_altair_fork() { + #[tokio::test] + async fn base_state_on_altair_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork never happens. 
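For orientation, the hunks above all follow the same shape: a synchronous `#[test]` becomes a `#[tokio::test]` async test and the harness builders that now return futures are awaited. A minimal, self-contained sketch of that pattern (the test name and `build_chain` helper are illustrative only, not part of this patch, and assume tokio with the `macros` and `rt` features enabled):

```rust
// Illustrative sketch only: the sync-to-async test conversion applied throughout these
// hunks. `build_chain` stands in for the harness methods that became async in this diff
// (e.g. `extend_chain`, `add_attested_blocks_at_slots`).
async fn build_chain(num_blocks: usize) -> usize {
    // ... extend the canonical chain by `num_blocks` blocks ...
    num_blocks
}

#[tokio::test]
async fn extends_the_chain() {
    // Before: `#[test] fn extends_the_chain()` calling a blocking harness method.
    let blocks_built = build_chain(32).await;
    assert_eq!(blocks_built, 32);
}
```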
@@ -119,12 +123,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get a block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get a block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs index cb9fc9390a3..e5b505bb91c 100644 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ b/consensus/tree_hash/examples/flamegraph_beacon_state.rs @@ -17,7 +17,7 @@ fn get_harness() -> BeaconChainHarness> { } fn build_state() -> BeaconState { - let state = get_harness::().chain.head_beacon_state().unwrap(); + let state = get_harness::().chain.head_beacon_state_cloned(); assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 881d17a3309..9d305a969d9 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -52,6 +52,7 @@ criterion = "0.3.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } state_processing = { path = "../state_processing" } +tokio = "1.14.0" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 6eb12ddf05e..c6985d94a96 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -541,6 +541,52 @@ impl_from!(BeaconBlockBase, >, >, |body: impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +// We can clone blocks with payloads to blocks without payloads, without cloning the payload. +macro_rules! impl_clone_as_blinded { + ($ty_name:ident, <$($from_params:ty),*>, <$($to_params:ty),*>) => { + impl $ty_name<$($from_params),*> + { + pub fn clone_as_blinded(&self) -> $ty_name<$($to_params),*> { + let $ty_name { + slot, + proposer_index, + parent_root, + state_root, + body, + } = self; + + $ty_name { + slot: *slot, + proposer_index: *proposer_index, + parent_root: *parent_root, + state_root: *state_root, + body: body.clone_as_blinded(), + } + } + } + } +} + +impl_clone_as_blinded!(BeaconBlockBase, >, >); +impl_clone_as_blinded!(BeaconBlockAltair, >, >); +impl_clone_as_blinded!(BeaconBlockMerge, >, >); + +// A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the +// execution payload. 
+impl<'a, E: EthSpec> From>> + for BeaconBlock> +{ + fn from( + full_block: BeaconBlockRef<'a, E, FullPayload>, + ) -> BeaconBlock> { + match full_block { + BeaconBlockRef::Base(block) => BeaconBlock::Base(block.clone_as_blinded()), + BeaconBlockRef::Altair(block) => BeaconBlock::Altair(block.clone_as_blinded()), + BeaconBlockRef::Merge(block) => BeaconBlock::Merge(block.clone_as_blinded()), + } + } +} + impl From>> for ( BeaconBlock>, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 34761ea9a7f..acb35e92857 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -251,6 +251,97 @@ impl From>> } } +// We can clone a full block into a blinded block, without cloning the payload. +impl BeaconBlockBodyBase> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { + let BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + _phantom, + } = self; + + BeaconBlockBodyBase { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + _phantom: PhantomData, + } + } +} + +impl BeaconBlockBodyAltair> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyAltair> { + let BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + _phantom, + } = self; + + BeaconBlockBodyAltair { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + _phantom: PhantomData, + } + } +} + +impl BeaconBlockBodyMerge> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyMerge> { + let BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + } = self; + + BeaconBlockBodyMerge { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayload { + execution_payload_header: From::from(execution_payload), + }, + } + } +} + impl From>> for ( BeaconBlockBody>, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 48998e26d0d..db431138aac 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -34,32 +34,34 @@ fn default_values() { assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err()); } -fn new_state(validator_count: usize, slot: Slot) -> BeaconState { +async fn new_state(validator_count: usize, slot: Slot) -> 
BeaconState { let harness = get_harness(validator_count); let head_state = harness.get_current_state(); if slot > Slot::new(0) { - harness.add_attested_blocks_at_slots( - head_state, - Hash256::zero(), - (1..slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + head_state, + Hash256::zero(), + (1..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness.get_current_state() } -#[test] +#[tokio::test] #[should_panic] -fn fails_without_validators() { - new_state::(0, Slot::new(0)); +async fn fails_without_validators() { + new_state::(0, Slot::new(0)).await; } -#[test] -fn initializes_with_the_right_epoch() { - let state = new_state::(16, Slot::new(0)); +#[tokio::test] +async fn initializes_with_the_right_epoch() { + let state = new_state::(16, Slot::new(0)).await; let spec = &MinimalEthSpec::default_spec(); let cache = CommitteeCache::default(); @@ -75,13 +77,13 @@ fn initializes_with_the_right_epoch() { assert!(cache.is_initialized_at(state.next_epoch().unwrap())); } -#[test] -fn shuffles_for_the_right_epoch() { +#[tokio::test] +async fn shuffles_for_the_right_epoch() { let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(6); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - let mut state = new_state::(num_validators, slot); + let mut state = new_state::(num_validators, slot).await; let spec = &MinimalEthSpec::default_spec(); let distinct_hashes: Vec = (0..MinimalEthSpec::epochs_per_historical_vector()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index b88b49e1a39..d65d0a9e6ce 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -25,7 +25,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( validator_count: usize, slot: Slot, ) -> BeaconChainHarness> { @@ -41,24 +41,26 @@ fn get_harness( .map(Slot::new) .collect::>(); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - slots.as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + slots.as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness } -fn build_state(validator_count: usize) -> BeaconState { +async fn build_state(validator_count: usize) -> BeaconState { get_harness(validator_count, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } -fn test_beacon_proposer_index() { +async fn test_beacon_proposer_index() { let spec = T::default_spec(); // Get the i'th candidate proposer for the given state and slot @@ -85,20 +87,20 @@ fn test_beacon_proposer_index() { // Test where we have one validator per slot. // 0th candidate should be chosen every time. - let state = build_state(T::slots_per_epoch() as usize); + let state = build_state(T::slots_per_epoch() as usize).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test where we have two validators per slot. // 0th candidate should be chosen every time. 
- let state = build_state((T::slots_per_epoch() as usize).mul(2)); + let state = build_state((T::slots_per_epoch() as usize).mul(2)).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test with two validators per slot, first validator has zero balance. - let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)); + let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); state.validators_mut()[slot0_candidate0].effective_balance = 0; test(&state, Slot::new(0), 1); @@ -107,9 +109,9 @@ fn test_beacon_proposer_index() { } } -#[test] -fn beacon_proposer_index() { - test_beacon_proposer_index::(); +#[tokio::test] +async fn beacon_proposer_index() { + test_beacon_proposer_index::().await; } /// Test that @@ -144,11 +146,11 @@ fn test_cache_initialization( ); } -#[test] -fn cache_initialization() { +#[tokio::test] +async fn cache_initialization() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::(16); + let mut state = build_state::(16).await; *state.slot_mut() = (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch()); @@ -211,11 +213,11 @@ fn test_clone_config(base_state: &BeaconState, clone_config: Clon } } -#[test] -fn clone_config() { +#[tokio::test] +async fn clone_config() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::(16); + let mut state = build_state::(16).await; state.build_all_caches(&spec).unwrap(); state @@ -314,7 +316,7 @@ mod committees { assert!(expected_indices_iter.next().is_none()); } - fn committee_consistency_test( + async fn committee_consistency_test( validator_count: usize, state_epoch: Epoch, cache_epoch: RelativeEpoch, @@ -322,7 +324,7 @@ mod committees { let spec = &T::default_spec(); let slot = state_epoch.start_slot(T::slots_per_epoch()); - let harness = get_harness::(validator_count, slot); + let harness = get_harness::(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) @@ -350,7 +352,7 @@ mod committees { ); } - fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { + async fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { let spec = T::default_spec(); let validator_count = spec @@ -359,13 +361,15 @@ mod committees { .mul(spec.target_committee_size) .add(1); - committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch); + committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch) + .await; committee_consistency_test::( validator_count as usize, T::genesis_epoch() + 4, cached_epoch, - ); + ) + .await; committee_consistency_test::( validator_count as usize, @@ -374,38 +378,39 @@ mod committees { .mul(T::slots_per_epoch()) .mul(4), cached_epoch, - ); + ) + .await; } - #[test] - fn current_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Current); + #[tokio::test] + async fn current_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Current).await; } - #[test] - fn previous_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Previous); + #[tokio::test] + async fn previous_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Previous).await; } - #[test] - fn next_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Next); + 
#[tokio::test] + async fn next_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Next).await; } } mod get_outstanding_deposit_len { use super::*; - fn state() -> BeaconState { + async fn state() -> BeaconState { get_harness(16, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } - #[test] - fn returns_ok() { - let mut state = state(); + #[tokio::test] + async fn returns_ok() { + let mut state = state().await; assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); state.eth1_data_mut().deposit_count = 17; @@ -413,9 +418,9 @@ mod get_outstanding_deposit_len { assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); } - #[test] - fn returns_err_if_the_state_is_invalid() { - let mut state = state(); + #[tokio::test] + async fn returns_err_if_the_state_is_invalid() { + let mut state = state().await; // The state is invalid, deposit count is lower than deposit index. state.eth1_data_mut().deposit_count = 16; *state.eth1_deposit_index_mut() = 17; diff --git a/consensus/types/src/justifiable_beacon_state.rs b/consensus/types/src/justifiable_beacon_state.rs index ef8258b528e..6184abb29a1 100644 --- a/consensus/types/src/justifiable_beacon_state.rs +++ b/consensus/types/src/justifiable_beacon_state.rs @@ -8,8 +8,8 @@ pub struct JustifiableBeaconState { pub finalized_checkpoint: Checkpoint, } -impl From<&mut BeaconState> for JustifiableBeaconState { - fn from(state: &mut BeaconState) -> Self { +impl From<&BeaconState> for JustifiableBeaconState { + fn from(state: &BeaconState) -> Self { Self { current_justified_checkpoint: state.current_justified_checkpoint(), previous_justified_checkpoint: state.previous_justified_checkpoint(), diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index d736f0be193..a21eeb63c27 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -28,6 +28,8 @@ pub trait ExecPayload: + Hash + TryFrom> + From> + + Send + + 'static { fn block_type() -> BlockType; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 54880706882..5c40c4685c3 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -346,6 +346,14 @@ impl From> for SignedBlindedBeaconBlock { } } +// We can blind borrowed blocks with payloads by converting the payload into a header (without +// cloning the payload contents). 
+impl SignedBeaconBlock { + pub fn clone_as_blinded(&self) -> SignedBlindedBeaconBlock { + SignedBeaconBlock::from_block(self.message().into(), self.signature().clone()) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 6717bb0f467..50295df4b0e 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -222,7 +222,7 @@ pub fn migrate_db( runtime_context: &RuntimeContext, log: Logger, ) -> Result<(), Error> { - let spec = runtime_context.eth2_config.spec.clone(); + let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); @@ -236,7 +236,7 @@ pub fn migrate_db( Ok(()) }, client_config.store.clone(), - spec, + spec.clone(), log.clone(), )?; @@ -253,6 +253,7 @@ pub fn migrate_db( from, to, log, + spec, ) } diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 88feff0bbc4..091a95dc4cb 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -216,14 +216,7 @@ impl SlasherService { }; // Add to local op pool. - if let Err(e) = beacon_chain.import_attester_slashing(verified_slashing) { - error!( - log, - "Beacon chain refused attester slashing"; - "error" => ?e, - "slashing" => ?slashing, - ); - } + beacon_chain.import_attester_slashing(verified_slashing); // Publish to the network if broadcast is enabled. if slasher.config().broadcast { diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ac9ca8993cc..64f4aa75389 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -81,11 +81,23 @@ pub struct Cases { } impl Cases { - pub fn test_results(&self, fork_name: ForkName) -> Vec { - self.test_cases - .into_par_iter() - .enumerate() - .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) - .collect() + pub fn test_results(&self, fork_name: ForkName, use_rayon: bool) -> Vec { + if use_rayon { + self.test_cases + .into_par_iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } else { + self.test_cases + .iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 982735648fc..2d4ab76019e 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,14 +7,16 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, HeadInfo, + BeaconChainTypes, CachedHead, }; use serde_derive::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; +use std::future::Future; +use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; @@ -72,15 +74,13 @@ pub struct ForkChoiceTest { pub description: String, pub anchor_state: BeaconState, pub anchor_block: BeaconBlock, + #[allow(clippy::type_complexity)] pub steps: Vec, Attestation, PowBlock, AttesterSlashing>>, } -/// Spec for fork choice 
tests, with proposer boosting enabled. -/// -/// This function can be deleted once `ChainSpec::mainnet` enables proposer boosting by default. +/// Spec to be used for fork choice tests. pub fn fork_choice_spec(fork_name: ForkName) -> ChainSpec { - let mut spec = testing_spec::(fork_name); - spec + testing_spec::(fork_name) } impl LoadCase for ForkChoiceTest { @@ -311,19 +311,20 @@ impl Tester { Ok(self.spec.genesis_slot + slots_since_genesis) } - fn find_head(&self) -> Result { - self.harness - .chain - .fork_choice() - .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; + fn block_on_dangerous(&self, future: F) -> Result { self.harness .chain - .head_info() - .map_err(|e| Error::InternalError(format!("failed to read head with {:?}", e))) + .task_executor + .clone() + .block_on_dangerous(future, "ef_tests_block_on") + .ok_or_else(|| Error::InternalError("runtime shutdown".into())) } - fn genesis_epoch(&self) -> Epoch { - self.spec.genesis_slot.epoch(E::slots_per_epoch()) + fn find_head(&self) -> Result, Error> { + let chain = self.harness.chain.clone(); + self.block_on_dangerous(chain.recompute_head_at_current_slot())? + .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; + Ok(self.harness.chain.canonical_head.cached_head()) } pub fn set_tick(&self, tick: u64) { @@ -338,15 +339,16 @@ impl Tester { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .update_time(slot, &self.harness.spec) .unwrap(); } pub fn process_block(&self, block: SignedBeaconBlock, valid: bool) -> Result<(), Error> { - let result = self.harness.chain.process_block(block.clone()); let block_root = block.canonical_root(); + let block = Arc::new(block); + let result = self.block_on_dangerous(self.harness.chain.process_block(block.clone()))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. 
result: {:?}", @@ -391,16 +393,20 @@ impl Tester { .seconds_from_current_slot_start(self.spec.seconds_per_slot) .unwrap(); - let (block, _) = block.deconstruct(); - let result = self.harness.chain.fork_choice.write().on_block( - self.harness.chain.slot().unwrap(), - &block, - block_root, - block_delay, - &mut state, - PayloadVerificationStatus::Irrelevant, - &self.harness.chain.spec, - ); + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_block( + self.harness.chain.slot().unwrap(), + block.message(), + block_root, + block_delay, + &mut state, + PayloadVerificationStatus::Irrelevant, + &self.harness.chain.spec, + ); if result.is_ok() { return Err(Error::DidntFail(format!( @@ -448,10 +454,11 @@ impl Tester { } pub fn check_head(&self, expected_head: Head) -> Result<(), Error> { - let chain_head = self.find_head().map(|head| Head { - slot: head.slot, - root: head.block_root, - })?; + let head = self.find_head()?; + let chain_head = Head { + slot: head.head_slot(), + root: head.head_block_root(), + }; check_equal("head", chain_head, expected_head) } @@ -470,15 +477,15 @@ impl Tester { } pub fn check_justified_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); - - assert_checkpoints_eq( - "justified_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); + + assert_checkpoints_eq("justified_checkpoint", head_checkpoint, fc_checkpoint); check_equal("justified_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -487,15 +494,15 @@ impl Tester { &self, expected_checkpoint_root: Hash256, ) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); - assert_checkpoints_eq( - "justified_checkpoint_root", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("justified_checkpoint_root", head_checkpoint, fc_checkpoint); check_equal( "justified_checkpoint_root", @@ -505,15 +512,15 @@ impl Tester { } pub fn check_finalized_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.finalized_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().finalized_checkpoint(); - - assert_checkpoints_eq( - "finalized_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + let head_checkpoint = self.find_head()?.finalized_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint(); + + assert_checkpoints_eq("finalized_checkpoint", head_checkpoint, fc_checkpoint); check_equal("finalized_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -525,8 +532,8 @@ impl Tester { let best_justified_checkpoint = self .harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .best_justified_checkpoint(); check_equal( "best_justified_checkpoint", @@ -542,8 +549,8 @@ impl Tester { 
let u_justified_checkpoint = self .harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .unrealized_justified_checkpoint(); check_equal( "u_justified_checkpoint", @@ -559,8 +566,8 @@ impl Tester { let u_finalized_checkpoint = self .harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .unrealized_finalized_checkpoint(); check_equal( "u_finalized_checkpoint", @@ -573,7 +580,12 @@ impl Tester { &self, expected_proposer_boost_root: Hash256, ) -> Result<(), Error> { - let proposer_boost_root = self.harness.chain.fork_choice.read().proposer_boost_root(); + let proposer_boost_root = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proposer_boost_root(); check_equal( "proposer_boost_root", proposer_boost_root, @@ -588,20 +600,8 @@ impl Tester { /// This function is necessary due to a quirk documented in this issue: /// /// https://github.com/ethereum/consensus-specs/issues/2566 -fn assert_checkpoints_eq(name: &str, genesis_epoch: Epoch, head: Checkpoint, fc: Checkpoint) { - if fc.epoch == genesis_epoch { - assert_eq!( - head, - Checkpoint { - epoch: genesis_epoch, - root: Hash256::zero() - }, - "{} (genesis)", - name - ) - } else { - assert_eq!(head, fc, "{} (non-genesis)", name) - } +fn assert_checkpoints_eq(name: &str, head: Checkpoint, fc: Checkpoint) { + assert_eq!(head, fc, "{}", name) } /// Convenience function to create `Error` messages. diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index be6c495aaed..25299bf5775 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -30,6 +30,10 @@ pub trait Handler { } } + fn use_rayon() -> bool { + true + } + fn run_for_fork(&self, fork_name: ForkName) { let fork_name_str = fork_name.to_string(); @@ -59,7 +63,7 @@ pub trait Handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name); + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); let name = format!( "{}/{}/{}", @@ -460,6 +464,11 @@ impl Handler for ForkChoiceHandler { self.handler_name.clone() } + fn use_rayon() -> bool { + // The fork choice tests use `block_on` which can cause panics with rayon. + false + } + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Merge block tests are only enabled for Bellatrix or later. 
if self.handler_name == "on_merge_block" diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 21162fea56d..79d927b6ce9 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -98,10 +98,9 @@ impl TestRig { } pub fn perform_tests_blocking(&self) { - self.ee_a - .execution_layer - .block_on_generic(|_| async { self.perform_tests().await }) - .unwrap() + self.runtime + .handle() + .block_on(async { self.perform_tests().await }); } pub async fn wait_until_synced(&self) { diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 4e93db3b326..6da9f2f4a6f 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -12,3 +12,4 @@ types = { path = "../../consensus/types" } eth2_ssz = "0.4.1" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 75f82b3132f..3e4bb7bf3f9 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -37,11 +37,12 @@ impl Default for ExitTest { } impl ExitTest { - fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { + async fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { let harness = get_harness::( self.state_epoch.start_slot(E::slots_per_epoch()), VALIDATOR_COUNT, - ); + ) + .await; let mut state = harness.get_current_state(); (self.state_modifier)(&mut state); @@ -49,11 +50,12 @@ impl ExitTest { let validator_index = self.validator_index; let exit_epoch = self.exit_epoch; - let (signed_block, state) = - harness.make_block_with_modifier(state.clone(), state.slot() + 1, |block| { + let (signed_block, state) = harness + .make_block_with_modifier(state.clone(), state.slot() + 1, |block| { harness.add_voluntary_exit(block, validator_index, exit_epoch); block_modifier(&harness, block); - }); + }) + .await; (signed_block, state) } @@ -72,12 +74,12 @@ impl ExitTest { } #[cfg(all(test, not(debug_assertions)))] - fn run(self) -> BeaconState { + async fn run(self) -> BeaconState { let spec = &E::default_spec(); let expected = self.expected.clone(); assert_eq!(STATE_EPOCH, spec.shard_committee_period); - let (block, mut state) = self.block_and_pre_state(); + let (block, mut state) = self.block_and_pre_state().await; let result = Self::process(&block, &mut state); @@ -86,8 +88,8 @@ impl ExitTest { state } - fn test_vector(self, title: String) -> TestVector { - let (block, pre_state) = self.block_and_pre_state(); + async fn test_vector(self, title: String) -> TestVector { + let (block, pre_state) = self.block_and_pre_state().await; let mut post_state = pre_state.clone(); let (post_state, error) = match Self::process(&block, &mut post_state) { Ok(_) => (Some(post_state), None), @@ -334,14 +336,14 @@ mod custom_tests { ); } - #[test] - fn valid() { - let state = ExitTest::default().run(); + #[tokio::test] + async fn valid() { + let state = ExitTest::default().run().await; assert_exited(&state, VALIDATOR_INDEX as usize); } - #[test] - fn valid_three() { + #[tokio::test] + async fn valid_three() { let state = ExitTest { block_modifier: Box::new(|harness, block| { harness.add_voluntary_exit(block, 1, STATE_EPOCH); @@ -349,7 +351,8 @@ mod custom_tests { }), 
..ExitTest::default() } - .run(); + .run() + .await; for i in &[VALIDATOR_INDEX, 1, 2] { assert_exited(&state, *i as usize); diff --git a/testing/state_transition_vectors/src/macros.rs b/testing/state_transition_vectors/src/macros.rs index 81f81718525..5dafbf549a0 100644 --- a/testing/state_transition_vectors/src/macros.rs +++ b/testing/state_transition_vectors/src/macros.rs @@ -4,11 +4,11 @@ /// - `mod tests`: runs all the test vectors locally. macro_rules! vectors_and_tests { ($($name: ident, $test: expr),*) => { - pub fn vectors() -> Vec { + pub async fn vectors() -> Vec { let mut vec = vec![]; $( - vec.push($test.test_vector(stringify!($name).into())); + vec.push($test.test_vector(stringify!($name).into()).await); )* vec @@ -18,9 +18,9 @@ macro_rules! vectors_and_tests { mod tests { use super::*; $( - #[test] - fn $name() { - $test.run(); + #[tokio::test] + async fn $name() { + $test.run().await; } )* } diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index d66842e5a10..3e7c37af543 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -25,8 +25,9 @@ pub const BASE_VECTOR_DIR: &str = "vectors"; pub const SLOT_OFFSET: u64 = 1; /// Writes all known test vectors to `CARGO_MANIFEST_DIR/vectors`. -fn main() { - match write_all_vectors() { +#[tokio::main] +async fn main() { + match write_all_vectors().await { Ok(()) => exit(0), Err(e) => { eprintln!("Error: {}", e); @@ -49,7 +50,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( slot: Slot, validator_count: usize, ) -> BeaconChainHarness> { @@ -61,23 +62,25 @@ fn get_harness( let skip_to_slot = slot - SLOT_OFFSET; if skip_to_slot > Slot::new(0) { let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (skip_to_slot.as_u64()..slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (skip_to_slot.as_u64()..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness } /// Writes all vectors to file. -fn write_all_vectors() -> Result<(), String> { - write_vectors_to_file("exit", &exit::vectors()) +async fn write_all_vectors() -> Result<(), String> { + write_vectors_to_file("exit", &exit::vectors().await) } /// Writes a list of `vectors` to the `title` dir.
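The `clone_as_blinded` helpers added above in `beacon_block.rs`, `beacon_block_body.rs` and `signed_beacon_block.rs` compose into a cheap full-to-blinded conversion. A minimal sketch of how they might be called, assuming the `types` crate API as introduced in this diff (the `blind_for_storage` helper is illustrative, not part of the patch):

```rust
use types::{EthSpec, FullPayload, SignedBeaconBlock, SignedBlindedBeaconBlock};

// Produce a blinded copy of a full block: the body's list fields are cloned, but the
// execution payload is reduced to its header rather than cloned wholesale.
fn blind_for_storage<E: EthSpec>(
    full_block: &SignedBeaconBlock<E, FullPayload<E>>,
) -> SignedBlindedBeaconBlock<E> {
    full_block.clone_as_blinded()
}
```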