From 1b4620f2569089193e2b64ebd2998797908e9818 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 11:45:07 +1000 Subject: [PATCH 01/41] Consensus changes --- consensus/state_processing/Cargo.toml | 1 + consensus/state_processing/src/all_caches.rs | 6 +- .../state_processing/src/block_replayer.rs | 46 +- .../src/common/initiate_validator_exit.rs | 9 +- .../src/common/slash_validator.rs | 8 +- .../update_progressive_balances_cache.rs | 4 +- consensus/state_processing/src/epoch_cache.rs | 2 +- consensus/state_processing/src/genesis.rs | 14 +- .../src/per_block_processing.rs | 2 +- .../altair/sync_committee.rs | 1 + .../src/per_block_processing/errors.rs | 7 + .../process_operations.rs | 32 +- .../per_block_processing/signature_sets.rs | 2 +- .../verify_bls_to_execution_change.rs | 4 +- .../src/per_block_processing/verify_exit.rs | 4 +- .../altair/participation_flag_updates.rs | 9 +- .../base/validator_statuses.rs | 4 +- .../capella/historical_summaries_update.rs | 3 + .../effective_balance_updates.rs | 14 +- .../epoch_processing_summary.rs | 18 +- .../src/per_epoch_processing/errors.rs | 9 +- .../historical_roots_update.rs | 2 +- .../per_epoch_processing/registry_updates.rs | 6 +- .../src/per_epoch_processing/resets.rs | 4 +- .../src/per_epoch_processing/single_pass.rs | 97 ++- .../src/per_epoch_processing/slashings.rs | 4 +- .../state_processing/src/upgrade/altair.rs | 9 +- .../state_processing/src/upgrade/capella.rs | 5 +- .../state_processing/src/upgrade/deneb.rs | 1 - .../state_processing/src/upgrade/electra.rs | 1 - .../state_processing/src/upgrade/merge.rs | 1 - consensus/types/Cargo.toml | 2 + consensus/types/benches/benches.rs | 50 +- consensus/types/examples/clone_state.rs | 51 -- consensus/types/examples/ssz_encode_state.rs | 54 -- consensus/types/examples/tree_hash_state.rs | 56 -- consensus/types/src/activation_queue.rs | 2 +- consensus/types/src/beacon_block.rs | 2 +- consensus/types/src/beacon_state.rs | 793 ++++++++++++------ 
.../types/src/beacon_state/clone_config.rs | 47 -- .../types/src/beacon_state/committee_cache.rs | 7 +- .../src/beacon_state/committee_cache/tests.rs | 2 +- .../types/src/beacon_state/compact_state.rs | 316 +++++++ .../types/src/beacon_state/exit_cache.rs | 15 +- consensus/types/src/beacon_state/iter.rs | 4 +- .../types/src/beacon_state/pubkey_cache.rs | 16 +- .../types/src/beacon_state/slashings_cache.rs | 8 +- consensus/types/src/beacon_state/tests.rs | 241 +----- .../types/src/beacon_state/tree_hash_cache.rs | 645 -------------- consensus/types/src/blob_sidecar.rs | 3 +- consensus/types/src/chain_spec.rs | 7 + consensus/types/src/execution_payload.rs | 2 + .../types/src/execution_payload_header.rs | 24 +- consensus/types/src/historical_batch.rs | 6 +- consensus/types/src/lib.rs | 8 +- consensus/types/src/light_client_bootstrap.rs | 8 +- .../types/src/light_client_finality_update.rs | 1 + consensus/types/src/light_client_update.rs | 7 + consensus/types/src/test_utils/test_random.rs | 2 +- consensus/types/src/tree_hash_impls.rs | 165 ---- consensus/types/src/validator.rs | 214 ++++- 61 files changed, 1326 insertions(+), 1761 deletions(-) delete mode 100644 consensus/types/examples/clone_state.rs delete mode 100644 consensus/types/examples/ssz_encode_state.rs delete mode 100644 consensus/types/examples/tree_hash_state.rs delete mode 100644 consensus/types/src/beacon_state/clone_config.rs create mode 100644 consensus/types/src/beacon_state/compact_state.rs delete mode 100644 consensus/types/src/beacon_state/tree_hash_cache.rs delete mode 100644 consensus/types/src/tree_hash_impls.rs diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index be5367eb08f..d07763d1825 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -28,6 +28,7 @@ arbitrary = { workspace = true } lighthouse_metrics = { workspace = true } lazy_static = { workspace = true } derivative = { workspace = true } +vec_map = 
"0.8.2" [features] default = ["legacy-arith"] diff --git a/consensus/state_processing/src/all_caches.rs b/consensus/state_processing/src/all_caches.rs index 106692c63aa..b915091405b 100644 --- a/consensus/state_processing/src/all_caches.rs +++ b/consensus/state_processing/src/all_caches.rs @@ -9,12 +9,14 @@ use types::{BeaconState, ChainSpec, EpochCacheError, EthSpec, Hash256, RelativeE pub trait AllCaches { /// Build all caches. /// - /// Note that this excludes the tree-hash cache. That needs to be managed separately. + /// Note that this excludes milhouse's intrinsic tree-hash cache. That needs to be managed + /// separately. fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), EpochCacheError>; /// Return true if all caches are built. /// - /// Note that this excludes the tree-hash cache. That needs to be managed separately. + /// Note that this excludes milhouse's intrinsic tree-hash cache. That needs to be managed + /// separately. fn all_caches_built(&self) -> bool; } diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index f502d7f692c..1749f773f3a 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -8,17 +8,18 @@ use std::iter::Peekable; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; -type PreBlockHook<'a, E, Error> = Box< +pub type PreBlockHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, &SignedBeaconBlock>) -> Result<(), Error> + 'a, >; -type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; -type PreSlotHook<'a, E, Error> = Box) -> Result<(), Error> + 'a>; -type PostSlotHook<'a, E, Error> = Box< +pub type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; +pub type PreSlotHook<'a, E, Error> = + Box, &mut BeaconState) -> Result<(), Error> + 'a>; +pub type PostSlotHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, 
Option>, bool) -> Result<(), Error> + 'a, >; -type StateRootIterDefault = std::iter::Empty>; +pub type StateRootIterDefault = std::iter::Empty>; /// Efficiently apply blocks to a state while configuring various parameters. /// @@ -31,7 +32,6 @@ pub struct BlockReplayer< > { state: BeaconState, spec: &'a ChainSpec, - state_processing_strategy: StateProcessingStrategy, block_sig_strategy: BlockSignatureStrategy, verify_block_root: Option, pre_block_hook: Option>, @@ -89,7 +89,6 @@ where Self { state, spec, - state_processing_strategy: StateProcessingStrategy::Accurate, block_sig_strategy: BlockSignatureStrategy::VerifyBulk, verify_block_root: Some(VerifyBlockRoot::True), pre_block_hook: None, @@ -107,10 +106,10 @@ where mut self, state_processing_strategy: StateProcessingStrategy, ) -> Self { + // FIXME(sproul): no-op if state_processing_strategy == StateProcessingStrategy::Inconsistent { self.verify_block_root = None; } - self.state_processing_strategy = state_processing_strategy; self } @@ -186,11 +185,6 @@ where blocks: &[SignedBeaconBlock>], i: usize, ) -> Result, Error> { - // If we don't care about state roots then return immediately. - if self.state_processing_strategy == StateProcessingStrategy::Inconsistent { - return Ok(Some(Hash256::zero())); - } - // If a state root iterator is configured, use it to find the root. 
if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter @@ -232,11 +226,12 @@ where } while self.state.slot() < block.slot() { + let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { - pre_slot_hook(&mut self.state)?; + pre_slot_hook(state_root, &mut self.state)?; } - let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; let summary = per_slot_processing(&mut self.state, state_root, self.spec) .map_err(BlockReplayError::from)?; @@ -250,15 +245,11 @@ where pre_block_hook(&mut self.state, block)?; } - let verify_block_root = self.verify_block_root.unwrap_or_else(|| { - // If no explicit policy is set, verify only the first 1 or 2 block roots if using - // accurate state roots. Inaccurate state roots require block root verification to - // be off. - if i <= 1 && self.state_processing_strategy == StateProcessingStrategy::Accurate { - VerifyBlockRoot::True - } else { - VerifyBlockRoot::False - } + // If no explicit policy is set, verify only the first 1 or 2 block roots. + let verify_block_root = self.verify_block_root.unwrap_or(if i <= 1 { + VerifyBlockRoot::True + } else { + VerifyBlockRoot::False }); // Proposer index was already checked when this block was originally processed, we // can omit recomputing it during replay. 
@@ -268,7 +259,7 @@ where &mut self.state, block, self.block_sig_strategy, - self.state_processing_strategy, + StateProcessingStrategy::Accurate, verify_block_root, &mut ctxt, self.spec, @@ -282,11 +273,12 @@ where if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { + let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { - pre_slot_hook(&mut self.state)?; + pre_slot_hook(state_root, &mut self.state)?; } - let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; let summary = per_slot_processing(&mut self.state, state_root, self.spec) .map_err(BlockReplayError::from)?; diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index c527807df89..4abe326cb1c 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -30,15 +30,16 @@ pub fn initiate_validator_exit( exit_queue_epoch.safe_add_assign(1)?; } - let validator = state.get_validator_mut(index)?; + let validator = state.get_validator_cow(index)?; // Return if the validator already initiated exit - if validator.exit_epoch != spec.far_future_epoch { + if validator.exit_epoch() != spec.far_future_epoch { return Ok(()); } - validator.exit_epoch = exit_queue_epoch; - validator.withdrawable_epoch = + let validator = validator.into_mut()?; + validator.mutable.exit_epoch = exit_queue_epoch; + validator.mutable.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; state diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 16b4e74ece9..da84b0af135 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -25,12 
+25,12 @@ pub fn slash_validator( initiate_validator_exit(state, slashed_index, spec)?; let validator = state.get_validator_mut(slashed_index)?; - validator.slashed = true; - validator.withdrawable_epoch = cmp::max( - validator.withdrawable_epoch, + validator.mutable.slashed = true; + validator.mutable.withdrawable_epoch = cmp::max( + validator.withdrawable_epoch(), epoch.safe_add(E::EpochsPerSlashingsVector::to_u64())?, ); - let validator_effective_balance = validator.effective_balance; + let validator_effective_balance = validator.effective_balance(); state.set_slashings( epoch, state diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index af843b3acbc..280b5377ab9 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -35,7 +35,7 @@ pub fn initialize_progressive_balances_cache( .zip(state.previous_epoch_participation()?) { // Exclude slashed validators. We are calculating *unslashed* participating totals. - if validator.slashed { + if validator.slashed() { continue; } @@ -78,7 +78,7 @@ fn update_flag_total_balances( ) -> Result<(), BeaconStateError> { for (flag, balance) in total_balances.total_flag_balances.iter_mut().enumerate() { if participation_flags.has_flag(flag)? 
{ - balance.safe_add_assign(validator.effective_balance)?; + balance.safe_add_assign(validator.effective_balance())?; } } Ok(()) diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index b2f2d85407e..1d7473d7350 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -117,7 +117,7 @@ pub fn initialize_epoch_cache( let mut activation_queue = ActivationQueue::default(); for (index, validator) in state.validators().iter().enumerate() { - effective_balances.push(validator.effective_balance); + effective_balances.push(validator.effective_balance()); // Add to speculative activation queue. activation_queue diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index b225923b418..88dd94186ae 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -28,7 +28,7 @@ pub fn initialize_beacon_state_from_eth1( let mut state = BeaconState::new(genesis_time, eth1_data, spec); // Seed RANDAO with Eth1 entropy - state.fill_randao_mixes_with(eth1_block_hash); + state.fill_randao_mixes_with(eth1_block_hash)?; let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); @@ -152,18 +152,20 @@ pub fn process_activations( spec: &ChainSpec, ) -> Result<(), Error> { let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); - for (index, validator) in validators.iter_mut().enumerate() { + let mut validators_iter = validators.iter_cow(); + while let Some((index, validator)) = validators_iter.next_cow() { + let validator = validator.into_mut()?; let balance = balances .get(index) .copied() .ok_or(Error::BalancesOutOfBounds(index))?; - validator.effective_balance = std::cmp::min( + validator.mutable.effective_balance = std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); - if 
validator.effective_balance == spec.max_effective_balance { - validator.activation_eligibility_epoch = E::genesis_epoch(); - validator.activation_epoch = E::genesis_epoch(); + if validator.effective_balance() == spec.max_effective_balance { + validator.mutable.activation_eligibility_epoch = E::genesis_epoch(); + validator.mutable.activation_epoch = E::genesis_epoch(); } } Ok(()) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index b370ec6216b..5d26cd22664 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -251,7 +251,7 @@ pub fn process_block_header( // Verify proposer is not slashed verify!( - !state.get_validator(proposer_index as usize)?.slashed, + !state.get_validator(proposer_index as usize)?.slashed(), HeaderInvalid::ProposerSlashed(proposer_index) ); diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 210db4c9c15..e35494a96ef 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -59,6 +59,7 @@ pub fn process_sync_aggregate( .into_iter() .zip(aggregate.sync_committee_bits.iter()) { + // FIXME(sproul): double-check this for Capella, proposer shouldn't have 0 effective balance if participation_bit { // Accumulate proposer rewards in a temp var in case the proposer has very low balance, is // part of the sync committee, does not participate and its penalties saturate. 
diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 28d36dbc518..336895514f9 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -82,6 +82,7 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + MilhouseError(milhouse::Error), EpochCacheError(EpochCacheError), WithdrawalsRootMismatch { expected: Hash256, @@ -138,6 +139,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index af9b7938132..7e114c71c6e 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -5,6 +5,7 @@ use crate::common::{ }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; +use std::sync::Arc; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; pub fn process_operations>( @@ -128,7 +129,7 @@ pub mod altair_deneb { let previous_epoch = ctxt.previous_epoch; let current_epoch = ctxt.current_epoch; - let attesting_indices = &verify_attestation_for_block_inclusion( + let attesting_indices = verify_attestation_for_block_inclusion( state, attestation, ctxt, @@ -136,7 +137,8 @@ pub mod altair_deneb { spec, ) .map_err(|e| e.into_with_index(att_index))? 
- .attesting_indices; + .attesting_indices + .clone(); // Matching roots, participation flag indices let data = &attestation.data; @@ -146,7 +148,7 @@ pub mod altair_deneb { // Update epoch participation flags. let mut proposer_reward_numerator = 0; - for index in attesting_indices { + for index in &attesting_indices { let index = *index as usize; let validator_effective_balance = state.epoch_cache().get_effective_balance(index)?; @@ -411,17 +413,19 @@ pub fn process_deposit( // Create a new validator. let validator = Validator { - pubkey: deposit.data.pubkey, - withdrawal_credentials: deposit.data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - slashed: false, + pubkey: Arc::new(deposit.data.pubkey), + mutable: ValidatorMutable { + withdrawal_credentials: deposit.data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, + spec.max_effective_balance, + ), + slashed: false, + }, }; state.validators_mut().push(validator)?; state.balances_mut().push(deposit.data.amount)?; diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 163b2cff7a9..d3d3af096db 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -64,7 +64,7 @@ where .validators() .get(validator_index) .and_then(|v| { - let pk: Option = 
v.pubkey.decompress().ok(); + let pk: Option = v.pubkey().decompress().ok(); pk }) .map(Cow::Owned) diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 1e8f25ed10b..500355c7543 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -29,7 +29,7 @@ pub fn verify_bls_to_execution_change( verify!( validator - .withdrawal_credentials + .withdrawal_credentials() .as_bytes() .first() .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) @@ -41,7 +41,7 @@ pub fn verify_bls_to_execution_change( // future. let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); verify!( - validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), + validator.withdrawal_credentials().as_bytes().get(1..) == pubkey_hash.get(1..), Invalid::WithdrawalCredentialsMismatch ); diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index fc258d38298..3619feaf857 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -41,7 +41,7 @@ pub fn verify_exit( // Verify that the validator has not yet exited. verify!( - validator.exit_epoch == spec.far_future_epoch, + validator.exit_epoch() == spec.far_future_epoch, ExitInvalid::AlreadyExited(exit.validator_index) ); @@ -56,7 +56,7 @@ pub fn verify_exit( // Verify the validator has been active long enough. 
let earliest_exit_epoch = validator - .activation_epoch + .activation_epoch() .safe_add(spec.shard_committee_period)?; verify!( current_epoch >= earliest_exit_epoch, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index dd1b2dfcd86..fc55fb11144 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -2,17 +2,14 @@ use crate::EpochProcessingError; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; -use types::VariableList; +use types::List; pub fn process_participation_flag_updates( state: &mut BeaconState, ) -> Result<(), EpochProcessingError> { *state.previous_epoch_participation_mut()? = std::mem::take(state.current_epoch_participation_mut()?); - *state.current_epoch_participation_mut()? = VariableList::new(vec![ - ParticipationFlags::default( - ); - state.validators().len() - ])?; + *state.current_epoch_participation_mut()? 
= + List::repeat(ParticipationFlags::default(), state.validators().len())?; Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index 7e244058038..fe8db7d2dee 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -202,9 +202,9 @@ impl ValidatorStatuses { let previous_epoch = state.previous_epoch(); for validator in state.validators().iter() { - let effective_balance = validator.effective_balance; + let effective_balance = validator.effective_balance(); let mut status = ValidatorStatus { - is_slashed: validator.slashed, + is_slashed: validator.slashed(), is_eligible: state.is_eligible_validator(previous_epoch, validator)?, is_withdrawable_in_current_epoch: validator.is_withdrawable_at(current_epoch), current_epoch_effective_balance: effective_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs index 7490f276567..00adabdcfe9 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -13,6 +13,9 @@ pub fn process_historical_summaries_update( .safe_rem((E::slots_per_historical_root() as u64).safe_div(E::slots_per_epoch())?)? == 0 { + // We need to flush any pending mutations before hashing. + state.block_roots_mut().apply_updates()?; + state.state_roots_mut().apply_updates()?; let summary = HistoricalSummary::new(state); return state .historical_summaries_mut()? 
diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index 7bd62c40816..146e4a3a8e3 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -21,30 +21,32 @@ pub fn process_effective_balance_updates( let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); - for (index, validator) in validators.iter_mut().enumerate() { + let mut validators_iter = validators.iter_cow(); + + while let Some((index, validator)) = validators_iter.next_cow() { let balance = balances .get(index) .copied() .ok_or(BeaconStateError::BalancesOutOfBounds(index))?; let new_effective_balance = if balance.safe_add(downward_threshold)? - < validator.effective_balance - || validator.effective_balance.safe_add(upward_threshold)? < balance + < validator.effective_balance() + || validator.effective_balance().safe_add(upward_threshold)? 
< balance { std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ) } else { - validator.effective_balance + validator.effective_balance() }; if validator.is_active_at(next_epoch) { new_total_active_balance.safe_add_assign(new_effective_balance)?; } - if new_effective_balance != validator.effective_balance { - validator.effective_balance = new_effective_balance; + if new_effective_balance != validator.effective_balance() { + validator.into_mut()?.mutable.effective_balance = new_effective_balance; } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 65a946e7bff..508426af18c 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -3,8 +3,8 @@ use crate::metrics; use std::sync::Arc; use types::{ consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, - BeaconStateError, Epoch, EthSpec, ParticipationFlags, ProgressiveBalancesCache, SyncCommittee, - Validator, VariableList, + BeaconStateError, Epoch, EthSpec, List, ParticipationFlags, ProgressiveBalancesCache, + SyncCommittee, Validator, }; /// Provides a summary of validator participation during the epoch. @@ -25,20 +25,20 @@ pub enum EpochProcessingSummary { #[derive(PartialEq, Debug)] pub struct ParticipationEpochSummary { /// Copy of the validator registry prior to mutation. - validators: VariableList, + validators: List, /// Copy of the participation flags for the previous epoch. - previous_epoch_participation: VariableList, + previous_epoch_participation: List, /// Copy of the participation flags for the current epoch. 
- current_epoch_participation: VariableList, + current_epoch_participation: List, previous_epoch: Epoch, current_epoch: Epoch, } impl ParticipationEpochSummary { pub fn new( - validators: VariableList, - previous_epoch_participation: VariableList, - current_epoch_participation: VariableList, + validators: List, + previous_epoch_participation: List, + current_epoch_participation: List, previous_epoch: Epoch, current_epoch: Epoch, ) -> Self { @@ -54,7 +54,7 @@ impl ParticipationEpochSummary { pub fn is_active_and_unslashed(&self, val_index: usize, epoch: Epoch) -> bool { self.validators .get(val_index) - .map(|validator| !validator.slashed && validator.is_active_at(epoch)) + .map(|validator| !validator.slashed() && validator.is_active_at(epoch)) .unwrap_or(false) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index c18e1303b26..de481ec6767 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,4 @@ -use types::{BeaconStateError, EpochCacheError, InconsistentFork}; +use types::{milhouse, BeaconStateError, EpochCacheError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { @@ -23,6 +23,7 @@ pub enum EpochProcessingError { InconsistentStateFork(InconsistentFork), InvalidJustificationBit(ssz_types::Error), InvalidFlagIndex(usize), + MilhouseError(milhouse::Error), EpochCache(EpochCacheError), } @@ -50,6 +51,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + impl From for EpochProcessingError { fn from(e: EpochCacheError) -> Self { EpochProcessingError::EpochCache(e) diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 
6d06b4d7ca5..7686932192f 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -14,7 +14,7 @@ pub fn process_historical_roots_update( .safe_rem(E::SlotsPerHistoricalRoot::to_u64().safe_div(E::slots_per_epoch())?)? == 0 { - let historical_batch = state.historical_batch(); + let historical_batch = state.historical_batch()?; state .historical_roots_mut() .push(historical_batch.tree_hash_root())?; diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 6b86f9c1e76..c978a76d059 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -17,7 +17,7 @@ pub fn process_registry_updates( let current_epoch = state.current_epoch(); let is_ejectable = |validator: &Validator| { validator.is_active_at(current_epoch) - && validator.effective_balance <= spec.ejection_balance + && validator.effective_balance() <= spec.ejection_balance }; let indices_to_update: Vec<_> = state .validators() @@ -32,7 +32,7 @@ pub fn process_registry_updates( for index in indices_to_update { let validator = state.get_validator_mut(index)?; if validator.is_eligible_for_activation_queue(spec) { - validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; + validator.mutable.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if is_ejectable(validator) { initiate_validator_exit(state, index, spec)?; @@ -50,7 +50,7 @@ pub fn process_registry_updates( let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; for index in activation_queue { - state.get_validator_mut(index)?.activation_epoch = delayed_activation_epoch; + state.get_validator_mut(index)?.mutable.activation_epoch = delayed_activation_epoch; } Ok(()) diff --git 
a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs index d577c52e6a5..c9f69c3c95e 100644 --- a/consensus/state_processing/src/per_epoch_processing/resets.rs +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -2,7 +2,7 @@ use super::errors::EpochProcessingError; use safe_arith::SafeArith; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::{Unsigned, VariableList}; +use types::{List, Unsigned}; pub fn process_eth1_data_reset( state: &mut BeaconState, @@ -13,7 +13,7 @@ pub fn process_eth1_data_reset( .safe_rem(E::SlotsPerEth1VotingPeriod::to_u64())? == 0 { - *state.eth1_data_votes_mut() = VariableList::empty(); + *state.eth1_data_votes_mut() = List::empty(); } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 9319d2941b5..513fc26b6ff 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -12,6 +12,7 @@ use types::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, + milhouse::Cow, ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, }; @@ -173,9 +174,9 @@ pub fn process_epoch_single_pass( let effective_balances_ctxt = &EffectiveBalancesContext::new(spec)?; // Iterate over the validators and related fields in one pass. 
- let mut validators_iter = validators.iter_mut(); - let mut balances_iter = balances.iter_mut(); - let mut inactivity_scores_iter = inactivity_scores.iter_mut(); + let mut validators_iter = validators.iter_cow(); + let mut balances_iter = balances.iter_cow(); + let mut inactivity_scores_iter = inactivity_scores.iter_cow(); // Values computed for the next epoch transition. let mut next_epoch_total_active_balance = 0; @@ -186,20 +187,21 @@ pub fn process_epoch_single_pass( previous_epoch_participation.iter(), current_epoch_participation.iter(), ) { - let validator = validators_iter - .next() + let (_, mut validator) = validators_iter + .next_cow() .ok_or(BeaconStateError::UnknownValidator(index))?; - let balance = balances_iter - .next() + let (_, mut balance) = balances_iter + .next_cow() .ok_or(BeaconStateError::UnknownValidator(index))?; - let inactivity_score = inactivity_scores_iter - .next() + let (_, mut inactivity_score) = inactivity_scores_iter + .next_cow() .ok_or(BeaconStateError::UnknownValidator(index))?; let is_active_current_epoch = validator.is_active_at(current_epoch); let is_active_previous_epoch = validator.is_active_at(previous_epoch); let is_eligible = is_active_previous_epoch - || (validator.slashed && previous_epoch.safe_add(1)? < validator.withdrawable_epoch); + || (validator.slashed() + && previous_epoch.safe_add(1)? < validator.withdrawable_epoch()); let base_reward = if is_eligible { epoch_cache.get_base_reward(index)? 
@@ -209,10 +211,10 @@ pub fn process_epoch_single_pass( let validator_info = &ValidatorInfo { index, - effective_balance: validator.effective_balance, + effective_balance: validator.effective_balance(), base_reward, is_eligible, - is_slashed: validator.slashed, + is_slashed: validator.slashed(), is_active_current_epoch, is_active_previous_epoch, previous_epoch_participation, @@ -223,7 +225,7 @@ pub fn process_epoch_single_pass( // `process_inactivity_updates` if conf.inactivity_updates { process_single_inactivity_update( - inactivity_score, + &mut inactivity_score, validator_info, state_ctxt, spec, @@ -233,8 +235,8 @@ pub fn process_epoch_single_pass( // `process_rewards_and_penalties` if conf.rewards_and_penalties { process_single_reward_and_penalty( - balance, - inactivity_score, + &mut balance, + &inactivity_score, validator_info, rewards_ctxt, state_ctxt, @@ -246,7 +248,7 @@ pub fn process_epoch_single_pass( // `process_registry_updates` if conf.registry_updates { process_single_registry_update( - validator, + &mut validator, validator_info, exit_cache, activation_queue, @@ -258,14 +260,14 @@ pub fn process_epoch_single_pass( // `process_slashings` if conf.slashings { - process_single_slashing(balance, validator, slashings_ctxt, state_ctxt, spec)?; + process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } // `process_effective_balance_updates` if conf.effective_balance_updates { process_single_effective_balance_update( *balance, - validator, + &mut validator, validator_info, &mut next_epoch_total_active_balance, &mut next_epoch_cache, @@ -290,7 +292,7 @@ pub fn process_epoch_single_pass( } fn process_single_inactivity_update( - inactivity_score: &mut u64, + inactivity_score: &mut Cow, validator_info: &ValidatorInfo, state_ctxt: &StateContext, spec: &ChainSpec, @@ -303,25 +305,27 @@ fn process_single_inactivity_update( if validator_info.is_unslashed_participating_index(TIMELY_TARGET_FLAG_INDEX)? 
{ // Avoid mutating when the inactivity score is 0 and can't go any lower -- the common // case. - if *inactivity_score == 0 { + if **inactivity_score == 0 { return Ok(()); } - inactivity_score.safe_sub_assign(1)?; + inactivity_score.make_mut()?.safe_sub_assign(1)?; } else { - inactivity_score.safe_add_assign(spec.inactivity_score_bias)?; + inactivity_score + .make_mut()? + .safe_add_assign(spec.inactivity_score_bias)?; } // Decrease the score of all validators for forgiveness when not during a leak if !state_ctxt.is_in_inactivity_leak { - let deduction = min(spec.inactivity_score_recovery_rate, *inactivity_score); - inactivity_score.safe_sub_assign(deduction)?; + let deduction = min(spec.inactivity_score_recovery_rate, **inactivity_score); + inactivity_score.make_mut()?.safe_sub_assign(deduction)?; } Ok(()) } fn process_single_reward_and_penalty( - balance: &mut u64, + balance: &mut Cow, inactivity_score: &u64, validator_info: &ValidatorInfo, rewards_ctxt: &RewardsAndPenaltiesContext, @@ -351,6 +355,7 @@ fn process_single_reward_and_penalty( )?; if delta.rewards != 0 || delta.penalties != 0 { + let balance = balance.make_mut()?; balance.safe_add_assign(delta.rewards)?; *balance = balance.saturating_sub(delta.penalties); } @@ -452,7 +457,7 @@ impl RewardsAndPenaltiesContext { } fn process_single_registry_update( - validator: &mut Validator, + validator: &mut Cow, validator_info: &ValidatorInfo, exit_cache: &mut ExitCache, activation_queue: &BTreeSet, @@ -463,16 +468,18 @@ fn process_single_registry_update( let current_epoch = state_ctxt.current_epoch; if validator.is_eligible_for_activation_queue(spec) { - validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; + validator.make_mut()?.mutable.activation_eligibility_epoch = current_epoch.safe_add(1)?; } - if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance + if validator.is_active_at(current_epoch) + && validator.effective_balance() <= spec.ejection_balance { 
initiate_validator_exit(validator, exit_cache, state_ctxt, spec)?; } if activation_queue.contains(&validator_info.index) { - validator.activation_epoch = spec.compute_activation_exit_epoch(current_epoch)?; + validator.make_mut()?.mutable.activation_epoch = + spec.compute_activation_exit_epoch(current_epoch)?; } // Caching: add to speculative activation queue for next epoch. @@ -487,13 +494,13 @@ fn process_single_registry_update( } fn initiate_validator_exit( - validator: &mut Validator, + validator: &mut Cow, exit_cache: &mut ExitCache, state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { // Return if the validator already initiated exit - if validator.exit_epoch != spec.far_future_epoch { + if validator.exit_epoch() != spec.far_future_epoch { return Ok(()); } @@ -508,8 +515,9 @@ fn initiate_validator_exit( exit_queue_epoch.safe_add_assign(1)?; } - validator.exit_epoch = exit_queue_epoch; - validator.withdrawable_epoch = + let validator = validator.make_mut()?; + validator.mutable.exit_epoch = exit_queue_epoch; + validator.mutable.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; exit_cache.record_validator_exit(exit_queue_epoch)?; @@ -540,24 +548,25 @@ impl SlashingsContext { } fn process_single_slashing( - balance: &mut u64, + balance: &mut Cow, validator: &Validator, slashings_ctxt: &SlashingsContext, state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { - if validator.slashed && slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch + if validator.slashed() + && slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch() { let increment = spec.effective_balance_increment; let penalty_numerator = validator - .effective_balance + .effective_balance() .safe_div(increment)? .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; let penalty = penalty_numerator .safe_div(state_ctxt.total_active_balance)? 
.safe_mul(increment)?; - *balance = balance.saturating_sub(penalty); + *balance.make_mut()? = balance.saturating_sub(penalty); } Ok(()) } @@ -581,7 +590,7 @@ impl EffectiveBalancesContext { #[allow(clippy::too_many_arguments)] fn process_single_effective_balance_update( balance: u64, - validator: &mut Validator, + validator: &mut Cow, validator_info: &ValidatorInfo, next_epoch_total_active_balance: &mut u64, next_epoch_cache: &mut PreEpochCache, @@ -590,11 +599,11 @@ fn process_single_effective_balance_update( state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { - let old_effective_balance = validator.effective_balance; + let old_effective_balance = validator.effective_balance(); let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? - < validator.effective_balance + < validator.effective_balance() || validator - .effective_balance + .effective_balance() .safe_add(eb_ctxt.upward_threshold)? < balance { @@ -603,7 +612,7 @@ fn process_single_effective_balance_update( spec.max_effective_balance, ) } else { - validator.effective_balance + validator.effective_balance() }; if validator.is_active_at(state_ctxt.next_epoch) { @@ -611,12 +620,12 @@ fn process_single_effective_balance_update( } if new_effective_balance != old_effective_balance { - validator.effective_balance = new_effective_balance; + validator.make_mut()?.mutable.effective_balance = new_effective_balance; // Update progressive balances cache for the *current* epoch, which will soon become the // previous epoch once the epoch transition completes. 
progressive_balances.on_effective_balance_change( - validator.slashed, + validator.slashed(), validator_info.current_epoch_participation, old_effective_balance, new_effective_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index a1770478008..7618c9b6367 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -27,9 +27,9 @@ pub fn process_slashings( .iter() .enumerate() .filter(|(_, validator)| { - validator.slashed && target_withdrawable_epoch == validator.withdrawable_epoch + validator.slashed() && target_withdrawable_epoch == validator.withdrawable_epoch() }) - .map(|(index, validator)| (index, validator.effective_balance)) + .map(|(index, validator)| (index, validator.effective_balance())) .collect::>(); for (index, validator_effective_balance) in indices { diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index cfbc6eba9e9..872560db3df 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -4,13 +4,13 @@ use std::mem; use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, VariableList, + Fork, List, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, }; /// Translate the participation information from the epoch prior to the fork into Altair's format. pub fn translate_participation( state: &mut BeaconState, - pending_attestations: &VariableList, E::MaxPendingAttestations>, + pending_attestations: &List, E::MaxPendingAttestations>, spec: &ChainSpec, ) -> Result<(), Error> { // Previous epoch committee cache is required for `get_attesting_indices`. 
@@ -51,8 +51,8 @@ pub fn upgrade_to_altair( let pre = pre_state.as_base_mut()?; let default_epoch_participation = - VariableList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; - let inactivity_scores = VariableList::new(vec![0; pre.validators.len()])?; + List::new(vec![ParticipationFlags::default(); pre.validators.len()])?; + let inactivity_scores = List::new(vec![0; pre.validators.len()])?; let temp_sync_committee = Arc::new(SyncCommittee::temporary()); @@ -108,7 +108,6 @@ pub fn upgrade_to_altair( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); // Fill in previous epoch participation from the pre state's pending attestations. diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 87b40abebdd..51e29d10f3c 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,7 +1,7 @@ use std::mem; use types::{ BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, VariableList, + Fork, List, }; /// Transform a `Merge` state into an `Capella` state. 
@@ -61,7 +61,7 @@ pub fn upgrade_to_capella( // Capella next_withdrawal_index: 0, next_withdrawal_validator_index: 0, - historical_summaries: VariableList::default(), + historical_summaries: List::default(), // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), @@ -70,7 +70,6 @@ pub fn upgrade_to_capella( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/deneb.rs b/consensus/state_processing/src/upgrade/deneb.rs index 43fe5d9dc3d..c21e1361a5a 100644 --- a/consensus/state_processing/src/upgrade/deneb.rs +++ b/consensus/state_processing/src/upgrade/deneb.rs @@ -71,7 +71,6 @@ pub fn upgrade_to_deneb( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index a37d0fc3beb..f64228f050b 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -70,7 +70,6 @@ pub fn upgrade_to_electra( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index 147c97ac29e..02705743ceb 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -66,7 +66,6 @@ pub fn upgrade_to_bellatrix( exit_cache: mem::take(&mut pre.exit_cache), 
slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index db15f53537e..4802481ae82 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -52,6 +52,8 @@ serde_json = { workspace = true } smallvec = { workspace = true } maplit = { workspace = true } strum = { workspace = true } +milhouse = { workspace = true } +rpds = "0.11.0" [dev-dependencies] criterion = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index bb2b527109f..17d266a56e5 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -2,12 +2,13 @@ use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use milhouse::List; use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; use types::{ test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256, - MainnetEthSpec, Validator, + MainnetEthSpec, Validator, ValidatorMutable, }; fn get_state(validator_count: usize) -> BeaconState { @@ -27,21 +28,25 @@ fn get_state(validator_count: usize) -> BeaconState { .expect("should add balance"); } - *state.validators_mut() = (0..validator_count) - .collect::>() - .par_iter() - .map(|&i| Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: spec.max_effective_balance, - slashed: false, - activation_eligibility_epoch: Epoch::new(0), - activation_epoch: Epoch::new(0), - exit_epoch: Epoch::from(u64::max_value()), - withdrawable_epoch: Epoch::from(u64::max_value()), - }) - .collect::>() - .into(); + *state.validators_mut() = List::new( + (0..validator_count) + .collect::>() + .par_iter() + .map(|&i| Validator { + pubkey: 
Arc::new(generate_deterministic_keypair(i).pk.compress()), + mutable: ValidatorMutable { + withdrawal_credentials: Hash256::from_low_u64_le(i as u64), + effective_balance: spec.max_effective_balance, + slashed: false, + activation_eligibility_epoch: Epoch::new(0), + activation_epoch: Epoch::new(0), + exit_epoch: Epoch::from(u64::max_value()), + withdrawable_epoch: Epoch::from(u64::max_value()), + }, + }) + .collect(), + ) + .unwrap(); state } @@ -96,19 +101,6 @@ fn all_benches(c: &mut Criterion) { .sample_size(10), ); - let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("clone/tree_hash_cache", move |b| { - b.iter_batched_ref( - || inner_state.clone(), - |state| black_box(state.tree_hash_cache().clone()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - let inner_state = state.clone(); c.bench( &format!("{}_validators", validator_count), diff --git a/consensus/types/examples/clone_state.rs b/consensus/types/examples/clone_state.rs deleted file mode 100644 index a7e80cf4078..00000000000 --- a/consensus/types/examples/clone_state.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. 
- -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state(validator_count); - - for _ in 0..100_000 { - let _ = state.clone(); - } -} diff --git a/consensus/types/examples/ssz_encode_state.rs b/consensus/types/examples/ssz_encode_state.rs deleted file mode 100644 index 5d0a2db17c7..00000000000 --- a/consensus/types/examples/ssz_encode_state.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. 
- -use ssz::Encode; -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state(validator_count); - - for _ in 0..1_024 { - let state_bytes = state.as_ssz_bytes(); - let _: BeaconState = - BeaconState::from_ssz_bytes(&state_bytes, &E::default_spec()).expect("should decode"); - } -} diff --git a/consensus/types/examples/tree_hash_state.rs b/consensus/types/examples/tree_hash_state.rs deleted file mode 100644 index 26777b25912..00000000000 --- a/consensus/types/examples/tree_hash_state.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. 
- -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let mut state = get_state(validator_count); - state.update_tree_hash_cache().expect("should update cache"); - - actual_thing::(&mut state); -} - -fn actual_thing(state: &mut BeaconState) { - for _ in 0..200_024 { - let _ = state.update_tree_hash_cache().expect("should update cache"); - } -} diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/activation_queue.rs index 09ffa5b85e7..acbb276a61a 100644 --- a/consensus/types/src/activation_queue.rs +++ b/consensus/types/src/activation_queue.rs @@ -23,7 +23,7 @@ impl ActivationQueue { ) { if validator.could_be_eligible_for_activation_at(next_epoch, spec) { self.queue - .insert((validator.activation_eligibility_epoch, index)); + .insert((validator.activation_eligibility_epoch(), index)); } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 14874f0204f..94c44abcc90 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -358,7 +358,7 @@ impl> 
BeaconBlockBase { }; let deposit = Deposit { - proof: FixedVector::from_elem(Hash256::zero()), + proof: ssz_types::FixedVector::from_elem(Hash256::zero()), data: deposit_data, }; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ba11c9c4cce..eafd12b13ca 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1,12 +1,14 @@ use self::committee_cache::get_active_validator_indices; use crate::historical_summary::HistoricalSummary; use crate::test_utils::TestRandom; +use crate::validator::ValidatorTrait; use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; use derivative::Derivative; use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; +use metastruct::{metastruct, NumFields}; pub use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; @@ -28,28 +30,26 @@ pub use crate::beacon_state::balance::Balance; pub use crate::beacon_state::exit_cache::ExitCache; pub use crate::beacon_state::progressive_balances_cache::*; pub use crate::beacon_state::slashings_cache::SlashingsCache; -pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; -pub use tree_hash_cache::BeaconTreeHashCache; +pub use milhouse::{interface::Interface, List, Vector}; #[macro_use] mod committee_cache; mod balance; -mod clone_config; +pub mod compact_state; mod exit_cache; mod iter; mod progressive_balances_cache; mod pubkey_cache; mod slashings_cache; mod tests; -mod tree_hash_cache; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; -pub type Validators = VariableList::ValidatorRegistryLimit>; -pub type Balances = VariableList::ValidatorRegistryLimit>; +pub type Validators = List::ValidatorRegistryLimit>; +pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] pub enum Error { @@ -144,6 +144,20 @@ pub enum Error { 
current_epoch: Epoch, epoch: Epoch, }, + MilhouseError(milhouse::Error), + CommitteeCacheDiffInvalidEpoch { + prev_current_epoch: Epoch, + current_epoch: Epoch, + }, + CommitteeCacheDiffUninitialized { + expected_epoch: Epoch, + }, + DiffAcrossFork { + prev_fork: ForkName, + current_fork: ForkName, + }, + TotalActiveBalanceDiffUninitialized, + MissingImmutableValidator(usize), IndexNotSupported(usize), InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), @@ -207,97 +221,206 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, - arbitrary::Arbitrary + arbitrary::Arbitrary, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + arbitrary(bound = "E: EthSpec, GenericValidator: ValidatorTrait"), derivative(Clone), ), + specific_variant_attributes( + Base(metastruct( + mappings( + map_beacon_state_base_fields(), + map_beacon_state_base_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + bimappings(bimap_beacon_state_base_tree_list_fields( + other_type = "BeaconStateBase", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Altair(metastruct( + mappings( + map_beacon_state_altair_fields(), + map_beacon_state_altair_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + bimappings(bimap_beacon_state_altair_tree_list_fields( + other_type = "BeaconStateAltair", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Merge(metastruct( + mappings( + map_beacon_state_bellatrix_fields(), + map_beacon_state_bellatrix_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + bimappings(bimap_beacon_state_merge_tree_list_fields( + other_type = "BeaconStateMerge", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Capella(metastruct( + mappings( + map_beacon_state_capella_fields(), + map_beacon_state_capella_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + 
bimappings(bimap_beacon_state_capella_tree_list_fields( + other_type = "BeaconStateCapella", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Deneb(metastruct( + mappings( + map_beacon_state_deneb_fields(), + map_beacon_state_deneb_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + bimappings(bimap_beacon_state_deneb_tree_list_fields( + other_type = "BeaconStateDeneb", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Electra(metastruct( + mappings( + map_beacon_state_electra_fields(), + map_beacon_state_electra_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + bimappings(bimap_beacon_state_electra_tree_list_fields( + other_type = "BeaconStateElectra", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )) + ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + map_ref_mut_into(BeaconStateRef) +)] +#[derive( + Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary, )] -#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] +#[arbitrary(bound = "E: EthSpec, GenericValidator: ValidatorTrait")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconState +pub struct BeaconState where E: EthSpec, { // Versioning #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub genesis_validators_root: Hash256, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub slot: Slot, #[superstruct(getter(copy))] + 
#[metastruct(exclude_from(tree_lists))] pub fork: Fork, // History + #[metastruct(exclude_from(tree_lists))] pub latest_block_header: BeaconBlockHeader, - #[compare_fields(as_slice)] - pub block_roots: FixedVector, - #[compare_fields(as_slice)] - pub state_roots: FixedVector, + #[test_random(default)] + #[compare_fields(as_iter)] + pub block_roots: Vector, + #[test_random(default)] + #[compare_fields(as_iter)] + pub state_roots: Vector, // Frozen in Capella, replaced by historical_summaries - pub historical_roots: VariableList, + #[test_random(default)] + #[compare_fields(as_iter)] + pub historical_roots: List, // Ethereum 1.0 chain data + #[metastruct(exclude_from(tree_lists))] pub eth1_data: Eth1Data, - pub eth1_data_votes: VariableList, + #[test_random(default)] + pub eth1_data_votes: List, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry - #[compare_fields(as_slice)] - pub validators: VariableList, - #[compare_fields(as_slice)] + #[test_random(default)] + pub validators: List, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - pub balances: VariableList, + #[compare_fields(as_iter)] + #[test_random(default)] + pub balances: List, // Randomness - pub randao_mixes: FixedVector, + #[test_random(default)] + pub randao_mixes: Vector, // Slashings + #[test_random(default)] #[serde(with = "ssz_types::serde_utils::quoted_u64_fixed_vec")] - pub slashings: FixedVector, + pub slashings: Vector, // Attestations (genesis fork only) #[superstruct(only(Base))] - pub previous_epoch_attestations: VariableList, E::MaxPendingAttestations>, + #[test_random(default)] + pub previous_epoch_attestations: List, E::MaxPendingAttestations>, #[superstruct(only(Base))] - pub current_epoch_attestations: VariableList, E::MaxPendingAttestations>, + #[test_random(default)] + pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) 
#[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub previous_epoch_participation: VariableList, + #[test_random(default)] + pub previous_epoch_participation: List, #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub current_epoch_participation: VariableList, + #[test_random(default)] + pub current_epoch_participation: List, // Finality #[test_random(default)] + #[metastruct(exclude_from(tree_lists))] pub justification_bits: BitVector, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub previous_justified_checkpoint: Checkpoint, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub current_justified_checkpoint: Checkpoint, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub finalized_checkpoint: Checkpoint, // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub inactivity_scores: VariableList, + #[test_random(default)] + pub inactivity_scores: List, // Light-client sync committees #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[metastruct(exclude_from(tree_lists))] pub current_sync_committee: Arc>, #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[metastruct(exclude_from(tree_lists))] pub next_sync_committee: Arc>, // Execution @@ -305,89 +428,85 @@ where only(Merge), partial_getter(rename = "latest_execution_payload_header_merge") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, #[superstruct( only(Capella), partial_getter(rename = "latest_execution_payload_header_capella") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, #[superstruct( only(Deneb), partial_getter(rename = "latest_execution_payload_header_deneb") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb, 
#[superstruct( only(Electra), partial_getter(rename = "latest_execution_payload_header_electra") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, // Capella #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] + #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] + #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. #[superstruct(only(Capella, Deneb, Electra))] - pub historical_summaries: VariableList, + #[test_random(default)] + pub historical_summaries: List, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub total_active_balance: Option<(Epoch, u64)>, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub progressive_balances_cache: ProgressiveBalancesCache, + #[metastruct(exclude)] + pub committee_caches: [Arc; CACHED_EPOCHS], #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub committee_caches: [CommitteeCache; CACHED_EPOCHS], + #[metastruct(exclude)] + pub progressive_balances_cache: ProgressiveBalancesCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub pubkey_cache: 
PubkeyCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub exit_cache: ExitCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub slashings_cache: SlashingsCache, /// Epoch cache of values that are useful for block processing that are static over an epoch. #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] + #[metastruct(exclude)] pub epoch_cache: EpochCache, - #[serde(skip_serializing, skip_deserializing)] - #[ssz(skip_serializing, skip_deserializing)] - #[tree_hash(skip_hashing)] - #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub tree_hash_cache: BeaconTreeHashCache, -} - -impl Clone for BeaconState { - fn clone(&self) -> Self { - self.clone_with(CloneConfig::all()) - } } impl BeaconState { @@ -395,6 +514,7 @@ impl BeaconState { /// /// Not a complete genesis state, see `initialize_beacon_state_from_eth1`. 
pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self { + let default_committee_cache = Arc::new(CommitteeCache::default()); BeaconState::Base(BeaconStateBase { // Versioning genesis_time, @@ -408,28 +528,28 @@ impl BeaconState { // History latest_block_header: BeaconBlock::::empty(spec).temporary_block_header(), - block_roots: FixedVector::from_elem(Hash256::zero()), - state_roots: FixedVector::from_elem(Hash256::zero()), - historical_roots: VariableList::empty(), + block_roots: Vector::default(), + state_roots: Vector::default(), + historical_roots: List::default(), // Eth1 eth1_data, - eth1_data_votes: VariableList::empty(), + eth1_data_votes: List::default(), eth1_deposit_index: 0, // Validator registry - validators: VariableList::empty(), // Set later. - balances: VariableList::empty(), // Set later. + validators: List::default(), // Set later. + balances: List::default(), // Set later. // Randomness - randao_mixes: FixedVector::from_elem(Hash256::zero()), + randao_mixes: Vector::default(), // Slashings - slashings: FixedVector::from_elem(0), + slashings: Vector::default(), // Attestations - previous_epoch_attestations: VariableList::empty(), - current_epoch_attestations: VariableList::empty(), + previous_epoch_attestations: List::default(), + current_epoch_attestations: List::default(), // Finality justification_bits: BitVector::new(), @@ -441,15 +561,14 @@ impl BeaconState { total_active_balance: None, progressive_balances_cache: <_>::default(), committee_caches: [ - CommitteeCache::default(), - CommitteeCache::default(), - CommitteeCache::default(), + default_committee_cache.clone(), + default_committee_cache.clone(), + default_committee_cache, ], pubkey_cache: PubkeyCache::default(), exit_cache: ExitCache::default(), slashings_cache: SlashingsCache::default(), epoch_cache: EpochCache::default(), - tree_hash_cache: <_>::default(), }) } @@ -485,30 +604,6 @@ impl BeaconState { } } - /// Specialised deserialisation method that uses the 
`ChainSpec` as context. - #[allow(clippy::arithmetic_side_effects)] - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). - let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); - let slot_end = slot_start + ::ssz_fixed_len(); - - let slot_bytes = bytes - .get(slot_start..slot_end) - .ok_or(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: slot_end, - })?; - - let slot = Slot::from_ssz_bytes(slot_bytes)?; - let fork_at_slot = spec.fork_name_at_slot::(slot); - - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) - } - /// Returns the `tree_hash_root` of the state. /// /// Spec v0.12.1 @@ -516,11 +611,15 @@ impl BeaconState { Hash256::from_slice(&self.tree_hash_root()[..]) } - pub fn historical_batch(&self) -> HistoricalBatch { - HistoricalBatch { + pub fn historical_batch(&mut self) -> Result, Error> { + // Updating before cloning makes the clone cheap and saves repeated hashing. + self.block_roots_mut().apply_updates()?; + self.state_roots_mut().apply_updates()?; + + Ok(HistoricalBatch { block_roots: self.block_roots().clone(), state_roots: self.state_roots().clone(), - } + }) } /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator @@ -531,6 +630,21 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } + /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. + pub fn get_validator_index_read_only( + &self, + pubkey: &PublicKeyBytes, + ) -> Result, Error> { + let pubkey_cache = self.pubkey_cache(); + if pubkey_cache.len() != self.validators().len() { + return Err(Error::PubkeyCacheIncomplete { + cache_len: pubkey_cache.len(), + registry_len: self.validators().len(), + }); + } + Ok(pubkey_cache.get(pubkey)) + } + /// The epoch corresponding to `self.slot()`. 
pub fn current_epoch(&self) -> Epoch { self.slot().epoch(E::slots_per_epoch()) @@ -952,7 +1066,7 @@ impl BeaconState { .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; let random_byte = Self::shuffling_random_byte(i, seed.as_bytes())?; - let effective_balance = self.get_validator(candidate_index)?.effective_balance; + let effective_balance = self.get_validator(candidate_index)?.effective_balance(); if effective_balance.safe_mul(MAX_RANDOM_BYTE)? >= spec .max_effective_balance @@ -974,7 +1088,7 @@ impl BeaconState { .map(|&index| { self.validators() .get(index) - .map(|v| v.pubkey) + .map(|v| *v.pubkey()) .ok_or(Error::UnknownValidator(index)) }) .collect::, _>>()?; @@ -985,7 +1099,7 @@ impl BeaconState { let aggregate_pubkey = AggregatePublicKey::aggregate(&decompressed_pubkeys)?; Ok(SyncCommittee { - pubkeys: FixedVector::new(pubkeys)?, + pubkeys: ssz_types::FixedVector::new(pubkeys)?, aggregate_pubkey: aggregate_pubkey.to_public_key().compress(), }) } @@ -1005,7 +1119,7 @@ impl BeaconState { Ok(validator_indices .iter() .map(|&validator_index| { - let pubkey = self.get_validator(validator_index as usize)?.pubkey; + let pubkey = *self.get_validator(validator_index as usize)?.pubkey(); Ok(SyncDuty::from_sync_committee( validator_index, @@ -1079,8 +1193,9 @@ impl BeaconState { } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) { - *self.randao_mixes_mut() = FixedVector::from_elem(index_root); + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + *self.randao_mixes_mut() = Vector::from_elem(index_root)?; + Ok(()) } /// Safely obtains the index for `randao_mixes` @@ -1213,7 +1328,7 @@ impl BeaconState { } /// Get a reference to the entire `slashings` vector. 
- pub fn get_all_slashings(&self) -> &[u64] { + pub fn get_all_slashings(&self) -> &Vector { self.slashings() } @@ -1237,45 +1352,25 @@ impl BeaconState { } /// Convenience accessor for validators and balances simultaneously. - pub fn validators_and_balances_and_progressive_balances_mut( - &mut self, + pub fn validators_and_balances_and_progressive_balances_mut<'a>( + &'a mut self, ) -> ( - &mut Validators, - &mut Balances, - &mut ProgressiveBalancesCache, + &'a mut Validators, + &'a mut Balances, + &'a mut ProgressiveBalancesCache, ) { - match self { - BeaconState::Base(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Altair(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Merge(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Capella(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Deneb(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Electra(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - } + map_beacon_state_ref_mut_into_beacon_state_ref!(&'a _, self.to_mut(), |inner, cons| { + if false { + cons(&*inner); + unreachable!() + } else { + ( + &mut inner.validators, + &mut inner.balances, + &mut inner.progressive_balances_cache, + ) + } + }) } #[allow(clippy::type_complexity)] @@ -1285,9 +1380,9 @@ impl BeaconState { ( &mut Validators, &mut Balances, - &VariableList, - &VariableList, - &mut VariableList, + &List, + &List, + &mut List, &mut ProgressiveBalancesCache, &mut ExitCache, &mut EpochCache, @@ -1349,6 +1444,13 @@ impl BeaconState { } } + /// Get a mutable reference to the balance of a single validator. 
+ pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + self.balances_mut() + .get_mut(validator_index) + .ok_or(Error::BalancesOutOfBounds(validator_index)) + } + /// Generate a seed for the given `epoch`. pub fn get_seed( &self, @@ -1398,10 +1500,20 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Safe copy-on-write accessor for the `validators` list. + pub fn get_validator_cow( + &mut self, + validator_index: usize, + ) -> Result, Error> { + self.validators_mut() + .get_cow(validator_index) + .ok_or(Error::UnknownValidator(validator_index)) + } + /// Return the effective balance for a validator with the given `validator_index`. pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) - .map(|v| v.effective_balance) + .map(|v| v.effective_balance()) } /// Get the inactivity score for a single validator. @@ -1423,13 +1535,6 @@ impl BeaconState { .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) } - /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { - self.balances_mut() - .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) - } - /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. 
/// /// Spec v0.12.1 @@ -1497,7 +1602,7 @@ impl BeaconState { for validator in self.validators() { if validator.is_active_at(current_epoch) { - total_active_balance.safe_add_assign(validator.effective_balance)?; + total_active_balance.safe_add_assign(validator.effective_balance())?; } } Ok(std::cmp::max( @@ -1575,7 +1680,7 @@ impl BeaconState { epoch: Epoch, previous_epoch: Epoch, current_epoch: Epoch, - ) -> Result<&mut VariableList, Error> { + ) -> Result<&mut List, Error> { if epoch == current_epoch { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -1659,7 +1764,6 @@ impl BeaconState { self.drop_committee_cache(RelativeEpoch::Current)?; self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_pubkey_cache(); - self.drop_tree_hash_cache(); self.drop_progressive_balances_cache(); *self.exit_cache_mut() = ExitCache::default(); *self.slashings_cache_mut() = SlashingsCache::default(); @@ -1718,7 +1822,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { CommitteeCache::initialized(self, epoch, spec) } @@ -1732,7 +1836,7 @@ impl BeaconState { self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); - *self.committee_cache_at_index_mut(next)? = CommitteeCache::default(); + *self.committee_cache_at_index_mut(next)? = Arc::new(CommitteeCache::default()); Ok(()) } @@ -1747,21 +1851,24 @@ impl BeaconState { /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. - fn committee_cache_at_slot(&self, slot: Slot) -> Result<&CommitteeCache, Error> { + fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. 
- fn committee_cache_at_index(&self, index: usize) -> Result<&CommitteeCache, Error> { + fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { self.committee_caches() .get(index) .ok_or(Error::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. - fn committee_cache_at_index_mut(&mut self, index: usize) -> Result<&mut CommitteeCache, Error> { + fn committee_cache_at_index_mut( + &mut self, + index: usize, + ) -> Result<&mut Arc, Error> { self.committee_caches_mut() .get_mut(index) .ok_or(Error::CommitteeCachesOutOfBounds(index)) @@ -1769,7 +1876,10 @@ impl BeaconState { /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. - pub fn committee_cache(&self, relative_epoch: RelativeEpoch) -> Result<&CommitteeCache, Error> { + pub fn committee_cache( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&Arc, Error> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; @@ -1780,30 +1890,10 @@ impl BeaconState { } } - /// Returns the cache for some `RelativeEpoch`, replacing the existing cache with an - /// un-initialized cache. Returns an error if the existing cache has not been initialized. - pub fn take_committee_cache( - &mut self, - relative_epoch: RelativeEpoch, - ) -> Result { - let i = Self::committee_cache_index(relative_epoch); - let current_epoch = self.current_epoch(); - let cache = self - .committee_caches_mut() - .get_mut(i) - .ok_or(Error::CommitteeCachesOutOfBounds(i))?; - - if cache.is_initialized_at(relative_epoch.into_epoch(current_epoch)) { - Ok(mem::take(cache)) - } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) - } - } - /// Drops the cache, leaving it in an uninitialized state. pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? 
= - CommitteeCache::default(); + Arc::new(CommitteeCache::default()); Ok(()) } @@ -1813,13 +1903,11 @@ impl BeaconState { /// never re-add a pubkey. pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); - for (i, validator) in self - .validators() - .iter() - .enumerate() - .skip(pubkey_cache.len()) - { - let success = pubkey_cache.insert(validator.pubkey, i); + let start_index = pubkey_cache.len(); + + for (i, validator) in self.validators().iter_from(start_index)?.enumerate() { + let index = start_index.safe_add(i)?; + let success = pubkey_cache.insert(*validator.pubkey(), index); if !success { return Err(Error::PubkeyCacheInconsistent); } @@ -1834,96 +1922,51 @@ impl BeaconState { *self.pubkey_cache_mut() = PubkeyCache::default() } + pub fn has_pending_mutations(&self) -> bool { + self.block_roots().has_pending_updates() + || self.state_roots().has_pending_updates() + || self.historical_roots().has_pending_updates() + || self.eth1_data_votes().has_pending_updates() + || self.validators().has_pending_updates() + || self.balances().has_pending_updates() + || self.randao_mixes().has_pending_updates() + || self.slashings().has_pending_updates() + || self + .previous_epoch_attestations() + .map_or(false, List::has_pending_updates) + || self + .current_epoch_attestations() + .map_or(false, List::has_pending_updates) + || self + .previous_epoch_participation() + .map_or(false, List::has_pending_updates) + || self + .current_epoch_participation() + .map_or(false, List::has_pending_updates) + || self + .inactivity_scores() + .map_or(false, List::has_pending_updates) + } + /// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache. fn drop_progressive_balances_cache(&mut self) { *self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default(); } - /// Initialize but don't fill the tree hash cache, if it isn't already initialized. 
- pub fn initialize_tree_hash_cache(&mut self) { - if !self.tree_hash_cache().is_initialized() { - *self.tree_hash_cache_mut() = BeaconTreeHashCache::new(self) - } - } - /// Compute the tree hash root of the state using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. pub fn update_tree_hash_cache(&mut self) -> Result { - self.initialize_tree_hash_cache(); - - let cache = self.tree_hash_cache_mut().take(); - - if let Some(mut cache) = cache { - // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as - // None. There's no need to keep a cache that fails. - let root = cache.recalculate_tree_hash_root(self)?; - self.tree_hash_cache_mut().restore(cache); - Ok(root) - } else { - Err(Error::TreeHashCacheNotInitialized) - } + self.apply_pending_mutations()?; + Ok(self.tree_hash_root()) } /// Compute the tree hash root of the validators using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. pub fn update_validators_tree_hash_cache(&mut self) -> Result { - self.initialize_tree_hash_cache(); - - let cache = self.tree_hash_cache_mut().take(); - - if let Some(mut cache) = cache { - // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as - // None. There's no need to keep a cache that fails. - let root = cache.recalculate_validators_tree_hash_root(self.validators())?; - self.tree_hash_cache_mut().restore(cache); - Ok(root) - } else { - Err(Error::TreeHashCacheNotInitialized) - } - } - - /// Completely drops the tree hash cache, replacing it with a new, empty cache. - pub fn drop_tree_hash_cache(&mut self) { - self.tree_hash_cache_mut().uninitialize(); - } - - /// Clone the state whilst preserving only the selected caches. 
- pub fn clone_with(&self, config: CloneConfig) -> Self { - let mut res = match self { - BeaconState::Base(inner) => BeaconState::Base(inner.clone()), - BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), - BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), - BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), - BeaconState::Deneb(inner) => BeaconState::Deneb(inner.clone()), - BeaconState::Electra(inner) => BeaconState::Electra(inner.clone()), - }; - if config.committee_caches { - res.committee_caches_mut() - .clone_from(self.committee_caches()); - *res.total_active_balance_mut() = *self.total_active_balance(); - } - if config.pubkey_cache { - *res.pubkey_cache_mut() = self.pubkey_cache().clone(); - } - if config.exit_cache { - *res.exit_cache_mut() = self.exit_cache().clone(); - } - if config.slashings_cache { - *res.slashings_cache_mut() = self.slashings_cache().clone(); - } - if config.tree_hash_cache { - *res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); - } - if config.progressive_balances_cache { - *res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone(); - } - res - } - - pub fn clone_with_only_committee_caches(&self) -> Self { - self.clone_with(CloneConfig::committee_caches_only()) + self.validators_mut().apply_updates()?; + Ok(self.validators().tree_hash_root()) } /// Passing `previous_epoch` to this function rather than computing it internally provides @@ -1934,7 +1977,8 @@ impl BeaconState { val: &Validator, ) -> Result { Ok(val.is_active_at(previous_epoch) - || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) + || (val.slashed() + && previous_epoch.safe_add(Epoch::new(1))? 
< val.withdrawable_epoch())) } /// Passing `previous_epoch` to this function rather than computing it internally provides @@ -1979,10 +2023,181 @@ impl BeaconState { self.epoch_cache().get_base_reward(validator_index) } - pub fn compute_merkle_proof( - &mut self, - generalized_index: usize, - ) -> Result, Error> { + #[allow(clippy::arithmetic_side_effects)] + pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + // Required for macros (which use type-hints internally). + type GenericValidator = Validator; + + match (&mut *self, base) { + (Self::Base(self_inner), Self::Base(base_inner)) => { + bimap_beacon_state_base_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Base(_), _) => (), + (Self::Altair(self_inner), Self::Altair(base_inner)) => { + bimap_beacon_state_altair_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Altair(_), _) => (), + (Self::Merge(self_inner), Self::Merge(base_inner)) => { + bimap_beacon_state_merge_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Merge(_), _) => (), + (Self::Capella(self_inner), Self::Capella(base_inner)) => { + bimap_beacon_state_capella_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Capella(_), _) => (), + (Self::Deneb(self_inner), Self::Deneb(base_inner)) => { + bimap_beacon_state_deneb_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Deneb(_), _) => (), + (Self::Electra(self_inner), Self::Electra(base_inner)) => { + bimap_beacon_state_electra_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Electra(_), 
_) => (), + } + + // Use sync committees from `base` if they are equal. + if let Ok(current_sync_committee) = self.current_sync_committee_mut() { + if let Ok(base_sync_committee) = base.current_sync_committee() { + if current_sync_committee == base_sync_committee { + *current_sync_committee = base_sync_committee.clone(); + } + } + } + if let Ok(next_sync_committee) = self.next_sync_committee_mut() { + if let Ok(base_sync_committee) = base.next_sync_committee() { + if next_sync_committee == base_sync_committee { + *next_sync_committee = base_sync_committee.clone(); + } + } + } + + // Rebase caches like the committee caches and the pubkey cache, which are expensive to + // rebuild and likely to be re-usable from the base state. + self.rebase_caches_on(base, spec)?; + + Ok(()) + } + + pub fn rebase_caches_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + // Use pubkey cache from `base` if it contains superior information (likely if our cache is + // uninitialized). + let num_validators = self.validators().len(); + let pubkey_cache = self.pubkey_cache_mut(); + let base_pubkey_cache = base.pubkey_cache(); + if pubkey_cache.len() < base_pubkey_cache.len() && pubkey_cache.len() < num_validators { + *pubkey_cache = base_pubkey_cache.clone(); + } + + // Use committee caches from `base` if they are relevant. + let epochs = [ + self.previous_epoch(), + self.current_epoch(), + self.next_epoch()?, + ]; + for (index, epoch) in epochs.into_iter().enumerate() { + if let Ok(base_relative_epoch) = RelativeEpoch::from_epoch(base.current_epoch(), epoch) + { + *self.committee_cache_at_index_mut(index)? = + base.committee_cache(base_relative_epoch)?.clone(); + + // Ensure total active balance cache remains built whenever current committee + // cache is built. 
+ if epoch == self.current_epoch() { + self.build_total_active_balance_cache(spec)?; + } + } + } + + Ok(()) + } +} + +impl BeaconState { + /// The number of fields of the `BeaconState` rounded up to the nearest power of two. + /// + /// This is relevant to tree-hashing of the `BeaconState`. + /// + /// We assume this value is stable across forks. This assumption is checked in the + /// `check_num_fields_pow2` test. + pub const NUM_FIELDS_POW2: usize = BeaconStateMerge::::NUM_FIELDS.next_power_of_two(); + + /// Specialised deserialisation method that uses the `ChainSpec` as context. + #[allow(clippy::arithmetic_side_effects)] + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). + let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); + let slot_end = slot_start + ::ssz_fixed_len(); + + let slot_bytes = bytes + .get(slot_start..slot_end) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_end, + })?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let fork_at_slot = spec.fork_name_at_slot::(slot); + + Ok(map_fork_name!( + fork_at_slot, + Self, + <_>::from_ssz_bytes(bytes)? 
+ )) + } + + #[allow(clippy::arithmetic_side_effects)] + pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { + match self { + Self::Base(inner) => { + inner.previous_epoch_attestations.apply_updates()?; + inner.current_epoch_attestations.apply_updates()?; + map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Altair(inner) => { + map_beacon_state_altair_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Merge(inner) => { + map_beacon_state_bellatrix_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Capella(inner) => { + map_beacon_state_capella_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Deneb(inner) => { + map_beacon_state_deneb_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Electra(inner) => { + map_beacon_state_electra_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + } + self.eth1_data_votes_mut().apply_updates()?; + Ok(()) + } + + pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { // 1. Convert generalized index to field index. let field_index = match generalized_index { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX @@ -1992,7 +2207,7 @@ impl BeaconState { // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate generalized_index - .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .checked_sub(Self::NUM_FIELDS_POW2) .ok_or(Error::IndexNotSupported(generalized_index))? } light_client_update::FINALIZED_ROOT_INDEX => { @@ -2002,20 +2217,47 @@ impl BeaconState { // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches // position of `finalized_checkpoint` in `BeaconState`. finalized_checkpoint_generalized_index - .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .checked_sub(Self::NUM_FIELDS_POW2) .ok_or(Error::IndexNotSupported(generalized_index))? 
} _ => return Err(Error::IndexNotSupported(generalized_index)), }; // 2. Get all `BeaconState` leaves. - self.initialize_tree_hash_cache(); - let mut cache = self - .tree_hash_cache_mut() - .take() - .ok_or(Error::TreeHashCacheNotInitialized)?; - let leaves = cache.recalculate_tree_hash_leaves(self)?; - self.tree_hash_cache_mut().restore(cache); + let mut leaves = vec![]; + #[allow(clippy::arithmetic_side_effects)] + match self { + BeaconState::Base(state) => { + map_beacon_state_base_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Altair(state) => { + map_beacon_state_altair_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Merge(state) => { + map_beacon_state_bellatrix_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Capella(state) => { + map_beacon_state_capella_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Deneb(state) => { + map_beacon_state_deneb_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Electra(state) => { + map_beacon_state_electra_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + }; // 3. Make deposit tree. // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). @@ -2074,9 +2316,10 @@ impl From for Error { } } -/// Helper function for "cloning" a field by using its default value. 
-fn clone_default(_value: &T) -> T { - T::default() +impl From for Error { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } } impl CompareFields for BeaconState { diff --git a/consensus/types/src/beacon_state/clone_config.rs b/consensus/types/src/beacon_state/clone_config.rs deleted file mode 100644 index 27e066d5db6..00000000000 --- a/consensus/types/src/beacon_state/clone_config.rs +++ /dev/null @@ -1,47 +0,0 @@ -/// Configuration struct for controlling which caches of a `BeaconState` should be cloned. -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] -pub struct CloneConfig { - pub committee_caches: bool, - pub pubkey_cache: bool, - pub exit_cache: bool, - pub slashings_cache: bool, - pub tree_hash_cache: bool, - pub progressive_balances_cache: bool, -} - -impl CloneConfig { - pub fn all() -> Self { - Self { - committee_caches: true, - pubkey_cache: true, - exit_cache: true, - slashings_cache: true, - tree_hash_cache: true, - progressive_balances_cache: true, - } - } - - pub fn none() -> Self { - Self::default() - } - - pub fn committee_caches_only() -> Self { - Self { - committee_caches: true, - ..Self::none() - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn sanity() { - assert!(CloneConfig::all().pubkey_cache); - assert!(!CloneConfig::none().tree_hash_cache); - assert!(CloneConfig::committee_caches_only().committee_caches); - assert!(!CloneConfig::committee_caches_only().exit_cache); - } -} diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index a6b12cf5af3..7913df8e00e 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use ssz::{four_byte_option_impl, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::ops::Range; +use std::sync::Arc; use swap_or_not_shuffle::shuffle_list; mod tests; @@ -65,7 +66,7 @@ 
impl CommitteeCache { state: &BeaconState, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { // Check that the cache is being built for an in-range epoch. // // We allow caches to be constructed for historic epochs, per: @@ -115,13 +116,13 @@ impl CommitteeCache { .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into(); } - Ok(CommitteeCache { + Ok(Arc::new(CommitteeCache { initialized_epoch: Some(epoch), shuffling, shuffling_positions, committees_per_slot, slots_per_epoch: E::slots_per_epoch(), - }) + })) } /// Returns `true` if the cache has been initialized at the supplied `epoch`. diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index a5effb9363b..a2274765691 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -92,7 +92,7 @@ async fn shuffles_for_the_right_epoch() { .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); - *state.randao_mixes_mut() = FixedVector::from(distinct_hashes); + *state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap(); let previous_seed = state .get_seed(state.previous_epoch(), Domain::BeaconAttester, spec) diff --git a/consensus/types/src/beacon_state/compact_state.rs b/consensus/types/src/beacon_state/compact_state.rs new file mode 100644 index 00000000000..3f8f47c8541 --- /dev/null +++ b/consensus/types/src/beacon_state/compact_state.rs @@ -0,0 +1,316 @@ +use crate::{ + BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella, BeaconStateDeneb, + BeaconStateElectra, BeaconStateError as Error, BeaconStateMerge, EthSpec, List, PublicKeyBytes, + Validator, ValidatorMutable, +}; +use itertools::process_results; +use std::sync::Arc; + +pub type CompactBeaconState = BeaconState; + +/// Implement the conversion function from BeaconState -> CompactBeaconState. +macro_rules! 
full_to_compact { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + BeaconState::$variant_name($struct_name { + // Versioning + genesis_time: $s.genesis_time, + genesis_validators_root: $s.genesis_validators_root, + slot: $s.slot, + fork: $s.fork, + + // History + latest_block_header: $s.latest_block_header.clone(), + block_roots: $s.block_roots.clone(), + state_roots: $s.state_roots.clone(), + historical_roots: $s.historical_roots.clone(), + + // Eth1 + eth1_data: $s.eth1_data.clone(), + eth1_data_votes: $s.eth1_data_votes.clone(), + eth1_deposit_index: $s.eth1_deposit_index, + + // Validator registry + validators: List::try_from_iter( + $s.validators.into_iter().map(|validator| validator.mutable.clone()) + ).expect("fix this"), + balances: $s.balances.clone(), + + // Shuffling + randao_mixes: $s.randao_mixes.clone(), + + // Slashings + slashings: $s.slashings.clone(), + + // Finality + justification_bits: $s.justification_bits.clone(), + previous_justified_checkpoint: $s.previous_justified_checkpoint, + current_justified_checkpoint: $s.current_justified_checkpoint, + finalized_checkpoint: $s.finalized_checkpoint, + + // Caches. + total_active_balance: $s.total_active_balance.clone(), + committee_caches: $s.committee_caches.clone(), + progressive_balances_cache: $s.progressive_balances_cache.clone(), + pubkey_cache: $s.pubkey_cache.clone(), + exit_cache: $s.exit_cache.clone(), + slashings_cache: $s.slashings_cache.clone(), + epoch_cache: $s.epoch_cache.clone(), + + // Variant-specific fields + $( + $extra_fields: $s.$extra_fields.clone() + ),* + }) + } +} + +/// Implement the conversion from CompactBeaconState -> BeaconState. +macro_rules! 
compact_to_full { + ($inner:ident, $variant_name:ident, $struct_name:ident, $immutable_validators:ident, [$($extra_fields:ident),*]) => { + BeaconState::$variant_name($struct_name { + // Versioning + genesis_time: $inner.genesis_time, + genesis_validators_root: $inner.genesis_validators_root, + slot: $inner.slot, + fork: $inner.fork, + + // History + latest_block_header: $inner.latest_block_header, + block_roots: $inner.block_roots, + state_roots: $inner.state_roots, + historical_roots: $inner.historical_roots, + + // Eth1 + eth1_data: $inner.eth1_data, + eth1_data_votes: $inner.eth1_data_votes, + eth1_deposit_index: $inner.eth1_deposit_index, + + // Validator registry + validators: process_results($inner.validators.into_iter().enumerate().map(|(i, mutable)| { + $immutable_validators(i) + .ok_or(Error::MissingImmutableValidator(i)) + .map(move |pubkey| { + Validator { + pubkey, + mutable: mutable.clone(), + } + }) + }), |iter| List::try_from_iter(iter))??, + balances: $inner.balances, + + // Shuffling + randao_mixes: $inner.randao_mixes, + + // Slashings + slashings: $inner.slashings, + + // Finality + justification_bits: $inner.justification_bits, + previous_justified_checkpoint: $inner.previous_justified_checkpoint, + current_justified_checkpoint: $inner.current_justified_checkpoint, + finalized_checkpoint: $inner.finalized_checkpoint, + + // Caching + total_active_balance: $inner.total_active_balance, + committee_caches: $inner.committee_caches, + progressive_balances_cache: $inner.progressive_balances_cache, + pubkey_cache: $inner.pubkey_cache, + exit_cache: $inner.exit_cache, + slashings_cache: $inner.slashings_cache, + epoch_cache: $inner.epoch_cache, + + // Variant-specific fields + $( + $extra_fields: $inner.$extra_fields + ),* + }) + } +} + +impl BeaconState { + pub fn into_compact_state(self) -> CompactBeaconState { + match self { + BeaconState::Base(s) => full_to_compact!( + s, + self, + Base, + BeaconStateBase, + [previous_epoch_attestations, 
current_epoch_attestations] + ), + BeaconState::Altair(s) => full_to_compact!( + s, + self, + Altair, + BeaconStateAltair, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores + ] + ), + BeaconState::Merge(s) => full_to_compact!( + s, + self, + Merge, + BeaconStateMerge, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), + BeaconState::Capella(s) => full_to_compact!( + s, + self, + Capella, + BeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + BeaconState::Deneb(s) => full_to_compact!( + s, + self, + Deneb, + BeaconStateDeneb, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + BeaconState::Electra(s) => full_to_compact!( + s, + self, + Electra, + BeaconStateElectra, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + } + } +} + +impl CompactBeaconState { + pub fn try_into_full_state(self, immutable_validators: F) -> Result, Error> + where + F: Fn(usize) -> Option>, + { + let state = match self { + BeaconState::Base(inner) => compact_to_full!( + inner, + Base, + BeaconStateBase, + immutable_validators, + [previous_epoch_attestations, current_epoch_attestations] + ), + BeaconState::Altair(inner) => 
compact_to_full!( + inner, + Altair, + BeaconStateAltair, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores + ] + ), + BeaconState::Merge(inner) => compact_to_full!( + inner, + Merge, + BeaconStateMerge, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), + BeaconState::Capella(inner) => compact_to_full!( + inner, + Capella, + BeaconStateCapella, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + BeaconState::Deneb(inner) => compact_to_full!( + inner, + Deneb, + BeaconStateDeneb, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + BeaconState::Electra(inner) => compact_to_full!( + inner, + Electra, + BeaconStateElectra, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + }; + Ok(state) + } +} diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index bda788e63b9..1a570549957 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -1,10 +1,9 @@ use super::{BeaconStateError, ChainSpec, Epoch, 
Validator}; use safe_arith::SafeArith; -use serde::{Deserialize, Serialize}; use std::cmp::Ordering; /// Map from exit epoch to the number of validators with that exit epoch. -#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { /// True if the cache has been initialized. initialized: bool, @@ -16,7 +15,11 @@ pub struct ExitCache { impl ExitCache { /// Initialize a new cache for the given list of validators. - pub fn new(validators: &[Validator], spec: &ChainSpec) -> Result { + pub fn new<'a, V, I>(validators: V, spec: &ChainSpec) -> Result + where + V: IntoIterator, + I: ExactSizeIterator + Iterator, + { let mut exit_cache = ExitCache { initialized: true, max_exit_epoch: Epoch::new(0), @@ -24,9 +27,9 @@ impl ExitCache { }; // Add all validators with a non-default exit epoch to the cache. validators - .iter() - .filter(|validator| validator.exit_epoch != spec.far_future_epoch) - .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch))?; + .into_iter() + .filter(|validator| validator.exit_epoch() != spec.far_future_epoch) + .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch()))?; Ok(exit_cache) } diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/beacon_state/iter.rs index 2d3ad02c836..2caa0365e01 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/beacon_state/iter.rs @@ -74,7 +74,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); for i in 0..state.block_roots().len() { - state.block_roots_mut()[i] = root_slot(i).1; + *state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1; } assert_eq!( @@ -122,7 +122,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); for i in 0..state.block_roots().len() { - state.block_roots_mut()[i] = root_slot(i).1; + *state.block_roots_mut().get_mut(i).unwrap() = 
root_slot(i).1; } assert_eq!( diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index 0b61ea3c5f8..d58dd7bc1dd 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -1,21 +1,21 @@ use crate::*; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; #[allow(clippy::len_without_is_empty)] -#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Default)] pub struct PubkeyCache { - /// Maintain the number of keys added to the map. It is not sufficient to just use the HashMap - /// len, as it does not increase when duplicate keys are added. Duplicate keys are used during - /// testing. + /// Maintain the number of keys added to the map. It is not sufficient to just use the + /// HashTrieMap len, as it does not increase when duplicate keys are added. Duplicate keys are + /// used during testing. len: usize, - map: HashMap, + map: HashTrieMap, } impl PubkeyCache { /// Returns the number of validator indices added to the map so far. + #[allow(clippy::len_without_is_empty)] pub fn len(&self) -> ValidatorIndex { self.len } @@ -26,7 +26,7 @@ impl PubkeyCache { /// that an index is never skipped. 
pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool { if index == self.len { - self.map.insert(pubkey, index); + self.map.insert_mut(pubkey, index); self.len = self .len .checked_add(1) diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/beacon_state/slashings_cache.rs index cfdc349f86c..19813ebbfe1 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/beacon_state/slashings_cache.rs @@ -1,13 +1,13 @@ use crate::{BeaconStateError, Slot, Validator}; use arbitrary::Arbitrary; -use std::collections::HashSet; +use rpds::HashTrieSetSync as HashTrieSet; /// Persistent (cheap to clone) cache of all slashed validator indices. #[derive(Debug, Default, Clone, PartialEq, Arbitrary)] pub struct SlashingsCache { latest_block_slot: Option, #[arbitrary(default)] - slashed_validators: HashSet, + slashed_validators: HashTrieSet, } impl SlashingsCache { @@ -20,7 +20,7 @@ impl SlashingsCache { let slashed_validators = validators .into_iter() .enumerate() - .filter_map(|(i, validator)| validator.slashed.then_some(i)) + .filter_map(|(i, validator)| validator.slashed().then_some(i)) .collect(); Self { latest_block_slot: Some(latest_block_slot), @@ -49,7 +49,7 @@ impl SlashingsCache { validator_index: usize, ) -> Result<(), BeaconStateError> { self.check_initialized(block_slot)?; - self.slashed_validators.insert(validator_index); + self.slashed_validators.insert_mut(validator_index); Ok(()) } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 00625a1788e..226eb9099a0 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -1,20 +1,14 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{ - interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, - DEFAULT_ETH1_BLOCK_HASH, -}; +use crate::{test_utils::*, ForkName}; +use 
beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::{ - test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, - ChainSpec, CloneConfig, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, MainnetEthSpec, - MinimalEthSpec, RelativeEpoch, Slot, + test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella, + BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateMerge, ChainSpec, Domain, + Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector, }; -use safe_arith::SafeArith; use ssz::Encode; -use state_processing::per_slot_processing; use std::ops::Mul; use swap_or_not_shuffle::compute_shuffled_index; -use tree_hash::TreeHash; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); @@ -101,7 +95,12 @@ async fn test_beacon_proposer_index() { // Test with two validators per slot, first validator has zero balance. 
let mut state = build_state::((E::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); - state.validators_mut()[slot0_candidate0].effective_balance = 0; + state + .validators_mut() + .get_mut(slot0_candidate0) + .unwrap() + .mutable + .effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..E::slots_per_epoch() { test(&state, Slot::from(i), 0); @@ -159,85 +158,6 @@ async fn cache_initialization() { test_cache_initialization(&mut state, RelativeEpoch::Next, &spec); } -fn test_clone_config(base_state: &BeaconState, clone_config: CloneConfig) { - let state = base_state.clone_with(clone_config); - if clone_config.committee_caches { - state - .committee_cache(RelativeEpoch::Previous) - .expect("committee cache exists"); - state - .committee_cache(RelativeEpoch::Current) - .expect("committee cache exists"); - state - .committee_cache(RelativeEpoch::Next) - .expect("committee cache exists"); - state - .total_active_balance() - .expect("total active balance exists"); - } else { - state - .committee_cache(RelativeEpoch::Previous) - .expect_err("shouldn't exist"); - state - .committee_cache(RelativeEpoch::Current) - .expect_err("shouldn't exist"); - state - .committee_cache(RelativeEpoch::Next) - .expect_err("shouldn't exist"); - } - if clone_config.pubkey_cache { - assert_ne!(state.pubkey_cache().len(), 0); - } else { - assert_eq!(state.pubkey_cache().len(), 0); - } - if clone_config.exit_cache { - state - .exit_cache() - .check_initialized() - .expect("exit cache exists"); - } else { - state - .exit_cache() - .check_initialized() - .expect_err("exit cache doesn't exist"); - } - if clone_config.tree_hash_cache { - assert!(state.tree_hash_cache().is_initialized()); - } else { - assert!( - !state.tree_hash_cache().is_initialized(), - "{:?}", - clone_config - ); - } -} - -#[tokio::test] -async fn clone_config() { - let spec = MinimalEthSpec::default_spec(); - - let mut state = build_state::(16).await; - - 
state.build_caches(&spec).unwrap(); - state - .update_tree_hash_cache() - .expect("should update tree hash cache"); - - let num_caches = 6; - let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig { - committee_caches: (i & 1) != 0, - pubkey_cache: ((i >> 1) & 1) != 0, - exit_cache: ((i >> 2) & 1) != 0, - slashings_cache: ((i >> 3) & 1) != 0, - tree_hash_cache: ((i >> 4) & 1) != 0, - progressive_balances_cache: ((i >> 5) & 1) != 0, - }); - - for config in all_configs { - test_clone_config(&state, config); - } -} - /// Tests committee-specific components #[cfg(test)] mod committees { @@ -328,10 +248,9 @@ mod committees { let harness = get_harness::(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); - let distinct_hashes: Vec = (0..E::epochs_per_historical_vector()) - .map(|i| Hash256::from_low_u64_be(i as u64)) - .collect(); - *new_head_state.randao_mixes_mut() = FixedVector::from(distinct_hashes); + let distinct_hashes = + (0..E::epochs_per_historical_vector()).map(|i| Hash256::from_low_u64_be(i as u64)); + *new_head_state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap(); new_head_state .force_build_committee_cache(RelativeEpoch::Previous, spec) @@ -487,120 +406,22 @@ fn decode_base_and_altair() { } #[test] -fn tree_hash_cache_linear_history() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let mut state: BeaconState = - BeaconState::Base(BeaconStateBase::random_for_test(&mut rng)); - - let root = state.update_tree_hash_cache().unwrap(); - - assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); - - /* - * A cache should hash twice without updating the slot. - */ - - assert_eq!( - state.update_tree_hash_cache().unwrap(), - root, - "tree hash result should be identical on the same slot" - ); - - /* - * A cache should not hash after updating the slot but not updating the state roots. - */ - - // The tree hash cache needs to be rebuilt since it was dropped when it failed. 
- state - .update_tree_hash_cache() - .expect("should rebuild cache"); - - *state.slot_mut() += 1; - - assert_eq!( - state.update_tree_hash_cache(), - Err(BeaconStateError::NonLinearTreeHashCacheHistory), - "should not build hash without updating the state root" - ); - - /* - * The cache should update if the slot and state root are updated. - */ - - // The tree hash cache needs to be rebuilt since it was dropped when it failed. - let root = state - .update_tree_hash_cache() - .expect("should rebuild cache"); - - *state.slot_mut() += 1; - state - .set_state_root(state.slot() - 1, root) - .expect("should set state root"); - - let root = state.update_tree_hash_cache().unwrap(); - assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); -} - -// Check how the cache behaves when there's a distance larger than `SLOTS_PER_HISTORICAL_ROOT` -// since its last update. -#[test] -fn tree_hash_cache_linear_history_long_skip() { - let validator_count = 128; - let keypairs = generate_deterministic_keypairs(validator_count); - - let spec = &test_spec::(); - - // This state has a cache that advances normally each slot. - let mut state: BeaconState = interop_genesis_state_with_eth1( - &keypairs, - 0, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, - spec, - ) - .unwrap(); - - state.update_tree_hash_cache().unwrap(); - - // This state retains its original cache until it is updated after a long skip. - let mut original_cache_state = state.clone(); - assert!(original_cache_state.tree_hash_cache().is_initialized()); - - // Advance the states to a slot beyond the historical state root limit, using the state root - // from the first state to avoid touching the original state's cache. 
- let start_slot = state.slot(); - let target_slot = start_slot - .safe_add(MinimalEthSpec::slots_per_historical_root() as u64 + 1) - .unwrap(); - - let mut prev_state_root; - while state.slot() < target_slot { - prev_state_root = state.update_tree_hash_cache().unwrap(); - per_slot_processing(&mut state, None, spec).unwrap(); - per_slot_processing(&mut original_cache_state, Some(prev_state_root), spec).unwrap(); +fn check_num_fields_pow2() { + use metastruct::NumFields; + pub type E = MainnetEthSpec; + + for fork_name in ForkName::list_all() { + let num_fields = match fork_name { + ForkName::Base => BeaconStateBase::::NUM_FIELDS, + ForkName::Altair => BeaconStateAltair::::NUM_FIELDS, + ForkName::Merge => BeaconStateMerge::::NUM_FIELDS, + ForkName::Capella => BeaconStateCapella::::NUM_FIELDS, + ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS, + ForkName::Electra => BeaconStateElectra::::NUM_FIELDS, + }; + assert_eq!( + num_fields.next_power_of_two(), + BeaconState::::NUM_FIELDS_POW2 + ); } - - // The state with the original cache should still be initialized at the starting slot. - assert_eq!( - original_cache_state - .tree_hash_cache() - .initialized_slot() - .unwrap(), - start_slot - ); - - // Updating the tree hash cache should be successful despite the long skip. 
- assert_eq!( - original_cache_state.update_tree_hash_cache().unwrap(), - state.update_tree_hash_cache().unwrap() - ); - - assert_eq!( - original_cache_state - .tree_hash_cache() - .initialized_slot() - .unwrap(), - target_slot - ); } diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs deleted file mode 100644 index 290020b1b35..00000000000 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ /dev/null @@ -1,645 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -#![allow(clippy::disallowed_methods)] -#![allow(clippy::indexing_slicing)] - -use super::Error; -use crate::historical_summary::HistoricalSummaryCache; -use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; -use rayon::prelude::*; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use std::cmp::Ordering; -use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; - -/// The number of leaves (including padding) on the `BeaconState` Merkle tree. -/// -/// ## Note -/// -/// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the -/// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** -pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; - -/// The number of nodes in the Merkle tree of a validator record. -const NODES_PER_VALIDATOR: usize = 15; - -/// The number of validator record tree hash caches stored in each arena. -/// -/// This is primarily used for concurrency; if we have 16 validators and set `VALIDATORS_PER_ARENA -/// == 8` then it is possible to do a 2-core concurrent hash. -/// -/// Do not set to 0. 
-const VALIDATORS_PER_ARENA: usize = 4_096; - -#[derive(Debug, PartialEq, Clone, Encode, Decode)] -pub struct Eth1DataVotesTreeHashCache { - arena: CacheArena, - tree_hash_cache: TreeHashCache, - voting_period: u64, - roots: VariableList, -} - -impl Eth1DataVotesTreeHashCache { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are - /// hashed, leaving the internal nodes as all-zeros. - pub fn new(state: &BeaconState) -> Self { - let mut arena = CacheArena::default(); - let roots: VariableList<_, _> = state - .eth1_data_votes() - .iter() - .map(|eth1_data| eth1_data.tree_hash_root()) - .collect::>() - .into(); - let tree_hash_cache = roots.new_tree_hash_cache(&mut arena); - - Self { - arena, - tree_hash_cache, - voting_period: Self::voting_period(state.slot()), - roots, - } - } - - fn voting_period(slot: Slot) -> u64 { - slot.as_u64() / E::SlotsPerEth1VotingPeriod::to_u64() - } - - pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - if state.eth1_data_votes().len() < self.roots.len() - || Self::voting_period(state.slot()) != self.voting_period - { - *self = Self::new(state); - } - - state - .eth1_data_votes() - .iter() - .skip(self.roots.len()) - .try_for_each(|eth1_data| self.roots.push(eth1_data.tree_hash_root()))?; - - self.roots - .recalculate_tree_hash_root(&mut self.arena, &mut self.tree_hash_cache) - .map_err(Into::into) - } -} - -/// A cache that performs a caching tree hash of the entire `BeaconState` struct. -/// -/// This type is a wrapper around the inner cache, which does all the work. 
-#[derive(Debug, Default, PartialEq, Clone)] -pub struct BeaconTreeHashCache { - inner: Option>, -} - -impl BeaconTreeHashCache { - pub fn new(state: &BeaconState) -> Self { - Self { - inner: Some(BeaconTreeHashCacheInner::new(state)), - } - } - - pub fn is_initialized(&self) -> bool { - self.inner.is_some() - } - - /// Move the inner cache out so that the containing `BeaconState` can be borrowed. - pub fn take(&mut self) -> Option> { - self.inner.take() - } - - /// Restore the inner cache after using `take`. - pub fn restore(&mut self, inner: BeaconTreeHashCacheInner) { - self.inner = Some(inner); - } - - /// Make the cache empty. - pub fn uninitialize(&mut self) { - self.inner = None; - } - - /// Return the slot at which the cache was last updated. - /// - /// This should probably only be used during testing. - pub fn initialized_slot(&self) -> Option { - Some(self.inner.as_ref()?.previous_state?.1) - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct BeaconTreeHashCacheInner { - /// Tracks the previously generated state root to ensure the next state root provided descends - /// directly from this state. - previous_state: Option<(Hash256, Slot)>, - // Validators cache - validators: ValidatorsListTreeHashCache, - // Arenas - fixed_arena: CacheArena, - balances_arena: CacheArena, - slashings_arena: CacheArena, - // Caches - block_roots: TreeHashCache, - state_roots: TreeHashCache, - historical_roots: TreeHashCache, - historical_summaries: OptionalTreeHashCache, - balances: TreeHashCache, - randao_mixes: TreeHashCache, - slashings: TreeHashCache, - eth1_data_votes: Eth1DataVotesTreeHashCache, - inactivity_scores: OptionalTreeHashCache, - // Participation caches - previous_epoch_participation: OptionalTreeHashCache, - current_epoch_participation: OptionalTreeHashCache, -} - -impl BeaconTreeHashCacheInner { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees. 
Only the leaves are - /// hashed, leaving the internal nodes as all-zeros. - pub fn new(state: &BeaconState) -> Self { - let mut fixed_arena = CacheArena::default(); - let block_roots = state.block_roots().new_tree_hash_cache(&mut fixed_arena); - let state_roots = state.state_roots().new_tree_hash_cache(&mut fixed_arena); - let historical_roots = state - .historical_roots() - .new_tree_hash_cache(&mut fixed_arena); - let historical_summaries = OptionalTreeHashCache::new( - state - .historical_summaries() - .ok() - .map(HistoricalSummaryCache::new) - .as_ref(), - ); - - let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); - - let validators = ValidatorsListTreeHashCache::new::(state.validators()); - - let mut balances_arena = CacheArena::default(); - let balances = state.balances().new_tree_hash_cache(&mut balances_arena); - - let mut slashings_arena = CacheArena::default(); - let slashings = state.slashings().new_tree_hash_cache(&mut slashings_arena); - - let inactivity_scores = OptionalTreeHashCache::new(state.inactivity_scores().ok()); - - let previous_epoch_participation = OptionalTreeHashCache::new( - state - .previous_epoch_participation() - .ok() - .map(ParticipationList::new) - .as_ref(), - ); - let current_epoch_participation = OptionalTreeHashCache::new( - state - .current_epoch_participation() - .ok() - .map(ParticipationList::new) - .as_ref(), - ); - - Self { - previous_state: None, - validators, - fixed_arena, - balances_arena, - slashings_arena, - block_roots, - state_roots, - historical_roots, - historical_summaries, - balances, - randao_mixes, - slashings, - inactivity_scores, - eth1_data_votes: Eth1DataVotesTreeHashCache::new(state), - previous_epoch_participation, - current_epoch_participation, - } - } - - pub fn recalculate_tree_hash_leaves( - &mut self, - state: &BeaconState, - ) -> Result, Error> { - let mut leaves = vec![ - // Genesis data leaves. 
- state.genesis_time().tree_hash_root(), - state.genesis_validators_root().tree_hash_root(), - // Current fork data leaves. - state.slot().tree_hash_root(), - state.fork().tree_hash_root(), - state.latest_block_header().tree_hash_root(), - // Roots leaves. - state - .block_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?, - state - .state_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?, - state - .historical_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?, - // Eth1 Data leaves. - state.eth1_data().tree_hash_root(), - self.eth1_data_votes.recalculate_tree_hash_root(state)?, - state.eth1_deposit_index().tree_hash_root(), - // Validator leaves. - self.validators - .recalculate_tree_hash_root(state.validators())?, - state - .balances() - .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?, - state - .randao_mixes() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?, - state - .slashings() - .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, - ]; - - // Participation - if let BeaconState::Base(state) = state { - leaves.push(state.previous_epoch_attestations.tree_hash_root()); - leaves.push(state.current_epoch_attestations.tree_hash_root()); - } else { - leaves.push( - self.previous_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.previous_epoch_participation()?, - ))?, - ); - leaves.push( - self.current_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.current_epoch_participation()?, - ))?, - ); - } - // Checkpoint leaves - leaves.push(state.justification_bits().tree_hash_root()); - leaves.push(state.previous_justified_checkpoint().tree_hash_root()); - leaves.push(state.current_justified_checkpoint().tree_hash_root()); - leaves.push(state.finalized_checkpoint().tree_hash_root()); - // Inactivity & light-client sync 
committees (Altair and later). - if let Ok(inactivity_scores) = state.inactivity_scores() { - leaves.push( - self.inactivity_scores - .recalculate_tree_hash_root(inactivity_scores)?, - ); - } - if let Ok(current_sync_committee) = state.current_sync_committee() { - leaves.push(current_sync_committee.tree_hash_root()); - } - - if let Ok(next_sync_committee) = state.next_sync_committee() { - leaves.push(next_sync_committee.tree_hash_root()); - } - - // Execution payload (merge and later). - if let Ok(payload_header) = state.latest_execution_payload_header() { - leaves.push(payload_header.tree_hash_root()); - } - - // Withdrawal indices (Capella and later). - if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { - leaves.push(next_withdrawal_index.tree_hash_root()); - } - if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { - leaves.push(next_withdrawal_validator_index.tree_hash_root()); - } - - // Historical roots/summaries (Capella and later). - if let Ok(historical_summaries) = state.historical_summaries() { - leaves.push( - self.historical_summaries.recalculate_tree_hash_root( - &HistoricalSummaryCache::new(historical_summaries), - )?, - ); - } - - Ok(leaves) - } - - /// Updates the cache and returns the tree hash root for the given `state`. - /// - /// The provided `state` should be a descendant of the last `state` given to this function, or - /// the `Self::new` function. If the state is more than `SLOTS_PER_HISTORICAL_ROOT` slots - /// after `self.previous_state` then the whole cache will be re-initialized. - pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - // If this cache has previously produced a root, ensure that it is in the state root - // history of this state. - // - // This ensures that the states applied have a linear history, this - // allows us to make assumptions about how the state changes over times and produce a more - // efficient algorithm. 
- if let Some((previous_root, previous_slot)) = self.previous_state { - // The previously-hashed state must not be newer than `state`. - if previous_slot > state.slot() { - return Err(Error::TreeHashCacheSkippedSlot { - cache: previous_slot, - state: state.slot(), - }); - } - - // If the state is newer, the previous root must be in the history of the given state. - // If the previous slot is out of range of the `state_roots` array (indicating a long - // gap between the cache's last use and the current state) then we re-initialize. - match state.get_state_root(previous_slot) { - Ok(state_previous_root) if *state_previous_root == previous_root => {} - Ok(_) => return Err(Error::NonLinearTreeHashCacheHistory), - Err(Error::SlotOutOfBounds) => { - *self = Self::new(state); - } - Err(e) => return Err(e), - } - } - - let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - - let leaves = self.recalculate_tree_hash_leaves(state)?; - for leaf in leaves { - hasher.write(leaf.as_bytes())?; - } - - let root = hasher.finish()?; - - self.previous_state = Some((root, state.slot())); - - Ok(root) - } - - /// Updates the cache and provides the root of the given `validators`. - pub fn recalculate_validators_tree_hash_root( - &mut self, - validators: &[Validator], - ) -> Result { - self.validators.recalculate_tree_hash_root(validators) - } -} - -/// A specialized cache for computing the tree hash root of `state.validators`. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -struct ValidatorsListTreeHashCache { - list_arena: CacheArena, - list_cache: TreeHashCache, - values: ParallelValidatorTreeHash, -} - -impl ValidatorsListTreeHashCache { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any - /// hashing. 
- fn new(validators: &[Validator]) -> Self { - let mut list_arena = CacheArena::default(); - Self { - list_cache: TreeHashCache::new( - &mut list_arena, - int_log(E::ValidatorRegistryLimit::to_usize()), - validators.len(), - ), - list_arena, - values: ParallelValidatorTreeHash::new(validators), - } - } - - /// Updates the cache and returns the tree hash root for the given `state`. - /// - /// This function makes assumptions that the `validators` list will only change in accordance - /// with valid per-block/per-slot state transitions. - fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result { - let mut list_arena = std::mem::take(&mut self.list_arena); - - let leaves = self.values.leaves(validators)?; - let num_leaves = leaves.iter().map(|arena| arena.len()).sum(); - - let leaves_iter = ForcedExactSizeIterator { - iter: leaves.into_iter().flatten().map(|h| h.to_fixed_bytes()), - len: num_leaves, - }; - - let list_root = self - .list_cache - .recalculate_merkle_root(&mut list_arena, leaves_iter)?; - - self.list_arena = list_arena; - - Ok(mix_in_length(&list_root, validators.len())) - } -} - -/// Provides a wrapper around some `iter` if the number of items in the iterator is known to the -/// programmer but not the compiler. This allows use of `ExactSizeIterator` in some occasions. -/// -/// Care should be taken to ensure `len` is accurate. -struct ForcedExactSizeIterator { - iter: I, - len: usize, -} - -impl> Iterator for ForcedExactSizeIterator { - type Item = V; - - fn next(&mut self) -> Option { - self.iter.next() - } -} - -impl> ExactSizeIterator for ForcedExactSizeIterator { - fn len(&self) -> usize { - self.len - } -} - -/// Provides a cache for each of the `Validator` objects in `state.validators` and computes the -/// roots of these using Rayon parallelization. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct ParallelValidatorTreeHash { - /// Each arena and its associated sub-trees. 
- arenas: Vec<(CacheArena, Vec)>, -} - -impl ParallelValidatorTreeHash { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any - /// hashing. - fn new(validators: &[Validator]) -> Self { - let num_arenas = std::cmp::max( - 1, - (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA, - ); - - let mut arenas = (1..=num_arenas) - .map(|i| { - let num_validators = if i == num_arenas { - validators.len() % VALIDATORS_PER_ARENA - } else { - VALIDATORS_PER_ARENA - }; - NODES_PER_VALIDATOR * num_validators - }) - .map(|capacity| (CacheArena::with_capacity(capacity), vec![])) - .collect::>(); - - validators.iter().enumerate().for_each(|(i, v)| { - let (arena, caches) = &mut arenas[i / VALIDATORS_PER_ARENA]; - caches.push(v.new_tree_hash_cache(arena)) - }); - - Self { arenas } - } - - /// Returns the number of validators stored in self. - fn len(&self) -> usize { - self.arenas.last().map_or(0, |last| { - // Subtraction cannot underflow because `.last()` ensures the `.len() > 0`. - (self.arenas.len() - 1) * VALIDATORS_PER_ARENA + last.1.len() - }) - } - - /// Updates the caches for each `Validator` in `validators` and returns a list that maps 1:1 - /// with `validators` to the hash of each validator. - /// - /// This function makes assumptions that the `validators` list will only change in accordance - /// with valid per-block/per-slot state transitions. 
- fn leaves(&mut self, validators: &[Validator]) -> Result>, Error> { - match self.len().cmp(&validators.len()) { - Ordering::Less => validators.iter().skip(self.len()).for_each(|v| { - if self - .arenas - .last() - .map_or(true, |last| last.1.len() >= VALIDATORS_PER_ARENA) - { - let mut arena = CacheArena::default(); - let cache = v.new_tree_hash_cache(&mut arena); - self.arenas.push((arena, vec![cache])) - } else { - let (arena, caches) = &mut self - .arenas - .last_mut() - .expect("Cannot reach this block if arenas is empty."); - caches.push(v.new_tree_hash_cache(arena)) - } - }), - Ordering::Greater => { - return Err(Error::ValidatorRegistryShrunk); - } - Ordering::Equal => (), - } - - self.arenas - .par_iter_mut() - .enumerate() - .map(|(arena_index, (arena, caches))| { - caches - .iter_mut() - .enumerate() - .map(move |(cache_index, cache)| { - let val_index = (arena_index * VALIDATORS_PER_ARENA) + cache_index; - - let validator = validators - .get(val_index) - .ok_or(Error::TreeHashCacheInconsistent)?; - - validator - .recalculate_tree_hash_root(arena, cache) - .map_err(Error::CachedTreeHashError) - }) - .collect() - }) - .collect() - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct OptionalTreeHashCache { - inner: Option, -} - -#[derive(Debug, PartialEq, Clone)] -pub struct OptionalTreeHashCacheInner { - arena: CacheArena, - tree_hash_cache: TreeHashCache, -} - -impl OptionalTreeHashCache { - /// Initialize a new cache if `item.is_some()`. - fn new>(item: Option<&C>) -> Self { - let inner = item.map(OptionalTreeHashCacheInner::new); - Self { inner } - } - - /// Compute the tree hash root for the given `item`. - /// - /// This function will initialize the inner cache if necessary (e.g. when crossing the fork). 
- fn recalculate_tree_hash_root>( - &mut self, - item: &C, - ) -> Result { - let cache = self - .inner - .get_or_insert_with(|| OptionalTreeHashCacheInner::new(item)); - item.recalculate_tree_hash_root(&mut cache.arena, &mut cache.tree_hash_cache) - .map_err(Into::into) - } -} - -impl OptionalTreeHashCacheInner { - fn new>(item: &C) -> Self { - let mut arena = CacheArena::default(); - let tree_hash_cache = item.new_tree_hash_cache(&mut arena); - OptionalTreeHashCacheInner { - arena, - tree_hash_cache, - } - } -} - -impl arbitrary::Arbitrary<'_> for BeaconTreeHashCache { - fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - Ok(Self::default()) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{MainnetEthSpec, ParticipationFlags}; - - #[test] - fn validator_node_count() { - let mut arena = CacheArena::default(); - let v = Validator::default(); - let _cache = v.new_tree_hash_cache(&mut arena); - assert_eq!(arena.backing_len(), NODES_PER_VALIDATOR); - } - - #[test] - fn participation_flags() { - type N = ::ValidatorRegistryLimit; - let len = 65; - let mut test_flag = ParticipationFlags::default(); - test_flag.add_flag(0).unwrap(); - let epoch_participation = VariableList::<_, N>::new(vec![test_flag; len]).unwrap(); - - let mut cache = OptionalTreeHashCache { inner: None }; - - let cache_root = cache - .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation)) - .unwrap(); - let recalc_root = cache - .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation)) - .unwrap(); - - assert_eq!(cache_root, recalc_root, "recalculated root should match"); - assert_eq!( - cache_root, - epoch_participation.tree_hash_root(), - "cached root should match uncached" - ); - } -} diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 31b1307aa7f..e54bc2f4f97 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,7 +1,7 @@ use 
crate::test_utils::TestRandom; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, - EthSpec, Hash256, SignedBeaconBlockHeader, Slot, + EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; use crate::{KzgProofs, SignedBeaconBlock}; use bls::Signature; @@ -16,7 +16,6 @@ use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{FixedVector, VariableList}; use std::fmt::Debug; use std::hash::Hash; use std::sync::Arc; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 988bd6755dc..f4e8d4e8b05 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -309,6 +309,13 @@ impl ChainSpec { } } + /// Return the name of the fork activated at `slot`, if any. + pub fn fork_activated_at_slot(&self, slot: Slot) -> Option { + let prev_slot_fork = self.fork_name_at_slot::(slot.saturating_sub(Slot::new(1))); + let slot_fork = self.fork_name_at_slot::(slot); + (slot_fork != prev_slot_fork).then_some(slot_fork) + } + /// Returns the fork version for a named fork. 
pub fn fork_version_for_name(&self, fork_name: ForkName) -> [u8; 4] { match fork_name { diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 27dc8cab0a4..68e8f6f444f 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -6,6 +6,8 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +// FIXME(sproul): try milhouse Vector + pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index f10f449d6de..4783e2f3bd6 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -32,7 +32,8 @@ use tree_hash_derive::TreeHash; tree_hash(enum_behaviour = "transparent") ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_ref_into(ExecutionPayloadHeader) )] #[derive( Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, @@ -347,6 +348,27 @@ impl TryFrom> for ExecutionPayloadHeaderDe } } +impl<'a, E: EthSpec> ExecutionPayloadHeaderRefMut<'a, E> { + /// Mutate through + pub fn replace(self, header: ExecutionPayloadHeader) -> Result<(), BeaconStateError> { + match self { + ExecutionPayloadHeaderRefMut::Merge(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Capella(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Deneb(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Electra(mut_ref) => { + *mut_ref = header.try_into()?; + } + } + Ok(()) + } +} + impl TryFrom> for 
ExecutionPayloadHeaderElectra { type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 1c565c0092d..7bac9699eb6 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -23,8 +23,10 @@ use tree_hash_derive::TreeHash; )] #[arbitrary(bound = "E: EthSpec")] pub struct HistoricalBatch { - pub block_roots: FixedVector, - pub state_roots: FixedVector, + #[test_random(default)] + pub block_roots: Vector, + #[test_random(default)] + pub state_roots: Vector, } #[cfg(test)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 6551ebc1dda..82524e069b1 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -91,7 +91,6 @@ pub mod sync_committee_contribution; pub mod sync_committee_message; pub mod sync_selection_proof; pub mod sync_subnet_id; -mod tree_hash_impls; pub mod validator_registration_data; pub mod withdrawal; @@ -125,7 +124,7 @@ pub use crate::beacon_block_body::{ }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::beacon_state::{compact_state::CompactBeaconState, Error as BeaconStateError, *}; pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; @@ -221,7 +220,7 @@ pub use crate::sync_committee_subscription::SyncCommitteeSubscription; pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::Validator; +pub use crate::validator::{Validator, ValidatorMutable}; pub use crate::validator_registration_data::*; pub use 
crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; @@ -243,8 +242,7 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; - pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; - +pub use milhouse::{self, List, Vector}; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 43bab325f3c..f76d710e4d6 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,8 +1,8 @@ -use super::{BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; +use super::{BeaconState, EthSpec, Hash256, SyncCommittee}; use crate::{ - light_client_update::*, test_utils::TestRandom, ChainSpec, ForkName, ForkVersionDeserialize, - LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock, - Slot, + light_client_update::*, test_utils::TestRandom, ChainSpec, FixedVector, ForkName, + ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, SignedBeaconBlock, Slot, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 288527e91cb..3d0bfd115a7 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -58,6 +58,7 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Deneb), partial_getter(rename = "finalized_header_deneb"))] pub finalized_header: LightClientHeaderDeneb, /// Merkle proof attesting finalized header. 
+ #[test_random(default)] pub finality_branch: FixedVector, /// current sync aggreggate pub sync_aggregate: SyncAggregate, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index af9cbc16610..d5e8cd592df 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -37,6 +37,7 @@ pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), BeaconStateError(beacon_state::Error), ArithError(ArithError), AltairForkNotActive, @@ -65,6 +66,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: milhouse::Error) -> Error { + Error::MilhouseError(e) + } +} + /// A LightClientUpdate is the update we request solely to either complete the bootstrapping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 0adaf81bd7d..72a7a036ccc 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -87,7 +87,7 @@ where } } -impl TestRandom for FixedVector +impl TestRandom for ssz_types::FixedVector where T: TestRandom, { diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs deleted file mode 100644 index eb3660d4666..00000000000 --- a/consensus/types/src/tree_hash_impls.rs +++ /dev/null @@ -1,165 +0,0 @@ -//! This module contains custom implementations of `CachedTreeHash` for ETH2-specific types. -//! -//! It makes some assumptions about the layouts and update patterns of other structs in this -//! crate, and should be updated carefully whenever those structs are changed. 
-use crate::{Epoch, Hash256, PublicKeyBytes, Validator}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; -use int_to_bytes::int_to_fixed_bytes32; -use tree_hash::merkle_root; - -/// Number of struct fields on `Validator`. -const NUM_VALIDATOR_FIELDS: usize = 8; - -impl CachedTreeHash for Validator { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new(arena, int_log(NUM_VALIDATOR_FIELDS), NUM_VALIDATOR_FIELDS) - } - - /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. - /// - /// Specifically, we assume that the `pubkey` field is constant. - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - // Otherwise just check the fields which might have changed. - let dirty_indices = cache - .leaves() - .iter_mut(arena)? - .enumerate() - .flat_map(|(i, leaf)| { - // Pubkey field (index 0) is constant. - if i == 0 && cache.initialized { - None - } else if process_field_by_index(self, i, leaf, !cache.initialized) { - Some(i) - } else { - None - } - }) - .collect(); - - cache.update_merkle_root(arena, dirty_indices) - } -} - -fn process_field_by_index( - v: &Validator, - field_idx: usize, - leaf: &mut Hash256, - force_update: bool, -) -> bool { - match field_idx { - 0 => process_pubkey_bytes_field(&v.pubkey, leaf, force_update), - 1 => process_slice_field(v.withdrawal_credentials.as_bytes(), leaf, force_update), - 2 => process_u64_field(v.effective_balance, leaf, force_update), - 3 => process_bool_field(v.slashed, leaf, force_update), - 4 => process_epoch_field(v.activation_eligibility_epoch, leaf, force_update), - 5 => process_epoch_field(v.activation_epoch, leaf, force_update), - 6 => process_epoch_field(v.exit_epoch, leaf, force_update), - 7 => process_epoch_field(v.withdrawable_epoch, leaf, force_update), - _ => panic!( - "Validator type only has {} fields, {} out of bounds", - 
NUM_VALIDATOR_FIELDS, field_idx - ), - } -} - -fn process_pubkey_bytes_field( - val: &PublicKeyBytes, - leaf: &mut Hash256, - force_update: bool, -) -> bool { - let new_tree_hash = merkle_root(val.as_serialized(), 0); - process_slice_field(new_tree_hash.as_bytes(), leaf, force_update) -} - -fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool { - if force_update || leaf.as_bytes() != new_tree_hash { - leaf.assign_from_slice(new_tree_hash); - true - } else { - false - } -} - -fn process_u64_field(val: u64, leaf: &mut Hash256, force_update: bool) -> bool { - let new_tree_hash = int_to_fixed_bytes32(val); - process_slice_field(&new_tree_hash[..], leaf, force_update) -} - -fn process_epoch_field(val: Epoch, leaf: &mut Hash256, force_update: bool) -> bool { - process_u64_field(val.as_u64(), leaf, force_update) -} - -fn process_bool_field(val: bool, leaf: &mut Hash256, force_update: bool) -> bool { - process_u64_field(val as u64, leaf, force_update) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::test_utils::TestRandom; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use tree_hash::TreeHash; - - fn test_validator_tree_hash(v: &Validator) { - let arena = &mut CacheArena::default(); - - let mut cache = v.new_tree_hash_cache(arena); - // With a fresh cache - assert_eq!( - &v.tree_hash_root()[..], - v.recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes(), - "{:?}", - v - ); - // With a completely up-to-date cache - assert_eq!( - &v.tree_hash_root()[..], - v.recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes(), - "{:?}", - v - ); - } - - #[test] - fn default_validator() { - test_validator_tree_hash(&Validator::default()); - } - - #[test] - fn zeroed_validator() { - let v = Validator { - activation_eligibility_epoch: Epoch::from(0u64), - activation_epoch: Epoch::from(0u64), - ..Default::default() - }; - test_validator_tree_hash(&v); - } - - #[test] - fn random_validators() { - 
let mut rng = XorShiftRng::from_seed([0xf1; 16]); - let num_validators = 1000; - (0..num_validators) - .map(|_| Validator::random_for_test(&mut rng)) - .for_each(|v| test_validator_tree_hash(&v)); - } - - #[test] - #[allow(clippy::assertions_on_constants)] - pub fn smallvec_size_check() { - // If this test fails we need to go and reassess the length of the `SmallVec` in - // `cached_tree_hash::TreeHashCache`. If the size of the `SmallVec` is too slow we're going - // to start doing heap allocations for each validator, this will fragment memory and slow - // us down. - assert!(NUM_VALIDATOR_FIELDS <= 8,); - } -} diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 98567cd1e6c..349f4a9b16f 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -2,28 +2,34 @@ use crate::{ test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, }; +use arbitrary::Arbitrary; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use std::sync::Arc; use test_random_derive::TestRandom; +use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +const NUM_FIELDS: usize = 8; + /// Information about a `BeaconChain` validator. /// /// Spec v0.12.1 #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, + Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, )] +#[serde(deny_unknown_fields)] pub struct Validator { - pub pubkey: PublicKeyBytes, + pub pubkey: Arc, + #[serde(flatten)] + pub mutable: ValidatorMutable, +} + +/// The mutable fields of a validator. 
+#[derive( + Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Arbitrary, +)] +pub struct ValidatorMutable { pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, @@ -34,47 +40,148 @@ pub struct Validator { pub withdrawable_epoch: Epoch, } +pub trait ValidatorTrait: + std::fmt::Debug + + PartialEq + + Clone + + serde::Serialize + + Send + + Sync + + serde::de::DeserializeOwned + + ssz::Encode + + ssz::Decode + + TreeHash + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> +{ +} + +impl ValidatorTrait for Validator {} +impl ValidatorTrait for ValidatorMutable {} + impl Validator { + pub fn pubkey(&self) -> &PublicKeyBytes { + &self.pubkey + } + + pub fn pubkey_clone(&self) -> Arc { + self.pubkey.clone() + } + + /// Replace the validator's pubkey (should only be used during testing). + pub fn replace_pubkey(&mut self, pubkey: PublicKeyBytes) { + self.pubkey = Arc::new(pubkey); + } + + #[inline] + pub fn withdrawal_credentials(&self) -> Hash256 { + self.mutable.withdrawal_credentials + } + + #[inline] + pub fn effective_balance(&self) -> u64 { + self.mutable.effective_balance + } + + #[inline] + pub fn slashed(&self) -> bool { + self.mutable.slashed + } + + #[inline] + pub fn activation_eligibility_epoch(&self) -> Epoch { + self.mutable.activation_eligibility_epoch + } + + #[inline] + pub fn activation_epoch(&self) -> Epoch { + self.mutable.activation_epoch + } + + #[inline] + pub fn activation_epoch_mut(&mut self) -> &mut Epoch { + &mut self.mutable.activation_epoch + } + + #[inline] + pub fn exit_epoch(&self) -> Epoch { + self.mutable.exit_epoch + } + + pub fn exit_epoch_mut(&mut self) -> &mut Epoch { + &mut self.mutable.exit_epoch + } + + #[inline] + pub fn withdrawable_epoch(&self) -> Epoch { + self.mutable.withdrawable_epoch + } + /// Returns `true` if the validator is considered active at some epoch. 
+ #[inline] pub fn is_active_at(&self, epoch: Epoch) -> bool { - self.activation_epoch <= epoch && epoch < self.exit_epoch + self.activation_epoch() <= epoch && epoch < self.exit_epoch() } /// Returns `true` if the validator is slashable at some epoch. + #[inline] pub fn is_slashable_at(&self, epoch: Epoch) -> bool { - !self.slashed && self.activation_epoch <= epoch && epoch < self.withdrawable_epoch + !self.slashed() && self.activation_epoch() <= epoch && epoch < self.withdrawable_epoch() } /// Returns `true` if the validator is considered exited at some epoch. + #[inline] pub fn is_exited_at(&self, epoch: Epoch) -> bool { - self.exit_epoch <= epoch + self.exit_epoch() <= epoch } /// Returns `true` if the validator is able to withdraw at some epoch. + #[inline] pub fn is_withdrawable_at(&self, epoch: Epoch) -> bool { - epoch >= self.withdrawable_epoch + epoch >= self.withdrawable_epoch() } /// Returns `true` if the validator is eligible to join the activation queue. /// /// Spec v0.12.1 + #[inline] pub fn is_eligible_for_activation_queue(&self, spec: &ChainSpec) -> bool { - self.activation_eligibility_epoch == spec.far_future_epoch - && self.effective_balance == spec.max_effective_balance + self.activation_eligibility_epoch() == spec.far_future_epoch + && self.effective_balance() == spec.max_effective_balance } /// Returns `true` if the validator is eligible to be activated. 
/// /// Spec v0.12.1 + #[inline] pub fn is_eligible_for_activation( &self, state: &BeaconState, spec: &ChainSpec, ) -> bool { - // Placement in queue is finalized - self.activation_eligibility_epoch <= state.finalized_checkpoint().epoch // Has not yet been activated - && self.activation_epoch == spec.far_future_epoch + self.activation_epoch() == spec.far_future_epoch && + // Placement in queue is finalized + self.activation_eligibility_epoch() <= state.finalized_checkpoint().epoch + } + + fn tree_hash_root_internal(&self) -> Result { + let mut hasher = tree_hash::MerkleHasher::with_leaves(NUM_FIELDS); + + hasher.write(self.pubkey().tree_hash_root().as_bytes())?; + hasher.write(self.withdrawal_credentials().tree_hash_root().as_bytes())?; + hasher.write(self.effective_balance().tree_hash_root().as_bytes())?; + hasher.write(self.slashed().tree_hash_root().as_bytes())?; + hasher.write( + self.activation_eligibility_epoch() + .tree_hash_root() + .as_bytes(), + )?; + hasher.write(self.activation_epoch().tree_hash_root().as_bytes())?; + hasher.write(self.exit_epoch().tree_hash_root().as_bytes())?; + hasher.write(self.withdrawable_epoch().tree_hash_root().as_bytes())?; + + hasher.finish() } /// Returns `true` if the validator *could* be eligible for activation at `epoch`. @@ -84,18 +191,18 @@ impl Validator { /// the epoch transition at the end of `epoch`. pub fn could_be_eligible_for_activation_at(&self, epoch: Epoch, spec: &ChainSpec) -> bool { // Has not yet been activated - self.activation_epoch == spec.far_future_epoch + self.activation_epoch() == spec.far_future_epoch // Placement in queue could be finalized. // // NOTE: the epoch distance is 1 rather than 2 because we consider the activations that // occur at the *end* of `epoch`, after `process_justification_and_finalization` has already // updated the state's checkpoint. 
- && self.activation_eligibility_epoch < epoch + && self.activation_eligibility_epoch() < epoch } /// Returns `true` if the validator has eth1 withdrawal credential. pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { - self.withdrawal_credentials + self.withdrawal_credentials() .as_bytes() .first() .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) @@ -106,7 +213,7 @@ impl Validator { pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ self.has_eth1_withdrawal_credential(spec) .then(|| { - self.withdrawal_credentials + self.withdrawal_credentials() .as_bytes() .get(12..) .map(Address::from_slice) @@ -121,28 +228,37 @@ impl Validator { let mut bytes = [0u8; 32]; bytes[0] = spec.eth1_address_withdrawal_prefix_byte; bytes[12..].copy_from_slice(execution_address.as_bytes()); - self.withdrawal_credentials = Hash256::from(bytes); + self.mutable.withdrawal_credentials = Hash256::from(bytes); } /// Returns `true` if the validator is fully withdrawable at some epoch. pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { - self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 + self.has_eth1_withdrawal_credential(spec) + && self.withdrawable_epoch() <= epoch + && balance > 0 } /// Returns `true` if the validator is partially withdrawable. pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) - && self.effective_balance == spec.max_effective_balance + && self.effective_balance() == spec.max_effective_balance && balance > spec.max_effective_balance } } impl Default for Validator { - /// Yields a "default" `Validator`. Primarily used for testing. 
fn default() -> Self { - Self { - pubkey: PublicKeyBytes::empty(), - withdrawal_credentials: Hash256::default(), + Validator { + pubkey: Arc::new(PublicKeyBytes::empty()), + mutable: <_>::default(), + } + } +} + +impl Default for ValidatorMutable { + fn default() -> Self { + ValidatorMutable { + withdrawal_credentials: Hash256::zero(), activation_eligibility_epoch: Epoch::from(std::u64::MAX), activation_epoch: Epoch::from(std::u64::MAX), exit_epoch: Epoch::from(std::u64::MAX), @@ -153,6 +269,25 @@ impl Default for Validator { } } +impl TreeHash for Validator { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Container + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + self.tree_hash_root_internal() + .expect("Validator tree_hash_root should not fail") + } +} + #[cfg(test)] mod tests { use super::*; @@ -166,7 +301,7 @@ mod tests { assert!(!v.is_active_at(epoch)); assert!(!v.is_exited_at(epoch)); assert!(!v.is_withdrawable_at(epoch)); - assert!(!v.slashed); + assert!(!v.slashed()); } #[test] @@ -174,7 +309,10 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - activation_epoch: epoch, + mutable: ValidatorMutable { + activation_epoch: epoch, + ..Default::default() + }, ..Validator::default() }; @@ -188,7 +326,10 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - exit_epoch: epoch, + mutable: ValidatorMutable { + exit_epoch: epoch, + ..ValidatorMutable::default() + }, ..Validator::default() }; @@ -202,7 +343,10 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - withdrawable_epoch: epoch, + mutable: ValidatorMutable { + withdrawable_epoch: epoch, + ..ValidatorMutable::default() + }, ..Validator::default() }; From bba555d5cb6f36b88cdb441c3a746f6feeb2f4ba Mon Sep 17 00:00:00 2001 From: 
Michael Sproul Date: Mon, 8 Apr 2024 11:46:23 +1000 Subject: [PATCH 02/41] EF tests --- testing/ef_tests/src/case_result.rs | 3 +++ testing/ef_tests/src/cases/bls_verify_msg.rs | 9 +++++++- .../src/cases/merkle_proof_validity.rs | 2 +- testing/ef_tests/src/cases/ssz_generic.rs | 22 ++++++++++++------- testing/ef_tests/src/cases/ssz_static.rs | 1 - 5 files changed, 26 insertions(+), 11 deletions(-) diff --git a/testing/ef_tests/src/case_result.rs b/testing/ef_tests/src/case_result.rs index 67ab9c51bbf..c511d9a1ca0 100644 --- a/testing/ef_tests/src/case_result.rs +++ b/testing/ef_tests/src/case_result.rs @@ -39,6 +39,9 @@ pub fn compare_beacon_state_results_without_caches( if let (Ok(ref mut result), Some(ref mut expected)) = (result.as_mut(), expected.as_mut()) { result.drop_all_caches().unwrap(); expected.drop_all_caches().unwrap(); + + result.apply_pending_mutations().unwrap(); + expected.apply_pending_mutations().unwrap(); } compare_result_detailed(result, expected) diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 42ee459a607..31fb16a4df4 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; -use bls::{PublicKeyBytes, Signature, SignatureBytes}; +use bls::{PublicKey, PublicKeyBytes, Signature, SignatureBytes}; use serde::Deserialize; use types::Hash256; @@ -29,6 +29,13 @@ impl Case for BlsVerify { .try_into() .and_then(|signature: Signature| { let pk = self.input.pubkey.decompress()?; + + // Check serialization roundtrip. 
+ let pk_uncompressed = pk.serialize_uncompressed(); + let pk_from_uncompressed = PublicKey::deserialize_uncompressed(&pk_uncompressed) + .expect("uncompressed serialization should round-trip"); + assert_eq!(pk_from_uncompressed, pk); + Ok(signature.verify(&pk, Hash256::from_slice(&message))) }) .unwrap_or(false); diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index a2e831ade59..ddca5e2184b 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -51,7 +51,7 @@ impl LoadCase for MerkleProofValidity { impl Case for MerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); - state.initialize_tree_hash_cache(); + state.update_tree_hash_cache().unwrap(); let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index bb2465aae10..e620f4509fc 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -1,14 +1,14 @@ #![allow(non_snake_case)] use super::*; -use crate::cases::common::{TestU128, TestU256}; -use crate::decode::{snappy_decode_file, yaml_decode_file}; -use serde::Deserialize; -use serde::{de::Error as SerdeError, Deserializer}; +use crate::cases::common::{SszStaticType, TestU128, TestU256}; +use crate::cases::ssz_static::{check_serialization, check_tree_hash}; +use crate::decode::{log_file_access, snappy_decode_file, yaml_decode_file}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; use types::typenum::*; -use types::{BitList, BitVector, FixedVector, VariableList}; +use types::{BitList, BitVector, ForkName, 
VariableList, Vector}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -125,10 +125,16 @@ impl Case for SszGeneric { let elem_ty = parts[1]; let length = parts[2]; + // Skip length 0 tests. Milhouse doesn't have any checks against 0-capacity lists. + if length == "0" { + log_file_access(self.path.join("serialized.ssz_snappy")); + return Ok(()); + } + type_dispatch!( ssz_generic_test, (&self.path), - FixedVector, + Vector, <>, [elem_ty => primitive_type] [length => typenum] @@ -263,8 +269,8 @@ struct ComplexTestStruct { #[serde(deserialize_with = "byte_list_from_hex_str")] D: VariableList, E: VarTestStruct, - F: FixedVector, - G: FixedVector, + F: Vector, + G: Vector, } #[derive(Debug, Clone, PartialEq, Decode, Encode, TreeHash, Deserialize)] diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index e41c90c6e03..5f0ac3525c4 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -118,7 +118,6 @@ impl Case for SszStaticTHC> { check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; let mut state = self.value.clone(); - state.initialize_tree_hash_cache(); let cached_tree_hash_root = state.update_tree_hash_cache().unwrap(); check_tree_hash(&self.roots.root, cached_tree_hash_root.as_bytes())?; From 9d3ff459cb24fa927a907877b8a7628b379a1fa3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 11:47:11 +1000 Subject: [PATCH 03/41] lcli --- lcli/src/new_testnet.rs | 29 ++++++++++++++++------------- lcli/src/replace_state_pubkeys.rs | 10 +++++++--- lcli/src/skip_slots.rs | 4 ++-- lcli/src/transition_blocks.rs | 11 +++++++---- 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index f9da3d2b3e9..4ea04fd15f4 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -17,13 +17,14 @@ use std::fs::File; use std::io::Read; use std::path::PathBuf; use std::str::FromStr; 
+use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderMerge, - ForkName, Hash256, Keypair, PublicKey, Validator, + ForkName, Hash256, Keypair, PublicKey, Validator, ValidatorMutable, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -264,7 +265,7 @@ fn initialize_state_with_validators( let mut state = BeaconState::new(genesis_time, eth1_data, spec); // Seed RANDAO with Eth1 entropy - state.fill_randao_mixes_with(eth1_block_hash); + state.fill_randao_mixes_with(eth1_block_hash).unwrap(); for keypair in keypairs.iter() { let withdrawal_credentials = |pubkey: &PublicKey| { @@ -275,17 +276,19 @@ fn initialize_state_with_validators( let amount = spec.max_effective_balance; // Create a new validator. 
let validator = Validator { - pubkey: keypair.0.pk.clone().into(), - withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount - amount % (spec.effective_balance_increment), - spec.max_effective_balance, - ), - slashed: false, + pubkey: Arc::new(keypair.0.pk.clone().into()), + mutable: ValidatorMutable { + withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount - amount % (spec.effective_balance_increment), + spec.max_effective_balance, + ), + slashed: false, + }, }; state.validators_mut().push(validator).unwrap(); state.balances_mut().push(amount).unwrap(); diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs index 0f9fac3aff9..5d8421d6f6e 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -42,7 +42,8 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); let mut deposit_root = Hash256::zero(); - for (index, validator) in state.validators_mut().iter_mut().enumerate() { + let validators = state.validators_mut(); + for index in 0..validators.len() { let (secret, _) = recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) .map_err(|e| format!("Unable to generate validator key: {:?}", e))?; @@ -52,11 +53,14 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), eprintln!("{}: {}", index, keypair.pk); - validator.pubkey = keypair.pk.into(); + validators + .get_mut(index) + .unwrap() + 
.replace_pubkey(keypair.pk.into()); // Update the deposit tree. let mut deposit_data = DepositData { - pubkey: validator.pubkey, + pubkey: *validators.get(index).unwrap().pubkey(), // Set this to a junk value since it's very time consuming to generate the withdrawal // keys and it's not useful for the time being. withdrawal_credentials: Hash256::zero(), diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 9e5da7709f1..d421c077d83 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -57,7 +57,7 @@ use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::time::{Duration, Instant}; -use types::{BeaconState, CloneConfig, EthSpec, Hash256}; +use types::{BeaconState, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); @@ -128,7 +128,7 @@ pub fn run( }; for i in 0..runs { - let mut state = state.clone_with(CloneConfig::all()); + let mut state = state.clone(); let start = Instant::now(); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index c72b41b1d44..bab1649d147 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -85,7 +85,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::{Duration, Instant}; use store::HotColdDB; -use types::{BeaconState, ChainSpec, CloneConfig, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); @@ -201,7 +201,10 @@ pub fn run( let store = Arc::new(store); debug!("Building pubkey cache (might take some time)"); - let validator_pubkey_cache = ValidatorPubkeyCache::new(&pre_state, store) + let validator_pubkey_cache = store.immutable_validators.clone(); + validator_pubkey_cache + .write() + .import_new_pubkeys(&pre_state) .map_err(|e| format!("Failed to create pubkey cache: {:?}", e))?; /* @@ -234,7 +237,7 @@ pub fn run( let mut output_post_state = None; let mut saved_ctxt = None; for i in 
0..runs { - let pre_state = pre_state.clone_with(CloneConfig::all()); + let pre_state = pre_state.clone(); let block = block.clone(); let start = Instant::now(); @@ -245,7 +248,7 @@ pub fn run( block, state_root_opt, &config, - &validator_pubkey_cache, + &*validator_pubkey_cache.read(), &mut saved_ctxt, spec, )?; From 1f5516a14b8be6ab621adea717067df5dc081e5a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 11:48:55 +1000 Subject: [PATCH 04/41] common and watch --- common/eth2/src/types.rs | 12 ++++++------ common/task_executor/Cargo.toml | 1 + common/task_executor/src/test_utils.rs | 3 ++- watch/src/updater/mod.rs | 10 +++++----- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 5f85d777957..04e37ed1935 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -375,20 +375,20 @@ pub enum ValidatorStatus { impl ValidatorStatus { pub fn from_validator(validator: &Validator, epoch: Epoch, far_future_epoch: Epoch) -> Self { if validator.is_withdrawable_at(epoch) { - if validator.effective_balance == 0 { + if validator.effective_balance() == 0 { ValidatorStatus::WithdrawalDone } else { ValidatorStatus::WithdrawalPossible } - } else if validator.is_exited_at(epoch) && epoch < validator.withdrawable_epoch { - if validator.slashed { + } else if validator.is_exited_at(epoch) && epoch < validator.withdrawable_epoch() { + if validator.slashed() { ValidatorStatus::ExitedSlashed } else { ValidatorStatus::ExitedUnslashed } } else if validator.is_active_at(epoch) { - if validator.exit_epoch < far_future_epoch { - if validator.slashed { + if validator.exit_epoch() < far_future_epoch { + if validator.slashed() { ValidatorStatus::ActiveSlashed } else { ValidatorStatus::ActiveExiting @@ -399,7 +399,7 @@ impl ValidatorStatus { // `pending` statuses are specified as validators where `validator.activation_epoch > current_epoch`. 
// If this code is reached, this criteria must have been met because `validator.is_active_at(epoch)`, // `validator.is_exited_at(epoch)`, and `validator.is_withdrawable_at(epoch)` all returned false. - } else if validator.activation_eligibility_epoch == far_future_epoch { + } else if validator.activation_eligibility_epoch() == far_future_epoch { ValidatorStatus::PendingInitialized } else { ValidatorStatus::PendingQueued diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index b3d58fa5ea8..cc9a2c5097b 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -12,3 +12,4 @@ futures = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } sloggers = { workspace = true } +logging = { workspace = true } diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index 6e372d97575..ec8f45d850e 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -1,4 +1,5 @@ use crate::TaskExecutor; +use logging::test_logger; use slog::Logger; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; @@ -26,7 +27,7 @@ impl Default for TestRuntime { fn default() -> Self { let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let log = null_logger().unwrap(); + let log = test_logger(); let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() { (None, handle) diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs index 65e0a90a2b4..c3c8c94cdd7 100644 --- a/watch/src/updater/mod.rs +++ b/watch/src/updater/mod.rs @@ -211,20 +211,20 @@ pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result Date: Mon, 8 Apr 2024 11:49:23 +1000 Subject: [PATCH 05/41] account manager --- account_manager/src/validator/exit.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index bc9e0ee1dd6..f5cdd635188 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -203,8 +203,8 @@ async fn publish_voluntary_exit( let validator_data = get_validator_data(client, &keypair.pk).await?; match validator_data.status { ValidatorStatus::ActiveExiting => { - let exit_epoch = validator_data.validator.exit_epoch; - let withdrawal_epoch = validator_data.validator.withdrawable_epoch; + let exit_epoch = validator_data.validator.exit_epoch(); + let withdrawal_epoch = validator_data.validator.withdrawable_epoch(); let current_epoch = get_current_epoch::(genesis_data.genesis_time, spec) .ok_or("Failed to get current epoch. Please check your system time")?; eprintln!("Voluntary exit has been accepted into the beacon chain, but not yet finalized. \ @@ -224,7 +224,7 @@ async fn publish_voluntary_exit( ValidatorStatus::ExitedSlashed | ValidatorStatus::ExitedUnslashed => { eprintln!( "Validator has exited on epoch: {}", - validator_data.validator.exit_epoch + validator_data.validator.exit_epoch() ); break; } @@ -250,7 +250,7 @@ async fn get_validator_index_for_exit( ValidatorStatus::ActiveOngoing => { let eligible_epoch = validator_data .validator - .activation_epoch + .activation_epoch() .safe_add(spec.shard_committee_period) .map_err(|e| format!("Failed to calculate eligible epoch, validator activation epoch too high: {:?}", e))?; From 67a881dc5fb2c6dcb860dcb3555e7b1a36dfdaaf Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 11:50:45 +1000 Subject: [PATCH 06/41] cargo --- Cargo.lock | 394 ++++++++++++++++++++++++++++++++++++++++++----------- Cargo.toml | 7 +- 2 files changed, 320 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e37e3903dbd..b3ef7c9c88a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,7 +49,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand", + "rand 0.8.5", 
"regex", "rpassword", "serde", @@ -240,7 +240,7 @@ dependencies = [ "k256 0.13.3", "keccak-asm", "proptest", - "rand", + "rand 0.8.5", "ruint", "serde", "tiny-keccak", @@ -313,6 +313,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "archery" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a8da9bc4c4053ee067669762bcaeea6e241841295a2b6c948312dad6ef4cc02" +dependencies = [ + "static_assertions", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -424,7 +433,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -434,7 +443,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -616,6 +625,15 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "autocfg" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" +dependencies = [ + "autocfg 1.1.0", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -734,6 +752,7 @@ version = "0.2.0" dependencies = [ "bitvec 1.0.1", "bls", + "crossbeam-channel", "derivative", "environment", "eth1", @@ -760,8 +779,9 @@ dependencies = [ "oneshot_broadcast", "operation_pool", "parking_lot 0.12.1", + "promise_cache", "proto_array", - "rand", + "rand 0.8.5", "rayon", "safe_arith", "sensitive_url", @@ -790,7 +810,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.1.3" +version = "5.1.222-exp" dependencies = [ "beacon_chain", "clap", @@ -991,12 +1011,13 @@ version = "0.2.0" dependencies = [ 
"arbitrary", "blst", + "criterion", "ethereum-types 0.14.1", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "hex", - "rand", + "rand 0.8.5", "serde", "tree_hash", "zeroize", @@ -1026,7 +1047,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.1.3" +version = "5.1.222-exp" dependencies = [ "beacon_node", "clap", @@ -1358,6 +1379,15 @@ dependencies = [ "types", ] +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "cmake" version = "0.1.50" @@ -1549,7 +1579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1561,7 +1591,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1573,7 +1603,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -1770,6 +1800,7 @@ dependencies = [ "clap", "clap_utils", "environment", + "ethereum_ssz", "hex", "logging", "slog", @@ -2027,7 +2058,7 @@ dependencies = [ "lru", "more-asserts", "parking_lot 0.11.2", - "rand", + "rand 0.8.5", "rlp", "smallvec", "socket2 0.4.10", @@ -2104,7 +2135,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2 0.10.8", "subtle", @@ -2165,7 +2196,7 @@ dependencies = [ "ff 0.12.1", "generic-array", 
"group 0.12.1", - "rand_core", + "rand_core 0.6.4", "sec1 0.3.0", "subtle", "zeroize", @@ -2185,7 +2216,7 @@ dependencies = [ "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "sec1 0.7.3", "subtle", "zeroize", @@ -2212,7 +2243,7 @@ dependencies = [ "hex", "k256 0.13.3", "log", - "rand", + "rand 0.8.5", "rlp", "serde", "sha3 0.10.8", @@ -2433,7 +2464,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand", + "rand 0.8.5", "scrypt", "serde", "serde_json", @@ -2475,7 +2506,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "serde_repr", @@ -2707,7 +2738,7 @@ dependencies = [ "k256 0.11.6", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "rlp-derive", "serde", @@ -2834,7 +2865,7 @@ dependencies = [ "lru", "parking_lot 0.12.1", "pretty_reqwest_error", - "rand", + "rand 0.8.5", "reqwest", "sensitive_url", "serde", @@ -2902,7 +2933,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2912,7 +2943,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2959,7 +2990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -2972,7 +3003,7 @@ checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "arbitrary", "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -3043,6 +3074,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fuchsia-cprng" 
+version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + [[package]] name = "funty" version = "1.1.0" @@ -3320,7 +3357,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec 0.3.1", "quickcheck", - "rand", + "rand 0.8.5", "regex", "serde", "sha2 0.10.8", @@ -3336,7 +3373,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3347,7 +3384,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3535,7 +3572,7 @@ dependencies = [ "idna 0.4.0", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "socket2 0.5.6", "thiserror", "tinyvec", @@ -3557,7 +3594,7 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", "thiserror", @@ -3954,7 +3991,7 @@ dependencies = [ "http 0.2.11", "hyper 0.14.28", "log", - "rand", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -4028,7 +4065,7 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg", + "autocfg 1.1.0", "hashbrown 0.12.3", ] @@ -4288,7 +4325,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.1.3" +version = "5.1.222-exp" dependencies = [ "account_utils", "beacon_chain", @@ -4486,7 +4523,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project", "quick-protobuf", - "rand", + "rand 0.8.5", "rw-stream-sink", "smallvec", "thiserror", @@ -4548,7 +4585,7 @@ dependencies = [ "multihash", "p256", "quick-protobuf", - 
"rand", + "rand 0.8.5", "sec1 0.7.3", "sha2 0.10.8", "thiserror", @@ -4570,7 +4607,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand", + "rand 0.8.5", "smallvec", "socket2 0.5.6", "tokio", @@ -4607,7 +4644,7 @@ dependencies = [ "libp2p-identity", "nohash-hasher", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "smallvec", "tracing", "unsigned-varint 0.7.2", @@ -4629,7 +4666,7 @@ dependencies = [ "multihash", "once_cell", "quick-protobuf", - "rand", + "rand 0.8.5", "sha2 0.10.8", "snow", "static_assertions", @@ -4670,7 +4707,7 @@ dependencies = [ "libp2p-tls", "parking_lot 0.12.1", "quinn", - "rand", + "rand 0.8.5", "ring 0.16.20", "rustls", "socket2 0.5.6", @@ -4695,7 +4732,7 @@ dependencies = [ "libp2p-swarm-derive", "multistream-select", "once_cell", - "rand", + "rand 0.8.5", "smallvec", "tokio", "tracing", @@ -4805,7 +4842,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand", + "rand 0.8.5", "serde", "sha2 0.9.9", "typenum", @@ -4864,7 +4901,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.1.3" +version = "5.1.222-exp" dependencies = [ "account_manager", "account_utils", @@ -4896,6 +4933,7 @@ dependencies = [ "slashing_protection", "slog", "sloggers", + "store", "task_executor", "tempfile", "tracing-subscriber", @@ -4949,7 +4987,7 @@ dependencies = [ "prometheus-client", "quickcheck", "quickcheck_macros", - "rand", + "rand 0.8.5", "regex", "serde", "sha2 0.9.9", @@ -5030,7 +5068,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ - "autocfg", + "autocfg 1.1.0", "scopeguard", ] @@ -5190,7 +5228,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ - "autocfg", + "autocfg 1.1.0", ] [[package]] @@ -5249,6 
+5287,28 @@ dependencies = [ "quote", ] +[[package]] +name = "milhouse" +version = "0.1.0" +source = "git+https://github.com/sigp/milhouse?branch=main#40a536490b14dc95834f9ece0001e8e04f7b38d7" +dependencies = [ + "arbitrary", + "derivative", + "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", + "itertools", + "parking_lot 0.11.2", + "rayon", + "serde", + "smallvec", + "tree_hash", + "triomphe", + "typenum", + "vec_map", +] + [[package]] name = "mime" version = "0.3.17" @@ -5493,7 +5553,7 @@ dependencies = [ "num_cpus", "operation_pool", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "rlp", "slog", "slog-async", @@ -5590,7 +5650,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ - "autocfg", + "autocfg 1.1.0", "num-integer", "num-traits", ] @@ -5607,7 +5667,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "serde", "smallvec", "zeroize", @@ -5634,7 +5694,7 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ - "autocfg", + "autocfg 1.1.0", "num-integer", "num-traits", ] @@ -5645,7 +5705,7 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ - "autocfg", + "autocfg 1.1.0", "libm", ] @@ -5795,7 +5855,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "rayon", "serde", "state_processing", @@ -5935,7 +5995,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -6205,7 +6265,7 @@ 
dependencies = [ "hmac 0.12.1", "md-5", "memchr", - "rand", + "rand 0.8.5", "sha2 0.10.8", "stringprep", ] @@ -6377,6 +6437,16 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "promise_cache" +version = "0.1.0" +dependencies = [ + "derivative", + "itertools", + "oneshot_broadcast", + "slog", +] + [[package]] name = "proptest" version = "1.4.0" @@ -6388,9 +6458,9 @@ dependencies = [ "bitflags 2.4.2", "lazy_static", "num-traits", - "rand", - "rand_chacha", - "rand_xorshift", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift 0.3.0", "regex-syntax 0.8.2", "rusty-fork", "tempfile", @@ -6484,7 +6554,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", "log", - "rand", + "rand 0.8.5", ] [[package]] @@ -6523,7 +6593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring 0.16.20", "rustc-hash", "rustls", @@ -6588,6 +6658,25 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.8", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift 0.1.1", + "winapi", +] + [[package]] name = "rand" version = "0.8.5" @@ -6595,8 +6684,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.1.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +dependencies = [ + "autocfg 0.1.8", + "rand_core 0.3.1", ] [[package]] @@ -6606,9 +6705,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", ] +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.6.4" @@ -6618,13 +6732,75 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_hc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_isaac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_jitter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +dependencies = [ + "libc", + "rand_core 0.4.2", + "winapi", +] + +[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +dependencies = [ + "cloudabi", + "fuchsia-cprng", + "libc", + 
"rand_core 0.4.2", + "rdrand", + "winapi", +] + +[[package]] +name = "rand_pcg" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +dependencies = [ + "autocfg 0.1.8", + "rand_core 0.4.2", +] + +[[package]] +name = "rand_xorshift" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -6659,6 +6835,15 @@ dependencies = [ "yasna", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -6876,6 +7061,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "rpds" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ef5140bcb576bfd6d56cd2de709a7d17851ac1f3805e67fe9d99e42a11821f" +dependencies = [ + "archery", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -6907,7 +7101,7 @@ dependencies = [ "parity-scale-codec 3.6.9", "primitive-types 0.12.2", "proptest", - "rand", + "rand 0.8.5", "rlp", "ruint-macro", "serde", @@ -7489,7 +7683,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -7499,7 +7693,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -7544,7 +7738,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg", + "autocfg 1.1.0", ] [[package]] @@ -7566,7 +7760,7 @@ dependencies = [ "lru", "maplit", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "rayon", "safe_arith", "serde", @@ -7748,7 +7942,7 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "ring 0.17.8", "rustc_version 0.4.0", "sha2 0.10.8", @@ -7825,6 +8019,12 @@ dependencies = [ "typenum", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "state_processing" version = "0.2.0" @@ -7850,6 +8050,7 @@ dependencies = [ "tokio", "tree_hash", "types", + "vec_map", ] [[package]] @@ -7875,6 +8076,7 @@ name = "store" version = "0.2.0" dependencies = [ "beacon_chain", + "bls", "db-key", "directory", "ethereum_ssz", @@ -7883,15 +8085,20 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", + "logging", "lru", "parking_lot 0.12.1", + "safe_arith", "serde", "slog", "sloggers", + "smallvec", "state_processing", "strum", "tempfile", "types", + "xdelta3", + "zstd", ] [[package]] @@ -7947,9 +8154,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" +checksum = "6f4e1f478a7728f8855d7e620e9a152cf8932c6614f86564c886f9b8141f3201" dependencies = [ "darling", "itertools", @@ -8089,6 +8296,7 @@ 
dependencies = [ "futures", "lazy_static", "lighthouse_metrics", + "logging", "slog", "sloggers", "tokio", @@ -8153,7 +8361,7 @@ dependencies = [ "hex", "hmac 0.12.1", "log", - "rand", + "rand 0.8.5", "serde", "serde_json", "sha2 0.10.8", @@ -8259,7 +8467,7 @@ dependencies = [ "hmac 0.12.1", "once_cell", "pbkdf2 0.11.0", - "rand", + "rand 0.8.5", "rustc-hash", "sha2 0.10.8", "thiserror", @@ -8370,7 +8578,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "rand", + "rand 0.8.5", "socket2 0.5.6", "tokio", "tokio-util 0.7.10", @@ -8647,6 +8855,16 @@ dependencies = [ "rlp", ] +[[package]] +name = "triomphe" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" +dependencies = [ + "serde", + "stable_deref_trait", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -8686,12 +8904,14 @@ dependencies = [ "maplit", "merkle_proof", "metastruct", + "milhouse", "parking_lot 0.12.1", "paste", - "rand", - "rand_xorshift", + "rand 0.8.5", + "rand_xorshift 0.3.0", "rayon", "regex", + "rpds", "rusqlite", "safe_arith", "serde", @@ -8908,7 +9128,7 @@ dependencies = [ "malloc_utils", "monitoring_api", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "reqwest", "ring 0.16.20", "safe_arith", @@ -8945,7 +9165,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand", + "rand 0.8.5", "tempfile", "tree_hash", "types", @@ -9211,7 +9431,7 @@ dependencies = [ "logging", "network", "r2d2", - "rand", + "rand 0.8.5", "reqwest", "serde", "serde_json", @@ -9631,7 +9851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "serde", "zeroize", ] @@ -9653,6 +9873,20 @@ dependencies = [ "time", ] +[[package]] +name = "xdelta3" +version = "0.1.5" +source = 
"git+http://github.com/michaelsproul/xdelta3-rs?rev=ae9a1d2585ef998f4656acdc35cf263ee88e6dfa#ae9a1d2585ef998f4656acdc35cf263ee88e6dfa" +dependencies = [ + "bindgen 0.66.1", + "cc", + "futures-io", + "futures-util", + "libc", + "log", + "rand 0.6.5", +] + [[package]] name = "xml-rs" version = "0.8.19" @@ -9688,7 +9922,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.1", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -9703,7 +9937,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.1", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index 38018c712d5..61e0d7bb05a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,7 @@ members = [ "common/malloc_utils", "common/oneshot_broadcast", "common/pretty_reqwest_error", + "common/promise_cache", "common/sensitive_url", "common/slot_clock", "common/system_health", @@ -105,6 +106,7 @@ bytes = "1" clap = "2" compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.3" +crossbeam-channel = "0.5.8" delay_map = "0.3" derivative = "2" dirs = "3" @@ -131,6 +133,7 @@ libsecp256k1 = "0.7" log = "0.4" lru = "0.12" maplit = "1" +milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" } num_cpus = "1" parking_lot = "0.12" paste = "1" @@ -157,7 +160,7 @@ smallvec = "1.11.2" snap = "1" ssz_types = "0.5" strum = { version = "0.24", features = ["derive"] } -superstruct = "0.6" +superstruct = "0.7" syn = "1" sysinfo = "0.26" tempfile = "3" @@ -174,8 +177,10 @@ tree_hash_derive = "0.5" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.6", default-features = false, features = ["tls"] } +xdelta3 = { git = "http://github.com/michaelsproul/xdelta3-rs", rev="ae9a1d2585ef998f4656acdc35cf263ee88e6dfa" } zeroize = { version = "1", features = ["zeroize_derive"] } zip = "0.6" +zstd = "0.11.2" # Local crates. 
account_utils = { path = "common/account_utils" } From 19f3020e3e235fb659472ed3be21a49e3bf407f7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 11:51:10 +1000 Subject: [PATCH 07/41] fork choice --- consensus/fork_choice/tests/tests.rs | 2 +- consensus/proto_array/src/justified_balances.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 3153275fb73..f90383e96a8 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -392,7 +392,7 @@ impl ForkChoiceTest { .into_iter() .map(|v| { if v.is_active_at(state.current_epoch()) { - v.effective_balance + v.effective_balance() } else { 0 } diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs index e08c8443eef..daff362209a 100644 --- a/consensus/proto_array/src/justified_balances.rs +++ b/consensus/proto_array/src/justified_balances.rs @@ -24,11 +24,11 @@ impl JustifiedBalances { .validators() .iter() .map(|validator| { - if !validator.slashed && validator.is_active_at(current_epoch) { - total_effective_balance.safe_add_assign(validator.effective_balance)?; + if !validator.slashed() && validator.is_active_at(current_epoch) { + total_effective_balance.safe_add_assign(validator.effective_balance())?; num_active_validators.safe_add_assign(1)?; - Ok(validator.effective_balance) + Ok(validator.effective_balance()) } else { Ok(0) } From 083cf644335feb1ff8aa7020713e692df3710672 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 11:52:58 +1000 Subject: [PATCH 08/41] promise cache --- common/promise_cache/Cargo.toml | 10 ++ common/promise_cache/src/lib.rs | 227 ++++++++++++++++++++++++++++++++ 2 files changed, 237 insertions(+) create mode 100644 common/promise_cache/Cargo.toml create mode 100644 common/promise_cache/src/lib.rs diff --git a/common/promise_cache/Cargo.toml 
b/common/promise_cache/Cargo.toml new file mode 100644 index 00000000000..b5fa42bd438 --- /dev/null +++ b/common/promise_cache/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "promise_cache" +version = "0.1.0" +edition.workspace = true + +[dependencies] +derivative = { workspace = true } +oneshot_broadcast = { path = "../oneshot_broadcast" } +itertools = { workspace = true } +slog = { workspace = true } diff --git a/common/promise_cache/src/lib.rs b/common/promise_cache/src/lib.rs new file mode 100644 index 00000000000..36b6bd984f5 --- /dev/null +++ b/common/promise_cache/src/lib.rs @@ -0,0 +1,227 @@ +use derivative::Derivative; +use itertools::Itertools; +use oneshot_broadcast::{oneshot, Receiver, Sender}; +use slog::Logger; +use std::collections::HashMap; +use std::hash::Hash; +use std::sync::Arc; + +#[derive(Debug)] +pub struct PromiseCache +where + K: Hash + Eq + Clone, + P: Protect, +{ + cache: HashMap>, + capacity: usize, + protector: P, + max_concurrent_promises: usize, + logger: Logger, +} + +/// A value implementing `Protect` is capable of preventing keys of type `K` from being evicted. +/// +/// It also dictates an ordering on keys which is used to prioritise evictions. 
+pub trait Protect { + type SortKey: Ord; + + fn sort_key(&self, k: &K) -> Self::SortKey; + + fn protect_from_eviction(&self, k: &K) -> bool; + + fn notify_eviction(&self, _k: &K, _log: &Logger) {} +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""))] +pub enum CacheItem { + Complete(Arc), + Promise(Receiver>), +} + +impl std::fmt::Debug for CacheItem { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + CacheItem::Complete(value) => value.fmt(f), + CacheItem::Promise(_) => "Promise(..)".fmt(f), + } + } +} + +#[derive(Debug)] +pub enum PromiseCacheError { + Failed(oneshot_broadcast::Error), + MaxConcurrentPromises(usize), +} + +pub trait ToArc { + fn to_arc(&self) -> Arc; +} + +impl CacheItem { + pub fn is_promise(&self) -> bool { + matches!(self, CacheItem::Promise(_)) + } + + pub fn wait(self) -> Result, PromiseCacheError> { + match self { + CacheItem::Complete(value) => Ok(value), + CacheItem::Promise(receiver) => receiver.recv().map_err(PromiseCacheError::Failed), + } + } +} + +impl ToArc for Arc { + fn to_arc(&self) -> Arc { + self.clone() + } +} + +impl ToArc for T +where + T: Clone, +{ + fn to_arc(&self) -> Arc { + Arc::new(self.clone()) + } +} + +impl PromiseCache +where + K: Hash + Eq + Clone, + P: Protect, +{ + pub fn new(capacity: usize, protector: P, logger: Logger) -> Self { + // Making the concurrent promises directly configurable is considered overkill for now, + // so we just derive a vaguely sensible value from the cache size. + let max_concurrent_promises = std::cmp::max(2, capacity / 8); + Self { + cache: HashMap::new(), + capacity, + protector, + max_concurrent_promises, + logger, + } + } + + pub fn get(&mut self, key: &K) -> Option> { + match self.cache.get(key) { + // The cache contained the value, return it. + item @ Some(CacheItem::Complete(_)) => item.cloned(), + // The cache contains a promise for the value. 
Check to see if the promise has already + // been resolved, without waiting for it. + item @ Some(CacheItem::Promise(receiver)) => match receiver.try_recv() { + // The promise has already been resolved. Replace the entry in the cache with a + // `Complete` entry and then return the value. + Ok(Some(value)) => { + let ready = CacheItem::Complete(value); + self.insert_cache_item(key.clone(), ready.clone()); + Some(ready) + } + // The promise has not yet been resolved. Return the promise so the caller can await + // it. + Ok(None) => item.cloned(), + // The sender has been dropped without sending a value. There was most likely an + // error computing the value. Drop the key from the cache and return + // `None` so the caller can recompute the value. + // + // It's worth noting that this is the only place where we removed unresolved + // promises from the cache. This means unresolved promises will only be removed if + // we try to access them again. This is OK, since the promises don't consume much + // memory. We expect that *all* promises should be resolved, unless there is a + // programming or database error. + Err(oneshot_broadcast::Error::SenderDropped) => { + self.cache.remove(key); + None + } + }, + // The cache does not have this value and it's not already promised to be computed. + None => None, + } + } + + pub fn contains(&self, key: &K) -> bool { + self.cache.contains_key(key) + } + + pub fn insert_value>(&mut self, key: K, value: &C) { + if self + .cache + .get(&key) + // Replace the value if it's not present or if it's a promise. A bird in the hand is + // worth two in the promise-bush! + .map_or(true, CacheItem::is_promise) + { + self.insert_cache_item(key, CacheItem::Complete(value.to_arc())); + } + } + + /// Take care of resolving a promise by ensuring the value is made available: + /// + /// 1. To all waiting thread that are holding a `Receiver`. + /// 2. In the cache itself for future callers. 
+ pub fn resolve_promise>(&mut self, sender: Sender>, key: K, value: &C) { + // Use the sender to notify all actively waiting receivers. + let arc_value = value.to_arc(); + sender.send(arc_value.clone()); + + // Re-insert the value into the cache. The promise may have been evicted in the meantime, + // but we probably want to keep this value (which resolved recently) over other older cache + // entries. + self.insert_value(key, &arc_value); + } + + /// Prunes the cache first before inserting a new item. + fn insert_cache_item(&mut self, key: K, cache_item: CacheItem) { + self.prune_cache(); + self.cache.insert(key, cache_item); + } + + pub fn create_promise(&mut self, key: K) -> Result>, PromiseCacheError> { + let num_active_promises = self.cache.values().filter(|item| item.is_promise()).count(); + if num_active_promises >= self.max_concurrent_promises { + return Err(PromiseCacheError::MaxConcurrentPromises( + num_active_promises, + )); + } + + let (sender, receiver) = oneshot(); + self.insert_cache_item(key, CacheItem::Promise(receiver)); + Ok(sender) + } + + fn prune_cache(&mut self) { + let target_cache_size = self.capacity.saturating_sub(1); + if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) { + let keys_to_prune = self + .cache + .keys() + .filter(|k| !self.protector.protect_from_eviction(*k)) + .sorted_by_key(|k| self.protector.sort_key(k)) + .take(prune_count) + .cloned() + .collect::>(); + + for key in &keys_to_prune { + self.protector.notify_eviction(key, &self.logger); + self.cache.remove(key); + } + } + } + + pub fn update_protector(&mut self, protector: P) { + self.protector = protector; + } + + pub fn len(&self) -> usize { + self.cache.len() + } + + pub fn is_empty(&self) -> bool { + self.cache.is_empty() + } + + pub fn max_concurrent_promises(&self) -> usize { + self.max_concurrent_promises + } +} From 429db72145b5397ec18d9caa451476de844abfea Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:04:51 +1000 
Subject: [PATCH 09/41] beacon chain --- beacon_node/beacon_chain/Cargo.toml | 80 ++-- .../beacon_chain/src/attestation_rewards.rs | 4 +- .../beacon_chain/src/beacon_block_reward.rs | 4 +- beacon_node/beacon_chain/src/beacon_chain.rs | 361 ++++++------------ .../beacon_chain/src/beacon_proposer_cache.rs | 8 +- .../beacon_chain/src/beacon_snapshot.rs | 25 +- .../beacon_chain/src/blob_verification.rs | 131 ++----- .../beacon_chain/src/block_verification.rs | 192 ++++------ beacon_node/beacon_chain/src/builder.rs | 55 +-- .../beacon_chain/src/canonical_head.rs | 118 ++---- beacon_node/beacon_chain/src/chain_config.rs | 9 +- .../overflow_lru_cache.rs | 2 +- beacon_node/beacon_chain/src/errors.rs | 4 +- beacon_node/beacon_chain/src/eth1_chain.rs | 29 +- beacon_node/beacon_chain/src/head_tracker.rs | 4 +- beacon_node/beacon_chain/src/lib.rs | 13 +- beacon_node/beacon_chain/src/metrics.rs | 4 - beacon_node/beacon_chain/src/migrate.rs | 2 +- .../beacon_chain/src/shuffling_cache.rs | 300 +++------------ .../src/sync_committee_rewards.rs | 42 +- beacon_node/beacon_chain/src/test_utils.rs | 17 +- .../beacon_chain/src/validator_monitor.rs | 16 +- .../beacon_chain/tests/op_verification.rs | 30 +- .../tests/payload_invalidation.rs | 4 +- beacon_node/beacon_chain/tests/rewards.rs | 28 +- beacon_node/beacon_chain/tests/store_tests.rs | 24 +- 26 files changed, 530 insertions(+), 976 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 43c2c896f71..fd59a1a799c 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,58 +19,60 @@ environment = { workspace = true } serde_json = { workspace = true } [dependencies] -serde_json = { workspace = true } +bitvec = { workspace = true } +bls = { workspace = true } +crossbeam-channel = { workspace = true } +derivative = { workspace = true } +eth1 = { workspace = true } +eth2 = { workspace = true } eth2_network_config = { workspace = true } -merkle_proof 
= { workspace = true } -store = { workspace = true } -parking_lot = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +futures = { workspace = true } +genesis = { workspace = true } +hex = { workspace = true } +int_to_bytes = { workspace = true } +itertools = { workspace = true } +kzg = { workspace = true } lazy_static = { workspace = true } -smallvec = { workspace = true } lighthouse_metrics = { workspace = true } +logging = { workspace = true } +lru = { workspace = true } +merkle_proof = { workspace = true } +oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } +parking_lot = { workspace = true } +promise_cache = { path = "../../common/promise_cache" } +proto_array = { workspace = true } +rand = { workspace = true } rayon = { workspace = true } +safe_arith = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } -ethereum_serde_utils = { workspace = true } +serde_json = { workspace = true } +slasher = { workspace = true } slog = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } sloggers = { workspace = true } slot_clock = { workspace = true } -ethereum_hashing = { workspace = true } -ethereum_ssz = { workspace = true } +smallvec = { workspace = true } ssz_types = { workspace = true } -ethereum_ssz_derive = { workspace = true } state_processing = { workspace = true } -tree_hash_derive = { workspace = true } -tree_hash = { workspace = true } -types = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -eth1 = { workspace = true } -futures = { workspace = true } -genesis = { workspace = true } -int_to_bytes = { workspace = true } -rand = { workspace = true } -proto_array = { workspace = true } -lru = 
{ workspace = true } -tempfile = { workspace = true } -bitvec = { workspace = true } -bls = { workspace = true } -kzg = { workspace = true } -safe_arith = { workspace = true } -fork_choice = { workspace = true } -task_executor = { workspace = true } -derivative = { workspace = true } -itertools = { workspace = true } -slasher = { workspace = true } -eth2 = { workspace = true } +store = { workspace = true } strum = { workspace = true } -logging = { workspace = true } -execution_layer = { workspace = true } -sensitive_url = { workspace = true } superstruct = { workspace = true } -hex = { workspace = true } -oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } -slog-term = { workspace = true } -slog-async = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 491b7ef7da9..45b690dc8f3 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -230,13 +230,13 @@ impl BeaconChain { let mut inactivity_penalty = 0i64; if eligible { - let effective_balance = validator.effective_balance; + let effective_balance = validator.effective_balance(); for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { let (ideal_reward, penalty) = ideal_rewards_hashmap .get(&(flag_index, effective_balance)) .ok_or(BeaconChainError::AttestationRewardsError)?; - let voted_correctly = !validator.slashed + let voted_correctly = !validator.slashed() && previous_epoch_participation_flags.has_flag(flag_index)?; if voted_correctly { if flag_index == TIMELY_HEAD_FLAG_INDEX { diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs 
b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 5b70215d225..9ee5ec41eed 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -135,7 +135,7 @@ impl BeaconChain { proposer_slashing_reward.safe_add_assign( state .get_validator(proposer_slashing.proposer_index() as usize)? - .effective_balance + .effective_balance() .safe_div(self.spec.whistleblower_reward_quotient)?, )?; } @@ -157,7 +157,7 @@ impl BeaconChain { attester_slashing_reward.safe_add_assign( state .get_validator(attester_index as usize)? - .effective_balance + .effective_balance() .safe_div(self.spec.whistleblower_reward_quotient)?, )?; } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 421bc12ee43..5adf51fe07a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -54,11 +54,11 @@ use crate::observed_blob_sidecars::ObservedBlobSidecars; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; +use crate::parallel_state_cache::ParallelStateCache; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; @@ -130,9 +130,6 @@ pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. type HashBlockTuple = (Hash256, RpcBlock); -/// The time-out before failure during an operation to take a read/write RwLock on the block -/// processing cache. 
-pub const BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// The time-out before failure during an operation to take a read/write RwLock on the /// attestation cache. pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); @@ -176,6 +173,7 @@ pub const INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON: &str = "Finalized merge transition block is invalid."; /// Defines the behaviour when a block/block-root for a skipped slot is requested. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. /// @@ -447,8 +445,6 @@ pub struct BeaconChain { pub event_handler: Option>, /// Used to track the heads of the beacon chain. pub(crate) head_tracker: Arc, - /// A cache dedicated to block processing. - pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: TimeoutRwLock, /// A cache of eth1 deposit data at epoch boundaries for deposit finalization @@ -456,7 +452,7 @@ pub struct BeaconChain { /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. - pub(crate) validator_pubkey_cache: TimeoutRwLock>, + pub(crate) validator_pubkey_cache: Arc>>, /// A cache used when producing attestations. pub(crate) attester_cache: Arc, /// A cache used when producing attestations whilst the head block is still being imported. @@ -465,6 +461,10 @@ pub struct BeaconChain { pub block_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. pub pre_finalization_block_cache: PreFinalizationBlockCache, + /// A cache used to de-duplicate HTTP state requests. + /// + /// The cache is keyed by `state_root`. 
+ pub parallel_state_cache: Arc>>, /// A cache used to produce light_client server messages pub light_client_server_cache: LightClientServerCache, /// Sender to signal the light_client server to produce new updates @@ -487,11 +487,6 @@ pub struct BeaconChain { pub data_availability_checker: Arc>, /// The KZG trusted setup used by this chain. pub kzg: Option>, - /// State with complete tree hash cache, ready for block production. - /// - /// NB: We can delete this once we have tree-states. - #[allow(clippy::type_complexity)] - pub block_production_state: Arc)>>>, } pub enum BeaconBlockResponseWrapper { @@ -768,9 +763,8 @@ impl BeaconChain { let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state.clone_with(CloneConfig::none()), + local_head.beacon_state.clone(), local_head.beacon_block_root, - &self.spec, )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -874,8 +868,7 @@ impl BeaconChain { let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state.clone_with(CloneConfig::none()), - &self.spec, + local_head.beacon_state.clone(), )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -1476,10 +1469,7 @@ impl BeaconChain { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. 
pub fn validator_index(&self, pubkey: &PublicKeyBytes) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get_index(pubkey)) } @@ -1492,10 +1482,7 @@ impl BeaconChain { &self, validator_pubkeys: impl Iterator, ) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); validator_pubkeys .map(|pubkey| { @@ -1520,10 +1507,7 @@ impl BeaconChain { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. pub fn validator_pubkey(&self, validator_index: usize) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get(validator_index).cloned()) } @@ -1533,11 +1517,7 @@ impl BeaconChain { &self, validator_index: usize, ) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; - + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get_pubkey_bytes(validator_index).copied()) } @@ -1550,10 +1530,7 @@ impl BeaconChain { &self, validator_indices: &[usize], ) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); let mut map = HashMap::with_capacity(validator_indices.len()); for &validator_index in validator_indices { @@ -3320,8 +3297,7 @@ impl BeaconChain { // would be 
difficult to check that they all lock fork choice first. let mut ops = self .validator_pubkey_cache - .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? + .write() .import_new_pubkeys(&state)?; // Apply the state to the attester cache, only if it is from the previous epoch or later. @@ -3542,29 +3518,6 @@ impl BeaconChain { }); } - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout) - .map(|mut snapshot_cache| { - snapshot_cache.insert( - BeaconSnapshot { - beacon_state: state, - beacon_block: signed_block.clone(), - beacon_block_root: block_root, - }, - None, - &self.spec, - ) - }) - .unwrap_or_else(|e| { - error!( - self.log, - "Failed to insert snapshot"; - "error" => ?e, - "task" => "process block" - ); - }); - self.head_tracker .register_block(block_root, parent_root, slot); @@ -3974,7 +3927,7 @@ impl BeaconChain { self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .ok_or(Error::AttestationCacheLockTimeout)? - .insert_committee_cache(shuffling_id, committee_cache); + .insert_value(shuffling_id, committee_cache); } } Ok(()) @@ -4145,22 +4098,22 @@ impl BeaconChain { self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); - // Producing a block requires the tree hash cache, so clone a full state corresponding to - // the head from the snapshot cache. Unfortunately we can't move the snapshot out of the - // cache (which would be fast), because we need to re-process the block after it has been - // signed. If we miss the cache or we're producing a block that conflicts with the head, - // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); // Atomically read some values from the head whilst avoiding holding cached head `Arc` any // longer than necessary. 
- let (head_slot, head_block_root) = { + let (head_slot, head_block_root, head_state_root) = { let head = self.canonical_head.cached_head(); - (head.head_slot(), head.head_block_root()) + ( + head.head_slot(), + head.head_block_root(), + head.head_state_root(), + ) }; let (state, state_root_opt) = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. - if let Some(re_org_state) = self.get_state_for_re_org(slot, head_slot, head_block_root) + if let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( self.log, @@ -4168,37 +4121,16 @@ impl BeaconChain { "slot" => slot, "head_to_reorg" => %head_block_root, ); - (re_org_state.pre_state, re_org_state.state_root) - } - // Normal case: proposing a block atop the current head using the cache. - else if let Some((_, cached_state)) = - self.get_state_from_block_production_cache(head_block_root) - { - (cached_state.pre_state, cached_state.state_root) - } - // Fall back to a direct read of the snapshot cache. - else if let Some(pre_state) = - self.get_state_from_snapshot_cache_for_block_production(head_block_root) - { - warn!( - self.log, - "Block production cache miss"; - "message" => "falling back to snapshot cache clone", - "slot" => slot - ); - (pre_state.pre_state, pre_state.state_root) + (re_org_state, Some(re_org_state_root)) } else { - warn!( - self.log, - "Block production cache miss"; - "message" => "this block is more likely to be orphaned", - "slot" => slot, - ); - let state = self - .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) - .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - - (state, None) + // Fetch the head state advanced through to `slot`, which should be present in the + // state cache thanks to the state advance timer. 
+ let (state_root, state) = self + .store + .get_advanced_hot_state(head_block_root, slot, head_state_root) + .map_err(BlockProductionError::FailedToLoadState)? + .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; + (state, Some(state_root)) } } else { warn!( @@ -4219,40 +4151,6 @@ impl BeaconChain { Ok((state, state_root_opt)) } - /// Get the state cached for block production *if* it matches `head_block_root`. - /// - /// This will clear the cache regardless of whether the block root matches, so only call this if - /// you think the `head_block_root` is likely to match! - fn get_state_from_block_production_cache( - &self, - head_block_root: Hash256, - ) -> Option<(Hash256, BlockProductionPreState)> { - // Take care to drop the lock as quickly as possible. - let mut lock = self.block_production_state.lock(); - let result = lock - .take() - .filter(|(cached_block_root, _)| *cached_block_root == head_block_root); - drop(lock); - result - } - - /// Get a state for block production from the snapshot cache. - fn get_state_from_snapshot_cache_for_block_production( - &self, - head_block_root: Hash256, - ) -> Option> { - if let Some(lock) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - { - let result = lock.get_state_for_block_production(head_block_root); - drop(lock); - result - } else { - None - } - } - /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. /// /// This function will return `None` if proposer re-orgs are disabled. 
@@ -4261,7 +4159,7 @@ impl BeaconChain { slot: Slot, head_slot: Slot, canonical_head: Hash256, - ) -> Option> { + ) -> Option<(BeaconState, Hash256)> { let re_org_head_threshold = self.config.re_org_head_threshold?; let re_org_parent_threshold = self.config.re_org_parent_threshold?; @@ -4344,30 +4242,20 @@ impl BeaconChain { .ok()?; drop(proposer_head_timer); let re_org_parent_block = proposer_head.parent_node.root; + let re_org_parent_state_root = proposer_head.parent_node.state_root; - // Only attempt a re-org if we hit the block production cache or snapshot cache. - let pre_state = self - .get_state_from_block_production_cache(re_org_parent_block) - .map(|(_, state)| state) - .or_else(|| { + // FIXME(sproul): consider not re-orging if we miss the cache + let (state_root, state) = self + .store + .get_advanced_hot_state(re_org_parent_block, slot, re_org_parent_state_root) + .map_err(|e| { warn!( self.log, - "Block production cache miss"; - "message" => "falling back to snapshot cache during re-org", - "slot" => slot, - "block_root" => ?re_org_parent_block + "Error loading block production state"; + "error" => ?e, ); - self.get_state_from_snapshot_cache_for_block_production(re_org_parent_block) }) - .or_else(|| { - debug!( - self.log, - "Not attempting re-org"; - "reason" => "missed snapshot cache", - "parent_block" => ?re_org_parent_block, - ); - None - })?; + .ok()??; info!( self.log, @@ -4378,7 +4266,7 @@ impl BeaconChain { "threshold_weight" => proposer_head.re_org_head_weight_threshold ); - Some(pre_state) + Some((state, state_root)) } /// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`. 
@@ -4503,23 +4391,10 @@ impl BeaconChain { let parent_block_root = forkchoice_update_params.head_root; + // FIXME(sproul): optimise this for tree-states let (unadvanced_state, unadvanced_state_root) = if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) - } else if let Some(snapshot) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout)? - .get_cloned(parent_block_root, CloneConfig::none()) - { - debug!( - self.log, - "Hit snapshot cache during withdrawals calculation"; - "slot" => proposal_slot, - "parent_block_root" => ?parent_block_root, - ); - let state_root = snapshot.beacon_state_root(); - (Cow::Owned(snapshot.beacon_state), state_root) } else { info!( self.log, @@ -4910,6 +4785,7 @@ impl BeaconChain { drop(slot_timer); state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + state.apply_pending_mutations()?; let parent_root = if state.slot() > 0 { *state @@ -4924,7 +4800,7 @@ impl BeaconChain { let pubkey = state .validators() .get(proposer_index as usize) - .map(|v| v.pubkey) + .map(|v| *v.pubkey()) .ok_or(BlockProductionError::BeaconChain( BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), ))?; @@ -6286,7 +6162,7 @@ impl BeaconChain { // access. drop(shuffling_cache); - let committee_cache = cache_item.wait()?; + let committee_cache = cache_item.wait().map_err(Error::ShufflingCacheError)?; map_fn(&committee_cache, shuffling_id.shuffling_decision_block) } else { // Create an entry in the cache that "promises" this value will eventually be computed. @@ -6295,7 +6171,9 @@ impl BeaconChain { // // Creating the promise whilst we hold the `shuffling_cache` lock will prevent the same // promise from being created twice. 
- let sender = shuffling_cache.create_promise(shuffling_id.clone())?; + let sender = shuffling_cache + .create_promise(shuffling_id.clone()) + .map_err(Error::ShufflingCacheError)?; // Drop the shuffling cache to avoid holding the lock for any longer than // required. @@ -6308,6 +6186,17 @@ impl BeaconChain { "head_block_root" => head_block_root.to_string(), ); + // If the block's state will be so far ahead of `shuffling_epoch` that even its + // previous epoch committee cache will be too new, then error. Callers of this function + // shouldn't be requesting such old shufflings for this `head_block_root`. + let head_block_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch()); + if head_block_epoch > shuffling_epoch + 1 { + return Err(Error::InvalidStateForShuffling { + state_epoch: head_block_epoch, + shuffling_epoch, + }); + } + let state_read_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); @@ -6318,71 +6207,52 @@ impl BeaconChain { // to copy the head is liable to race-conditions. let head_state_opt = self.with_head(|head| { if head.beacon_block_root == head_block_root { - Ok(Some(( - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - head.beacon_state_root(), - ))) + Ok(Some((head.beacon_state.clone(), head.beacon_state_root()))) } else { Ok::<_, Error>(None) } })?; + // Compute the `target_slot` to advance the block's state to. + // + // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to + // only advance into the first slot of the epoch prior to `shuffling_epoch`. + // + // If the `head_block` is already ahead of that slot, then we should load the state + // at that slot, as we've determined above that the `shuffling_epoch` cache will + // not be too far in the past. + let target_slot = std::cmp::max( + shuffling_epoch + .saturating_sub(1_u64) + .start_slot(T::EthSpec::slots_per_epoch()), + head_block.slot, + ); + // If the head state is useful for this request, use it. 
Otherwise, read a state from - // disk. + // disk that is advanced as close as possible to `target_slot`. let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { - let block_state_root = head_block.state_root; - let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch()); let (state_root, state) = self .store - .get_inconsistent_state_for_attestation_verification_only( - &head_block_root, - max_slot, - block_state_root, - )? - .ok_or(Error::MissingBeaconState(block_state_root))?; + .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? + .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; - /* - * IMPORTANT - * - * Since it's possible that - * `Store::get_inconsistent_state_for_attestation_verification_only` was used to obtain - * the state, we cannot rely upon the following fields: - * - * - `state.state_roots` - * - `state.block_roots` - * - * These fields should not be used for the rest of this function. - */ - metrics::stop_timer(state_read_timer); let state_skip_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); - // If the state is in an earlier epoch, advance it. If it's from a later epoch, reject - // it. + // If the state is still in an earlier epoch, advance it to the `target_slot` so + // that its next epoch committee cache matches the `shuffling_epoch`. if state.current_epoch() + 1 < shuffling_epoch { - // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to - // only advance into the slot prior to the `shuffling_epoch`. - let target_slot = shuffling_epoch - .saturating_sub(1_u64) - .start_slot(T::EthSpec::slots_per_epoch()); - - // Advance the state into the required slot, using the "partial" method since the state - // roots are not relevant for the shuffling. 
+ // Advance the state into the required slot, using the "partial" method since the + // state roots are not relevant for the shuffling. partial_state_advance(&mut state, Some(state_root), target_slot, &self.spec)?; - } else if state.current_epoch() > shuffling_epoch { - return Err(Error::InvalidStateForShuffling { - state_epoch: state.current_epoch(), - shuffling_epoch, - }); } - metrics::stop_timer(state_skip_timer); + let committee_building_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); @@ -6391,14 +6261,13 @@ impl BeaconChain { state.build_committee_cache(relative_epoch, &self.spec)?; - let committee_cache = state.take_committee_cache(relative_epoch)?; - let committee_cache = Arc::new(committee_cache); + let committee_cache = state.committee_cache(relative_epoch)?.clone(); let shuffling_decision_block = shuffling_id.shuffling_decision_block; self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .ok_or(Error::AttestationCacheLockTimeout)? - .insert_committee_cache(shuffling_id, &committee_cache); + .insert_value(shuffling_id, &committee_cache); metrics::stop_timer(committee_building_timer); @@ -6412,56 +6281,60 @@ impl BeaconChain { /// /// This could be a very expensive operation and should only be done in testing/analysis /// activities. + /// + /// This dump function previously used a backwards iterator but has been swapped to a forwards + /// iterator as it allows for MUCH better caching and rebasing. Memory usage of some tests went + /// from 5GB per test to 90MB. 
#[allow(clippy::type_complexity)] pub fn chain_dump( &self, ) -> Result>>, Error> { let mut dump = vec![]; - let mut last_slot = { - let head = self.canonical_head.cached_head(); - BeaconSnapshot { - beacon_block: Arc::new(head.snapshot.beacon_block.clone_as_blinded()), - beacon_block_root: head.snapshot.beacon_block_root, - beacon_state: head.snapshot.beacon_state.clone(), - } - }; + let mut prev_block_root = None; + let mut prev_beacon_state = None; - dump.push(last_slot.clone()); + for res in self.forwards_iter_block_roots(Slot::new(0))? { + let (beacon_block_root, _) = res?; - loop { - let beacon_block_root = last_slot.beacon_block.parent_root(); - - if beacon_block_root == Hash256::zero() { - break; // Genesis has been reached. + // Do not include snapshots at skipped slots. + if Some(beacon_block_root) == prev_block_root { + continue; } + prev_block_root = Some(beacon_block_root); let beacon_block = self .store - .get_blinded_block(&beacon_block_root)? + .get_blinded_block(&beacon_block_root, None)? .ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; let beacon_state_root = beacon_block.state_root(); - let beacon_state = self + + let mut beacon_state = self .store .get_state(&beacon_state_root, Some(beacon_block.slot()))? .ok_or_else(|| { Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root)) })?; - let slot = BeaconSnapshot { + // This beacon state might come from the freezer DB, which means it could have pending + // updates or lots of untethered memory. We rebase it on the previous state in order to + // address this. 
+ beacon_state.apply_pending_mutations()?; + if let Some(prev) = prev_beacon_state { + beacon_state.rebase_on(&prev, &self.spec)?; + } + beacon_state.build_caches(&self.spec)?; + prev_beacon_state = Some(beacon_state.clone()); + + let snapshot = BeaconSnapshot { beacon_block: Arc::new(beacon_block), beacon_block_root, beacon_state, }; - - dump.push(slot.clone()); - last_slot = slot; + dump.push(snapshot); } - - dump.reverse(); - Ok(dump) } @@ -6696,6 +6569,10 @@ impl BeaconChain { self.data_availability_checker.data_availability_boundary() } + pub fn logger(&self) -> &Logger { + &self.log + } + /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database. diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index ca390712b13..d10bbfbbc5f 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -17,8 +17,7 @@ use std::cmp::Ordering; use std::num::NonZeroUsize; use types::non_zero_usize::new_non_zero_usize; use types::{ - BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot, - Unsigned, + BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, }; /// The number of sets of proposer indices that should be cached. @@ -145,10 +144,7 @@ pub fn compute_proposer_duties_from_head( let (mut state, head_state_root, head_block_root) = { let head = chain.canonical_head.cached_head(); // Take a copy of the head state. 
- let head_state = head - .snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()); + let head_state = head.snapshot.beacon_state.clone(); let head_state_root = head.head_state_root(); let head_block_root = head.head_block_root(); (head_state, head_state_root, head_block_root) diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index afb13247766..e9fde48ac67 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,8 +1,8 @@ use serde::Serialize; use std::sync::Arc; use types::{ - beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, - SignedBeaconBlock, + AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, }; /// Represents some block and its associated state. Generally, this will be used for tracking the @@ -14,6 +14,19 @@ pub struct BeaconSnapshot = FullPayl pub beacon_state: BeaconState, } +/// This snapshot is to be used for verifying a child of `self.beacon_block`. +#[derive(Debug)] +pub struct PreProcessingSnapshot { + /// This state is equivalent to the `self.beacon_block.state_root()` state that has been + /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for + /// the application of another block. + pub pre_state: BeaconState, + /// This value is only set to `Some` if the `pre_state` was *not* advanced forward. + pub beacon_state_root: Option, + pub beacon_block: SignedBlindedBeaconBlock, + pub beacon_block_root: Hash256, +} + impl> BeaconSnapshot { /// Create a new checkpoint. 
pub fn new( @@ -48,12 +61,4 @@ impl> BeaconSnapshot { self.beacon_block_root = beacon_block_root; self.beacon_state = beacon_state; } - - pub fn clone_with(&self, clone_config: CloneConfig) -> Self { - Self { - beacon_block: self.beacon_block.clone(), - beacon_block_root: self.beacon_block_root, - beacon_state: self.beacon_state.clone_with(clone_config), - } - } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index a69f2b74524..496a11f93e0 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,23 +2,20 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::sync::Arc; -use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::block_verification::{ - cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, - BlockSlashInfo, + cheap_state_advance_to_obtain_committees, process_block_slash_info, BlockSlashInfo, }; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; use merkle_proof::MerkleTreeError; -use slog::{debug, warn}; +use slog::debug; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; -use types::{ - BeaconStateError, BlobSidecar, CloneConfig, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, -}; +use types::{BeaconStateError, BlobSidecar, EthSpec, Hash256, SignedBeaconBlockHeader, Slot}; /// An error occurred while validating a gossip blob. 
#[derive(Debug)] @@ -485,98 +482,42 @@ pub fn validate_blob_sidecar_for_gossip( "block_root" => %block_root, "index" => %blob_index, ); - if let Some(mut snapshot) = chain - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned(block_parent_root, CloneConfig::committee_caches_only()) - }) - { - if snapshot.beacon_state.slot() == blob_slot { - debug!( - chain.log, - "Cloning snapshot cache state for blob verification"; - "block_root" => %block_root, - "index" => %blob_index, - ); - ( - snapshot - .beacon_state - .get_beacon_proposer_index(blob_slot, &chain.spec)?, - snapshot.beacon_state.fork(), - ) - } else { - debug!( - chain.log, - "Cloning and advancing snapshot cache state for blob verification"; - "block_root" => %block_root, - "index" => %blob_index, - ); - let state = - cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( - &mut snapshot.beacon_state, - Some(snapshot.beacon_block_root), - blob_slot, - &chain.spec, - )?; - ( - state.get_beacon_proposer_index(blob_slot, &chain.spec)?, - state.fork(), - ) - } - } - // Need to advance the state to get the proposer index - else { - warn!( - chain.log, - "Snapshot cache miss for blob verification"; - "block_root" => %block_root, - "index" => %blob_index, - ); - - let parent_block = chain - .get_blinded_block(&block_parent_root) - .map_err(GossipBlobError::BeaconChainError)? - .ok_or_else(|| { - GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root)) - })?; - - let mut parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot()))? 
- .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing state {:?}", - parent_block.state_root() - )) - })?; - let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( - &mut parent_state, - Some(parent_block.state_root()), - blob_slot, - &chain.spec, - )?; - - let proposers = state.get_beacon_proposer_indices(&chain.spec)?; - let proposer_index = *proposers - .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) - .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; - - let fork = state.fork(); - // Prime the proposer shuffling cache with the newly-learned value. - chain.beacon_proposer_cache.lock().insert( - blob_epoch, - proposer_shuffling_root, - proposers, - fork, - )?; - (proposer_index, fork) - } + let (parent_state_root, mut parent_state) = chain + .store + .get_advanced_hot_state(block_parent_root, blob_slot, parent_block.state_root) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? + .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state for parent block {block_parent_root:?}", + )) + })?; + + let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + &mut parent_state, + Some(parent_state_root), + blob_slot, + &chain.spec, + )?; + + let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let proposer_index = *proposers + .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) + .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; + + // Prime the proposer shuffling cache with the newly-learned value. + chain.beacon_proposer_cache.lock().insert( + blob_epoch, + proposer_shuffling_root, + proposers, + state.fork(), + )?; + (proposer_index, state.fork()) }; // Signature verify the signed block header. 
let signature_is_valid = { - let pubkey_cache = - get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); + let pubkey = pubkey_cache .get(proposer_index) .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 3cd8a7f259b..461e54df719 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -48,6 +48,7 @@ // returned alongside. #![allow(clippy::result_large_err)] +use crate::beacon_snapshot::PreProcessingSnapshot; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_verification_types::{ AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock, @@ -59,14 +60,10 @@ use crate::execution_payload::{ AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; use crate::observed_block_producers::SeenBlock; -use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - beacon_chain::{ - BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, - }, + beacon_chain::{BeaconForkChoice, ForkChoiceError}, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; @@ -86,7 +83,7 @@ use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, state_advance::partial_state_advance, - BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + AllCaches, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, StateProcessingStrategy, VerifyBlockRoot, }; 
use std::borrow::Cow; @@ -94,14 +91,13 @@ use std::fmt::Debug; use std::fs; use std::io::Write; use std::sync::Arc; -use std::time::Duration; -use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use store::{Error as DBError, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, - ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExecutionBlockHash, + Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; use types::{BlobSidecar, ExecPayload}; @@ -617,7 +613,7 @@ pub fn signature_verify_chain_segment( // verify signatures let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = get_signature_verifier::(&state, &pubkey_cache, &chain.spec); for svb in &mut signature_verified_blocks { signature_verifier .include_all_signatures(svb.block.as_block(), &mut svb.consensus_context)?; @@ -1054,7 +1050,8 @@ impl SignatureVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = + get_signature_verifier::(&state, &pubkey_cache, &chain.spec); let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); @@ -1105,7 +1102,8 @@ impl SignatureVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = + get_signature_verifier::(&state, &pubkey_cache, &chain.spec); // Gossip verification 
has already checked the proposer index. Use it to check the RANDAO // signature. @@ -1426,52 +1424,31 @@ impl ExecutionPendingBlock { let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - let state_root = if parent.beacon_block.slot() == state.slot() { - // If it happens that `pre_state` has *not* already been advanced forward a single - // slot, then there is no need to compute the state root for this - // `per_slot_processing` call since that state root is already stored in the parent - // block. - parent.beacon_block.state_root() - } else { - // This is a new state we've reached, so stage it for storage in the DB. - // Computing the state root here is time-equivalent to computing it during slot - // processing, but we get early access to it. - let state_root = state.update_tree_hash_cache()?; - - // Store the state immediately, marking it as temporary, and staging the deletion - // of its temporary status as part of the larger atomic operation. - let txn_lock = chain.store.hot_db.begin_rw_transaction(); - let state_already_exists = - chain.store.load_hot_state_summary(&state_root)?.is_some(); - - let state_batch = if state_already_exists { - // If the state exists, it could be temporary or permanent, but in neither case - // should we rewrite it or store a new temporary flag for it. We *will* stage - // the temporary flag for deletion because it's OK to double-delete the flag, - // and we don't mind if another thread gets there first. - vec![] + let state_root = + if parent.beacon_block.slot() == state.slot() { + // If it happens that `pre_state` has *not* already been advanced forward a single + // slot, then there is no need to compute the state root for this + // `per_slot_processing` call since that state root is already stored in the parent + // block. 
+ parent.beacon_block.state_root() } else { - vec![ - if state.slot() % T::EthSpec::slots_per_epoch() == 0 { - StoreOp::PutState(state_root, &state) - } else { - StoreOp::PutStateSummary( - state_root, - HotStateSummary::new(&state_root, &state)?, - ) - }, - StoreOp::PutStateTemporaryFlag(state_root), - ] + // This is a new state we've reached, so stage it for storage in the DB. + // Computing the state root here is time-equivalent to computing it during slot + // processing, but we get early access to it. + let state_root = state.update_tree_hash_cache()?; + + // Store the state immediately, marking it as temporary, and staging the deletion + // of its temporary status as part of the larger atomic operation. + let txn_lock = chain.store.hot_db.begin_rw_transaction(); + chain.store.do_atomically_with_block_and_blobs_cache(vec![ + StoreOp::PutState(state_root, &state), + ])?; + drop(txn_lock); + + confirmed_state_roots.push(state_root); + + state_root }; - chain - .store - .do_atomically_with_block_and_blobs_cache(state_batch)?; - drop(txn_lock); - - confirmed_state_roots.push(state_root); - - state_root - }; if let Some(summary) = per_slot_processing(&mut state, Some(state_root), &chain.spec)? { // Expose Prometheus metrics. @@ -1523,8 +1500,7 @@ impl ExecutionPendingBlock { let committee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); - state.build_committee_cache(RelativeEpoch::Previous, &chain.spec)?; - state.build_committee_cache(RelativeEpoch::Current, &chain.spec)?; + state.build_all_committee_caches(&chain.spec)?; metrics::stop_timer(committee_timer); @@ -1840,12 +1816,10 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent>( - block_root: Hash256, + _block_root: Hash256, block: B, chain: &BeaconChain, ) -> Result<(PreProcessingSnapshot, B), BlockError> { - let spec = &chain.spec; - // Reject any block if its parent is not known to fork choice. 
// // A block that is not in fork choice is either: @@ -1864,44 +1838,9 @@ fn load_parent>( return Err(BlockError::ParentUnknown(block.into_rpc_block())); } - let block_delay = chain - .block_times_cache - .read() - .get_block_delays( - block_root, - chain - .slot_clock - .start_of(block.slot()) - .unwrap_or_else(|| Duration::from_secs(0)), - ) - .observed; - let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); - let result = if let Some((snapshot, cloned)) = chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|mut snapshot_cache| { - snapshot_cache.get_state_for_block_processing( - block.parent_root(), - block.slot(), - block_delay, - spec, - ) - }) { - if cloned { - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES); - debug!( - chain.log, - "Cloned snapshot for late block/skipped slot"; - "slot" => %block.slot(), - "parent_slot" => %snapshot.beacon_block.slot(), - "parent_root" => ?block.parent_root(), - "block_delay" => ?block_delay, - ); - } - Ok((snapshot, block)) - } else { + let result = { // Load the blocks parent block from the database, returning invalid if that block is not // found. // @@ -1926,7 +1865,7 @@ fn load_parent>( // Retrieve any state that is advanced through to at most `block.slot()`: this is // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). - let (parent_state_root, parent_state) = chain + let (parent_state_root, state) = chain .store .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? 
.ok_or_else(|| { @@ -1935,22 +1874,46 @@ fn load_parent>( ) })?; - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); - debug!( - chain.log, - "Missed snapshot cache"; - "slot" => block.slot(), - "parent_slot" => parent_block.slot(), - "parent_root" => ?block.parent_root(), - "block_delay" => ?block_delay, - ); + if !state.all_caches_built() { + slog::warn!( + chain.log, + "Parent state lacks built caches"; + "block_slot" => block.slot(), + "state_slot" => state.slot(), + ); + } + + if block.slot() != state.slot() { + slog::warn!( + chain.log, + "Parent state is not advanced"; + "block_slot" => block.slot(), + "state_slot" => state.slot(), + ); + } + + let beacon_state_root = if state.slot() == parent_block.slot() { + // Sanity check. + if parent_state_root != parent_block.state_root() { + return Err(BeaconChainError::DBInconsistent(format!( + "Parent state at slot {} has the wrong state root: {:?} != {:?}", + state.slot(), + parent_state_root, + parent_block.state_root() + )) + .into()); + } + Some(parent_block.state_root()) + } else { + None + }; Ok(( PreProcessingSnapshot { beacon_block: parent_block, beacon_block_root: root, - pre_state: parent_state, - beacon_state_root: Some(parent_state_root), + pre_state: state, + beacon_state_root, }, block, )) @@ -2031,7 +1994,7 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobEr } else if state.slot() > block_slot { Err(Err::not_later_than_parent_error(block_slot, state.slot())) } else { - let mut state = state.clone_with(CloneConfig::committee_caches_only()); + let mut state = state.clone(); let target_slot = block_epoch.start_slot(E::slots_per_epoch()); // Advance the state into the same epoch as the block. 
Use the "partial" method since state @@ -2050,10 +2013,7 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobEr pub fn get_validator_pubkey_cache( chain: &BeaconChain, ) -> Result>, BeaconChainError> { - chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) + Ok(chain.validator_pubkey_cache.read()) } /// Produces an _empty_ `BlockSignatureVerifier`. diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c1ebeb68bba..2904da28062 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -12,10 +12,8 @@ use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; -use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain, @@ -28,19 +26,20 @@ use futures::channel::mpsc::Sender; use kzg::{Kzg, TrustedSetup}; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; +use promise_cache::PromiseCache; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, debug, error, info, o, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; -use state_processing::per_slot_processing; +use state_processing::{per_slot_processing, AllCaches}; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ 
- BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, - Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Epoch, EthSpec, Graffiti, Hash256, + Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -92,7 +91,6 @@ pub struct BeaconChainBuilder { shutdown_sender: Option>, light_client_server_tx: Option>>, head_tracker: Option, - validator_pubkey_cache: Option>, spec: ChainSpec, chain_config: ChainConfig, log: Option, @@ -135,7 +133,6 @@ where shutdown_sender: None, light_client_server_tx: None, head_tracker: None, - validator_pubkey_cache: None, spec: E::default_spec(), chain_config: ChainConfig::default(), log: None, @@ -292,7 +289,7 @@ where .ok_or("Fork choice not found in store")?; let genesis_block = store - .get_blinded_block(&chain.genesis_block_root) + .get_blinded_block(&chain.genesis_block_root, Some(Slot::new(0))) .map_err(|e| descriptive_db_error("genesis block", &e))? .ok_or("Genesis block not found in store")?; let genesis_state = store @@ -462,7 +459,7 @@ where // Prime all caches before storing the state in the database and computing the tree hash // root. weak_subj_state - .build_caches(&self.spec) + .build_all_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; let weak_subj_state_root = weak_subj_state .update_tree_hash_cache() @@ -520,6 +517,12 @@ where let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; + // Build the committee caches before storing. The database assumes that states have + // committee caches built before storing. + weak_subj_state + .build_all_committee_caches(&self.spec) + .map_err(|e| format!("Error building caches on checkpoint state: {:?}", e))?; + // Fill in the linear block roots between the checkpoint block's slot and the aligned // state's slot. 
All slots less than the block's slot will be handled by block backfill, // while states greater or equal to the checkpoint state will be handled by `migrate_db`. @@ -537,6 +540,13 @@ where // Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten // about on a crash restart. + store + .update_finalized_state( + weak_subj_state_root, + weak_subj_block_root, + weak_subj_state.clone(), + ) + .map_err(|e| format!("Failed to set checkpoint state as finalized state: {:?}", e))?; store .put_state(&weak_subj_state_root, &weak_subj_state) .map_err(|e| format!("Failed to store weak subjectivity state: {e:?}"))?; @@ -565,13 +575,6 @@ where .map_err(|e| format!("Failed to initialize blob info: {:?}", e))?, ); - // Store pruning checkpoint to prevent attempting to prune before the anchor state. - self.pending_io_batch - .push(store.pruning_checkpoint_store_op(Checkpoint { - root: weak_subj_block_root, - epoch: weak_subj_state.slot().epoch(E::slots_per_epoch()), - })); - let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, beacon_block: Arc::new(weak_subj_block), @@ -860,10 +863,9 @@ where let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); - let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; - let snapshot_cache_size = self.chain_config.snapshot_cache_size; + let parallel_state_cache_size = self.chain_config.parallel_state_cache_size; // Calculate the weak subjectivity point in which to backfill blocks to. 
let genesis_backfill_slot = if self.chain_config.genesis_backfill { @@ -939,10 +941,6 @@ where fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, - snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( - snapshot_cache_size, - head_for_snapshot_cache, - )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, @@ -952,7 +950,12 @@ where beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), - validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), + parallel_state_cache: Arc::new(RwLock::new(PromiseCache::new( + parallel_state_cache_size, + Default::default(), + log.clone(), + ))), + validator_pubkey_cache, attester_cache: <_>::default(), early_attester_cache: <_>::default(), light_client_server_cache: LightClientServerCache::new(), @@ -970,7 +973,6 @@ where .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, ), kzg, - block_production_state: Arc::new(Mutex::new(None)), }; let head = beacon_chain.head_snapshot(); @@ -1270,14 +1272,15 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials.as_bytes(); + let creds = v.withdrawal_credentials(); + let creds = creds.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( &creds[1..], - &hash(&v.pubkey.as_ssz_bytes())[1..], + &hash(&v.pubkey().as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index ced4eda05cf..633e8365b6c 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -35,10 +35,7 @@ use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT; use crate::persisted_fork_choice::PersistedForkChoice; use crate::shuffling_cache::BlockShufflingIds; use crate::{ - 
beacon_chain::{ - BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, - BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, - }, + beacon_chain::{BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, FORK_CHOICE_DB_KEY}, block_times_cache::BlockTimesCache, events::ServerSentEventHandler, metrics, @@ -54,6 +51,7 @@ use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; +use state_processing::AllCaches; use std::sync::Arc; use std::time::Duration; use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; @@ -466,9 +464,7 @@ impl BeaconChain { pub fn head_beacon_state_cloned(&self) -> BeaconState { // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention. let snapshot: Arc<_> = self.head_snapshot(); - snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()) + snapshot.beacon_state.clone() } /// Execute the fork choice algorithm and enthrone the result as the canonical head. @@ -652,48 +648,31 @@ impl BeaconChain { let new_cached_head = if new_view.head_block_root != old_view.head_block_root { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling - // back to a database read if that fails. - let new_snapshot = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned( + let mut new_snapshot = { + let beacon_block = self + .store + .get_full_block(&new_view.head_block_root, None)? + .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + + let (_, beacon_state) = self + .store + .get_advanced_hot_state( new_view.head_block_root, - CloneConfig::committee_caches_only(), - ) - }) - .map::, _>(Ok) - .unwrap_or_else(|| { - let beacon_block = self - .store - .get_full_block(&new_view.head_block_root)? 
- .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - - let (_, beacon_state) = self - .store - .get_advanced_hot_state( - new_view.head_block_root, - current_slot, - beacon_block.state_root(), - )? - .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; - - Ok(BeaconSnapshot { - beacon_block: Arc::new(beacon_block), - beacon_block_root: new_view.head_block_root, - beacon_state, - }) - }) - .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. - snapshot - .beacon_state - .build_all_committee_caches(&self.spec) - .map_err(Into::into) - .map(|()| snapshot) - })?; + current_slot, + beacon_block.state_root(), + )? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; + + BeaconSnapshot { + beacon_block: Arc::new(beacon_block), + beacon_block_root: new_view.head_block_root, + beacon_state, + } + }; + + // Regardless of where we got the state from, attempt to build all the + // caches except the tree hash cache. + new_snapshot.beacon_state.build_all_caches(&self.spec)?; let new_cached_head = CachedHead { snapshot: Arc::new(new_snapshot), @@ -834,25 +813,6 @@ impl BeaconChain { .beacon_state .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - // Update the snapshot cache with the latest head value. - // - // This *could* be done inside `recompute_head`, however updating the head on the snapshot - // cache is not critical so we avoid placing it on a critical path. Note that this function - // will not return an error if the update fails, it will just log an error. 
- self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.update_head(new_snapshot.beacon_block_root); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "update head" - ); - }); - match BlockShufflingIds::try_from_head( new_snapshot.beacon_block_root, &new_snapshot.beacon_state, @@ -860,9 +820,7 @@ impl BeaconChain { Ok(head_shuffling_ids) => { self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .map(|mut shuffling_cache| { - shuffling_cache.update_head_shuffling_ids(head_shuffling_ids) - }) + .map(|mut shuffling_cache| shuffling_cache.update_protector(head_shuffling_ids)) .unwrap_or_else(|| { error!( self.log, @@ -998,26 +956,6 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_view.finalized_checkpoint.epoch); - debug!( - self.log, - "Snapshot cache pruned"; - "new_len" => snapshot_cache.len(), - "remaining_roots" => ?snapshot_cache.beacon_block_roots(), - ); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); - self.attester_cache .prune_below(new_view.finalized_checkpoint.epoch); diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 0772aff6710..545bdd20b7b 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -16,6 +16,9 @@ pub const DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR: u32 = 3; /// Fraction of a slot lookahead for fork choice in the state advance timer (500ms on mainnet). pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; +/// Cache only a small number of states in the parallel cache by default. 
+pub const DEFAULT_PARALLEL_STATE_CACHE_SIZE: usize = 2; + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing an attestation. @@ -75,8 +78,6 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, - /// The size of the snapshot cache. - pub snapshot_cache_size: usize, /// If using a weak-subjectivity sync, whether we should download blocks all the way back to /// genesis. pub genesis_backfill: bool, @@ -86,6 +87,8 @@ pub struct ChainConfig { pub always_prepare_payload: bool, /// Number of epochs between each migration of data from the hot database to the freezer. pub epochs_per_migration: u64, + /// Size of the promise cache for de-duplicating parallel state requests. + pub parallel_state_cache_size: usize, /// When set to true Light client server computes and caches state proofs for serving updates pub enable_light_client_server: bool, } @@ -116,10 +119,10 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. 
optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, - snapshot_cache_size: crate::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, + parallel_state_cache_size: DEFAULT_PARALLEL_STATE_CACHE_SIZE, enable_light_client_server: false, } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 50e07987fdf..9c74db1b933 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -907,7 +907,7 @@ mod test { let chain = &harness.chain; let log = chain.log.clone(); let head = chain.head_snapshot(); - let parent_state = head.beacon_state.clone_with_only_committee_caches(); + let parent_state = head.beacon_state.clone(); let target_slot = chain.slot().expect("should get slot") + 1; let parent_root = head.beacon_block_root; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 9c82e964cc0..11025d5937b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -214,8 +214,7 @@ pub enum BeaconChainError { }, AttestationHeadNotInForkChoice(Hash256), MissingPersistedForkChoice, - CommitteePromiseFailed(oneshot_broadcast::Error), - MaxCommitteePromises(usize), + ShufflingCacheError(promise_cache::PromiseCacheError), BlsToExecutionPriorToCapella, BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), @@ -279,6 +278,7 @@ pub enum BlockProductionError { TerminalPoWBlockLookupFailed(execution_layer::Error), GetPayloadFailed(execution_layer::Error), FailedToReadFinalizedBlock(store::Error), + FailedToLoadState(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ShuttingDown, 
diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3ec39f9d192..31297244e3e 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1020,6 +1020,7 @@ mod test { mod collect_valid_votes { use super::*; + use types::List; fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> { (0..n) @@ -1067,12 +1068,14 @@ mod test { let votes_to_consider = get_eth1_data_vec(slots, 0); - *state.eth1_data_votes_mut() = votes_to_consider[0..slots as usize / 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>() - .into(); + *state.eth1_data_votes_mut() = List::new( + votes_to_consider[0..slots as usize / 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>(), + ) + .unwrap(); let votes = collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect()); @@ -1096,12 +1099,14 @@ mod test { .expect("should have some eth1 data") .clone(); - *state.eth1_data_votes_mut() = vec![duplicate_eth1_data.clone(); 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>() - .into(); + *state.eth1_data_votes_mut() = List::new( + vec![duplicate_eth1_data.clone(); 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>(), + ) + .unwrap(); let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); assert_votes!( diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 71e2473cdcf..b7802cbb2e0 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -90,8 +90,8 @@ impl PartialEq for HeadTracker { /// This is used when persisting the state of the `BeaconChain` to disk. 
#[derive(Encode, Decode, Clone)] pub struct SszHeadTracker { - roots: Vec, - slots: Vec, + pub roots: Vec, + pub slots: Vec, } impl SszHeadTracker { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7721c9b00ff..7ee18de0351 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -45,20 +45,19 @@ pub mod observed_block_producers; pub mod observed_operations; mod observed_slashable; pub mod otb_verification_service; +mod parallel_state_cache; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; -pub mod snapshot_cache; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; pub mod validator_monitor; -pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, @@ -98,3 +97,13 @@ pub use state_processing::per_block_processing::errors::{ pub use store; pub use timeout_rw_lock::TimeoutRwLock; pub use types; + +pub mod validator_pubkey_cache { + use crate::BeaconChainTypes; + + pub type ValidatorPubkeyCache = store::ValidatorPubkeyCache< + ::EthSpec, + ::HotStore, + ::ColdStore, + >; +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b40f46da221..4970975f251 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,12 +4,8 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; -use std::time::Duration; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; -/// The maximum time to wait for the snapshot cache lock during a metrics scrape. 
-const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); - // Attestation simulator metrics pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL: &str = "validator_monitor_attestation_simulator_head_attester_hit_total"; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index ad597bf92aa..10cbe6378f0 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -6,7 +6,7 @@ use parking_lot::Mutex; use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::mem; -use std::sync::{mpsc, Arc}; +use std::sync::Arc; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::{migrate_database, HotColdDBError}; diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index b3de6f91c92..7db4e082142 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,240 +1,53 @@ -use std::collections::HashMap; -use std::sync::Arc; - -use itertools::Itertools; +use promise_cache::{PromiseCache, Protect}; use slog::{debug, Logger}; - -use oneshot_broadcast::{oneshot, Receiver, Sender}; use types::{ beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, RelativeEpoch, }; -use crate::{metrics, BeaconChainError}; - /// The size of the cache that stores committee caches for quicker verification. /// /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash + /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this /// ignores a few extra bytes in the caches that should be insignificant compared to the indices). -pub const DEFAULT_CACHE_SIZE: usize = 16; - -/// The maximum number of concurrent committee cache "promises" that can be issued. 
In effect, this -/// limits the number of concurrent states that can be loaded into memory for the committee cache. -/// This prevents excessive memory usage at the cost of rejecting some attestations. +/// +/// The cache size also determines the maximum number of concurrent committee cache "promises" that +/// can be issued. In effect, this limits the number of concurrent states that can be loaded into +/// memory for the committee cache. This prevents excessive memory usage at the cost of rejecting +/// some attestations. /// /// We set this value to 2 since states can be quite large and have a significant impact on memory /// usage. A healthy network cannot have more than a few committee caches and those caches should /// always be inserted during block import. Unstable networks with a high degree of forking might /// see some attestations dropped due to this concurrency limit, however I propose that this is /// better than low-resource nodes going OOM. -const MAX_CONCURRENT_PROMISES: usize = 2; - -#[derive(Clone)] -pub enum CacheItem { - /// A committee. - Committee(Arc), - /// A promise for a future committee. - Promise(Receiver>), -} - -impl CacheItem { - pub fn is_promise(&self) -> bool { - matches!(self, CacheItem::Promise(_)) - } - - pub fn wait(self) -> Result, BeaconChainError> { - match self { - CacheItem::Committee(cache) => Ok(cache), - CacheItem::Promise(receiver) => receiver - .recv() - .map_err(BeaconChainError::CommitteePromiseFailed), - } - } -} - -/// Provides a cache for `CommitteeCache`. -/// -/// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like -/// a find/replace error. 
-pub struct ShufflingCache { - cache: HashMap, - cache_size: usize, - head_shuffling_ids: BlockShufflingIds, - logger: Logger, -} - -impl ShufflingCache { - pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds, logger: Logger) -> Self { - Self { - cache: HashMap::new(), - cache_size, - head_shuffling_ids, - logger, - } - } - - pub fn get(&mut self, key: &AttestationShufflingId) -> Option { - match self.cache.get(key) { - // The cache contained the committee cache, return it. - item @ Some(CacheItem::Committee(_)) => { - metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); - item.cloned() - } - // The cache contains a promise for the committee cache. Check to see if the promise has - // already been resolved, without waiting for it. - item @ Some(CacheItem::Promise(receiver)) => match receiver.try_recv() { - // The promise has already been resolved. Replace the entry in the cache with a - // `Committee` entry and then return the committee. - Ok(Some(committee)) => { - metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS); - metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); - let ready = CacheItem::Committee(committee); - self.insert_cache_item(key.clone(), ready.clone()); - Some(ready) - } - // The promise has not yet been resolved. Return the promise so the caller can await - // it. - Ok(None) => { - metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS); - metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); - item.cloned() - } - // The sender has been dropped without sending a committee. There was most likely an - // error computing the committee cache. Drop the key from the cache and return - // `None` so the caller can recompute the committee. - // - // It's worth noting that this is the only place where we removed unresolved - // promises from the cache. This means unresolved promises will only be removed if - // we try to access them again. This is OK, since the promises don't consume much - // memory. 
We expect that *all* promises should be resolved, unless there is a - // programming or database error. - Err(oneshot_broadcast::Error::SenderDropped) => { - metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_FAILS); - metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES); - self.cache.remove(key); - None - } - }, - // The cache does not have this committee and it's not already promised to be computed. - None => { - metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES); - None - } - } - } - - pub fn contains(&self, key: &AttestationShufflingId) -> bool { - self.cache.contains_key(key) - } - - pub fn insert_committee_cache( - &mut self, - key: AttestationShufflingId, - committee_cache: &C, - ) { - if self - .cache - .get(&key) - // Replace the committee if it's not present or if it's a promise. A bird in the hand is - // worth two in the promise-bush! - .map_or(true, CacheItem::is_promise) - { - self.insert_cache_item( - key, - CacheItem::Committee(committee_cache.to_arc_committee_cache()), - ); - } - } - - /// Prunes the cache first before inserting a new cache item. - fn insert_cache_item(&mut self, key: AttestationShufflingId, cache_item: CacheItem) { - self.prune_cache(); - self.cache.insert(key, cache_item); - } - - /// Prunes the `cache` to keep the size below the `cache_size` limit, based on the following - /// preferences: - /// - Entries from more recent epochs are preferred over older ones. - /// - Entries with shuffling ids matching the head's previous, current, and future epochs must - /// not be pruned. 
- fn prune_cache(&mut self) { - let target_cache_size = self.cache_size.saturating_sub(1); - if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) { - let shuffling_ids_to_prune = self - .cache - .keys() - .sorted_by_key(|key| key.shuffling_epoch) - .filter(|shuffling_id| { - Some(shuffling_id) - != self - .head_shuffling_ids - .id_for_epoch(shuffling_id.shuffling_epoch) - .as_ref() - .as_ref() - }) - .take(prune_count) - .cloned() - .collect::>(); - - for shuffling_id in shuffling_ids_to_prune.iter() { - debug!( - self.logger, - "Removing old shuffling from cache"; - "shuffling_epoch" => shuffling_id.shuffling_epoch, - "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block - ); - self.cache.remove(shuffling_id); - } - } - } +pub const DEFAULT_CACHE_SIZE: usize = 16; - pub fn create_promise( - &mut self, - key: AttestationShufflingId, - ) -> Result>, BeaconChainError> { - let num_active_promises = self - .cache - .iter() - .filter(|(_, item)| item.is_promise()) - .count(); - if num_active_promises >= MAX_CONCURRENT_PROMISES { - return Err(BeaconChainError::MaxCommitteePromises(num_active_promises)); - } +impl Protect for BlockShufflingIds { + type SortKey = Epoch; - let (sender, receiver) = oneshot(); - self.insert_cache_item(key, CacheItem::Promise(receiver)); - Ok(sender) + fn sort_key(&self, k: &AttestationShufflingId) -> Epoch { + k.shuffling_epoch } - /// Inform the cache that the shuffling decision roots for the head has changed. - /// - /// The shufflings for the head's previous, current, and future epochs will never be ejected from - /// the cache during `Self::insert_cache_item`. 
- pub fn update_head_shuffling_ids(&mut self, head_shuffling_ids: BlockShufflingIds) { - self.head_shuffling_ids = head_shuffling_ids; + fn protect_from_eviction(&self, shuffling_id: &AttestationShufflingId) -> bool { + Some(shuffling_id) == self.id_for_epoch(shuffling_id.shuffling_epoch).as_ref() } -} -/// A helper trait to allow lazy-cloning of the committee cache when inserting into the cache. -pub trait ToArcCommitteeCache { - fn to_arc_committee_cache(&self) -> Arc; -} - -impl ToArcCommitteeCache for CommitteeCache { - fn to_arc_committee_cache(&self) -> Arc { - Arc::new(self.clone()) + fn notify_eviction(&self, shuffling_id: &AttestationShufflingId, logger: &Logger) { + debug!( + logger, + "Removing old shuffling from cache"; + "shuffling_epoch" => shuffling_id.shuffling_epoch, + "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block + ); } } -impl ToArcCommitteeCache for Arc { - fn to_arc_committee_cache(&self) -> Arc { - self.clone() - } -} +pub type ShufflingCache = PromiseCache; /// Contains the shuffling IDs for a beacon block. 
-#[derive(Clone)] +#[derive(Debug, Clone)] pub struct BlockShufflingIds { pub current: AttestationShufflingId, pub next: AttestationShufflingId, @@ -294,13 +107,13 @@ impl BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { + use super::*; + use crate::test_utils::EphemeralHarnessType; + use promise_cache::{CacheItem, PromiseCacheError}; + use std::sync::Arc; use task_executor::test_utils::null_logger; use types::*; - use crate::test_utils::EphemeralHarnessType; - - use super::*; - type E = MinimalEthSpec; type TestBeaconChainType = EphemeralHarnessType; type BeaconChainHarness = crate::test_utils::BeaconChainHarness; @@ -339,7 +152,7 @@ mod test { .clone(); let committee_b = state.committee_cache(RelativeEpoch::Next).unwrap().clone(); assert!(committee_a != committee_b); - (Arc::new(committee_a), Arc::new(committee_b)) + (committee_a, committee_b) } /// Builds a deterministic but incoherent shuffling ID from a `u64`. @@ -372,10 +185,10 @@ mod test { // Ensure the promise has been resolved. let item = cache.get(&id_a).unwrap(); assert!( - matches!(item, CacheItem::Committee(committee) if committee == committee_a), + matches!(item, CacheItem::Complete(committee) if committee == committee_a), "the promise should be resolved" ); - assert_eq!(cache.cache.len(), 1, "the cache should have one entry"); + assert_eq!(cache.len(), 1, "the cache should have one entry"); } #[test] @@ -399,7 +212,7 @@ mod test { // Ensure the key now indicates an empty slot. assert!(cache.get(&id_a).is_none(), "the slot should be empty"); - assert!(cache.cache.is_empty(), "the cache should be empty"); + assert!(cache.is_empty(), "the cache should be empty"); } #[test] @@ -433,7 +246,7 @@ mod test { // Ensure promise A has been resolved. 
let item = cache.get(&id_a).unwrap(); assert!( - matches!(item, CacheItem::Committee(committee) if committee == committee_a), + matches!(item, CacheItem::Complete(committee) if committee == committee_a), "promise A should be resolved" ); @@ -442,41 +255,40 @@ mod test { // Ensure promise B has been resolved. let item = cache.get(&id_b).unwrap(); assert!( - matches!(item, CacheItem::Committee(committee) if committee == committee_b), + matches!(item, CacheItem::Complete(committee) if committee == committee_b), "promise B should be resolved" ); // Check both entries again. assert!( - matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee) if committee == committee_a), + matches!(cache.get(&id_a).unwrap(), CacheItem::Complete(committee) if committee == committee_a), "promise A should remain resolved" ); assert!( - matches!(cache.get(&id_b).unwrap(), CacheItem::Committee(committee) if committee == committee_b), + matches!(cache.get(&id_b).unwrap(), CacheItem::Complete(committee) if committee == committee_b), "promise B should remain resolved" ); - assert_eq!(cache.cache.len(), 2, "the cache should have two entries"); + assert_eq!(cache.len(), 2, "the cache should have two entries"); } #[test] fn too_many_promises() { let mut cache = new_shuffling_cache(); - for i in 0..MAX_CONCURRENT_PROMISES { + for i in 0..cache.max_concurrent_promises() { cache.create_promise(shuffling_id(i as u64)).unwrap(); } // Ensure that the next promise returns an error. It is important for the application to // dump his ass when he can't keep his promises, you're a queen and you deserve better. 
assert!(matches!( - cache.create_promise(shuffling_id(MAX_CONCURRENT_PROMISES as u64)), - Err(BeaconChainError::MaxCommitteePromises( - MAX_CONCURRENT_PROMISES - )) + cache.create_promise(shuffling_id(cache.max_concurrent_promises() as u64)), + Err(PromiseCacheError::MaxConcurrentPromises(n)) + if n == cache.max_concurrent_promises() )); assert_eq!( - cache.cache.len(), - MAX_CONCURRENT_PROMISES, + cache.len(), + cache.max_concurrent_promises(), "the cache should have two entries" ); } @@ -486,9 +298,9 @@ mod test { let mut cache = new_shuffling_cache(); let id_a = shuffling_id(1); let committee_cache_a = Arc::new(CommitteeCache::default()); - cache.insert_committee_cache(id_a.clone(), &committee_cache_a); + cache.insert_value(id_a.clone(), &committee_cache_a); assert!( - matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee_cache) if committee_cache == committee_cache_a), + matches!(cache.get(&id_a).unwrap(), CacheItem::Complete(committee_cache) if committee_cache == committee_cache_a), "should insert committee cache" ); } @@ -501,7 +313,7 @@ mod test { .collect::>(); for (shuffling_id, committee_cache) in shuffling_id_and_committee_caches.iter() { - cache.insert_committee_cache(shuffling_id.clone(), committee_cache); + cache.insert_value(shuffling_id.clone(), committee_cache); } for i in 1..(TEST_CACHE_SIZE + 1) { @@ -515,11 +327,7 @@ mod test { !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), "should not contain oldest epoch shuffling id" ); - assert_eq!( - cache.cache.len(), - cache.cache_size, - "should limit cache size" - ); + assert_eq!(cache.len(), TEST_CACHE_SIZE, "should limit cache size"); } #[test] @@ -534,7 +342,7 @@ mod test { shuffling_epoch: (current_epoch + 1).into(), shuffling_decision_block: Hash256::from_low_u64_be(current_epoch + i as u64), }; - cache.insert_committee_cache(shuffling_id, &committee_cache); + cache.insert_value(shuffling_id, &committee_cache); } // Now, update the head shuffling ids @@ -544,12 
+352,12 @@ mod test { previous: Some(shuffling_id(current_epoch - 1)), block_root: Hash256::from_low_u64_le(42), }; - cache.update_head_shuffling_ids(head_shuffling_ids.clone()); + cache.update_protector(head_shuffling_ids.clone()); // Insert head state shuffling ids. Should not be overridden by other shuffling ids. - cache.insert_committee_cache(head_shuffling_ids.current.clone(), &committee_cache); - cache.insert_committee_cache(head_shuffling_ids.next.clone(), &committee_cache); - cache.insert_committee_cache( + cache.insert_value(head_shuffling_ids.current.clone(), &committee_cache); + cache.insert_value(head_shuffling_ids.next.clone(), &committee_cache); + cache.insert_value( head_shuffling_ids.previous.clone().unwrap(), &committee_cache, ); @@ -560,7 +368,7 @@ mod test { shuffling_epoch: Epoch::from(i), shuffling_decision_block: Hash256::from_low_u64_be(i as u64), }; - cache.insert_committee_cache(shuffling_id, &committee_cache); + cache.insert_value(shuffling_id, &committee_cache); } assert!( @@ -575,10 +383,6 @@ mod test { cache.contains(&head_shuffling_ids.previous.unwrap()), "should retain head shuffling id for previous epoch." 
); - assert_eq!( - cache.cache.len(), - cache.cache_size, - "should limit cache size" - ); + assert_eq!(cache.len(), TEST_CACHE_SIZE, "should limit cache size"); } } diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs index 2221aa1d5eb..9b35cff9432 100644 --- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -38,9 +38,26 @@ impl BeaconChain { })?; let mut balances = HashMap::::new(); + for &validator_index in &sync_committee_indices { + balances.insert( + validator_index, + *state + .balances() + .get(validator_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?, + ); + } + + let proposer_index = block.proposer_index() as usize; + balances.insert( + proposer_index, + *state + .balances() + .get(proposer_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?, + ); let mut total_proposer_rewards = 0; - let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; // Apply rewards to participant balances. Keep track of proposer rewards for (validator_index, participant_bit) in sync_committee_indices @@ -48,15 +65,15 @@ impl BeaconChain { .zip(sync_aggregate.sync_committee_bits.iter()) { let participant_balance = balances - .entry(*validator_index) - .or_insert_with(|| state.balances()[*validator_index]); + .get_mut(validator_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?; if participant_bit { participant_balance.safe_add_assign(participant_reward_value)?; balances - .entry(proposer_index) - .or_insert_with(|| state.balances()[proposer_index]) + .get_mut(&proposer_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)? 
.safe_add_assign(proposer_reward_per_bit)?; total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?; @@ -67,18 +84,17 @@ impl BeaconChain { Ok(balances .iter() - .filter_map(|(i, new_balance)| { - let reward = if *i != proposer_index { - *new_balance as i64 - state.balances()[*i] as i64 - } else if sync_committee_indices.contains(i) { - *new_balance as i64 - - state.balances()[*i] as i64 - - total_proposer_rewards as i64 + .filter_map(|(&i, &new_balance)| { + let initial_balance = *state.balances().get(i)? as i64; + let reward = if i != proposer_index { + new_balance as i64 - initial_balance + } else if sync_committee_indices.contains(&i) { + new_balance as i64 - initial_balance - total_proposer_rewards as i64 } else { return None; }; Some(SyncCommitteeReward { - validator_index: *i as u64, + validator_index: i as u64, reward, }) }) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 0a494e1d8a4..efbb4feaf08 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -45,10 +45,7 @@ use slog_async::Async; use slog_term::{FullFormat, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; -use state_processing::{ - state_advance::{complete_state_advance, partial_state_advance}, - StateProcessingStrategy, -}; +use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; @@ -754,10 +751,7 @@ where pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - ( - head.beacon_state.clone_with_only_committee_caches(), - state_root, - ) + (head.beacon_state.clone(), state_root) } pub fn head_slot(&self) -> Slot { @@ -800,8 +794,9 @@ where pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain 
.store - .load_hot_state(&state_hash.into(), StateProcessingStrategy::Accurate) + .load_hot_state(&state_hash.into()) .unwrap() + .map(|(state, _)| state) } pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option> { @@ -1015,9 +1010,7 @@ where return Err(BeaconChainError::CannotAttestToFutureState); } else if state.current_epoch() < epoch { let mut_state = state.to_mut(); - // Only perform a "partial" state advance since we do not require the state roots to be - // accurate. - partial_state_advance( + complete_state_advance( mut_state, Some(state_root), epoch.start_slot(E::slots_per_epoch()), diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index a63940074b4..e9993fcd397 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -493,10 +493,10 @@ impl ValidatorMonitor { .skip(self.indices.len()) .for_each(|(i, validator)| { let i = i as u64; - if let Some(validator) = self.validators.get_mut(&validator.pubkey) { + if let Some(validator) = self.validators.get_mut(validator.pubkey()) { validator.set_index(i) } - self.indices.insert(i, validator.pubkey); + self.indices.insert(i, *validator.pubkey()); }); // Add missed non-finalized blocks for the monitored validators @@ -536,12 +536,12 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI, &[id], - u64_to_i64(validator.effective_balance), + u64_to_i64(validator.effective_balance()), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_SLASHED, &[id], - i64::from(validator.slashed), + i64::from(validator.slashed()), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_ACTIVE, @@ -561,22 +561,22 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH, &[id], - u64_to_i64(validator.activation_eligibility_epoch), + u64_to_i64(validator.activation_eligibility_epoch()), ); 
metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_EPOCH, &[id], - u64_to_i64(validator.activation_epoch), + u64_to_i64(validator.activation_epoch()), ); metrics::set_int_gauge( &metrics::VALIDATOR_EXIT_EPOCH, &[id], - u64_to_i64(validator.exit_epoch), + u64_to_i64(validator.exit_epoch()), ); metrics::set_int_gauge( &metrics::VALIDATOR_WITHDRAWABLE_EPOCH, &[id], - u64_to_i64(validator.withdrawable_epoch), + u64_to_i64(validator.withdrawable_epoch()), ); } } diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 40910b9b9fe..02be7120ca9 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -170,7 +170,7 @@ async fn voluntary_exit_duplicate_in_state() { .validators() .get(exited_validator as usize) .unwrap() - .exit_epoch, + .exit_epoch(), spec.far_future_epoch ); @@ -274,14 +274,12 @@ async fn proposer_slashing_duplicate_in_state() { .await; // Verify validator is actually slashed. - assert!( - harness - .get_current_state() - .validators() - .get(slashed_validator as usize) - .unwrap() - .slashed - ); + assert!(harness + .get_current_state() + .validators() + .get(slashed_validator as usize) + .unwrap() + .slashed()); // Clear the in-memory gossip cache & try to verify the same slashing on gossip. // It should still fail because gossip verification should check the validator's `slashed` field @@ -402,14 +400,12 @@ async fn attester_slashing_duplicate_in_state() { .await; // Verify validator is actually slashed. - assert!( - harness - .get_current_state() - .validators() - .get(slashed_validator as usize) - .unwrap() - .slashed - ); + assert!(harness + .get_current_state() + .validators() + .get(slashed_validator as usize) + .unwrap() + .slashed()); // Clear the in-memory gossip cache & try to verify the same slashing on gossip. 
// It should still fail because gossip verification should check the validator's `slashed` field diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index f1262596f70..58b359125dd 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -223,7 +223,7 @@ impl InvalidPayloadRig { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let head = self.harness.chain.head_snapshot(); - let state = head.beacon_state.clone_with_only_committee_caches(); + let state = head.beacon_state.clone(); let slot = slot_override.unwrap_or(state.slot() + 1); let ((block, blobs), post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); @@ -2048,7 +2048,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .get_block_weight(&head.head_block_root()) .unwrap(), - head.snapshot.beacon_state.validators()[0].effective_balance, + head.snapshot.beacon_state.validators().get(0).unwrap().effective_balance(), "proposer boost should be removed from the head block and the vote of a single validator applied" ); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index a78463ef5d7..1c80525223a 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -105,8 +105,8 @@ async fn test_sync_committee_rewards() { .get_validator_index(&validator.pubkey) .unwrap() .unwrap(); - let pre_state_balance = parent_state.balances()[validator_index]; - let post_state_balance = state.balances()[validator_index]; + let pre_state_balance = *parent_state.balances().get(validator_index).unwrap(); + let post_state_balance = *state.balances().get(validator_index).unwrap(); let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0); if validator_index == proposer_index 
{ @@ -141,7 +141,7 @@ async fn test_verify_attestation_rewards_base() { ) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // extend slots to beginning of epoch N + 2 harness.extend_slots(E::slots_per_epoch() as usize).await; @@ -163,7 +163,7 @@ async fn test_verify_attestation_rewards_base() { let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -185,7 +185,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { AttestationStrategy::SomeValidators(half_validators.clone()), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // extend slots to beginning of epoch N + 2 harness.advance_slot(); @@ -215,7 +215,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -241,7 +241,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc // advance to create first justification epoch and get initial balances harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); //assert 
previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning assert_eq!( @@ -284,7 +284,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -298,7 +298,7 @@ async fn test_verify_attestation_rewards_altair() { harness .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map let mut proposal_rewards_map: HashMap = HashMap::new(); @@ -364,7 +364,7 @@ async fn test_verify_attestation_rewards_altair() { apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -386,7 +386,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { half_validators.clone(), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map let mut proposal_rewards_map: HashMap = HashMap::new(); @@ -458,7 +458,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); // verify expected balances against actual balances - let balances: Vec = 
harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -492,7 +492,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep // advance for first justification epoch and get balances harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map let mut proposal_rewards_map: HashMap = HashMap::new(); @@ -568,7 +568,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 66f4138afb4..24689c0b31a 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -836,6 +836,8 @@ async fn block_replayer_hooks() { assert_eq!(post_block_slots, block_slots); // States match. 
+ end_state.apply_pending_mutations().unwrap(); + replay_state.apply_pending_mutations().unwrap(); end_state.drop_all_caches().unwrap(); replay_state.drop_all_caches().unwrap(); assert_eq!(end_state, replay_state); @@ -1221,9 +1223,17 @@ fn check_shuffling_compatible( |committee_cache, _| { let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap(); if current_epoch_shuffling_is_compatible { - assert_eq!(committee_cache, state_cache, "block at slot {slot}"); + assert_eq!( + committee_cache, + state_cache.as_ref(), + "block at slot {slot}" + ); } else { - assert_ne!(committee_cache, state_cache, "block at slot {slot}"); + assert_ne!( + committee_cache, + state_cache.as_ref(), + "block at slot {slot}" + ); } Ok(()) }, @@ -1253,9 +1263,9 @@ fn check_shuffling_compatible( |committee_cache, _| { let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap(); if previous_epoch_shuffling_is_compatible { - assert_eq!(committee_cache, state_cache); + assert_eq!(committee_cache, state_cache.as_ref()); } else { - assert_ne!(committee_cache, state_cache); + assert_ne!(committee_cache, state_cache.as_ref()); } Ok(()) }, @@ -3609,16 +3619,16 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L /// Check that all the states in a chain dump have the correct tree hash. 
fn check_chain_dump(harness: &TestHarness, expected_len: u64) { - let chain_dump = harness.chain.chain_dump().unwrap(); + let mut chain_dump = harness.chain.chain_dump().unwrap(); let split_slot = harness.chain.store.get_split_slot(); assert_eq!(chain_dump.len() as u64, expected_len); - for checkpoint in &chain_dump { + for checkpoint in &mut chain_dump { // Check that the tree hash of the stored state is as expected assert_eq!( checkpoint.beacon_state_root(), - checkpoint.beacon_state.tree_hash_root(), + checkpoint.beacon_state.update_tree_hash_cache().unwrap(), "tree hash of stored state is incorrect" ); From eff12d77d831e3ae3beda18c7de910a581313ce8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:05:38 +1000 Subject: [PATCH 10/41] interop genesis --- beacon_node/genesis/src/interop.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index b4753e92f1f..f11eeeac09a 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -178,14 +178,15 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials.as_bytes(); + let creds = v.withdrawal_credentials(); assert_eq!( - creds[0], spec.bls_withdrawal_prefix_byte, + creds.as_bytes()[0], + spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( - &creds[1..], - &hash(&v.pubkey.as_ssz_bytes())[1..], + &creds.as_bytes()[1..], + &hash(&v.pubkey().as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } @@ -240,7 +241,8 @@ mod test { } for (index, v) in state.validators().iter().enumerate() { - let creds = v.withdrawal_credentials.as_bytes(); + let withdrawal_credientials = v.withdrawal_credentials(); + let creds = withdrawal_credientials.as_bytes(); if index % 2 == 0 { assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, From 7e66ce101fd5d4af3c6fd1dbf94d0638838c75d5 Mon Sep 17 
00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:10:47 +1000 Subject: [PATCH 11/41] http api --- beacon_node/http_api/src/attester_duties.rs | 7 +-- .../http_api/src/block_packing_efficiency.rs | 2 +- beacon_node/http_api/src/lib.rs | 16 +++--- beacon_node/http_api/src/proposer_duties.rs | 5 +- beacon_node/http_api/src/state_id.rs | 54 +++++++++++++++++-- beacon_node/http_api/src/validator.rs | 2 +- .../http_api/src/validator_inclusion.rs | 4 +- beacon_node/http_api/src/validators.rs | 4 +- beacon_node/http_api/tests/fork_tests.rs | 5 +- beacon_node/http_api/tests/tests.rs | 24 ++++----- 10 files changed, 81 insertions(+), 42 deletions(-) diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index f3242a2b374..6c7dc3348c1 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -5,9 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{self as api_types}; use slot_clock::SlotClock; use state_processing::state_advance::partial_state_advance; -use types::{ - AttestationDuty, BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, RelativeEpoch, -}; +use types::{AttestationDuty, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, RelativeEpoch}; /// The struct that is returned to the requesting HTTP client. 
type ApiDuties = api_types::DutiesResponse>; @@ -90,8 +88,7 @@ fn compute_historic_attester_duties( if head.beacon_state.current_epoch() <= request_epoch { Some(( head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), + head.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), )) } else { diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index d78f1f7c66e..f105fdf0a7d 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -279,7 +279,7 @@ pub fn get_block_packing_efficiency( )); let pre_slot_hook = - |state: &mut BeaconState| -> Result<(), PackingEfficiencyError> { + |_, state: &mut BeaconState| -> Result<(), PackingEfficiencyError> { // Add attestations to `available_attestations`. handler.lock().add_attestations(state.slot())?; Ok(()) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 9e6022dc954..42188a6c97c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -61,7 +61,6 @@ use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; -use std::borrow::Cow; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -870,10 +869,10 @@ pub fn serve( None }; - let committee_cache = if let Some(ref shuffling) = + let committee_cache = if let Some(shuffling) = maybe_cached_shuffling { - Cow::Borrowed(&**shuffling) + shuffling } else { let possibly_built_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) { @@ -882,16 +881,13 @@ pub fn serve( relative_epoch, ) => { - state - .committee_cache(relative_epoch) - .map(Cow::Borrowed) + state.committee_cache(relative_epoch).cloned() } _ => CommitteeCache::initialized( state, epoch, &chain.spec, - ) - .map(Cow::Owned), + ), } .map_err(|e| { match e { 
@@ -937,9 +933,9 @@ pub fn serve( .shuffling_cache .try_write_for(std::time::Duration::from_secs(1)) { - cache_write.insert_committee_cache( + cache_write.insert_value( shuffling_id, - &*possibly_built_cache, + &possibly_built_cache, ); } } diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index c31dd9b1faa..ab8952976c8 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -10,7 +10,7 @@ use safe_arith::SafeArith; use slog::{debug, Logger}; use slot_clock::SlotClock; use std::cmp::Ordering; -use types::{CloneConfig, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// The struct that is returned to the requesting HTTP client. type ApiDuties = api_types::DutiesResponse>; @@ -192,8 +192,7 @@ fn compute_historic_proposer_duties( if head.beacon_state.current_epoch() <= epoch { Some(( head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), + head.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), )) } else { diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 1a76333e2d4..c4b721f0411 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,6 +1,7 @@ use crate::ExecutionOptimistic; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; +use slog::{debug, warn}; use std::fmt; use std::str::FromStr; use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; @@ -178,10 +179,7 @@ impl StateId { .head_and_execution_status() .map_err(warp_utils::reject::beacon_chain_error)?; return Ok(( - cached_head - .snapshot - .beacon_state - .clone_with_only_committee_caches(), + cached_head.snapshot.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), false, )); @@ -190,6 +188,49 @@ impl StateId { _ => (self.root(chain)?, None), }; + 
let mut opt_state_cache = Some(chain.parallel_state_cache.write()); + + // Try the cache. + if let Some(cache_item) = opt_state_cache + .as_mut() + .and_then(|cache| cache.get(&state_root)) + { + drop(opt_state_cache.take()); + match cache_item.wait() { + Ok(state) => { + debug!( + chain.logger(), + "HTTP state cache hit"; + "state_root" => ?state_root, + "slot" => state.slot(), + ); + return Ok(((*state).clone(), execution_optimistic, finalized)); + } + Err(e) => { + warn!( + chain.logger(), + "State promise failed"; + "state_root" => ?state_root, + "outcome" => "re-computing", + "error" => ?e, + ); + } + } + } + + // Re-lock only in case of failed promise. + debug!( + chain.logger(), + "HTTP state cache miss"; + "state_root" => ?state_root + ); + let mut state_cache = opt_state_cache.unwrap_or_else(|| chain.parallel_state_cache.write()); + + let sender = state_cache.create_promise(state_root).map_err(|e| { + warp_utils::reject::custom_server_error(format!("too many concurrent requests: {e:?}")) + })?; + drop(state_cache); + let state = chain .get_state(&state_root, slot_opt) .map_err(warp_utils::reject::beacon_chain_error) @@ -202,6 +243,11 @@ impl StateId { }) })?; + // Fulfil promise (and re-lock again). 
+ let mut state_cache = chain.parallel_state_cache.write(); + state_cache.resolve_promise(sender, state_root, &state); + drop(state_cache); + Ok((state, execution_optimistic, finalized)) } diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index 7f11ddd8f43..f54c6424313 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -14,7 +14,7 @@ pub fn pubkey_to_validator_index( state .validators() .get(index) - .map_or(false, |v| v.pubkey == *pubkey) + .map_or(false, |v| *v.pubkey == *pubkey) }) .map(Result::Ok) .transpose() diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index dd4e137ce66..0a257725741 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -95,13 +95,13 @@ pub fn validator_inclusion_data( let summary = get_epoch_processing_summary(&mut state, &chain.spec)?; Ok(Some(ValidatorInclusionData { - is_slashed: validator.slashed, + is_slashed: validator.slashed(), is_withdrawable_in_current_epoch: validator.is_withdrawable_at(epoch), is_active_unslashed_in_current_epoch: summary .is_active_unslashed_in_current_epoch(validator_index), is_active_unslashed_in_previous_epoch: summary .is_active_unslashed_in_previous_epoch(validator_index), - current_epoch_effective_balance_gwei: validator.effective_balance, + current_epoch_effective_balance_gwei: validator.effective_balance(), is_current_epoch_target_attester: summary .is_current_epoch_target_attester(validator_index) .map_err(convert_cache_error)?, diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs index 20af7a680df..69765d79199 100644 --- a/beacon_node/http_api/src/validators.rs +++ b/beacon_node/http_api/src/validators.rs @@ -29,7 +29,7 @@ pub fn get_beacon_state_validators( .filter(|(index, (validator, _))| { query_ids.as_ref().map_or(true, |ids| { 
ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, + ValidatorId::PublicKey(pubkey) => validator.pubkey() == pubkey, ValidatorId::Index(param_index) => { *param_index == *index as u64 } @@ -93,7 +93,7 @@ pub fn get_beacon_state_validator_balances( .filter(|(index, (validator, _))| { optional_ids.map_or(true, |ids| { ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, + ValidatorId::PublicKey(pubkey) => validator.pubkey() == pubkey, ValidatorId::Index(param_index) => { *param_index == *index as u64 } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 74b26475639..ad32ff1d579 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -128,17 +128,18 @@ async fn attestations_across_fork_with_skip_slots() { let all_validators = harness.get_all_validators(); let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); - let fork_state = harness + let mut fork_state = harness .chain .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots) .unwrap(); + let fork_state_root = fork_state.update_tree_hash_cache().unwrap(); harness.set_current_slot(fork_slot); let attestations = harness.make_attestations( &all_validators, &fork_state, - fork_state.canonical_root(), + fork_state_root, (*fork_state.get_block_root(fork_slot - 1).unwrap()).into(), fork_slot, ); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index e4580e4ffdb..8536f0265e3 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -806,7 +806,7 @@ impl ApiTester { let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { Some((state, _execution_optimistic, _finalized)) => { - state.validators().clone().into() + state.validators().clone().to_vec() } None => vec![], }; @@ -822,7 +822,7 @@ impl ApiTester { 
ValidatorId::PublicKey( validators .get(i as usize) - .map_or(PublicKeyBytes::empty(), |val| val.pubkey.clone()), + .map_or(PublicKeyBytes::empty(), |val| *val.pubkey), ) }) .collect::>(); @@ -865,7 +865,7 @@ impl ApiTester { if i < state.balances().len() as u64 { validators.push(ValidatorBalanceData { index: i as u64, - balance: state.balances()[i as usize], + balance: *state.balances().get(i as usize).unwrap(), }); } } @@ -892,7 +892,7 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); let validators: Vec = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some(state) => state.validators().to_vec(), None => vec![], }; let validator_index_ids = validator_indices @@ -907,7 +907,7 @@ impl ApiTester { ValidatorId::PublicKey( validators .get(i as usize) - .map_or(PublicKeyBytes::empty(), |val| val.pubkey.clone()), + .map_or(PublicKeyBytes::empty(), |val| *val.pubkey), ) }) .collect::>(); @@ -955,7 +955,7 @@ impl ApiTester { if i >= state.validators().len() as u64 { continue; } - let validator = state.validators()[i as usize].clone(); + let validator = state.validators().get(i as usize).unwrap().clone(); let status = ValidatorStatus::from_validator( &validator, epoch, @@ -967,7 +967,7 @@ impl ApiTester { { validators.push(ValidatorData { index: i as u64, - balance: state.balances()[i as usize], + balance: *state.balances().get(i as usize).unwrap(), status, validator, }); @@ -995,13 +995,13 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); let validators = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some(state) => state.validators().to_vec(), None => vec![], }; for (i, validator) in validators.into_iter().enumerate() { let validator_ids = &[ - ValidatorId::PublicKey(validator.pubkey.clone()), + ValidatorId::PublicKey(*validator.pubkey), ValidatorId::Index(i as u64), ]; @@ -1025,7 +1025,7 @@ impl ApiTester { ValidatorData { index: i as 
u64, - balance: state.balances()[i], + balance: *state.balances().get(i).unwrap(), status: ValidatorStatus::from_validator( &validator, epoch, @@ -2360,7 +2360,7 @@ impl ApiTester { .unwrap() { let expected = AttesterData { - pubkey: state.validators()[i as usize].pubkey.clone().into(), + pubkey: *state.validators().get(i as usize).unwrap().pubkey, validator_index: i, committees_at_slot: duty.committees_at_slot, committee_index: duty.index, @@ -2465,7 +2465,7 @@ impl ApiTester { let index = state .get_beacon_proposer_index(slot, &self.chain.spec) .unwrap(); - let pubkey = state.validators()[index].pubkey.clone().into(); + let pubkey = *state.validators().get(index).unwrap().pubkey; ProposerData { pubkey, From c34ccab3e0b4b86bb0907957d521046ea3d8b8c0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:13:16 +1000 Subject: [PATCH 12/41] lighthouse --- lighthouse/tests/beacon_node.rs | 39 ++++++++++---------- testing/state_transition_vectors/src/exit.rs | 19 ++++++---- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 68d8e46eb02..325c3f9dbce 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -172,26 +172,6 @@ fn shuffling_cache_set() { .with_config(|config| assert_eq!(config.chain.shuffling_cache_size, 500)); } -#[test] -fn snapshot_cache_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.chain.snapshot_cache_size, - beacon_node::beacon_chain::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE - ) - }); -} - -#[test] -fn snapshot_cache_set() { - CommandLineTest::new() - .flag("state-cache-size", Some("500")) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.snapshot_cache_size, 500)); -} - #[test] fn fork_choice_before_proposal_timeout_default() { CommandLineTest::new() @@ -1849,6 +1829,25 @@ fn historic_state_cache_size_default() { }); } #[test] +fn 
parallel_state_cache_size_flag() { + CommandLineTest::new() + .flag("parallel-state-cache-size", Some("4")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.parallel_state_cache_size, 4_usize)); +} +#[test] +fn parallel_state_cache_size_default() { + use beacon_node::beacon_chain::chain_config::DEFAULT_PARALLEL_STATE_CACHE_SIZE; + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.parallel_state_cache_size, + DEFAULT_PARALLEL_STATE_CACHE_SIZE + ); + }); +} +#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false")) diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index e3cd346da13..4c3b0c4f44a 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -170,7 +170,7 @@ vectors_and_tests!( invalid_exit_already_initiated, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH + 1; + *state.validators_mut().get_mut(0).unwrap().exit_epoch_mut() = STATE_EPOCH + 1; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -189,8 +189,11 @@ vectors_and_tests!( invalid_not_active_before_activation_epoch, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut().get_mut(0).unwrap().activation_epoch = - E::default_spec().far_future_epoch; + *state + .validators_mut() + .get_mut(0) + .unwrap() + .activation_epoch_mut() = E::default_spec().far_future_epoch; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -209,7 +212,7 @@ vectors_and_tests!( invalid_not_active_after_exit_epoch, ExitTest { state_modifier: Box::new(|state| { - state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH; + *state.validators_mut().get_mut(0).unwrap().exit_epoch_mut() = STATE_EPOCH; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -332,17 +335,17 @@ mod 
custom_tests { fn assert_exited(state: &BeaconState, validator_index: usize) { let spec = E::default_spec(); - let validator = &state.validators()[validator_index]; + let validator = &state.validators().get(validator_index).unwrap(); assert_eq!( - validator.exit_epoch, + validator.exit_epoch(), // This is correct until we exceed the churn limit. If that happens, we // need to introduce more complex logic. state.current_epoch() + 1 + spec.max_seed_lookahead, "exit epoch" ); assert_eq!( - validator.withdrawable_epoch, - validator.exit_epoch + E::default_spec().min_validator_withdrawability_delay, + validator.withdrawable_epoch(), + validator.exit_epoch() + E::default_spec().min_validator_withdrawability_delay, "withdrawable epoch" ); } From 0adb24f04ae074cf057277ebb10275c76bd8dc3a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:17:14 +1000 Subject: [PATCH 13/41] op pool --- beacon_node/operation_pool/src/lib.rs | 36 +++++++++++++++++++++------ 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 03659bcee05..fee8a49c51d 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -392,7 +392,7 @@ impl OperationPool { && state .validators() .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + .map_or(false, |validator| !validator.slashed()) }, |slashing| slashing.as_inner().clone(), E::MaxProposerSlashings::to_usize(), @@ -451,7 +451,7 @@ impl OperationPool { pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch() <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -470,7 +470,7 @@ impl OperationPool { // // We cannot check the `slashed` 
field since the `head` is not finalized and // a fork could un-slash someone. - validator.exit_epoch > head_state.finalized_checkpoint().epoch + validator.exit_epoch() > head_state.finalized_checkpoint().epoch }) .map_or(false, |indices| !indices.is_empty()); @@ -527,7 +527,7 @@ impl OperationPool { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently. - |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch() <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -1272,7 +1272,12 @@ mod release_tests { // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). for i in 0..state.validators().len() { - state.validators_mut()[i].effective_balance = 1_000_000_000 * i as u64; + state + .validators_mut() + .get_mut(i) + .unwrap() + .mutable + .effective_balance = 1_000_000_000 * i as u64; } let num_validators = num_committees @@ -1530,9 +1535,24 @@ mod release_tests { let spec = &harness.spec; let mut state = harness.get_current_state(); let op_pool = OperationPool::::new(); - state.validators_mut()[1].effective_balance = 17_000_000_000; - state.validators_mut()[2].effective_balance = 17_000_000_000; - state.validators_mut()[3].effective_balance = 17_000_000_000; + state + .validators_mut() + .get_mut(1) + .unwrap() + .mutable + .effective_balance = 17_000_000_000; + state + .validators_mut() + .get_mut(2) + .unwrap() + .mutable + .effective_balance = 17_000_000_000; + state + .validators_mut() + .get_mut(3) + .unwrap() + .mutable + .effective_balance = 17_000_000_000; let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3]); let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]); From 3d09bf5872b0fa8f72a199f60a52078339ec364f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:18:10 +1000 Subject: [PATCH 14/41] 
beacon chain misc --- .../network/src/network_beacon_processor/gossip_methods.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index f7bba900372..c9f8cb381c9 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2295,7 +2295,7 @@ impl NetworkBeaconProcessor { debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - e @ AttnError::BeaconChainError(BeaconChainError::MaxCommitteePromises(_)) => { + AttnError::BeaconChainError(BeaconChainError::ShufflingCacheError(e)) => { debug!( self.log, "Dropping attestation"; From a197ffb4308f46a205b8f11984309ef9b2e0badc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:19:01 +1000 Subject: [PATCH 15/41] parallel state cache --- .../beacon_chain/src/parallel_state_cache.rs | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 beacon_node/beacon_chain/src/parallel_state_cache.rs diff --git a/beacon_node/beacon_chain/src/parallel_state_cache.rs b/beacon_node/beacon_chain/src/parallel_state_cache.rs new file mode 100644 index 00000000000..d568d3248cd --- /dev/null +++ b/beacon_node/beacon_chain/src/parallel_state_cache.rs @@ -0,0 +1,22 @@ +use promise_cache::{PromiseCache, Protect}; +use types::{BeaconState, Hash256}; + +#[derive(Debug, Default)] +pub struct ParallelStateProtector; + +impl Protect for ParallelStateProtector { + type SortKey = usize; + + /// Evict in arbitrary (hashmap) order by using the same key for every value. + fn sort_key(&self, _: &Hash256) -> Self::SortKey { + 0 + } + + /// We don't care too much about preventing evictions of particular states here. 
All the states + /// in this cache should be different from the head state. + fn protect_from_eviction(&self, _: &Hash256) -> bool { + false + } +} + +pub type ParallelStateCache = PromiseCache, ParallelStateProtector>; From 4139cbe34f80db1ae382117ef59944de71a6cf70 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 12:25:10 +1000 Subject: [PATCH 16/41] store --- beacon_node/store/Cargo.toml | 6 + beacon_node/store/src/errors.rs | 6 + beacon_node/store/src/hot_cold_store.rs | 114 ++++++---- beacon_node/store/src/iter.rs | 10 +- beacon_node/store/src/state_cache.rs | 283 ++++++++++++++++++++++++ 5 files changed, 373 insertions(+), 46 deletions(-) create mode 100644 beacon_node/store/src/state_cache.rs diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7bf1ef76bef..288d167b419 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,3 +25,9 @@ lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } strum = { workspace = true } +xdelta3 = { workspace = true } +zstd = { workspace = true } +safe_arith = { workspace = true } +bls = { workspace = true } +smallvec = { workspace = true } +logging = { workspace = true } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 96e02b80ff8..ea8025dcabb 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -113,6 +113,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: EpochCacheError) -> Error { + Error::CacheBuildError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 70e02164e08..6aa50eae0ce 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -66,12 +66,22 @@ pub struct HotColdDB, Cold: ItemStore> { pub hot_db: Hot, /// LRU cache of deserialized blocks and blobs. 
Updated whenever a block or blob is loaded. block_cache: Mutex>, + /// Cache of beacon states. + /// + /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. + state_cache: Mutex>, + /// Immutable validator cache. + pub immutable_validators: Arc>>, /// LRU cache of replayed states. - state_cache: Mutex>>, + // FIXME(sproul): re-enable historic state cache + #[allow(dead_code)] + historic_state_cache: Mutex>>, + /// Cache of hierarchical diff buffers. + diff_buffer_cache: Mutex>, /// Chain spec. pub(crate) spec: ChainSpec, /// Logger. - pub(crate) log: Logger, + pub log: Logger, /// Mere vessel for E. _phantom: PhantomData, } @@ -192,8 +202,6 @@ impl HotColdDB, MemoryStore> { impl HotColdDB, LevelDB> { /// Open a new or existing database, with the given paths to the hot and cold DBs. /// - /// The `slots_per_restore_point` parameter must be a divisor of `SLOTS_PER_HISTORICAL_ROOT`. - /// /// The `migrate_schema` function is passed in so that the parent `BeaconChain` can provide /// context and access `BeaconChain`-level code without creating a circular dependency. pub fn open( @@ -352,6 +360,21 @@ impl HotColdDB, LevelDB> { } impl, Cold: ItemStore> HotColdDB { + pub fn update_finalized_state( + &self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + ) -> Result<(), Error> { + self.state_cache + .lock() + .update_finalized_state(state_root, block_root, state) + } + + pub fn state_cache_len(&self) -> usize { + self.state_cache.lock().len() + } + /// Store a block and update the LRU cache. pub fn put_block( &self, @@ -648,19 +671,17 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.load_hot_state(state_root, StateProcessingStrategy::Accurate) + self.get_hot_state(state_root) } } else { - match self.load_hot_state(state_root, StateProcessingStrategy::Accurate)? 
{ + match self.get_hot_state(state_root)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } } } - /// Fetch a state from the store, but don't compute all of the values when replaying blocks - /// upon that state (e.g., state roots). Additionally, only states from the hot store are - /// returned. + /// Get a state with `latest_block_root == block_root` advanced through to at most `slot`. /// /// See `Self::get_advanced_hot_state` for information about `max_slot`. /// @@ -706,6 +727,13 @@ impl, Cold: ItemStore> HotColdDB max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { + if let Some(cached) = self + .state_cache + .lock() + .get_by_block_root(block_root, max_slot) + { + return Ok(Some(cached)); + } self.get_advanced_hot_state_with_strategy( block_root, max_slot, @@ -715,12 +743,13 @@ impl, Cold: ItemStore> HotColdDB } /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`. + // FIXME(sproul): delete the state processing strategy stuff again pub fn get_advanced_hot_state_with_strategy( &self, block_root: Hash256, max_slot: Slot, state_root: Hash256, - state_processing_strategy: StateProcessingStrategy, + _state_processing_strategy: StateProcessingStrategy, ) -> Result)>, Error> { // Hold a read lock on the split point so it can't move while we're trying to load the // state. @@ -741,11 +770,11 @@ impl, Cold: ItemStore> HotColdDB } else { state_root }; - let state = self - .load_hot_state(&state_root, state_processing_strategy)? - .map(|state| (state_root, state)); + let opt_state = self + .load_hot_state(&state_root)? + .map(|(state, _block_root)| (state_root, state)); drop(split); - Ok(state) + Ok(opt_state) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -755,17 +784,10 @@ impl, Cold: ItemStore> HotColdDB /// (which are frozen, and won't be deleted), or valid descendents of the finalized checkpoint /// (which will be deleted by this function but shouldn't be). 
pub fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> { - // Delete the state summary. - self.hot_db - .key_delete(DBColumn::BeaconStateSummary.into(), state_root.as_bytes())?; - - // Delete the full state if it lies on an epoch boundary. - if slot % E::slots_per_epoch() == 0 { - self.hot_db - .key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())?; - } - - Ok(()) + self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::DeleteState( + *state_root, + Some(slot), + )]) } pub fn forwards_block_roots_iterator( @@ -1021,19 +1043,20 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutState(_, _) => (), - StoreOp::PutStateSummary(_, _) => (), - StoreOp::PutStateTemporaryFlag(_) => (), StoreOp::DeleteStateTemporaryFlag(_) => (), StoreOp::DeleteBlock(block_root) => { guard.delete_block(&block_root); + self.state_cache.lock().delete_block_states(&block_root); } - StoreOp::DeleteBlobs(_) => (), + StoreOp::DeleteState(state_root, _) => { + self.state_cache.lock().delete_state(&state_root) + } - StoreOp::DeleteState(_, _) => (), + StoreOp::DeleteBlobs(_) => (), StoreOp::DeleteExecutionPayload(_) => (), @@ -1390,35 +1413,33 @@ impl, Cold: ItemStore> HotColdDB /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. 
- fn replay_blocks( + pub fn replay_blocks( &self, state: BeaconState, blocks: Vec>>, target_slot: Slot, - state_root_iter: Option>>, - state_processing_strategy: StateProcessingStrategy, + state_root_iter: impl Iterator>, + pre_slot_hook: Option>, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) - .state_processing_strategy(state_processing_strategy) .no_signature_verification() - .minimal_block_root_verification(); + .minimal_block_root_verification() + .state_root_iter(state_root_iter); - let have_state_root_iterator = state_root_iter.is_some(); - if let Some(state_root_iter) = state_root_iter { - block_replayer = block_replayer.state_root_iter(state_root_iter); + if let Some(pre_slot_hook) = pre_slot_hook { + block_replayer = block_replayer.pre_slot_hook(pre_slot_hook); } block_replayer .apply_blocks(blocks, Some(target_slot)) .map(|block_replayer| { - if have_state_root_iterator && block_replayer.state_root_miss() { + if block_replayer.state_root_miss() { warn!( self.log, - "State root iterator miss"; + "State root cache miss during block replay"; "slot" => target_slot, ); } - block_replayer.into_state() }) } @@ -2528,15 +2549,22 @@ pub fn migrate_database, Cold: ItemStore>( }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; - // Split point is now persisted in the hot database on disk. The in-memory split point - // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update + // Split point is now persisted in the hot database on disk. The in-memory split point + // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update // the in-memory split point now. *split_guard = split; } - // Delete the states from the hot database if we got this far. + // Delete the blocks and states from the hot database if we got this far. store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?; + // Update the cache's view of the finalized state. 
+ store.update_finalized_state( + finalized_state_root, + finalized_block_root, + finalized_state.clone(), + )?; + debug!( store.log, "Freezer migration complete"; diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index e459c1c3575..03090ca14c5 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -412,15 +412,16 @@ mod test { let mut hashes = (0..).map(Hash256::from_low_u64_be); let roots_a = state_a.block_roots_mut(); for i in 0..roots_a.len() { - roots_a[i] = hashes.next().unwrap() + *roots_a.get_mut(i).unwrap() = hashes.next().unwrap(); } let roots_b = state_b.block_roots_mut(); for i in 0..roots_b.len() { - roots_b[i] = hashes.next().unwrap() + *roots_b.get_mut(i).unwrap() = hashes.next().unwrap(); } let state_a_root = hashes.next().unwrap(); - state_b.state_roots_mut()[0] = state_a_root; + *state_b.state_roots_mut().get_mut(0).unwrap() = state_a_root; + state_a.apply_pending_mutations().unwrap(); store.put_state(&state_a_root, &state_a).unwrap(); let iter = BlockRootsIterator::new(&store, &state_b); @@ -472,6 +473,9 @@ mod test { let state_a_root = Hash256::from_low_u64_be(slots_per_historical_root as u64); let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2); + state_a.apply_pending_mutations().unwrap(); + state_b.apply_pending_mutations().unwrap(); + store.put_state(&state_a_root, &state_a).unwrap(); store.put_state(&state_b_root, &state_b).unwrap(); diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs new file mode 100644 index 00000000000..1bd73c53f8b --- /dev/null +++ b/beacon_node/store/src/state_cache.rs @@ -0,0 +1,283 @@ +use crate::Error; +use lru::LruCache; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::num::NonZeroUsize; +use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; + +/// Fraction of the LRU cache to leave intact during culling. 
+const CULL_EXEMPT_NUMERATOR: usize = 1; +const CULL_EXEMPT_DENOMINATOR: usize = 10; + +/// States that are less than or equal to this many epochs old *could* become finalized and will not +/// be culled from the cache. +const EPOCH_FINALIZATION_LIMIT: u64 = 4; + +#[derive(Debug)] +pub struct FinalizedState { + state_root: Hash256, + state: BeaconState, +} + +/// Map from block_root -> slot -> state_root. +#[derive(Debug, Default)] +pub struct BlockMap { + blocks: HashMap, +} + +/// Map from slot -> state_root. +#[derive(Debug, Default)] +pub struct SlotMap { + slots: BTreeMap, +} + +#[derive(Debug)] +pub struct StateCache { + finalized_state: Option>, + states: LruCache>, + block_map: BlockMap, + capacity: NonZeroUsize, + max_epoch: Epoch, +} + +#[derive(Debug)] +pub enum PutStateOutcome { + Finalized, + Duplicate, + New, +} + +impl StateCache { + pub fn new(capacity: NonZeroUsize) -> Self { + StateCache { + finalized_state: None, + states: LruCache::new(capacity), + block_map: BlockMap::default(), + capacity, + max_epoch: Epoch::new(0), + } + } + + pub fn len(&self) -> usize { + self.states.len() + } + + pub fn update_finalized_state( + &mut self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + ) -> Result<(), Error> { + if state.slot() % E::slots_per_epoch() != 0 { + return Err(Error::FinalizedStateUnaligned); + } + + if self + .finalized_state + .as_ref() + .map_or(false, |finalized_state| { + state.slot() < finalized_state.state.slot() + }) + { + return Err(Error::FinalizedStateDecreasingSlot); + } + + // Add to block map. + self.block_map.insert(block_root, state.slot(), state_root); + + // Prune block map. + let state_roots_to_prune = self.block_map.prune(state.slot()); + + // Delete states. + for state_root in state_roots_to_prune { + self.states.pop(&state_root); + } + + // Update finalized state. 
+ self.finalized_state = Some(FinalizedState { state_root, state }); + Ok(()) + } + + /// Return a status indicating whether the state already existed in the cache. + pub fn put_state( + &mut self, + state_root: Hash256, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + if self + .finalized_state + .as_ref() + .map_or(false, |finalized_state| { + finalized_state.state_root == state_root + }) + { + return Ok(PutStateOutcome::Finalized); + } + + if self.states.peek(&state_root).is_some() { + return Ok(PutStateOutcome::Duplicate); + } + + // Refuse states with pending mutations: we want cached states to be as small as possible + // i.e. stored entirely as a binary merkle tree with no updates overlaid. + if state.has_pending_mutations() { + return Err(Error::StateForCacheHasPendingUpdates { + state_root, + slot: state.slot(), + }); + } + + // Update the cache's idea of the max epoch. + self.max_epoch = std::cmp::max(state.current_epoch(), self.max_epoch); + + // If the cache is full, use the custom cull routine to make room. + if let Some(over_capacity) = self.len().checked_sub(self.capacity.get()) { + self.cull(over_capacity + 1); + } + + // Insert the full state into the cache. + self.states.put(state_root, state.clone()); + + // Record the connection from block root and slot to this state. + let slot = state.slot(); + self.block_map.insert(block_root, slot, state_root); + + Ok(PutStateOutcome::New) + } + + pub fn get_by_state_root(&mut self, state_root: Hash256) -> Option> { + if let Some(ref finalized_state) = self.finalized_state { + if state_root == finalized_state.state_root { + return Some(finalized_state.state.clone()); + } + } + self.states.get(&state_root).cloned() + } + + pub fn get_by_block_root( + &mut self, + block_root: Hash256, + slot: Slot, + ) -> Option<(Hash256, BeaconState)> { + let slot_map = self.block_map.blocks.get(&block_root)?; + + // Find the state at `slot`, or failing that the most recent ancestor. 
+ let state_root = slot_map + .slots + .iter() + .rev() + .find_map(|(ancestor_slot, state_root)| { + (*ancestor_slot <= slot).then_some(*state_root) + })?; + + let state = self.get_by_state_root(state_root)?; + Some((state_root, state)) + } + + pub fn delete_state(&mut self, state_root: &Hash256) { + self.states.pop(state_root); + self.block_map.delete(state_root); + } + + pub fn delete_block_states(&mut self, block_root: &Hash256) { + if let Some(slot_map) = self.block_map.delete_block_states(block_root) { + for state_root in slot_map.slots.values() { + self.states.pop(state_root); + } + } + } + + /// Cull approximately `count` states from the cache. + /// + /// States are culled LRU, with the following extra order imposed: + /// + /// - Advanced states. + /// - Mid-epoch unadvanced states. + /// - Epoch-boundary states that are too old to be finalized. + /// - Epoch-boundary states that could be finalized. + pub fn cull(&mut self, count: usize) { + let cull_exempt = std::cmp::max( + 1, + self.len() * CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR, + ); + + // Stage 1: gather states to cull. + let mut advanced_state_roots = vec![]; + let mut mid_epoch_state_roots = vec![]; + let mut old_boundary_state_roots = vec![]; + let mut good_boundary_state_roots = vec![]; + for (&state_root, state) in self.states.iter().skip(cull_exempt) { + let is_advanced = state.slot() > state.latest_block_header().slot; + let is_boundary = state.slot() % E::slots_per_epoch() == 0; + let could_finalize = + (self.max_epoch - state.current_epoch()) <= EPOCH_FINALIZATION_LIMIT; + + if is_boundary { + if could_finalize { + good_boundary_state_roots.push(state_root); + } else { + old_boundary_state_roots.push(state_root); + } + } else if is_advanced { + advanced_state_roots.push(state_root); + } else { + mid_epoch_state_roots.push(state_root); + } + + // Terminate early in the common case where we've already found enough junk to cull. 
+ if advanced_state_roots.len() == count { + break; + } + } + + // Stage 2: delete. + // This could probably be more efficient in how it interacts with the block map. + for state_root in advanced_state_roots + .iter() + .chain(mid_epoch_state_roots.iter()) + .chain(old_boundary_state_roots.iter()) + .chain(good_boundary_state_roots.iter()) + .take(count) + { + self.delete_state(state_root); + } + } +} + +impl BlockMap { + fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { + let slot_map = self.blocks.entry(block_root).or_default(); + slot_map.slots.insert(slot, state_root); + } + + fn prune(&mut self, finalized_slot: Slot) -> HashSet { + let mut pruned_states = HashSet::new(); + + self.blocks.retain(|_, slot_map| { + slot_map.slots.retain(|slot, state_root| { + let keep = *slot >= finalized_slot; + if !keep { + pruned_states.insert(*state_root); + } + keep + }); + + !slot_map.slots.is_empty() + }); + + pruned_states + } + + fn delete(&mut self, state_root_to_delete: &Hash256) { + self.blocks.retain(|_, slot_map| { + slot_map + .slots + .retain(|_, state_root| state_root != state_root_to_delete); + !slot_map.slots.is_empty() + }); + } + + fn delete_block_states(&mut self, block_root: &Hash256) -> Option { + self.blocks.remove(block_root) + } +} From 2d551c4a7bf4f4792edcc6e72fefcdc9c81c6cda Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 13:09:40 +1000 Subject: [PATCH 17/41] fix issues in store --- Cargo.lock | 11 +- beacon_node/store/src/chunked_vector.rs | 70 ++++++++---- beacon_node/store/src/config.rs | 4 + beacon_node/store/src/errors.rs | 10 +- beacon_node/store/src/hot_cold_store.rs | 107 +++++++++--------- beacon_node/store/src/impls/beacon_state.rs | 4 +- beacon_node/store/src/lib.rs | 1 + beacon_node/store/src/partial_beacon_state.rs | 33 +++--- beacon_node/store/src/state_cache.rs | 1 + 9 files changed, 143 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3ef7c9c88a..96c901d2b7c 100644 
--- a/Cargo.lock +++ b/Cargo.lock @@ -810,7 +810,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.1.222-exp" +version = "5.1.3" dependencies = [ "beacon_chain", "clap", @@ -1011,7 +1011,6 @@ version = "0.2.0" dependencies = [ "arbitrary", "blst", - "criterion", "ethereum-types 0.14.1", "ethereum_hashing", "ethereum_serde_utils", @@ -1047,7 +1046,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.1.222-exp" +version = "5.1.3" dependencies = [ "beacon_node", "clap", @@ -1800,7 +1799,6 @@ dependencies = [ "clap", "clap_utils", "environment", - "ethereum_ssz", "hex", "logging", "slog", @@ -4325,7 +4323,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.1.222-exp" +version = "5.1.3" dependencies = [ "account_utils", "beacon_chain", @@ -4901,7 +4899,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.1.222-exp" +version = "5.1.3" dependencies = [ "account_manager", "account_utils", @@ -4933,7 +4931,6 @@ dependencies = [ "slashing_protection", "slog", "sloggers", - "store", "task_executor", "tempfile", "tracing-subscriber", diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index a0c50e5a2b5..d3ba057209c 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -1,6 +1,6 @@ //! Space-efficient storage for `BeaconState` vector fields. //! -//! This module provides logic for splitting the `FixedVector` fields of a `BeaconState` into +//! This module provides logic for splitting the `Vector` fields of a `BeaconState` into //! chunks, and storing those chunks in contiguous ranges in the on-disk database. The motiviation //! for doing this is avoiding massive duplication in every on-disk state. For example, rather than //! 
storing the whole `historical_roots` vector, which is updated once every couple of thousand @@ -17,7 +17,7 @@ use self::UpdatePattern::*; use crate::*; use ssz::{Decode, Encode}; -use types::historical_summary::HistoricalSummary; +use types::{historical_summary::HistoricalSummary, milhouse, List, Vector}; /// Description of how a `BeaconState` field is updated during state processing. /// @@ -60,12 +60,13 @@ fn genesis_value_key() -> [u8; 8] { /// type-level. We require their value-level witnesses to be `Copy` so that we can avoid the /// turbofish when calling functions like `store_updated_vector`. pub trait Field: Copy { - /// The type of value stored in this field: the `T` from `FixedVector`. + /// The type of value stored in this field: the `T` from `Vector`. /// /// The `Default` impl will be used to fill extra vector entries. - type Value: Decode + Encode + Default + Clone + PartialEq + std::fmt::Debug; + type Value: Default + std::fmt::Debug + milhouse::Value; + // Decode + Encode + Default + Clone + PartialEq + std::fmt::Debug - /// The length of this field: the `N` from `FixedVector`. + /// The length of this field: the `N` from `Vector`. type Length: Unsigned; /// The database column where the integer-indexed chunks for this field should be stored. @@ -273,10 +274,10 @@ pub trait Field: Copy { } } -/// Marker trait for fixed-length fields (`FixedVector`). +/// Marker trait for fixed-length fields (`Vector`). pub trait FixedLengthField: Field {} -/// Marker trait for variable-length fields (`VariableList`). +/// Marker trait for variable-length fields (`List`). pub trait VariableLengthField: Field {} /// Macro to implement the `Field` trait on a new unit struct type. 
@@ -331,7 +332,7 @@ field!( activation_slot: Some(Slot::new(0)), deactivation_slot: None }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) + |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.block_roots(), index) ); field!( @@ -345,7 +346,7 @@ field!( activation_slot: Some(Slot::new(0)), deactivation_slot: None, }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) + |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.state_roots(), index) ); field!( @@ -361,7 +362,7 @@ field!( .capella_fork_epoch .map(|fork_epoch| fork_epoch.start_slot(E::slots_per_epoch())), }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) + |state: &BeaconState<_>, index, _| safe_modulo_list_index(state.historical_roots(), index) ); field!( @@ -371,7 +372,7 @@ field!( E::EpochsPerHistoricalVector, DBColumn::BeaconRandaoMixes, |_| OncePerEpoch { lag: 1 }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) + |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.randao_mixes(), index) ); field!( @@ -387,7 +388,7 @@ field!( .map(|fork_epoch| fork_epoch.start_slot(E::slots_per_epoch())), deactivation_slot: None, }, - |state: &BeaconState<_>, index, _| safe_modulo_index( + |state: &BeaconState<_>, index, _| safe_modulo_list_index( state .historical_summaries() .map_err(|_| ChunkError::InvalidFork)?, @@ -565,7 +566,7 @@ pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore< store: &S, slot: Slot, spec: &ChainSpec, -) -> Result, Error> { +) -> Result, Error> { // Do a range query let chunk_size = F::chunk_size(); let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); @@ -589,7 +590,7 @@ pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore< default, )?; - Ok(result.into()) + Ok(Vector::new(result).map_err(ChunkError::Milhouse)?) 
} /// The historical roots are stored in vector chunks, despite not actually being a vector. @@ -597,7 +598,7 @@ pub fn load_variable_list_from_db, E: EthSpec, S: KeyV store: &S, slot: Slot, spec: &ChainSpec, -) -> Result, Error> { +) -> Result, Error> { let chunk_size = F::chunk_size(); let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); let start_cindex = start_vindex / chunk_size; @@ -617,15 +618,35 @@ pub fn load_variable_list_from_db, E: EthSpec, S: KeyV } } - Ok(result.into()) + Ok(List::new(result).map_err(ChunkError::Milhouse)?) } -/// Index into a field of the state, avoiding out of bounds and division by 0. -fn safe_modulo_index(values: &[T], index: u64) -> Result { +/// Index into a `List` field of the state, avoiding out of bounds and division by 0. +fn safe_modulo_list_index( + values: &List, + index: u64, +) -> Result { + if values.is_empty() { + Err(ChunkError::ZeroLengthList) + } else { + values + .get(index as usize % values.len()) + .copied() + .ok_or(ChunkError::IndexOutOfBounds { index }) + } +} + +fn safe_modulo_vector_index( + values: &Vector, + index: u64, +) -> Result { if values.is_empty() { Err(ChunkError::ZeroLengthVector) } else { - Ok(values[index as usize % values.len()]) + values + .get(index as usize % values.len()) + .copied() + .ok_or(ChunkError::IndexOutOfBounds { index }) } } @@ -712,6 +733,10 @@ where #[derive(Debug, PartialEq)] pub enum ChunkError { ZeroLengthVector, + ZeroLengthList, + IndexOutOfBounds { + index: u64, + }, InvalidSize { chunk_index: usize, expected: usize, @@ -744,6 +769,13 @@ pub enum ChunkError { length: usize, }, InvalidFork, + Milhouse(milhouse::Error), +} + +impl From for ChunkError { + fn from(e: milhouse::Error) -> ChunkError { + Self::Milhouse(e) + } } #[cfg(test)] diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 681d424e282..d43999d8220 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -9,6 +9,7 @@ use 
types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(5); +pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; @@ -22,6 +23,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: NonZeroUsize, + /// Maximum number of states to store in the in-memory state cache. + pub state_cache_size: NonZeroUsize, /// Maximum number of states from freezer database to store in the in-memory state cache. pub historic_state_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. @@ -57,6 +60,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + state_cache_size: DEFAULT_STATE_CACHE_SIZE, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index ea8025dcabb..91e6a920ba3 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -3,7 +3,7 @@ use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, Hash256, InconsistentFork, Slot}; +use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; @@ -49,6 +49,14 @@ pub enum Error { InvalidBytes, UnableToDowngrade, InconsistentFork(InconsistentFork), + 
CacheBuildError(EpochCacheError), + RandaoMixOutOfBounds, + FinalizedStateDecreasingSlot, + FinalizedStateUnaligned, + StateForCacheHasPendingUpdates { + state_root: Hash256, + slot: Slot, + }, } pub trait HandleUnavailable { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6aa50eae0ce..0c3809f68ac 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -17,6 +17,7 @@ use crate::metadata::{ PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; +use crate::state_cache::StateCache; use crate::{ get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, @@ -30,7 +31,8 @@ use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - BlockProcessingError, BlockReplayer, SlotProcessingError, StateProcessingStrategy, + block_replayer::PreSlotHook, BlockProcessingError, BlockReplayer, SlotProcessingError, + StateProcessingStrategy, }; use std::cmp::min; use std::marker::PhantomData; @@ -70,14 +72,8 @@ pub struct HotColdDB, Cold: ItemStore> { /// /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. state_cache: Mutex>, - /// Immutable validator cache. - pub immutable_validators: Arc>>, /// LRU cache of replayed states. - // FIXME(sproul): re-enable historic state cache - #[allow(dead_code)] historic_state_cache: Mutex>>, - /// Cache of hierarchical diff buffers. - diff_buffer_cache: Mutex>, /// Chain spec. pub(crate) spec: ChainSpec, /// Logger. 
@@ -188,7 +184,8 @@ impl HotColdDB, MemoryStore> { blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), + state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + historic_state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -223,7 +220,8 @@ impl HotColdDB, LevelDB> { blobs_db: LevelDB::open(blobs_db_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), + state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + historic_state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -700,12 +698,7 @@ impl, Cold: ItemStore> HotColdDB state_root: Hash256, ) -> Result)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); - self.get_advanced_hot_state_with_strategy( - *block_root, - max_slot, - state_root, - StateProcessingStrategy::Inconsistent, - ) + self.get_advanced_hot_state(*block_root, max_slot, state_root) } /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`. @@ -734,23 +727,7 @@ impl, Cold: ItemStore> HotColdDB { return Ok(Some(cached)); } - self.get_advanced_hot_state_with_strategy( - block_root, - max_slot, - state_root, - StateProcessingStrategy::Accurate, - ) - } - /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`. - // FIXME(sproul): delete the state processing strategy stuff again - pub fn get_advanced_hot_state_with_strategy( - &self, - block_root: Hash256, - max_slot: Slot, - state_root: Hash256, - _state_processing_strategy: StateProcessingStrategy, - ) -> Result)>, Error> { // Hold a read lock on the split point so it can't move while we're trying to load the // state. 
let split = self.split.read_recursive(); @@ -855,17 +832,9 @@ impl, Cold: ItemStore> HotColdDB }) = self.load_hot_state_summary(state_root)? { // NOTE: minor inefficiency here because we load an unnecessary hot state summary - // - // `StateProcessingStrategy` should be irrelevant here since we never replay blocks for an epoch - // boundary state in the hot DB. - let state = self - .load_hot_state( - &epoch_boundary_state_root, - StateProcessingStrategy::Accurate, - )? - .ok_or(HotColdDBError::MissingEpochBoundaryState( - epoch_boundary_state_root, - ))?; + let (state, _) = self.load_hot_state(&epoch_boundary_state_root)?.ok_or( + HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), + )?; Ok(Some(state)) } else { // Try the cold DB @@ -1043,6 +1012,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutState(_, _) => (), + StoreOp::PutStateSummary(_, _) => (), + StoreOp::PutStateTemporaryFlag(_) => (), StoreOp::DeleteStateTemporaryFlag(_) => (), @@ -1114,14 +1085,36 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Get a post-finalization state from the database or store. + pub fn get_hot_state(&self, state_root: &Hash256) -> Result>, Error> { + if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) { + return Ok(Some(state)); + } + warn!( + self.log, + "State cache missed"; + "state_root" => ?state_root, + ); + + let state_from_disk = self.load_hot_state(state_root)?; + + if let Some((state, block_root)) = state_from_disk { + self.state_cache + .lock() + .put_state(*state_root, block_root, &state)?; + Ok(Some(state)) + } else { + Ok(None) + } + } + /// Load a post-finalization state from the hot database. /// /// Will replay blocks from the nearest epoch boundary. 
pub fn load_hot_state( &self, state_root: &Hash256, - state_processing_strategy: StateProcessingStrategy, - ) -> Result>, Error> { + ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); // If the state is marked as temporary, do not return it. It will become visible @@ -1153,11 +1146,12 @@ impl, Cold: ItemStore> HotColdDB blocks, slot, no_state_root_iter(), - state_processing_strategy, + None, + StateProcessingStrategy::Accurate, )? }; - Ok(Some(state)) + Ok(Some((state, latest_block_root))) } else { Ok(None) } @@ -1270,7 +1264,7 @@ impl, Cold: ItemStore> HotColdDB /// Load a frozen state that lies between restore points. fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { - if let Some(state) = self.state_cache.lock().get(&slot) { + if let Some(state) = self.historic_state_cache.lock().get(&slot) { return Ok(state.clone()); } @@ -1284,7 +1278,7 @@ impl, Cold: ItemStore> HotColdDB let mut low_state: Option> = None; // Try to get a more recent state from the cache to avoid massive blocks replay. - for (s, state) in self.state_cache.lock().iter() { + for (s, state) in self.historic_state_cache.lock().iter() { if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx && *s < slot && low_slot < *s @@ -1327,11 +1321,12 @@ impl, Cold: ItemStore> HotColdDB blocks, slot, Some(state_root_iter), + None, StateProcessingStrategy::Accurate, )?; // If state is not error, put it in the cache. 
- self.state_cache.lock().put(slot, state.clone()); + self.historic_state_cache.lock().put(slot, state.clone()); Ok(state) } @@ -1418,13 +1413,19 @@ impl, Cold: ItemStore> HotColdDB state: BeaconState, blocks: Vec>>, target_slot: Slot, - state_root_iter: impl Iterator>, + state_root_iter: Option>>, pre_slot_hook: Option>, + state_processing_strategy: StateProcessingStrategy, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() .minimal_block_root_verification() - .state_root_iter(state_root_iter); + .state_processing_strategy(state_processing_strategy); + + let have_state_root_iterator = state_root_iter.is_some(); + if let Some(state_root_iter) = state_root_iter { + block_replayer = block_replayer.state_root_iter(state_root_iter); + } if let Some(pre_slot_hook) = pre_slot_hook { block_replayer = block_replayer.pre_slot_hook(pre_slot_hook); @@ -1433,7 +1434,7 @@ impl, Cold: ItemStore> HotColdDB block_replayer .apply_blocks(blocks, Some(target_slot)) .map(|block_replayer| { - if block_replayer.state_root_miss() { + if have_state_root_iterator && block_replayer.state_root_miss() { warn!( self.log, "State root cache miss during block replay"; @@ -2234,7 +2235,7 @@ impl, Cold: ItemStore> HotColdDB } /// This function fills in missing block roots between last restore point slot and split - /// slot, if any. + /// slot, if any. 
pub fn heal_freezer_block_roots_at_split(&self) -> Result<(), Error> { let split = self.get_split_info(); let last_restore_point_slot = (split.slot - 1) / self.config.slots_per_restore_point diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index d08bf564927..f752bf39795 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -46,14 +46,14 @@ pub fn get_full_state, E: EthSpec>( #[derive(Encode)] pub struct StorageContainer { state: BeaconState, - committee_caches: Vec, + committee_caches: Vec>, } impl StorageContainer { /// Create a new instance for storing a `BeaconState`. pub fn new(state: &BeaconState) -> Self { Self { - state: state.clone_with(CloneConfig::none()), + state: state.clone(), committee_caches: state.committee_caches().to_vec(), } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index e86689b0cf1..a3789c8ec45 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -25,6 +25,7 @@ pub mod metadata; pub mod metrics; mod partial_beacon_state; pub mod reconstruct; +pub mod state_cache; pub mod iter; diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 4e5a2b8e64b..25438fc7e0a 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -34,42 +34,42 @@ where pub latest_block_header: BeaconBlockHeader, #[ssz(skip_serializing, skip_deserializing)] - pub block_roots: Option>, + pub block_roots: Option>, #[ssz(skip_serializing, skip_deserializing)] - pub state_roots: Option>, + pub state_roots: Option>, #[ssz(skip_serializing, skip_deserializing)] - pub historical_roots: Option>, + pub historical_roots: Option>, // Ethereum 1.0 chain data pub eth1_data: Eth1Data, - pub eth1_data_votes: VariableList, + pub eth1_data_votes: List, pub eth1_deposit_index: u64, // Registry - pub validators: 
VariableList, - pub balances: VariableList, + pub validators: List, + pub balances: List, // Shuffling /// Randao value from the current slot, for patching into the per-epoch randao vector. pub latest_randao_value: Hash256, #[ssz(skip_serializing, skip_deserializing)] - pub randao_mixes: Option>, + pub randao_mixes: Option>, // Slashings - slashings: FixedVector, + slashings: Vector, // Attestations (genesis fork only) #[superstruct(only(Base))] - pub previous_epoch_attestations: VariableList, E::MaxPendingAttestations>, + pub previous_epoch_attestations: List, E::MaxPendingAttestations>, #[superstruct(only(Base))] - pub current_epoch_attestations: VariableList, E::MaxPendingAttestations>, + pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub previous_epoch_participation: VariableList, + pub previous_epoch_participation: List, #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub current_epoch_participation: VariableList, + pub current_epoch_participation: List, // Finality pub justification_bits: BitVector, @@ -79,7 +79,7 @@ where // Inactivity #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub inactivity_scores: VariableList, + pub inactivity_scores: List, // Light-client sync committees #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] @@ -117,7 +117,7 @@ where #[ssz(skip_serializing, skip_deserializing)] #[superstruct(only(Capella, Deneb, Electra))] - pub historical_summaries: Option>, + pub historical_summaries: Option>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. 
@@ -369,7 +369,9 @@ impl PartialBeaconState { // Patch the value for the current slot into the index for the current epoch let current_epoch = self.slot().epoch(E::slots_per_epoch()); let len = randao_mixes.len(); - randao_mixes[current_epoch.as_usize() % len] = *self.latest_randao_value(); + *randao_mixes + .get_mut(current_epoch.as_usize() % len) + .ok_or(Error::RandaoMixOutOfBounds)? = *self.latest_randao_value(); *self.randao_mixes_mut() = Some(randao_mixes) } @@ -422,7 +424,6 @@ macro_rules! impl_try_into_beacon_state { exit_cache: <_>::default(), slashings_cache: <_>::default(), epoch_cache: <_>::default(), - tree_hash_cache: <_>::default(), // Variant-specific fields $( diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 1bd73c53f8b..dcc230de5c0 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -46,6 +46,7 @@ pub enum PutStateOutcome { New, } +#[allow(clippy::len_without_is_empty)] impl StateCache { pub fn new(capacity: NonZeroUsize) -> Self { StateCache { From 8fca7971937cdcb09aeab42bdd2423e5b820ed75 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 16:14:15 +1000 Subject: [PATCH 18/41] IT COMPILES --- beacon_node/beacon_chain/src/beacon_chain.rs | 49 +++-- .../beacon_chain/src/blob_verification.rs | 6 +- .../beacon_chain/src/block_verification.rs | 7 +- beacon_node/beacon_chain/src/builder.rs | 18 +- .../beacon_chain/src/canonical_head.rs | 2 +- beacon_node/beacon_chain/src/errors.rs | 3 + beacon_node/beacon_chain/src/lib.rs | 11 +- beacon_node/beacon_chain/src/metrics.rs | 15 +- beacon_node/beacon_chain/src/migrate.rs | 2 +- .../beacon_chain/src/state_advance_timer.rs | 196 ++++++------------ .../src/validator_pubkey_cache.rs | 7 +- beacon_node/src/config.rs | 7 +- lcli/src/transition_blocks.rs | 7 +- testing/ef_tests/src/cases/bls_verify_msg.rs | 9 +- .../src/cases/merkle_proof_validity.rs | 3 - 15 files changed, 136 insertions(+), 206 deletions(-) 
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5adf51fe07a..3b26de02468 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -452,7 +452,7 @@ pub struct BeaconChain { /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. - pub(crate) validator_pubkey_cache: Arc>>, + pub(crate) validator_pubkey_cache: TimeoutRwLock>, /// A cache used when producing attestations. pub(crate) attester_cache: Arc, /// A cache used when producing attestations whilst the head block is still being imported. @@ -765,6 +765,7 @@ impl BeaconChain { start_slot, local_head.beacon_state.clone(), local_head.beacon_block_root, + &self.spec, )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -792,12 +793,7 @@ impl BeaconChain { let iter = self.store.forwards_block_roots_iterator_until( start_slot, end_slot, - || { - Ok(( - head.beacon_state.clone_with_only_committee_caches(), - head.beacon_block_root, - )) - }, + || Ok((head.beacon_state.clone(), head.beacon_block_root)), &self.spec, )?; Ok(iter @@ -869,6 +865,7 @@ impl BeaconChain { start_slot, local_head.beacon_state_root(), local_head.beacon_state.clone(), + &self.spec, )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -888,12 +885,7 @@ impl BeaconChain { let iter = self.store.forwards_state_roots_iterator_until( start_slot, end_slot, - || { - Ok(( - head.beacon_state.clone_with_only_committee_caches(), - head.beacon_state_root(), - )) - }, + || Ok((head.beacon_state.clone(), head.beacon_state_root())), &self.spec, )?; Ok(iter @@ -1469,7 +1461,10 @@ impl BeaconChain { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. 
pub fn validator_index(&self, pubkey: &PublicKeyBytes) -> Result, Error> { - let pubkey_cache = self.validator_pubkey_cache.read(); + let pubkey_cache = self + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; Ok(pubkey_cache.get_index(pubkey)) } @@ -1482,7 +1477,10 @@ impl BeaconChain { &self, validator_pubkeys: impl Iterator, ) -> Result, Error> { - let pubkey_cache = self.validator_pubkey_cache.read(); + let pubkey_cache = self + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; validator_pubkeys .map(|pubkey| { @@ -1507,7 +1505,10 @@ impl BeaconChain { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. pub fn validator_pubkey(&self, validator_index: usize) -> Result, Error> { - let pubkey_cache = self.validator_pubkey_cache.read(); + let pubkey_cache = self + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; Ok(pubkey_cache.get(validator_index).cloned()) } @@ -1517,7 +1518,11 @@ impl BeaconChain { &self, validator_index: usize, ) -> Result, Error> { - let pubkey_cache = self.validator_pubkey_cache.read(); + let pubkey_cache = self + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + Ok(pubkey_cache.get_pubkey_bytes(validator_index).copied()) } @@ -1530,7 +1535,10 @@ impl BeaconChain { &self, validator_indices: &[usize], ) -> Result, Error> { - let pubkey_cache = self.validator_pubkey_cache.read(); + let pubkey_cache = self + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; let mut map = HashMap::with_capacity(validator_indices.len()); for &validator_index in validator_indices { @@ -3297,7 +3305,8 @@ impl BeaconChain { // would be 
difficult to check that they all lock fork choice first. let mut ops = self .validator_pubkey_cache - .write() + .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; // Apply the state to the attester cache, only if it is from the previous epoch or later. @@ -6305,7 +6314,7 @@ impl BeaconChain { let beacon_block = self .store - .get_blinded_block(&beacon_block_root, None)? + .get_blinded_block(&beacon_block_root)? .ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 496a11f93e0..6df40f31526 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -4,7 +4,8 @@ use std::sync::Arc; use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::block_verification::{ - cheap_state_advance_to_obtain_committees, process_block_slash_info, BlockSlashInfo, + cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, + BlockSlashInfo, }; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; @@ -516,7 +517,8 @@ pub fn validate_blob_sidecar_for_gossip( // Signature verify the signed block header. 
let signature_is_valid = { - let pubkey_cache = chain.validator_pubkey_cache.read(); + let pubkey_cache = + get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?; let pubkey = pubkey_cache .get(proposer_index) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 461e54df719..8a16f97ab0b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -63,7 +63,7 @@ use crate::observed_block_producers::SeenBlock; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - beacon_chain::{BeaconForkChoice, ForkChoiceError}, + beacon_chain::{BeaconForkChoice, ForkChoiceError, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; @@ -2013,7 +2013,10 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobEr pub fn get_validator_pubkey_cache( chain: &BeaconChain, ) -> Result>, BeaconChainError> { - Ok(chain.validator_pubkey_cache.read()) + chain + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) } /// Produces an _empty_ `BlockSignatureVerifier`. 
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 2904da28062..abfa2a0d2d7 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -14,6 +14,7 @@ use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; +use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain, @@ -38,8 +39,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Epoch, EthSpec, Graffiti, Hash256, - Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, + Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -91,6 +92,7 @@ pub struct BeaconChainBuilder { shutdown_sender: Option>, light_client_server_tx: Option>>, head_tracker: Option, + validator_pubkey_cache: Option>, spec: ChainSpec, chain_config: ChainConfig, log: Option, @@ -133,6 +135,7 @@ where shutdown_sender: None, light_client_server_tx: None, head_tracker: None, + validator_pubkey_cache: None, spec: E::default_spec(), chain_config: ChainConfig::default(), log: None, @@ -289,7 +292,7 @@ where .ok_or("Fork choice not found in store")?; let genesis_block = store - .get_blinded_block(&chain.genesis_block_root, Some(Slot::new(0))) + .get_blinded_block(&chain.genesis_block_root) .map_err(|e| descriptive_db_error("genesis block", &e))? 
.ok_or("Genesis block not found in store")?; let genesis_state = store @@ -575,6 +578,13 @@ where .map_err(|e| format!("Failed to initialize blob info: {:?}", e))?, ); + // Store pruning checkpoint to prevent attempting to prune before the anchor state. + self.pending_io_batch + .push(store.pruning_checkpoint_store_op(Checkpoint { + root: weak_subj_block_root, + epoch: weak_subj_state.slot().epoch(E::slots_per_epoch()), + })); + let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, beacon_block: Arc::new(weak_subj_block), @@ -955,7 +965,7 @@ where Default::default(), log.clone(), ))), - validator_pubkey_cache, + validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), light_client_server_cache: LightClientServerCache::new(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 633e8365b6c..447978755b9 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -651,7 +651,7 @@ impl BeaconChain { let mut new_snapshot = { let beacon_block = self .store - .get_full_block(&new_view.head_block_root, None)? + .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; let (_, beacon_state) = self diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 11025d5937b..a68409bca23 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -31,6 +31,7 @@ use state_processing::{ use std::time::Duration; use task_executor::ShutdownReason; use tokio::task::JoinError; +use types::milhouse::Error as MilhouseError; use types::*; macro_rules! 
easy_from_to { @@ -223,6 +224,7 @@ pub enum BeaconChainError { AvailabilityCheckError(AvailabilityCheckError), LightClientError(LightClientError), UnsupportedFork, + MilhouseError(MilhouseError), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -252,6 +254,7 @@ easy_from_to!(InconsistentFork, BeaconChainError); easy_from_to!(AvailabilityCheckError, BeaconChainError); easy_from_to!(EpochCacheError, BeaconChainError); easy_from_to!(LightClientError, BeaconChainError); +easy_from_to!(MilhouseError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7ee18de0351..3e0060cb7ab 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -58,6 +58,7 @@ pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; pub mod validator_monitor; +pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, @@ -97,13 +98,3 @@ pub use state_processing::per_block_processing::errors::{ pub use store; pub use timeout_rw_lock::TimeoutRwLock; pub use types; - -pub mod validator_pubkey_cache { - use crate::BeaconChainTypes; - - pub type ValidatorPubkeyCache = store::ValidatorPubkeyCache< - ::EthSpec, - ::HotStore, - ::ColdStore, - >; -} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 4970975f251..39a6ae344ff 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1182,15 +1182,10 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); - if let Some(snapshot_cache) = beacon_chain - .snapshot_cache - .try_write_for(SNAPSHOT_CACHE_TIMEOUT) - { - set_gauge( - &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, - snapshot_cache.len() as i64, - ) - } + set_gauge_by_usize( + 
&BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, + beacon_chain.store.state_cache_len(), + ); let da_checker_metrics = beacon_chain.data_availability_checker.metrics(); set_gauge_by_usize( @@ -1320,7 +1315,7 @@ fn scrape_head_state(state: &BeaconState, state_root: Hash256) { num_active += 1; } - if v.slashed { + if v.slashed() { num_slashed += 1; } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 10cbe6378f0..ad597bf92aa 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -6,7 +6,7 @@ use parking_lot::Mutex; use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::mem; -use std::sync::Arc; +use std::sync::{mpsc, Arc}; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::{migrate_database, HotColdDBError}; diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 39d35f81113..70afc4b9a82 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -15,9 +15,7 @@ //! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles. 
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ - beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}, - chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, - snapshot_cache::StateAdvance, + beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT, chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, BeaconChain, BeaconChainError, BeaconChainTypes, }; use slog::{debug, error, warn, Logger}; @@ -29,7 +27,7 @@ use std::sync::{ }; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; -use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -45,14 +43,14 @@ const MAX_ADVANCE_DISTANCE: u64 = 4; /// impact whilst having 8 epochs without a block is a comfortable grace period. const MAX_FORK_CHOICE_DISTANCE: u64 = 256; -/// Drop any unused block production state cache after this many slots. -const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4; - #[derive(Debug)] enum Error { BeaconChain(BeaconChainError), // We don't use the inner value directly, but it's used in the Debug impl. HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256), + // We don't use the inner value directly, but it's used in the Debug impl. + BeaconState(#[allow(dead_code)] BeaconStateError), + Store(#[allow(dead_code)] store::Error), MaxDistanceExceeded { current_slot: Slot, head_slot: Slot, @@ -72,6 +70,18 @@ impl From for Error { } } +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From for Error { + fn from(e: store::Error) -> Self { + Self::Store(e) + } +} + /// Provides a simple thread-safe lock to be used for task co-ordination. Practically equivalent to /// `Mutex<()>`. 
#[derive(Clone)] @@ -231,7 +241,7 @@ async fn state_advance_timer( // Prepare proposers so that the node can send payload attributes in the case where // it decides to abandon a proposer boost re-org. - let proposer_head = beacon_chain + beacon_chain .prepare_beacon_proposer(current_slot) .await .unwrap_or_else(|e| { @@ -243,86 +253,12 @@ async fn state_advance_timer( ); None }); - - // Use a blocking task to avoid blocking the core executor whilst waiting for locks - // in `ForkChoiceSignalTx`. - beacon_chain.task_executor.clone().spawn_blocking( - move || { - // If we're proposing, clone the head state preemptively so that it isn't on - // the hot path of proposing. We can delete this once we have tree-states. - if let Some(proposer_head) = proposer_head { - let mut cache = beacon_chain.block_production_state.lock(); - - // Avoid holding two states in memory. It's OK to hold the lock because - // we always lock the block production cache before the snapshot cache - // and we prefer for block production to wait for the block production - // cache if a clone is in-progress. - if cache - .as_ref() - .map_or(false, |(cached_head, _)| *cached_head != proposer_head) - { - drop(cache.take()); - } - if let Some(proposer_state) = beacon_chain - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(proposer_head) - }) - { - *cache = Some((proposer_head, proposer_state)); - debug!( - log, - "Cloned state ready for block production"; - "head_block_root" => ?proposer_head, - "slot" => next_slot - ); - } else { - warn!( - log, - "Block production state missing from snapshot cache"; - "head_block_root" => ?proposer_head, - "slot" => next_slot - ); - } - } else { - // If we aren't proposing, drop any old block production cache to save - // memory. 
- let mut cache = beacon_chain.block_production_state.lock(); - if let Some((_, state)) = &*cache { - if state.pre_state.slot() + MAX_BLOCK_PRODUCTION_CACHE_DISTANCE - <= current_slot - { - drop(cache.take()); - } - } - } - - // Signal block proposal for the next slot (if it happens to be waiting). - if let Some(tx) = &beacon_chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(next_slot) { - warn!( - log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => next_slot, - ); - } - } - }, - "fork_choice_advance_signal_tx", - ); }, "fork_choice_advance", ); } } -/// Reads the `snapshot_cache` from the `beacon_chain` and attempts to take a clone of the -/// `BeaconState` of the head block. If it obtains this clone, the state will be advanced a single -/// slot then placed back in the `snapshot_cache` to be used for block verification. -/// -/// See the module-level documentation for rationale. fn advance_head( beacon_chain: &Arc>, log: &Logger, @@ -345,46 +281,42 @@ fn advance_head( } } - let head_root = beacon_chain.head_beacon_block_root(); + let (head_block_root, head_block_state_root) = { + let snapshot = beacon_chain.head_snapshot(); + (snapshot.beacon_block_root, snapshot.beacon_state_root()) + }; - let (head_slot, head_state_root, mut state) = match beacon_chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::SnapshotCacheLockTimeout)? - .get_for_state_advance(head_root) - { - StateAdvance::AlreadyAdvanced => { + let (head_state_root, mut state) = beacon_chain + .store + .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? + .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; + + // Protect against advancing a state more than a single slot. + // + // Advancing more than one slot without storing the intermediate state would corrupt the + // database. Future works might store temporary, intermediate states inside this function. 
+ match state.slot().cmp(&state.latest_block_header().slot) { + std::cmp::Ordering::Equal => (), + std::cmp::Ordering::Greater => { return Err(Error::StateAlreadyAdvanced { - block_root: head_root, - }) + block_root: head_block_root, + }); } - StateAdvance::BlockNotFound => return Err(Error::HeadMissingFromSnapshotCache(head_root)), - StateAdvance::State { - state, - state_root, - block_slot, - } => (block_slot, state_root, *state), - }; + std::cmp::Ordering::Less => { + return Err(Error::BadStateSlot { + _block_slot: state.latest_block_header().slot, + _state_slot: state.slot(), + }); + } + } let initial_slot = state.slot(); let initial_epoch = state.current_epoch(); - let state_root = if state.slot() == head_slot { - Some(head_state_root) - } else { - // Protect against advancing a state more than a single slot. - // - // Advancing more than one slot without storing the intermediate state would corrupt the - // database. Future works might store temporary, intermediate states inside this function. - return Err(Error::BadStateSlot { - _block_slot: head_slot, - _state_slot: state.slot(), - }); - }; - // Advance the state a single slot. - if let Some(summary) = per_slot_processing(&mut state, state_root, &beacon_chain.spec) - .map_err(BeaconChainError::from)? + if let Some(summary) = + per_slot_processing(&mut state, Some(head_state_root), &beacon_chain.spec) + .map_err(BeaconChainError::from)? { // Expose Prometheus metrics. if let Err(e) = summary.observe_metrics() { @@ -418,7 +350,7 @@ fn advance_head( debug!( log, "Advanced head state one slot"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "state_slot" => state.slot(), "current_slot" => current_slot, ); @@ -437,14 +369,14 @@ fn advance_head( if initial_epoch < state.current_epoch() { // Update the proposer cache. 
// - // We supply the `head_root` as the decision block since the prior `if` statement guarantees + // We supply the `head_block_root` as the decision block since the prior `if` statement guarantees // the head root is the latest block from the prior epoch. beacon_chain .beacon_proposer_cache .lock() .insert( state.current_epoch(), - head_root, + head_block_root, state .get_beacon_proposer_indices(&beacon_chain.spec) .map_err(BeaconChainError::from)?, @@ -453,8 +385,9 @@ fn advance_head( .map_err(BeaconChainError::from)?; // Update the attester cache. - let shuffling_id = AttestationShufflingId::new(head_root, &state, RelativeEpoch::Next) - .map_err(BeaconChainError::from)?; + let shuffling_id = + AttestationShufflingId::new(head_block_root, &state, RelativeEpoch::Next) + .map_err(BeaconChainError::from)?; let committee_cache = state .committee_cache(RelativeEpoch::Next) .map_err(BeaconChainError::from)?; @@ -462,12 +395,12 @@ fn advance_head( .shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::AttestationCacheLockTimeout)? - .insert_committee_cache(shuffling_id.clone(), committee_cache); + .insert_value(shuffling_id.clone(), committee_cache); debug!( log, "Primed proposer and attester caches"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "next_epoch_shuffling_root" => ?shuffling_id.shuffling_decision_block, "state_epoch" => state.current_epoch(), "current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()), @@ -477,22 +410,13 @@ fn advance_head( // Apply the state to the attester cache, if the cache deems it interesting. beacon_chain .attester_cache - .maybe_cache_state(&state, head_root, &beacon_chain.spec) + .maybe_cache_state(&state, head_block_root, &beacon_chain.spec) .map_err(BeaconChainError::from)?; let final_slot = state.slot(); - // Insert the advanced state back into the snapshot cache. 
- beacon_chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::SnapshotCacheLockTimeout)? - .update_pre_state(head_root, state) - .ok_or(Error::HeadMissingFromSnapshotCache(head_root))?; - // If we have moved into the next slot whilst processing the state then this function is going - // to become ineffective and likely become a hindrance as we're stealing the tree hash cache - // from the snapshot cache (which may force the next block to rebuild a new one). + // to become ineffective. // // If this warning occurs very frequently on well-resourced machines then we should consider // starting it earlier in the slot. Otherwise, it's a good indication that the machine is too @@ -503,7 +427,7 @@ fn advance_head( warn!( log, "State advance too slow"; - "head_root" => %head_root, + "head_block_root" => %head_block_root, "advanced_slot" => final_slot, "current_slot" => current_slot, "starting_slot" => starting_slot, @@ -511,10 +435,14 @@ fn advance_head( ); } + // Write the advanced state to the database. + let advanced_state_root = state.update_tree_hash_cache()?; + beacon_chain.store.put_state(&advanced_state_root, &state)?; + debug!( log, "Completed state advance"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "advanced_slot" => final_slot, "initial_slot" => initial_slot, ); diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 2cf0c326158..7e605733951 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -81,9 +81,10 @@ impl ValidatorPubkeyCache { ) -> Result>, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( - state.validators()[self.pubkeys.len()..] - .iter() - .map(|v| v.pubkey), + state + .validators() + .iter_from(self.pubkeys.len())? 
+ .map(|v| *v.pubkey), ) } else { Ok(vec![]) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9b0032e3068..284b571c61a 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -171,9 +171,6 @@ pub fn get_config( if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } - if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { - client_config.chain.snapshot_cache_size = cache_size; - } /* * Prometheus metrics HTTP server @@ -400,6 +397,10 @@ pub fn get_config( .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { + client_config.store.state_cache_size = cache_size; + } + if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { client_config.store.historic_state_cache_size = historic_state_cache_size .parse() diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index bab1649d147..7f188387d9d 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -201,10 +201,7 @@ pub fn run( let store = Arc::new(store); debug!("Building pubkey cache (might take some time)"); - let validator_pubkey_cache = store.immutable_validators.clone(); - validator_pubkey_cache - .write() - .import_new_pubkeys(&pre_state) + let validator_pubkey_cache = ValidatorPubkeyCache::new(&pre_state, store) .map_err(|e| format!("Failed to create pubkey cache: {:?}", e))?; /* @@ -248,7 +245,7 @@ pub fn run( block, state_root_opt, &config, - &*validator_pubkey_cache.read(), + &validator_pubkey_cache, &mut saved_ctxt, spec, )?; diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 31fb16a4df4..42ee459a607 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs 
@@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; -use bls::{PublicKey, PublicKeyBytes, Signature, SignatureBytes}; +use bls::{PublicKeyBytes, Signature, SignatureBytes}; use serde::Deserialize; use types::Hash256; @@ -29,13 +29,6 @@ impl Case for BlsVerify { .try_into() .and_then(|signature: Signature| { let pk = self.input.pubkey.decompress()?; - - // Check serialization roundtrip. - let pk_uncompressed = pk.serialize_uncompressed(); - let pk_from_uncompressed = PublicKey::deserialize_uncompressed(&pk_uncompressed) - .expect("uncompressed serialization should round-trip"); - assert_eq!(pk_from_uncompressed, pk); - Ok(signature.verify(&pk, Hash256::from_slice(&message))) }) .unwrap_or(false); diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index ddca5e2184b..cf0b9f77c8f 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -77,9 +77,6 @@ impl Case for MerkleProofValidity { } } - // Tree hash cache should still be initialized (not dropped). 
- assert!(state.tree_hash_cache().is_initialized()); - Ok(()) } } From 2b3b71aabe1941781662f5cd7751a45cfad2e658 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2024 16:54:52 +1000 Subject: [PATCH 19/41] Remove some unnecessary module qualification --- consensus/types/src/beacon_block.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 94c44abcc90..14874f0204f 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -358,7 +358,7 @@ impl> BeaconBlockBase { }; let deposit = Deposit { - proof: ssz_types::FixedVector::from_elem(Hash256::zero()), + proof: FixedVector::from_elem(Hash256::zero()), data: deposit_data, }; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index eafd12b13ca..cae5d51cdff 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1099,7 +1099,7 @@ impl BeaconState { let aggregate_pubkey = AggregatePublicKey::aggregate(&decompressed_pubkeys)?; Ok(SyncCommittee { - pubkeys: ssz_types::FixedVector::new(pubkeys)?, + pubkeys: FixedVector::new(pubkeys)?, aggregate_pubkey: aggregate_pubkey.to_public_key().compress(), }) } From 8fda723be63388d098314f916d487d9eba8012ed Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 11 Apr 2024 13:13:04 +0900 Subject: [PATCH 20/41] Revert Arced pubkey optimization (#5536) --- account_manager/src/validator/exit.rs | 8 +- .../beacon_chain/src/attestation_rewards.rs | 4 +- .../beacon_chain/src/beacon_block_reward.rs | 4 +- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/builder.rs | 4 +- beacon_node/beacon_chain/src/metrics.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 16 +- .../src/validator_pubkey_cache.rs | 2 +- .../beacon_chain/tests/op_verification.rs | 30 +- 
.../tests/payload_invalidation.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 2 +- beacon_node/genesis/src/interop.rs | 6 +- beacon_node/http_api/src/validator.rs | 2 +- .../http_api/src/validator_inclusion.rs | 4 +- beacon_node/http_api/src/validators.rs | 4 +- beacon_node/http_api/tests/tests.rs | 10 +- beacon_node/operation_pool/src/lib.rs | 36 +- common/eth2/src/types.rs | 12 +- consensus/fork_choice/tests/tests.rs | 2 +- .../proto_array/src/justified_balances.rs | 6 +- .../src/common/initiate_validator_exit.rs | 6 +- .../src/common/slash_validator.rs | 8 +- .../update_progressive_balances_cache.rs | 4 +- consensus/state_processing/src/epoch_cache.rs | 2 +- consensus/state_processing/src/genesis.rs | 8 +- .../src/per_block_processing.rs | 2 +- .../process_operations.rs | 25 +- .../per_block_processing/signature_sets.rs | 2 +- .../verify_bls_to_execution_change.rs | 4 +- .../src/per_block_processing/verify_exit.rs | 4 +- .../base/validator_statuses.rs | 4 +- .../effective_balance_updates.rs | 10 +- .../epoch_processing_summary.rs | 2 +- .../per_epoch_processing/registry_updates.rs | 6 +- .../src/per_epoch_processing/single_pass.rs | 37 +- .../src/per_epoch_processing/slashings.rs | 4 +- consensus/types/benches/benches.rs | 20 +- consensus/types/src/activation_queue.rs | 2 +- consensus/types/src/beacon_state.rs | 28 +- .../types/src/beacon_state/compact_state.rs | 316 ------------------ .../types/src/beacon_state/exit_cache.rs | 4 +- .../types/src/beacon_state/slashings_cache.rs | 2 +- consensus/types/src/beacon_state/tests.rs | 1 - consensus/types/src/lib.rs | 4 +- consensus/types/src/validator.rs | 214 ++---------- lcli/src/new_testnet.rs | 27 +- lcli/src/replace_state_pubkeys.rs | 7 +- testing/state_transition_vectors/src/exit.rs | 17 +- watch/src/updater/mod.rs | 10 +- 49 files changed, 220 insertions(+), 718 deletions(-) delete mode 100644 consensus/types/src/beacon_state/compact_state.rs diff --git a/account_manager/src/validator/exit.rs 
b/account_manager/src/validator/exit.rs index f5cdd635188..bc9e0ee1dd6 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -203,8 +203,8 @@ async fn publish_voluntary_exit( let validator_data = get_validator_data(client, &keypair.pk).await?; match validator_data.status { ValidatorStatus::ActiveExiting => { - let exit_epoch = validator_data.validator.exit_epoch(); - let withdrawal_epoch = validator_data.validator.withdrawable_epoch(); + let exit_epoch = validator_data.validator.exit_epoch; + let withdrawal_epoch = validator_data.validator.withdrawable_epoch; let current_epoch = get_current_epoch::(genesis_data.genesis_time, spec) .ok_or("Failed to get current epoch. Please check your system time")?; eprintln!("Voluntary exit has been accepted into the beacon chain, but not yet finalized. \ @@ -224,7 +224,7 @@ async fn publish_voluntary_exit( ValidatorStatus::ExitedSlashed | ValidatorStatus::ExitedUnslashed => { eprintln!( "Validator has exited on epoch: {}", - validator_data.validator.exit_epoch() + validator_data.validator.exit_epoch ); break; } @@ -250,7 +250,7 @@ async fn get_validator_index_for_exit( ValidatorStatus::ActiveOngoing => { let eligible_epoch = validator_data .validator - .activation_epoch() + .activation_epoch .safe_add(spec.shard_committee_period) .map_err(|e| format!("Failed to calculate eligible epoch, validator activation epoch too high: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 45b690dc8f3..491b7ef7da9 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -230,13 +230,13 @@ impl BeaconChain { let mut inactivity_penalty = 0i64; if eligible { - let effective_balance = validator.effective_balance(); + let effective_balance = validator.effective_balance; for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { let (ideal_reward, penalty) = 
ideal_rewards_hashmap .get(&(flag_index, effective_balance)) .ok_or(BeaconChainError::AttestationRewardsError)?; - let voted_correctly = !validator.slashed() + let voted_correctly = !validator.slashed && previous_epoch_participation_flags.has_flag(flag_index)?; if voted_correctly { if flag_index == TIMELY_HEAD_FLAG_INDEX { diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 9ee5ec41eed..5b70215d225 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -135,7 +135,7 @@ impl BeaconChain { proposer_slashing_reward.safe_add_assign( state .get_validator(proposer_slashing.proposer_index() as usize)? - .effective_balance() + .effective_balance .safe_div(self.spec.whistleblower_reward_quotient)?, )?; } @@ -157,7 +157,7 @@ impl BeaconChain { attester_slashing_reward.safe_add_assign( state .get_validator(attester_index as usize)? - .effective_balance() + .effective_balance .safe_div(self.spec.whistleblower_reward_quotient)?, )?; } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3b26de02468..d79157d7515 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4809,7 +4809,7 @@ impl BeaconChain { let pubkey = state .validators() .get(proposer_index as usize) - .map(|v| *v.pubkey()) + .map(|v| v.pubkey) .ok_or(BlockProductionError::BeaconChain( BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), ))?; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index abfa2a0d2d7..6accb66dfbf 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1282,7 +1282,7 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials(); + let creds = v.withdrawal_credentials; let creds = creds.as_bytes(); 
assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, @@ -1290,7 +1290,7 @@ mod test { ); assert_eq!( &creds[1..], - &hash(&v.pubkey().as_ssz_bytes())[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 39a6ae344ff..58fe14f3d0d 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1315,7 +1315,7 @@ fn scrape_head_state(state: &BeaconState, state_root: Hash256) { num_active += 1; } - if v.slashed() { + if v.slashed { num_slashed += 1; } diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index e9993fcd397..a63940074b4 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -493,10 +493,10 @@ impl ValidatorMonitor { .skip(self.indices.len()) .for_each(|(i, validator)| { let i = i as u64; - if let Some(validator) = self.validators.get_mut(validator.pubkey()) { + if let Some(validator) = self.validators.get_mut(&validator.pubkey) { validator.set_index(i) } - self.indices.insert(i, *validator.pubkey()); + self.indices.insert(i, validator.pubkey); }); // Add missed non-finalized blocks for the monitored validators @@ -536,12 +536,12 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI, &[id], - u64_to_i64(validator.effective_balance()), + u64_to_i64(validator.effective_balance), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_SLASHED, &[id], - i64::from(validator.slashed()), + i64::from(validator.slashed), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_ACTIVE, @@ -561,22 +561,22 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH, &[id], - u64_to_i64(validator.activation_eligibility_epoch()), + 
u64_to_i64(validator.activation_eligibility_epoch), ); metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_EPOCH, &[id], - u64_to_i64(validator.activation_epoch()), + u64_to_i64(validator.activation_epoch), ); metrics::set_int_gauge( &metrics::VALIDATOR_EXIT_EPOCH, &[id], - u64_to_i64(validator.exit_epoch()), + u64_to_i64(validator.exit_epoch), ); metrics::set_int_gauge( &metrics::VALIDATOR_WITHDRAWABLE_EPOCH, &[id], - u64_to_i64(validator.withdrawable_epoch()), + u64_to_i64(validator.withdrawable_epoch), ); } } diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 7e605733951..e1b50706286 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -84,7 +84,7 @@ impl ValidatorPubkeyCache { state .validators() .iter_from(self.pubkeys.len())? - .map(|v| *v.pubkey), + .map(|v| v.pubkey), ) } else { Ok(vec![]) diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 02be7120ca9..40910b9b9fe 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -170,7 +170,7 @@ async fn voluntary_exit_duplicate_in_state() { .validators() .get(exited_validator as usize) .unwrap() - .exit_epoch(), + .exit_epoch, spec.far_future_epoch ); @@ -274,12 +274,14 @@ async fn proposer_slashing_duplicate_in_state() { .await; // Verify validator is actually slashed. - assert!(harness - .get_current_state() - .validators() - .get(slashed_validator as usize) - .unwrap() - .slashed()); + assert!( + harness + .get_current_state() + .validators() + .get(slashed_validator as usize) + .unwrap() + .slashed + ); // Clear the in-memory gossip cache & try to verify the same slashing on gossip. 
// It should still fail because gossip verification should check the validator's `slashed` field @@ -400,12 +402,14 @@ async fn attester_slashing_duplicate_in_state() { .await; // Verify validator is actually slashed. - assert!(harness - .get_current_state() - .validators() - .get(slashed_validator as usize) - .unwrap() - .slashed()); + assert!( + harness + .get_current_state() + .validators() + .get(slashed_validator as usize) + .unwrap() + .slashed + ); // Clear the in-memory gossip cache & try to verify the same slashing on gossip. // It should still fail because gossip verification should check the validator's `slashed` field diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 58b359125dd..0ef348319af 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2048,7 +2048,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .get_block_weight(&head.head_block_root()) .unwrap(), - head.snapshot.beacon_state.validators().get(0).unwrap().effective_balance(), + head.snapshot.beacon_state.validators().get(0).unwrap().effective_balance, "proposer boost should be removed from the head block and the vote of a single validator applied" ); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 24689c0b31a..b6a2ba61c23 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -797,7 +797,7 @@ async fn block_replayer_hooks() { let mut post_block_slots = vec![]; let mut replay_state = BlockReplayer::::new(state, &chain.spec) - .pre_slot_hook(Box::new(|state| { + .pre_slot_hook(Box::new(|_, state| { pre_slots.push(state.slot()); Ok(()) })) diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index f11eeeac09a..4c78b8efd8f 100644 --- 
a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -178,7 +178,7 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials(); + let creds = v.withdrawal_credentials; assert_eq!( creds.as_bytes()[0], spec.bls_withdrawal_prefix_byte, @@ -186,7 +186,7 @@ mod test { ); assert_eq!( &creds.as_bytes()[1..], - &hash(&v.pubkey().as_ssz_bytes())[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } @@ -241,7 +241,7 @@ mod test { } for (index, v) in state.validators().iter().enumerate() { - let withdrawal_credientials = v.withdrawal_credentials(); + let withdrawal_credientials = v.withdrawal_credentials; let creds = withdrawal_credientials.as_bytes(); if index % 2 == 0 { assert_eq!( diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index f54c6424313..7f11ddd8f43 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -14,7 +14,7 @@ pub fn pubkey_to_validator_index( state .validators() .get(index) - .map_or(false, |v| *v.pubkey == *pubkey) + .map_or(false, |v| v.pubkey == *pubkey) }) .map(Result::Ok) .transpose() diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 0a257725741..dd4e137ce66 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -95,13 +95,13 @@ pub fn validator_inclusion_data( let summary = get_epoch_processing_summary(&mut state, &chain.spec)?; Ok(Some(ValidatorInclusionData { - is_slashed: validator.slashed(), + is_slashed: validator.slashed, is_withdrawable_in_current_epoch: validator.is_withdrawable_at(epoch), is_active_unslashed_in_current_epoch: summary .is_active_unslashed_in_current_epoch(validator_index), is_active_unslashed_in_previous_epoch: summary .is_active_unslashed_in_previous_epoch(validator_index), - current_epoch_effective_balance_gwei: 
validator.effective_balance(), + current_epoch_effective_balance_gwei: validator.effective_balance, is_current_epoch_target_attester: summary .is_current_epoch_target_attester(validator_index) .map_err(convert_cache_error)?, diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs index 69765d79199..20af7a680df 100644 --- a/beacon_node/http_api/src/validators.rs +++ b/beacon_node/http_api/src/validators.rs @@ -29,7 +29,7 @@ pub fn get_beacon_state_validators( .filter(|(index, (validator, _))| { query_ids.as_ref().map_or(true, |ids| { ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => validator.pubkey() == pubkey, + ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, ValidatorId::Index(param_index) => { *param_index == *index as u64 } @@ -93,7 +93,7 @@ pub fn get_beacon_state_validator_balances( .filter(|(index, (validator, _))| { optional_ids.map_or(true, |ids| { ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => validator.pubkey() == pubkey, + ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, ValidatorId::Index(param_index) => { *param_index == *index as u64 } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 8536f0265e3..d44b9a688ce 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -822,7 +822,7 @@ impl ApiTester { ValidatorId::PublicKey( validators .get(i as usize) - .map_or(PublicKeyBytes::empty(), |val| *val.pubkey), + .map_or(PublicKeyBytes::empty(), |val| val.pubkey), ) }) .collect::>(); @@ -907,7 +907,7 @@ impl ApiTester { ValidatorId::PublicKey( validators .get(i as usize) - .map_or(PublicKeyBytes::empty(), |val| *val.pubkey), + .map_or(PublicKeyBytes::empty(), |val| val.pubkey), ) }) .collect::>(); @@ -1001,7 +1001,7 @@ impl ApiTester { for (i, validator) in validators.into_iter().enumerate() { let validator_ids = &[ - ValidatorId::PublicKey(*validator.pubkey), + 
ValidatorId::PublicKey(validator.pubkey), ValidatorId::Index(i as u64), ]; @@ -2360,7 +2360,7 @@ impl ApiTester { .unwrap() { let expected = AttesterData { - pubkey: *state.validators().get(i as usize).unwrap().pubkey, + pubkey: state.validators().get(i as usize).unwrap().pubkey, validator_index: i, committees_at_slot: duty.committees_at_slot, committee_index: duty.index, @@ -2465,7 +2465,7 @@ impl ApiTester { let index = state .get_beacon_proposer_index(slot, &self.chain.spec) .unwrap(); - let pubkey = *state.validators().get(index).unwrap().pubkey; + let pubkey = state.validators().get(index).unwrap().pubkey; ProposerData { pubkey, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index fee8a49c51d..3f0dd6abaa1 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -392,7 +392,7 @@ impl OperationPool { && state .validators() .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed()) + .map_or(false, |validator| !validator.slashed) }, |slashing| slashing.as_inner().clone(), E::MaxProposerSlashings::to_usize(), @@ -451,7 +451,7 @@ impl OperationPool { pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |_, validator| validator.exit_epoch() <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -470,7 +470,7 @@ impl OperationPool { // // We cannot check the `slashed` field since the `head` is not finalized and // a fork could un-slash someone. 
- validator.exit_epoch() > head_state.finalized_checkpoint().epoch + validator.exit_epoch > head_state.finalized_checkpoint().epoch }) .map_or(false, |indices| !indices.is_empty()); @@ -527,7 +527,7 @@ impl OperationPool { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently. - |_, validator| validator.exit_epoch() <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -1272,12 +1272,7 @@ mod release_tests { // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). for i in 0..state.validators().len() { - state - .validators_mut() - .get_mut(i) - .unwrap() - .mutable - .effective_balance = 1_000_000_000 * i as u64; + state.validators_mut().get_mut(i).unwrap().effective_balance = 1_000_000_000 * i as u64; } let num_validators = num_committees @@ -1535,24 +1530,9 @@ mod release_tests { let spec = &harness.spec; let mut state = harness.get_current_state(); let op_pool = OperationPool::::new(); - state - .validators_mut() - .get_mut(1) - .unwrap() - .mutable - .effective_balance = 17_000_000_000; - state - .validators_mut() - .get_mut(2) - .unwrap() - .mutable - .effective_balance = 17_000_000_000; - state - .validators_mut() - .get_mut(3) - .unwrap() - .mutable - .effective_balance = 17_000_000_000; + state.validators_mut().get_mut(1).unwrap().effective_balance = 17_000_000_000; + state.validators_mut().get_mut(2).unwrap().effective_balance = 17_000_000_000; + state.validators_mut().get_mut(3).unwrap().effective_balance = 17_000_000_000; let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3]); let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]); diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 04e37ed1935..5f85d777957 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs 
@@ -375,20 +375,20 @@ pub enum ValidatorStatus { impl ValidatorStatus { pub fn from_validator(validator: &Validator, epoch: Epoch, far_future_epoch: Epoch) -> Self { if validator.is_withdrawable_at(epoch) { - if validator.effective_balance() == 0 { + if validator.effective_balance == 0 { ValidatorStatus::WithdrawalDone } else { ValidatorStatus::WithdrawalPossible } - } else if validator.is_exited_at(epoch) && epoch < validator.withdrawable_epoch() { - if validator.slashed() { + } else if validator.is_exited_at(epoch) && epoch < validator.withdrawable_epoch { + if validator.slashed { ValidatorStatus::ExitedSlashed } else { ValidatorStatus::ExitedUnslashed } } else if validator.is_active_at(epoch) { - if validator.exit_epoch() < far_future_epoch { - if validator.slashed() { + if validator.exit_epoch < far_future_epoch { + if validator.slashed { ValidatorStatus::ActiveSlashed } else { ValidatorStatus::ActiveExiting @@ -399,7 +399,7 @@ impl ValidatorStatus { // `pending` statuses are specified as validators where `validator.activation_epoch > current_epoch`. // If this code is reached, this criteria must have been met because `validator.is_active_at(epoch)`, // `validator.is_exited_at(epoch)`, and `validator.is_withdrawable_at(epoch)` all returned false. 
- } else if validator.activation_eligibility_epoch() == far_future_epoch { + } else if validator.activation_eligibility_epoch == far_future_epoch { ValidatorStatus::PendingInitialized } else { ValidatorStatus::PendingQueued diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index f90383e96a8..3153275fb73 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -392,7 +392,7 @@ impl ForkChoiceTest { .into_iter() .map(|v| { if v.is_active_at(state.current_epoch()) { - v.effective_balance() + v.effective_balance } else { 0 } diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs index daff362209a..e08c8443eef 100644 --- a/consensus/proto_array/src/justified_balances.rs +++ b/consensus/proto_array/src/justified_balances.rs @@ -24,11 +24,11 @@ impl JustifiedBalances { .validators() .iter() .map(|validator| { - if !validator.slashed() && validator.is_active_at(current_epoch) { - total_effective_balance.safe_add_assign(validator.effective_balance())?; + if !validator.slashed && validator.is_active_at(current_epoch) { + total_effective_balance.safe_add_assign(validator.effective_balance)?; num_active_validators.safe_add_assign(1)?; - Ok(validator.effective_balance()) + Ok(validator.effective_balance) } else { Ok(0) } diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index 4abe326cb1c..c6565a76b0a 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -33,13 +33,13 @@ pub fn initiate_validator_exit( let validator = state.get_validator_cow(index)?; // Return if the validator already initiated exit - if validator.exit_epoch() != spec.far_future_epoch { + if validator.exit_epoch != spec.far_future_epoch { return Ok(()); } let validator = 
validator.into_mut()?; - validator.mutable.exit_epoch = exit_queue_epoch; - validator.mutable.withdrawable_epoch = + validator.exit_epoch = exit_queue_epoch; + validator.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; state diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index da84b0af135..16b4e74ece9 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -25,12 +25,12 @@ pub fn slash_validator( initiate_validator_exit(state, slashed_index, spec)?; let validator = state.get_validator_mut(slashed_index)?; - validator.mutable.slashed = true; - validator.mutable.withdrawable_epoch = cmp::max( - validator.withdrawable_epoch(), + validator.slashed = true; + validator.withdrawable_epoch = cmp::max( + validator.withdrawable_epoch, epoch.safe_add(E::EpochsPerSlashingsVector::to_u64())?, ); - let validator_effective_balance = validator.effective_balance(); + let validator_effective_balance = validator.effective_balance; state.set_slashings( epoch, state diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index 280b5377ab9..af843b3acbc 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -35,7 +35,7 @@ pub fn initialize_progressive_balances_cache( .zip(state.previous_epoch_participation()?) { // Exclude slashed validators. We are calculating *unslashed* participating totals. - if validator.slashed() { + if validator.slashed { continue; } @@ -78,7 +78,7 @@ fn update_flag_total_balances( ) -> Result<(), BeaconStateError> { for (flag, balance) in total_balances.total_flag_balances.iter_mut().enumerate() { if participation_flags.has_flag(flag)? 
{ - balance.safe_add_assign(validator.effective_balance())?; + balance.safe_add_assign(validator.effective_balance)?; } } Ok(()) diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 1d7473d7350..b2f2d85407e 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -117,7 +117,7 @@ pub fn initialize_epoch_cache( let mut activation_queue = ActivationQueue::default(); for (index, validator) in state.validators().iter().enumerate() { - effective_balances.push(validator.effective_balance()); + effective_balances.push(validator.effective_balance); // Add to speculative activation queue. activation_queue diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 88dd94186ae..80c0e6fdc33 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -159,13 +159,13 @@ pub fn process_activations( .get(index) .copied() .ok_or(Error::BalancesOutOfBounds(index))?; - validator.mutable.effective_balance = std::cmp::min( + validator.effective_balance = std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); - if validator.effective_balance() == spec.max_effective_balance { - validator.mutable.activation_eligibility_epoch = E::genesis_epoch(); - validator.mutable.activation_epoch = E::genesis_epoch(); + if validator.effective_balance == spec.max_effective_balance { + validator.activation_eligibility_epoch = E::genesis_epoch(); + validator.activation_epoch = E::genesis_epoch(); } } Ok(()) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 5d26cd22664..b370ec6216b 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -251,7 +251,7 @@ pub fn process_block_header( // 
Verify proposer is not slashed verify!( - !state.get_validator(proposer_index as usize)?.slashed(), + !state.get_validator(proposer_index as usize)?.slashed, HeaderInvalid::ProposerSlashed(proposer_index) ); diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 7e114c71c6e..441ce699430 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -5,7 +5,6 @@ use crate::common::{ }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; -use std::sync::Arc; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; pub fn process_operations>( @@ -413,19 +412,17 @@ pub fn process_deposit( // Create a new validator. let validator = Validator { - pubkey: Arc::new(deposit.data.pubkey), - mutable: ValidatorMutable { - withdrawal_credentials: deposit.data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - slashed: false, - }, + pubkey: deposit.data.pubkey, + withdrawal_credentials: deposit.data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, + spec.max_effective_balance, + ), + slashed: false, }; state.validators_mut().push(validator)?; state.balances_mut().push(deposit.data.amount)?; diff --git 
a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index d3d3af096db..163b2cff7a9 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -64,7 +64,7 @@ where .validators() .get(validator_index) .and_then(|v| { - let pk: Option = v.pubkey().decompress().ok(); + let pk: Option = v.pubkey.decompress().ok(); pk }) .map(Cow::Owned) diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 500355c7543..1e8f25ed10b 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -29,7 +29,7 @@ pub fn verify_bls_to_execution_change( verify!( validator - .withdrawal_credentials() + .withdrawal_credentials .as_bytes() .first() .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) @@ -41,7 +41,7 @@ pub fn verify_bls_to_execution_change( // future. let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); verify!( - validator.withdrawal_credentials().as_bytes().get(1..) == pubkey_hash.get(1..), + validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), Invalid::WithdrawalCredentialsMismatch ); diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index 3619feaf857..fc258d38298 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -41,7 +41,7 @@ pub fn verify_exit( // Verify that the validator has not yet exited. 
verify!( - validator.exit_epoch() == spec.far_future_epoch, + validator.exit_epoch == spec.far_future_epoch, ExitInvalid::AlreadyExited(exit.validator_index) ); @@ -56,7 +56,7 @@ pub fn verify_exit( // Verify the validator has been active long enough. let earliest_exit_epoch = validator - .activation_epoch() + .activation_epoch .safe_add(spec.shard_committee_period)?; verify!( current_epoch >= earliest_exit_epoch, diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index fe8db7d2dee..7e244058038 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -202,9 +202,9 @@ impl ValidatorStatuses { let previous_epoch = state.previous_epoch(); for validator in state.validators().iter() { - let effective_balance = validator.effective_balance(); + let effective_balance = validator.effective_balance; let mut status = ValidatorStatus { - is_slashed: validator.slashed(), + is_slashed: validator.slashed, is_eligible: state.is_eligible_validator(previous_epoch, validator)?, is_withdrawable_in_current_epoch: validator.is_withdrawable_at(current_epoch), current_epoch_effective_balance: effective_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index 146e4a3a8e3..73881e932b7 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -30,23 +30,23 @@ pub fn process_effective_balance_updates( .ok_or(BeaconStateError::BalancesOutOfBounds(index))?; let new_effective_balance = if balance.safe_add(downward_threshold)? 
- < validator.effective_balance() - || validator.effective_balance().safe_add(upward_threshold)? < balance + < validator.effective_balance + || validator.effective_balance.safe_add(upward_threshold)? < balance { std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ) } else { - validator.effective_balance() + validator.effective_balance }; if validator.is_active_at(next_epoch) { new_total_active_balance.safe_add_assign(new_effective_balance)?; } - if new_effective_balance != validator.effective_balance() { - validator.into_mut()?.mutable.effective_balance = new_effective_balance; + if new_effective_balance != validator.effective_balance { + validator.into_mut()?.effective_balance = new_effective_balance; } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 508426af18c..6f48050e161 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -54,7 +54,7 @@ impl ParticipationEpochSummary { pub fn is_active_and_unslashed(&self, val_index: usize, epoch: Epoch) -> bool { self.validators .get(val_index) - .map(|validator| !validator.slashed() && validator.is_active_at(epoch)) + .map(|validator| !validator.slashed && validator.is_active_at(epoch)) .unwrap_or(false) } diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index c978a76d059..6b86f9c1e76 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -17,7 +17,7 @@ pub fn process_registry_updates( let current_epoch = state.current_epoch(); let is_ejectable = |validator: &Validator| { 
validator.is_active_at(current_epoch) - && validator.effective_balance() <= spec.ejection_balance + && validator.effective_balance <= spec.ejection_balance }; let indices_to_update: Vec<_> = state .validators() @@ -32,7 +32,7 @@ pub fn process_registry_updates( for index in indices_to_update { let validator = state.get_validator_mut(index)?; if validator.is_eligible_for_activation_queue(spec) { - validator.mutable.activation_eligibility_epoch = current_epoch.safe_add(1)?; + validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if is_ejectable(validator) { initiate_validator_exit(state, index, spec)?; @@ -50,7 +50,7 @@ pub fn process_registry_updates( let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; for index in activation_queue { - state.get_validator_mut(index)?.mutable.activation_epoch = delayed_activation_epoch; + state.get_validator_mut(index)?.activation_epoch = delayed_activation_epoch; } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 513fc26b6ff..64ce3f04ea5 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -200,8 +200,7 @@ pub fn process_epoch_single_pass( let is_active_current_epoch = validator.is_active_at(current_epoch); let is_active_previous_epoch = validator.is_active_at(previous_epoch); let is_eligible = is_active_previous_epoch - || (validator.slashed() - && previous_epoch.safe_add(1)? < validator.withdrawable_epoch()); + || (validator.slashed && previous_epoch.safe_add(1)? < validator.withdrawable_epoch); let base_reward = if is_eligible { epoch_cache.get_base_reward(index)? 
@@ -211,10 +210,10 @@ pub fn process_epoch_single_pass( let validator_info = &ValidatorInfo { index, - effective_balance: validator.effective_balance(), + effective_balance: validator.effective_balance, base_reward, is_eligible, - is_slashed: validator.slashed(), + is_slashed: validator.slashed, is_active_current_epoch, is_active_previous_epoch, previous_epoch_participation, @@ -468,17 +467,16 @@ fn process_single_registry_update( let current_epoch = state_ctxt.current_epoch; if validator.is_eligible_for_activation_queue(spec) { - validator.make_mut()?.mutable.activation_eligibility_epoch = current_epoch.safe_add(1)?; + validator.make_mut()?.activation_eligibility_epoch = current_epoch.safe_add(1)?; } - if validator.is_active_at(current_epoch) - && validator.effective_balance() <= spec.ejection_balance + if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance { initiate_validator_exit(validator, exit_cache, state_ctxt, spec)?; } if activation_queue.contains(&validator_info.index) { - validator.make_mut()?.mutable.activation_epoch = + validator.make_mut()?.activation_epoch = spec.compute_activation_exit_epoch(current_epoch)?; } @@ -500,7 +498,7 @@ fn initiate_validator_exit( spec: &ChainSpec, ) -> Result<(), Error> { // Return if the validator already initiated exit - if validator.exit_epoch() != spec.far_future_epoch { + if validator.exit_epoch != spec.far_future_epoch { return Ok(()); } @@ -516,8 +514,8 @@ fn initiate_validator_exit( } let validator = validator.make_mut()?; - validator.mutable.exit_epoch = exit_queue_epoch; - validator.mutable.withdrawable_epoch = + validator.exit_epoch = exit_queue_epoch; + validator.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; exit_cache.record_validator_exit(exit_queue_epoch)?; @@ -554,12 +552,11 @@ fn process_single_slashing( state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { - if validator.slashed() - && 
slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch() + if validator.slashed && slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; let penalty_numerator = validator - .effective_balance() + .effective_balance .safe_div(increment)? .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; let penalty = penalty_numerator @@ -599,11 +596,11 @@ fn process_single_effective_balance_update( state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { - let old_effective_balance = validator.effective_balance(); + let old_effective_balance = validator.effective_balance; let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? - < validator.effective_balance() + < validator.effective_balance || validator - .effective_balance() + .effective_balance .safe_add(eb_ctxt.upward_threshold)? < balance { @@ -612,7 +609,7 @@ fn process_single_effective_balance_update( spec.max_effective_balance, ) } else { - validator.effective_balance() + validator.effective_balance }; if validator.is_active_at(state_ctxt.next_epoch) { @@ -620,12 +617,12 @@ fn process_single_effective_balance_update( } if new_effective_balance != old_effective_balance { - validator.make_mut()?.mutable.effective_balance = new_effective_balance; + validator.make_mut()?.effective_balance = new_effective_balance; // Update progressive balances cache for the *current* epoch, which will soon become the // previous epoch once the epoch transition completes. 
progressive_balances.on_effective_balance_change( - validator.slashed(), + validator.slashed, validator_info.current_epoch_participation, old_effective_balance, new_effective_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 7618c9b6367..a1770478008 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -27,9 +27,9 @@ pub fn process_slashings( .iter() .enumerate() .filter(|(_, validator)| { - validator.slashed() && target_withdrawable_epoch == validator.withdrawable_epoch() + validator.slashed && target_withdrawable_epoch == validator.withdrawable_epoch }) - .map(|(index, validator)| (index, validator.effective_balance())) + .map(|(index, validator)| (index, validator.effective_balance)) .collect::>(); for (index, validator_effective_balance) in indices { diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 17d266a56e5..5c1036a4c5a 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -8,7 +8,7 @@ use ssz::Encode; use std::sync::Arc; use types::{ test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256, - MainnetEthSpec, Validator, ValidatorMutable, + MainnetEthSpec, Validator, }; fn get_state(validator_count: usize) -> BeaconState { @@ -33,16 +33,14 @@ fn get_state(validator_count: usize) -> BeaconState { .collect::>() .par_iter() .map(|&i| Validator { - pubkey: Arc::new(generate_deterministic_keypair(i).pk.compress()), - mutable: ValidatorMutable { - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: spec.max_effective_balance, - slashed: false, - activation_eligibility_epoch: Epoch::new(0), - activation_epoch: Epoch::new(0), - exit_epoch: Epoch::from(u64::max_value()), - withdrawable_epoch: Epoch::from(u64::max_value()), 
- }, + pubkey: generate_deterministic_keypair(i).pk.compress(), + withdrawal_credentials: Hash256::from_low_u64_le(i as u64), + effective_balance: spec.max_effective_balance, + slashed: false, + activation_eligibility_epoch: Epoch::new(0), + activation_epoch: Epoch::new(0), + exit_epoch: Epoch::from(u64::max_value()), + withdrawable_epoch: Epoch::from(u64::max_value()), }) .collect(), ) diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/activation_queue.rs index acbb276a61a..09ffa5b85e7 100644 --- a/consensus/types/src/activation_queue.rs +++ b/consensus/types/src/activation_queue.rs @@ -23,7 +23,7 @@ impl ActivationQueue { ) { if validator.could_be_eligible_for_activation_at(next_epoch, spec) { self.queue - .insert((validator.activation_eligibility_epoch(), index)); + .insert((validator.activation_eligibility_epoch, index)); } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index cae5d51cdff..e6ad8211b55 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1,7 +1,6 @@ use self::committee_cache::get_active_validator_indices; use crate::historical_summary::HistoricalSummary; use crate::test_utils::TestRandom; -use crate::validator::ValidatorTrait; use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; @@ -37,7 +36,6 @@ pub use milhouse::{interface::Interface, List, Vector}; #[macro_use] mod committee_cache; mod balance; -pub mod compact_state; mod exit_cache; mod iter; mod progressive_balances_cache; @@ -224,7 +222,7 @@ impl From for Hash256 { arbitrary::Arbitrary, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec, GenericValidator: ValidatorTrait"), + arbitrary(bound = "E: EthSpec"), derivative(Clone), ), specific_variant_attributes( @@ -316,10 +314,10 @@ impl From for Hash256 { )] #[serde(untagged)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec, GenericValidator: 
ValidatorTrait")] +#[arbitrary(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconState +pub struct BeaconState where E: EthSpec, { @@ -364,7 +362,7 @@ where // Registry #[test_random(default)] - pub validators: List, + pub validators: List, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] #[compare_fields(as_iter)] #[test_random(default)] @@ -1066,7 +1064,7 @@ impl BeaconState { .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; let random_byte = Self::shuffling_random_byte(i, seed.as_bytes())?; - let effective_balance = self.get_validator(candidate_index)?.effective_balance(); + let effective_balance = self.get_validator(candidate_index)?.effective_balance; if effective_balance.safe_mul(MAX_RANDOM_BYTE)? >= spec .max_effective_balance @@ -1088,7 +1086,7 @@ impl BeaconState { .map(|&index| { self.validators() .get(index) - .map(|v| *v.pubkey()) + .map(|v| v.pubkey) .ok_or(Error::UnknownValidator(index)) }) .collect::, _>>()?; @@ -1119,7 +1117,7 @@ impl BeaconState { Ok(validator_indices .iter() .map(|&validator_index| { - let pubkey = *self.get_validator(validator_index as usize)?.pubkey(); + let pubkey = self.get_validator(validator_index as usize)?.pubkey; Ok(SyncDuty::from_sync_committee( validator_index, @@ -1513,7 +1511,7 @@ impl BeaconState { /// Return the effective balance for a validator with the given `validator_index`. pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) - .map(|v| v.effective_balance()) + .map(|v| v.effective_balance) } /// Get the inactivity score for a single validator. 
@@ -1602,7 +1600,7 @@ impl BeaconState { for validator in self.validators() { if validator.is_active_at(current_epoch) { - total_active_balance.safe_add_assign(validator.effective_balance())?; + total_active_balance.safe_add_assign(validator.effective_balance)?; } } Ok(std::cmp::max( @@ -1907,7 +1905,7 @@ impl BeaconState { for (i, validator) in self.validators().iter_from(start_index)?.enumerate() { let index = start_index.safe_add(i)?; - let success = pubkey_cache.insert(*validator.pubkey(), index); + let success = pubkey_cache.insert(validator.pubkey, index); if !success { return Err(Error::PubkeyCacheInconsistent); } @@ -1977,8 +1975,7 @@ impl BeaconState { val: &Validator, ) -> Result { Ok(val.is_active_at(previous_epoch) - || (val.slashed() - && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch())) + || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) } /// Passing `previous_epoch` to this function rather than computing it internally provides @@ -2026,7 +2023,6 @@ impl BeaconState { #[allow(clippy::arithmetic_side_effects)] pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { // Required for macros (which use type-hints internally). - type GenericValidator = Validator; match (&mut *self, base) { (Self::Base(self_inner), Self::Base(base_inner)) => { @@ -2136,7 +2132,7 @@ impl BeaconState { } } -impl BeaconState { +impl BeaconState { /// The number of fields of the `BeaconState` rounded up to the nearest power of two. /// /// This is relevant to tree-hashing of the `BeaconState`. 
diff --git a/consensus/types/src/beacon_state/compact_state.rs b/consensus/types/src/beacon_state/compact_state.rs deleted file mode 100644 index 3f8f47c8541..00000000000 --- a/consensus/types/src/beacon_state/compact_state.rs +++ /dev/null @@ -1,316 +0,0 @@ -use crate::{ - BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella, BeaconStateDeneb, - BeaconStateElectra, BeaconStateError as Error, BeaconStateMerge, EthSpec, List, PublicKeyBytes, - Validator, ValidatorMutable, -}; -use itertools::process_results; -use std::sync::Arc; - -pub type CompactBeaconState = BeaconState; - -/// Implement the conversion function from BeaconState -> CompactBeaconState. -macro_rules! full_to_compact { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { - BeaconState::$variant_name($struct_name { - // Versioning - genesis_time: $s.genesis_time, - genesis_validators_root: $s.genesis_validators_root, - slot: $s.slot, - fork: $s.fork, - - // History - latest_block_header: $s.latest_block_header.clone(), - block_roots: $s.block_roots.clone(), - state_roots: $s.state_roots.clone(), - historical_roots: $s.historical_roots.clone(), - - // Eth1 - eth1_data: $s.eth1_data.clone(), - eth1_data_votes: $s.eth1_data_votes.clone(), - eth1_deposit_index: $s.eth1_deposit_index, - - // Validator registry - validators: List::try_from_iter( - $s.validators.into_iter().map(|validator| validator.mutable.clone()) - ).expect("fix this"), - balances: $s.balances.clone(), - - // Shuffling - randao_mixes: $s.randao_mixes.clone(), - - // Slashings - slashings: $s.slashings.clone(), - - // Finality - justification_bits: $s.justification_bits.clone(), - previous_justified_checkpoint: $s.previous_justified_checkpoint, - current_justified_checkpoint: $s.current_justified_checkpoint, - finalized_checkpoint: $s.finalized_checkpoint, - - // Caches. 
- total_active_balance: $s.total_active_balance.clone(), - committee_caches: $s.committee_caches.clone(), - progressive_balances_cache: $s.progressive_balances_cache.clone(), - pubkey_cache: $s.pubkey_cache.clone(), - exit_cache: $s.exit_cache.clone(), - slashings_cache: $s.slashings_cache.clone(), - epoch_cache: $s.epoch_cache.clone(), - - // Variant-specific fields - $( - $extra_fields: $s.$extra_fields.clone() - ),* - }) - } -} - -/// Implement the conversion from CompactBeaconState -> BeaconState. -macro_rules! compact_to_full { - ($inner:ident, $variant_name:ident, $struct_name:ident, $immutable_validators:ident, [$($extra_fields:ident),*]) => { - BeaconState::$variant_name($struct_name { - // Versioning - genesis_time: $inner.genesis_time, - genesis_validators_root: $inner.genesis_validators_root, - slot: $inner.slot, - fork: $inner.fork, - - // History - latest_block_header: $inner.latest_block_header, - block_roots: $inner.block_roots, - state_roots: $inner.state_roots, - historical_roots: $inner.historical_roots, - - // Eth1 - eth1_data: $inner.eth1_data, - eth1_data_votes: $inner.eth1_data_votes, - eth1_deposit_index: $inner.eth1_deposit_index, - - // Validator registry - validators: process_results($inner.validators.into_iter().enumerate().map(|(i, mutable)| { - $immutable_validators(i) - .ok_or(Error::MissingImmutableValidator(i)) - .map(move |pubkey| { - Validator { - pubkey, - mutable: mutable.clone(), - } - }) - }), |iter| List::try_from_iter(iter))??, - balances: $inner.balances, - - // Shuffling - randao_mixes: $inner.randao_mixes, - - // Slashings - slashings: $inner.slashings, - - // Finality - justification_bits: $inner.justification_bits, - previous_justified_checkpoint: $inner.previous_justified_checkpoint, - current_justified_checkpoint: $inner.current_justified_checkpoint, - finalized_checkpoint: $inner.finalized_checkpoint, - - // Caching - total_active_balance: $inner.total_active_balance, - committee_caches: $inner.committee_caches, - 
progressive_balances_cache: $inner.progressive_balances_cache, - pubkey_cache: $inner.pubkey_cache, - exit_cache: $inner.exit_cache, - slashings_cache: $inner.slashings_cache, - epoch_cache: $inner.epoch_cache, - - // Variant-specific fields - $( - $extra_fields: $inner.$extra_fields - ),* - }) - } -} - -impl BeaconState { - pub fn into_compact_state(self) -> CompactBeaconState { - match self { - BeaconState::Base(s) => full_to_compact!( - s, - self, - Base, - BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] - ), - BeaconState::Altair(s) => full_to_compact!( - s, - self, - Altair, - BeaconStateAltair, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores - ] - ), - BeaconState::Merge(s) => full_to_compact!( - s, - self, - Merge, - BeaconStateMerge, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - BeaconState::Capella(s) => full_to_compact!( - s, - self, - Capella, - BeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - historical_summaries, - next_withdrawal_index, - next_withdrawal_validator_index - ] - ), - BeaconState::Deneb(s) => full_to_compact!( - s, - self, - Deneb, - BeaconStateDeneb, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - historical_summaries, - next_withdrawal_index, - next_withdrawal_validator_index - ] - ), - BeaconState::Electra(s) => full_to_compact!( - s, - self, - Electra, - BeaconStateElectra, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - 
latest_execution_payload_header, - historical_summaries, - next_withdrawal_index, - next_withdrawal_validator_index - ] - ), - } - } -} - -impl CompactBeaconState { - pub fn try_into_full_state(self, immutable_validators: F) -> Result, Error> - where - F: Fn(usize) -> Option>, - { - let state = match self { - BeaconState::Base(inner) => compact_to_full!( - inner, - Base, - BeaconStateBase, - immutable_validators, - [previous_epoch_attestations, current_epoch_attestations] - ), - BeaconState::Altair(inner) => compact_to_full!( - inner, - Altair, - BeaconStateAltair, - immutable_validators, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores - ] - ), - BeaconState::Merge(inner) => compact_to_full!( - inner, - Merge, - BeaconStateMerge, - immutable_validators, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - BeaconState::Capella(inner) => compact_to_full!( - inner, - Capella, - BeaconStateCapella, - immutable_validators, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - historical_summaries, - next_withdrawal_index, - next_withdrawal_validator_index - ] - ), - BeaconState::Deneb(inner) => compact_to_full!( - inner, - Deneb, - BeaconStateDeneb, - immutable_validators, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - historical_summaries, - next_withdrawal_index, - next_withdrawal_validator_index - ] - ), - BeaconState::Electra(inner) => compact_to_full!( - inner, - Electra, - BeaconStateElectra, - immutable_validators, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - 
next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - historical_summaries, - next_withdrawal_index, - next_withdrawal_validator_index - ] - ), - }; - Ok(state) - } -} diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 1a570549957..0bb984b6676 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -28,8 +28,8 @@ impl ExitCache { // Add all validators with a non-default exit epoch to the cache. validators .into_iter() - .filter(|validator| validator.exit_epoch() != spec.far_future_epoch) - .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch()))?; + .filter(|validator| validator.exit_epoch != spec.far_future_epoch) + .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch))?; Ok(exit_cache) } diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/beacon_state/slashings_cache.rs index 19813ebbfe1..45d8f7e2129 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/beacon_state/slashings_cache.rs @@ -20,7 +20,7 @@ impl SlashingsCache { let slashed_validators = validators .into_iter() .enumerate() - .filter_map(|(i, validator)| validator.slashed().then_some(i)) + .filter_map(|(i, validator)| validator.slashed.then_some(i)) .collect(); Self { latest_block_slot: Some(latest_block_slot), diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 226eb9099a0..012c063afef 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -99,7 +99,6 @@ async fn test_beacon_proposer_index() { .validators_mut() .get_mut(slot0_candidate0) .unwrap() - .mutable .effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..E::slots_per_epoch() { diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 
82524e069b1..3d2f94fa04e 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -124,7 +124,7 @@ pub use crate::beacon_block_body::{ }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_state::{compact_state::CompactBeaconState, Error as BeaconStateError, *}; +pub use crate::beacon_state::{Error as BeaconStateError, *}; pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; @@ -220,7 +220,7 @@ pub use crate::sync_committee_subscription::SyncCommitteeSubscription; pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::{Validator, ValidatorMutable}; +pub use crate::validator::Validator; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 349f4a9b16f..98567cd1e6c 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -2,34 +2,28 @@ use crate::{ test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, }; -use arbitrary::Arbitrary; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; use test_random_derive::TestRandom; -use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -const NUM_FIELDS: usize = 8; - /// Information about a `BeaconChain` validator. 
/// /// Spec v0.12.1 #[derive( - Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, )] -#[serde(deny_unknown_fields)] pub struct Validator { - pub pubkey: Arc, - #[serde(flatten)] - pub mutable: ValidatorMutable, -} - -/// The mutable fields of a validator. -#[derive( - Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Arbitrary, -)] -pub struct ValidatorMutable { + pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, @@ -40,148 +34,47 @@ pub struct ValidatorMutable { pub withdrawable_epoch: Epoch, } -pub trait ValidatorTrait: - std::fmt::Debug - + PartialEq - + Clone - + serde::Serialize - + Send - + Sync - + serde::de::DeserializeOwned - + ssz::Encode - + ssz::Decode - + TreeHash - + TestRandom - + for<'a> arbitrary::Arbitrary<'a> -{ -} - -impl ValidatorTrait for Validator {} -impl ValidatorTrait for ValidatorMutable {} - impl Validator { - pub fn pubkey(&self) -> &PublicKeyBytes { - &self.pubkey - } - - pub fn pubkey_clone(&self) -> Arc { - self.pubkey.clone() - } - - /// Replace the validator's pubkey (should only be used during testing). 
- pub fn replace_pubkey(&mut self, pubkey: PublicKeyBytes) { - self.pubkey = Arc::new(pubkey); - } - - #[inline] - pub fn withdrawal_credentials(&self) -> Hash256 { - self.mutable.withdrawal_credentials - } - - #[inline] - pub fn effective_balance(&self) -> u64 { - self.mutable.effective_balance - } - - #[inline] - pub fn slashed(&self) -> bool { - self.mutable.slashed - } - - #[inline] - pub fn activation_eligibility_epoch(&self) -> Epoch { - self.mutable.activation_eligibility_epoch - } - - #[inline] - pub fn activation_epoch(&self) -> Epoch { - self.mutable.activation_epoch - } - - #[inline] - pub fn activation_epoch_mut(&mut self) -> &mut Epoch { - &mut self.mutable.activation_epoch - } - - #[inline] - pub fn exit_epoch(&self) -> Epoch { - self.mutable.exit_epoch - } - - pub fn exit_epoch_mut(&mut self) -> &mut Epoch { - &mut self.mutable.exit_epoch - } - - #[inline] - pub fn withdrawable_epoch(&self) -> Epoch { - self.mutable.withdrawable_epoch - } - /// Returns `true` if the validator is considered active at some epoch. - #[inline] pub fn is_active_at(&self, epoch: Epoch) -> bool { - self.activation_epoch() <= epoch && epoch < self.exit_epoch() + self.activation_epoch <= epoch && epoch < self.exit_epoch } /// Returns `true` if the validator is slashable at some epoch. - #[inline] pub fn is_slashable_at(&self, epoch: Epoch) -> bool { - !self.slashed() && self.activation_epoch() <= epoch && epoch < self.withdrawable_epoch() + !self.slashed && self.activation_epoch <= epoch && epoch < self.withdrawable_epoch } /// Returns `true` if the validator is considered exited at some epoch. - #[inline] pub fn is_exited_at(&self, epoch: Epoch) -> bool { - self.exit_epoch() <= epoch + self.exit_epoch <= epoch } /// Returns `true` if the validator is able to withdraw at some epoch. 
- #[inline] pub fn is_withdrawable_at(&self, epoch: Epoch) -> bool { - epoch >= self.withdrawable_epoch() + epoch >= self.withdrawable_epoch } /// Returns `true` if the validator is eligible to join the activation queue. /// /// Spec v0.12.1 - #[inline] pub fn is_eligible_for_activation_queue(&self, spec: &ChainSpec) -> bool { - self.activation_eligibility_epoch() == spec.far_future_epoch - && self.effective_balance() == spec.max_effective_balance + self.activation_eligibility_epoch == spec.far_future_epoch + && self.effective_balance == spec.max_effective_balance } /// Returns `true` if the validator is eligible to be activated. /// /// Spec v0.12.1 - #[inline] pub fn is_eligible_for_activation( &self, state: &BeaconState, spec: &ChainSpec, ) -> bool { - // Has not yet been activated - self.activation_epoch() == spec.far_future_epoch && // Placement in queue is finalized - self.activation_eligibility_epoch() <= state.finalized_checkpoint().epoch - } - - fn tree_hash_root_internal(&self) -> Result { - let mut hasher = tree_hash::MerkleHasher::with_leaves(NUM_FIELDS); - - hasher.write(self.pubkey().tree_hash_root().as_bytes())?; - hasher.write(self.withdrawal_credentials().tree_hash_root().as_bytes())?; - hasher.write(self.effective_balance().tree_hash_root().as_bytes())?; - hasher.write(self.slashed().tree_hash_root().as_bytes())?; - hasher.write( - self.activation_eligibility_epoch() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(self.activation_epoch().tree_hash_root().as_bytes())?; - hasher.write(self.exit_epoch().tree_hash_root().as_bytes())?; - hasher.write(self.withdrawable_epoch().tree_hash_root().as_bytes())?; - - hasher.finish() + self.activation_eligibility_epoch <= state.finalized_checkpoint().epoch + // Has not yet been activated + && self.activation_epoch == spec.far_future_epoch } /// Returns `true` if the validator *could* be eligible for activation at `epoch`. 
@@ -191,18 +84,18 @@ impl Validator { /// the epoch transition at the end of `epoch`. pub fn could_be_eligible_for_activation_at(&self, epoch: Epoch, spec: &ChainSpec) -> bool { // Has not yet been activated - self.activation_epoch() == spec.far_future_epoch + self.activation_epoch == spec.far_future_epoch // Placement in queue could be finalized. // // NOTE: the epoch distance is 1 rather than 2 because we consider the activations that // occur at the *end* of `epoch`, after `process_justification_and_finalization` has already // updated the state's checkpoint. - && self.activation_eligibility_epoch() < epoch + && self.activation_eligibility_epoch < epoch } /// Returns `true` if the validator has eth1 withdrawal credential. pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { - self.withdrawal_credentials() + self.withdrawal_credentials .as_bytes() .first() .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) @@ -213,7 +106,7 @@ impl Validator { pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ self.has_eth1_withdrawal_credential(spec) .then(|| { - self.withdrawal_credentials() + self.withdrawal_credentials .as_bytes() .get(12..) .map(Address::from_slice) @@ -228,37 +121,28 @@ impl Validator { let mut bytes = [0u8; 32]; bytes[0] = spec.eth1_address_withdrawal_prefix_byte; bytes[12..].copy_from_slice(execution_address.as_bytes()); - self.mutable.withdrawal_credentials = Hash256::from(bytes); + self.withdrawal_credentials = Hash256::from(bytes); } /// Returns `true` if the validator is fully withdrawable at some epoch. pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { - self.has_eth1_withdrawal_credential(spec) - && self.withdrawable_epoch() <= epoch - && balance > 0 + self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 } /// Returns `true` if the validator is partially withdrawable. pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) - && self.effective_balance() == spec.max_effective_balance + && self.effective_balance == spec.max_effective_balance && balance > spec.max_effective_balance } } impl Default for Validator { + /// Yields a "default" `Validator`. Primarily used for testing. 
fn default() -> Self { - Validator { - pubkey: Arc::new(PublicKeyBytes::empty()), - mutable: <_>::default(), - } - } -} - -impl Default for ValidatorMutable { - fn default() -> Self { - ValidatorMutable { - withdrawal_credentials: Hash256::zero(), + Self { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::default(), activation_eligibility_epoch: Epoch::from(std::u64::MAX), activation_epoch: Epoch::from(std::u64::MAX), exit_epoch: Epoch::from(std::u64::MAX), @@ -269,25 +153,6 @@ impl Default for ValidatorMutable { } } -impl TreeHash for Validator { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - self.tree_hash_root_internal() - .expect("Validator tree_hash_root should not fail") - } -} - #[cfg(test)] mod tests { use super::*; @@ -301,7 +166,7 @@ mod tests { assert!(!v.is_active_at(epoch)); assert!(!v.is_exited_at(epoch)); assert!(!v.is_withdrawable_at(epoch)); - assert!(!v.slashed()); + assert!(!v.slashed); } #[test] @@ -309,10 +174,7 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - mutable: ValidatorMutable { - activation_epoch: epoch, - ..Default::default() - }, + activation_epoch: epoch, ..Validator::default() }; @@ -326,10 +188,7 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - mutable: ValidatorMutable { - exit_epoch: epoch, - ..ValidatorMutable::default() - }, + exit_epoch: epoch, ..Validator::default() }; @@ -343,10 +202,7 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - mutable: ValidatorMutable { - withdrawable_epoch: epoch, - ..ValidatorMutable::default() - }, + withdrawable_epoch: epoch, ..Validator::default() }; diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 
4ea04fd15f4..edba4249966 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -17,14 +17,13 @@ use std::fs::File; use std::io::Read; use std::path::PathBuf; use std::str::FromStr; -use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderMerge, - ForkName, Hash256, Keypair, PublicKey, Validator, ValidatorMutable, + ForkName, Hash256, Keypair, PublicKey, Validator, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -276,19 +275,17 @@ fn initialize_state_with_validators( let amount = spec.max_effective_balance; // Create a new validator. let validator = Validator { - pubkey: Arc::new(keypair.0.pk.clone().into()), - mutable: ValidatorMutable { - withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount - amount % (spec.effective_balance_increment), - spec.max_effective_balance, - ), - slashed: false, - }, + pubkey: keypair.0.pk.clone().into(), + withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount - amount % (spec.effective_balance_increment), + spec.max_effective_balance, + ), + slashed: false, }; state.validators_mut().push(validator).unwrap(); state.balances_mut().push(amount).unwrap(); diff --git a/lcli/src/replace_state_pubkeys.rs 
b/lcli/src/replace_state_pubkeys.rs index 5d8421d6f6e..e8d012b16ec 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -53,14 +53,11 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), eprintln!("{}: {}", index, keypair.pk); - validators - .get_mut(index) - .unwrap() - .replace_pubkey(keypair.pk.into()); + validators.get_mut(index).unwrap().pubkey = keypair.pk.into(); // Update the deposit tree. let mut deposit_data = DepositData { - pubkey: *validators.get(index).unwrap().pubkey(), + pubkey: validators.get(index).unwrap().pubkey, // Set this to a junk value since it's very time consuming to generate the withdrawal // keys and it's not useful for the time being. withdrawal_credentials: Hash256::zero(), diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 4c3b0c4f44a..991e91fd337 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -170,7 +170,7 @@ vectors_and_tests!( invalid_exit_already_initiated, ExitTest { state_modifier: Box::new(|state| { - *state.validators_mut().get_mut(0).unwrap().exit_epoch_mut() = STATE_EPOCH + 1; + state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH + 1; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -189,11 +189,8 @@ vectors_and_tests!( invalid_not_active_before_activation_epoch, ExitTest { state_modifier: Box::new(|state| { - *state - .validators_mut() - .get_mut(0) - .unwrap() - .activation_epoch_mut() = E::default_spec().far_future_epoch; + state.validators_mut().get_mut(0).unwrap().activation_epoch = + E::default_spec().far_future_epoch; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -212,7 +209,7 @@ vectors_and_tests!( invalid_not_active_after_exit_epoch, ExitTest { state_modifier: Box::new(|state| { - *state.validators_mut().get_mut(0).unwrap().exit_epoch_mut() = STATE_EPOCH; + 
state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -337,15 +334,15 @@ mod custom_tests { let validator = &state.validators().get(validator_index).unwrap(); assert_eq!( - validator.exit_epoch(), + validator.exit_epoch, // This is correct until we exceed the churn limit. If that happens, we // need to introduce more complex logic. state.current_epoch() + 1 + spec.max_seed_lookahead, "exit epoch" ); assert_eq!( - validator.withdrawable_epoch(), - validator.exit_epoch() + E::default_spec().min_validator_withdrawability_delay, + validator.withdrawable_epoch, + validator.exit_epoch + E::default_spec().min_validator_withdrawability_delay, "withdrawable epoch" ); } diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs index c3c8c94cdd7..65e0a90a2b4 100644 --- a/watch/src/updater/mod.rs +++ b/watch/src/updater/mod.rs @@ -211,20 +211,20 @@ pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result Date: Fri, 12 Apr 2024 12:16:24 +1000 Subject: [PATCH 21/41] Fix caching, rebasing and some tests --- .../beacon_chain/src/block_verification.rs | 69 ++++++++++++------- beacon_node/store/src/hot_cold_store.rs | 21 ++++-- beacon_node/store/src/state_cache.rs | 19 ++++- 3 files changed, 77 insertions(+), 32 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 8a16f97ab0b..49930e46d5b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -91,7 +91,7 @@ use std::fmt::Debug; use std::fs; use std::io::Write; use std::sync::Arc; -use store::{Error as DBError, KeyValueStore, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::{ @@ -1424,31 +1424,52 @@ impl ExecutionPendingBlock { let distance = 
block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - let state_root = - if parent.beacon_block.slot() == state.slot() { - // If it happens that `pre_state` has *not* already been advanced forward a single - // slot, then there is no need to compute the state root for this - // `per_slot_processing` call since that state root is already stored in the parent - // block. - parent.beacon_block.state_root() + let state_root = if parent.beacon_block.slot() == state.slot() { + // If it happens that `pre_state` has *not* already been advanced forward a single + // slot, then there is no need to compute the state root for this + // `per_slot_processing` call since that state root is already stored in the parent + // block. + parent.beacon_block.state_root() + } else { + // This is a new state we've reached, so stage it for storage in the DB. + // Computing the state root here is time-equivalent to computing it during slot + // processing, but we get early access to it. + let state_root = state.update_tree_hash_cache()?; + + // Store the state immediately, marking it as temporary, and staging the deletion + // of its temporary status as part of the larger atomic operation. + let txn_lock = chain.store.hot_db.begin_rw_transaction(); + let state_already_exists = + chain.store.load_hot_state_summary(&state_root)?.is_some(); + + let state_batch = if state_already_exists { + // If the state exists, it could be temporary or permanent, but in neither case + // should we rewrite it or store a new temporary flag for it. We *will* stage + // the temporary flag for deletion because it's OK to double-delete the flag, + // and we don't mind if another thread gets there first. + vec![] } else { - // This is a new state we've reached, so stage it for storage in the DB. - // Computing the state root here is time-equivalent to computing it during slot - // processing, but we get early access to it. 
- let state_root = state.update_tree_hash_cache()?; - - // Store the state immediately, marking it as temporary, and staging the deletion - // of its temporary status as part of the larger atomic operation. - let txn_lock = chain.store.hot_db.begin_rw_transaction(); - chain.store.do_atomically_with_block_and_blobs_cache(vec![ - StoreOp::PutState(state_root, &state), - ])?; - drop(txn_lock); - - confirmed_state_roots.push(state_root); - - state_root + vec![ + if state.slot() % T::EthSpec::slots_per_epoch() == 0 { + StoreOp::PutState(state_root, &state) + } else { + StoreOp::PutStateSummary( + state_root, + HotStateSummary::new(&state_root, &state)?, + ) + }, + StoreOp::PutStateTemporaryFlag(state_root), + ] }; + chain + .store + .do_atomically_with_block_and_blobs_cache(state_batch)?; + drop(txn_lock); + + confirmed_state_roots.push(state_root); + + state_root + }; if let Some(summary) = per_slot_processing(&mut state, Some(state_root), &chain.spec)? { // Expose Prometheus metrics. diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0c3809f68ac..7cf7c147d2d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1098,10 +1098,10 @@ impl, Cold: ItemStore> HotColdDB let state_from_disk = self.load_hot_state(state_root)?; - if let Some((state, block_root)) = state_from_disk { - self.state_cache - .lock() - .put_state(*state_root, block_root, &state)?; + if let Some((mut state, block_root)) = state_from_disk { + let mut state_cache = self.state_cache.lock(); + state_cache.rebase_on_finalized(&mut state, &self.spec)?; + state_cache.put_state(*state_root, block_root, &state)?; Ok(Some(state)) } else { Ok(None) @@ -1111,6 +1111,9 @@ impl, Cold: ItemStore> HotColdDB /// Load a post-finalization state from the hot database. /// /// Will replay blocks from the nearest epoch boundary. 
+ /// + /// Return the `(state, latest_block_root)` where `latest_block_root` is the root of the last + /// block applied to `state`. pub fn load_hot_state( &self, state_root: &Hash256, @@ -1136,7 +1139,7 @@ impl, Cold: ItemStore> HotColdDB // Optimization to avoid even *thinking* about replaying blocks if we're already // on an epoch boundary. - let state = if slot % E::slots_per_epoch() == 0 { + let mut state = if slot % E::slots_per_epoch() == 0 { boundary_state } else { let blocks = @@ -1150,6 +1153,7 @@ impl, Cold: ItemStore> HotColdDB StateProcessingStrategy::Accurate, )? }; + state.apply_pending_mutations()?; Ok(Some((state, latest_block_root))) } else { @@ -1250,7 +1254,9 @@ impl, Cold: ItemStore> HotColdDB partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; - partial_state.try_into() + let mut state: BeaconState = partial_state.try_into()?; + state.apply_pending_mutations()?; + Ok(state) } /// Load a restore point state by its `restore_point_index`. @@ -1316,7 +1322,7 @@ impl, Cold: ItemStore> HotColdDB &self.spec, )?; - let state = self.replay_blocks( + let mut state = self.replay_blocks( low_state, blocks, slot, @@ -1324,6 +1330,7 @@ impl, Cold: ItemStore> HotColdDB None, StateProcessingStrategy::Accurate, )?; + state.apply_pending_mutations()?; // If state is not error, put it in the cache. self.historic_state_cache.lock().put(slot, state.clone()); diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index dcc230de5c0..db9b69d1d11 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -2,7 +2,7 @@ use crate::Error; use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; -use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; /// Fraction of the LRU cache to leave intact during culling. 
const CULL_EXEMPT_NUMERATOR: usize = 1; @@ -98,6 +98,23 @@ impl StateCache { Ok(()) } + /// Rebase the given state on the finalized state in order to reduce its memory consumption. + /// + /// This function should only be called on states that are likely not to already share tree + /// nodes with the finalized state, e.g. states loaded from disk. + /// + /// If the finalized state is not initialized this function is a no-op. + pub fn rebase_on_finalized( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), Error> { + if let Some(finalized_state) = &self.finalized_state { + state.rebase_on(&finalized_state.state, spec)?; + } + Ok(()) + } + /// Return a status indicating whether the state already existed in the cache. pub fn put_state( &mut self, From 38b0765b1a66d325db7360412ebe0cd99b72db08 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 12 Apr 2024 12:16:42 +1000 Subject: [PATCH 22/41] Remove unused deps --- Cargo.lock | 359 ++++++++--------------------------- Cargo.toml | 2 - beacon_node/store/Cargo.toml | 2 - 3 files changed, 75 insertions(+), 288 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cfc13639864..3db61fba038 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,7 +49,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand 0.8.5", + "rand", "regex", "rpassword", "serde", @@ -240,7 +240,7 @@ dependencies = [ "k256 0.13.3", "keccak-asm", "proptest", - "rand 0.8.5", + "rand", "ruint", "serde", "tiny-keccak", @@ -433,7 +433,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand 0.8.5", + "rand", ] [[package]] @@ -443,7 +443,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand 0.8.5", + "rand", ] [[package]] @@ -625,15 +625,6 @@ dependencies = [ 
"syn 2.0.52", ] -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -781,7 +772,7 @@ dependencies = [ "parking_lot 0.12.1", "promise_cache", "proto_array", - "rand 0.8.5", + "rand", "rayon", "safe_arith", "sensitive_url", @@ -897,29 +888,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "bindgen" -version = "0.66.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" -dependencies = [ - "bitflags 2.4.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.52", - "which", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -1016,7 +984,7 @@ dependencies = [ "ethereum_serde_utils", "ethereum_ssz", "hex", - "rand 0.8.5", + "rand", "serde", "tree_hash", "zeroize", @@ -1378,15 +1346,6 @@ dependencies = [ "types", ] -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "cmake" version = "0.1.50" @@ -1578,7 +1537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -1590,7 +1549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -1602,7 
+1561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "typenum", ] @@ -2056,7 +2015,7 @@ dependencies = [ "lru", "more-asserts", "parking_lot 0.11.2", - "rand 0.8.5", + "rand", "rlp", "smallvec", "socket2 0.4.10", @@ -2133,7 +2092,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core 0.6.4", + "rand_core", "serde", "sha2 0.10.8", "subtle", @@ -2194,7 +2153,7 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "rand_core 0.6.4", + "rand_core", "sec1 0.3.0", "subtle", "zeroize", @@ -2214,7 +2173,7 @@ dependencies = [ "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", - "rand_core 0.6.4", + "rand_core", "sec1 0.7.3", "subtle", "zeroize", @@ -2241,7 +2200,7 @@ dependencies = [ "hex", "k256 0.13.3", "log", - "rand 0.8.5", + "rand", "rlp", "serde", "sha3 0.10.8", @@ -2462,7 +2421,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.8.5", + "rand", "scrypt", "serde", "serde_json", @@ -2504,7 +2463,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.8.5", + "rand", "serde", "serde_json", "serde_repr", @@ -2736,7 +2695,7 @@ dependencies = [ "k256 0.11.6", "once_cell", "open-fastrlp", - "rand 0.8.5", + "rand", "rlp", "rlp-derive", "serde", @@ -2863,7 +2822,7 @@ dependencies = [ "lru", "parking_lot 0.12.1", "pretty_reqwest_error", - "rand 0.8.5", + "rand", "reqwest", "sensitive_url", "serde", @@ -2931,7 +2890,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2941,7 +2900,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2988,7 +2947,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -3001,7 +2960,7 @@ checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "arbitrary", "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -3072,12 +3031,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - [[package]] name = "funty" version = "1.1.0" @@ -3355,7 +3308,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec 0.3.1", "quickcheck", - "rand 0.8.5", + "rand", "regex", "serde", "sha2 0.10.8", @@ -3371,7 +3324,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -3382,7 +3335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -3570,7 +3523,7 @@ dependencies = [ "idna 0.4.0", "ipnet", "once_cell", - "rand 0.8.5", + "rand", "socket2 0.5.6", "thiserror", "tinyvec", @@ -3592,7 +3545,7 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "resolv-conf", "smallvec", "thiserror", @@ -3649,15 +3602,6 @@ dependencies = [ "hmac 0.8.1", ] -[[package]] -name = "home" -version = "0.5.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "hostname" version = "0.3.1" @@ -3989,7 +3933,7 @@ dependencies = [ "http 0.2.11", "hyper 0.14.28", "log", - "rand 0.8.5", + "rand", "tokio", "url", "xmltree", @@ -4063,7 +4007,7 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.1.0", + "autocfg", "hashbrown 0.12.3", ] @@ -4521,7 +4465,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project", "quick-protobuf", - "rand 0.8.5", + "rand", "rw-stream-sink", "smallvec", "thiserror", @@ -4583,7 +4527,7 @@ dependencies = [ "multihash", "p256", "quick-protobuf", - "rand 0.8.5", + "rand", "sec1 0.7.3", "sha2 0.10.8", "thiserror", @@ -4605,7 +4549,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand 0.8.5", + "rand", "smallvec", "socket2 0.5.6", "tokio", @@ -4642,7 +4586,7 @@ dependencies = [ "libp2p-identity", "nohash-hasher", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "smallvec", "tracing", "unsigned-varint 0.7.2", @@ -4664,7 +4608,7 @@ dependencies = [ "multihash", "once_cell", "quick-protobuf", - "rand 0.8.5", + "rand", "sha2 0.10.8", "snow", "static_assertions", @@ -4705,7 +4649,7 @@ dependencies = [ "libp2p-tls", "parking_lot 0.12.1", "quinn", - "rand 0.8.5", + "rand", "ring 0.16.20", "rustls", "socket2 0.5.6", @@ -4730,7 +4674,7 @@ dependencies = [ "libp2p-swarm-derive", "multistream-select", "once_cell", - "rand 0.8.5", + "rand", "smallvec", "tokio", "tracing", @@ -4840,7 +4784,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand 0.8.5", + "rand", "serde", "sha2 0.9.9", "typenum", @@ -4984,7 +4928,7 @@ dependencies = [ "prometheus-client", "quickcheck", "quickcheck_macros", - "rand 0.8.5", + 
"rand", "regex", "serde", "sha2 0.9.9", @@ -5065,7 +5009,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ - "autocfg 1.1.0", + "autocfg", "scopeguard", ] @@ -5201,7 +5145,7 @@ name = "mdbx-sys" version = "0.11.6-4" source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ - "bindgen 0.59.2", + "bindgen", "cc", "cmake", "libc", @@ -5225,7 +5169,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -5550,7 +5494,7 @@ dependencies = [ "num_cpus", "operation_pool", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "rlp", "slog", "slog-async", @@ -5647,7 +5591,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", ] @@ -5664,7 +5608,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand 0.8.5", + "rand", "serde", "smallvec", "zeroize", @@ -5691,7 +5635,7 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", ] @@ -5702,7 +5646,7 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ - "autocfg 1.1.0", + "autocfg", "libm", ] @@ -5852,7 +5796,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "rayon", "serde", "state_processing", @@ -5992,7 
+5936,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -6262,7 +6206,7 @@ dependencies = [ "hmac 0.12.1", "md-5", "memchr", - "rand 0.8.5", + "rand", "sha2 0.10.8", "stringprep", ] @@ -6307,16 +6251,6 @@ dependencies = [ "sensitive_url", ] -[[package]] -name = "prettyplease" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" -dependencies = [ - "proc-macro2", - "syn 2.0.52", -] - [[package]] name = "primeorder" version = "0.13.6" @@ -6455,9 +6389,9 @@ dependencies = [ "bitflags 2.4.2", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift 0.3.0", + "rand", + "rand_chacha", + "rand_xorshift", "regex-syntax 0.8.2", "rusty-fork", "tempfile", @@ -6551,7 +6485,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", "log", - "rand 0.8.5", + "rand", ] [[package]] @@ -6590,7 +6524,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", - "rand 0.8.5", + "rand", "ring 0.16.20", "rustc-hash", "rustls", @@ -6655,25 +6589,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.8", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift 0.1.1", - "winapi", -] - 
[[package]] name = "rand" version = "0.8.5" @@ -6681,18 +6596,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.3.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -6702,24 +6607,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.6.4" @@ -6729,75 +6619,13 @@ dependencies = [ "getrandom", ] -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -6832,15 +6660,6 @@ dependencies = [ "yasna", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "redox_syscall" version = "0.2.16" @@ -7098,7 +6917,7 @@ dependencies = [ "parity-scale-codec 3.6.9", "primitive-types 0.12.2", "proptest", - "rand 0.8.5", + "rand", "rlp", "ruint-macro", "serde", @@ -7680,7 +7499,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -7690,7 +7509,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -7735,7 +7554,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -7757,7 +7576,7 @@ dependencies = [ "lru", "maplit", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "rayon", "safe_arith", "serde", @@ -7939,7 +7758,7 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek", - "rand_core 0.6.4", + "rand_core", "ring 0.17.8", "rustc_version 0.4.0", "sha2 0.10.8", @@ -8094,8 +7913,6 @@ dependencies = [ "strum", "tempfile", "types", - "xdelta3", - "zstd", ] [[package]] @@ -8358,7 +8175,7 @@ dependencies = [ "hex", "hmac 0.12.1", "log", - "rand 0.8.5", + "rand", "serde", "serde_json", "sha2 0.10.8", @@ -8464,7 +8281,7 @@ dependencies = [ "hmac 0.12.1", "once_cell", "pbkdf2 0.11.0", - "rand 0.8.5", + "rand", "rustc-hash", "sha2 0.10.8", "thiserror", @@ -8575,7 +8392,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "rand 0.8.5", + "rand", "socket2 0.5.6", "tokio", "tokio-util 0.7.10", @@ -8904,8 +8721,8 @@ dependencies = [ "milhouse", "parking_lot 0.12.1", "paste", - "rand 0.8.5", - "rand_xorshift 0.3.0", + "rand", + "rand_xorshift", "rayon", "regex", "rpds", @@ -9125,7 +8942,7 @@ dependencies = [ "malloc_utils", "monitoring_api", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "reqwest", "ring 0.16.20", "safe_arith", @@ -9162,7 +8979,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand 0.8.5", + "rand", "tempfile", "tree_hash", "types", @@ -9428,7 +9245,7 @@ dependencies = [ "logging", "network", "r2d2", - "rand 0.8.5", + "rand", "reqwest", "serde", "serde_json", @@ -9484,18 +9301,6 @@ version = "0.25.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.31", -] - [[package]] name = "whoami" version = "1.5.0" @@ -9848,7 +9653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", - "rand_core 0.6.4", + "rand_core", "serde", "zeroize", ] @@ -9870,20 +9675,6 @@ dependencies = [ "time", ] -[[package]] -name = "xdelta3" -version = "0.1.5" -source = "git+http://github.com/michaelsproul/xdelta3-rs?rev=ae9a1d2585ef998f4656acdc35cf263ee88e6dfa#ae9a1d2585ef998f4656acdc35cf263ee88e6dfa" -dependencies = [ - "bindgen 0.66.1", - "cc", - "futures-io", - "futures-util", - "libc", - "log", - "rand 0.6.5", -] - [[package]] name = "xml-rs" version = "0.8.19" @@ -9919,7 +9710,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.1", "pin-project", - "rand 0.8.5", + "rand", "static_assertions", ] @@ -9934,7 +9725,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.1", "pin-project", - "rand 0.8.5", + "rand", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index af5acf0cf38..2a724db5ada 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -178,10 +178,8 @@ tree_hash_derive = "0.5" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.6", default-features = false, features = ["tls"] } -xdelta3 = { git = "http://github.com/michaelsproul/xdelta3-rs", rev="ae9a1d2585ef998f4656acdc35cf263ee88e6dfa" } zeroize = { version = "1", features = ["zeroize_derive"] } zip = "0.6" -zstd = "0.11.2" # Local crates. 
account_utils = { path = "common/account_utils" } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 288d167b419..b7822670078 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,8 +25,6 @@ lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } strum = { workspace = true } -xdelta3 = { workspace = true } -zstd = { workspace = true } safe_arith = { workspace = true } bls = { workspace = true } smallvec = { workspace = true } From f4285e5149f878840f3e73ddf0fb4408644ab863 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 12 Apr 2024 14:01:54 +1000 Subject: [PATCH 23/41] Small cleanups --- .../beacon_chain/src/block_verification.rs | 25 ++++++++----------- beacon_node/beacon_chain/src/builder.rs | 3 +-- beacon_node/beacon_chain/src/head_tracker.rs | 4 +-- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 49930e46d5b..6f3d65b7a42 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -574,7 +574,7 @@ pub fn signature_verify_chain_segment( } let (first_root, first_block) = chain_segment.remove(0); - let (mut parent, first_block) = load_parent(first_root, first_block, chain)?; + let (mut parent, first_block) = load_parent(first_block, chain)?; let slot = first_block.slot(); chain_segment.insert(0, (first_root, first_block)); @@ -613,7 +613,7 @@ pub fn signature_verify_chain_segment( // verify signatures let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier::(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); for svb in &mut signature_verified_blocks { signature_verifier .include_all_signatures(svb.block.as_block(), &mut svb.consensus_context)?; @@ 
-890,7 +890,7 @@ impl GossipVerifiedBlock { } else { // The proposer index was *not* cached and we must load the parent in order to determine // the proposer index. - let (mut parent, block) = load_parent(block_root, block, chain)?; + let (mut parent, block) = load_parent(block, chain)?; debug!( chain.log, @@ -1039,7 +1039,7 @@ impl SignatureVerifiedBlock { // Check the anchor slot before loading the parent, to avoid spurious lookups. check_block_against_anchor_slot(block.message(), chain)?; - let (mut parent, block) = load_parent(block_root, block, chain)?; + let (mut parent, block) = load_parent(block, chain)?; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, @@ -1050,8 +1050,7 @@ impl SignatureVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = - get_signature_verifier::(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); @@ -1090,7 +1089,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = if let Some(parent) = from.parent { (parent, from.block) } else { - load_parent(from.block_root, from.block, chain)? + load_parent(from.block, chain)? }; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( @@ -1102,8 +1101,7 @@ impl SignatureVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = - get_signature_verifier::(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); // Gossip verification has already checked the proposer index. Use it to check the RANDAO // signature. 
@@ -1153,7 +1151,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { - load_parent(self.block_root, self.block, chain) + load_parent(self.block, chain) .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? }; @@ -1837,7 +1835,6 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent>( - _block_root: Hash256, block: B, chain: &BeaconChain, ) -> Result<(PreProcessingSnapshot, B), BlockError> { @@ -1862,7 +1859,7 @@ fn load_parent>( let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); let result = { - // Load the blocks parent block from the database, returning invalid if that block is not + // Load the block's parent block from the database, returning invalid if that block is not // found. // // We don't return a DBInconsistent error here since it's possible for a block to @@ -1896,7 +1893,7 @@ fn load_parent>( })?; if !state.all_caches_built() { - slog::warn!( + debug!( chain.log, "Parent state lacks built caches"; "block_slot" => block.slot(), @@ -1905,7 +1902,7 @@ fn load_parent>( } if block.slot() != state.slot() { - slog::warn!( + debug!( chain.log, "Parent state is not advanced"; "block_slot" => block.slot(), diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ab68c7ff54d..e3e0e8b7266 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1274,8 +1274,7 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials; - let creds = creds.as_bytes(); + let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index b7802cbb2e0..71e2473cdcf 100644 --- 
a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -90,8 +90,8 @@ impl PartialEq for HeadTracker { /// This is used when persisting the state of the `BeaconChain` to disk. #[derive(Encode, Decode, Clone)] pub struct SszHeadTracker { - pub roots: Vec, - pub slots: Vec, + roots: Vec, + slots: Vec, } impl SszHeadTracker { From d0d0b37ec9d79ad279bbf5c98b89c4c512593db8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 12 Apr 2024 14:16:11 +1000 Subject: [PATCH 24/41] Revert shuffling cache/promise cache changes --- Cargo.lock | 11 - Cargo.toml | 1 - beacon_node/beacon_chain/Cargo.toml | 1 - beacon_node/beacon_chain/src/beacon_chain.rs | 15 +- beacon_node/beacon_chain/src/builder.rs | 7 - .../beacon_chain/src/canonical_head.rs | 4 +- beacon_node/beacon_chain/src/chain_config.rs | 6 - beacon_node/beacon_chain/src/errors.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 1 - .../beacon_chain/src/parallel_state_cache.rs | 22 -- .../beacon_chain/src/shuffling_cache.rs | 300 +++++++++++++++--- .../beacon_chain/src/state_advance_timer.rs | 2 +- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/http_api/src/state_id.rs | 49 --- .../gossip_methods.rs | 2 +- common/promise_cache/Cargo.toml | 10 - common/promise_cache/src/lib.rs | 227 ------------- lighthouse/tests/beacon_node.rs | 19 -- 18 files changed, 260 insertions(+), 422 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/parallel_state_cache.rs delete mode 100644 common/promise_cache/Cargo.toml delete mode 100644 common/promise_cache/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 3db61fba038..d3a9f537f6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -770,7 +770,6 @@ dependencies = [ "oneshot_broadcast", "operation_pool", "parking_lot 0.12.1", - "promise_cache", "proto_array", "rand", "rayon", @@ -6368,16 +6367,6 @@ dependencies = [ "syn 2.0.52", ] -[[package]] -name = "promise_cache" -version = "0.1.0" -dependencies = [ - "derivative", - "itertools", - 
"oneshot_broadcast", - "slog", -] - [[package]] name = "proptest" version = "1.4.0" diff --git a/Cargo.toml b/Cargo.toml index 2a724db5ada..dd1f811c84d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,6 @@ members = [ "common/malloc_utils", "common/oneshot_broadcast", "common/pretty_reqwest_error", - "common/promise_cache", "common/sensitive_url", "common/slot_clock", "common/system_health", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index fd59a1a799c..22ad5d0d6ad 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -46,7 +46,6 @@ merkle_proof = { workspace = true } oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } parking_lot = { workspace = true } -promise_cache = { path = "../../common/promise_cache" } proto_array = { workspace = true } rand = { workspace = true } rayon = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2dbfb5b1b49..f4a02b7db38 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -54,7 +54,6 @@ use crate::observed_blob_sidecars::ObservedBlobSidecars; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; -use crate::parallel_state_cache::ParallelStateCache; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; @@ -466,10 +465,6 @@ pub struct BeaconChain { pub block_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. pub pre_finalization_block_cache: PreFinalizationBlockCache, - /// A cache used to de-duplicate HTTP state requests. 
- /// - /// The cache is keyed by `state_root`. - pub parallel_state_cache: Arc>>, /// A cache used to produce light_client server messages pub light_client_server_cache: LightClientServerCache, /// Sender to signal the light_client server to produce new updates @@ -3939,7 +3934,7 @@ impl BeaconChain { self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .ok_or(Error::AttestationCacheLockTimeout)? - .insert_value(shuffling_id, committee_cache); + .insert_committee_cache(shuffling_id, committee_cache); } } Ok(()) @@ -6174,7 +6169,7 @@ impl BeaconChain { // access. drop(shuffling_cache); - let committee_cache = cache_item.wait().map_err(Error::ShufflingCacheError)?; + let committee_cache = cache_item.wait()?; map_fn(&committee_cache, shuffling_id.shuffling_decision_block) } else { // Create an entry in the cache that "promises" this value will eventually be computed. @@ -6183,9 +6178,7 @@ impl BeaconChain { // // Creating the promise whilst we hold the `shuffling_cache` lock will prevent the same // promise from being created twice. - let sender = shuffling_cache - .create_promise(shuffling_id.clone()) - .map_err(Error::ShufflingCacheError)?; + let sender = shuffling_cache.create_promise(shuffling_id.clone())?; // Drop the shuffling cache to avoid holding the lock for any longer than // required. @@ -6279,7 +6272,7 @@ impl BeaconChain { self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .ok_or(Error::AttestationCacheLockTimeout)? 
- .insert_value(shuffling_id, &committee_cache); + .insert_committee_cache(shuffling_id, &committee_cache); metrics::stop_timer(committee_building_timer); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index e3e0e8b7266..1678b68f0cf 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -27,7 +27,6 @@ use futures::channel::mpsc::Sender; use kzg::Kzg; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use promise_cache::PromiseCache; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, debug, error, info, o, Logger}; @@ -866,7 +865,6 @@ where let genesis_time = head_snapshot.beacon_state.genesis_time(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; - let parallel_state_cache_size = self.chain_config.parallel_state_cache_size; // Calculate the weak subjectivity point in which to backfill blocks to. 
let genesis_backfill_slot = if self.chain_config.genesis_backfill { @@ -951,11 +949,6 @@ where beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), - parallel_state_cache: Arc::new(RwLock::new(PromiseCache::new( - parallel_state_cache_size, - Default::default(), - log.clone(), - ))), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 447978755b9..e72cbe7404e 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -820,7 +820,9 @@ impl BeaconChain { Ok(head_shuffling_ids) => { self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .map(|mut shuffling_cache| shuffling_cache.update_protector(head_shuffling_ids)) + .map(|mut shuffling_cache| { + shuffling_cache.update_head_shuffling_ids(head_shuffling_ids) + }) .unwrap_or_else(|| { error!( self.log, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 545bdd20b7b..255b8f00497 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -16,9 +16,6 @@ pub const DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR: u32 = 3; /// Fraction of a slot lookahead for fork choice in the state advance timer (500ms on mainnet). pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; -/// Cache only a small number of states in the parallel cache by default. -pub const DEFAULT_PARALLEL_STATE_CACHE_SIZE: usize = 2; - #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing an attestation. 
@@ -87,8 +84,6 @@ pub struct ChainConfig { pub always_prepare_payload: bool, /// Number of epochs between each migration of data from the hot database to the freezer. pub epochs_per_migration: u64, - /// Size of the promise cache for de-duplicating parallel state requests. - pub parallel_state_cache_size: usize, /// When set to true Light client server computes and caches state proofs for serving updates pub enable_light_client_server: bool, } @@ -122,7 +117,6 @@ impl Default for ChainConfig { genesis_backfill: false, always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, - parallel_state_cache_size: DEFAULT_PARALLEL_STATE_CACHE_SIZE, enable_light_client_server: false, } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index a68409bca23..340f1f9f797 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -215,7 +215,8 @@ pub enum BeaconChainError { }, AttestationHeadNotInForkChoice(Hash256), MissingPersistedForkChoice, - ShufflingCacheError(promise_cache::PromiseCacheError), + CommitteePromiseFailed(oneshot_broadcast::Error), + MaxCommitteePromises(usize), BlsToExecutionPriorToCapella, BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c273050bcff..c1df9ede87a 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -45,7 +45,6 @@ pub mod observed_block_producers; pub mod observed_operations; mod observed_slashable; pub mod otb_verification_service; -mod parallel_state_cache; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; diff --git a/beacon_node/beacon_chain/src/parallel_state_cache.rs b/beacon_node/beacon_chain/src/parallel_state_cache.rs deleted file mode 100644 index d568d3248cd..00000000000 --- a/beacon_node/beacon_chain/src/parallel_state_cache.rs 
+++ /dev/null @@ -1,22 +0,0 @@ -use promise_cache::{PromiseCache, Protect}; -use types::{BeaconState, Hash256}; - -#[derive(Debug, Default)] -pub struct ParallelStateProtector; - -impl Protect for ParallelStateProtector { - type SortKey = usize; - - /// Evict in arbitrary (hashmap) order by using the same key for every value. - fn sort_key(&self, _: &Hash256) -> Self::SortKey { - 0 - } - - /// We don't care too much about preventing evictions of particular states here. All the states - /// in this cache should be different from the head state. - fn protect_from_eviction(&self, _: &Hash256) -> bool { - false - } -} - -pub type ParallelStateCache = PromiseCache, ParallelStateProtector>; diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 7db4e082142..b3de6f91c92 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,53 +1,240 @@ -use promise_cache::{PromiseCache, Protect}; +use std::collections::HashMap; +use std::sync::Arc; + +use itertools::Itertools; use slog::{debug, Logger}; + +use oneshot_broadcast::{oneshot, Receiver, Sender}; use types::{ beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, RelativeEpoch, }; +use crate::{metrics, BeaconChainError}; + /// The size of the cache that stores committee caches for quicker verification. /// /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash + /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this /// ignores a few extra bytes in the caches that should be insignificant compared to the indices). -/// -/// The cache size also determines the maximum number of concurrent committee cache "promises" that -/// can be issued. In effect, this limits the number of concurrent states that can be loaded into -/// memory for the committee cache. 
This prevents excessive memory usage at the cost of rejecting -/// some attestations. +pub const DEFAULT_CACHE_SIZE: usize = 16; + +/// The maximum number of concurrent committee cache "promises" that can be issued. In effect, this +/// limits the number of concurrent states that can be loaded into memory for the committee cache. +/// This prevents excessive memory usage at the cost of rejecting some attestations. /// /// We set this value to 2 since states can be quite large and have a significant impact on memory /// usage. A healthy network cannot have more than a few committee caches and those caches should /// always be inserted during block import. Unstable networks with a high degree of forking might /// see some attestations dropped due to this concurrency limit, however I propose that this is /// better than low-resource nodes going OOM. -pub const DEFAULT_CACHE_SIZE: usize = 16; - -impl Protect for BlockShufflingIds { - type SortKey = Epoch; +const MAX_CONCURRENT_PROMISES: usize = 2; + +#[derive(Clone)] +pub enum CacheItem { + /// A committee. + Committee(Arc), + /// A promise for a future committee. + Promise(Receiver>), +} - fn sort_key(&self, k: &AttestationShufflingId) -> Epoch { - k.shuffling_epoch +impl CacheItem { + pub fn is_promise(&self) -> bool { + matches!(self, CacheItem::Promise(_)) } - fn protect_from_eviction(&self, shuffling_id: &AttestationShufflingId) -> bool { - Some(shuffling_id) == self.id_for_epoch(shuffling_id.shuffling_epoch).as_ref() + pub fn wait(self) -> Result, BeaconChainError> { + match self { + CacheItem::Committee(cache) => Ok(cache), + CacheItem::Promise(receiver) => receiver + .recv() + .map_err(BeaconChainError::CommitteePromiseFailed), + } } +} + +/// Provides a cache for `CommitteeCache`. +/// +/// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like +/// a find/replace error. 
+pub struct ShufflingCache { + cache: HashMap, + cache_size: usize, + head_shuffling_ids: BlockShufflingIds, + logger: Logger, +} - fn notify_eviction(&self, shuffling_id: &AttestationShufflingId, logger: &Logger) { - debug!( +impl ShufflingCache { + pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds, logger: Logger) -> Self { + Self { + cache: HashMap::new(), + cache_size, + head_shuffling_ids, logger, - "Removing old shuffling from cache"; - "shuffling_epoch" => shuffling_id.shuffling_epoch, - "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block - ); + } + } + + pub fn get(&mut self, key: &AttestationShufflingId) -> Option { + match self.cache.get(key) { + // The cache contained the committee cache, return it. + item @ Some(CacheItem::Committee(_)) => { + metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); + item.cloned() + } + // The cache contains a promise for the committee cache. Check to see if the promise has + // already been resolved, without waiting for it. + item @ Some(CacheItem::Promise(receiver)) => match receiver.try_recv() { + // The promise has already been resolved. Replace the entry in the cache with a + // `Committee` entry and then return the committee. + Ok(Some(committee)) => { + metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS); + metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); + let ready = CacheItem::Committee(committee); + self.insert_cache_item(key.clone(), ready.clone()); + Some(ready) + } + // The promise has not yet been resolved. Return the promise so the caller can await + // it. + Ok(None) => { + metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS); + metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); + item.cloned() + } + // The sender has been dropped without sending a committee. There was most likely an + // error computing the committee cache. Drop the key from the cache and return + // `None` so the caller can recompute the committee. 
+ // + // It's worth noting that this is the only place where we removed unresolved + // promises from the cache. This means unresolved promises will only be removed if + // we try to access them again. This is OK, since the promises don't consume much + // memory. We expect that *all* promises should be resolved, unless there is a + // programming or database error. + Err(oneshot_broadcast::Error::SenderDropped) => { + metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_FAILS); + metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES); + self.cache.remove(key); + None + } + }, + // The cache does not have this committee and it's not already promised to be computed. + None => { + metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES); + None + } + } + } + + pub fn contains(&self, key: &AttestationShufflingId) -> bool { + self.cache.contains_key(key) + } + + pub fn insert_committee_cache( + &mut self, + key: AttestationShufflingId, + committee_cache: &C, + ) { + if self + .cache + .get(&key) + // Replace the committee if it's not present or if it's a promise. A bird in the hand is + // worth two in the promise-bush! + .map_or(true, CacheItem::is_promise) + { + self.insert_cache_item( + key, + CacheItem::Committee(committee_cache.to_arc_committee_cache()), + ); + } + } + + /// Prunes the cache first before inserting a new cache item. + fn insert_cache_item(&mut self, key: AttestationShufflingId, cache_item: CacheItem) { + self.prune_cache(); + self.cache.insert(key, cache_item); + } + + /// Prunes the `cache` to keep the size below the `cache_size` limit, based on the following + /// preferences: + /// - Entries from more recent epochs are preferred over older ones. + /// - Entries with shuffling ids matching the head's previous, current, and future epochs must + /// not be pruned. 
+ fn prune_cache(&mut self) { + let target_cache_size = self.cache_size.saturating_sub(1); + if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) { + let shuffling_ids_to_prune = self + .cache + .keys() + .sorted_by_key(|key| key.shuffling_epoch) + .filter(|shuffling_id| { + Some(shuffling_id) + != self + .head_shuffling_ids + .id_for_epoch(shuffling_id.shuffling_epoch) + .as_ref() + .as_ref() + }) + .take(prune_count) + .cloned() + .collect::>(); + + for shuffling_id in shuffling_ids_to_prune.iter() { + debug!( + self.logger, + "Removing old shuffling from cache"; + "shuffling_epoch" => shuffling_id.shuffling_epoch, + "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block + ); + self.cache.remove(shuffling_id); + } + } + } + + pub fn create_promise( + &mut self, + key: AttestationShufflingId, + ) -> Result>, BeaconChainError> { + let num_active_promises = self + .cache + .iter() + .filter(|(_, item)| item.is_promise()) + .count(); + if num_active_promises >= MAX_CONCURRENT_PROMISES { + return Err(BeaconChainError::MaxCommitteePromises(num_active_promises)); + } + + let (sender, receiver) = oneshot(); + self.insert_cache_item(key, CacheItem::Promise(receiver)); + Ok(sender) + } + + /// Inform the cache that the shuffling decision roots for the head has changed. + /// + /// The shufflings for the head's previous, current, and future epochs will never be ejected from + /// the cache during `Self::insert_cache_item`. + pub fn update_head_shuffling_ids(&mut self, head_shuffling_ids: BlockShufflingIds) { + self.head_shuffling_ids = head_shuffling_ids; + } +} + +/// A helper trait to allow lazy-cloning of the committee cache when inserting into the cache. 
+pub trait ToArcCommitteeCache { + fn to_arc_committee_cache(&self) -> Arc; +} + +impl ToArcCommitteeCache for CommitteeCache { + fn to_arc_committee_cache(&self) -> Arc { + Arc::new(self.clone()) } } -pub type ShufflingCache = PromiseCache; +impl ToArcCommitteeCache for Arc { + fn to_arc_committee_cache(&self) -> Arc { + self.clone() + } +} /// Contains the shuffling IDs for a beacon block. -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct BlockShufflingIds { pub current: AttestationShufflingId, pub next: AttestationShufflingId, @@ -107,13 +294,13 @@ impl BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { - use super::*; - use crate::test_utils::EphemeralHarnessType; - use promise_cache::{CacheItem, PromiseCacheError}; - use std::sync::Arc; use task_executor::test_utils::null_logger; use types::*; + use crate::test_utils::EphemeralHarnessType; + + use super::*; + type E = MinimalEthSpec; type TestBeaconChainType = EphemeralHarnessType; type BeaconChainHarness = crate::test_utils::BeaconChainHarness; @@ -152,7 +339,7 @@ mod test { .clone(); let committee_b = state.committee_cache(RelativeEpoch::Next).unwrap().clone(); assert!(committee_a != committee_b); - (committee_a, committee_b) + (Arc::new(committee_a), Arc::new(committee_b)) } /// Builds a deterministic but incoherent shuffling ID from a `u64`. @@ -185,10 +372,10 @@ mod test { // Ensure the promise has been resolved. let item = cache.get(&id_a).unwrap(); assert!( - matches!(item, CacheItem::Complete(committee) if committee == committee_a), + matches!(item, CacheItem::Committee(committee) if committee == committee_a), "the promise should be resolved" ); - assert_eq!(cache.len(), 1, "the cache should have one entry"); + assert_eq!(cache.cache.len(), 1, "the cache should have one entry"); } #[test] @@ -212,7 +399,7 @@ mod test { // Ensure the key now indicates an empty slot. 
assert!(cache.get(&id_a).is_none(), "the slot should be empty"); - assert!(cache.is_empty(), "the cache should be empty"); + assert!(cache.cache.is_empty(), "the cache should be empty"); } #[test] @@ -246,7 +433,7 @@ mod test { // Ensure promise A has been resolved. let item = cache.get(&id_a).unwrap(); assert!( - matches!(item, CacheItem::Complete(committee) if committee == committee_a), + matches!(item, CacheItem::Committee(committee) if committee == committee_a), "promise A should be resolved" ); @@ -255,40 +442,41 @@ mod test { // Ensure promise B has been resolved. let item = cache.get(&id_b).unwrap(); assert!( - matches!(item, CacheItem::Complete(committee) if committee == committee_b), + matches!(item, CacheItem::Committee(committee) if committee == committee_b), "promise B should be resolved" ); // Check both entries again. assert!( - matches!(cache.get(&id_a).unwrap(), CacheItem::Complete(committee) if committee == committee_a), + matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee) if committee == committee_a), "promise A should remain resolved" ); assert!( - matches!(cache.get(&id_b).unwrap(), CacheItem::Complete(committee) if committee == committee_b), + matches!(cache.get(&id_b).unwrap(), CacheItem::Committee(committee) if committee == committee_b), "promise B should remain resolved" ); - assert_eq!(cache.len(), 2, "the cache should have two entries"); + assert_eq!(cache.cache.len(), 2, "the cache should have two entries"); } #[test] fn too_many_promises() { let mut cache = new_shuffling_cache(); - for i in 0..cache.max_concurrent_promises() { + for i in 0..MAX_CONCURRENT_PROMISES { cache.create_promise(shuffling_id(i as u64)).unwrap(); } // Ensure that the next promise returns an error. It is important for the application to // dump his ass when he can't keep his promises, you're a queen and you deserve better. 
assert!(matches!( - cache.create_promise(shuffling_id(cache.max_concurrent_promises() as u64)), - Err(PromiseCacheError::MaxConcurrentPromises(n)) - if n == cache.max_concurrent_promises() + cache.create_promise(shuffling_id(MAX_CONCURRENT_PROMISES as u64)), + Err(BeaconChainError::MaxCommitteePromises( + MAX_CONCURRENT_PROMISES + )) )); assert_eq!( - cache.len(), - cache.max_concurrent_promises(), + cache.cache.len(), + MAX_CONCURRENT_PROMISES, "the cache should have two entries" ); } @@ -298,9 +486,9 @@ mod test { let mut cache = new_shuffling_cache(); let id_a = shuffling_id(1); let committee_cache_a = Arc::new(CommitteeCache::default()); - cache.insert_value(id_a.clone(), &committee_cache_a); + cache.insert_committee_cache(id_a.clone(), &committee_cache_a); assert!( - matches!(cache.get(&id_a).unwrap(), CacheItem::Complete(committee_cache) if committee_cache == committee_cache_a), + matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee_cache) if committee_cache == committee_cache_a), "should insert committee cache" ); } @@ -313,7 +501,7 @@ mod test { .collect::>(); for (shuffling_id, committee_cache) in shuffling_id_and_committee_caches.iter() { - cache.insert_value(shuffling_id.clone(), committee_cache); + cache.insert_committee_cache(shuffling_id.clone(), committee_cache); } for i in 1..(TEST_CACHE_SIZE + 1) { @@ -327,7 +515,11 @@ mod test { !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), "should not contain oldest epoch shuffling id" ); - assert_eq!(cache.len(), TEST_CACHE_SIZE, "should limit cache size"); + assert_eq!( + cache.cache.len(), + cache.cache_size, + "should limit cache size" + ); } #[test] @@ -342,7 +534,7 @@ mod test { shuffling_epoch: (current_epoch + 1).into(), shuffling_decision_block: Hash256::from_low_u64_be(current_epoch + i as u64), }; - cache.insert_value(shuffling_id, &committee_cache); + cache.insert_committee_cache(shuffling_id, &committee_cache); } // Now, update the head shuffling ids @@ -352,12 
+544,12 @@ mod test { previous: Some(shuffling_id(current_epoch - 1)), block_root: Hash256::from_low_u64_le(42), }; - cache.update_protector(head_shuffling_ids.clone()); + cache.update_head_shuffling_ids(head_shuffling_ids.clone()); // Insert head state shuffling ids. Should not be overridden by other shuffling ids. - cache.insert_value(head_shuffling_ids.current.clone(), &committee_cache); - cache.insert_value(head_shuffling_ids.next.clone(), &committee_cache); - cache.insert_value( + cache.insert_committee_cache(head_shuffling_ids.current.clone(), &committee_cache); + cache.insert_committee_cache(head_shuffling_ids.next.clone(), &committee_cache); + cache.insert_committee_cache( head_shuffling_ids.previous.clone().unwrap(), &committee_cache, ); @@ -368,7 +560,7 @@ mod test { shuffling_epoch: Epoch::from(i), shuffling_decision_block: Hash256::from_low_u64_be(i as u64), }; - cache.insert_value(shuffling_id, &committee_cache); + cache.insert_committee_cache(shuffling_id, &committee_cache); } assert!( @@ -383,6 +575,10 @@ mod test { cache.contains(&head_shuffling_ids.previous.unwrap()), "should retain head shuffling id for previous epoch." ); - assert_eq!(cache.len(), TEST_CACHE_SIZE, "should limit cache size"); + assert_eq!( + cache.cache.len(), + cache.cache_size, + "should limit cache size" + ); } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 70afc4b9a82..e42e44f4697 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -395,7 +395,7 @@ fn advance_head( .shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::AttestationCacheLockTimeout)? 
- .insert_value(shuffling_id.clone(), committee_cache); + .insert_committee_cache(shuffling_id.clone(), committee_cache); debug!( log, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 42188a6c97c..7f5ffbd7e5c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -933,7 +933,7 @@ pub fn serve( .shuffling_cache .try_write_for(std::time::Duration::from_secs(1)) { - cache_write.insert_value( + cache_write.insert_committee_cache( shuffling_id, &possibly_built_cache, ); diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index c4b721f0411..fdc99fa954e 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,7 +1,6 @@ use crate::ExecutionOptimistic; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; -use slog::{debug, warn}; use std::fmt; use std::str::FromStr; use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; @@ -188,49 +187,6 @@ impl StateId { _ => (self.root(chain)?, None), }; - let mut opt_state_cache = Some(chain.parallel_state_cache.write()); - - // Try the cache. - if let Some(cache_item) = opt_state_cache - .as_mut() - .and_then(|cache| cache.get(&state_root)) - { - drop(opt_state_cache.take()); - match cache_item.wait() { - Ok(state) => { - debug!( - chain.logger(), - "HTTP state cache hit"; - "state_root" => ?state_root, - "slot" => state.slot(), - ); - return Ok(((*state).clone(), execution_optimistic, finalized)); - } - Err(e) => { - warn!( - chain.logger(), - "State promise failed"; - "state_root" => ?state_root, - "outcome" => "re-computing", - "error" => ?e, - ); - } - } - } - - // Re-lock only in case of failed promise. 
- debug!( - chain.logger(), - "HTTP state cache miss"; - "state_root" => ?state_root - ); - let mut state_cache = opt_state_cache.unwrap_or_else(|| chain.parallel_state_cache.write()); - - let sender = state_cache.create_promise(state_root).map_err(|e| { - warp_utils::reject::custom_server_error(format!("too many concurrent requests: {e:?}")) - })?; - drop(state_cache); - let state = chain .get_state(&state_root, slot_opt) .map_err(warp_utils::reject::beacon_chain_error) @@ -243,11 +199,6 @@ impl StateId { }) })?; - // Fulfil promise (and re-lock again). - let mut state_cache = chain.parallel_state_cache.write(); - state_cache.resolve_promise(sender, state_root, &state); - drop(state_cache); - Ok((state, execution_optimistic, finalized)) } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index c9f8cb381c9..f7bba900372 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2295,7 +2295,7 @@ impl NetworkBeaconProcessor { debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - AttnError::BeaconChainError(BeaconChainError::ShufflingCacheError(e)) => { + e @ AttnError::BeaconChainError(BeaconChainError::MaxCommitteePromises(_)) => { debug!( self.log, "Dropping attestation"; diff --git a/common/promise_cache/Cargo.toml b/common/promise_cache/Cargo.toml deleted file mode 100644 index b5fa42bd438..00000000000 --- a/common/promise_cache/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "promise_cache" -version = "0.1.0" -edition.workspace = true - -[dependencies] -derivative = { workspace = true } -oneshot_broadcast = { path = "../oneshot_broadcast" } -itertools = { workspace = true } -slog = { workspace = true } diff --git a/common/promise_cache/src/lib.rs 
b/common/promise_cache/src/lib.rs deleted file mode 100644 index 36b6bd984f5..00000000000 --- a/common/promise_cache/src/lib.rs +++ /dev/null @@ -1,227 +0,0 @@ -use derivative::Derivative; -use itertools::Itertools; -use oneshot_broadcast::{oneshot, Receiver, Sender}; -use slog::Logger; -use std::collections::HashMap; -use std::hash::Hash; -use std::sync::Arc; - -#[derive(Debug)] -pub struct PromiseCache -where - K: Hash + Eq + Clone, - P: Protect, -{ - cache: HashMap>, - capacity: usize, - protector: P, - max_concurrent_promises: usize, - logger: Logger, -} - -/// A value implementing `Protect` is capable of preventing keys of type `K` from being evicted. -/// -/// It also dictates an ordering on keys which is used to prioritise evictions. -pub trait Protect { - type SortKey: Ord; - - fn sort_key(&self, k: &K) -> Self::SortKey; - - fn protect_from_eviction(&self, k: &K) -> bool; - - fn notify_eviction(&self, _k: &K, _log: &Logger) {} -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""))] -pub enum CacheItem { - Complete(Arc), - Promise(Receiver>), -} - -impl std::fmt::Debug for CacheItem { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - match self { - CacheItem::Complete(value) => value.fmt(f), - CacheItem::Promise(_) => "Promise(..)".fmt(f), - } - } -} - -#[derive(Debug)] -pub enum PromiseCacheError { - Failed(oneshot_broadcast::Error), - MaxConcurrentPromises(usize), -} - -pub trait ToArc { - fn to_arc(&self) -> Arc; -} - -impl CacheItem { - pub fn is_promise(&self) -> bool { - matches!(self, CacheItem::Promise(_)) - } - - pub fn wait(self) -> Result, PromiseCacheError> { - match self { - CacheItem::Complete(value) => Ok(value), - CacheItem::Promise(receiver) => receiver.recv().map_err(PromiseCacheError::Failed), - } - } -} - -impl ToArc for Arc { - fn to_arc(&self) -> Arc { - self.clone() - } -} - -impl ToArc for T -where - T: Clone, -{ - fn to_arc(&self) -> Arc { - Arc::new(self.clone()) - } -} - -impl PromiseCache 
-where - K: Hash + Eq + Clone, - P: Protect, -{ - pub fn new(capacity: usize, protector: P, logger: Logger) -> Self { - // Making the concurrent promises directly configurable is considered overkill for now, - // so we just derive a vaguely sensible value from the cache size. - let max_concurrent_promises = std::cmp::max(2, capacity / 8); - Self { - cache: HashMap::new(), - capacity, - protector, - max_concurrent_promises, - logger, - } - } - - pub fn get(&mut self, key: &K) -> Option> { - match self.cache.get(key) { - // The cache contained the value, return it. - item @ Some(CacheItem::Complete(_)) => item.cloned(), - // The cache contains a promise for the value. Check to see if the promise has already - // been resolved, without waiting for it. - item @ Some(CacheItem::Promise(receiver)) => match receiver.try_recv() { - // The promise has already been resolved. Replace the entry in the cache with a - // `Complete` entry and then return the value. - Ok(Some(value)) => { - let ready = CacheItem::Complete(value); - self.insert_cache_item(key.clone(), ready.clone()); - Some(ready) - } - // The promise has not yet been resolved. Return the promise so the caller can await - // it. - Ok(None) => item.cloned(), - // The sender has been dropped without sending a value. There was most likely an - // error computing the value. Drop the key from the cache and return - // `None` so the caller can recompute the value. - // - // It's worth noting that this is the only place where we removed unresolved - // promises from the cache. This means unresolved promises will only be removed if - // we try to access them again. This is OK, since the promises don't consume much - // memory. We expect that *all* promises should be resolved, unless there is a - // programming or database error. - Err(oneshot_broadcast::Error::SenderDropped) => { - self.cache.remove(key); - None - } - }, - // The cache does not have this value and it's not already promised to be computed. 
- None => None, - } - } - - pub fn contains(&self, key: &K) -> bool { - self.cache.contains_key(key) - } - - pub fn insert_value>(&mut self, key: K, value: &C) { - if self - .cache - .get(&key) - // Replace the value if it's not present or if it's a promise. A bird in the hand is - // worth two in the promise-bush! - .map_or(true, CacheItem::is_promise) - { - self.insert_cache_item(key, CacheItem::Complete(value.to_arc())); - } - } - - /// Take care of resolving a promise by ensuring the value is made available: - /// - /// 1. To all waiting thread that are holding a `Receiver`. - /// 2. In the cache itself for future callers. - pub fn resolve_promise>(&mut self, sender: Sender>, key: K, value: &C) { - // Use the sender to notify all actively waiting receivers. - let arc_value = value.to_arc(); - sender.send(arc_value.clone()); - - // Re-insert the value into the cache. The promise may have been evicted in the meantime, - // but we probably want to keep this value (which resolved recently) over other older cache - // entries. - self.insert_value(key, &arc_value); - } - - /// Prunes the cache first before inserting a new item. 
- fn insert_cache_item(&mut self, key: K, cache_item: CacheItem) { - self.prune_cache(); - self.cache.insert(key, cache_item); - } - - pub fn create_promise(&mut self, key: K) -> Result>, PromiseCacheError> { - let num_active_promises = self.cache.values().filter(|item| item.is_promise()).count(); - if num_active_promises >= self.max_concurrent_promises { - return Err(PromiseCacheError::MaxConcurrentPromises( - num_active_promises, - )); - } - - let (sender, receiver) = oneshot(); - self.insert_cache_item(key, CacheItem::Promise(receiver)); - Ok(sender) - } - - fn prune_cache(&mut self) { - let target_cache_size = self.capacity.saturating_sub(1); - if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) { - let keys_to_prune = self - .cache - .keys() - .filter(|k| !self.protector.protect_from_eviction(*k)) - .sorted_by_key(|k| self.protector.sort_key(k)) - .take(prune_count) - .cloned() - .collect::>(); - - for key in &keys_to_prune { - self.protector.notify_eviction(key, &self.logger); - self.cache.remove(key); - } - } - } - - pub fn update_protector(&mut self, protector: P) { - self.protector = protector; - } - - pub fn len(&self) -> usize { - self.cache.len() - } - - pub fn is_empty(&self) -> bool { - self.cache.is_empty() - } - - pub fn max_concurrent_promises(&self) -> usize { - self.max_concurrent_promises - } -} diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 325c3f9dbce..761671a5b1a 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1829,25 +1829,6 @@ fn historic_state_cache_size_default() { }); } #[test] -fn parallel_state_cache_size_flag() { - CommandLineTest::new() - .flag("parallel-state-cache-size", Some("4")) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.parallel_state_cache_size, 4_usize)); -} -#[test] -fn parallel_state_cache_size_default() { - use beacon_node::beacon_chain::chain_config::DEFAULT_PARALLEL_STATE_CACHE_SIZE; - 
CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.chain.parallel_state_cache_size, - DEFAULT_PARALLEL_STATE_CACHE_SIZE - ); - }); -} -#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false")) From 627cac56586a3d9b12a1d8b0177a73de4c6b84c3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 12 Apr 2024 15:58:57 +1000 Subject: [PATCH 25/41] Fix state advance bugs --- .../beacon_chain/src/block_verification.rs | 28 ++++++++----- .../beacon_chain/src/state_advance_timer.rs | 41 +++++++++++++++++-- beacon_node/store/src/hot_cold_store.rs | 15 +++++++ 3 files changed, 71 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 6f3d65b7a42..7b5e23ad745 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1383,8 +1383,18 @@ impl ExecutionPendingBlock { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); // Stage a batch of operations to be completed atomically if this block is imported - // successfully. - let mut confirmed_state_roots = vec![]; + // successfully. We include the state root of the pre-state, which may be an advanced state + // that was stored in the DB with a `temporary` flag. + let mut state = parent.pre_state; + + let mut confirmed_state_roots = if state.slot() > parent.beacon_block.slot() { + // Advanced pre-state. Delete its temporary flag. + let pre_state_root = state.update_tree_hash_cache()?; + vec![pre_state_root] + } else { + // Pre state is parent state. It is already stored in the DB without temporary status. + vec![] + }; // The block must have a higher slot than its parent. 
if block.slot() <= parent.beacon_block.slot() { @@ -1394,14 +1404,6 @@ impl ExecutionPendingBlock { }); } - let mut summaries = vec![]; - - // Transition the parent state to the block slot. - // - // It is important to note that we're using a "pre-state" here, one that has potentially - // been advanced one slot forward from `parent.beacon_block.slot`. - let mut state = parent.pre_state; - // Perform a sanity check on the pre-state. let parent_slot = parent.beacon_block.slot(); if state.slot() < parent_slot || state.slot() > block.slot() { @@ -1420,6 +1422,12 @@ impl ExecutionPendingBlock { eth1_deposit_index: state.eth1_deposit_index(), }; + // Transition the parent state to the block slot. + // + // It is important to note that we're using a "pre-state" here, one that has potentially + // been advanced one slot forward from `parent.beacon_block.slot`. + let mut summaries = vec![]; + let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { let state_root = if parent.beacon_block.slot() == state.slot() { diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index e42e44f4697..1f928a16e42 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -25,6 +25,7 @@ use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; +use store::KeyValueStore; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; @@ -48,7 +49,6 @@ enum Error { BeaconChain(BeaconChainError), // We don't use the inner value directly, but it's used in the Debug impl. HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256), - // We don't use the inner value directly, but it's used in the Debug impl. 
BeaconState(#[allow(dead_code)] BeaconStateError), Store(#[allow(dead_code)] store::Error), MaxDistanceExceeded { @@ -253,12 +253,36 @@ async fn state_advance_timer( ); None }); + + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + beacon_chain.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance_signal_tx", + ); }, "fork_choice_advance", ); } } +/// Reads the `state_cache` from the `beacon_chain` and attempts to take a clone of the +/// `BeaconState` of the head block. If it obtains this clone, the state will be advanced a single +/// slot then placed in the `state_cache` to be used for block verification. +/// +/// See the module-level documentation for rationale. fn advance_head( beacon_chain: &Arc>, log: &Logger, @@ -435,9 +459,20 @@ fn advance_head( ); } - // Write the advanced state to the database. + // Write the advanced state to the database with a temporary flag that will be deleted when + // a block is imported on top of this state. We should delete this once we bring in the DB + // changes from tree-states that allow us to prune states without temporary flags. let advanced_state_root = state.update_tree_hash_cache()?; - beacon_chain.store.put_state(&advanced_state_root, &state)?; + let txn_lock = beacon_chain.store.hot_db.begin_rw_transaction(); + let state_already_exists = beacon_chain + .store + .load_hot_state_summary(&advanced_state_root)? 
+ .is_some(); + let temporary = !state_already_exists; + beacon_chain + .store + .put_state_possibly_temporary(&advanced_state_root, &state, temporary)?; + drop(txn_lock); debug!( log, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 7cf7c147d2d..73c17db3480 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -636,11 +636,26 @@ impl, Cold: ItemStore> HotColdDB /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { + self.put_state_possibly_temporary(state_root, state, false) + } + + /// Store a state in the store. + /// + /// The `temporary` flag indicates whether this state is temporary (not yet canonical); temporary states are stored with a flag and may be pruned if no block is imported on top of them. + pub fn put_state_possibly_temporary( + &self, + state_root: &Hash256, + state: &BeaconState, + temporary: bool, + ) -> Result<(), Error> { let mut ops: Vec = Vec::new(); if state.slot() < self.get_split_slot() { self.store_cold_state(state_root, state, &mut ops)?; self.cold_db.do_atomically(ops) } else { + if temporary { + ops.push(TemporaryFlag.as_kv_store_op(*state_root)); + } self.store_hot_state(state_root, state, &mut ops)?; self.hot_db.do_atomically(ops) } From e7a8fd720988be7387a374da72bb07721b45a183 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 12 Apr 2024 16:15:24 +1000 Subject: [PATCH 26/41] Fix shuffling tests --- beacon_node/beacon_chain/src/shuffling_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index b3de6f91c92..04d58882639 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -339,7 +339,7 @@ mod test { .clone(); let committee_b = state.committee_cache(RelativeEpoch::Next).unwrap().clone(); assert!(committee_a != committee_b); - (Arc::new(committee_a), Arc::new(committee_b)) + 
(committee_a, committee_b) } /// Builds a deterministic but incoherent shuffling ID from a `u64`. From b570e0dd755b4bab36de9976409936534fa5f31c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 10:19:31 +1000 Subject: [PATCH 27/41] Remove some resolved FIXMEs --- .../src/per_block_processing/altair/sync_committee.rs | 1 - consensus/types/src/execution_payload.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index e35494a96ef..210db4c9c15 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -59,7 +59,6 @@ pub fn process_sync_aggregate( .into_iter() .zip(aggregate.sync_committee_bits.iter()) { - // FIXME(sproul): double-check this for Capella, proposer shouldn't have 0 effective balance if participation_bit { // Accumulate proposer rewards in a temp var in case the proposer has very low balance, is // part of the sync committee, does not participate and its penalties saturate. 
diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 68e8f6f444f..27dc8cab0a4 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -6,8 +6,6 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -// FIXME(sproul): try milhouse Vector - pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, From db5f5ea1d25cd8eeadaa39336edfe0c1eb6fd01b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 11:40:19 +1000 Subject: [PATCH 28/41] Remove StateProcessingStrategy --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +--- .../beacon_chain/src/block_verification.rs | 3 +-- .../state_lru_cache.rs | 3 +-- beacon_node/beacon_chain/src/fork_revert.rs | 3 +-- .../beacon_chain/tests/block_verification.rs | 5 +--- beacon_node/store/src/hot_cold_store.rs | 23 +++---------------- beacon_node/store/src/reconstruct.rs | 3 +-- .../state_processing/src/block_replayer.rs | 23 ------------------- consensus/state_processing/src/lib.rs | 2 +- .../src/per_block_processing.rs | 6 +---- .../src/per_block_processing/tests.rs | 7 +----- lcli/src/transition_blocks.rs | 3 +-- testing/ef_tests/src/cases/sanity_blocks.rs | 4 +--- testing/ef_tests/src/cases/transition.rs | 3 +-- testing/state_transition_vectors/src/exit.rs | 3 +-- 15 files changed, 16 insertions(+), 79 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f4a02b7db38..94e40ef6a85 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -102,8 +102,7 @@ use state_processing::{ }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, - BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, StateProcessingStrategy, - VerifyBlockRoot, VerifyOperation, + 
BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::borrow::Cow; use std::cmp::Ordering; @@ -5266,7 +5265,6 @@ impl BeaconChain { &mut state, &block, signature_strategy, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 7b5e23ad745..32bba73307f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -84,7 +84,7 @@ use state_processing::{ per_block_processing, per_slot_processing, state_advance::partial_state_advance, AllCaches, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - StateProcessingStrategy, VerifyBlockRoot, + VerifyBlockRoot, }; use std::borrow::Cow; use std::fmt::Debug; @@ -1568,7 +1568,6 @@ impl ExecutionPendingBlock { block.as_block(), // Signatures were verified earlier in this function. 
BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut consensus_context, &chain.spec, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index c3492b53bda..905d2b2c33f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -8,7 +8,7 @@ use crate::{ use lru::LruCache; use parking_lot::RwLock; use ssz_derive::{Decode, Encode}; -use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy}; +use state_processing::{BlockReplayer, ConsensusContext}; use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; @@ -182,7 +182,6 @@ impl StateLRUCache { let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> = BlockReplayer::new(parent_state, &self.spec) .no_signature_verification() - .state_processing_strategy(StateProcessingStrategy::Accurate) .state_root_iter(state_roots.into_iter()) .minimal_block_root_verification(); diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 084ae95e096..8d1c29f46f6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -5,7 +5,7 @@ use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, - StateProcessingStrategy, VerifyBlockRoot, + VerifyBlockRoot, }; use std::sync::Arc; use std::time::Duration; @@ -175,7 +175,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It &mut state, &block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut 
ctxt, spec, diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 4d37557f0d1..98a112daffe 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -15,8 +15,7 @@ use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, ConsensusContext, StateProcessingStrategy, - VerifyBlockRoot, + per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; use std::marker::PhantomData; use std::sync::Arc; @@ -1309,7 +1308,6 @@ async fn add_base_block_to_altair_chain() { &mut state, &base_block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, @@ -1445,7 +1443,6 @@ async fn add_altair_block_to_base_chain() { &mut state, &altair_block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 73c17db3480..2e773f0ceba 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -32,7 +32,6 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ block_replayer::PreSlotHook, BlockProcessingError, BlockReplayer, SlotProcessingError, - StateProcessingStrategy, }; use std::cmp::min; use std::marker::PhantomData; @@ -1159,14 +1158,7 @@ impl, Cold: ItemStore> HotColdDB } else { let blocks = self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; - self.replay_blocks( - boundary_state, - blocks, - slot, - no_state_root_iter(), - None, - StateProcessingStrategy::Accurate, - )? 
+ self.replay_blocks(boundary_state, blocks, slot, no_state_root_iter(), None)? }; state.apply_pending_mutations()?; @@ -1337,14 +1329,7 @@ impl, Cold: ItemStore> HotColdDB &self.spec, )?; - let mut state = self.replay_blocks( - low_state, - blocks, - slot, - Some(state_root_iter), - None, - StateProcessingStrategy::Accurate, - )?; + let mut state = self.replay_blocks(low_state, blocks, slot, Some(state_root_iter), None)?; state.apply_pending_mutations()?; // If state is not error, put it in the cache. @@ -1437,12 +1422,10 @@ impl, Cold: ItemStore> HotColdDB target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, - state_processing_strategy: StateProcessingStrategy, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() - .minimal_block_root_verification() - .state_processing_strategy(state_processing_strategy); + .minimal_block_root_verification(); let have_state_root_iterator = state_root_iter.is_some(); if let Some(state_root_iter) = state_root_iter { diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 8fe13777ac4..8ef4886565c 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -5,7 +5,7 @@ use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, - StateProcessingStrategy, VerifyBlockRoot, + VerifyBlockRoot, }; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -94,7 +94,6 @@ where &mut state, &block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 1749f773f3a..462b85bd19e 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -62,16 
+62,6 @@ impl From for BlockReplayError { } } -/// Defines how state roots should be computed and whether to perform all state transitions during block replay. -#[derive(PartialEq, Clone, Copy)] -pub enum StateProcessingStrategy { - /// Perform all transitions faithfully to the specification. - Accurate, - /// Don't compute state roots and process withdrawals, eventually computing an invalid beacon - /// state that can only be used for obtaining shuffling. - Inconsistent, -} - impl<'a, E, Error, StateRootIter> BlockReplayer<'a, E, Error, StateRootIter> where E: EthSpec, @@ -101,18 +91,6 @@ where } } - /// Set the replayer's state processing strategy different from the default. - pub fn state_processing_strategy( - mut self, - state_processing_strategy: StateProcessingStrategy, - ) -> Self { - // FIXME(sproul): no-op - if state_processing_strategy == StateProcessingStrategy::Inconsistent { - self.verify_block_root = None; - } - self - } - /// Set the replayer's block signature verification strategy. 
pub fn block_signature_strategy(mut self, block_sig_strategy: BlockSignatureStrategy) -> Self { self.block_sig_strategy = block_sig_strategy; @@ -259,7 +237,6 @@ where &mut self.state, block, self.block_sig_strategy, - StateProcessingStrategy::Accurate, verify_block_root, &mut ctxt, self.spec, diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 7d84c426e8c..74f9d84bb11 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -30,7 +30,7 @@ pub mod upgrade; pub mod verify_operation; pub use all_caches::AllCaches; -pub use block_replayer::{BlockReplayError, BlockReplayer, StateProcessingStrategy}; +pub use block_replayer::{BlockReplayError, BlockReplayer}; pub use consensus_context::{ConsensusContext, ContextError}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index b370ec6216b..a8447b7714a 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -40,7 +40,6 @@ mod verify_exit; mod verify_proposer_slashing; use crate::common::decrease_balance; -use crate::StateProcessingStrategy; use crate::common::update_progressive_balances_cache::{ initialize_progressive_balances_cache, update_progressive_balances_metrics, @@ -102,7 +101,6 @@ pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, block_signature_strategy: BlockSignatureStrategy, - state_processing_strategy: StateProcessingStrategy, verify_block_root: VerifyBlockRoot, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -172,9 +170,7 @@ pub fn per_block_processing>( // previous block. 
if is_execution_enabled(state, block.body()) { let body = block.body(); - if state_processing_strategy == StateProcessingStrategy::Accurate { - process_withdrawals::(state, body.execution_payload()?, spec)?; - } + process_withdrawals::(state, body.execution_payload()?, spec)?; process_execution_payload::(state, body, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2a2b67e30da..f0055fa80dd 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -5,7 +5,7 @@ use crate::per_block_processing::errors::{ DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy}; +use crate::{per_block_processing, BlockReplayError, BlockReplayer}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -72,7 +72,6 @@ async fn valid_block_ok() { &mut state, &block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -98,7 +97,6 @@ async fn invalid_block_header_state_slot() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -131,7 +129,6 @@ async fn invalid_parent_block_root() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -165,7 +162,6 @@ async fn invalid_block_signature() { &mut state, &SignedBeaconBlock::from_block(block, Signature::empty()), BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, 
VerifyBlockRoot::True, &mut ctxt, &spec, @@ -199,7 +195,6 @@ async fn invalid_randao_reveal_signature() { &mut state, &signed_block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 7f188387d9d..77fd352829f 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -76,7 +76,7 @@ use ssz::Encode; use state_processing::state_advance::complete_state_advance; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, AllCaches, - BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -399,7 +399,6 @@ fn do_transition( &mut pre_state, &block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index b0902cb5b74..91bb995cc43 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use types::{BeaconState, RelativeEpoch, SignedBeaconBlock}; @@ -96,7 +96,6 @@ impl Case for SanityBlocks { &mut indiv_state, signed_block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, @@ -107,7 +106,6 @@ impl Case for SanityBlocks { &mut bulk_state, signed_block, BlockSignatureStrategy::VerifyBulk, - StateProcessingStrategy::Accurate, 
VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 927589948a2..b2c49a96feb 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,7 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, SignedBeaconBlock}; @@ -114,7 +114,6 @@ impl Case for TransitionTest { &mut state, block, BlockSignatureStrategy::VerifyBulk, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 991e91fd337..61cae6dbe1b 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -1,7 +1,7 @@ use super::*; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use types::{BeaconBlock, Epoch}; @@ -68,7 +68,6 @@ impl ExitTest { state, block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &E::default_spec(), From 297cea568247252fe35b18e6a5d633a9b6d0e42d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 11:48:23 +1000 Subject: [PATCH 29/41] Optimise withdrawals calculation --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index 94e40ef6a85..2dbf3f167af 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4397,7 +4397,6 @@ impl BeaconChain { let parent_block_root = forkchoice_update_params.head_root; - // FIXME(sproul): optimise this for tree-states let (unadvanced_state, unadvanced_state_root) = if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) @@ -4411,10 +4410,11 @@ impl BeaconChain { let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; - let state = self - .get_state(&block.state_root(), Some(block.slot()))? + let (state_root, state) = self + .store + .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? .ok_or(Error::MissingBeaconState(block.state_root()))?; - (Cow::Owned(state), block.state_root()) + (Cow::Owned(state), state_root) }; // Parent state epoch is the same as the proposal, we don't need to advance because the From 3be919d760c6a234bef4ac19664351d1ba4e1db1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 12:04:53 +1000 Subject: [PATCH 30/41] Don't reorg if state cache is missed --- beacon_node/beacon_chain/src/beacon_chain.rs | 14 ++++++------- beacon_node/store/src/hot_cold_store.rs | 21 +++++++++++++++----- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2dbf3f167af..fb103d3af5a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4248,20 +4248,18 @@ impl BeaconChain { .ok()?; drop(proposer_head_timer); let re_org_parent_block = proposer_head.parent_node.root; - let re_org_parent_state_root = proposer_head.parent_node.state_root; - // FIXME(sproul): consider not re-orging if we miss the cache let (state_root, state) = self 
.store - .get_advanced_hot_state(re_org_parent_block, slot, re_org_parent_state_root) - .map_err(|e| { + .get_advanced_hot_state_from_cache(re_org_parent_block, slot) + .or_else(|| { warn!( self.log, - "Error loading block production state"; - "error" => ?e, + "Not attempting re-org"; + "reason" => "no state in cache" ); - }) - .ok()??; + None + })?; info!( self.log, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 2e773f0ceba..8bc6fc09f6a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -734,11 +734,7 @@ impl, Cold: ItemStore> HotColdDB max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - if let Some(cached) = self - .state_cache - .lock() - .get_by_block_root(block_root, max_slot) - { + if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { return Ok(Some(cached)); } @@ -768,6 +764,21 @@ impl, Cold: ItemStore> HotColdDB Ok(opt_state) } + /// Same as `get_advanced_hot_state` but will return `None` if no compatible state is cached. + /// + /// If this function returns `Some(state)` then that `state` will always have + /// `latest_block_header` matching `block_root` but may not be advanced all the way through to + /// `max_slot`. + pub fn get_advanced_hot_state_from_cache( + &self, + block_root: Hash256, + max_slot: Slot, + ) -> Option<(Hash256, BeaconState)> { + self.state_cache + .lock() + .get_by_block_root(block_root, max_slot) + } + /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. 
/// /// It is assumed that all states being deleted reside in the hot DB, even if their slot is less From 78d02d52c7dc6b48759478a270f6afabe4238fe2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 12:12:01 +1000 Subject: [PATCH 31/41] Remove inconsistent state func --- beacon_node/beacon_chain/tests/store_tests.rs | 46 ------------------- beacon_node/store/src/hot_cold_store.rs | 22 --------- 2 files changed, 68 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 7efa67eec30..ba8a6bf7016 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -719,52 +719,6 @@ async fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[tokio::test] -async fn block_replay_with_inaccurate_state_roots() { - let num_blocks_produced = E::slots_per_epoch() * 3 + 31; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let chain = &harness.chain; - - harness - .extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - // Slot must not be 0 mod 32 or else no blocks will be replayed. 
- let (mut head_state, head_state_root) = harness.get_current_state_and_root(); - let head_block_root = harness.head_block_root(); - assert_ne!(head_state.slot() % 32, 0); - - let (_, mut fast_head_state) = store - .get_inconsistent_state_for_attestation_verification_only( - &head_block_root, - head_state.slot(), - head_state_root, - ) - .unwrap() - .unwrap(); - assert_eq!(head_state.validators(), fast_head_state.validators()); - - head_state.build_all_committee_caches(&chain.spec).unwrap(); - fast_head_state - .build_all_committee_caches(&chain.spec) - .unwrap(); - - assert_eq!( - head_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .unwrap(), - fast_head_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .unwrap() - ); -} - #[tokio::test] async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8bc6fc09f6a..c71781e0e05 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -693,28 +693,6 @@ impl, Cold: ItemStore> HotColdDB } } - /// Get a state with `latest_block_root == block_root` advanced through to at most `slot`. - /// - /// See `Self::get_advanced_hot_state` for information about `max_slot`. - /// - /// ## Warning - /// - /// The returned state **is not a valid beacon state**, it can only be used for obtaining - /// shuffling to process attestations. 
At least the following components of the state will be - /// broken/invalid: - /// - /// - `state.state_roots` - /// - `state.block_roots` - pub fn get_inconsistent_state_for_attestation_verification_only( - &self, - block_root: &Hash256, - max_slot: Slot, - state_root: Hash256, - ) -> Result)>, Error> { - metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); - self.get_advanced_hot_state(*block_root, max_slot, state_root) - } - /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`. /// /// The `state_root` argument is used to look up the block's un-advanced state in case an From e0b30adca07a023e4933ede6caa1b379a19522a2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 12:29:15 +1000 Subject: [PATCH 32/41] Fix beta compiler --- beacon_node/store/src/chunked_vector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index d3ba057209c..4450989d590 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -17,7 +17,7 @@ use self::UpdatePattern::*; use crate::*; use ssz::{Decode, Encode}; -use types::{historical_summary::HistoricalSummary, milhouse, List, Vector}; +use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. 
/// From cddbcbf438175a2ae4fde98c270f2bc286e4fe3b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 12:38:22 +1000 Subject: [PATCH 33/41] Rebase early, rebase often --- beacon_node/store/src/hot_cold_store.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c71781e0e05..708bf5b3516 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1101,10 +1101,10 @@ impl, Cold: ItemStore> HotColdDB let state_from_disk = self.load_hot_state(state_root)?; - if let Some((mut state, block_root)) = state_from_disk { - let mut state_cache = self.state_cache.lock(); - state_cache.rebase_on_finalized(&mut state, &self.spec)?; - state_cache.put_state(*state_root, block_root, &state)?; + if let Some((state, block_root)) = state_from_disk { + self.state_cache + .lock() + .put_state(*state_root, block_root, &state)?; Ok(Some(state)) } else { Ok(None) @@ -1135,11 +1135,17 @@ impl, Cold: ItemStore> HotColdDB epoch_boundary_state_root, }) = self.load_hot_state_summary(state_root)? { - let boundary_state = + let mut boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or( HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), )?; + // Immediately rebase the state from disk on the finalized state so that we can reuse + // parts of the tree for state root calculation in `replay_blocks`. + self.state_cache + .lock() + .rebase_on_finalized(&mut boundary_state, &self.spec)?; + // Optimization to avoid even *thinking* about replaying blocks if we're already // on an epoch boundary. 
let mut state = if slot % E::slots_per_epoch() == 0 { From d2657ccaa792432c7a3c89853ff26b9202bc37ea Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2024 17:48:50 +1000 Subject: [PATCH 34/41] Fix state caching behaviour --- beacon_node/store/src/hot_cold_store.rs | 91 +++++++++++++++++-- .../state_processing/src/block_replayer.rs | 47 +++++++--- 2 files changed, 116 insertions(+), 22 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 708bf5b3516..8f7c52ffa03 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -17,7 +17,7 @@ use crate::metadata::{ PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; -use crate::state_cache::StateCache; +use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, @@ -31,7 +31,8 @@ use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - block_replayer::PreSlotHook, BlockProcessingError, BlockReplayer, SlotProcessingError, + block_replayer::PreSlotHook, AllCaches, BlockProcessingError, BlockReplayer, + SlotProcessingError, }; use std::cmp::min; use std::marker::PhantomData; @@ -720,6 +721,15 @@ impl, Cold: ItemStore> HotColdDB // state. let split = self.split.read_recursive(); + if state_root != split.state_root { + warn!( + self.log, + "State cache missed"; + "state_root" => ?state_root, + "block_root" => ?block_root, + ); + } + // Sanity check max-slot against the split slot. if max_slot < split.slot { return Err(HotColdDBError::FinalizedStateNotInHotDatabase { @@ -1067,6 +1077,26 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { + // Put the state in the cache. 
+ let block_root = state.get_latest_block_root(*state_root); + + // Avoid storing states in the database if they already exist in the state cache. + // The exception to this is the finalized state, which must exist in the cache before it + // is stored on disk. + if let PutStateOutcome::Duplicate = + self.state_cache + .lock() + .put_state(*state_root, block_root, state)? + { + debug!( + self.log, + "Skipping storage of cached state"; + "slot" => state.slot(), + "state_root" => ?state_root + ); + return Ok(()); + } + // On the epoch boundary, store the full state. if state.slot() % E::slots_per_epoch() == 0 { trace!( @@ -1093,18 +1123,30 @@ impl, Cold: ItemStore> HotColdDB if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) { return Ok(Some(state)); } - warn!( - self.log, - "State cache missed"; - "state_root" => ?state_root, - ); + + if *state_root != self.get_split_info().state_root { + // Do not warn on start up when loading the split state. + warn!( + self.log, + "State cache missed"; + "state_root" => ?state_root, + ); + } let state_from_disk = self.load_hot_state(state_root)?; - if let Some((state, block_root)) = state_from_disk { + if let Some((mut state, block_root)) = state_from_disk { + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; self.state_cache .lock() .put_state(*state_root, block_root, &state)?; + debug!( + self.log, + "Cached state"; + "state_root" => ?state_root, + "slot" => state.slot(), + ); Ok(Some(state)) } else { Ok(None) @@ -1151,9 +1193,40 @@ impl, Cold: ItemStore> HotColdDB let mut state = if slot % E::slots_per_epoch() == 0 { boundary_state } else { + // Cache ALL intermediate states that are reached during block replay. We may want + // to restrict this in future to only cache epoch boundary states. At worst we will + // cache up to 32 states for each state loaded, which should not flush out the cache + // entirely. 
+ let state_cache_hook = |state_root, state: &mut BeaconState| { + // Ensure all caches are built before attempting to cache. + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; + + let latest_block_root = state.get_latest_block_root(state_root); + let state_slot = state.slot(); + if let PutStateOutcome::New = + self.state_cache + .lock() + .put_state(state_root, latest_block_root, state)? + { + debug!( + self.log, + "Cached ancestor state"; + "state_root" => ?state_root, + "slot" => state_slot, + ); + } + Ok(()) + }; let blocks = self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; - self.replay_blocks(boundary_state, blocks, slot, no_state_root_iter(), None)? + self.replay_blocks( + boundary_state, + blocks, + slot, + no_state_root_iter(), + Some(Box::new(state_cache_hook)), + )? }; state.apply_pending_mutations()?; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 462b85bd19e..d7621ebf18b 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -6,7 +6,10 @@ use crate::{ use itertools::Itertools; use std::iter::Peekable; use std::marker::PhantomData; -use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, + Slot, +}; pub type PreBlockHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, &SignedBeaconBlock>) -> Result<(), Error> @@ -14,7 +17,7 @@ pub type PreBlockHook<'a, E, Error> = Box< >; pub type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; pub type PreSlotHook<'a, E, Error> = - Box, &mut BeaconState) -> Result<(), Error> + 'a>; + Box) -> Result<(), Error> + 'a>; pub type PostSlotHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, Option>, bool) -> Result<(), Error> + 'a, @@ -45,9 +48,9 @@ pub struct BlockReplayer< 
#[derive(Debug)] pub enum BlockReplayError { - NoBlocks, SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), + BeaconState(BeaconStateError), } impl From for BlockReplayError { @@ -62,6 +65,12 @@ impl From for BlockReplayError { } } +impl From for BlockReplayError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + impl<'a, E, Error, StateRootIter> BlockReplayer<'a, E, Error, StateRootIter> where E: EthSpec, @@ -152,17 +161,25 @@ where self } - /// Compute the state root for `slot` as efficiently as possible. + /// Compute the state root for `self.state` as efficiently as possible. + /// + /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be + /// called between advancing a state with `per_slot_processing` and applying the block for that + /// slot. /// /// The `blocks` should be the full list of blocks being applied and `i` should be the index of /// the next block that will be applied, or `blocks.len()` if all blocks have already been /// applied. + /// + /// If the state root is not available from the state root iterator or the blocks then it will + /// be computed from `self.state` and a state root iterator miss will be recorded. fn get_state_root( &mut self, - slot: Slot, blocks: &[SignedBeaconBlock>], i: usize, - ) -> Result, Error> { + ) -> Result { + let slot = self.state.slot(); + // If a state root iterator is configured, use it to find the root. 
if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter @@ -171,7 +188,7 @@ where .transpose()?; if let Some((root, _)) = opt_root { - return Ok(Some(root)); + return Ok(root); } } @@ -179,13 +196,17 @@ where if let Some(prev_i) = i.checked_sub(1) { if let Some(prev_block) = blocks.get(prev_i) { if prev_block.slot() == slot { - return Ok(Some(prev_block.state_root())); + return Ok(prev_block.state_root()); } } } self.state_root_miss = true; - Ok(None) + let state_root = self + .state + .update_tree_hash_cache() + .map_err(BlockReplayError::from)?; + Ok(state_root) } /// Apply `blocks` atop `self.state`, taking care of slot processing. @@ -204,13 +225,13 @@ where } while self.state.slot() < block.slot() { - let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; + let state_root = self.get_state_root(&blocks, i)?; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; } - let summary = per_slot_processing(&mut self.state, state_root, self.spec) + let summary = per_slot_processing(&mut self.state, Some(state_root), self.spec) .map_err(BlockReplayError::from)?; if let Some(ref mut post_slot_hook) = self.post_slot_hook { @@ -250,13 +271,13 @@ where if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { - let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; + let state_root = self.get_state_root(&blocks, blocks.len())?; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; } - let summary = per_slot_processing(&mut self.state, state_root, self.spec) + let summary = per_slot_processing(&mut self.state, Some(state_root), self.spec) .map_err(BlockReplayError::from)?; if let Some(ref mut post_slot_hook) = self.post_slot_hook { From bca33f1f5fd3f3ed373f78887820e04e604989da Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 16 Apr 2024 11:27:28 +1000 Subject: [PATCH 
35/41] Update to milhouse release --- Cargo.lock | 23 ++++++++++++----------- Cargo.toml | 10 +++++----- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3a9f537f6f..047bdf24a82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2572,13 +2572,13 @@ dependencies = [ [[package]] name = "ethereum_hashing" -version = "1.0.0-beta.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35" +checksum = "6ea7b408432c13f71af01197b1d3d0069c48a27bfcfbe72a81fc346e47f6defb" dependencies = [ "cpufeatures", "lazy_static", - "ring 0.16.20", + "ring 0.17.8", "sha2 0.10.8", ] @@ -5230,7 +5230,8 @@ dependencies = [ [[package]] name = "milhouse" version = "0.1.0" -source = "git+https://github.com/sigp/milhouse?branch=main#40a536490b14dc95834f9ece0001e8e04f7b38d7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3826d3602a3674b07e080ce1982350e454ec253d73f156bd927ac1b652293f4d" dependencies = [ "arbitrary", "derivative", @@ -5239,7 +5240,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "rayon", "serde", "smallvec", @@ -7808,9 +7809,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382939886cb24ee8ac885d09116a60f6262d827c7a9e36012b4f6d3d0116d0b3" +checksum = "625b20de2d4b3891e6972f4ce5061cb11bd52b3479270c4b177c134b571194a9" dependencies = [ "arbitrary", "derivative", @@ -8628,9 +8629,9 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" +checksum = "134d6b24a5b829f30b5ee7de05ba7384557f5f6b00e29409cdf2392f93201bfa" dependencies = [ "ethereum-types 
0.14.1", "ethereum_hashing", @@ -8639,9 +8640,9 @@ dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84303a9c7cda5f085a3ed9cd241d1e95e04d88aab1d679b02f212e653537ba86" +checksum = "9ce7bccc538359a213436af7bc95804bdbf1c2a21d80e22953cbe9e096837ff1" dependencies = [ "darling", "quote", diff --git a/Cargo.toml b/Cargo.toml index dd1f811c84d..c1751087c23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,7 +115,7 @@ discv5 = { version = "0.4.1", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum-types = "0.14" -ethereum_hashing = "1.0.0-beta.2" +ethereum_hashing = "0.6.0" ethereum_serde_utils = "0.5.2" ethereum_ssz = "0.5" ethereum_ssz_derive = "0.5" @@ -133,7 +133,7 @@ libsecp256k1 = "0.7" log = "0.4" lru = "0.12" maplit = "1" -milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" } +milhouse = "0.1" num_cpus = "1" parking_lot = "0.12" paste = "1" @@ -158,7 +158,7 @@ slog-term = "2" sloggers = { version = "2", features = ["json"] } smallvec = "1.11.2" snap = "1" -ssz_types = "0.5" +ssz_types = "0.6" strum = { version = "0.24", features = ["derive"] } superstruct = "0.7" syn = "1" @@ -172,8 +172,8 @@ tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -tree_hash = "0.5" -tree_hash_derive = "0.5" +tree_hash = "0.6" +tree_hash_derive = "0.6" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.6", default-features = false, features = ["tls"] } From 3a16649023e2d6d40d704dbe958cec712544aba9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 18 Apr 2024 14:26:09 +1000 Subject: [PATCH 36/41] Fix on-disk consensus context format --- .../state_lru_cache.rs | 21 +++++-- beacon_node/store/src/consensus_context.rs | 56 +++++++++++++++++++ beacon_node/store/src/lib.rs | 2 + 
.../state_processing/src/consensus_context.rs | 17 +++--- 4 files changed, 80 insertions(+), 16 deletions(-) create mode 100644 beacon_node/store/src/consensus_context.rs diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index c3492b53bda..75993f5ab54 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -8,8 +8,9 @@ use crate::{ use lru::LruCache; use parking_lot::RwLock; use ssz_derive::{Decode, Encode}; -use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy}; +use state_processing::{BlockReplayer, StateProcessingStrategy}; use std::sync::Arc; +use store::OnDiskConsensusContext; use types::beacon_block_body::KzgCommitments; use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; @@ -26,7 +27,7 @@ pub struct DietAvailabilityPendingExecutedBlock { parent_block: SignedBeaconBlock>, parent_eth1_finalization_data: Eth1FinalizationData, confirmed_state_roots: Vec, - consensus_context: ConsensusContext, + consensus_context: OnDiskConsensusContext, payload_verification_outcome: PayloadVerificationOutcome, } @@ -94,7 +95,9 @@ impl StateLRUCache { parent_block: executed_block.import_data.parent_block, parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, confirmed_state_roots: executed_block.import_data.confirmed_state_roots, - consensus_context: executed_block.import_data.consensus_context, + consensus_context: OnDiskConsensusContext::from_consensus_context( + &executed_block.import_data.consensus_context, + ), payload_verification_outcome: executed_block.payload_verification_outcome, } } @@ -119,7 +122,9 @@ impl StateLRUCache { parent_eth1_finalization_data: 
diet_executed_block .parent_eth1_finalization_data, confirmed_state_roots: diet_executed_block.confirmed_state_roots, - consensus_context: diet_executed_block.consensus_context, + consensus_context: diet_executed_block + .consensus_context + .into_consensus_context(), }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, }) @@ -145,7 +150,9 @@ impl StateLRUCache { parent_block: diet_executed_block.parent_block, parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, confirmed_state_roots: diet_executed_block.confirmed_state_roots, - consensus_context: diet_executed_block.consensus_context, + consensus_context: diet_executed_block + .consensus_context + .into_consensus_context(), }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, }) @@ -232,7 +239,9 @@ impl From> parent_block: value.import_data.parent_block, parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, confirmed_state_roots: value.import_data.confirmed_state_roots, - consensus_context: value.import_data.consensus_context, + consensus_context: OnDiskConsensusContext::from_consensus_context( + &value.import_data.consensus_context, + ), payload_verification_outcome: value.payload_verification_outcome, } } diff --git a/beacon_node/store/src/consensus_context.rs b/beacon_node/store/src/consensus_context.rs new file mode 100644 index 00000000000..0dd6635bd6e --- /dev/null +++ b/beacon_node/store/src/consensus_context.rs @@ -0,0 +1,56 @@ +use ssz_derive::{Decode, Encode}; +use state_processing::ConsensusContext; +use types::{EthSpec, Hash256, Slot}; + +/// The consensus context is stored on disk as part of the data availability overflow cache. +/// +/// We use this separate struct to keep the on-disk format stable in the presence of changes to the +/// in-memory `ConsensusContext`. You MUST NOT change the fields of this struct without +/// superstructing it and implementing a schema migration. 
+#[derive(Debug, PartialEq, Clone, Encode, Decode)] +pub struct OnDiskConsensusContext { + /// Slot to act as an identifier/safeguard + slot: Slot, + /// Proposer index of the block at `slot`. + proposer_index: Option, + /// Block root of the block at `slot`. + current_block_root: Option, +} + +impl OnDiskConsensusContext { + pub fn from_consensus_context(ctxt: &ConsensusContext) -> Self { + // Match exhaustively on fields here so we are forced to *consider* updating the on-disk + // format when the `ConsensusContext` fields change. + let &ConsensusContext { + slot, + previous_epoch: _, + current_epoch: _, + proposer_index, + current_block_root, + indexed_attestations: _, + } = ctxt; + OnDiskConsensusContext { + slot, + proposer_index, + current_block_root, + } + } + + pub fn into_consensus_context(self) -> ConsensusContext { + let OnDiskConsensusContext { + slot, + proposer_index, + current_block_root, + } = self; + + let mut ctxt = ConsensusContext::new(slot); + + if let Some(proposer_index) = proposer_index { + ctxt = ctxt.set_proposer_index(proposer_index); + } + if let Some(block_root) = current_block_root { + ctxt = ctxt.set_current_block_root(block_root); + } + ctxt + } +} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index e86689b0cf1..c3136a910db 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -14,6 +14,7 @@ mod chunk_writer; pub mod chunked_iter; pub mod chunked_vector; pub mod config; +pub mod consensus_context; pub mod errors; mod forwards_iter; mod garbage_collection; @@ -30,6 +31,7 @@ pub mod iter; pub use self::chunk_writer::ChunkWriter; pub use self::config::StoreConfig; +pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs 
index 263539fa429..68659e367f0 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,7 +1,6 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::EpochCacheError; -use ssz_derive::{Decode, Encode}; use std::collections::{hash_map::Entry, HashMap}; use tree_hash::TreeHash; use types::{ @@ -9,22 +8,20 @@ use types::{ ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; -#[derive(Debug, PartialEq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Clone)] pub struct ConsensusContext { /// Slot to act as an identifier/safeguard - slot: Slot, + pub slot: Slot, /// Previous epoch of the `slot` precomputed for optimization purpose. - pub(crate) previous_epoch: Epoch, + pub previous_epoch: Epoch, /// Current epoch of the `slot` precomputed for optimization purpose. - pub(crate) current_epoch: Epoch, + pub current_epoch: Epoch, /// Proposer index of the block at `slot`. - proposer_index: Option, + pub proposer_index: Option, /// Block root of the block at `slot`. - current_block_root: Option, + pub current_block_root: Option, /// Cache of indexed attestations constructed during block processing. 
- /// We can skip serializing / deserializing this as the cache will just be rebuilt - #[ssz(skip_serializing, skip_deserializing)] - indexed_attestations: + pub indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, } From 9d3c321fbe4a07b9f3690b645a78e936cfb4df48 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 18 Apr 2024 14:52:14 +1000 Subject: [PATCH 37/41] Squashed commit of the following: commit 3a16649023e2d6d40d704dbe958cec712544aba9 Author: Michael Sproul Date: Thu Apr 18 14:26:09 2024 +1000 Fix on-disk consensus context format --- .../state_lru_cache.rs | 21 +++++-- beacon_node/store/src/consensus_context.rs | 56 +++++++++++++++++++ beacon_node/store/src/lib.rs | 2 + .../state_processing/src/consensus_context.rs | 17 +++--- 4 files changed, 80 insertions(+), 16 deletions(-) create mode 100644 beacon_node/store/src/consensus_context.rs diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 905d2b2c33f..2b70f843f62 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -8,8 +8,9 @@ use crate::{ use lru::LruCache; use parking_lot::RwLock; use ssz_derive::{Decode, Encode}; -use state_processing::{BlockReplayer, ConsensusContext}; +use state_processing::BlockReplayer; use std::sync::Arc; +use store::OnDiskConsensusContext; use types::beacon_block_body::KzgCommitments; use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; @@ -26,7 +27,7 @@ pub struct DietAvailabilityPendingExecutedBlock { parent_block: SignedBeaconBlock>, parent_eth1_finalization_data: Eth1FinalizationData, confirmed_state_roots: Vec, - consensus_context: ConsensusContext, + consensus_context: OnDiskConsensusContext, 
payload_verification_outcome: PayloadVerificationOutcome, } @@ -94,7 +95,9 @@ impl StateLRUCache { parent_block: executed_block.import_data.parent_block, parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, confirmed_state_roots: executed_block.import_data.confirmed_state_roots, - consensus_context: executed_block.import_data.consensus_context, + consensus_context: OnDiskConsensusContext::from_consensus_context( + &executed_block.import_data.consensus_context, + ), payload_verification_outcome: executed_block.payload_verification_outcome, } } @@ -119,7 +122,9 @@ impl StateLRUCache { parent_eth1_finalization_data: diet_executed_block .parent_eth1_finalization_data, confirmed_state_roots: diet_executed_block.confirmed_state_roots, - consensus_context: diet_executed_block.consensus_context, + consensus_context: diet_executed_block + .consensus_context + .into_consensus_context(), }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, }) @@ -145,7 +150,9 @@ impl StateLRUCache { parent_block: diet_executed_block.parent_block, parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, confirmed_state_roots: diet_executed_block.confirmed_state_roots, - consensus_context: diet_executed_block.consensus_context, + consensus_context: diet_executed_block + .consensus_context + .into_consensus_context(), }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, }) @@ -231,7 +238,9 @@ impl From> parent_block: value.import_data.parent_block, parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, confirmed_state_roots: value.import_data.confirmed_state_roots, - consensus_context: value.import_data.consensus_context, + consensus_context: OnDiskConsensusContext::from_consensus_context( + &value.import_data.consensus_context, + ), payload_verification_outcome: value.payload_verification_outcome, } } diff --git 
a/beacon_node/store/src/consensus_context.rs b/beacon_node/store/src/consensus_context.rs new file mode 100644 index 00000000000..0dd6635bd6e --- /dev/null +++ b/beacon_node/store/src/consensus_context.rs @@ -0,0 +1,56 @@ +use ssz_derive::{Decode, Encode}; +use state_processing::ConsensusContext; +use types::{EthSpec, Hash256, Slot}; + +/// The consensus context is stored on disk as part of the data availability overflow cache. +/// +/// We use this separate struct to keep the on-disk format stable in the presence of changes to the +/// in-memory `ConsensusContext`. You MUST NOT change the fields of this struct without +/// superstructing it and implementing a schema migration. +#[derive(Debug, PartialEq, Clone, Encode, Decode)] +pub struct OnDiskConsensusContext { + /// Slot to act as an identifier/safeguard + slot: Slot, + /// Proposer index of the block at `slot`. + proposer_index: Option, + /// Block root of the block at `slot`. + current_block_root: Option, +} + +impl OnDiskConsensusContext { + pub fn from_consensus_context(ctxt: &ConsensusContext) -> Self { + // Match exhaustively on fields here so we are forced to *consider* updating the on-disk + // format when the `ConsensusContext` fields change. 
+ let &ConsensusContext { + slot, + previous_epoch: _, + current_epoch: _, + proposer_index, + current_block_root, + indexed_attestations: _, + } = ctxt; + OnDiskConsensusContext { + slot, + proposer_index, + current_block_root, + } + } + + pub fn into_consensus_context(self) -> ConsensusContext { + let OnDiskConsensusContext { + slot, + proposer_index, + current_block_root, + } = self; + + let mut ctxt = ConsensusContext::new(slot); + + if let Some(proposer_index) = proposer_index { + ctxt = ctxt.set_proposer_index(proposer_index); + } + if let Some(block_root) = current_block_root { + ctxt = ctxt.set_current_block_root(block_root); + } + ctxt + } +} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index a3789c8ec45..66032d89c52 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -14,6 +14,7 @@ mod chunk_writer; pub mod chunked_iter; pub mod chunked_vector; pub mod config; +pub mod consensus_context; pub mod errors; mod forwards_iter; mod garbage_collection; @@ -31,6 +32,7 @@ pub mod iter; pub use self::chunk_writer::ChunkWriter; pub use self::config::StoreConfig; +pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 263539fa429..68659e367f0 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,7 +1,6 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::EpochCacheError; -use ssz_derive::{Decode, Encode}; use std::collections::{hash_map::Entry, HashMap}; use tree_hash::TreeHash; use types::{ @@ -9,22 +8,20 @@ use types::{ ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, 
SignedBeaconBlock, Slot, }; -#[derive(Debug, PartialEq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Clone)] pub struct ConsensusContext { /// Slot to act as an identifier/safeguard - slot: Slot, + pub slot: Slot, /// Previous epoch of the `slot` precomputed for optimization purpose. - pub(crate) previous_epoch: Epoch, + pub previous_epoch: Epoch, /// Current epoch of the `slot` precomputed for optimization purpose. - pub(crate) current_epoch: Epoch, + pub current_epoch: Epoch, /// Proposer index of the block at `slot`. - proposer_index: Option, + pub proposer_index: Option, /// Block root of the block at `slot`. - current_block_root: Option, + pub current_block_root: Option, /// Cache of indexed attestations constructed during block processing. - /// We can skip serializing / deserializing this as the cache will just be rebuilt - #[ssz(skip_serializing, skip_deserializing)] - indexed_attestations: + pub indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, } From 62ebdacd0874a67a5451e567219866a96077c242 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 19 Apr 2024 10:13:49 +1000 Subject: [PATCH 38/41] Keep indexed attestations, thanks Sean --- .../state_lru_cache.rs | 6 ++--- beacon_node/store/src/consensus_context.rs | 26 +++++++++++++------ .../state_processing/src/consensus_context.rs | 14 ++++++++++ 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 75993f5ab54..b6dbf2b952f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -27,7 +27,7 @@ pub struct DietAvailabilityPendingExecutedBlock { parent_block: SignedBeaconBlock>, parent_eth1_finalization_data: Eth1FinalizationData, confirmed_state_roots: Vec, - consensus_context: 
OnDiskConsensusContext, + consensus_context: OnDiskConsensusContext, payload_verification_outcome: PayloadVerificationOutcome, } @@ -96,7 +96,7 @@ impl StateLRUCache { parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, confirmed_state_roots: executed_block.import_data.confirmed_state_roots, consensus_context: OnDiskConsensusContext::from_consensus_context( - &executed_block.import_data.consensus_context, + executed_block.import_data.consensus_context, ), payload_verification_outcome: executed_block.payload_verification_outcome, } @@ -240,7 +240,7 @@ impl From> parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, confirmed_state_roots: value.import_data.confirmed_state_roots, consensus_context: OnDiskConsensusContext::from_consensus_context( - &value.import_data.consensus_context, + value.import_data.consensus_context, ), payload_verification_outcome: value.payload_verification_outcome, } diff --git a/beacon_node/store/src/consensus_context.rs b/beacon_node/store/src/consensus_context.rs index 0dd6635bd6e..08fad17b14b 100644 --- a/beacon_node/store/src/consensus_context.rs +++ b/beacon_node/store/src/consensus_context.rs @@ -1,6 +1,7 @@ use ssz_derive::{Decode, Encode}; use state_processing::ConsensusContext; -use types::{EthSpec, Hash256, Slot}; +use std::collections::HashMap; +use types::{AttestationData, BitList, EthSpec, Hash256, IndexedAttestation, Slot}; /// The consensus context is stored on disk as part of the data availability overflow cache. /// @@ -8,39 +9,48 @@ use types::{EthSpec, Hash256, Slot}; /// in-memory `ConsensusContext`. You MUST NOT change the fields of this struct without /// superstructing it and implementing a schema migration. #[derive(Debug, PartialEq, Clone, Encode, Decode)] -pub struct OnDiskConsensusContext { +pub struct OnDiskConsensusContext { /// Slot to act as an identifier/safeguard slot: Slot, /// Proposer index of the block at `slot`. 
proposer_index: Option, /// Block root of the block at `slot`. current_block_root: Option, + /// We keep the indexed attestations in the *in-memory* version of this struct so that we don't + /// need to regenerate them if roundtripping via this type *without* going to disk. + /// + /// They are not part of the on-disk format. + #[ssz(skip_serializing, skip_deserializing)] + indexed_attestations: + HashMap<(AttestationData, BitList), IndexedAttestation>, } -impl OnDiskConsensusContext { - pub fn from_consensus_context(ctxt: &ConsensusContext) -> Self { +impl OnDiskConsensusContext { + pub fn from_consensus_context(ctxt: ConsensusContext) -> Self { // Match exhaustively on fields here so we are forced to *consider* updating the on-disk // format when the `ConsensusContext` fields change. - let &ConsensusContext { + let ConsensusContext { slot, previous_epoch: _, current_epoch: _, proposer_index, current_block_root, - indexed_attestations: _, + indexed_attestations, } = ctxt; OnDiskConsensusContext { slot, proposer_index, current_block_root, + indexed_attestations, } } - pub fn into_consensus_context(self) -> ConsensusContext { + pub fn into_consensus_context(self) -> ConsensusContext { let OnDiskConsensusContext { slot, proposer_index, current_block_root, + indexed_attestations, } = self; let mut ctxt = ConsensusContext::new(slot); @@ -51,6 +61,6 @@ impl OnDiskConsensusContext { if let Some(block_root) = current_block_root { ctxt = ctxt.set_current_block_root(block_root); } - ctxt + ctxt.set_indexed_attestations(indexed_attestations) } } diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 68659e367f0..073d87be85b 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -59,6 +59,7 @@ impl ConsensusContext { } } + #[must_use] pub fn set_proposer_index(mut self, proposer_index: u64) -> Self { self.proposer_index = 
Some(proposer_index); self @@ -106,6 +107,7 @@ impl ConsensusContext { Ok(proposer_index) } + #[must_use] pub fn set_current_block_root(mut self, block_root: Hash256) -> Self { self.current_block_root = Some(block_root); self @@ -171,4 +173,16 @@ impl ConsensusContext { pub fn num_cached_indexed_attestations(&self) -> usize { self.indexed_attestations.len() } + + #[must_use] + pub fn set_indexed_attestations( + mut self, + attestations: HashMap< + (AttestationData, BitList), + IndexedAttestation, + >, + ) -> Self { + self.indexed_attestations = attestations; + self + } } From a359c74aea583127cb8ec21339159b4bf99ea6d9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 22 Apr 2024 22:19:26 +1000 Subject: [PATCH 39/41] Address half of Sean's review --- Cargo.lock | 2 -- Cargo.toml | 2 +- beacon_node/beacon_chain/Cargo.toml | 1 - beacon_node/beacon_chain/src/builder.rs | 6 ------ consensus/state_processing/Cargo.toml | 1 - consensus/types/Cargo.toml | 4 ++-- consensus/types/src/chain_spec.rs | 7 ------- testing/ef_tests/src/cases/ssz_generic.rs | 10 +++++++++- 8 files changed, 12 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 047bdf24a82..929aab20b9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -743,7 +743,6 @@ version = "0.2.0" dependencies = [ "bitvec 1.0.1", "bls", - "crossbeam-channel", "derivative", "environment", "eth1", @@ -7856,7 +7855,6 @@ dependencies = [ "tokio", "tree_hash", "types", - "vec_map", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c1751087c23..b9ed07b58bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,7 +106,6 @@ c-kzg = "1" clap = "2" compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.3" -crossbeam-channel = "0.5.8" delay_map = "0.3" derivative = "2" dirs = "3" @@ -146,6 +145,7 @@ rayon = "1.7" regex = "1" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.16" +rpds = "0.11" 
rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 22ad5d0d6ad..9c7c7febc5f 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -21,7 +21,6 @@ serde_json = { workspace = true } [dependencies] bitvec = { workspace = true } bls = { workspace = true } -crossbeam-channel = { workspace = true } derivative = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 1678b68f0cf..be6b1f9b2bf 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -519,12 +519,6 @@ where let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; - // Build the committee caches before storing. The database assumes that states have - // committee caches built before storing. - weak_subj_state - .build_all_committee_caches(&self.spec) - .map_err(|e| format!("Error building caches on checkpoint state: {:?}", e))?; - // Fill in the linear block roots between the checkpoint block's slot and the aligned // state's slot. All slots less than the block's slot will be handled by block backfill, // while states greater or equal to the checkpoint state will be handled by `migrate_db`. 
diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index d07763d1825..be5367eb08f 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -28,7 +28,6 @@ arbitrary = { workspace = true } lighthouse_metrics = { workspace = true } lazy_static = { workspace = true } derivative = { workspace = true } -vec_map = "0.8.2" [features] default = ["legacy-arith"] diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 4802481ae82..4b7d9f2b98d 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -53,7 +53,7 @@ smallvec = { workspace = true } maplit = { workspace = true } strum = { workspace = true } milhouse = { workspace = true } -rpds = "0.11.0" +rpds = { workspace = true } [dev-dependencies] criterion = { workspace = true } @@ -70,4 +70,4 @@ sqlite = [] # The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. # For simplicity `Arbitrary` is now derived regardless of the feature's presence. arbitrary-fuzz = [] -portable = ["bls/supranational-portable"] \ No newline at end of file +portable = ["bls/supranational-portable"] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ed6e39b8a92..e9345ab14ea 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -309,13 +309,6 @@ impl ChainSpec { } } - /// Return the name of the fork activated at `slot`, if any. - pub fn fork_activated_at_slot(&self, slot: Slot) -> Option { - let prev_slot_fork = self.fork_name_at_slot::(slot.saturating_sub(Slot::new(1))); - let slot_fork = self.fork_name_at_slot::(slot); - (slot_fork != prev_slot_fork).then_some(slot_fork) - } - /// Returns the fork version for a named fork. 
pub fn fork_version_for_name(&self, fork_name: ForkName) -> [u8; 4] { match fork_name { diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index e620f4509fc..8de3e217f00 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -8,7 +8,7 @@ use serde::{de::Error as SerdeError, Deserialize, Deserializer}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; use types::typenum::*; -use types::{BitList, BitVector, ForkName, VariableList, Vector}; +use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -139,6 +139,14 @@ impl Case for SszGeneric { [elem_ty => primitive_type] [length => typenum] )?; + type_dispatch!( + ssz_generic_test, + (&self.path), + FixedVector, + <>, + [elem_ty => primitive_type] + [length => typenum] + )?; } "bitlist" => { let mut limit = parts[1]; From 9bf31a2239d6ee63c2a139a3f593c8ed302bdbf6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 23 Apr 2024 10:37:06 +1000 Subject: [PATCH 40/41] More simplifications from Sean's review --- beacon_node/store/src/state_cache.rs | 8 ++- consensus/types/src/beacon_state.rs | 94 +++++++++++++++------------- 2 files changed, 56 insertions(+), 46 deletions(-) diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index db9b69d1d11..5c1faa7f2fd 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -35,7 +35,6 @@ pub struct StateCache { finalized_state: Option>, states: LruCache>, block_map: BlockMap, - capacity: NonZeroUsize, max_epoch: Epoch, } @@ -53,7 +52,6 @@ impl StateCache { finalized_state: None, states: LruCache::new(capacity), block_map: BlockMap::default(), - capacity, max_epoch: Epoch::new(0), } } @@ -62,6 +60,10 @@ impl StateCache { self.states.len() } + pub fn capacity(&self) -> usize { + self.states.cap().get() + } + pub fn 
update_finalized_state( &mut self, state_root: Hash256, @@ -149,7 +151,7 @@ impl StateCache { self.max_epoch = std::cmp::max(state.current_epoch(), self.max_epoch); // If the cache is full, use the custom cull routine to make room. - if let Some(over_capacity) = self.len().checked_sub(self.capacity.get()) { + if let Some(over_capacity) = self.len().checked_sub(self.capacity()) { self.cull(over_capacity + 1); } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index d1b86699c2d..5da81f6a752 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -230,6 +230,7 @@ impl From for Hash256 { mappings( map_beacon_state_base_fields(), map_beacon_state_base_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_base_tree_list_fields_immutable(groups(tree_lists)), ), bimappings(bimap_beacon_state_base_tree_list_fields( other_type = "BeaconStateBase", @@ -243,6 +244,7 @@ impl From for Hash256 { mappings( map_beacon_state_altair_fields(), map_beacon_state_altair_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_altair_tree_list_fields_immutable(groups(tree_lists)), ), bimappings(bimap_beacon_state_altair_tree_list_fields( other_type = "BeaconStateAltair", @@ -256,8 +258,9 @@ impl From for Hash256 { mappings( map_beacon_state_bellatrix_fields(), map_beacon_state_bellatrix_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_bellatrix_tree_list_fields_immutable(groups(tree_lists)), ), - bimappings(bimap_beacon_state_merge_tree_list_fields( + bimappings(bimap_beacon_state_bellatrix_tree_list_fields( other_type = "BeaconStateMerge", self_mutable, fallible, @@ -269,6 +272,7 @@ impl From for Hash256 { mappings( map_beacon_state_capella_fields(), map_beacon_state_capella_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_capella_tree_list_fields_immutable(groups(tree_lists)), ), 
bimappings(bimap_beacon_state_capella_tree_list_fields( other_type = "BeaconStateCapella", @@ -282,6 +286,7 @@ impl From for Hash256 { mappings( map_beacon_state_deneb_fields(), map_beacon_state_deneb_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_deneb_tree_list_fields_immutable(groups(tree_lists)), ), bimappings(bimap_beacon_state_deneb_tree_list_fields( other_type = "BeaconStateDeneb", @@ -295,6 +300,7 @@ impl From for Hash256 { mappings( map_beacon_state_electra_fields(), map_beacon_state_electra_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_electra_tree_list_fields_immutable(groups(tree_lists)), ), bimappings(bimap_beacon_state_electra_tree_list_fields( other_type = "BeaconStateElectra", @@ -628,21 +634,6 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } - /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. - pub fn get_validator_index_read_only( - &self, - pubkey: &PublicKeyBytes, - ) -> Result, Error> { - let pubkey_cache = self.pubkey_cache(); - if pubkey_cache.len() != self.validators().len() { - return Err(Error::PubkeyCacheIncomplete { - cache_len: pubkey_cache.len(), - registry_len: self.validators().len(), - }); - } - Ok(pubkey_cache.get(pubkey)) - } - /// The epoch corresponding to `self.slot()`. 
pub fn current_epoch(&self) -> Epoch { self.slot().epoch(E::slots_per_epoch()) @@ -1921,29 +1912,49 @@ impl BeaconState { } pub fn has_pending_mutations(&self) -> bool { - self.block_roots().has_pending_updates() - || self.state_roots().has_pending_updates() - || self.historical_roots().has_pending_updates() - || self.eth1_data_votes().has_pending_updates() - || self.validators().has_pending_updates() - || self.balances().has_pending_updates() - || self.randao_mixes().has_pending_updates() - || self.slashings().has_pending_updates() - || self - .previous_epoch_attestations() - .map_or(false, List::has_pending_updates) - || self - .current_epoch_attestations() - .map_or(false, List::has_pending_updates) - || self - .previous_epoch_participation() - .map_or(false, List::has_pending_updates) - || self - .current_epoch_participation() - .map_or(false, List::has_pending_updates) - || self - .inactivity_scores() - .map_or(false, List::has_pending_updates) + let mut any_pending_mutations = false; + match &self { + Self::Base(self_inner) => { + map_beacon_state_base_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } + Self::Altair(self_inner) => { + map_beacon_state_altair_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } + Self::Merge(self_inner) => { + map_beacon_state_bellatrix_tree_list_fields_immutable!( + self_inner, + |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + } + ); + } + Self::Capella(self_inner) => { + map_beacon_state_capella_tree_list_fields_immutable!( + self_inner, + |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + } + ); + } + Self::Deneb(self_inner) => { + map_beacon_state_deneb_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } + Self::Electra(self_inner) 
=> { + map_beacon_state_electra_tree_list_fields_immutable!( + self_inner, + |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + } + ); + } + }; + any_pending_mutations } /// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache. @@ -2042,7 +2053,7 @@ impl BeaconState { } (Self::Altair(_), _) => (), (Self::Merge(self_inner), Self::Merge(base_inner)) => { - bimap_beacon_state_merge_tree_list_fields!( + bimap_beacon_state_bellatrix_tree_list_fields!( self_inner, base_inner, |_, self_field, base_field| { self_field.rebase_on(base_field) } @@ -2169,8 +2180,6 @@ impl BeaconState { pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { match self { Self::Base(inner) => { - inner.previous_epoch_attestations.apply_updates()?; - inner.current_epoch_attestations.apply_updates()?; map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) } Self::Altair(inner) => { @@ -2189,7 +2198,6 @@ impl BeaconState { map_beacon_state_electra_tree_list_fields!(inner, |_, x| { x.apply_updates() }) } } - self.eth1_data_votes_mut().apply_updates()?; Ok(()) } From 970f3dfc6eca93bbdbf11b89e6c93157eb693bcd Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 23 Apr 2024 11:53:19 +1000 Subject: [PATCH 41/41] Cache state after get_advanced_hot_state --- beacon_node/store/src/hot_cold_store.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8f7c52ffa03..484a1139bf9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -704,9 +704,6 @@ impl, Cold: ItemStore> HotColdDB /// - `result_state_root == state.canonical_root()` /// - `state.slot() <= max_slot` /// - `state.get_latest_block_root(result_state_root) == block_root` - /// - /// Presently this is only used to avoid loading the un-advanced split state, but in future will - /// 
be expanded to return states from an in-memory cache. pub fn get_advanced_hot_state( &self, block_root: Hash256, @@ -745,9 +742,23 @@ impl, Cold: ItemStore> HotColdDB } else { state_root }; - let opt_state = self + let mut opt_state = self .load_hot_state(&state_root)? .map(|(state, _block_root)| (state_root, state)); + + if let Some((state_root, state)) = opt_state.as_mut() { + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; + self.state_cache + .lock() + .put_state(*state_root, block_root, state)?; + debug!( + self.log, + "Cached state"; + "state_root" => ?state_root, + "slot" => state.slot(), + ); + } drop(split); Ok(opt_state) }