From 08fe3f8cd3e5bb054cd8aa853838895d19a02bb1 Mon Sep 17 00:00:00 2001
From: Paul Hauner
Date: Sat, 10 Dec 2022 09:13:42 +1100
Subject: [PATCH] Squashed commit of #3775

Squashed commit of the following:

commit ad08d07e6557a054fcef118afe9ff3d758e84414
Author: Paul Hauner
Date:   Mon Dec 5 16:51:06 2022 +1100

    Remove crits for late block

commit 8e85d625185056c1e5ac81edc5da6d99b2186d07
Author: Paul Hauner
Date:   Mon Dec 5 16:48:43 2022 +1100

    Downgrade log for payload reveal failure

commit 84392d63fa8a30dcb8b410fa70468830cf72999a
Author: Michael Sproul
Date:   Fri Dec 2 00:07:43 2022 +0000

    Delete DB schema migrations for v11 and earlier (#3761)

    ## Proposed Changes

    Now that the Gnosis merge is scheduled, all users should have upgraded
    beyond Lighthouse v3.0.0. Accordingly we can delete schema migrations
    for versions prior to v3.0.0.

    ## Additional Info

    I also deleted the state cache stuff I added in #3714 as it turned out
    to be useless for the light client proofs due to the one-slot offset.

commit 18c9be595dbeca3ba9528a907939d2d00b81a9d4
Author: Mac L
Date:   Thu Dec 1 06:03:53 2022 +0000

    Add API endpoint to count statuses of all validators (#3756)

    ## Issue Addressed

    #3724

    ## Proposed Changes

    Adds an endpoint to quickly count the number of occurrences of each
    status in the validator set.

    ## Usage

    ```bash
    curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq
    ```

    ```json
    {
        "data": {
            "active_ongoing": 479508,
            "active_exiting": 0,
            "active_slashed": 0,
            "pending_initialized": 28,
            "pending_queued": 0,
            "withdrawal_possible": 933,
            "withdrawal_done": 0,
            "exited_unslashed": 0,
            "exited_slashed": 3
        }
    }
    ```

commit 22115049ee753e5d2ba6a7fd194668ad0f7e5f99
Author: Michael Sproul
Date:   Wed Nov 30 05:22:58 2022 +0000

    Prioritise important parts of block processing (#3696)

    ## Issue Addressed

    Closes https://github.com/sigp/lighthouse/issues/2327

    ## Proposed Changes

    This is an extension of some ideas I implemented while working on
    `tree-states`:

    - Cache the indexed attestations from blocks in the `ConsensusContext`.
      Previously we were re-computing them 3-4 times over (see the sketch
      below).
    - Clean up `import_block` by splitting each part into `import_block_XXX`.
    - Move some stuff off hot paths, specifically:
      - Relocate non-essential tasks that were running between receiving the
        payload verification status and priming the early attester cache.
        These tasks are moved after the cache priming:
        - Attestation observation
        - Validator monitor updates
        - Slasher updates
        - Updating the shuffling cache
      - Fork choice attestation observation now happens at the end of block
        verification, in parallel with payload verification (this seems to
        save 5-10ms).
      - Payload verification now happens _before_ advancing the pre-state
        and writing it to disk! States were previously being written eagerly
        and adding ~20-30ms in front of verifying the execution payload.
        State catchup also sometimes takes ~500ms if we get a cache miss and
        need to rebuild the tree hash cache.

    The remaining task that's taking substantial time (~20ms) is importing
    the block to fork choice. I _think_ this is because of pull-tips, and we
    should be able to optimise it out with a clever total active balance
    cache in the state (which would be computed in parallel with payload
    verification). I've decided to leave that for future work though. For
    now it can be observed via the new
    `beacon_block_processing_post_exec_pre_attestable_seconds` metric.
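    To make the first point concrete, here is a minimal, self-contained
    sketch of the memoization idea. The `IndexedAttestation` stand-in and
    the keying-by-position scheme are simplifications made for this sketch,
    not Lighthouse's actual `ConsensusContext` API:

    ```rust
    use std::collections::HashMap;

    // Simplified stand-in for the real consensus type.
    #[derive(Clone, Debug, PartialEq)]
    struct IndexedAttestation {
        attesting_indices: Vec<u64>,
    }

    #[derive(Default)]
    struct ConsensusContext {
        // Keyed by the attestation's position in the block body (an
        // assumption made for this sketch).
        indexed_attestations: HashMap<usize, IndexedAttestation>,
    }

    impl ConsensusContext {
        /// Return the indexed attestation at position `i`, computing it at
        /// most once.
        fn get_indexed_attestation(
            &mut self,
            i: usize,
            compute: impl FnOnce() -> IndexedAttestation,
        ) -> &IndexedAttestation {
            // `or_insert_with` only evaluates `compute` on a cache miss, so
            // the expensive committee lookup runs once instead of 3-4 times.
            self.indexed_attestations.entry(i).or_insert_with(compute)
        }
    }

    fn main() {
        let mut ctxt = ConsensusContext::default();
        let first = ctxt
            .get_indexed_attestation(0, || IndexedAttestation {
                attesting_indices: vec![10, 42, 99],
            })
            .clone();
        // A later consumer (e.g. the slasher or fork choice) hits the
        // cache; this closure is never evaluated.
        let second = ctxt.get_indexed_attestation(0, || unreachable!()).clone();
        assert_eq!(first, second);
    }
    ```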
    Co-authored-by: Michael Sproul

commit b4f4c0d25325f1800cfc59c891f9d7d72298382f
Author: Divma
Date:   Wed Nov 30 03:21:35 2022 +0000

    Ipv6 bootnodes (#3752)

    ## Issue Addressed

    Our bootnodes currently support only IPv4. This change makes them
    support IPv6 as well.

    ## Proposed Changes

    - Adds the code necessary to update the bootnodes to run on dual-stack
      nodes and therefore contact and store IPv6 nodes.
    - Adds some metrics about the connectivity type of stored peers. It
      might have been nice to see some metrics over the sessions but that
      feels out of scope right now.

    ## Additional Info

    - Some code quality improvements sneaked in since the changes seemed
      small.
    - I think it depends on the OS, but enabling mapped addresses on an IPv6
      node without dual-stack support enabled could fail silently, making
      these nodes effectively IPv6-only. In the future I'll probably change
      this to use two sockets, which should fail loudly.

commit 3534c85e306a57aff79990d5a0a5900f3b1819a1
Author: GeemoCandama
Date:   Tue Nov 29 08:19:27 2022 +0000

    Optimize finalized chain sync by skipping newPayload messages (#3738)

    ## Issue Addressed

    #3704

    ## Proposed Changes

    Adds an `is_syncing_finalized: bool` parameter to the block verification
    functions. Sets the `payload_verification_status` to `Optimistic` if
    `is_syncing_finalized` is true. Uses the `SyncState` in `NetworkGlobals`
    in `BeaconProcessor` to retrieve the syncing status.

    ## Additional Info

    I could implement `FinalizedSignatureVerifiedBlock` if you think it
    would be nicer.

commit a2969ba7de72ddbf86daa70e88582b6387f42431
Author: Paul Hauner
Date:   Tue Nov 29 05:51:42 2022 +0000

    Improve debugging experience for builder proposals (#3725)

    ## Issue Addressed

    NA

    ## Proposed Changes

    This PR sets out to improve the logging/metrics experience when
    interacting with the builder. Namely, it:

    - Adds/changes metrics (see the "Metrics Changes" section).
    - Adds new logs which show the duration of requests to the builder/local
      EL.
    - Refactors existing logs for consistency and so that the `parent_hash`
      is included in all relevant logs (we can grep for this field when
      trying to trace the flow of block production).

    Additionally, when I was implementing this PR I noticed that we skip
    some verification of the builder payload in the scenario where the
    builder returns `Ok` but the local EL returns with `Err`. Namely, we
    were skipping the bid signature and other values like parent hash and
    prev randao. In this PR I've changed it so we *always* check these
    values and reject the bid if they're incorrect. With these changes,
    we'll sometimes choose to skip a proposal rather than propose something
    invalid -- that's the only side-effect of the changes that I can see.

    ## Metrics Changes

    - Changed: `execution_layer_request_times`:
      - `method = "get_blinded_payload_local"`: time taken to get a payload
        from a local EE.
      - `method = "get_blinded_payload_builder"`: time taken to get a
        blinded payload from a builder.
      - `method = "post_blinded_payload_builder"`: time taken to get a
        builder to reveal a payload they've previously supplied us.
    - `execution_layer_get_payload_outcome`
      - `outcome = "success"`: we successfully produced a payload from a
        builder or local EE.
      - `outcome = "failure"`: we were unable to get a payload from a
        builder or local EE.
    - New: `execution_layer_builder_reveal_payload_outcome`
      - `outcome = "success"`: a builder revealed a payload from a signed,
        blinded block.
      - `outcome = "failure"`: the builder did not reveal the payload.
    - New: `execution_layer_get_payload_source`
      - `type = "builder"`: we used a payload from a builder to produce a
        block.
      - `type = "local"`: we used a payload from a local EE to produce a
        block.
    - New: `execution_layer_get_payload_builder_rejections` has a `reason`
      field to describe why we rejected a payload from a builder.
    - New: `execution_layer_payload_bids` tracks the bid (in gwei) from the
      builder or local EE (local EE not yet supported, waiting on EEs to
      expose the value). Can only record values that fit inside an i64
      (roughly 9 billion ETH).

    ## Additional Info

    NA

commit 99ec9d9bafd21cee3197162455c41f4e388559ed
Author: kevinbogner
Date:   Mon Nov 28 10:05:43 2022 +0000

    Add Run a Node guide (#3681)

    ## Issue Addressed

    Related to #3672

    ## Proposed Changes

    - Added a guide to run a node. Mainly copy and paste from
      'Merge Migration' and 'Checkpoint Sync'.
    - Ranked it high in the ToC:
      - Introduction
      - Installation
      - Run a Node
      - Become a Validator
      - ...
    - Hid 'Merge Migration' in the ToC.

    ## Additional Info

    - Should I add/rephrase/delete something?
    - Now there is some redundancy:
      - 'Run a Node' and 'Checkpoint Sync' contain similar information.
      - Same for 'Run a Node' and 'Become a Validator'.

    Co-authored-by: kevinbogner <114221396+kevinbogner@users.noreply.github.com>
    Co-authored-by: Michael Sproul

commit 27790170760f33b2b4b5344dad10fb225a468bb1
Author: Age Manning
Date:   Mon Nov 28 07:36:52 2022 +0000

    Gossipsub fast message id change (#3755)

    This mixes the topic into our fast message id for more consistent
    tracking of messages across topics.

commit c881b803679760995d637c60c0afc86e0012cca4
Author: Mac L
Date:   Mon Nov 28 00:22:53 2022 +0000

    Add CLI flag for gui requirements (#3731)

    ## Issue Addressed

    #3723

    ## Proposed Changes

    Adds a new CLI flag `--gui` which enables all the various flags required
    for the GUI to function properly. Currently enables the `--http` and
    `--validator-monitor-auto` flags.

commit 969ff240cde7f533ac2c6e0deef54cae57d1a23f
Author: Mac L
Date:   Fri Nov 25 07:57:11 2022 +0000

    Add CLI flag to opt in to world-readable log files (#3747)

    ## Issue Addressed

    #3732

    ## Proposed Changes

    Add a CLI flag to allow users to opt out of the restrictive permissions
    of the log files.

    ## Additional Info

    This is not recommended for most users. The log files can contain
    sensitive information such as validator indices, public keys and API
    tokens (see #2438). However, some users running a multi-user setup may
    find this helpful if they understand the risks involved.

commit e9bf7f7cc1bb908a2e1aa4164a1966df591e8ab0
Author: antondlr
Date:   Fri Nov 25 07:57:10 2022 +0000

    remove commas from comma-separated kv pairs (#3737)

    ## Issue Addressed

    Logs are emitted as a comma-separated kv list, but the values sometimes
    contain commas themselves, which breaks parsing.

commit d5a2de759bc089d122a95e17eccb0e4eebef752a
Author: Giulio rebuffo
Date:   Fri Nov 25 05:19:00 2022 +0000

    Added LightClientBootstrap V1 (#3711)

    ## Issue Addressed

    Partially addresses #3651

    ## Proposed Changes

    Adds server-side support for the light_client_bootstrap_v1 topic.

    ## Additional Info

    This PR creates a bootstrap from scratch each time, without using a
    cache. I do not know how necessary a cache is in this case, as this
    topic is not supposed to be called frequently, and IMHO we can just
    prevent abuse by using the limiter (a toy sketch of that idea follows
    below) -- but let me know what you think, whether there is any caveat
    to this, or whether it is necessary only for the sake of good practice.
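    For illustration, here is a toy token-bucket limiter that admits a
    bounded number of requests per period. This is a simplified sketch, not
    Lighthouse's actual RPC rate limiter, and the quota numbers are
    invented:

    ```rust
    use std::time::{Duration, Instant};

    /// Toy token bucket: allow at most `max_tokens` requests per
    /// `refill_period`.
    struct SimpleLimiter {
        max_tokens: f64,
        tokens: f64,
        refill_per_sec: f64,
        last_update: Instant,
    }

    impl SimpleLimiter {
        fn new(max_tokens: f64, refill_period: Duration) -> Self {
            Self {
                max_tokens,
                tokens: max_tokens,
                refill_per_sec: max_tokens / refill_period.as_secs_f64(),
                last_update: Instant::now(),
            }
        }

        /// Returns true if a request is admitted, consuming one token.
        fn allow(&mut self) -> bool {
            let now = Instant::now();
            let elapsed = now.duration_since(self.last_update).as_secs_f64();
            self.last_update = now;
            // Refill proportionally to elapsed time, capped at bucket size.
            self.tokens =
                (self.tokens + elapsed * self.refill_per_sec).min(self.max_tokens);
            if self.tokens >= 1.0 {
                self.tokens -= 1.0;
                true
            } else {
                false
            }
        }
    }

    fn main() {
        // Invented quota: one bootstrap request every 10 seconds.
        let mut limiter = SimpleLimiter::new(1.0, Duration::from_secs(10));
        assert!(limiter.allow());
        assert!(!limiter.allow()); // immediate repeat request is rejected
    }
    ```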
Co-authored-by: Pawan Dhananjay --- beacon_node/beacon_chain/src/beacon_chain.rs | 784 ++++++++++-------- .../src/beacon_fork_choice_store.rs | 22 +- .../beacon_chain/src/block_verification.rs | 259 +++--- beacon_node/beacon_chain/src/chain_config.rs | 3 - .../beacon_chain/src/execution_payload.rs | 48 +- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 5 + .../beacon_chain/src/persisted_fork_choice.rs | 23 +- beacon_node/beacon_chain/src/schema_change.rs | 163 +--- .../src/schema_change/migration_schema_v10.rs | 97 --- .../src/schema_change/migration_schema_v11.rs | 77 -- .../src/schema_change/migration_schema_v6.rs | 28 - .../src/schema_change/migration_schema_v7.rs | 341 -------- .../src/schema_change/migration_schema_v8.rs | 50 -- .../src/schema_change/migration_schema_v9.rs | 176 ---- .../beacon_chain/src/schema_change/types.rs | 315 ------- .../beacon_chain/src/snapshot_cache.rs | 21 - beacon_node/beacon_chain/src/test_utils.rs | 12 +- .../src/validator_pubkey_cache.rs | 48 +- .../beacon_chain/tests/block_verification.rs | 67 +- .../tests/payload_invalidation.rs | 16 +- beacon_node/beacon_chain/tests/store_tests.rs | 5 +- beacon_node/beacon_chain/tests/tests.rs | 5 +- beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/http_api/src/lib.rs | 14 + beacon_node/http_api/src/publish_blocks.rs | 23 +- beacon_node/http_api/src/ui.rs | 71 ++ beacon_node/lighthouse_network/src/config.rs | 12 +- .../src/peer_manager/mod.rs | 5 +- .../src/peer_manager/network_behaviour.rs | 2 +- .../src/rpc/codec/ssz_snappy.rs | 18 +- .../lighthouse_network/src/rpc/handler.rs | 2 +- .../lighthouse_network/src/rpc/methods.rs | 18 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 14 +- .../lighthouse_network/src/rpc/outbound.rs | 12 +- .../lighthouse_network/src/rpc/protocol.rs | 76 +- .../src/rpc/rate_limiter.rs | 11 + .../src/service/api_types.rs | 14 +- .../lighthouse_network/src/service/mod.rs | 21 +- .../src/types/sync_state.rs | 11 + .../network/src/beacon_processor/mod.rs | 67 +- .../beacon_processor/worker/gossip_methods.rs | 15 +- .../src/beacon_processor/worker/mod.rs | 2 +- .../beacon_processor/worker/rpc_methods.rs | 75 +- .../beacon_processor/worker/sync_methods.rs | 26 +- beacon_node/network/src/router/mod.rs | 4 + beacon_node/network/src/router/processor.rs | 12 + beacon_node/network/src/sync/manager.rs | 2 +- .../network/src/sync/network_context.rs | 4 +- beacon_node/src/cli.rs | 8 + beacon_node/src/config.rs | 19 +- beacon_node/store/src/hot_cold_store.rs | 10 - book/src/SUMMARY.md | 7 +- book/src/api-lighthouse.md | 22 + book/src/database-migrations.md | 6 +- book/src/intro.md | 1 + book/src/merge-migration.md | 1 + book/src/run_a_node.md | 171 ++++ boot_node/src/config.rs | 37 +- boot_node/src/server.rs | 90 +- consensus/ssz_types/src/bitfield.rs | 6 +- .../state_processing/src/consensus_context.rs | 73 +- .../src/per_block_processing.rs | 13 +- .../block_signature_verifier.rs | 70 +- .../process_operations.rs | 38 +- .../verify_attestation.rs | 19 +- lcli/src/main.rs | 1 + lcli/src/transition_blocks.rs | 16 +- lighthouse/environment/src/lib.rs | 4 +- lighthouse/src/main.rs | 12 + lighthouse/tests/beacon_node.rs | 32 +- testing/ef_tests/src/cases/fork_choice.rs | 3 +- testing/ef_tests/src/cases/operations.rs | 12 +- testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 76 files changed, 1738 insertions(+), 2035 deletions(-) delete mode 100644 
beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/types.rs create mode 100644 beacon_node/http_api/src/ui.rs create mode 100644 book/src/run_a_node.md diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6f409fdadc0..32ae742d86f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -18,7 +18,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; +use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; @@ -73,7 +73,7 @@ use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ - common::{get_attesting_indices_from_state, get_indexed_attestation}, + common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ errors::AttestationValidationError, verify_attestation_for_block_inclusion, @@ -997,46 +997,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Run a function with mutable access to a state for `block_root`. - /// - /// The primary purpose of this function is to borrow a state with its tree hash cache - /// from the snapshot cache *without moving it*. This means that calls to this function should - /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability - /// to delay block import. - /// - /// If there is no appropriate state in the snapshot cache then one will be loaded from disk. - /// If no state is found on disk then `Ok(None)` will be returned. - /// - /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used, - /// which can inform logging/metrics. - /// - /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour - /// of `tree-states`, where all caches are CoW and everything is good in the world. - pub fn with_mutable_state_for_block>( - &self, - block: &SignedBeaconBlock, - block_root: Hash256, - f: F, - ) -> Result, Error> - where - F: FnOnce(&mut BeaconState, bool) -> Result, - { - if let Some(state) = self - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout)? - .borrow_unadvanced_state_mut(block_root) - { - let cache_hit = true; - f(state, cache_hit).map(Some) - } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? 
{ - let cache_hit = false; - f(&mut state, cache_hit).map(Some) - } else { - Ok(None) - } - } - /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed @@ -2341,6 +2301,7 @@ impl BeaconChain { self: &Arc, chain_segment: Vec>>, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2409,6 +2370,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, count_unrealized, + notify_execution_layer, ) .await { @@ -2497,6 +2459,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2510,8 +2473,11 @@ impl BeaconChain { // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { - let execution_pending = - unverified_block.into_execution_pending_block(block_root, &chain)?; + let execution_pending = unverified_block.into_execution_pending_block( + block_root, + &chain, + notify_execution_layer, + )?; chain .import_execution_pending_block(execution_pending, count_unrealized) .await @@ -2581,6 +2547,7 @@ impl BeaconChain { confirmed_state_roots, payload_verification_handle, parent_eth1_finalization_data, + consensus_context, } = execution_pending_block; let PayloadVerificationOutcome { @@ -2634,6 +2601,7 @@ impl BeaconChain { count_unrealized, parent_block, parent_eth1_finalization_data, + consensus_context, ) }, "payload_verification_handle", @@ -2659,70 +2627,36 @@ impl BeaconChain { count_unrealized: CountUnrealized, parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, + mut consensus_context: ConsensusContext, ) -> Result> { + // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- + // Everything in this initial section is on the hot path between processing the block and + // being able to attest to it. DO NOT add any extra processing in this initial section + // unless it must run before fork choice. + // ----------------------------------------------------------------------------------------- let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let block = signed_block.message(); + let post_exec_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_POST_EXEC_PROCESSING); - let attestation_observation_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); - - // Iterate through the attestations in the block and register them as an "observed - // attestation". This will stop us from propagating them on the gossip network. - for a in signed_block.message().body().attestations() { - match self.observed_attestations.write().observe_item(a, None) { - // If the observation was successful or if the slot for the attestation was too - // low, continue. - // - // We ignore `SlotTooLow` since this will be very common whilst syncing. - Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {} - Err(e) => return Err(BlockError::BeaconChainError(e.into())), - } - } - - metrics::stop_timer(attestation_observation_timer); - - // If a slasher is configured, provide the attestations from the block. 
- if let Some(slasher) = self.slasher.as_ref() { - for attestation in signed_block.message().body().attestations() { - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - slasher.accept_attestation(indexed_attestation); - } - } + // Check against weak subjectivity checkpoint. + self.check_block_against_weak_subjectivity_checkpoint(block, block_root, &state)?; // If there are new validators in this block, update our pubkey cache. // - // We perform this _before_ adding the block to fork choice because the pubkey cache is - // used by attestation processing which will only process an attestation if the block is - // known to fork choice. This ordering ensure that the pubkey cache is always up-to-date. - self.validator_pubkey_cache + // The only keys imported here will be ones for validators deposited in this block, because + // the cache *must* already have been updated for the parent block when it was imported. + // Newly deposited validators are not active and their keys are not required by other parts + // of block processing. The reason we do this here and not after making the block attestable + // is so we don't have to think about lock ordering with respect to the fork choice lock. + // There are a bunch of places where we lock both fork choice and the pubkey cache and it + // would be difficult to check that they all lock fork choice first. + let mut kv_store_ops = self + .validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; - // For the current and next epoch of this state, ensure we have the shuffling from this - // block in our cache. - for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] { - let shuffling_id = AttestationShufflingId::new(block_root, &state, *relative_epoch)?; - - let shuffling_is_cached = self - .shuffling_cache - .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? - .contains(&shuffling_id); - - if !shuffling_is_cached { - state.build_committee_cache(*relative_epoch, &self.spec)?; - let committee_cache = state.committee_cache(*relative_epoch)?; - self.shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? - .insert_committee_cache(shuffling_id, committee_cache); - } - } - // Apply the state to the attester cache, only if it is from the previous epoch or later. // // In a perfect scenario there should be no need to add previous-epoch states to the cache. @@ -2734,52 +2668,7 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } - // Alias for readability. - let block = signed_block.message(); - - // Only perform the weak subjectivity check if it was configured. - if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { - // Note: we're using the finalized checkpoint from the head state, rather than fork - // choice. - // - // We are doing this to ensure that we detect changes in finalization. It's possible - // that fork choice has already been updated to the finalized checkpoint in the block - // we're importing. - let current_head_finalized_checkpoint = - self.canonical_head.cached_head().finalized_checkpoint(); - // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. 
- let new_finalized_checkpoint = state.finalized_checkpoint(); - - // This ensures we only perform the check once. - if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch) - && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch) - { - if let Err(e) = - self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, &state) - { - let mut shutdown_sender = self.shutdown_sender(); - crit!( - self.log, - "Weak subjectivity checkpoint verification failed while importing block!"; - "block_root" => ?block_root, - "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, - "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, - "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, - "error" => ?e, - ); - crit!(self.log, "You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network."); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Weak subjectivity checkpoint verification failed. Provided block root is not a checkpoint." - )) - .map_err(|err| BlockError::BeaconChainError(BeaconChainError::WeakSubjectivtyShutdownError(err)))?; - return Err(BlockError::WeakSubjectivityConflict); - } - } - } - - // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by + // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by // avoiding taking other locks whilst holding this lock. let mut fork_choice = self.canonical_head.fork_choice_write_lock(); @@ -2809,77 +2698,6 @@ impl BeaconChain { .map_err(|e| BlockError::BeaconChainError(e.into()))?; } - // Allow the validator monitor to learn about a new valid state. - self.validator_monitor - .write() - .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state); - let validator_monitor = self.validator_monitor.read(); - - // Register each attester slashing in the block with fork choice. - for attester_slashing in block.body().attester_slashings() { - fork_choice.on_attester_slashing(attester_slashing); - } - - // Register each attestation in the block with the fork choice service. - for attestation in block.body().attestations() { - let _fork_choice_attestation_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - let attestation_target_epoch = attestation.data.target.epoch; - - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - - match fork_choice.on_attestation( - current_slot, - &indexed_attestation, - AttestationFromBlock::True, - &self.spec, - ) { - Ok(()) => Ok(()), - // Ignore invalid attestations whilst importing attestations from a block. The - // block might be very old and therefore the attestations useless to fork choice. - Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), - Err(e) => Err(BlockError::BeaconChainError(e.into())), - }?; - - // To avoid slowing down sync, only register attestations for the - // `observed_block_attesters` if they are from the previous epoch or later. 
- if attestation_target_epoch + 1 >= current_epoch { - let mut observed_block_attesters = self.observed_block_attesters.write(); - for &validator_index in &indexed_attestation.attesting_indices { - if let Err(e) = observed_block_attesters - .observe_validator(attestation_target_epoch, validator_index as usize) - { - debug!( - self.log, - "Failed to register observed block attester"; - "error" => ?e, - "epoch" => attestation_target_epoch, - "validator_index" => validator_index, - ) - } - } - } - - // Only register this with the validator monitor when the block is sufficiently close to - // the current slot. - if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch() - + block.slot().as_u64() - >= current_slot.as_u64() - { - match fork_choice.get_block(&block.parent_root()) { - Some(parent_block) => validator_monitor.register_attestation_in_block( - &indexed_attestation, - parent_block.slot, - &self.spec, - ), - None => warn!(self.log, "Failed to get parent block"; "slot" => %block.slot()), - } - } - } - // If the block is recent enough and it was not optimistically imported, check to see if it // becomes the head block. If so, apply it to the early attester cache. This will allow // attestations to the block without waiting for the block and state to be inserted to the @@ -2928,56 +2746,28 @@ impl BeaconChain { ), } } + drop(post_exec_timer); - // Register sync aggregate with validator monitor - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - // `SyncCommittee` for the sync_aggregate should correspond to the duty slot - let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let sync_committee = self.sync_committee_at_epoch(duty_epoch)?; - let participant_pubkeys = sync_committee - .pubkeys - .iter() - .zip(sync_aggregate.sync_committee_bits.iter()) - .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) - .collect::>(); - - validator_monitor.register_sync_aggregate_in_block( - block.slot(), - block.parent_root(), - participant_pubkeys, - ); - } - - for exit in block.body().voluntary_exits() { - validator_monitor.register_block_voluntary_exit(&exit.message) - } - - for slashing in block.body().attester_slashings() { - validator_monitor.register_block_attester_slashing(slashing) - } + // ---------------------------- BLOCK PROBABLY ATTESTABLE ---------------------------------- + // Most blocks are now capable of being attested to thanks to the `early_attester_cache` + // cache above. Resume non-essential processing. + // ----------------------------------------------------------------------------------------- - for slashing in block.body().proposer_slashings() { - validator_monitor.register_block_proposer_slashing(slashing) - } - - drop(validator_monitor); - - // Only present some metrics for blocks from the previous epoch or later. - // - // This helps avoid noise in the metrics during sync. - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 1 >= self.epoch()? 
{ - metrics::observe( - &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, - block.body().attestations().len() as f64, - ); - - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - metrics::set_gauge( - &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, - sync_aggregate.num_set_bits() as i64, - ); - } - } + self.import_block_update_shuffling_cache(block_root, &mut state)?; + self.import_block_observe_attestations( + block, + &state, + &mut consensus_context, + current_epoch, + ); + self.import_block_update_validator_monitor( + block, + &state, + &mut consensus_context, + current_slot, + parent_block.slot(), + ); + self.import_block_update_slasher(block, &state, &mut consensus_context); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); @@ -2994,7 +2784,9 @@ impl BeaconChain { ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); - if let Err(e) = self.store.do_atomically(ops) { + kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); + + if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) { error!( self.log, "Database write failed!"; @@ -3002,6 +2794,10 @@ impl BeaconChain { "error" => ?e, ); + // Clear the early attester cache to prevent attestations which we would later be unable + // to verify due to the failure. + self.early_attester_cache.clear(); + // Since the write failed, try to revert the canonical head back to what was stored // in the database. This attempts to prevent inconsistency between the database and // fork choice. @@ -3044,6 +2840,7 @@ impl BeaconChain { eth1_deposit_index: state.eth1_deposit_index(), }; let current_finalized_checkpoint = state.finalized_checkpoint(); + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3051,7 +2848,7 @@ impl BeaconChain { snapshot_cache.insert( BeaconSnapshot { beacon_state: state, - beacon_block: signed_block, + beacon_block: signed_block.clone(), beacon_block_root: block_root, }, None, @@ -3070,22 +2867,312 @@ impl BeaconChain { self.head_tracker .register_block(block_root, parent_root, slot); - // Send an event to the `events` endpoint after fully processing the block. - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_block_subscribers() { - event_handler.register(EventKind::Block(SseBlock { - slot, - block: block_root, - execution_optimistic: payload_verification_status.is_optimistic(), - })); + metrics::stop_timer(db_write_timer); + + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); + + // Update the deposit contract cache. + self.import_block_update_deposit_contract_finalization( + block, + block_root, + current_epoch, + current_finalized_checkpoint, + current_eth1_finalization_data, + parent_eth1_finalization_data, + parent_block.slot(), + ); + + // Inform the unknown block cache, in case it was waiting on this block. + self.pre_finalization_block_cache + .block_processed(block_root); + + self.import_block_update_metrics_and_events( + block, + block_root, + block_time_imported, + payload_verification_status, + current_slot, + ); + + Ok(block_root) + } + + /// Check block's consistentency with any configured weak subjectivity checkpoint. + fn check_block_against_weak_subjectivity_checkpoint( + &self, + block: BeaconBlockRef, + block_root: Hash256, + state: &BeaconState, + ) -> Result<(), BlockError> { + // Only perform the weak subjectivity check if it was configured. 
+ let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint { + checkpoint + } else { + return Ok(()); + }; + // Note: we're using the finalized checkpoint from the head state, rather than fork + // choice. + // + // We are doing this to ensure that we detect changes in finalization. It's possible + // that fork choice has already been updated to the finalized checkpoint in the block + // we're importing. + let current_head_finalized_checkpoint = + self.canonical_head.cached_head().finalized_checkpoint(); + // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. + let new_finalized_checkpoint = state.finalized_checkpoint(); + + // This ensures we only perform the check once. + if current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch + && wss_checkpoint.epoch <= new_finalized_checkpoint.epoch + { + if let Err(e) = + self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, state) + { + let mut shutdown_sender = self.shutdown_sender(); + crit!( + self.log, + "Weak subjectivity checkpoint verification failed while importing block!"; + "block_root" => ?block_root, + "parent_root" => ?block.parent_root(), + "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, + "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, + "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, + "error" => ?e + ); + crit!( + self.log, + "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network." + ); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Weak subjectivity checkpoint verification failed. \ + Provided block root is not a checkpoint.", + )) + .map_err(|err| { + BlockError::BeaconChainError( + BeaconChainError::WeakSubjectivtyShutdownError(err), + ) + })?; + return Err(BlockError::WeakSubjectivityConflict); } } + Ok(()) + } - metrics::stop_timer(db_write_timer); + /// Process a block for the validator monitor, including all its constituent messages. + fn import_block_update_validator_monitor( + &self, + block: BeaconBlockRef, + state: &BeaconState, + ctxt: &mut ConsensusContext, + current_slot: Slot, + parent_block_slot: Slot, + ) { + // Only register blocks with the validator monitor when the block is sufficiently close to + // the current slot. + if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch() + + block.slot().as_u64() + < current_slot.as_u64() + { + return; + } - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); + // Allow the validator monitor to learn about a new valid state. + self.validator_monitor + .write() + .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state); + + let validator_monitor = self.validator_monitor.read(); + + // Sync aggregate. 
+ if let Ok(sync_aggregate) = block.body().sync_aggregate() { + // `SyncCommittee` for the sync_aggregate should correspond to the duty slot + let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + + match self.sync_committee_at_epoch(duty_epoch) { + Ok(sync_committee) => { + let participant_pubkeys = sync_committee + .pubkeys + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) + .collect::>(); + + validator_monitor.register_sync_aggregate_in_block( + block.slot(), + block.parent_root(), + participant_pubkeys, + ); + } + Err(e) => { + warn!( + self.log, + "Unable to fetch sync committee"; + "epoch" => duty_epoch, + "purpose" => "validator monitor", + "error" => ?e, + ); + } + } + } + + // Attestations. + for attestation in block.body().attestations() { + let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) { + Ok(indexed) => indexed, + Err(e) => { + debug!( + self.log, + "Failed to get indexed attestation"; + "purpose" => "validator monitor", + "attestation_slot" => attestation.data.slot, + "error" => ?e, + ); + continue; + } + }; + validator_monitor.register_attestation_in_block( + indexed_attestation, + parent_block_slot, + &self.spec, + ); + } + + for exit in block.body().voluntary_exits() { + validator_monitor.register_block_voluntary_exit(&exit.message) + } + + for slashing in block.body().attester_slashings() { + validator_monitor.register_block_attester_slashing(slashing) + } + + for slashing in block.body().proposer_slashings() { + validator_monitor.register_block_proposer_slashing(slashing) + } + } + + /// Iterate through the attestations in the block and register them as "observed". + /// + /// This will stop us from propagating them on the gossip network. + fn import_block_observe_attestations( + &self, + block: BeaconBlockRef, + state: &BeaconState, + ctxt: &mut ConsensusContext, + current_epoch: Epoch, + ) { + // To avoid slowing down sync, only observe attestations if the block is from the + // previous epoch or later. + if state.current_epoch() + 1 < current_epoch { + return; + } + + let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); + + for a in block.body().attestations() { + match self.observed_attestations.write().observe_item(a, None) { + // If the observation was successful or if the slot for the attestation was too + // low, continue. + // + // We ignore `SlotTooLow` since this will be very common whilst syncing. + Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {} + Err(e) => { + debug!( + self.log, + "Failed to register observed attestation"; + "error" => ?e, + "epoch" => a.data.target.epoch + ); + } + } + + let indexed_attestation = match ctxt.get_indexed_attestation(state, a) { + Ok(indexed) => indexed, + Err(e) => { + debug!( + self.log, + "Failed to get indexed attestation"; + "purpose" => "observation", + "attestation_slot" => a.data.slot, + "error" => ?e, + ); + continue; + } + }; + + let mut observed_block_attesters = self.observed_block_attesters.write(); + + for &validator_index in &indexed_attestation.attesting_indices { + if let Err(e) = observed_block_attesters + .observe_validator(a.data.target.epoch, validator_index as usize) + { + debug!( + self.log, + "Failed to register observed block attester"; + "error" => ?e, + "epoch" => a.data.target.epoch, + "validator_index" => validator_index, + ) + } + } + } + } + + /// If a slasher is configured, provide the attestations from the block. 
+ fn import_block_update_slasher( + &self, + block: BeaconBlockRef, + state: &BeaconState, + ctxt: &mut ConsensusContext, + ) { + if let Some(slasher) = self.slasher.as_ref() { + for attestation in block.body().attestations() { + let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) { + Ok(indexed) => indexed, + Err(e) => { + debug!( + self.log, + "Failed to get indexed attestation"; + "purpose" => "slasher", + "attestation_slot" => attestation.data.slot, + "error" => ?e, + ); + continue; + } + }; + slasher.accept_attestation(indexed_attestation.clone()); + } + } + } - let block_delay_total = get_slot_delay_ms(block_time_imported, slot, &self.slot_clock); + fn import_block_update_metrics_and_events( + &self, + block: BeaconBlockRef, + block_root: Hash256, + block_time_imported: Duration, + payload_verification_status: PayloadVerificationStatus, + current_slot: Slot, + ) { + // Only present some metrics for blocks from the previous epoch or later. + // + // This helps avoid noise in the metrics during sync. + if block.slot() + 2 * T::EthSpec::slots_per_epoch() >= current_slot { + metrics::observe( + &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, + block.body().attestations().len() as f64, + ); + + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + metrics::set_gauge( + &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, + sync_aggregate.num_set_bits() as i64, + ); + } + } + + let block_delay_total = + get_slot_delay_ms(block_time_imported, block.slot(), &self.slot_clock); // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to // the cache during sync. @@ -3117,62 +3204,105 @@ impl BeaconChain { ); } - // Do not write to eth1 finalization cache for blocks older than 5 epochs - // this helps reduce noise during sync - if block_delay_total - < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32) - { - let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch()); - if parent_block_epoch < current_epoch { - // we've crossed epoch boundary, store Eth1FinalizationData - let (checkpoint, eth1_finalization_data) = - if current_slot % T::EthSpec::slots_per_epoch() == 0 { - // current block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block_root, - }, - current_eth1_finalization_data, - ) - } else { - // parent block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: parent_block.canonical_root(), - }, - parent_eth1_finalization_data, - ) - }; + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_block_subscribers() { + event_handler.register(EventKind::Block(SseBlock { + slot: block.slot(), + block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), + })); + } + } + } - if let Some(finalized_eth1_data) = self - .eth1_finalization_cache - .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) - .and_then(|mut cache| { - cache.insert(checkpoint, eth1_finalization_data); - cache.finalize(¤t_finalized_checkpoint) - }) - { - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - let finalized_deposit_count = finalized_eth1_data.deposit_count; - eth1_chain.finalize_eth1_data(finalized_eth1_data); - debug!( - self.log, - "called eth1_chain.finalize_eth1_data()"; - "epoch" => current_finalized_checkpoint.epoch, - "deposit count" => finalized_deposit_count, - ); - } - } + fn import_block_update_shuffling_cache( + &self, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result<(), BlockError> { + // For 
the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. + for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { + let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; + + let shuffling_is_cached = self + .shuffling_cache + .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or(Error::AttestationCacheLockTimeout)? + .contains(&shuffling_id); + + if !shuffling_is_cached { + state.build_committee_cache(relative_epoch, &self.spec)?; + let committee_cache = state.committee_cache(relative_epoch)?; + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or(Error::AttestationCacheLockTimeout)? + .insert_committee_cache(shuffling_id, committee_cache); } } + Ok(()) + } - // Inform the unknown block cache, in case it was waiting on this block. - self.pre_finalization_block_cache - .block_processed(block_root); + #[allow(clippy::too_many_arguments)] + fn import_block_update_deposit_contract_finalization( + &self, + block: BeaconBlockRef, + block_root: Hash256, + current_epoch: Epoch, + current_finalized_checkpoint: Checkpoint, + current_eth1_finalization_data: Eth1FinalizationData, + parent_eth1_finalization_data: Eth1FinalizationData, + parent_block_slot: Slot, + ) { + // Do not write to eth1 finalization cache for blocks older than 5 epochs. + if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch { + return; + } - Ok(block_root) + let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch()); + if parent_block_epoch < current_epoch { + // we've crossed epoch boundary, store Eth1FinalizationData + let (checkpoint, eth1_finalization_data) = + if block.slot() % T::EthSpec::slots_per_epoch() == 0 { + // current block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block_root, + }, + current_eth1_finalization_data, + ) + } else { + // parent block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block.parent_root(), + }, + parent_eth1_finalization_data, + ) + }; + + if let Some(finalized_eth1_data) = self + .eth1_finalization_cache + .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) + .and_then(|mut cache| { + cache.insert(checkpoint, eth1_finalization_data); + cache.finalize(¤t_finalized_checkpoint) + }) + { + if let Some(eth1_chain) = self.eth1_chain.as_ref() { + let finalized_deposit_count = finalized_eth1_data.deposit_count; + eth1_chain.finalize_eth1_data(finalized_eth1_data); + debug!( + self.log, + "called eth1_chain.finalize_eth1_data()"; + "epoch" => current_finalized_checkpoint.epoch, + "deposit count" => finalized_deposit_count, + ); + } + } + } } /// If configured, wait for the fork choice run at the start of the slot to complete. @@ -3553,10 +3683,12 @@ impl BeaconChain { // This will be a lot slower but guards against bugs in block production and can be // quickly rolled out without a release. 
if self.config.paranoid_block_proposal { + let mut tmp_ctxt = ConsensusContext::new(state.slot()); attestations.retain(|att| { verify_attestation_for_block_inclusion( &state, att, + &mut tmp_ctxt, VerifySignatures::True, &self.spec, ) diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 4f6003fda1b..5cba5f3c3bb 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -61,7 +61,7 @@ pub fn get_effective_balances(state: &BeaconState) -> Vec { } #[superstruct( - variants(V1, V8), + variants(V8), variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)), no_enum )] @@ -75,13 +75,11 @@ pub(crate) struct CacheItem { pub(crate) type CacheItem = CacheItemV8; #[superstruct( - variants(V1, V8), + variants(V8), variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)), no_enum )] pub struct BalancesCache { - #[superstruct(only(V1))] - pub(crate) items: Vec, #[superstruct(only(V8))] pub(crate) items: Vec, } @@ -366,26 +364,20 @@ where } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. -#[superstruct( - variants(V1, V7, V8, V10, V11), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - #[superstruct(only(V1, V7))] - pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8, V10, V11))] + #[superstruct(only(V11))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V10, V11))] + #[superstruct(only(V11))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V10, V11))] + #[superstruct(only(V11))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V7, V8, V10, V11))] + #[superstruct(only(V11))] pub proposer_boost_root: Hash256, #[superstruct(only(V11))] pub equivocating_indices: BTreeSet, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 104de57dbf6..ab317e96b96 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -45,29 +45,29 @@ use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, PayloadNotifier, + AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, + MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::PayloadVerificationStatus; +use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; 
use proto_array::Block as ProtoBlock; use safe_arith::ArithError; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_processing::per_block_processing::is_merge_transition_block; +use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, @@ -550,8 +550,22 @@ pub fn signature_verify_chain_segment( let pubkey_cache = get_validator_pubkey_cache(chain)?; let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); + for (block_root, block) in &chain_segment { - signature_verifier.include_all_signatures(block, Some(*block_root), None)?; + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(*block_root); + + signature_verifier.include_all_signatures(block, &mut consensus_context)?; + + // Save the block and its consensus context. The context will have had its proposer index + // and attesting indices filled in, which can be used to accelerate later block processing. + signature_verified_blocks.push(SignatureVerifiedBlock { + block: block.clone(), + block_root: *block_root, + parent: None, + consensus_context, + }); } if signature_verifier.verify().is_err() { @@ -560,22 +574,6 @@ pub fn signature_verify_chain_segment( drop(pubkey_cache); - let mut signature_verified_blocks = chain_segment - .into_iter() - .map(|(block_root, block)| { - // Proposer index has already been verified above during signature verification. - let consensus_context = ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); - SignatureVerifiedBlock { - block, - block_root, - parent: None, - consensus_context, - } - }) - .collect::>(); - if let Some(signature_verified_block) = signature_verified_blocks.first_mut() { signature_verified_block.parent = Some(parent); } @@ -625,6 +623,7 @@ pub struct ExecutionPendingBlock { pub parent_block: SignedBeaconBlock>, pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, + pub consensus_context: ConsensusContext, pub payload_verification_handle: PayloadVerificationHandle, } @@ -636,8 +635,9 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockError> { - self.into_execution_pending_block_slashable(block_root, chain) + self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) .map(|execution_pending| { // Supply valid block to slasher. 
if let Some(slasher) = chain.slasher.as_ref() { @@ -653,6 +653,7 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; @@ -899,10 +900,15 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - execution_pending.into_execution_pending_block_slashable(block_root, chain) + execution_pending.into_execution_pending_block_slashable( + block_root, + chain, + notify_execution_layer, + ) } fn block(&self) -> &SignedBeaconBlock { @@ -944,13 +950,14 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures(&block, Some(block_root), None)?; + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(block_root); + + signature_verifier.include_all_signatures(&block, &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { - consensus_context: ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()), + consensus_context, block, block_root, parent: Some(parent), @@ -995,16 +1002,16 @@ impl SignatureVerifiedBlock { // Gossip verification has already checked the proposer index. Use it to check the RANDAO // signature. - let verified_proposer_index = Some(block.message().proposer_index()); + let mut consensus_context = from.consensus_context; signature_verifier - .include_all_signatures_except_proposal(&block, verified_proposer_index)?; + .include_all_signatures_except_proposal(&block, &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { block, block_root: from.block_root, parent: Some(parent), - consensus_context: from.consensus_context, + consensus_context, }) } else { Err(BlockError::InvalidSignature) @@ -1032,6 +1039,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { @@ -1047,6 +1055,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc parent, self.consensus_context, chain, + notify_execution_layer, ) .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } @@ -1063,13 +1072,14 @@ impl IntoExecutionPendingBlock for Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? 
- .into_execution_pending_block_slashable(block_root, chain) + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) } fn block(&self) -> &SignedBeaconBlock { @@ -1091,6 +1101,7 @@ impl ExecutionPendingBlock { parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { if let Some(parent) = chain .canonical_head @@ -1127,6 +1138,79 @@ impl ExecutionPendingBlock { check_block_relevancy(&block, block_root, chain)?; + // Define a future that will verify the execution payload with an execution engine. + // + // We do this as early as possible so that later parts of this function can run in parallel + // with the payload verification. + let payload_notifier = PayloadNotifier::new( + chain.clone(), + block.clone(), + &parent.pre_state, + notify_execution_layer, + )?; + let is_valid_merge_transition_block = + is_merge_transition_block(&parent.pre_state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; + }; + + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + let payload_verification_status = payload_notifier.notify_new_payload().await?; + + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.execution_payload.block_hash); + + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } + } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; + /* * Advance the given `parent.beacon_state` to the slot of the given `block`. 
*/ @@ -1231,79 +1315,11 @@ impl ExecutionPendingBlock { summaries.push(summary); } } + metrics::stop_timer(catchup_timer); let block_slot = block.slot(); let state_current_epoch = state.current_epoch(); - // Define a future that will verify the execution payload with an execution engine (but - // don't execute it yet). - let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; - let is_valid_merge_transition_block = - is_merge_transition_block(&state, block.message().body()); - let payload_verification_future = async move { - let chain = payload_notifier.chain.clone(); - let block = payload_notifier.block.clone(); - - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - if is_valid_merge_transition_block { - validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; - }; - - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = payload_notifier.notify_new_payload().await?; - - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.execution_payload.block_hash); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? - { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - - Ok(PayloadVerificationOutcome { - payload_verification_status, - is_valid_merge_transition_block, - }) - }; - // Spawn the payload verification future as a new task, but don't wait for it to complete. - // The `payload_verification_future` will be awaited later to ensure verification completed - // successfully. - let payload_verification_handle = chain - .task_executor - .spawn_handle( - payload_verification_future, - "execution_payload_verification", - ) - .ok_or(BeaconChainError::RuntimeShutdown)?; - // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); @@ -1330,8 +1346,6 @@ impl ExecutionPendingBlock { } } - metrics::stop_timer(catchup_timer); - /* * Build the committee caches on the state. */ @@ -1421,6 +1435,44 @@ impl ExecutionPendingBlock { }); } + /* + * Apply the block's attestations to fork choice. + * + * We're running in parallel with the payload verification at this point, so this is + * free real estate. 
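 *
 * The same idea as a sketch (illustrative names): the execution-engine
 * round-trip and these fork-choice updates run concurrently rather than
 * back-to-back,
 *
 *     spawn(notify_new_payload(block));          // network-bound, already in flight
 *     apply_attestations_to_fork_choice(&block); // CPU-bound, done while waiting
 *
 * so the attestation processing adds no extra latency to block import.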
+ */ + let current_slot = chain.slot()?; + let mut fork_choice = chain.canonical_head.fork_choice_write_lock(); + + // Register each attester slashing in the block with fork choice. + for attester_slashing in block.message().body().attester_slashings() { + fork_choice.on_attester_slashing(attester_slashing); + } + + // Register each attestation in the block with fork choice. + for (i, attestation) in block.message().body().attestations().iter().enumerate() { + let _fork_choice_attestation_timer = + metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); + + let indexed_attestation = consensus_context + .get_indexed_attestation(&state, attestation) + .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; + + match fork_choice.on_attestation( + current_slot, + indexed_attestation, + AttestationFromBlock::True, + &chain.spec, + ) { + Ok(()) => Ok(()), + // Ignore invalid attestations whilst importing attestations from a block. The + // block might be very old and therefore the attestations useless to fork choice. + Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), + Err(e) => Err(BlockError::BeaconChainError(e.into())), + }?; + } + drop(fork_choice); + Ok(Self { block, block_root, @@ -1428,6 +1480,7 @@ impl ExecutionPendingBlock { parent_block: parent.beacon_block, parent_eth1_finalization_data, confirmed_state_roots, + consensus_context, payload_verification_handle, }) } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index f970c5607e7..286cc17a963 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -47,8 +47,6 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, - /// Whether to enable the light client server protocol. - pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -70,7 +68,6 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, - enable_light_client_server: false, } } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2221d1fc7cd..85f7629bb79 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -35,6 +35,16 @@ pub enum AllowOptimisticImport { No, } +/// Signal whether the execution payloads of new blocks should be +/// immediately verified with the EL or imported optimistically without +/// any EL communication. +#[derive(Default, Clone, Copy)] +pub enum NotifyExecutionLayer { + #[default] + Yes, + No, +} + /// Used to await the result of executing payload with a remote EE. pub struct PayloadNotifier { pub chain: Arc>, @@ -47,21 +57,28 @@ impl PayloadNotifier { chain: Arc>, block: Arc>, state: &BeaconState, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { - let payload_verification_status = if is_execution_enabled(state, block.message().body()) { - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution engine from junk. 
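// For context on the enum introduced above, a caller-side sketch
// (`is_syncing_finalized` is a hypothetical flag, not from this patch):
//
//     let notify = if is_syncing_finalized {
//         NotifyExecutionLayer::No  // deep in a finalized segment: skip the EL call
//     } else {
//         NotifyExecutionLayer::Yes // near the head: verify with the EL
//     };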
- partially_verify_execution_payload( - state, - block.message().execution_payload()?, - &chain.spec, - ) - .map_err(BlockError::PerBlockProcessingError)?; - None - } else { - Some(PayloadVerificationStatus::Irrelevant) + let payload_verification_status = match notify_execution_layer { + NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic), + NotifyExecutionLayer::Yes => { + if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. + partially_verify_execution_payload( + state, + block.slot(), + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + } + } }; Ok(Self { @@ -357,7 +374,8 @@ pub fn get_execution_payload< let spec = &chain.spec; let current_epoch = state.current_epoch(); let is_merge_transition_complete = is_merge_transition_complete(state); - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let timestamp = + compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5ead5311e59..3889fe4aa53 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -63,6 +63,7 @@ pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; +pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ead4a540254..b37c5afc35f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -64,6 +64,11 @@ lazy_static! { "beacon_block_processing_state_root_seconds", "Time spent calculating the state root when processing a block." 
); + pub static ref BLOCK_PROCESSING_POST_EXEC_PROCESSING: Result = try_create_histogram_with_buckets( + "beacon_block_processing_post_exec_pre_attestable_seconds", + "Time between finishing execution processing and the block becoming attestable", + linear_buckets(5e-3, 5e-3, 10) + ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( "beacon_block_processing_db_write_seconds", "Time spent writing a newly processed block and state to DB" diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index a60dacdc7c0..829dc2a8a77 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,7 +1,4 @@ -use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11, - PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, -}; +use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error, StoreItem}; @@ -10,21 +7,9 @@ use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. pub type PersistedForkChoice = PersistedForkChoiceV11; -#[superstruct( - variants(V1, V7, V8, V10, V11), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { pub fork_choice: fork_choice::PersistedForkChoice, - #[superstruct(only(V1))] - pub fork_choice_store: PersistedForkChoiceStoreV1, - #[superstruct(only(V7))] - pub fork_choice_store: PersistedForkChoiceStoreV7, - #[superstruct(only(V8))] - pub fork_choice_store: PersistedForkChoiceStoreV8, - #[superstruct(only(V10))] - pub fork_choice_store: PersistedForkChoiceStoreV10, #[superstruct(only(V11))] pub fork_choice_store: PersistedForkChoiceStoreV11, } @@ -47,8 +32,4 @@ macro_rules! impl_store_item { }; } -impl_store_item!(PersistedForkChoiceV1); -impl_store_item!(PersistedForkChoiceV7); -impl_store_item!(PersistedForkChoiceV8); -impl_store_item!(PersistedForkChoiceV10); impl_store_item!(PersistedForkChoiceV11); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index fd55048c388..73906b1b586 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,20 +1,9 @@ //! Utilities for managing database schema changes. -mod migration_schema_v10; -mod migration_schema_v11; mod migration_schema_v12; mod migration_schema_v13; -mod migration_schema_v6; -mod migration_schema_v7; -mod migration_schema_v8; -mod migration_schema_v9; -mod types; -use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; -use crate::persisted_fork_choice::{ - PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, - PersistedForkChoiceV8, -}; use crate::types::ChainSpec; use slog::{warn, Logger}; use std::sync::Arc; @@ -23,6 +12,7 @@ use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. 
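// The dispatch below, reduced to a sketch (signatures simplified; the real
// function also threads the DB handle, datadir, logger and spec through each
// step, and the error variant name here is illustrative):
//
//     fn migrate(from: SchemaVersion, to: SchemaVersion) -> Result<(), StoreError> {
//         match (from, to) {
//             (from, to) if from == to => Ok(()),       // no-op
//             (from, to) if from.0 + 1 < to.0 => {      // multi-step upgrade:
//                 let next = SchemaVersion(from.0 + 1); // recurse one version
//                 migrate(from, next)?;                 // at a time
//                 migrate(next, to)
//             }
//             // ... one arm per supported single-step upgrade/downgrade ...
//             _ => Err(StoreError::MigrationError),
//         }
//     }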
+#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future pub fn migrate_schema( db: Arc>, deposit_contract_deploy_block: u64, @@ -62,156 +52,9 @@ pub fn migrate_schema( } // - // Migrations from before SchemaVersion(5) are deprecated. + // Migrations from before SchemaVersion(11) are deprecated. // - // Migration for adding `execution_status` field to the fork choice store. - (SchemaVersion(5), SchemaVersion(6)) => { - // Database operations to be done atomically - let mut ops = vec![]; - - // The top-level `PersistedForkChoice` struct is still V1 but will have its internal - // bytes for the fork choice updated to V6. - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(mut persisted_fork_choice) = fork_choice_opt { - migration_schema_v6::update_execution_statuses::(&mut persisted_fork_choice) - .map_err(StoreError::SchemaMigrationError)?; - - // Store the converted fork choice store under the same key. - ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // 1. Add `proposer_boost_root`. - // 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to - // `finalized_checkpoint`. - // 3. This migration also includes a potential update to the justified - // checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint - // combination does not actually exist for any blocks in fork choice. This was possible in - // the consensus spec prior to v1.1.6. - // - // Relevant issues: - // - // https://github.com/sigp/lighthouse/issues/2741 - // https://github.com/ethereum/consensus-specs/pull/2727 - // https://github.com/ethereum/consensus-specs/pull/2730 - (SchemaVersion(6), SchemaVersion(7)) => { - // Database operations to be done atomically - let mut ops = vec![]; - - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(persisted_fork_choice_v1) = fork_choice_opt { - // This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field. - let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into(); - - let result = migration_schema_v7::update_fork_choice::( - &mut persisted_fork_choice_v7, - db.clone(), - ); - - // Fall back to re-initializing fork choice from an anchor state if necessary. - if let Err(e) = result { - warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e); - migration_schema_v7::update_with_reinitialized_fork_choice::( - &mut persisted_fork_choice_v7, - db.clone(), - spec, - ) - .map_err(StoreError::SchemaMigrationError)?; - } - - // Store the converted fork choice store under the same key. - ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Migration to add an `epoch` key to the fork choice's balances cache. - (SchemaVersion(7), SchemaVersion(8)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = - migration_schema_v8::update_fork_choice::(fork_choice, db.clone())?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Upgrade from v8 to v9 to separate the execution payloads into their own column. 
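// Each deleted arm below follows the same get / transform / store-atomically
// shape; the recurring pattern, using names from the surrounding code
// (`PersistedForkChoiceVn` and `transform` stand in for the version-specific
// type and conversion):
//
//     let mut ops = vec![];
//     if let Some(fc) = db.get_item::<PersistedForkChoiceVn>(&FORK_CHOICE_DB_KEY)? {
//         let updated = transform(fc)?;                          // version-specific step
//         ops.push(updated.as_kv_store_op(FORK_CHOICE_DB_KEY));  // same key, new layout
//     }
//     db.store_schema_version_atomically(to, ops)?;              // version bump + data in one batch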
- (SchemaVersion(8), SchemaVersion(9)) => { - migration_schema_v9::upgrade_to_v9::(db.clone(), log)?; - db.store_schema_version(to) - } - // Downgrade from v9 to v8 to ignore the separation of execution payloads - // NOTE: only works before the Bellatrix fork epoch. - (SchemaVersion(9), SchemaVersion(8)) => { - migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; - db.store_schema_version(to) - } - (SchemaVersion(9), SchemaVersion(10)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(10), SchemaVersion(9)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Upgrade from v10 to v11 adding support for equivocating indices to fork choice. - (SchemaVersion(10), SchemaVersion(11)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice); - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Downgrade from v11 to v10 removing support for equivocating indices from fork choice. - (SchemaVersion(11), SchemaVersion(10)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = - migration_schema_v11::downgrade_fork_choice(fork_choice, log); - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. (SchemaVersion(11), SchemaVersion(12)) => { let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs deleted file mode 100644 index 70e0007851c..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs +++ /dev/null @@ -1,97 +0,0 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8}; -use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8}; -use crate::schema_change::{ - types::{SszContainerV10, SszContainerV7}, - StoreError, -}; -use proto_array::core::SszContainer; -use ssz::{Decode, Encode}; - -pub fn update_fork_choice( - mut fork_choice: PersistedForkChoiceV8, -) -> Result { - let ssz_container_v7 = SszContainerV7::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - // These transformations instantiate `node.unrealized_justified_checkpoint` and - // `node.unrealized_finalized_checkpoint` to `None`. 
- let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); - let ssz_container: SszContainer = ssz_container_v10.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -pub fn downgrade_fork_choice( - mut fork_choice: PersistedForkChoiceV10, -) -> Result { - let ssz_container_v10 = SszContainerV10::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - let ssz_container_v7: SszContainerV7 = ssz_container_v10.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -impl From for PersistedForkChoiceStoreV10 { - fn from(other: PersistedForkChoiceStoreV8) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - unrealized_justified_checkpoint: other.best_justified_checkpoint, - unrealized_finalized_checkpoint: other.finalized_checkpoint, - proposer_boost_root: other.proposer_boost_root, - } - } -} - -impl From for PersistedForkChoiceV10 { - fn from(other: PersistedForkChoiceV8) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} - -impl From for PersistedForkChoiceStoreV8 { - fn from(other: PersistedForkChoiceStoreV10) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - proposer_boost_root: other.proposer_boost_root, - } - } -} - -impl From for PersistedForkChoiceV8 { - fn from(other: PersistedForkChoiceV10) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs deleted file mode 100644 index dde80a5cac7..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11}; -use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11}; -use slog::{warn, Logger}; -use std::collections::BTreeSet; - -/// Add the equivocating indices field. 
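// Additive upgrades like the one below reduce to destructure-and-rebuild with
// the new field defaulted; the essence of the function (fields elided):
//
//     let PersistedForkChoiceStoreV10 { balances_cache, time, /* .. */ } = v10.fork_choice_store;
//     PersistedForkChoiceStoreV11 {
//         balances_cache,
//         time,
//         // ..remaining fields carried over unchanged..
//         equivocating_indices: BTreeSet::new(), // the only new field
//     }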
-pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 { - let PersistedForkChoiceStoreV10 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - } = fork_choice_v10.fork_choice_store; - - PersistedForkChoiceV11 { - fork_choice: fork_choice_v10.fork_choice, - fork_choice_store: PersistedForkChoiceStoreV11 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - equivocating_indices: BTreeSet::new(), - }, - } -} - -pub fn downgrade_fork_choice( - fork_choice_v11: PersistedForkChoiceV11, - log: Logger, -) -> PersistedForkChoiceV10 { - let PersistedForkChoiceStoreV11 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - equivocating_indices, - } = fork_choice_v11.fork_choice_store; - - if !equivocating_indices.is_empty() { - warn!( - log, - "Deleting slashed validators from fork choice store"; - "count" => equivocating_indices.len(), - "message" => "this may make your node more susceptible to following the wrong chain", - ); - } - - PersistedForkChoiceV10 { - fork_choice: fork_choice_v11.fork_choice, - fork_choice_store: PersistedForkChoiceStoreV10 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - }, - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs deleted file mode 100644 index 231da838cdc..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs +++ /dev/null @@ -1,28 +0,0 @@ -///! These functions and structs are only relevant to the database migration from schema 5 to 6. -use crate::persisted_fork_choice::PersistedForkChoiceV1; -use crate::schema_change::types::{SszContainerV1, SszContainerV6}; -use crate::BeaconChainTypes; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. -four_byte_option_impl!(four_byte_option_usize, usize); - -pub(crate) fn update_execution_statuses( - persisted_fork_choice: &mut PersistedForkChoiceV1, -) -> Result<(), String> { - let ssz_container_v1 = - SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) - .map_err(|e| { - format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - ) - })?; - - let ssz_container_v6: SszContainerV6 = ssz_container_v1.into(); - - persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes(); - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs deleted file mode 100644 index d953d30027f..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ /dev/null @@ -1,341 +0,0 @@ -///! These functions and structs are only relevant to the database migration from schema 6 to 7. 
-use crate::beacon_chain::BeaconChainTypes; -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; -use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; -use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; -use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::ForkChoice; -use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice}; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use store::hot_cold_store::HotColdDB; -use store::iter::BlockRootsIterator; -use store::Error as StoreError; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. -four_byte_option_impl!(four_byte_option_usize, usize); - -/// This method is used to re-initialize fork choice from the finalized state in case we hit an -/// error during this migration. -pub(crate) fn update_with_reinitialized_fork_choice( - persisted_fork_choice: &mut PersistedForkChoiceV7, - db: Arc>, - spec: &ChainSpec, -) -> Result<(), String> { - let anchor_block_root = persisted_fork_choice - .fork_choice_store - .finalized_checkpoint - .root; - let anchor_block = db - .get_full_block_prior_to_v9(&anchor_block_root) - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "Missing anchor beacon block".to_string())?; - let anchor_state = db - .get_state(&anchor_block.state_root(), Some(anchor_block.slot())) - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "Missing anchor beacon state".to_string())?; - let snapshot = BeaconSnapshot { - beacon_block: Arc::new(anchor_block), - beacon_block_root: anchor_block_root, - beacon_state: anchor_state, - }; - let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot); - let fork_choice = ForkChoice::from_anchor( - store, - anchor_block_root, - &snapshot.beacon_block, - &snapshot.beacon_state, - // Don't provide the current slot here, just use what's in the store. We don't need to know - // the head here, plus it's nice to avoid mutating fork choice during this process. - None, - // This config will get overwritten on startup. - CountUnrealizedFull::default(), - spec, - ) - .map_err(|e| format!("{:?}", e))?; - persisted_fork_choice.fork_choice = fork_choice.to_persisted(); - Ok(()) -} - -pub(crate) fn update_fork_choice( - persisted_fork_choice: &mut PersistedForkChoiceV7, - db: Arc>, -) -> Result<(), StoreError> { - // `PersistedForkChoice` stores the `ProtoArray` as a `Vec`. Deserialize these - // bytes assuming the legacy struct, and transform them to the new struct before - // re-serializing. - let ssz_container_v6 = - SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - // Clone the V6 proto nodes in order to maintain information about `node.justified_epoch` - // and `node.finalized_epoch`. - let nodes_v6 = ssz_container_v6.nodes.clone(); - - let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint; - let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint; - - // These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint` - // to `None`. 
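// Why `None`: the container alone cannot say which block roots the legacy
// per-node `justified_epoch`/`finalized_epoch` referred to, so the conversion
// leaves the checkpoints unset and `update_checkpoints` below backfills them
// by walking each head's ancestors, roughly:
//
//     node.justified_checkpoint = Some(Checkpoint {
//         epoch: node_v6.justified_epoch,
//         root: roots_by_epoch[&node_v6.justified_epoch], // root at the epoch's start slot
//     });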
- let ssz_container_v7: SszContainerV7 = - ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); - let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); - let ssz_container: SszContainer = ssz_container_v10.into(); - // `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup. - let mut fork_choice: ProtoArrayForkChoice = - (ssz_container, CountUnrealizedFull::default()).into(); - - update_checkpoints::(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) - .map_err(StoreError::SchemaMigrationError)?; - - // Update the justified checkpoint in the store in case we have a discrepancy - // between the store and the proto array nodes. - update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) - .map_err(StoreError::SchemaMigrationError)?; - - // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence. - let ssz_container = SszContainer::from(&fork_choice); - let ssz_container_v7 = SszContainerV7::from(ssz_container); - - persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); - persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; - - Ok(()) -} - -struct HeadInfo { - index: usize, - root: Hash256, - slot: Slot, -} - -fn update_checkpoints( - finalized_root: Hash256, - nodes_v6: &[ProtoNodeV6], - fork_choice: &mut ProtoArrayForkChoice, - db: Arc>, -) -> Result<(), String> { - let heads = find_finalized_descendant_heads(finalized_root, fork_choice); - - // For each head, first gather all epochs we will need to find justified or finalized roots for. - for head in heads { - // `relevant_epochs` are epochs for which we will need to find the root at the start slot. - // We don't need to worry about whether the are finalized or justified epochs. - let mut relevant_epochs = HashSet::new(); - let relevant_epoch_finder = |index, _: &mut ProtoNode| { - let (justified_epoch, finalized_epoch) = nodes_v6 - .get(index) - .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) - .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; - relevant_epochs.insert(justified_epoch); - relevant_epochs.insert(finalized_epoch); - Ok(()) - }; - - apply_to_chain_of_ancestors( - finalized_root, - head.index, - fork_choice, - relevant_epoch_finder, - )?; - - // find the block roots associated with each relevant epoch. - let roots_by_epoch = - map_relevant_epochs_to_roots::(head.root, head.slot, relevant_epochs, db.clone())?; - - // Apply this mutator to the chain of descendants from this head, adding justified - // and finalized checkpoints for each. - let node_mutator = |index, node: &mut ProtoNode| { - let (justified_epoch, finalized_epoch) = nodes_v6 - .get(index) - .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) - .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; - - // Update the checkpoints only if they haven't already been populated. 
- if node.justified_checkpoint.is_none() { - let justified_checkpoint = - roots_by_epoch - .get(&justified_epoch) - .map(|&root| Checkpoint { - epoch: justified_epoch, - root, - }); - node.justified_checkpoint = justified_checkpoint; - } - if node.finalized_checkpoint.is_none() { - let finalized_checkpoint = - roots_by_epoch - .get(&finalized_epoch) - .map(|&root| Checkpoint { - epoch: finalized_epoch, - root, - }); - node.finalized_checkpoint = finalized_checkpoint; - } - - Ok(()) - }; - - apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?; - } - Ok(()) -} - -/// Coverts the given `HashSet` to a `Vec` then reverse sorts by `Epoch`. Next, a -/// single `BlockRootsIterator` is created which is used to iterate backwards from the given -/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch. -fn map_relevant_epochs_to_roots( - head_root: Hash256, - head_slot: Slot, - epochs: HashSet, - db: Arc>, -) -> Result, String> { - // Convert the `HashSet` to a `Vec` and reverse sort the epochs. - let mut relevant_epochs = epochs.into_iter().collect::>(); - relevant_epochs.sort_unstable_by(|a, b| b.cmp(a)); - - // Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch. - let mut iter = std::iter::once(Ok((head_root, head_slot))) - .chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?); - let mut roots_by_epoch = HashMap::new(); - for epoch in relevant_epochs { - let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); - - let root = iter - .find_map(|next| match next { - Ok((root, slot)) => (slot == start_slot).then_some(Ok(root)), - Err(e) => Some(Err(format!("{:?}", e))), - }) - .transpose()? - .ok_or_else(|| "Justified root not found".to_string())?; - roots_by_epoch.insert(epoch, root); - } - Ok(roots_by_epoch) -} - -/// Applies a mutator to every node in a chain, starting from the node at the given -/// `head_index` and iterating through ancestors until the `finalized_root` is reached. -fn apply_to_chain_of_ancestors( - finalized_root: Hash256, - head_index: usize, - fork_choice: &mut ProtoArrayForkChoice, - mut node_mutator: F, -) -> Result<(), String> -where - F: FnMut(usize, &mut ProtoNode) -> Result<(), String>, -{ - let head = fork_choice - .core_proto_array_mut() - .nodes - .get_mut(head_index) - .ok_or_else(|| "Head index not found in proto nodes".to_string())?; - - node_mutator(head_index, head)?; - - let mut parent_index_opt = head.parent; - let mut parent_opt = - parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); - - // Iterate backwards through all parents until there is no reference to a parent or we reach - // the `finalized_root` node. - while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) { - node_mutator(parent_index, parent)?; - - // Break out of this while loop *after* the `node_mutator` has been applied to the finalized - // node. - if parent.root == finalized_root { - break; - } - - // Update parent values - parent_index_opt = parent.parent; - parent_opt = parent_index_opt - .and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); - } - Ok(()) -} - -/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then -/// checks that these nodes are descendants of the finalized root in order to determine if they are -/// relevant. 
-fn find_finalized_descendant_heads( - finalized_root: Hash256, - fork_choice: &ProtoArrayForkChoice, -) -> Vec { - let nodes_referenced_as_parents: HashSet = fork_choice - .core_proto_array() - .nodes - .iter() - .filter_map(|node| node.parent) - .collect::>(); - - fork_choice - .core_proto_array() - .nodes - .iter() - .enumerate() - .filter_map(|(index, node)| { - (!nodes_referenced_as_parents.contains(&index) - && fork_choice.is_descendant(finalized_root, node.root)) - .then_some(HeadInfo { - index, - root: node.root, - slot: node.slot, - }) - }) - .collect::>() -} - -fn update_store_justified_checkpoint( - persisted_fork_choice: &mut PersistedForkChoiceV7, - fork_choice: &mut ProtoArrayForkChoice, -) -> Result<(), String> { - let justified_checkpoint = fork_choice - .core_proto_array() - .nodes - .iter() - .filter_map(|node| { - (node.finalized_checkpoint - == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint)) - .then_some(node.justified_checkpoint) - .flatten() - }) - .max_by_key(|justified_checkpoint| justified_checkpoint.epoch) - .ok_or("Proto node with current finalized checkpoint not found")?; - - fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; - Ok(()) -} - -// Add a zero `proposer_boost_root` when migrating from V1-6 to V7. -impl From for PersistedForkChoiceStoreV7 { - fn from(other: PersistedForkChoiceStoreV1) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - proposer_boost_root: Hash256::zero(), - } - } -} - -impl From for PersistedForkChoiceV7 { - fn from(other: PersistedForkChoiceV1) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs deleted file mode 100644 index ef3f7857f9a..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use crate::beacon_fork_choice_store::{ - BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, -}; -use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8}; -use std::sync::Arc; -use store::{Error as StoreError, HotColdDB}; -use types::EthSpec; - -pub fn update_fork_choice( - fork_choice: PersistedForkChoiceV7, - db: Arc>, -) -> Result { - let PersistedForkChoiceStoreV7 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - proposer_boost_root, - } = fork_choice.fork_choice_store; - let mut fork_choice_store = PersistedForkChoiceStoreV8 { - balances_cache: BalancesCacheV8::default(), - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - proposer_boost_root, - }; - - // Add epochs to the balances cache. It's safe to just use the block's epoch because - // before schema v8 the cache would always miss on skipped slots. - for item in balances_cache.items { - // Drop any blocks that aren't found, they're presumably too old and this is only a cache. - if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? 
{ - fork_choice_store.balances_cache.items.push(CacheItemV8 { - block_root: item.block_root, - epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()), - balances: item.balances, - }); - } - } - - Ok(PersistedForkChoiceV8 { - fork_choice: fork_choice.fork_choice, - fork_choice_store, - }) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs deleted file mode 100644 index e2c48d5c89d..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs +++ /dev/null @@ -1,176 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use slog::{debug, error, info, Logger}; -use slot_clock::SlotClock; -use std::sync::Arc; -use std::time::Duration; -use store::{DBColumn, Error, HotColdDB, KeyValueStore}; -use types::{EthSpec, Hash256, Slot}; - -const OPS_PER_BLOCK_WRITE: usize = 2; - -/// The slot clock isn't usually available before the database is initialized, so we construct a -/// temporary slot clock by reading the genesis state. It should always exist if the database is -/// initialized at a prior schema version, however we still handle the lack of genesis state -/// gracefully. -fn get_slot_clock( - db: &HotColdDB, - log: &Logger, -) -> Result, Error> { - // At schema v8 the genesis block must be a *full* block (with payload). In all likeliness it - // actually has no payload. - let spec = db.get_chain_spec(); - let genesis_block = if let Some(block) = db.get_full_block_prior_to_v9(&Hash256::zero())? { - block - } else { - error!(log, "Missing genesis block"); - return Ok(None); - }; - let genesis_state = - if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { - state - } else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; - Ok(Some(T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time()), - Duration::from_secs(spec.seconds_per_slot), - ))) -} - -pub fn upgrade_to_v9( - db: Arc>, - log: Logger, -) -> Result<(), Error> { - // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration - // was implemented before the activation of Bellatrix on all networks except Kiln, so the only - // users who will need to wait for the slow copying migration are Kiln users. - let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { - slot_clock - } else { - error!( - log, - "Unable to complete migration because genesis state or genesis block is missing" - ); - return Err(Error::SlotClockUnavailableForMigration); - }; - - let current_epoch = if let Some(slot) = slot_clock.now() { - slot.epoch(T::EthSpec::slots_per_epoch()) - } else { - return Ok(()); - }; - - let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { - fork_epoch - } else { - info!( - log, - "Upgrading database schema to v9 (no-op)"; - "info" => "To downgrade before the merge run `lighthouse db migrate`" - ); - return Ok(()); - }; - - if current_epoch >= bellatrix_fork_epoch { - info!( - log, - "Upgrading database schema to v9"; - "info" => "This will take several minutes. Each block will be read from and \ - re-written to the database. You may safely exit now (Ctrl-C) and resume \ - the migration later. Downgrading is no longer possible." 
- ); - - for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) { - let block_root = res?; - let block = match db.get_full_block_prior_to_v9(&block_root) { - // A pre-v9 block is present. - Ok(Some(block)) => block, - // A block is missing. - Ok(None) => return Err(Error::BlockNotFound(block_root)), - // There was an error reading a pre-v9 block. Try reading it as a post-v9 block. - Err(_) => { - if db.try_get_full_block(&block_root)?.is_some() { - // The block is present as a post-v9 block, assume that it was already - // correctly migrated. - continue; - } else { - // This scenario should not be encountered since a prior check has ensured - // that this block exists. - return Err(Error::V9MigrationFailure(block_root)); - } - } - }; - - if block.message().execution_payload().is_ok() { - // Overwrite block with blinded block and store execution payload separately. - debug!( - log, - "Rewriting Bellatrix block"; - "block_root" => ?block_root, - ); - - let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE); - db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?; - db.hot_db.do_atomically(kv_batch)?; - } - } - } else { - info!( - log, - "Upgrading database schema to v9 (no-op)"; - "info" => "To downgrade before the merge run `lighthouse db migrate`" - ); - } - - Ok(()) -} - -// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't been -// reached. -pub fn downgrade_from_v9( - db: Arc>, - log: Logger, -) -> Result<(), Error> { - let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { - slot_clock - } else { - error!( - log, - "Unable to complete migration because genesis state or genesis block is missing" - ); - return Err(Error::SlotClockUnavailableForMigration); - }; - - let current_epoch = if let Some(slot) = slot_clock.now() { - slot.epoch(T::EthSpec::slots_per_epoch()) - } else { - return Ok(()); - }; - - let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { - fork_epoch - } else { - info!( - log, - "Downgrading database schema from v9"; - "info" => "You need to upgrade to v9 again before the merge" - ); - return Ok(()); - }; - - if current_epoch >= bellatrix_fork_epoch { - error!( - log, - "Downgrading from schema v9 after the Bellatrix fork epoch is not supported"; - "current_epoch" => current_epoch, - "bellatrix_fork_epoch" => bellatrix_fork_epoch, - "reason" => "You need a v9 schema database to run on a merged version of Prater or \ - mainnet. On Kiln, you have to re-sync", - ); - Err(Error::ResyncRequiredForExecutionPayloadSeparation) - } else { - Ok(()) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs deleted file mode 100644 index 02a54c1a3f8..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/types.rs +++ /dev/null @@ -1,315 +0,0 @@ -use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot}; -use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker}; -use proto_array::ExecutionStatus; -use ssz::four_byte_option_impl; -use ssz::Encode; -use ssz_derive::{Decode, Encode}; -use superstruct::superstruct; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. 
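// Assuming the macro matches its description above (see its definition in the
// `ssz` crate), the legacy wire format is a 4-byte little-endian union
// selector followed by the payload; e.g. for an `Option<usize>` (sketch of
// the encoding, not actual captured output):
//
//     None    => 00 00 00 00
//     Some(5) => 01 00 00 00 05 00 00 00 00 00 00 00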
-four_byte_option_impl!(four_byte_option_usize, usize); -four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); - -#[superstruct( - variants(V1, V6, V7, V10), - variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), - no_enum -)] -pub struct ProtoNode { - pub slot: Slot, - pub state_root: Hash256, - pub target_root: Hash256, - pub current_epoch_shuffling_id: AttestationShufflingId, - pub next_epoch_shuffling_id: AttestationShufflingId, - pub root: Hash256, - #[ssz(with = "four_byte_option_usize")] - pub parent: Option, - #[superstruct(only(V1, V6))] - pub justified_epoch: Epoch, - #[superstruct(only(V1, V6))] - pub finalized_epoch: Epoch, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7, V10))] - pub justified_checkpoint: Option, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7, V10))] - pub finalized_checkpoint: Option, - pub weight: u64, - #[ssz(with = "four_byte_option_usize")] - pub best_child: Option, - #[ssz(with = "four_byte_option_usize")] - pub best_descendant: Option, - #[superstruct(only(V6, V7, V10))] - pub execution_status: ExecutionStatus, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V10))] - pub unrealized_justified_checkpoint: Option, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V10))] - pub unrealized_finalized_checkpoint: Option, -} - -impl Into for ProtoNodeV1 { - fn into(self) -> ProtoNodeV6 { - ProtoNodeV6 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - // We set the following execution value as if the block is a pre-merge-fork block. This - // is safe as long as we never import a merge block with the old version of proto-array. - // This will be safe since we can't actually process merge blocks until we've made this - // change to fork choice. 
- execution_status: ExecutionStatus::irrelevant(), - } - } -} - -impl Into for ProtoNodeV6 { - fn into(self) -> ProtoNodeV7 { - ProtoNodeV7 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: None, - finalized_checkpoint: None, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - } - } -} - -impl Into for ProtoNodeV7 { - fn into(self) -> ProtoNodeV10 { - ProtoNodeV10 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: None, - unrealized_finalized_checkpoint: None, - } - } -} - -impl Into for ProtoNodeV10 { - fn into(self) -> ProtoNodeV7 { - ProtoNodeV7 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - } - } -} - -impl Into for ProtoNodeV10 { - fn into(self) -> ProtoNode { - ProtoNode { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - } - } -} - -impl From for ProtoNodeV7 { - fn from(container: ProtoNode) -> Self { - Self { - slot: container.slot, - state_root: container.state_root, - target_root: container.target_root, - current_epoch_shuffling_id: container.current_epoch_shuffling_id, - next_epoch_shuffling_id: container.next_epoch_shuffling_id, - root: container.root, - parent: container.parent, - justified_checkpoint: container.justified_checkpoint, - finalized_checkpoint: container.finalized_checkpoint, - weight: container.weight, - best_child: container.best_child, - best_descendant: container.best_descendant, - execution_status: container.execution_status, - } - } -} - -#[superstruct( - variants(V1, V6, V7, V10), - variant_attributes(derive(Encode, Decode)), - no_enum -)] -#[derive(Encode, Decode)] -pub struct SszContainer { - pub votes: Vec, - pub balances: Vec, - pub prune_threshold: usize, - #[superstruct(only(V1, V6))] - pub justified_epoch: Epoch, - #[superstruct(only(V1, V6))] - pub finalized_epoch: Epoch, - 
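// `#[superstruct(only(..))]` includes a field in just the listed variants, so
// one annotated definition expands to per-version structs, roughly:
//
//     pub struct SszContainerV6 { /* common */ pub justified_epoch: Epoch, /* .. */ }
//     pub struct SszContainerV7 { /* common */ pub justified_checkpoint: Checkpoint, /* .. */ }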
#[superstruct(only(V7, V10))] - pub justified_checkpoint: Checkpoint, - #[superstruct(only(V7, V10))] - pub finalized_checkpoint: Checkpoint, - #[superstruct(only(V1))] - pub nodes: Vec, - #[superstruct(only(V6))] - pub nodes: Vec, - #[superstruct(only(V7))] - pub nodes: Vec, - #[superstruct(only(V10))] - pub nodes: Vec, - pub indices: Vec<(Hash256, usize)>, - #[superstruct(only(V7, V10))] - pub previous_proposer_boost: ProposerBoost, -} - -impl Into for SszContainerV1 { - fn into(self) -> SszContainerV6 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV6 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, - nodes, - indices: self.indices, - } - } -} - -impl SszContainerV6 { - pub(crate) fn into_ssz_container_v7( - self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, - ) -> SszContainerV7 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV7 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint, - finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: ProposerBoost::default(), - } - } -} - -impl Into for SszContainerV7 { - fn into(self) -> SszContainerV10 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV10 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl Into for SszContainerV10 { - fn into(self) -> SszContainerV7 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV7 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl Into for SszContainerV10 { - fn into(self) -> SszContainer { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainer { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl From for SszContainerV7 { - fn from(container: SszContainer) -> Self { - let nodes = container.nodes.into_iter().map(Into::into).collect(); - - Self { - votes: container.votes, - balances: container.balances, - prune_threshold: container.prune_threshold, - justified_checkpoint: container.justified_checkpoint, - finalized_checkpoint: container.finalized_checkpoint, - nodes, - indices: container.indices, - previous_proposer_boost: container.previous_proposer_boost, - } - } -} diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 33447bc2efd..40b73451cb0 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -298,27 +298,6 @@ impl SnapshotCache { }) } - /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*. 
- /// - /// Care must be taken not to mutate the state in an invalid way. This function should only - /// be used to mutate the *caches* of the state, for example the tree hash cache when - /// calculating a light client merkle proof. - pub fn borrow_unadvanced_state_mut( - &mut self, - block_root: Hash256, - ) -> Option<&mut BeaconState> { - self.snapshots - .iter_mut() - .find(|snapshot| { - // If the pre-state exists then state advance has already taken the state for - // `block_root` and mutated its tree hash cache. Rather than re-building it while - // holding the snapshot cache lock (>1 second), prefer to return `None` from this - // function and force the caller to load it from disk. - snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none() - }) - .map(|snapshot| &mut snapshot.beacon_state) - } - /// If there is a snapshot with `block_root`, clone it and return the clone. pub fn get_cloned( &self, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a1c7acf173a..b88966b41a9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,7 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, - BeaconChainError, ProduceBlockVerification, + BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ builder::{BeaconChainBuilder, Witness}, @@ -586,7 +586,7 @@ where pub fn get_timestamp_at_slot(&self) -> u64 { let state = self.get_current_state(); - compute_timestamp_at_slot(&state, &self.spec).unwrap() + compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { @@ -1460,7 +1460,12 @@ where self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(block_root, Arc::new(block), CountUnrealized::True) + .process_block( + block_root, + Arc::new(block), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1477,6 +1482,7 @@ where block.canonical_root(), Arc::new(block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await? .into(); diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 60fdb607c86..26aea2d2722 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -3,7 +3,8 @@ use crate::{BeaconChainTypes, BeaconStore}; use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; -use store::{DBColumn, Error as StoreError, StoreItem}; +use std::marker::PhantomData; +use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. @@ -14,21 +15,17 @@ use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// 2. To reduce the amount of public key _decompression_ required. A `BeaconState` stores public /// keys in compressed form and they are needed in decompressed form for signature verification. /// Decompression is expensive when many keys are involved. -/// -/// The cache has a `backing` that it uses to maintain a persistent, on-disk -/// copy of itself. 
 pub struct ValidatorPubkeyCache<T: BeaconChainTypes> {
     pubkeys: Vec<PublicKey>,
     indices: HashMap<PublicKeyBytes, usize>,
     pubkey_bytes: Vec<PublicKeyBytes>,
-    store: BeaconStore<T>,
+    _phantom: PhantomData<T>,
 }

 impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
     /// Create a new public key cache using the keys in `state.validators`.
     ///
-    /// Also creates a new persistence file, returning an error if there is already a file at
-    /// `persistence_path`.
+    /// The new cache will be updated with the keys from `state` and immediately written to disk.
     pub fn new(
         state: &BeaconState<T::EthSpec>,
         store: BeaconStore<T>,
@@ -37,10 +34,11 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
             pubkeys: vec![],
             indices: HashMap::new(),
             pubkey_bytes: vec![],
-            store,
+            _phantom: PhantomData,
         };

-        cache.import_new_pubkeys(state)?;
+        let store_ops = cache.import_new_pubkeys(state)?;
+        store.hot_db.do_atomically(store_ops)?;

         Ok(cache)
     }
@@ -69,17 +67,19 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
             pubkeys,
             indices,
             pubkey_bytes,
-            store,
+            _phantom: PhantomData,
         })
     }

     /// Scan the given `state` and add any new validator public keys.
     ///
     /// Does not delete any keys from `self` if they don't appear in `state`.
+    ///
+    /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process.
     pub fn import_new_pubkeys(
         &mut self,
         state: &BeaconState<T::EthSpec>,
-    ) -> Result<(), BeaconChainError> {
+    ) -> Result<Vec<KeyValueStoreOp>, BeaconChainError> {
         if state.validators().len() > self.pubkeys.len() {
             self.import(
                 state.validators()[self.pubkeys.len()..]
                     .iter()
                     .map(|v| v.pubkey),
             )
         } else {
-            Ok(())
+            Ok(vec![])
         }
     }

     /// Adds zero or more validators to `self`.
-    fn import<I>(&mut self, validator_keys: I) -> Result<(), BeaconChainError>
+    fn import<I>(&mut self, validator_keys: I) -> Result<Vec<KeyValueStoreOp>, BeaconChainError>
     where
         I: Iterator<Item = PublicKeyBytes> + ExactSizeIterator,
     {
         self.pubkeys.reserve(validator_keys.len());
         self.indices.reserve(validator_keys.len());

+        let mut store_ops = Vec::with_capacity(validator_keys.len());
         for pubkey in validator_keys {
             let i = self.pubkeys.len();

             if self.indices.contains_key(&pubkey) {
                 return Err(BeaconChainError::DuplicateValidatorPublicKey);
             }

-            // The item is written to disk _before_ it is written into
-            // the local struct.
-            //
-            // This means that a pubkey cache read from disk will always be equivalent to or
-            // _later than_ the cache that was running in the previous instance of Lighthouse.
-            //
-            // The motivation behind this ordering is that we do not want to have states that
-            // reference a pubkey that is not in our cache. However, it's fine to have pubkeys
-            // that are never referenced in a state.
-            self.store
-                .put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?;
+            // Stage the new validator key for writing to disk.
+            // It will be committed atomically when the block that introduced it is written to disk.
+            // Notably it is NOT written while the write lock on the cache is held.
+            // See: https://github.com/sigp/lighthouse/issues/2327
+            store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)));

             self.pubkeys.push(
                 (&pubkey)
@@ -129,7 +124,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
             self.indices.insert(pubkey, i);
         }

-        Ok(())
+        Ok(store_ops)
     }

     /// Get the public key for a validator with index `i`.
@@ -296,9 +291,10 @@ mod test {

         // Add some more keypairs.
         let (state, keypairs) = get_state(12);
-        cache
+        let ops = cache
             .import_new_pubkeys(&state)
             .expect("should import pubkeys");
+        store.hot_db.do_atomically(ops).unwrap();
         check_cache_get(&cache, &keypairs[..]);
         drop(cache);
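The key behavioural change in this file is the new contract: `import_new_pubkeys` no longer writes to disk itself, it stages `KeyValueStoreOp`s that the caller must commit, as the test above does. A sketch of the intended call site during block import (module paths approximate; `other_block_ops` is a hypothetical stand-in for the rest of the block's batched hot-DB writes):

```rust
use beacon_chain::validator_pubkey_cache::ValidatorPubkeyCache; // path approximate
use beacon_chain::{BeaconChainError, BeaconChainTypes, BeaconStore};
use store::KeyValueStoreOp;
use types::BeaconState;

// Sketch only: newly-observed validator keys are committed in the same atomic
// batch as the block that introduced them, so a crash can no longer persist a
// state that references a pubkey missing from the cache.
fn commit_new_pubkeys<T: BeaconChainTypes>(
    cache: &mut ValidatorPubkeyCache<T>,
    state: &BeaconState<T::EthSpec>,
    store: &BeaconStore<T>,
    mut other_block_ops: Vec<KeyValueStoreOp>,
) -> Result<(), BeaconChainError> {
    let mut ops = cache.import_new_pubkeys(state)?;
    ops.append(&mut other_block_ops);
    store.hot_db.do_atomically(ops)?;
    Ok(())
}
```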
diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs
index 998f22f7703..38a55e2212a 100644
--- a/beacon_node/beacon_chain/tests/block_verification.rs
+++ b/beacon_node/beacon_chain/tests/block_verification.rs
@@ -3,7 +3,7 @@ use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
-use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult};
+use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
 use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use logging::test_logger;
@@ -147,14 +147,18 @@ async fn chain_segment_full_segment() {
     // Sneak in a little check to ensure we can process empty chain segments.
     harness
         .chain
-        .process_chain_segment(vec![], CountUnrealized::True)
+        .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import empty chain segment");

     harness
         .chain
-        .process_chain_segment(blocks.clone(), CountUnrealized::True)
+        .process_chain_segment(
+            blocks.clone(),
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
+        )
         .await
         .into_block_error()
         .expect("should import chain segment");
@@ -183,7 +187,11 @@ async fn chain_segment_varying_chunk_size() {
     for chunk in blocks.chunks(*chunk_size) {
         harness
             .chain
-            .process_chain_segment(chunk.to_vec(), CountUnrealized::True)
+            .process_chain_segment(
+                chunk.to_vec(),
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await
             .into_block_error()
             .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
@@ -219,7 +227,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -239,7 +247,7 @@
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -270,7 +278,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -291,7 +299,7 @@
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -317,7 +325,7 @@ async fn assert_invalid_signature(
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -339,7 +347,11 @@ async fn assert_invalid_signature(
     // imported prior to this test.
let _ = harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await; harness.chain.recompute_head_at_current_slot().await; @@ -349,6 +361,7 @@ async fn assert_invalid_signature( snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await; assert!( @@ -400,7 +413,11 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -412,7 +429,8 @@ async fn invalid_signature_gossip_block() { .process_block( signed_block.canonical_root(), Arc::new(signed_block), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await, Err(BlockError::InvalidSignature) @@ -446,7 +464,7 @@ async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -644,7 +662,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -725,6 +743,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .expect("should import valid gossip verified block"); @@ -996,6 +1015,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,6 +1055,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1180,7 +1201,8 @@ async fn add_base_block_to_altair_chain() { .process_block( base_block.canonical_root(), Arc::new(base_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1195,7 +1217,11 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(base_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1313,7 +1339,8 @@ async fn add_altair_block_to_base_chain() { .process_block( altair_block.canonical_root(), Arc::new(altair_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1328,7 +1355,11 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(altair_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs 
b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2336c3ba994..d77cc19678d 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,8 +7,8 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig, + WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ @@ -693,6 +693,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -789,6 +790,7 @@ async fn switches_heads() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,7 +1037,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1317,7 +1319,12 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block.canonical_root(), block, CountUnrealized::True) + .process_block( + block.canonical_root(), + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .unwrap(); } @@ -1879,6 +1886,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b1907bc96e9..b2fc7a6402e 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,8 +7,8 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, - WhenSlotSkipped, + BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, + ServerSentEventHandler, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; @@ -2148,6 +2148,7 @@ async fn weak_subjectivity_sync() { full_block.canonical_root(), Arc::new(full_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index a13946bf2b9..d80db132ef9 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use 
lazy_static::lazy_static;
@@ -687,7 +687,8 @@ async fn run_skip_slot_test(skip_slots: u64) {
             .process_block(
                 harness_a.chain.head_snapshot().beacon_block_root,
                 harness_a.chain.head_snapshot().beacon_block.clone(),
-                CountUnrealized::True
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
             )
             .await
             .unwrap(),
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index 2a2225cbdfd..dfce9745774 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -1556,7 +1556,7 @@ impl ExecutionLayer {
                         &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME,
                         &[metrics::FAILURE],
                     );
-                        crit!(
+                        error!(
                         self.log(),
                         "Builder failed to reveal payload";
                         "info" => "this relay failure may cause a missed proposal",
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 7f6852f364b..645c4ccfaba 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -17,6 +17,7 @@ mod proposer_duties;
 mod publish_blocks;
 mod state_id;
 mod sync_committees;
+mod ui;
 mod validator_inclusion;
 mod version;

@@ -2886,6 +2887,18 @@ pub fn serve(
         },
     );

+    // GET lighthouse/ui/validator_count
+    let get_lighthouse_ui_validator_count = warp::path("lighthouse")
+        .and(warp::path("ui"))
+        .and(warp::path("validator_count"))
+        .and(warp::path::end())
+        .and(chain_filter.clone())
+        .and_then(|chain: Arc<BeaconChain<T>>| {
+            blocking_json_task(move || {
+                ui::get_validator_count(chain).map(api_types::GenericResponse::from)
+            })
+        });
+
     // GET lighthouse/syncing
     let get_lighthouse_syncing = warp::path("lighthouse")
         .and(warp::path("syncing"))
@@ -3353,6 +3366,7 @@ pub fn serve(
         .or(get_lighthouse_attestation_performance.boxed())
         .or(get_lighthouse_block_packing_efficiency.boxed())
         .or(get_lighthouse_merge_readiness.boxed())
+        .or(get_lighthouse_ui_validator_count.boxed())
         .or(get_events.boxed()),
     )
     .boxed()
diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs
index 3c50fb95a2d..5d27f117b02 100644
--- a/beacon_node/http_api/src/publish_blocks.rs
+++ b/beacon_node/http_api/src/publish_blocks.rs
@@ -1,9 +1,11 @@ use crate::metrics;
 use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
-use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
+use beacon_chain::{
+    BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer,
+};
 use lighthouse_network::PubsubMessage;
 use network::NetworkMessage;
-use slog::{crit, error, info, warn, Logger};
+use slog::{error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use tokio::sync::mpsc::UnboundedSender;
@@ -35,7 +37,12 @@ pub async fn publish_block(
     let block_root = block_root.unwrap_or_else(|| block.canonical_root());

     match chain
-        .process_block(block_root, block.clone(), CountUnrealized::True)
+        .process_block(
+            block_root,
+            block.clone(),
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
+        )
         .await
     {
         Ok(root) => {
@@ -65,10 +72,10 @@
             //
             // Check to see the thresholds are non-zero to avoid logging errors with small
             // slot times (e.g., during testing)
-            let crit_threshold = chain.slot_clock.unagg_attestation_production_delay();
-            let error_threshold = crit_threshold / 2;
-            if delay >= crit_threshold {
-                crit!(
+            let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay();
+            let delayed_threshold = too_late_threshold / 2;
+            if delay >= too_late_threshold {
+                error!(
                     log,
                     "Block was broadcast too late";
late"; "msg" => "system may be overloaded, block likely to be orphaned", @@ -76,7 +83,7 @@ pub async fn publish_block( "slot" => block.slot(), "root" => ?root, ) - } else if delay >= error_threshold { + } else if delay >= delayed_threshold { error!( log, "Block broadcast was delayed"; diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs new file mode 100644 index 00000000000..8f9400dbbd0 --- /dev/null +++ b/beacon_node/http_api/src/ui.rs @@ -0,0 +1,71 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::types::ValidatorStatus; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use warp_utils::reject::beacon_chain_error; + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct ValidatorCountResponse { + pub active_ongoing: u64, + pub active_exiting: u64, + pub active_slashed: u64, + pub pending_initialized: u64, + pub pending_queued: u64, + pub withdrawal_possible: u64, + pub withdrawal_done: u64, + pub exited_unslashed: u64, + pub exited_slashed: u64, +} + +pub fn get_validator_count( + chain: Arc>, +) -> Result { + let spec = &chain.spec; + let mut active_ongoing = 0; + let mut active_exiting = 0; + let mut active_slashed = 0; + let mut pending_initialized = 0; + let mut pending_queued = 0; + let mut withdrawal_possible = 0; + let mut withdrawal_done = 0; + let mut exited_unslashed = 0; + let mut exited_slashed = 0; + + chain + .with_head(|head| { + let state = &head.beacon_state; + let epoch = state.current_epoch(); + for validator in state.validators() { + let status = + ValidatorStatus::from_validator(validator, epoch, spec.far_future_epoch); + + match status { + ValidatorStatus::ActiveOngoing => active_ongoing += 1, + ValidatorStatus::ActiveExiting => active_exiting += 1, + ValidatorStatus::ActiveSlashed => active_slashed += 1, + ValidatorStatus::PendingInitialized => pending_initialized += 1, + ValidatorStatus::PendingQueued => pending_queued += 1, + ValidatorStatus::WithdrawalPossible => withdrawal_possible += 1, + ValidatorStatus::WithdrawalDone => withdrawal_done += 1, + ValidatorStatus::ExitedUnslashed => exited_unslashed += 1, + ValidatorStatus::ExitedSlashed => exited_slashed += 1, + // Since we are not invoking `superset`, all other variants will be 0. + _ => (), + } + } + Ok::<(), BeaconChainError>(()) + }) + .map_err(beacon_chain_error)?; + + Ok(ValidatorCountResponse { + active_ongoing, + active_exiting, + active_slashed, + pending_initialized, + pending_queued, + withdrawal_possible, + withdrawal_done, + exited_unslashed, + exited_slashed, + }) +} diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 71566b87780..0ae3d9a23b6 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -130,6 +130,9 @@ pub struct Config { /// Whether metrics are enabled. pub metrics_enabled: bool, + + /// Whether light client protocols should be enabled. + pub enable_light_client_server: bool, } impl Default for Config { @@ -207,6 +210,7 @@ impl Default for Config { shutdown_after_sync: false, topics: Vec::new(), metrics_enabled: false, + enable_light_client_server: false, } } } @@ -284,9 +288,11 @@ impl From for NetworkLoad { /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
@@ -284,9 +288,11 @@ impl From<u8> for NetworkLoad {

 /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork.
 pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> GossipsubConfig {
     // The function used to generate a gossipsub message id
-    // We use the first 8 bytes of SHA256(data) for content addressing
-    let fast_gossip_message_id =
-        |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]);
+    // We use the first 8 bytes of SHA256(topic, data) for content addressing
+    let fast_gossip_message_id = |message: &RawGossipsubMessage| {
+        let data = [message.topic.as_str().as_bytes(), &message.data].concat();
+        FastMessageId::from(&Sha256::digest(data)[..8])
+    };
     fn prefix(
         prefix: [u8; 4],
         message: &GossipsubMessage,
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
index 0f291359565..a468239a9e4 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
@@ -405,7 +405,7 @@ impl PeerManager {
             debug!(self.log, "Identified Peer"; "peer" => %peer_id,
             "protocol_version" => &info.protocol_version,
             "agent_version" => &info.agent_version,
-            "listening_ addresses" => ?info.listen_addrs,
+            "listening_addresses" => ?info.listen_addrs,
             "observed_address" => ?info.observed_addr,
             "protocols" => ?info.protocols
             );
@@ -501,6 +501,7 @@
             Protocol::Ping => PeerAction::MidToleranceError,
             Protocol::BlocksByRange => PeerAction::MidToleranceError,
             Protocol::BlocksByRoot => PeerAction::MidToleranceError,
+            Protocol::LightClientBootstrap => PeerAction::LowToleranceError,
             Protocol::Goodbye => PeerAction::LowToleranceError,
             Protocol::MetaData => PeerAction::LowToleranceError,
             Protocol::Status => PeerAction::LowToleranceError,
@@ -517,6 +518,7 @@
                 Protocol::BlocksByRange => return,
                 Protocol::BlocksByRoot => return,
                 Protocol::Goodbye => return,
+                Protocol::LightClientBootstrap => return,
                 Protocol::MetaData => PeerAction::LowToleranceError,
                 Protocol::Status => PeerAction::LowToleranceError,
             }
@@ -531,6 +533,7 @@
             Protocol::Ping => PeerAction::LowToleranceError,
             Protocol::BlocksByRange => PeerAction::MidToleranceError,
             Protocol::BlocksByRoot => PeerAction::MidToleranceError,
+            Protocol::LightClientBootstrap => return,
             Protocol::Goodbye => return,
             Protocol::MetaData => return,
             Protocol::Status => return,
diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs
index c84e368f169..175dfaf0188 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs
@@ -139,7 +139,7 @@ impl NetworkBehaviour for PeerManager {
             // TODO: directly emit the ban event?
             BanResult::BadScore => {
                 // This is a faulty state
-                    error!(self.log, "Connected to a banned peer, re-banning"; "peer_id" => %peer_id);
+                    error!(self.log, "Connected to a banned peer.
Re-banning"; "peer_id" => %peer_id); // Reban the peer self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); return; diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a46a05a8ce3..a4dd602b3fd 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,8 +16,8 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, + light_client_bootstrap::LightClientBootstrap, EthSpec, ForkContext, ForkName, Hash256, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -70,6 +70,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. @@ -230,6 +231,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), }; // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { @@ -472,7 +474,11 @@ fn handle_v1_request( Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - + Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap( + LightClientBootstrapRequest { + root: Hash256::from_ssz_bytes(decoded_buffer)?, + }, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -544,6 +550,9 @@ fn handle_v1_response( Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, )))), + Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap( + LightClientBootstrap::from_ssz_bytes(decoded_buffer)?, + ))), } } @@ -867,6 +876,9 @@ mod tests { OutboundRequest::MetaData(metadata) => { assert_eq!(decoded, InboundRequest::MetaData(metadata)) } + OutboundRequest::LightClientBootstrap(bootstrap) => { + assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap)) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9ac062adc46..9d6229eb382 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -285,7 +285,7 @@ where } else { if !matches!(response, RPCCodedResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses - trace!(self.log, "Inbound stream has expired, response not sent"; + trace!(self.log, "Inbound stream has expired. 
Response not sent"; "response" => %response, "id" => inbound_id); } return; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 26d755a6e06..5da595c3db7 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,7 +12,9 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, +}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -243,6 +245,9 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. + LightClientBootstrap(LightClientBootstrap), + /// A PONG response to a PING request. Pong(Ping), @@ -273,6 +278,12 @@ pub enum RPCCodedResponse { StreamTermination(ResponseTermination), } +/// Request a light_client_bootstrap for lightclients peers. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientBootstrapRequest { + pub root: Hash256, +} + /// The code assigned to an erroneous `RPCResponse`. #[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] @@ -321,6 +332,7 @@ impl RPCCodedResponse { RPCResponse::BlocksByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, + RPCResponse::LightClientBootstrap(_) => false, }, RPCCodedResponse::Error(_, _) => true, // Stream terminations are part of responses that have chunks @@ -355,6 +367,7 @@ impl RPCResponse { RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, + RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } } @@ -390,6 +403,9 @@ impl std::fmt::Display for RPCResponse { } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), + RPCResponse::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap Slot: {}", bootstrap.header.slot) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7b0092ef713..203a642a8be 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -26,8 +26,8 @@ pub(crate) use protocol::{InboundRequest, RPCProtocol}; pub use handler::SubstreamId; pub use methods::{ - BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks, - RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, + BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, + MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, }; pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; @@ -108,18 +108,24 @@ pub struct RPC { /// Queue of events to be processed. events: Vec, RPCHandler>>, fork_context: Arc, + enable_light_client_server: bool, /// Slog logger for RPC behaviour. 
    log: slog::Logger,
 }

 impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
-    pub fn new(fork_context: Arc<ForkContext>, log: slog::Logger) -> Self {
+    pub fn new(
+        fork_context: Arc<ForkContext>,
+        enable_light_client_server: bool,
+        log: slog::Logger,
+    ) -> Self {
         let log = log.new(o!("service" => "libp2p_rpc"));
         let limiter = RPCRateLimiterBuilder::new()
             .n_every(Protocol::MetaData, 2, Duration::from_secs(5))
             .n_every(Protocol::Ping, 2, Duration::from_secs(10))
             .n_every(Protocol::Status, 5, Duration::from_secs(15))
             .one_every(Protocol::Goodbye, Duration::from_secs(10))
+            .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10))
             .n_every(
                 Protocol::BlocksByRange,
                 methods::MAX_REQUEST_BLOCKS,
@@ -132,6 +138,7 @@
             limiter,
             events: Vec::new(),
             fork_context,
+            enable_light_client_server,
             log,
         }
     }
@@ -188,6 +195,7 @@
             RPCProtocol {
                 fork_context: self.fork_context.clone(),
                 max_rpc_size: max_rpc_size(&self.fork_context),
+                enable_light_client_server: self.enable_light_client_server,
                 phantom: PhantomData,
             },
             (),
diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs
index 7d5acc43643..774303800e8 100644
--- a/beacon_node/lighthouse_network/src/rpc/outbound.rs
+++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs
@@ -38,6 +38,7 @@ pub enum OutboundRequest<TSpec: EthSpec> {
     Goodbye(GoodbyeReason),
     BlocksByRange(OldBlocksByRangeRequest),
     BlocksByRoot(BlocksByRootRequest),
+    LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
     MetaData(PhantomData<TSpec>),
 }
@@ -84,9 +85,12 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
                 ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
                 ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
             ],
+            // Note: This match arm is technically unreachable as we only respond to light client requests
+            // that we generate from the beacon state.
+            // We do not make light client rpc requests from the beacon node
+            OutboundRequest::LightClientBootstrap(_) => vec![],
         }
     }
-
     /* These functions are used in the handler for stream management */

     /// Number of responses expected for this request.
@@ -98,6 +102,7 @@
             OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
             OutboundRequest::Ping(_) => 1,
             OutboundRequest::MetaData(_) => 1,
+            OutboundRequest::LightClientBootstrap(_) => 1,
         }
     }

@@ -110,6 +115,7 @@
             OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
             OutboundRequest::Ping(_) => Protocol::Ping,
             OutboundRequest::MetaData(_) => Protocol::MetaData,
+            OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
         }
     }

@@ -121,6 +127,7 @@
         // variants that have `multiple_responses()` can have values.
            OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
             OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
+            OutboundRequest::LightClientBootstrap(_) => unreachable!(),
             OutboundRequest::Status(_) => unreachable!(),
             OutboundRequest::Goodbye(_) => unreachable!(),
             OutboundRequest::Ping(_) => unreachable!(),
@@ -178,6 +185,9 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
             OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
             OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
+            OutboundRequest::LightClientBootstrap(bootstrap) => {
+                write!(f, "Lightclient Bootstrap: {}", bootstrap.root)
+            }
         }
     }
 }
diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs
index 81960214b16..1f40f81971c 100644
--- a/beacon_node/lighthouse_network/src/rpc/protocol.rs
+++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs
@@ -153,6 +153,8 @@ pub enum Protocol {
     Ping,
     /// The `MetaData` protocol name.
     MetaData,
+    /// The `LightClientBootstrap` protocol name.
+    LightClientBootstrap,
 }

 /// RPC Versions
@@ -179,6 +181,7 @@ impl std::fmt::Display for Protocol {
             Protocol::BlocksByRoot => "beacon_blocks_by_root",
             Protocol::Ping => "ping",
             Protocol::MetaData => "metadata",
+            Protocol::LightClientBootstrap => "light_client_bootstrap",
         };
         f.write_str(repr)
     }
@@ -207,6 +210,7 @@ impl std::fmt::Display for Version {
 pub struct RPCProtocol<TSpec: EthSpec> {
     pub fork_context: Arc<ForkContext>,
     pub max_rpc_size: usize,
+    pub enable_light_client_server: bool,
     pub phantom: PhantomData<TSpec>,
 }

@@ -216,7 +220,7 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
     /// The list of supported RPC protocols for Lighthouse.
     fn protocol_info(&self) -> Self::InfoIter {
-        vec![
+        let mut supported_protocols = vec![
             ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
             ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
             // V2 variants have higher preference than V1
             ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
             ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
             ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
-        ]
+        ];
+        if self.enable_light_client_server {
+            supported_protocols.push(ProtocolId::new(
+                Protocol::LightClientBootstrap,
+                Version::V1,
+                Encoding::SSZSnappy,
+            ));
+        }
+        supported_protocols
     }
 }

@@ -289,6 +301,10 @@ impl ProtocolId {
                 <Ping as Encode>::ssz_fixed_len(),
                 <Ping as Encode>::ssz_fixed_len(),
             ),
+            Protocol::LightClientBootstrap => RpcLimits::new(
+                <LightClientBootstrapRequest as Encode>::ssz_fixed_len(),
+                <LightClientBootstrapRequest as Encode>::ssz_fixed_len(),
+            ),
             Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty
         }
     }
@@ -312,6 +328,10 @@ impl ProtocolId {
                 <MetaDataV1<T> as Encode>::ssz_fixed_len(),
                 <MetaDataV2<T> as Encode>::ssz_fixed_len(),
             ),
+            Protocol::LightClientBootstrap => RpcLimits::new(
+                <LightClientBootstrap<T> as Encode>::ssz_fixed_len(),
+                <LightClientBootstrap<T> as Encode>::ssz_fixed_len(),
+            ),
         }
     }

@@ -417,57 +437,13 @@ pub enum InboundRequest<TSpec: EthSpec> {
     Goodbye(GoodbyeReason),
     BlocksByRange(OldBlocksByRangeRequest),
     BlocksByRoot(BlocksByRootRequest),
+    LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
     MetaData(PhantomData<TSpec>),
 }

-impl<TSpec: EthSpec> UpgradeInfo for InboundRequest<TSpec> {
-    type Info = ProtocolId;
-    type InfoIter = Vec<Self::Info>;
-
-    // add further protocols as we support more encodings/versions
-    fn protocol_info(&self) -> Self::InfoIter {
-        self.supported_protocols()
-    }
-}
-
 /// Implements the encoding per supported protocol for `RPCRequest`.
impl InboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - InboundRequest::Status(_) => vec![ProtocolId::new( - Protocol::Status, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::Goodbye(_) => vec![ProtocolId::new( - Protocol::Goodbye, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::BlocksByRange(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::BlocksByRoot(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::Ping(_) => vec![ProtocolId::new( - Protocol::Ping, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::MetaData(_) => vec![ - ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. @@ -479,6 +455,7 @@ impl InboundRequest { InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, + InboundRequest::LightClientBootstrap(_) => 1, } } @@ -491,6 +468,7 @@ impl InboundRequest { InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, + InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -506,6 +484,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), InboundRequest::MetaData(_) => unreachable!(), + InboundRequest::LightClientBootstrap(_) => unreachable!(), } } } @@ -609,6 +588,9 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), + InboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 70b14c33dec..6ba9f6e9419 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -73,6 +73,8 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// LightClientBootstrap rate limiter. + lcbootstrap_rl: Limiter, } /// Error type for non conformant requests @@ -98,6 +100,8 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the LightClientBootstrap protocol. 
+    lcbootstrap_quota: Option<Quota>,
 }

 impl RPCRateLimiterBuilder {
@@ -116,6 +120,7 @@
             Protocol::Goodbye => self.goodbye_quota = q,
             Protocol::BlocksByRange => self.bbrange_quota = q,
             Protocol::BlocksByRoot => self.bbroots_quota = q,
+            Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
         }
         self
     }
@@ -155,6 +160,9 @@
         let bbrange_quota = self
             .bbrange_quota
             .ok_or("BlocksByRange quota not specified")?;
+        let lcbootstrap_quota = self
+            .lcbootstrap_quota
+            .ok_or("LightClientBootstrap quota not specified")?;

         // create the rate limiters
         let ping_rl = Limiter::from_quota(ping_quota)?;
@@ -163,6 +171,7 @@
         let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
         let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
         let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
+        let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quota)?;

         // check for peers to prune every 30 seconds, starting in 30 seconds
         let prune_every = tokio::time::Duration::from_secs(30);
@@ -176,6 +185,7 @@
             goodbye_rl,
             bbroots_rl,
             bbrange_rl,
+            lcbootstrap_rl,
             init_time: Instant::now(),
         })
     }
@@ -199,6 +209,7 @@
             Protocol::Goodbye => &mut self.goodbye_rl,
             Protocol::BlocksByRange => &mut self.bbrange_rl,
             Protocol::BlocksByRoot => &mut self.bbroots_rl,
+            Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl,
         };
         check(limiter)
     }
diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs
index e5d81737cfb..849a86f51ba 100644
--- a/beacon_node/lighthouse_network/src/service/api_types.rs
+++ b/beacon_node/lighthouse_network/src/service/api_types.rs
@@ -1,12 +1,12 @@ use std::sync::Arc;

 use libp2p::core::connection::ConnectionId;
-use types::{EthSpec, SignedBeaconBlock};
+use types::{light_client_bootstrap::LightClientBootstrap, EthSpec, SignedBeaconBlock};

 use crate::rpc::{
     methods::{
-        BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse,
-        RPCResponse, ResponseTermination, StatusMessage,
+        BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
+        OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
     },
     OutboundRequest, SubstreamId,
 };
@@ -34,6 +34,8 @@ pub enum Request {
     BlocksByRange(BlocksByRangeRequest),
     /// A request blocks root request.
     BlocksByRoot(BlocksByRootRequest),
+    /// A light client bootstrap request.
+    LightClientBootstrap(LightClientBootstrapRequest),
 }

 impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
@@ -47,6 +49,7 @@
                     step: 1,
                 })
             }
+            Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
             Request::Status(s) => OutboundRequest::Status(s),
         }
     }
@@ -66,6 +69,8 @@ pub enum Response<TSpec: EthSpec> {
     BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
     /// A response to a get BLOCKS_BY_ROOT request.
     BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
+    /// A response to a LightClientBootstrap request.
+ LightClientBootstrap(LightClientBootstrap), } impl std::convert::From> for RPCCodedResponse { @@ -80,6 +85,9 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + Response::LightClientBootstrap(b) => { + RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) + } } } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 97d96d171d2..a6f1ce20ade 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -259,7 +259,11 @@ impl Network { (gossipsub, update_gossipsub_scores) }; - let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); + let eth2_rpc = RPC::new( + ctx.fork_context.clone(), + config.enable_light_client_server, + log.clone(), + ); let discovery = { // Build and start the discovery sub-behaviour @@ -978,6 +982,9 @@ impl Network { Request::Status(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) } + Request::LightClientBootstrap(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"]) + } Request::BlocksByRange { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) } @@ -1247,6 +1254,14 @@ impl Network { ); Some(event) } + InboundRequest::LightClientBootstrap(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::LightClientBootstrap(req), + ); + Some(event) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1274,6 +1289,10 @@ impl Network { RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + // Should never be reached + RPCResponse::LightClientBootstrap(bootstrap) => { + self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) + } } } Ok(RPCReceived::EndOfStream(id, termination)) => { diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index ce03f61ffe6..5f09aec27a7 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -74,6 +74,17 @@ impl SyncState { } } + pub fn is_syncing_finalized(&self) -> bool { + match self { + SyncState::SyncingFinalized { .. } => true, + SyncState::SyncingHead { .. } => false, + SyncState::SyncTransition => false, + SyncState::BackFillSyncing { .. } => false, + SyncState::Synced => false, + SyncState::Stalled => false, + } + } + /// Returns true if the node is synced. /// /// NOTE: We consider the node synced if it is fetching old historical blocks. 
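The `is_syncing_finalized` helper added above exists to drive the `NotifyExecutionLayer` flag threaded through this patch: while range sync is importing blocks beneath the finalized checkpoint, the beacon processor can mark them optimistic and skip the per-block `newPayload` round-trip, exactly as the beacon processor hunk below does. The mapping is simply (import path approximate):

```rust
use beacon_chain::NotifyExecutionLayer;
use lighthouse_network::types::SyncState; // path approximate

// Sketch only: segments synced beneath finality skip execution-layer notification.
fn segment_notification(sync_state: &SyncState) -> NotifyExecutionLayer {
    if sync_state.is_syncing_finalized() {
        NotifyExecutionLayer::No
    } else {
        NotifyExecutionLayer::Yes
    }
}
```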
diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index f477878ac0d..9528cfd1dfb 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -41,10 +41,11 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::parking_lot::Mutex; -use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; +use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; +use lighthouse_network::rpc::LightClientBootstrapRequest; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -156,6 +157,10 @@ const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; + /// The name of the manager tokio task. const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -195,6 +200,7 @@ pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -557,6 +563,22 @@ impl WorkEvent { } } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. + pub fn lightclient_bootstrap_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + }, + } + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. pub fn work_type(&self) -> &'static str { self.work.str_id() @@ -733,6 +755,11 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRootRequest, }, + LightClientBootstrapRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + }, } impl Work { @@ -755,6 +782,7 @@ impl Work { Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, + Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, } @@ -898,7 +926,7 @@ impl BeaconProcessor { let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); - + let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). 
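On the scheduling side, bootstrap work is deliberately cheap to shed: its events are created with `drop_during_sync: true` and its queue is drained only once every consensus-critical queue is empty. A sketch of the hand-off the router performs later in this patch; `beacon_processor_send` and the in-scope `WorkEvent` type are assumptions standing in for the processor's internal plumbing:

```rust
use beacon_chain::BeaconChainTypes;
use lighthouse_network::{rpc::LightClientBootstrapRequest, PeerId, PeerRequestId};
use tokio::sync::mpsc;

// Sketch only: enqueue an inbound bootstrap request as low-priority work.
// `WorkEvent` is the beacon processor's work type defined in the hunk above.
fn queue_bootstrap_request<T: BeaconChainTypes>(
    beacon_processor_send: &mpsc::Sender<WorkEvent<T>>, // hypothetical handle
    log: &slog::Logger,
    peer_id: PeerId,
    request_id: PeerRequestId,
    request: LightClientBootstrapRequest,
) {
    let event = WorkEvent::lightclient_bootstrap_request(peer_id, request_id, request);
    if let Err(e) = beacon_processor_send.try_send(event) {
        // Bootstrap serving is best-effort; under load the request is dropped.
        slog::debug!(log, "Dropped light client bootstrap request"; "error" => %e);
    }
}
```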
let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); @@ -1137,6 +1165,8 @@ impl BeaconProcessor { } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); // This statement should always be the final else statement. + } else if let Some(item) = lcbootstrap_queue.pop() { + self.spawn_worker(item, toolbox); } else { // Let the journal know that a worker is freed and there's nothing else // for it to do. @@ -1237,6 +1267,9 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::LightClientBootstrapRequest { .. } => { + lcbootstrap_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. } => { unknown_block_attestation_queue.push(work) } @@ -1554,8 +1587,24 @@ impl BeaconProcessor { /* * Verification for a chain segment (multiple blocks). */ - Work::ChainSegment { process_id, blocks } => task_spawner - .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + Work::ChainSegment { process_id, blocks } => { + let notify_execution_layer = if self + .network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + + task_spawner.spawn_async(async move { + worker + .process_chain_segment(process_id, blocks, notify_execution_layer) + .await + }) + } /* * Processing of Status Messages. */ @@ -1594,6 +1643,16 @@ impl BeaconProcessor { request, ) }), + /* + * Processing of lightclient bootstrap requests from other peers. + */ + Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking(move || { + worker.handle_light_client_bootstrap(peer_id, request_id, request) + }), Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index eaf5cd005cc..947d9cfe274 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -7,7 +7,7 @@ use beacon_chain::{ sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, - GossipVerifiedBlock, + GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; @@ -793,7 +793,7 @@ impl Worker { | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( @@ -805,7 +805,7 @@ impl Worker { return None; } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; @@ -827,7 +827,7 @@ impl Worker { // TODO(merge): reconsider peer scoring for this event. 
            | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. })
             | Err(e @ BlockError::GenesisBlock) => {
-                warn!(self.log, "Could not verify block for gossip, rejecting the block";
+                warn!(self.log, "Could not verify block for gossip. Rejecting the block";
                     "error" => %e);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
                 self.gossip_penalize_peer(
@@ -934,7 +939,12 @@
         match self
             .chain
-            .process_block(block_root, verified_block, CountUnrealized::True)
+            .process_block(
+                block_root,
+                verified_block,
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await
         {
             Ok(block_root) => {
diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs
index f907c49b7d5..1cbc64b6329 100644
--- a/beacon_node/network/src/beacon_processor/worker/mod.rs
+++ b/beacon_node/network/src/beacon_processor/worker/mod.rs
@@ -38,7 +38,7 @@ impl<T: BeaconChainTypes> Worker<T> {
     /// Creates a log if there is an internal error.
     fn send_network_message(&self, message: NetworkMessage) {
         self.network_tx.send(message).unwrap_or_else(|e| {
-            debug!(self.log, "Could not send message to the network service, likely shutdown";
+            debug!(self.log, "Could not send message to the network service. Likely shutdown";
                 "error" => %e)
         });
     }
diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
index 37aee01716b..3e354a70d21 100644
--- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
+++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
@@ -11,7 +11,7 @@ use slog::{debug, error};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use task_executor::TaskExecutor;
-use types::{Epoch, EthSpec, Hash256, Slot};
+use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot};

 use super::Worker;

@@ -204,6 +204,79 @@ impl<T: BeaconChainTypes> Worker<T> {
         )
     }

+    /// Handle a `LightClientBootstrap` request from the peer.
+    pub fn handle_light_client_bootstrap(
+        self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) {
+        let block_root = request.root;
+        let state_root = match self.chain.get_blinded_block(&block_root) {
+            Ok(signed_block) => match signed_block {
+                Some(signed_block) => signed_block.state_root(),
+                None => {
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "Bootstrap not available".into(),
+                        request_id,
+                    );
+                    return;
+                }
+            },
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        let mut beacon_state = match self.chain.get_state(&state_root, None) {
+            Ok(beacon_state) => match beacon_state {
+                Some(state) => state,
+                None => {
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "Bootstrap not available".into(),
+                        request_id,
+                    );
+                    return;
+                }
+            },
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) {
+            Ok(bootstrap) => bootstrap,
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        self.send_response(
+            peer_id,
+            Response::LightClientBootstrap(bootstrap),
+            request_id,
+        )
+    }
+
     /// Handle a `BlocksByRange` request from the peer.
     pub fn handle_blocks_by_range_request(
         self,
diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
index 5d97894fe40..1ec045e97eb 100644
--- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
+++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
@@ -10,6 +10,7 @@ use crate::sync::{BatchProcessResult, ChainId};
 use beacon_chain::CountUnrealized;
 use beacon_chain::{
     BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
+    NotifyExecutionLayer,
 };
 use lighthouse_network::PeerAction;
 use slog::{debug, error, info, warn};
@@ -85,7 +86,12 @@
         let slot = block.slot();
         let result = self
             .chain
-            .process_block(block_root, block, CountUnrealized::True)
+            .process_block(
+                block_root,
+                block,
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await;

         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@@ -127,6 +133,7 @@
         &self,
         sync_type: ChainSegmentProcessId,
         downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        notify_execution_layer: NotifyExecutionLayer,
     ) {
         let result = match sync_type {
             // this is a request from the range sync
@@ -136,7 +143,11 @@
                 let sent_blocks = downloaded_blocks.len();

                 match self
-                    .process_blocks(downloaded_blocks.iter(), count_unrealized)
+                    .process_blocks(
+                        downloaded_blocks.iter(),
+                        count_unrealized,
+                        notify_execution_layer,
+                    )
                     .await
                 {
                     (_, Ok(_)) => {
@@ -215,7 +226,11 @@
                 // parent blocks are ordered from highest slot to lowest, so we need to process in
                 // reverse
                 match self
-                    .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True)
+                    .process_blocks(
+                        downloaded_blocks.iter().rev(),
+                        CountUnrealized::True,
+                        notify_execution_layer,
+                    )
                     .await
                 {
                     (imported_blocks, Err(e)) => {
@@ -246,11 +261,12 @@
         &self,
         downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
         count_unrealized:
CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec> = downloaded_blocks.cloned().collect(); match self .chain - .process_chain_segment(blocks, count_unrealized) + .process_chain_segment(blocks, count_unrealized, notify_execution_layer) .await { ChainSegmentResult::Successful { imported_blocks } => { @@ -428,7 +444,7 @@ impl Worker { } else { // The block is in the future, but not too far. debug!( - self.log, "Block is slightly ahead of our slot clock, ignoring."; + self.log, "Block is slightly ahead of our slot clock. Ignoring."; "present_slot" => present_slot, "block_slot" => block_slot, "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 03b877506fb..5df308f2592 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -168,6 +168,9 @@ impl Router { Request::BlocksByRoot(request) => self .processor .on_blocks_by_root_request(peer_id, id, request), + Request::LightClientBootstrap(request) => self + .processor + .on_lightclient_bootstrap(peer_id, id, request), } } @@ -192,6 +195,7 @@ impl Router { self.processor .on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::LightClientBootstrap(_) => unreachable!(), } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index ce11cbdcef3..3c9a4a81fb9 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -160,6 +160,18 @@ impl Processor { )) } + /// Handle a `LightClientBootstrap` request from the peer. + pub fn on_lightclient_bootstrap( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request( + peer_id, request_id, request, + )) + } + /// Handle a `BlocksByRange` request from the peer. pub fn on_blocks_by_range_request( &mut self, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index cdef904715c..230c883a937 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -633,7 +633,7 @@ impl SyncManager { // Some logs. if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { - debug!(self.log, "Execution engine not online, dropping active requests."; + debug!(self.log, "Execution engine not online. 
Dropping active requests."; "dropped_single_blocks_requests" => dropped_single_blocks_requests, "dropped_parent_chain_requests" => dropped_parent_chain_requests, ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 45ade7034c4..c81fed24439 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -242,7 +242,7 @@ impl SyncNetworkContext { source: ReportSource::SyncService, }) .unwrap_or_else(|_| { - warn!(self.log, "Could not report peer, channel failed"); + warn!(self.log, "Could not report peer: channel failed"); }); } @@ -257,7 +257,7 @@ impl SyncNetworkContext { msg, }) .unwrap_or_else(|e| { - warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); + warn!(self.log, "Could not report peer: channel failed"; "error"=> %e); }); } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index b00d56513cc..44a995176d1 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -875,4 +875,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [experimental]") .takes_value(false) ) + .arg( + Arg::with_name("gui") + .long("gui") + .hidden(true) + .help("Enable the graphical user interface and all its requirements. \ + This is equivalent to --http and --validator-monitor-auto.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 406074fe388..e98b585f5f3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -14,6 +14,7 @@ use std::cmp::max; use std::fmt::Debug; use std::fmt::Write; use std::fs; +use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -708,8 +709,11 @@ pub fn get_config( client_config.chain.builder_fallback_disable_checks = cli_args.is_present("builder-fallback-disable-checks"); - // Light client server config. - client_config.chain.enable_light_client_server = cli_args.is_present("light-client-server"); + // Graphical user interface config. + if cli_args.is_present("gui") { + client_config.http_api.enabled = true; + client_config.validator_monitor_auto = true; + } Ok(client_config) } @@ -840,9 +844,11 @@ pub fn set_network_config( } if cli_args.is_present("enr-match") { - // set the enr address to localhost if the address is 0.0.0.0 - if config.listen_address == "0.0.0.0".parse::().expect("valid ip addr") { - config.enr_address = Some("127.0.0.1".parse::().expect("valid ip addr")); + // set the enr address to localhost if the address is unspecified + if config.listen_address == IpAddr::V4(Ipv4Addr::UNSPECIFIED) { + config.enr_address = Some(IpAddr::V4(Ipv4Addr::LOCALHOST)); + } else if config.listen_address == IpAddr::V6(Ipv6Addr::UNSPECIFIED) { + config.enr_address = Some(IpAddr::V6(Ipv6Addr::LOCALHOST)); } else { config.enr_address = Some(config.listen_address); } @@ -922,6 +928,9 @@ pub fn set_network_config( config.discv5_config.table_filter = |_| true; } + // Light client server config. + config.enable_light_client_server = cli_args.is_present("light-client-server"); + Ok(()) } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index aff2be4cf14..4f63f4e7f97 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -386,16 +386,6 @@ impl, Cold: ItemStore> HotColdDB } } - /// Get a schema V8 or earlier full block by reading it and its payload from disk. 
- pub fn get_full_block_prior_to_v9( - &self, - block_root: &Hash256, - ) -> Result>, Error> { - self.get_block_with(block_root, |bytes| { - SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) - }) - } - /// Convert a blinded block into a full block by loading its execution payload if necessary. pub fn make_full_block( &self, diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index d05677465b5..a43fa10e649 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -1,9 +1,6 @@ # Summary * [Introduction](./intro.md) -* [Become a Validator](./mainnet-validator.md) - * [Become a Testnet Validator](./testnet-validator.md) -* [Merge Migration](./merge-migration.md) * [Installation](./installation.md) * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) @@ -13,6 +10,9 @@ * [Cross-Compiling](./cross-compiling.md) * [Homebrew](./homebrew.md) * [Update Priorities](./installation-priorities.md) +* [Run a Node](./run_a_node.md) +* [Become a Validator](./mainnet-validator.md) + * [Become a Testnet Validator](./testnet-validator.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) @@ -46,6 +46,7 @@ * [Pre-Releases](./advanced-pre-releases.md) * [Release Candidates](./advanced-release-candidates.md) * [MEV and Lighthouse](./builders.md) + * [Merge Migration](./merge-migration.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index c1ba6a2dcc6..763372692ee 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -99,6 +99,28 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio } ``` +### `/lighthouse/ui/validator_count` + +```bash +curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "active_ongoing":479508, + "active_exiting":0, + "active_slashed":0, + "pending_initialized":28, + "pending_queued":0, + "withdrawal_possible":933, + "withdrawal_done":0, + "exited_unslashed":0, + "exited_slashed":3 + } +} +``` + ### `/lighthouse/syncing` ```bash diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 2b0ac836a4e..0982e10ab90 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -19,13 +19,13 @@ validator client or the slasher**. | v2.0.0 | Oct 2021 | v5 | no | | v2.1.0 | Jan 2022 | v8 | no | | v2.2.0 | Apr 2022 | v8 | no | -| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) | -| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | +| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 | +| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 | | v2.5.0 | Aug 2022 | v11 | yes | | v3.0.0 | Aug 2022 | v11 | yes | | v3.1.0 | Sep 2022 | v12 | yes | | v3.2.0 | Oct 2022 | v12 | yes | -| v3.3.0 | TBD | v13 | yes | +| v3.3.0 | Nov 2022 | v13 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). diff --git a/book/src/intro.md b/book/src/intro.md index fca075892b1..ef16913d686 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -18,6 +18,7 @@ We implement the specification as defined in the You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. +- Run your very [own beacon node](./run_a_node.md). 
- Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index c0ba048997c..08f1b51e42a 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -61,6 +61,7 @@ the relevant page for your execution engine for the required flags: - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) +- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) Once you have configured your execution engine to open up the engine API (usually on port 8551) you should add the URL to your `lighthouse bn` flags with `--execution-endpoint `, as well as diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md new file mode 100644 index 00000000000..5ce42aa6305 --- /dev/null +++ b/book/src/run_a_node.md @@ -0,0 +1,171 @@ +# Run a Node + +This document provides details for users who want to run a Lighthouse beacon node. +You should have completed one [Installation](./installation.md) method of your choice before continuing with the following steps: + +1. Set up an [execution node](#step-1-set-up-an-execution-node); +1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider); +1. Run [Lighthouse](#step-3-run-lighthouse); +1. [Check logs](#step-4-check-logs); and +1. [Further reading](#step-5-further-reading). + +Checkpoint sync is *optional*; however, we recommend it since it is substantially faster +than syncing from genesis while still providing the same functionality. + +## Step 1: Set up an execution node + +The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions +present in blocks. Two flags are used to configure this connection: + +- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be + `http://localhost:8551`. +- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the + execution engine. This is a mandatory form of authentication that ensures that Lighthouse +has authority to control the execution engine. One way to generate this file is shown at the end of this step. + +Each execution engine has its own flags for configuring the engine API and JWT. +Please consult the relevant page of your execution engine for the required flags: + +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) +- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) + +The execution engine connection must be *exclusive*, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node.
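+
+If your execution engine does not already create a JWT secret file for you, you can generate one
+yourself. A minimal sketch using `openssl` (the `/secrets/jwt.hex` path is only an example; it
+must match the path you pass to `--execution-jwt`):
+
+```
+openssl rand -hex 32 | tr -d "\n" > /secrets/jwt.hex
+```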
+ +## Step 2: Choose a checkpoint sync provider + +Lighthouse supports fast sync from a recent finalized checkpoint. +The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint) +provided by the Ethereum community. + +In [step 3](#step-3-run-lighthouse), when running Lighthouse, +we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag. + +### Use a community checkpoint sync endpoint + +The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL. + +For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`, +which we will use in [step 3](#step-3-run-lighthouse). + +## Step 3: Run Lighthouse + +To run Lighthouse, we use the three flags from the steps above: +- `--execution-endpoint`; +- `--execution-jwt`; and +- `--checkpoint-sync-url`. + +Additionally, we run Lighthouse with the `--network` flag, which selects a network: + +- `lighthouse` (no flag): Mainnet. +- `lighthouse --network mainnet`: Mainnet. +- `lighthouse --network goerli`: Goerli (testnet). + +Using the correct `--network` flag is very important; using the wrong flag can +result in penalties, slashings or lost deposits. As a rule of thumb, *always* +provide a `--network` flag instead of relying on the default. + +For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`), +[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`). + +Minor modifications depend on whether you want to run your node while [staking](#staking) or [non-staking](#non-staking). +In the following, we will provide examples of what a Lighthouse setup could look like. + +### Staking + +``` +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --http +``` + +A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. +The default listen address is `127.0.0.1:5052`. +The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. + +### Non-staking + +``` +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --disable-deposit-contract-sync +``` + +Since we are not staking, we can use the `--disable-deposit-contract-sync` flag. + +--- + +Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. + +## Step 4: Check logs + +Several logs help you identify if Lighthouse is running correctly. + +### Logs - Checkpoint sync + +Lighthouse will print a message to indicate that checkpoint sync is being used: + +``` +INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon +``` + +After a short time (usually less than a minute), it will log the details of the checkpoint +loaded from the remote beacon node: + +``` +INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon +``` + +Once the checkpoint is loaded, Lighthouse will sync forwards to the head of the chain. + +If a validator client is connected to the node then it will be able to start completing its duties +as soon as forwards sync completes. + +> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint +> against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/), +> a friend's node, or a block explorer.
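+
+One quick way to do this cross-reference is to query a *different* endpoint for the block header
+at the checkpoint slot and compare the returned root with the logged `block_root`. A minimal
+sketch using the standard beacon API (replace `<other-endpoint>` with a [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/)
+other than the one you synced from, and `2034720` with the `slot` from your own log):
+
+```
+curl -s https://<other-endpoint>/eth/v1/beacon/headers/2034720 | jq .data.root
+```
+
+The returned root should match the logged `block_root` exactly.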
+ +#### Backfilling Blocks + +Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks +from the checkpoint back to genesis. + +The beacon node will log messages similar to the following each minute while it completes backfill +sync: + +``` +INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier +``` + +Once backfill is complete, an `INFO Historical block download complete` log will be emitted. + +Check out the [FAQ](./checkpoint-sync.md#faq) for more information on checkpoint sync. + +### Logs - Syncing + +You should see that Lighthouse remains in sync and marks blocks +as `verified` indicating that they have been processed successfully by the execution engine: + +``` +INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 +```
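+
+If you have enabled the HTTP API with `--http`, you can also check the node's sync status
+programmatically. A minimal sketch using the standard (not Lighthouse-specific) beacon node API
+on the default listen address `127.0.0.1:5052`:
+
+```
+curl -s http://localhost:5052/eth/v1/node/syncing | jq
+```
+
+An `is_syncing` value of `false` in the response indicates that forwards sync is complete.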
+ +## Step 5: Further reading + +Several other resources are the logical next steps to explore after running your beacon node: + +- Learn how to [become a validator](./mainnet-validator.md); +- Explore how to [manage your keys](./key-management.md); +- Read up on [validator management](./validator-management.md); +- Dig into the [APIs](./api.md) that the beacon node and validator client provide; +- Study even more about [checkpoint sync](./checkpoint-sync.md); or +- Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md). + +Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! \ No newline at end of file diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 4df7a5f235e..b7a66cbbd84 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,9 +1,11 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; +use lighthouse_network::discv5::enr::EnrBuilder; +use lighthouse_network::discv5::IpMode; use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; use lighthouse_network::{ - discovery::{create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr}, + discovery::{load_enr_from_disk, use_or_load_enr}, load_private_key, CombinedKeyExt, NetworkConfig, }; use serde_derive::{Deserialize, Serialize}; @@ -70,6 +72,15 @@ impl BootNodeConfig { // the address to listen on let listen_socket = SocketAddr::new(network_config.listen_address, network_config.discovery_port); + if listen_socket.is_ipv6() { + // create ipv6 sockets and enable ipv4 mapped addresses. + network_config.discv5_config.ip_mode = IpMode::Ip6 { + enable_mapped_addresses: true, + }; + } else { + // Set explicitly as ipv4 otherwise + network_config.discv5_config.ip_mode = IpMode::Ip4; + } let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(&private_key)?; @@ -104,7 +115,29 @@ impl BootNodeConfig { // Build the local ENR let mut local_enr = { - let mut builder = create_enr_builder_from_config(&network_config, false); + let mut builder = EnrBuilder::new("v4"); + // Set the enr address if specified. Also set the port. + // NOTE: if the port is specified but the address is not, the port won't be + // set since it can't be known if it's an ipv6 or ipv4 udp port. + if let Some(enr_address) = network_config.enr_address { + match enr_address { + std::net::IpAddr::V4(ipv4_addr) => { + builder.ip4(ipv4_addr); + if let Some(port) = network_config.enr_udp_port { + builder.udp4(port); + } + } + std::net::IpAddr::V6(ipv6_addr) => { + builder.ip6(ipv6_addr); + if let Some(port) = network_config.enr_udp_port { + builder.udp6(port); + // We are enabling mapped addresses in the boot node in this case, + // so advertise a udp4 port as well. + builder.udp4(port); + } + } + } + }; // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index c4bf887e942..8f38fb300dc 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -9,53 +9,63 @@ use slog::info; use types::EthSpec; pub async fn run(config: BootNodeConfig, log: slog::Logger) { + let BootNodeConfig { + listen_socket, + boot_nodes, + local_enr, + local_key, + discv5_config, + .. + } = config; + // Print out useful information about the generated ENR - let enr_socket = config - .local_enr - .udp4_socket() - .expect("Enr has a UDP socket"); - let eth2_field = config - .local_enr + let enr_v4_socket = local_enr.udp4_socket(); + let enr_v6_socket = local_enr.udp6_socket(); + let eth2_field = local_enr .eth2() .map(|fork_id| hex::encode(fork_id.fork_digest)) .unwrap_or_default(); - info!(log, "Configuration parameters"; "listening_address" => format!("{}:{}", config.listen_socket.ip(), config.listen_socket.port()), "broadcast_address" => format!("{}:{}",enr_socket.ip(), enr_socket.port()), "eth2" => eth2_field); + let pretty_v4_socket = enr_v4_socket.as_ref().map(|addr| addr.to_string()); + let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string()); + info!( + log, "Configuration parameters"; + "listening_address" => %listen_socket, + "advertised_v4_address" => ?pretty_v4_socket, + "advertised_v6_address" => ?pretty_v6_socket, + "eth2" => eth2_field + ); - info!(log, "Identity established"; "peer_id" => config.local_enr.peer_id().to_string(), "node_id" => config.local_enr.node_id().to_string()); + info!(log, "Identity established"; "peer_id" => %local_enr.peer_id(), "node_id" => %local_enr.node_id()); // build the contactable multiaddr list, adding the p2p protocol - info!(log, "Contact information"; "enr" => config.local_enr.to_base64()); - info!(log, "Contact information"; "multiaddrs" => format!("{:?}", config.local_enr.multiaddr_p2p())); + info!(log, "Contact information"; "enr" => local_enr.to_base64()); + info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); // construct the discv5 server - let mut discv5 = Discv5::new( - config.local_enr.clone(), - config.local_key, - config.discv5_config, - ) - .unwrap(); + let mut
discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); // If there are any bootnodes add them to the routing table - for enr in config.boot_nodes { + for enr in boot_nodes { info!( log, "Adding bootnode"; - "address" => ?enr.udp4_socket(), - "peer_id" => enr.peer_id().to_string(), - "node_id" => enr.node_id().to_string() + "ipv4_address" => ?enr.udp4_socket(), + "ipv6_address" => ?enr.udp6_socket(), + "peer_id" => ?enr.peer_id(), + "node_id" => ?enr.node_id() ); - if enr != config.local_enr { + if enr != local_enr { if let Err(e) = discv5.add_enr(enr) { - slog::warn!(log, "Failed adding ENR"; "error" => e.to_string()); + slog::warn!(log, "Failed adding ENR"; "error" => ?e); } } } // start the server - if let Err(e) = discv5.start(config.listen_socket).await { - slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string()); + if let Err(e) = discv5.start(listen_socket).await { + slog::crit!(log, "Could not start discv5 server"; "error" => %e); return; } @@ -72,7 +82,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { let mut event_stream = match discv5.event_stream().await { Ok(stream) => stream, Err(e) => { - slog::crit!(log, "Failed to obtain event stream"; "error" => e.to_string()); + slog::crit!(log, "Failed to obtain event stream"; "error" => %e); return; } }; @@ -81,9 +91,35 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { loop { tokio::select! { _ = metric_interval.tick() => { + // Get some ipv4/ipv6 stats to add to the metrics. + let mut ipv4_only_reachable: usize = 0; + let mut ipv6_only_reachable: usize = 0; + let mut ipv4_ipv6_reachable: usize = 0; + let mut unreachable_nodes: usize = 0; + for enr in discv5.kbuckets().iter_ref().filter_map(|entry| entry.status.is_connected().then_some(entry.node.value)) { + let declares_ipv4 = enr.udp4_socket().is_some(); + let declares_ipv6 = enr.udp6_socket().is_some(); + match (declares_ipv4, declares_ipv6) { + (true, true) => ipv4_ipv6_reachable += 1, + (true, false) => ipv4_only_reachable += 1, + (false, true) => ipv6_only_reachable += 1, + (false, false) => unreachable_nodes += 1, + } + } + // display server metrics let metrics = discv5.metrics(); - info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second)); + info!( + log, "Server metrics"; + "connected_peers" => discv5.connected_peers(), + "active_sessions" => metrics.active_sessions, + "requests/s" => format_args!("{:.2}", metrics.unsolicited_requests_per_second), + "ipv4_nodes" => ipv4_only_reachable, + "ipv6_nodes" => ipv6_only_reachable, + "ipv6_and_ipv4_nodes" => ipv4_ipv6_reachable, + "unreachable_nodes" => unreachable_nodes, + ); + } Some(event) = event_stream.recv() => { match event { @@ -95,7 +131,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. } => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { - info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); + info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); } Discv5Event::SessionEstablished{ ..
} => {} // Ignore } diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index b0cf4551eea..0539cc7d2c6 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -22,7 +22,7 @@ pub trait BitfieldBehaviour: Clone {} /// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`. /// /// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct Variable { _phantom: PhantomData, } @@ -30,7 +30,7 @@ pub struct Variable { /// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`. /// /// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct Fixed { _phantom: PhantomData, } @@ -96,7 +96,7 @@ pub type BitVector = Bitfield>; /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest /// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. #[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq, Hash(bound = ""))] +#[derivative(PartialEq, Eq, Hash(bound = ""))] pub struct Bitfield { bytes: SmallVec<[u8; SMALLVEC_LEN]>, len: usize, diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index fdd3f95a65b..0bd5f61aff8 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,8 +1,11 @@ +use crate::common::get_indexed_attestation; +use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use std::collections::{hash_map::Entry, HashMap}; use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, - Slot, + Attestation, AttestationData, BeaconState, BeaconStateError, BitList, ChainSpec, Epoch, + EthSpec, ExecPayload, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -13,6 +16,9 @@ pub struct ConsensusContext { proposer_index: Option, /// Block root of the block at `slot`. current_block_root: Option, + /// Cache of indexed attestations constructed during block processing. + indexed_attestations: + HashMap<(AttestationData, BitList), IndexedAttestation>, _phantom: PhantomData, } @@ -20,6 +26,7 @@ pub struct ConsensusContext { pub enum ContextError { BeaconState(BeaconStateError), SlotMismatch { slot: Slot, expected: Slot }, + EpochMismatch { epoch: Epoch, expected: Epoch }, } impl From for ContextError { @@ -34,6 +41,7 @@ impl ConsensusContext { slot, proposer_index: None, current_block_root: None, + indexed_attestations: HashMap::new(), _phantom: PhantomData, } } @@ -43,13 +51,39 @@ impl ConsensusContext { self } + /// Strict method for fetching the proposer index. + /// + /// Gets the proposer index for `self.slot` while ensuring that it matches `state.slot()`. This + /// method should be used in block processing and almost everywhere the proposer index is + /// required. If the slot check is too restrictive, see `get_proposer_index_from_epoch_state`. pub fn get_proposer_index( &mut self, state: &BeaconState, spec: &ChainSpec, ) -> Result { self.check_slot(state.slot())?; + self.get_proposer_index_no_checks(state, spec) + } + + /// More liberal method for fetching the proposer index. 
+ /// + /// Fetches the proposer index for `self.slot` but does not require the state to be from an + /// exactly matching slot (merely a matching epoch). This is useful in batch verification where + /// we want to extract the proposer index from a single state for every slot in the epoch. + pub fn get_proposer_index_from_epoch_state( + &mut self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + self.check_epoch(state.current_epoch())?; + self.get_proposer_index_no_checks(state, spec) + } + fn get_proposer_index_no_checks( + &mut self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { if let Some(proposer_index) = self.proposer_index { return Ok(proposer_index); } @@ -89,4 +123,39 @@ impl ConsensusContext { }) } } + + fn check_epoch(&self, epoch: Epoch) -> Result<(), ContextError> { + let expected = self.slot.epoch(T::slots_per_epoch()); + if epoch == expected { + Ok(()) + } else { + Err(ContextError::EpochMismatch { epoch, expected }) + } + } + + pub fn get_indexed_attestation( + &mut self, + state: &BeaconState, + attestation: &Attestation, + ) -> Result<&IndexedAttestation, BlockOperationError> { + let key = ( + attestation.data.clone(), + attestation.aggregation_bits.clone(), + ); + + match self.indexed_attestations.entry(key) { + Entry::Occupied(occupied) => Ok(occupied.into_mut()), + Entry::Vacant(vacant) => { + let committee = + state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; + let indexed_attestation = + get_indexed_attestation(committee.committee, attestation)?; + Ok(vacant.insert(indexed_attestation)) + } + } + } + + pub fn num_cached_indexed_attestations(&self) -> usize { + self.indexed_attestations.len() + } } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index cccc8eacd9f..7d0cb01aebc 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -111,16 +111,13 @@ pub fn per_block_processing>( let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. - let block_root = Some(ctxt.get_current_block_root(signed_block)?); - let proposer_index = Some(ctxt.get_proposer_index(state, spec)?); block_verify!( BlockSignatureVerifier::verify_entire_block( state, |i| get_pubkey_from_state(state, i), |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned), signed_block, - block_root, - proposer_index, + ctxt, spec ) .is_ok(), @@ -339,6 +336,7 @@ pub fn get_new_eth1_data( /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload pub fn partially_verify_execution_payload>( state: &BeaconState, + block_slot: Slot, payload: &Payload, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -359,7 +357,7 @@ pub fn partially_verify_execution_payload>( } ); - let timestamp = compute_timestamp_at_slot(state, spec)?; + let timestamp = compute_timestamp_at_slot(state, block_slot, spec)?; block_verify!( payload.timestamp() == timestamp, BlockProcessingError::ExecutionInvalidTimestamp { @@ -383,7 +381,7 @@ pub fn process_execution_payload>( payload: &Payload, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload(state, payload, spec)?; + partially_verify_execution_payload(state, state.slot(), payload, spec)?; *state.latest_execution_payload_header_mut()? 
= payload.to_execution_payload_header(); @@ -420,9 +418,10 @@ pub fn is_execution_enabled>( /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, + block_slot: Slot, spec: &ChainSpec, ) -> Result { - let slots_since_genesis = state.slot().as_u64().safe_sub(spec.genesis_slot.as_u64())?; + let slots_since_genesis = block_slot.as_u64().safe_sub(spec.genesis_slot.as_u64())?; slots_since_genesis .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 7584df14ec9..5e52ff8cb83 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,14 +1,13 @@ #![allow(clippy::integer_arithmetic)] use super::signature_sets::{Error as SignatureSetError, *}; -use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use crate::{ConsensusContext, ContextError}; use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, IndexedAttestation, - SignedBeaconBlock, + BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, }; pub type Result = std::result::Result; @@ -28,6 +27,8 @@ pub enum Error { IncorrectBlockProposer { block: u64, local_shuffling: u64 }, /// Failed to load a signature set. The block may be invalid or we failed to process it. SignatureSetError(SignatureSetError), + /// Error related to the consensus context, likely the proposer index or block root calc. 
+ ContextError(ContextError), } impl From for Error { @@ -36,6 +37,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ContextError) -> Error { + Error::ContextError(e) + } +} + impl From for Error { fn from(e: SignatureSetError) -> Error { match e { @@ -122,12 +129,11 @@ where get_pubkey: F, decompressor: D, block: &'a SignedBeaconBlock, - block_root: Option, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, spec: &'a ChainSpec, ) -> Result<()> { let mut verifier = Self::new(state, get_pubkey, decompressor, spec); - verifier.include_all_signatures(block, block_root, verified_proposer_index)?; + verifier.include_all_signatures(block, ctxt)?; verifier.verify() } @@ -135,11 +141,14 @@ where pub fn include_all_signatures>( &mut self, block: &'a SignedBeaconBlock, - block_root: Option, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, ) -> Result<()> { + let block_root = Some(ctxt.get_current_block_root(block)?); + let verified_proposer_index = + Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?); + self.include_block_proposal(block, block_root, verified_proposer_index)?; - self.include_all_signatures_except_proposal(block, verified_proposer_index)?; + self.include_all_signatures_except_proposal(block, ctxt)?; Ok(()) } @@ -149,12 +158,14 @@ where pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, ) -> Result<()> { + let verified_proposer_index = + Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?); self.include_randao_reveal(block, verified_proposer_index)?; self.include_proposer_slashings(block)?; self.include_attester_slashings(block)?; - self.include_attestations(block)?; + self.include_attestations(block, ctxt)?; // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; self.include_sync_aggregate(block)?; @@ -260,7 +271,8 @@ where pub fn include_attestations>( &mut self, block: &'a SignedBeaconBlock, - ) -> Result>> { + ctxt: &mut ConsensusContext, + ) -> Result<()> { self.sets .sets .reserve(block.message().body().attestations().len()); @@ -270,28 +282,18 @@ where .body() .attestations() .iter() - .try_fold( - Vec::with_capacity(block.message().body().attestations().len()), - |mut vec, attestation| { - let committee = self - .state - .get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = - get_indexed_attestation(committee.committee, attestation)?; - - self.sets.push(indexed_attestation_signature_set( - self.state, - self.get_pubkey.clone(), - &attestation.signature, - &indexed_attestation, - self.spec, - )?); - - vec.push(indexed_attestation); - - Ok(vec) - }, - ) + .try_for_each(|attestation| { + let indexed_attestation = ctxt.get_indexed_attestation(self.state, attestation)?; + + self.sets.push(indexed_attestation_signature_set( + self.state, + self.get_pubkey.clone(), + &attestation.signature, + indexed_attestation, + self.spec, + )?); + Ok(()) + }) .map_err(Error::into) } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 1000586e660..9f27c4c9a1e 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -57,8 +57,14 @@ pub mod base { // Verify and apply each attestation. 
for (i, attestation) in attestations.iter().enumerate() { - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(i))?; + verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(i))?; let pending_attestation = PendingAttestation { aggregation_bits: attestation.aggregation_bits.clone(), @@ -94,19 +100,11 @@ pub mod altair { ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - let proposer_index = ctxt.get_proposer_index(state, spec)?; attestations .iter() .enumerate() .try_for_each(|(i, attestation)| { - process_attestation( - state, - attestation, - i, - proposer_index, - verify_signatures, - spec, - ) + process_attestation(state, attestation, i, ctxt, verify_signatures, spec) }) } @@ -114,16 +112,24 @@ pub mod altair { state: &mut BeaconState, attestation: &Attestation, att_index: usize, - proposer_index: u64, + ctxt: &mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; - let indexed_attestation = - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(att_index))?; + let proposer_index = ctxt.get_proposer_index(state, spec)?; + + let attesting_indices = &verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(att_index))? + .attesting_indices; // Matching roots, participation flag indices let data = &attestation.data; @@ -135,7 +141,7 @@ pub mod altair { let total_active_balance = state.get_total_active_balance()?; let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let mut proposer_reward_numerator = 0; - for index in &indexed_attestation.attesting_indices { + for index in attesting_indices { let index = *index as usize; for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 5d8113af4f0..303a6e3913a 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -1,7 +1,7 @@ use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use super::VerifySignatures; -use crate::common::get_indexed_attestation; use crate::per_block_processing::is_valid_indexed_attestation; +use crate::ConsensusContext; use safe_arith::SafeArith; use types::*; @@ -15,12 +15,13 @@ fn error(reason: Invalid) -> BlockOperationError { /// to `state`. Otherwise, returns a descriptive `Err`. /// /// Optionally verifies the aggregate signature, depending on `verify_signatures`. 
-pub fn verify_attestation_for_block_inclusion( +pub fn verify_attestation_for_block_inclusion<'ctxt, T: EthSpec>( state: &BeaconState, attestation: &Attestation, + ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result> { +) -> Result<&'ctxt IndexedAttestation> { let data = &attestation.data; verify!( @@ -39,7 +40,7 @@ pub fn verify_attestation_for_block_inclusion( } ); - verify_attestation_for_state(state, attestation, verify_signatures, spec) + verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) } /// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given @@ -49,12 +50,13 @@ pub fn verify_attestation_for_block_inclusion( /// prior blocks in `state`. /// /// Spec v0.12.1 -pub fn verify_attestation_for_state( +pub fn verify_attestation_for_state<'ctxt, T: EthSpec>( state: &BeaconState, attestation: &Attestation, + ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result> { +) -> Result<&'ctxt IndexedAttestation> { let data = &attestation.data; verify!( @@ -66,9 +68,8 @@ pub fn verify_attestation_for_state( verify_casper_ffg_vote(attestation, state)?; // Check signature and bitfields - let committee = state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation)?; - is_valid_indexed_attestation(state, &indexed_attestation, verify_signatures, spec)?; + let indexed_attestation = ctxt.get_indexed_attestation(state, attestation)?; + is_valid_indexed_attestation(state, indexed_attestation, verify_signatures, spec)?; Ok(indexed_attestation) } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 8b233d847bf..9d548b0499a 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -789,6 +789,7 @@ fn run( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, }) .map_err(|e| format!("should start logger: {:?}", e))? .build() diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 84d0a517652..44a1772ccd2 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -339,6 +339,10 @@ fn do_transition( .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build all caches (again): {:?}", t.elapsed()); + let mut ctxt = ConsensusContext::new(pre_state.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + if !config.no_signature_verification { let get_pubkey = move |validator_index| { validator_pubkey_cache @@ -359,18 +363,20 @@ fn do_transition( get_pubkey, decompressor, &block, - Some(block_root), - Some(block.message().proposer_index()), + &mut ctxt, spec, ) .map_err(|e| format!("Invalid block signature: {:?}", e))?; debug!("Batch verify block signatures: {:?}", t.elapsed()); + + // Signature verification should prime the indexed attestation cache. 
+ assert_eq!( + ctxt.num_cached_indexed_attestations(), + block.message().body().attestations().len() + ); } let t = Instant::now(); - let mut ctxt = ConsensusContext::new(pre_state.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut pre_state, &block, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index c5b58581d21..fad7edeb196 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -55,6 +55,7 @@ pub struct LoggerConfig { pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, + pub is_restricted: bool, } impl Default for LoggerConfig { fn default() -> Self { @@ -68,6 +69,7 @@ impl Default for LoggerConfig { max_log_size: 200, max_log_number: 5, compression: false, + is_restricted: true, } } } @@ -257,7 +259,7 @@ impl EnvironmentBuilder { .rotate_size(config.max_log_size) .rotate_keep(config.max_log_number) .rotate_compress(config.compression) - .restrict_permissions(true) + .restrict_permissions(config.is_restricted) .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 9dc0902e064..da72204f967 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -129,6 +129,15 @@ fn main() { to store old logs.") .global(true), ) + .arg( + Arg::with_name("logfile-no-restricted-perms") + .long("logfile-no-restricted-perms") + .help( + "If present, log files will be generated as world-readable, meaning they can be read by \ any user on the machine. Note that logs can often contain sensitive information \ about your validator and so this flag should be used with caution.") + .global(true), + ) .arg( Arg::with_name("log-format") .long("log-format") @@ -407,6 +416,8 @@ fn run( let logfile_compress = matches.is_present("logfile-compress"); + let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + // Construct the path to the log file.
let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { @@ -446,6 +457,7 @@ fn run( max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, + is_restricted: logfile_restricted, }; let builder = environment_builder.initialize_logger(logger_config.clone())?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 0d700492502..d39235cb136 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1548,6 +1548,23 @@ fn enabled_disable_log_timestamp_flag() { assert!(config.logger_config.disable_log_timestamp); }); } +#[test] +fn logfile_restricted_perms_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted); + }); +} +#[test] +fn logfile_no_restricted_perms_flag() { + CommandLineTest::new() + .flag("logfile-no-restricted-perms", None) + .run_with_zero_port() + .with_config(|config| { + assert!(!config.logger_config.is_restricted); + }); +} #[test] fn sync_eth1_chain_default() { @@ -1587,7 +1604,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.enable_light_client_server, false)); + .with_config(|config| assert_eq!(config.network.enable_light_client_server, false)); } #[test] @@ -1595,5 +1612,16 @@ fn light_client_server_enabled() { CommandLineTest::new() .flag("light-client-server", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.enable_light_client_server, true)); + .with_config(|config| assert_eq!(config.network.enable_light_client_server, true)); } + +#[test] +fn gui_flag() { + CommandLineTest::new() + .flag("gui", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.http_api.enabled); + assert!(config.validator_monitor_auto); + }); +} diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8faf4db8219..039efb36845 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, CountUnrealized, + BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -388,6 +388,7 @@ impl Tester { block_root, block.clone(), CountUnrealized::False, + NotifyExecutionLayer::Yes, ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index a351a597c0a..aaa725f567a 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -80,7 +80,6 @@ impl Operation for Attestation { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); - let proposer_index = ctxt.get_proposer_index(state, spec)?; match state { BeaconState::Base(_) => base::process_attestations( state, self, VerifySignatures::True, &mut ctxt, spec, ), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_attestation( - state, - self, - 0, - proposer_index, - VerifySignatures::True,
- spec, - ), + BeaconState::Altair(_) | BeaconState::Merge(_) => { + altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) + } } } } diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 3d59013f2a2..8284bff6096 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -67,6 +67,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 06f9e9a4f35..53c4447da2c 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -52,6 +52,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 00e439e4c9f..1c8b41f0573 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -56,6 +56,7 @@ fn syncing_sim( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, })? .multi_threaded_tokio_runtime()? .build()?;