Merge remote-tracking branch 'origin/unstable' into tree-states
michaelsproul committed Jan 25, 2024
2 parents 6262be7 + 1be5253 · commit 262e5f2
Showing 91 changed files with 2,252 additions and 651 deletions.
361 changes: 232 additions & 129 deletions Cargo.lock

Large diffs are not rendered by default.

6 changes: 5 additions & 1 deletion Cargo.toml
@@ -107,7 +107,7 @@ crossbeam-channel = "0.5.8"
delay_map = "0.3"
derivative = "2"
dirs = "3"
discv5 = { git="https://github.com/sigp/discv5", rev="dbb4a718cd32eaed8127c3c8241bfd0fde9eb908", features = ["libp2p"] }
discv5 = { git="https://github.com/sigp/discv5", rev="e30a2c31b7ac0c57876458b971164654dfa4513b", features = ["libp2p"] }
env_logger = "0.9"
error-chain = "0.12"
ethereum-types = "0.14"
@@ -163,6 +163,10 @@ tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] }
tokio-stream = { version = "0.1", features = ["sync"] }
tokio-util = { version = "0.6", features = ["codec", "compat", "time"] }
tracing-appender = "0.2"
tracing-core = "0.1"
tracing-log = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tree_hash = "0.5"
tree_hash_derive = "0.5"
url = "2"
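The four tracing crates added above are designed to compose: tracing-subscriber builds the subscriber, tracing-appender provides non-blocking file output, tracing-log bridges records emitted via the `log` crate, and tracing-core is the shared substrate. A minimal sketch of how they might be wired together (the log directory, file prefix, and default filter are illustrative assumptions, and the sketch additionally assumes the `tracing` crate itself is a dependency; none of this is taken from the commit):

use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_subscriber::EnvFilter;

fn main() {
    // Daily-rolling log file; directory and prefix are made up for this sketch.
    let file_appender = RollingFileAppender::new(Rotation::DAILY, "./logs", "node.log");
    // `_guard` must be kept alive, or buffered lines are dropped on exit.
    let (writer, _guard) = tracing_appender::non_blocking(file_appender);

    // The "env-filter" feature enables RUST_LOG-style directives.
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));

    // `init()` also installs the `tracing-log` bridge (a default feature of
    // tracing-subscriber), so records from the `log` crate are captured too.
    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .with_writer(writer)
        .init();

    tracing::info!("tracing initialized");
}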
2 changes: 1 addition & 1 deletion Makefile
@@ -14,7 +14,7 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release"
PINNED_NIGHTLY ?= nightly
CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19

# List of features to use when building natively. Can be overriden via the environment.
# List of features to use when building natively. Can be overridden via the environment.
# No jemalloc on Windows
ifeq ($(OS),Windows_NT)
FEATURES?=
2 changes: 2 additions & 0 deletions account_manager/src/validator/import.rs
@@ -284,6 +284,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String>
suggested_fee_recipient,
None,
None,
None,
None,
)
.map_err(|e| format!("Unable to create new validator definition: {:?}", e))?;

27 changes: 19 additions & 8 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -30,7 +30,7 @@ use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData};
use crate::events::ServerSentEventHandler;
use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle};
use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult};
use crate::head_tracker::HeadTracker;
use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker};
use crate::historical_blocks::HistoricalBlockError;
use crate::light_client_finality_update_verification::{
Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate,
@@ -604,38 +604,48 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut batch = vec![];

let _head_timer = metrics::start_timer(&metrics::PERSIST_HEAD);
batch.push(self.persist_head_in_batch()?);

// Hold a lock to head_tracker until it has been persisted to disk. Otherwise there's a race
// condition with the pruning thread which can result in a block present in the head tracker
// but absent in the DB. This inconsistency halts pruning and dramatically increases disk
// size. Ref: https://github.com/sigp/lighthouse/issues/4773
let head_tracker = self.head_tracker.0.read();
batch.push(self.persist_head_in_batch(&head_tracker)?);

let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE);
batch.push(self.persist_fork_choice_in_batch()?);

self.store.hot_db.do_atomically(batch)?;
drop(head_tracker);

Ok(())
}

/// Return a `PersistedBeaconChain` without reference to a `BeaconChain`.
pub fn make_persisted_head(
genesis_block_root: Hash256,
head_tracker: &HeadTracker,
head_tracker_reader: &HeadTrackerReader,
) -> PersistedBeaconChain {
PersistedBeaconChain {
_canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT,
genesis_block_root,
ssz_head_tracker: head_tracker.to_ssz_container(),
ssz_head_tracker: SszHeadTracker::from_map(head_tracker_reader),
}
}

/// Return a database operation for writing the beacon chain head to disk.
pub fn persist_head_in_batch(&self) -> Result<KeyValueStoreOp, DBError> {
Self::persist_head_in_batch_standalone(self.genesis_block_root, &self.head_tracker)
pub fn persist_head_in_batch(
&self,
head_tracker_reader: &HeadTrackerReader,
) -> Result<KeyValueStoreOp, DBError> {
Self::persist_head_in_batch_standalone(self.genesis_block_root, head_tracker_reader)
}

pub fn persist_head_in_batch_standalone(
genesis_block_root: Hash256,
head_tracker: &HeadTracker,
head_tracker_reader: &HeadTrackerReader,
) -> Result<KeyValueStoreOp, DBError> {
Self::make_persisted_head(genesis_block_root, head_tracker)
Self::make_persisted_head(genesis_block_root, head_tracker_reader)
.as_kv_store_op(BEACON_CHAIN_DB_KEY)
}

@@ -1327,6 +1337,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.head_tracker.heads()
}

/// Only used in tests.
pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool {
self.head_tracker.contains_head((*block_hash).into())
}
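The comment in `persist_head` above is the heart of the fix: the head tracker's read lock must outlive the disk write, otherwise a concurrently running pruning thread can delete a block that the about-to-be-persisted head tracker still references. A standalone sketch of the pattern with simplified stand-in types (not the Lighthouse API):

use parking_lot::RwLock;
use std::collections::HashMap;

// Stand-in for the real tracker; keys/values stand in for Hash256/Slot.
struct HeadTracker(RwLock<HashMap<u64, u64>>);

// Stand-in for serializing the tracker into an atomic DB batch.
fn persist_to_disk(snapshot: &HashMap<u64, u64>) {
    println!("persisting {} heads", snapshot.len());
}

fn persist_heads(tracker: &HeadTracker) {
    // Take the read guard before serializing and hold it until the write
    // completes. A pruner must acquire the write lock to remove a head, so
    // it cannot run in between: the DB never references a pruned block.
    let guard = tracker.0.read();
    persist_to_disk(&guard);
    drop(guard); // Release only after the data is durable.
}

fn main() {
    let tracker = HeadTracker(RwLock::new(HashMap::from([(1, 100)])));
    persist_heads(&tracker);
}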
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/block_verification.rs
@@ -718,7 +718,7 @@ impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for PublishBlockReq
Ok::<_, BlockContentsError<T::EthSpec>>(gossip_verified_blobs)
})
.transpose()?;
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
let gossip_verified_block = GossipVerifiedBlock::new(block, chain)?;

Ok((gossip_verified_block, gossip_verified_blobs))
}
17 changes: 11 additions & 6 deletions beacon_node/beacon_chain/src/builder.rs
@@ -833,20 +833,25 @@ where
//
// This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance
// doesn't write a `PersistedBeaconChain` without the rest of the batch.
self.pending_io_batch.push(BeaconChain::<
Witness<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>,
>::persist_head_in_batch_standalone(
genesis_block_root, &head_tracker
).map_err(|e| format!("{:?}", e))?);
let head_tracker_reader = head_tracker.0.read();
self.pending_io_batch
.push(BeaconChain::<
Witness<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>,
>::persist_head_in_batch_standalone(
genesis_block_root,
&head_tracker_reader,
)
.map_err(|e| format!("{e:?}"))?);
self.pending_io_batch.push(BeaconChain::<
Witness<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>,
>::persist_fork_choice_in_batch_standalone(
&fork_choice
).map_err(|e| format!("{:?}", e))?);
).map_err(|e| format!("{e:?}"))?);
store
.hot_db
.do_atomically(self.pending_io_batch)
.map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?;
drop(head_tracker_reader);

let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root();
let genesis_time = head_snapshot.beacon_state.genesis_time();
3 changes: 1 addition & 2 deletions beacon_node/beacon_chain/src/capella_readiness.rs
@@ -1,5 +1,4 @@
//! Provides tools for checking if a node is ready for the Capella upgrade and following merge
//! transition.
//! Provides tools for checking if a node is ready for the Capella upgrade.

use crate::{BeaconChain, BeaconChainTypes};
use execution_layer::http::{
@@ -952,7 +952,7 @@ mod test {
};

let availability_pending_block = AvailabilityPendingExecutedBlock {
block: Arc::new(block),
block,
import_data,
payload_verification_outcome,
};
121 changes: 121 additions & 0 deletions beacon_node/beacon_chain/src/deneb_readiness.rs
@@ -0,0 +1,121 @@
//! Provides tools for checking if a node is ready for the Deneb upgrade.

use crate::{BeaconChain, BeaconChainTypes};
use execution_layer::http::{
ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V3,
};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::time::Duration;
use types::*;

use super::merge_readiness::SECONDS_IN_A_WEEK;

/// The time before the Deneb fork when we will start issuing warnings about preparation.
pub const DENEB_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;
pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300;
pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300;

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "type")]
pub enum DenebReadiness {
/// The execution engine is deneb-enabled (as far as we can tell)
Ready,
/// We are connected to an execution engine which doesn't support the V3 engine api methods
V3MethodsNotSupported { error: String },
/// Exchanging capabilities with the EL failed; there might be a problem with
/// connectivity, authentication or a difference in configuration.
ExchangeCapabilitiesFailed { error: String },
/// The user has not configured an execution endpoint
NoExecutionEndpoint,
}

impl fmt::Display for DenebReadiness {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DenebReadiness::Ready => {
write!(f, "This node appears ready for Deneb.")
}
DenebReadiness::ExchangeCapabilitiesFailed { error } => write!(
f,
"Could not exchange capabilities with the \
execution endpoint: {}",
error
),
DenebReadiness::NoExecutionEndpoint => write!(
f,
"The --execution-endpoint flag is not specified, this is a \
requirement post-merge"
),
DenebReadiness::V3MethodsNotSupported { error } => write!(
f,
"Execution endpoint does not support Deneb methods: {}",
error
),
}
}
}

impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns `true` if the Deneb fork epoch is set and the Deneb fork has occurred or will
/// occur within `DENEB_READINESS_PREPARATION_SECONDS`.
pub fn is_time_to_prepare_for_deneb(&self, current_slot: Slot) -> bool {
if let Some(deneb_epoch) = self.spec.deneb_fork_epoch {
let deneb_slot = deneb_epoch.start_slot(T::EthSpec::slots_per_epoch());
let deneb_readiness_preparation_slots =
DENEB_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot;
// Return `true` if Deneb has happened or is within the preparation time.
current_slot + deneb_readiness_preparation_slots > deneb_slot
} else {
// The Deneb fork epoch has not been defined yet, no need to prepare.
false
}
}
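
    // Worked example (assuming a 12-second slot time, which this diff does not
    // state): DENEB_READINESS_PREPARATION_SECONDS = 604_800 * 2 = 1_209_600 s,
    // so the check starts returning `true` 1_209_600 / 12 = 100_800 slots
    // (two weeks) before the Deneb fork slot.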

/// Attempts to connect to the EL and confirm that it is ready for Deneb.
pub async fn check_deneb_readiness(&self) -> DenebReadiness {
if let Some(el) = self.execution_layer.as_ref() {
match el
.get_engine_capabilities(Some(Duration::from_secs(
ENGINE_CAPABILITIES_REFRESH_INTERVAL,
)))
.await
{
Err(e) => {
// The EL was either unreachable or responded with an error
DenebReadiness::ExchangeCapabilitiesFailed {
error: format!("{:?}", e),
}
}
Ok(capabilities) => {
let mut missing_methods = String::from("Required Methods Unsupported:");
let mut all_good = true;
if !capabilities.get_payload_v3 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_GET_PAYLOAD_V3);
all_good = false;
}
if !capabilities.forkchoice_updated_v3 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V3);
all_good = false;
}
if !capabilities.new_payload_v3 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_NEW_PAYLOAD_V3);
all_good = false;
}

if all_good {
DenebReadiness::Ready
} else {
DenebReadiness::V3MethodsNotSupported {
error: missing_methods,
}
}
}
}
} else {
DenebReadiness::NoExecutionEndpoint
}
}
}
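The readiness check above boils down to testing three capability flags and accumulating the names of any missing methods. A stripped-down sketch of that logic with a stand-in struct (the real `EngineCapabilities` type and the `ENGINE_*` constants live in `execution_layer`; the method-name strings below are the standard Engine API names, used here as assumptions):

struct Capabilities {
    get_payload_v3: bool,
    forkchoice_updated_v3: bool,
    new_payload_v3: bool,
}

fn deneb_ready(caps: &Capabilities) -> Result<(), String> {
    let mut missing = String::from("Required Methods Unsupported:");
    let mut all_good = true;
    for (supported, name) in [
        (caps.get_payload_v3, "engine_getPayloadV3"),
        (caps.forkchoice_updated_v3, "engine_forkchoiceUpdatedV3"),
        (caps.new_payload_v3, "engine_newPayloadV3"),
    ] {
        if !supported {
            missing.push(' ');
            missing.push_str(name);
            all_good = false;
        }
    }
    if all_good { Ok(()) } else { Err(missing) }
}

fn main() {
    let caps = Capabilities {
        get_payload_v3: true,
        forkchoice_updated_v3: false,
        new_payload_v3: true,
    };
    // Prints "Required Methods Unsupported: engine_forkchoiceUpdatedV3".
    if let Err(e) = deneb_ready(&caps) {
        println!("{e}");
    }
}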
9 changes: 8 additions & 1 deletion beacon_node/beacon_chain/src/head_tracker.rs
@@ -1,4 +1,4 @@
use parking_lot::RwLock;
use parking_lot::{RwLock, RwLockReadGuard};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use types::{Hash256, Slot};
@@ -16,6 +16,8 @@ pub enum Error {
#[derive(Default, Debug)]
pub struct HeadTracker(pub RwLock<HashMap<Hash256, Slot>>);

pub type HeadTrackerReader<'a> = RwLockReadGuard<'a, HashMap<Hash256, Slot>>;

impl HeadTracker {
/// Register a block with `Self`, so it may or may not be included in a `Self::heads` call.
///
@@ -44,6 +46,11 @@ impl HeadTracker {

/// Returns a `SszHeadTracker`, which contains all necessary information to restore the state
/// of `Self` at some later point.
///
/// Should ONLY be used for tests, due to the potential for database races.
///
/// See <https://github.com/sigp/lighthouse/issues/4773>
#[cfg(test)]
pub fn to_ssz_container(&self) -> SszHeadTracker {
SszHeadTracker::from_map(&self.0.read())
}
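The new `HeadTrackerReader` alias names the `parking_lot` read guard, which lets functions accept a lock the caller is still holding; the borrowed guard in the signature makes the "lock held for the whole operation" requirement explicit. A minimal sketch of the idiom (simplified map type, not the Lighthouse one):

use parking_lot::{RwLock, RwLockReadGuard};
use std::collections::HashMap;

type Heads = HashMap<u64, u64>;
pub type HeadsReader<'a> = RwLockReadGuard<'a, Heads>;

// Accepting the guard (rather than the lock) guarantees the caller has
// already acquired, and is still holding, the read lock.
fn snapshot(reader: &HeadsReader) -> Vec<(u64, u64)> {
    reader.iter().map(|(k, v)| (*k, *v)).collect()
}

fn main() {
    let lock = RwLock::new(Heads::from([(1, 10), (2, 20)]));
    let guard = lock.read();
    let snap = snapshot(&guard);
    drop(guard); // Release only once the snapshot has been taken.
    assert_eq!(snap.len(), 2);
}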
7 changes: 4 additions & 3 deletions beacon_node/beacon_chain/src/historical_blocks.rs
@@ -100,8 +100,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;

let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import);
let mut cold_batch = Vec::with_capacity(blocks_to_import.len());
let mut hot_batch = Vec::with_capacity(blocks_to_import.len() + n_blobs_lists_to_import);
let mut signed_blocks = Vec::with_capacity(blocks_to_import.len());

for available_block in blocks_to_import.into_iter().rev() {
@@ -126,7 +126,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if let Some(blobs) = maybe_blobs {
new_oldest_blob_slot = Some(block.slot());
self.store
.blobs_as_kv_store_ops(&block_root, blobs, &mut hot_batch);
.blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch);
}

// Store block roots, including at all skip slots in the freezer DB.
@@ -203,7 +203,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
drop(verify_timer);
drop(sig_timer);

// Write the I/O batch to disk.
// Write the I/O batches to disk.
self.store.blobs_db.do_atomically(blob_batch)?;
self.store.cold_db.do_atomically(cold_batch)?;

let mut anchor_and_blob_batch = Vec::with_capacity(2);
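The hunk above replaces the single hot-DB batch with one batch per database (blobs, cold, hot), since atomicity only holds within a single store, and orders the writes so the hot-DB anchor update lands last: a crash mid-import then leaves data unreferenced rather than referenced-but-missing. A schematic sketch of that idea (the `Db` trait and batch contents are stand-ins, not the Lighthouse store API):

trait Db {
    // Applies a batch atomically within this one database.
    fn do_atomically(&self, batch: Vec<String>) -> Result<(), String>;
}

struct PrintDb(&'static str);

impl Db for PrintDb {
    fn do_atomically(&self, batch: Vec<String>) -> Result<(), String> {
        println!("{}: committing {} ops", self.0, batch.len());
        Ok(())
    }
}

fn import_historical(blobs_db: &dyn Db, cold_db: &dyn Db, hot_db: &dyn Db) -> Result<(), String> {
    let blob_batch = vec!["store blob sidecars".into()];
    let cold_batch = vec!["store block roots".into()];
    let anchor_batch = vec!["update anchor + blob info".into()];

    // Write side data first; if we crash here it is merely unreferenced.
    blobs_db.do_atomically(blob_batch)?;
    cold_db.do_atomically(cold_batch)?;
    // The hot-DB write that makes everything visible goes last.
    hot_db.do_atomically(anchor_batch)
}

fn main() {
    import_historical(&PrintDb("blobs"), &PrintDb("cold"), &PrintDb("hot")).unwrap();
}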
1 change: 1 addition & 0 deletions beacon_node/beacon_chain/src/lib.rs
@@ -18,6 +18,7 @@ pub mod canonical_head;
pub mod capella_readiness;
pub mod chain_config;
pub mod data_availability_checker;
pub mod deneb_readiness;
mod early_attester_cache;
mod errors;
pub mod eth1_chain;
9 changes: 9 additions & 0 deletions beacon_node/beacon_chain/src/schema_change.rs
@@ -1,6 +1,7 @@
//! Utilities for managing database schema changes.
mod migration_schema_v17;
mod migration_schema_v18;
mod migration_schema_v19;
mod migration_schema_v20;

use crate::beacon_chain::BeaconChainTypes;
@@ -78,6 +79,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(
let ops = migration_schema_v18::downgrade_from_v18::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(18), SchemaVersion(19)) => {
let ops = migration_schema_v19::upgrade_to_v19::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(19), SchemaVersion(18)) => {
let ops = migration_schema_v19::downgrade_from_v19::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
// Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to,
Expand Down
(Diff truncated: the remaining changed files are not rendered.)
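The new match arms above follow the file's convention: every schema step gets a symmetric upgrade/downgrade pair, and any transition not explicitly listed is rejected. A toy sketch of that dispatch shape (the version numbers and no-op bodies are illustrative, not the real migration logic):

fn migrate(from: u64, to: u64) -> Result<(), String> {
    match (from, to) {
        (18, 19) => Ok(()), // upgrade_to_v19
        (19, 18) => Ok(()), // downgrade_from_v19
        (19, 20) => Ok(()), // upgrade_to_v20
        (20, 19) => Ok(()), // downgrade_from_v20
        _ => Err(format!("unsupported migration {from} -> {to}")),
    }
}

fn main() {
    assert!(migrate(19, 20).is_ok());
    // Multi-step jumps are rejected by this toy dispatcher.
    assert!(migrate(18, 20).is_err());
}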
