Merge remote-tracking branch 'origin/unstable' into tree-states
michaelsproul committed Jan 11, 2024
2 parents c8dc082 + 2e8e160 commit 8db17da
Showing 114 changed files with 4,361 additions and 2,285 deletions.
1,827 changes: 985 additions & 842 deletions Cargo.lock

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions Cargo.toml
@@ -106,7 +106,7 @@ crossbeam-channel = "0.5.8"
delay_map = "0.3"
derivative = "2"
dirs = "3"
discv5 = { version = "0.3", features = ["libp2p"] }
discv5 = { git="https://github.com/sigp/discv5", rev="dbb4a718cd32eaed8127c3c8241bfd0fde9eb908", features = ["libp2p"] }
env_logger = "0.9"
error-chain = "0.12"
ethereum-types = "0.14"
@@ -121,12 +121,12 @@ fnv = "1"
fs2 = "0.4"
futures = "0.3"
hex = "0.4"
hyper = "0.14"
hyper = "1"
itertools = "0.10"
lazy_static = "1"
libsecp256k1 = "0.7"
log = "0.4"
lru = "0.7"
lru = "0.12"
maplit = "1"
milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" }
num_cpus = "1"
@@ -145,13 +145,13 @@ rusqlite = { version = "0.28", features = ["bundled"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_repr = "0.1"
serde_yaml = "0.8"
serde_yaml = "0.9"
sha2 = "0.9"
slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] }
slog-async = "2"
slog-term = "2"
sloggers = { version = "2", features = ["json"] }
smallvec = "1"
smallvec = "1.11.2"
snap = "1"
ssz_types = "0.5"
strum = { version = "0.24", features = ["derive"] }
@@ -224,7 +224,7 @@ swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" }
task_executor = { path = "common/task_executor" }
types = { path = "consensus/types" }
unused_port = { path = "common/unused_port" }
validator_client = { path = "validator_client/" }
validator_client = { path = "validator_client" }
validator_dir = { path = "common/validator_dir" }
warp_utils = { path = "common/warp_utils" }

9 changes: 7 additions & 2 deletions Makefile
@@ -200,12 +200,17 @@ test-exec-engine:
# test vectors.
test: test-release

# Updates the CLI help text pages in the Lighthouse book.
# Updates the CLI help text pages in the Lighthouse book, building with Docker.
cli:
docker run --rm --user=root \
-v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \
bash -c 'cd lighthouse && make && ./scripts/cli.sh'


# Updates the CLI help text pages in the Lighthouse book, building using local
# `cargo`.
cli-local:
make && ./scripts/cli.sh

# Runs the entire test suite, downloading test vectors if required.
test-full: cargo-fmt test-release test-debug test-ef test-exec-engine

107 changes: 107 additions & 0 deletions beacon_node/beacon_chain/src/attestation_simulator.rs
@@ -0,0 +1,107 @@
use crate::{BeaconChain, BeaconChainTypes};
use slog::{debug, error};
use slot_clock::SlotClock;
use std::sync::Arc;
use task_executor::TaskExecutor;
use tokio::time::sleep;
use types::{EthSpec, Slot};

/// Don't run the attestation simulator if the head slot is this many epochs
/// behind the wall-clock slot.
const SYNCING_TOLERANCE_EPOCHS: u64 = 2;

/// Spawns a routine which produces an unaggregated attestation at every slot.
///
/// This routine runs once per slot.
pub fn start_attestation_simulator_service<T: BeaconChainTypes>(
executor: TaskExecutor,
chain: Arc<BeaconChain<T>>,
) {
executor.clone().spawn(
async move { attestation_simulator_service(executor, chain).await },
"attestation_simulator_service",
);
}

/// Loop indefinitely, calling `BeaconChain::produce_unaggregated_attestation` a third of a slot (4s on mainnet) into each slot.
async fn attestation_simulator_service<T: BeaconChainTypes>(
executor: TaskExecutor,
chain: Arc<BeaconChain<T>>,
) {
let slot_duration = chain.slot_clock.slot_duration();
let additional_delay = slot_duration / 3;

loop {
match chain.slot_clock.duration_to_next_slot() {
Some(duration) => {
sleep(duration + additional_delay).await;

debug!(
chain.log,
"Simulating unagg. attestation production";
);

// Run the task in the executor
let inner_chain = chain.clone();
executor.spawn(
async move {
if let Ok(current_slot) = inner_chain.slot() {
produce_unaggregated_attestation(inner_chain, current_slot);
}
},
"attestation_simulator_service",
);
}
None => {
error!(chain.log, "Failed to read slot clock");
// If we can't read the slot clock, just wait another slot.
sleep(slot_duration).await;
}
};
}
}

pub fn produce_unaggregated_attestation<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
current_slot: Slot,
) {
// Don't run the attestation simulator when the head slot is far behind the
// wall-clock slot.
//
// This helps prevent the simulator from becoming a burden by computing
// committees from old states.
let syncing_tolerance_slots = SYNCING_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch();
if chain.best_slot() + syncing_tolerance_slots < current_slot {
return;
}

// Attestations for different committees are practically identical apart from the
// committee index field, and committee 0 is guaranteed to exist, so there is no
// need to load the actual committee.
let beacon_committee_index = 0;

// Store the unaggregated attestation in the validator monitor for later processing
match chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) {
Ok(unaggregated_attestation) => {
let data = &unaggregated_attestation.data;

debug!(
chain.log,
"Produce unagg. attestation";
"attestation_source" => data.source.root.to_string(),
"attestation_target" => data.target.root.to_string(),
);

chain
.validator_monitor
.write()
.set_unaggregated_attestation(unaggregated_attestation);
}
Err(e) => {
debug!(
chain.log,
"Failed to simulate attestation";
"error" => ?e
);
}
}
}
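The scheduling above reduces to: wait for the next slot boundary, then a further third of a slot, then fire the simulation task. A minimal standalone sketch of that timing pattern using plain tokio (the 12-second slot duration and the mainnet genesis timestamp are assumptions for illustration; the real service uses the chain's `SlotClock`):

use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::time::sleep;

// Mainnet slot time (assumption for this sketch).
const SLOT_DURATION: Duration = Duration::from_secs(12);

/// Duration from `now` until the next slot boundary, given some genesis time.
fn duration_to_next_slot(genesis: Duration, now: Duration) -> Duration {
    let since_genesis = now.saturating_sub(genesis);
    let into_slot = Duration::from_nanos(
        (since_genesis.as_nanos() % SLOT_DURATION.as_nanos()) as u64,
    );
    SLOT_DURATION - into_slot
}

#[tokio::main]
async fn main() {
    // Mainnet genesis timestamp (assumption for this sketch).
    let genesis = Duration::from_secs(1_606_824_023);
    loop {
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        // Sleep to the next slot boundary, plus a third of a slot (4s on mainnet),
        // mirroring the `slot_duration / 3` delay in the simulator above.
        sleep(duration_to_next_slot(genesis, now) + SLOT_DURATION / 3).await;
        println!("simulate attestation production for the current slot");
    }
}

Note how the real service spawns the production work onto the executor rather than running it inline, presumably so a slow committee/state computation cannot delay the timing loop.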
72 changes: 55 additions & 17 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -12,8 +12,8 @@ use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::POS_PANDA_BANNER;
use crate::block_verification::{
check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy,
signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock,
IntoExecutionPendingBlock,
signature_verify_chain_segment, verify_header_signature, BlockError, ExecutionPendingBlock,
GossipVerifiedBlock, IntoExecutionPendingBlock,
};
use crate::block_verification_types::{
AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock,
@@ -52,6 +52,7 @@ use crate::observed_attesters::{
use crate::observed_blob_sidecars::ObservedBlobSidecars;
use crate::observed_block_producers::ObservedBlockProducers;
use crate::observed_operations::{ObservationOutcome, ObservedOperations};
use crate::observed_slashable::ObservedSlashable;
use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT};
use crate::persisted_fork_choice::PersistedForkChoice;
use crate::pre_finalization_cache::PreFinalizationBlockCache;
@@ -402,7 +403,9 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// Maintains a record of which validators have proposed blocks for each slot.
pub observed_block_producers: RwLock<ObservedBlockProducers<T::EthSpec>>,
/// Maintains a record of blob sidecars seen over the gossip network.
pub(crate) observed_blob_sidecars: RwLock<ObservedBlobSidecars<T::EthSpec>>,
pub observed_blob_sidecars: RwLock<ObservedBlobSidecars<T::EthSpec>>,
/// Maintains a record of slashable messages seen over the gossip network or RPC.
pub observed_slashable: RwLock<ObservedSlashable<T::EthSpec>>,
/// Maintains a record of which validators have submitted voluntary exits.
pub(crate) observed_voluntary_exits: Mutex<ObservedOperations<SignedVoluntaryExit, T::EthSpec>>,
/// Maintains a record of which validators we've seen proposer slashings for.
@@ -490,20 +493,24 @@ impl<E: EthSpec> BeaconBlockResponseWrapper<E> {
})
}

pub fn execution_payload_value(&self) -> Option<Uint256> {
pub fn execution_payload_value(&self) -> Uint256 {
match self {
BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value,
BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value,
}
}

pub fn consensus_block_value(&self) -> Option<u64> {
pub fn consensus_block_value_gwei(&self) -> u64 {
match self {
BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value,
BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value,
}
}

pub fn consensus_block_value_wei(&self) -> Uint256 {
Uint256::from(self.consensus_block_value_gwei()) * 1_000_000_000
}

pub fn is_blinded(&self) -> bool {
matches!(self, BeaconBlockResponseWrapper::Blinded(_))
}
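The new `consensus_block_value_wei` above converts the gwei-denominated consensus reward to wei by multiplying by 10^9. A tiny self-contained sketch of the same conversion using primitive integers instead of the `Uint256` type from the diff (names here are illustrative):

/// Convert a consensus-layer reward from gwei to wei (1 gwei = 10^9 wei).
/// u128 stands in for `Uint256`, purely for illustration.
fn gwei_to_wei(gwei: u64) -> u128 {
    gwei as u128 * 1_000_000_000
}

fn main() {
    // e.g. a 0.042 ETH proposer reward expressed in gwei:
    let reward_gwei = 42_000_000u64;
    assert_eq!(gwei_to_wei(reward_gwei), 42_000_000_000_000_000);
    println!("{} gwei = {} wei", reward_gwei, gwei_to_wei(reward_gwei));
}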
@@ -518,9 +525,9 @@ pub struct BeaconBlockResponse<T: EthSpec, Payload: AbstractExecPayload<T>> {
/// The Blobs / Proofs associated with the new block
pub blob_items: Option<(KzgProofs<T>, BlobsList<T>)>,
/// The execution layer reward for the block
pub execution_payload_value: Option<Uint256>,
pub execution_payload_value: Uint256,
/// The consensus layer reward to the proposer
pub consensus_block_value: Option<u64>,
pub consensus_block_value: u64,
}

impl FinalizationAndCanonicity {
@@ -3122,9 +3129,27 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
if let Some(slasher) = self.slasher.as_ref() {
for blob_sidecar in blobs.iter().filter_map(|blob| blob.clone()) {
slasher.accept_block_header(blob_sidecar.signed_block_header.clone());
// Need to scope this to ensure the lock is dropped before calling `process_availability`
// Even an explicit drop is not enough to convince the borrow checker.
{
let mut slashable_cache = self.observed_slashable.write();
for header in blobs
.into_iter()
.filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone()))
.unique()
{
if verify_header_signature::<T, BlockError<T::EthSpec>>(self, &header).is_ok() {
slashable_cache
.observe_slashable(
header.message.slot,
header.message.proposer_index,
block_root,
)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(header);
}
}
}
}
let availability = self
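The blob-processing hunk above de-duplicates the signed block headers carried by the blob sidecars before feeding them to the slashable cache and the slasher, since every sidecar for a block carries a copy of the same header. A standalone sketch of that filter-map/`unique` pattern with `itertools` (`Header` is a simplified stand-in for `SignedBeaconBlockHeader`):

use itertools::Itertools;

// Stand-in for `SignedBeaconBlockHeader`; the real code uses the Lighthouse type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Header {
    slot: u64,
    proposer_index: u64,
}

fn main() {
    // A fixed-size list of optional blob sidecars, as in `FixedBlobSidecarList`,
    // where every present sidecar carries a copy of the same block header.
    let blobs: Vec<Option<Header>> = vec![
        Some(Header { slot: 100, proposer_index: 7 }),
        None,
        Some(Header { slot: 100, proposer_index: 7 }),
    ];

    // Filter out the `None`s, clone each header, and de-duplicate, so the
    // slashable cache / slasher sees each distinct header only once.
    let unique_headers: Vec<Header> = blobs
        .iter()
        .filter_map(|b| b.as_ref().cloned())
        .unique()
        .collect();

    assert_eq!(unique_headers.len(), 1);
}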
Expand Down Expand Up @@ -3569,9 +3594,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}

// Allow the validator monitor to learn about a new valid state.
self.validator_monitor
.write()
.process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state);
self.validator_monitor.write().process_valid_state(
current_slot.epoch(T::EthSpec::slots_per_epoch()),
state,
&self.spec,
);

let validator_monitor = self.validator_monitor.read();

@@ -3967,6 +3994,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
slot: Slot,
validator_graffiti: Option<Graffiti>,
verification: ProduceBlockVerification,
builder_boost_factor: Option<u64>,
block_production_version: BlockProductionVersion,
) -> Result<BeaconBlockResponseWrapper<T::EthSpec>, BlockProductionError> {
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS);
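These hunks thread a new optional `builder_boost_factor` through block production. This diff does not show how the factor is applied, but a value like this is typically used to scale the builder's bid before comparing it against the locally built payload. A purely illustrative sketch under that assumption (the percentage semantics and every name below are assumptions, not taken from this commit):

/// Decide whether to use a builder (relay) payload or a locally built one.
/// `builder_boost_factor` is interpreted here as a percentage applied to the
/// builder's bid; `None` means "no adjustment". This is an assumption for
/// illustration only -- the actual policy lives elsewhere in the codebase.
fn prefer_builder_payload(
    builder_value_wei: u128,
    local_value_wei: u128,
    builder_boost_factor: Option<u64>,
) -> bool {
    let factor = builder_boost_factor.unwrap_or(100) as u128;
    let boosted_builder_value = builder_value_wei * factor / 100;
    boosted_builder_value > local_value_wei
}

fn main() {
    // With a 50% boost factor, the builder bid must be worth more than
    // twice the local payload to win.
    assert!(!prefer_builder_payload(150, 100, Some(50)));
    assert!(prefer_builder_payload(250, 100, Some(50)));
    // With no factor supplied, a straight comparison is used.
    assert!(prefer_builder_payload(101, 100, None));
}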
@@ -3995,6 +4023,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
randao_reveal,
validator_graffiti,
verification,
builder_boost_factor,
block_production_version,
)
.await
@@ -4539,6 +4568,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
randao_reveal: Signature,
validator_graffiti: Option<Graffiti>,
verification: ProduceBlockVerification,
builder_boost_factor: Option<u64>,
block_production_version: BlockProductionVersion,
) -> Result<BeaconBlockResponseWrapper<T::EthSpec>, BlockProductionError> {
// Part 1/3 (blocking)
@@ -4555,6 +4585,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
produce_at_slot,
randao_reveal,
validator_graffiti,
builder_boost_factor,
block_production_version,
)
},
@@ -4644,13 +4675,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}

#[allow(clippy::too_many_arguments)]
fn produce_partial_beacon_block(
self: &Arc<Self>,
mut state: BeaconState<T::EthSpec>,
state_root_opt: Option<Hash256>,
produce_at_slot: Slot,
randao_reveal: Signature,
validator_graffiti: Option<Graffiti>,
builder_boost_factor: Option<u64>,
block_production_version: BlockProductionVersion,
) -> Result<PartialBeaconBlock<T::EthSpec>, BlockProductionError> {
let eth1_chain = self
@@ -4713,6 +4746,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
parent_root,
proposer_index,
builder_params,
builder_boost_factor,
block_production_version,
)?;
Some(prepare_payload_handle)
@@ -5056,8 +5090,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.try_into()
.map_err(|_| BlockProductionError::InvalidPayloadFork)?,
bls_to_execution_changes: bls_to_execution_changes.into(),
blob_kzg_commitments: kzg_commitments
.ok_or(BlockProductionError::InvalidPayloadFork)?,
blob_kzg_commitments: kzg_commitments.ok_or(
BlockProductionError::MissingKzgCommitment(
"Kzg commitments missing from block contents".to_string(),
),
)?,
},
}),
maybe_blobs_and_proofs,
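The hunk above replaces a generic `InvalidPayloadFork` error with a dedicated `MissingKzgCommitment` variant that carries a message. A minimal sketch of the `Option::ok_or` pattern used there (the one-variant enum below is a stand-in, not the full Lighthouse `BlockProductionError`):

#[derive(Debug)]
enum BlockProductionError {
    MissingKzgCommitment(String),
}

fn require_kzg_commitments(
    kzg_commitments: Option<Vec<[u8; 48]>>,
) -> Result<Vec<[u8; 48]>, BlockProductionError> {
    // `ok_or` turns a missing value into a specific, descriptive error.
    kzg_commitments.ok_or(BlockProductionError::MissingKzgCommitment(
        "Kzg commitments missing from block contents".to_string(),
    ))
}

fn main() {
    assert!(matches!(
        require_kzg_commitments(None),
        Err(BlockProductionError::MissingKzgCommitment(_))
    ));
    assert!(require_kzg_commitments(Some(vec![[0u8; 48]])).is_ok());
}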
@@ -5172,8 +5209,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block,
state,
blob_items,
execution_payload_value: Some(execution_payload_value),
consensus_block_value: Some(consensus_block_value),
execution_payload_value,
consensus_block_value,
})
}

@@ -5449,6 +5486,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(),
payload_attributes: payload_attributes.into(),
},
metadata: Default::default(),
version: Some(self.spec.fork_name_at_slot::<T::EthSpec>(prepare_slot)),
}));
}
4 changes: 3 additions & 1 deletion beacon_node/beacon_chain/src/beacon_proposer_cache.rs
@@ -14,12 +14,14 @@ use lru::LruCache;
use smallvec::SmallVec;
use state_processing::state_advance::partial_state_advance;
use std::cmp::Ordering;
use std::num::NonZeroUsize;
use types::non_zero_usize::new_non_zero_usize;
use types::{
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned,
};

/// The number of sets of proposer indices that should be cached.
const CACHE_SIZE: usize = 16;
const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16);

/// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being
/// incorrect is non-substantial from a consensus perspective (and probably also from a
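The switch to a `NonZeroUsize` cache size follows from the `lru` bump to 0.12 in Cargo.toml above: `LruCache::new` now takes a `NonZeroUsize` capacity. A small standalone sketch of the new API using std's `NonZeroUsize` directly instead of the `new_non_zero_usize` helper imported in the diff:

use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    // lru 0.12 requires a non-zero capacity at the type level.
    let cache_size = NonZeroUsize::new(16).expect("16 is non-zero");
    let mut cache: LruCache<u64, &str> = LruCache::new(cache_size);

    cache.put(1, "epoch 1 proposers");
    cache.put(2, "epoch 2 proposers");

    assert_eq!(cache.get(&1), Some(&"epoch 1 proposers"));
    assert_eq!(cache.len(), 2);
}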