Ignore gossip blobs with a slot greater than peer das activation epoch.
jimmygchen committed May 10, 2024
1 parent 5f09cfd commit fdcf129
Showing 6 changed files with 47 additions and 5 deletions.
5 changes: 5 additions & 0 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -3006,6 +3006,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Err(BlockError::BlockIsAlreadyKnown(blob.block_root()));
}

// No need to process and import blobs beyond PeerDAS epoch.
if self.is_peer_das_enabled_for_slot(blob.slot()) {
return Err(BlockError::BlobNotRequired(blob.slot()));
}

if let Some(event_handler) = self.event_handler.as_ref() {
if event_handler.has_blob_sidecar_subscribers() {
event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar(
10 changes: 9 additions & 1 deletion beacon_node/beacon_chain/src/block_verification.rs
@@ -309,6 +309,14 @@ pub enum BlockError<E: EthSpec> {
/// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob.
/// https://github.com/sigp/lighthouse/issues/4546
AvailabilityCheck(AvailabilityCheckError),
/// A blob with a slot at or after the PeerDAS activation epoch was received and is not required
/// to be imported. This can happen because we stay subscribed to the blob subnets for 2 epochs
/// after PeerDAS is activated, as we could still receive valid blobs from a Deneb epoch.
///
/// ## Peer scoring
///
/// This indicates the peer is sending an unexpected gossip blob and should be penalised.
BlobNotRequired(Slot),
}

impl<E: EthSpec> From<AvailabilityCheckError> for BlockError<E> {
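
For intuition on the new `BlobNotRequired` variant: nodes keep the Deneb blob-sidecar topics subscribed for a short window after the PeerDAS fork (the doc comment above mentions 2 epochs), so blobs for post-fork slots can still arrive on gossip even though they no longer need to be imported. A minimal, self-contained sketch of that timing follows; the helper names and the two-epoch delay are assumptions for illustration, not Lighthouse APIs.

// Illustrative only: both helpers and `unsubscribe_delay_epochs` are assumptions
// based on the doc comment above, not actual Lighthouse code.
fn still_subscribed_to_blob_topics(current_epoch: u64, fork_epoch: u64, unsubscribe_delay_epochs: u64) -> bool {
    // Blob topics remain subscribed for a short window after the PeerDAS fork.
    current_epoch < fork_epoch + unsubscribe_delay_epochs
}

fn blob_required_for_import(blob_epoch: u64, fork_epoch: u64) -> bool {
    // Only blobs from slots before the PeerDAS fork epoch still need importing.
    blob_epoch < fork_epoch
}

fn main() {
    let fork_epoch = 100;
    // One epoch after the fork: still subscribed, so a blob can arrive on gossip,
    // but it is no longer required -- exactly the `BlobNotRequired` case.
    assert!(still_subscribed_to_blob_topics(101, fork_epoch, 2));
    assert!(!blob_required_for_import(101, fork_epoch));
}
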
@@ -760,7 +768,7 @@ fn build_gossip_verified_blobs<T: BeaconChainTypes>(
for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() {
let _timer =
metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION);
let blob = BlobSidecar::new(i, blob, &block, *kzg_proof)
let blob = BlobSidecar::new(i, blob, block, *kzg_proof)
.map_err(BlockContentsError::BlobSidecarError)?;
drop(_timer);
let gossip_verified_blob =
1 change: 1 addition & 0 deletions beacon_node/beacon_chain/src/lib.rs
@@ -47,6 +47,7 @@ pub mod observed_block_producers;
pub mod observed_operations;
mod observed_slashable;
pub mod otb_verification_service;
pub mod peer_das;
mod persisted_beacon_chain;
mod persisted_fork_choice;
mod pre_finalization_cache;
18 changes: 18 additions & 0 deletions beacon_node/beacon_chain/src/peer_das.rs
@@ -0,0 +1,18 @@
use crate::{BeaconChain, BeaconChainTypes};
use types::{Epoch, EthSpec, Slot};

impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns true if the given epoch is greater than or equal to the `EIP7594_FORK_EPOCH`.
pub(crate) fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool {
self.spec
.eip7594_fork_epoch
.map_or(false, |eip7594_fork_epoch| {
block_epoch >= eip7594_fork_epoch
})
}

/// Returns true if the epoch of the given slot is greater than or equal to the `EIP7594_FORK_EPOCH`.
pub(crate) fn is_peer_das_enabled_for_slot(&self, block_slot: Slot) -> bool {
self.is_peer_das_enabled_for_epoch(block_slot.epoch(T::EthSpec::slots_per_epoch()))
}
}
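
The helpers above boil down to mapping a slot to its epoch (integer division by the slots-per-epoch constant) and comparing against an optional fork epoch, where an unscheduled fork (`eip7594_fork_epoch` of `None`) means PeerDAS is never enabled. A standalone sketch of that logic on plain integers, using example values rather than the real spec types:

// Standalone illustration of the fork-epoch check; constants are example values,
// not the actual `ChainSpec` configuration.
fn is_peer_das_enabled(fork_epoch: Option<u64>, slot: u64, slots_per_epoch: u64) -> bool {
    let epoch = slot / slots_per_epoch;
    // `None` means EIP-7594 is not scheduled, so the check is always false.
    fork_epoch.map_or(false, |fork_epoch| epoch >= fork_epoch)
}

fn main() {
    let slots_per_epoch = 32; // mainnet value
    let fork_epoch = Some(10);
    assert!(!is_peer_das_enabled(fork_epoch, 10 * slots_per_epoch - 1, slots_per_epoch)); // last pre-fork slot
    assert!(is_peer_das_enabled(fork_epoch, 10 * slots_per_epoch, slots_per_epoch)); // first PeerDAS slot
    assert!(!is_peer_das_enabled(None, 10 * slots_per_epoch, slots_per_epoch)); // fork not scheduled
}
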
8 changes: 4 additions & 4 deletions beacon_node/http_api/src/publish_blocks.rs
@@ -173,6 +173,10 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
}
};

// TODO(das): We could potentially get rid of these conversions and pass `GossipVerified` types
// to `publish_block`, i.e. have `GossipVerified` types in `PubsubMessage`?
// This saves us from extra code and provides a guarantee that published
// components are verified.
// Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not
// `Arc`'d, but blobs are.
let block = gossip_verified_block.block.block_cloned();
@@ -183,10 +187,6 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
.collect::<Vec<_>>();
VariableList::from(blobs)
});
// TODO(das): We could potentially get rid of these conversions and pass `GossipVerified` types
// to `publish_block`, i.e. have `GossipVerified` types in `PubsubMessage`?
// This saves us from extra code and provides a guarantee that published
// components are verified.
let data_cols_opt = gossip_verified_data_columns
.as_ref()
.map(|gossip_verified_data_columns| {
10 changes: 10 additions & 0 deletions beacon_node/network/src/network_beacon_processor/gossip_methods.rs
@@ -1158,6 +1158,16 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
);
return None;
}
Err(e @ BlockError::BlobNotRequired(slot)) => {
// TODO(das): penalty not implemented yet as other clients may still send us blobs
// during the early stages of implementation.
debug!(self.log, "Received blobs for slot after PeerDAS epoch from peer";
"slot" => %slot,
"peer_id" => %peer_id,
);
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
return None;
}
};

metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL);
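
For context on the handling above: the blob is dropped from gossip propagation with `MessageAcceptance::Ignore` and the peer is not penalised yet, since other clients may still publish blobs past the fork during the early rollout. A self-contained sketch of that mapping, with simplified stand-in types rather than the real `lighthouse_network` and `BlockError` definitions:

// Simplified, illustrative types; not the actual Lighthouse definitions.
#[derive(Debug)]
enum BlobGossipError {
    BlobNotRequired(u64), // slot
    Invalid,
}

#[derive(Debug, PartialEq)]
enum MessageAcceptance {
    Accept,
    Ignore,
    Reject,
}

fn acceptance_for(result: Result<(), BlobGossipError>) -> MessageAcceptance {
    match result {
        Ok(()) => MessageAcceptance::Accept,
        // Post-PeerDAS blobs are ignored (not forwarded) with no peer penalty yet;
        // see the TODO(das) in the handler above.
        Err(BlobGossipError::BlobNotRequired(_slot)) => MessageAcceptance::Ignore,
        // Other verification failures may warrant rejection and penalties.
        Err(BlobGossipError::Invalid) => MessageAcceptance::Reject,
    }
}

fn main() {
    let decision = acceptance_for(Err(BlobGossipError::BlobNotRequired(320)));
    assert_eq!(decision, MessageAcceptance::Ignore);
}
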
