gossip: do not allow duplicate proofs for incorrect shred versions (anza-xyz#1931)

* gossip: do not allow duplicate proofs for incorrect shred versions

* pr feedback: refactor test function to take shred_version
AshwinSekar committed Jul 1, 2024
1 parent bd1a5df commit 69ea21e
Showing 5 changed files with 190 additions and 14 deletions.
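
In short: a duplicate-shred proof is now rejected unless both shreds in it carry the node's own shred version. A minimal, self-contained sketch of that gate, using toy types as stand-ins for the real Shred and duplicate_shred::Error (the actual implementation is in gossip/src/duplicate_shred.rs below):

// Toy stand-ins; not the agave types.
#[derive(Debug, PartialEq)]
enum ProofError {
    InvalidShredVersion(u16),
}

struct ToyShred {
    version: u16,
}

// Mirrors the new gate in check_shreds: each shred in the proof must match
// the shred version this node is running with.
fn check_versions(shred1: &ToyShred, shred2: &ToyShred, expected: u16) -> Result<(), ProofError> {
    if shred1.version != expected {
        return Err(ProofError::InvalidShredVersion(shred1.version));
    }
    if shred2.version != expected {
        return Err(ProofError::InvalidShredVersion(shred2.version));
    }
    Ok(())
}

fn main() {
    let (good, stale) = (ToyShred { version: 0 }, ToyShred { version: 1 });
    assert!(check_versions(&good, &good, 0).is_ok());
    assert_eq!(
        check_versions(&good, &stale, 0),
        Err(ProofError::InvalidShredVersion(1))
    );
}
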
1 change: 1 addition & 0 deletions core/src/tvu.rs
@@ -352,6 +352,7 @@ impl Tvu {
leader_schedule_cache.clone(),
bank_forks.clone(),
duplicate_slots_sender,
tvu_config.shred_version,
),
);

2 changes: 2 additions & 0 deletions gossip/src/cluster_info.rs
@@ -1261,6 +1261,7 @@ impl ClusterInfo {
other_payload,
None::<fn(Slot) -> Option<Pubkey>>, // Leader schedule
DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
self.my_shred_version(),
)?;
Ok(())
}
@@ -3698,6 +3699,7 @@ mod tests {
Some(leader_schedule),
timestamp(),
DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
version,
)
.unwrap()
.collect();
2 changes: 2 additions & 0 deletions gossip/src/crds_gossip.rs
@@ -90,6 +90,7 @@ impl CrdsGossip {
leader_schedule: Option<F>,
// Maximum serialized size of each DuplicateShred chunk payload.
max_payload_size: usize,
shred_version: u16,
) -> Result<(), duplicate_shred::Error>
where
F: FnOnce(Slot) -> Option<Pubkey>,
@@ -114,6 +115,7 @@
leader_schedule,
timestamp(),
max_payload_size,
shred_version,
)?;
// Find the index of oldest duplicate shred.
let mut num_dup_shreds = 0;
174 changes: 163 additions & 11 deletions gossip/src/duplicate_shred.rs
@@ -66,6 +66,8 @@ pub enum Error {
InvalidErasureMetaConflict,
#[error("invalid last index conflict")]
InvalidLastIndexConflict,
#[error("invalid shred version: {0}")]
InvalidShredVersion(u16),
#[error("invalid signature")]
InvalidSignature,
#[error("invalid size limit")]
@@ -90,6 +92,7 @@ pub enum Error {

/// Check that `shred1` and `shred2` indicate a valid duplicate proof
/// - Must be for the same slot
/// - Must match the expected shred version
/// - Must both sigverify for the correct leader
/// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type`
/// - If `shred1` and `shred2` share the same index they must be not equal
@@ -98,14 +101,26 @@ pub enum Error {
/// LAST_SHRED_IN_SLOT, however the other shred must have a higher index.
/// - If `shred1` and `shred2` do not share the same index and are coding shreds
/// verify that they have conflicting erasure metas
fn check_shreds<F>(leader_schedule: Option<F>, shred1: &Shred, shred2: &Shred) -> Result<(), Error>
fn check_shreds<F>(
leader_schedule: Option<F>,
shred1: &Shred,
shred2: &Shred,
shred_version: u16,
) -> Result<(), Error>
where
F: FnOnce(Slot) -> Option<Pubkey>,
{
if shred1.slot() != shred2.slot() {
return Err(Error::SlotMismatch);
}

if shred1.version() != shred_version {
return Err(Error::InvalidShredVersion(shred1.version()));
}
if shred2.version() != shred_version {
return Err(Error::InvalidShredVersion(shred2.version()));
}

if let Some(leader_schedule) = leader_schedule {
let slot_leader =
leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?;
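
With the checks above, a version mismatch surfaces as Error::InvalidShredVersion carrying the mismatching version. A hedged, self-contained sketch of how a consumer might branch on the new variant (the enum below is a reduced stand-in for duplicate_shred::Error, and the handling shown is illustrative, not taken from this commit):

// Reduced stand-in for duplicate_shred::Error.
#[derive(Debug)]
enum ProofError {
    SlotMismatch,
    InvalidShredVersion(u16),
}

// Illustrative consumer: separate version mismatches from other proof failures.
fn describe(err: &ProofError) -> String {
    match err {
        ProofError::InvalidShredVersion(v) => {
            format!("proof was built for shred version {v}; dropping it")
        }
        ProofError::SlotMismatch => "shreds in the proof are from different slots".to_string(),
    }
}

fn main() {
    println!("{}", describe(&ProofError::InvalidShredVersion(1)));
}
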
@@ -165,6 +180,7 @@ pub(crate) fn from_shred<F>(
leader_schedule: Option<F>,
wallclock: u64,
max_size: usize, // Maximum serialized size of each DuplicateShred.
shred_version: u16,
) -> Result<impl Iterator<Item = DuplicateShred>, Error>
where
F: FnOnce(Slot) -> Option<Pubkey>,
@@ -173,7 +189,7 @@ where
return Err(Error::InvalidDuplicateShreds);
}
let other_shred = Shred::new_from_serialized_shred(other_payload)?;
check_shreds(leader_schedule, &shred, &other_shred)?;
check_shreds(leader_schedule, &shred, &other_shred, shred_version)?;
let slot = shred.slot();
let proof = DuplicateSlotProof {
shred1: shred.into_payload(),
@@ -226,6 +242,7 @@ fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result
pub(crate) fn into_shreds(
slot_leader: &Pubkey,
chunks: impl IntoIterator<Item = DuplicateShred>,
shred_version: u16,
) -> Result<(Shred, Shred), Error> {
let mut chunks = chunks.into_iter();
let DuplicateShred {
@@ -261,10 +278,16 @@ pub(crate) fn into_shreds(
}
let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;

if shred1.slot() != slot || shred2.slot() != slot {
Err(Error::SlotMismatch)
} else {
check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?;
check_shreds(
Some(|_| Some(slot_leader).copied()),
&shred1,
&shred2,
shred_version,
)?;
Ok((shred1, shred2))
}
}
@@ -487,11 +510,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
@@ -542,6 +566,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
@@ -560,7 +585,9 @@
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidDuplicateSlotProof
);
}
@@ -629,11 +656,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, &shred3);
assert_eq!(shred2, &shred4);
}
@@ -737,6 +765,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
@@ -755,7 +784,9 @@
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidLastIndexConflict
);
}
@@ -814,11 +845,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
@@ -895,6 +927,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
@@ -913,7 +946,9 @@
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidErasureMetaConflict
);
}
@@ -986,11 +1021,12 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.unwrap()
.collect();
assert!(chunks.len() > 4);
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
assert_eq!(shred1, shred3);
assert_eq!(shred2, shred4);
}
@@ -1077,6 +1113,7 @@ pub(crate) mod tests {
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
@@ -1095,9 +1132,124 @@ pub(crate) mod tests {
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks).err().unwrap(),
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::ShredTypeMismatch
);
}
}

#[test]
fn test_shred_version() {
let mut rng = rand::thread_rng();
let leader = Arc::new(Keypair::new());
let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
let next_shred_index = rng.gen_range(0..31_000);
let leader_schedule = |s| {
if s == slot {
Some(leader.pubkey())
} else {
None
}
};

let (data_shreds, coding_shreds) = new_rand_shreds(
&mut rng,
next_shred_index,
next_shred_index,
10,
true,
&shredder,
&leader,
true,
);

// Wrong shred version 1
let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 1).unwrap();
let (wrong_data_shreds_1, wrong_coding_shreds_1) = new_rand_shreds(
&mut rng,
next_shred_index,
next_shred_index,
10,
true,
&shredder,
&leader,
true,
);

// Wrong shred version 2
let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 2).unwrap();
let (wrong_data_shreds_2, wrong_coding_shreds_2) = new_rand_shreds(
&mut rng,
next_shred_index,
next_shred_index,
10,
true,
&shredder,
&leader,
true,
);

let test_cases = vec![
// One correct shred version, one wrong
(coding_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
(coding_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
(data_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
(data_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
// Both wrong shred version
(
wrong_coding_shreds_2[0].clone(),
wrong_coding_shreds_1[0].clone(),
),
(
wrong_coding_shreds_2[0].clone(),
wrong_data_shreds_1[0].clone(),
),
(
wrong_data_shreds_2[0].clone(),
wrong_coding_shreds_1[0].clone(),
),
(
wrong_data_shreds_2[0].clone(),
wrong_data_shreds_1[0].clone(),
),
];

for (shred1, shred2) in test_cases.into_iter() {
assert_matches!(
from_shred(
shred1.clone(),
Pubkey::new_unique(), // self_pubkey
shred2.payload().clone(),
Some(leader_schedule),
rng.gen(), // wallclock
512, // max_size
version,
)
.err()
.unwrap(),
Error::InvalidShredVersion(_)
);

let chunks: Vec<_> = from_shred_bypass_checks(
shred1.clone(),
Pubkey::new_unique(), // self_pubkey
shred2.clone(),
rng.gen(), // wallclock
512, // max_size
)
.unwrap()
.collect();
assert!(chunks.len() > 4);

assert_matches!(
into_shreds(&leader.pubkey(), chunks, version)
.err()
.unwrap(),
Error::InvalidShredVersion(_)
);
}
}
}