This repository was archived by the owner on Jan 22, 2025. It is now read-only.

Commit 3036699

mergify[bot] and AshwinSekar authored and committed
v1.18: blockstore: relax backwards chained merkle root check for upgrades (backport of #1163) (#1196)
blockstore: relax backwards chained merkle root check for upgrades (#1163)

* blockstore: relax backwards chained merkle root check for upgrades
* s/v1.18.12/v1.18.13

Co-authored-by: Trent Nelson <490004+t-nelson@users.noreply.github.com>

---------

Co-authored-by: Trent Nelson <490004+t-nelson@users.noreply.github.com>
(cherry picked from commit a645d07)
Co-authored-by: Ashwin Sekar <ashwin@anza.xyz>
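In short: the backwards chained merkle root check previously called .expect("merkle root meta must exist for erasure meta") when looking up the previous erasure set's merkle root meta. On a ledger written by a release older than v1.18.13 that meta may simply not exist, so the check now logs a warning and passes instead of panicking. A minimal standalone sketch of this pattern follows the diff below.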
1 parent c56e74e commit 3036699

File tree

1 file changed: +113 −2 lines changed

ledger/src/blockstore.rs

Lines changed: 113 additions & 2 deletions
@@ -1930,7 +1930,7 @@ impl Blockstore {
             return true;
         };
 
-        let prev_merkle_root_meta = merkle_root_metas
+        let Some(prev_merkle_root_meta) = merkle_root_metas
             .get(&prev_erasure_set)
             .map(WorkingEntry::as_ref)
             .map(Cow::Borrowed)
@@ -1939,7 +1939,14 @@
                     .unwrap()
                     .map(Cow::Owned)
             })
-            .expect("merkle root meta must exist for erasure meta");
+        else {
+            warn!(
+                "The merkle root meta for the previous erasure set {prev_erasure_set:?} does not exist.
+                This should only happen if you have recently upgraded from a version < v1.18.13.
+                Skipping the backwards chained merkle root for {erasure_set:?}"
+            );
+            return true;
+        };
         let prev_shred_id = ShredId::new(
             slot,
             prev_merkle_root_meta.first_received_shred_index(),
@@ -11641,4 +11648,108 @@ pub mod tests {
             )
         );
     }
+
+    #[test]
+    fn test_chained_merkle_root_upgrade_inconsistency_backwards() {
+        // Insert a coding shred (without a merkle meta) then inconsistent shreds from the next FEC set
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred_previous = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred_previous.clone(), &leader_schedule,)
+            .is_empty());
+
+        // Remove the merkle root meta in order to simulate this blockstore originating from
+        // an older version.
+        let mut write_batch = blockstore.db.batch().unwrap();
+        blockstore
+            .db
+            .delete_range_cf::<cf::MerkleRootMeta>(&mut write_batch, slot, slot)
+            .unwrap();
+        blockstore.db.write(write_batch).unwrap();
+        assert!(blockstore
+            .merkle_root_meta(coding_shred_previous.erasure_set())
+            .unwrap()
+            .is_none());
+
+        // Add an incorrectly chained merkle from the next set. Although incorrectly chained
+        // we skip the duplicate check as the merkle root meta is missing.
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != coding_shred_previous.merkle_root().unwrap());
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                next_fec_set_index,
+                Some(merkle_root),
+            );
+        let data_shred = data_shreds[0].clone();
+        let coding_shred = coding_shreds[0].clone();
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred, &leader_schedule)
+            .is_empty());
+        assert!(blockstore
+            .insert_shred_return_duplicate(data_shred, &leader_schedule,)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_chained_merkle_root_upgrade_inconsistency_forwards() {
+        // Insert a data shred (without a merkle root), then an inconsistent coding shred from the previous FEC set.
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+        let parent_slot = 0;
+        let slot = 1;
+        let fec_set_index = 0;
+        let (data_shreds, coding_shreds, leader_schedule) =
+            setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
+        let coding_shred = coding_shreds[0].clone();
+        let next_fec_set_index = fec_set_index + data_shreds.len() as u32;
+
+        // Incorrectly chained merkle
+        let merkle_root = Hash::new_unique();
+        assert!(merkle_root != coding_shred.merkle_root().unwrap());
+        let (next_data_shreds, next_coding_shreds, leader_schedule_next) =
+            setup_erasure_shreds_with_index_and_chained_merkle(
+                slot,
+                parent_slot,
+                10,
+                next_fec_set_index,
+                Some(merkle_root),
+            );
+        let next_data_shred = next_data_shreds[0].clone();
+
+        assert!(blockstore
+            .insert_shred_return_duplicate(next_data_shred, &leader_schedule_next,)
+            .is_empty());
+
+        // Remove the merkle root meta in order to simulate this blockstore originating from
+        // an older version.
+        let mut write_batch = blockstore.db.batch().unwrap();
+        blockstore
+            .db
+            .delete_range_cf::<cf::MerkleRootMeta>(&mut write_batch, slot, slot)
+            .unwrap();
+        blockstore.db.write(write_batch).unwrap();
+        assert!(blockstore
+            .merkle_root_meta(next_coding_shreds[0].erasure_set())
+            .unwrap()
+            .is_none());
+
+        // Insert previous FEC set, although incorrectly chained we skip the duplicate check
+        // as the merkle root meta is missing.
+        assert!(blockstore
+            .insert_shred_return_duplicate(coding_shred, &leader_schedule)
+            .is_empty());
+    }
 }
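For orientation, here is a minimal, self-contained sketch of the pattern this patch adopts, assuming a simplified MerkleRootMeta type and a plain HashMap in place of the blockstore's column-backed working entries (all names below are hypothetical stand-ins, not the blockstore API): a `let ... else` binding replaces the hard `.expect(...)`, so a missing previous merkle root meta is logged and the check is treated as passed.

use std::collections::HashMap;

// Hypothetical stand-in for the blockstore's per-erasure-set merkle root metadata.
struct MerkleRootMeta {
    merkle_root: [u8; 32],
}

/// Returns true when backwards chaining is consistent, or when the check
/// cannot be performed because the previous set's meta is missing.
fn backwards_chaining_ok(
    prev_erasure_set: u32,
    chained_root_in_next_set: [u8; 32],
    merkle_root_metas: &HashMap<u32, MerkleRootMeta>,
) -> bool {
    // Before the patch this lookup ended in
    // `.expect("merkle root meta must exist for erasure meta")`, which panics
    // when the entry is absent.
    let Some(prev_meta) = merkle_root_metas.get(&prev_erasure_set) else {
        // The entry can legitimately be absent on a blockstore written by a
        // version older than v1.18.13, so warn and skip the check.
        eprintln!(
            "merkle root meta for previous erasure set {prev_erasure_set} is missing; \
             skipping backwards chained merkle root check"
        );
        return true;
    };
    // The real check compares the previous set's merkle root against the
    // chained merkle root carried by the next set's shreds.
    prev_meta.merkle_root == chained_root_in_next_set
}

fn main() {
    // Simulate a pre-upgrade blockstore with no merkle root metas recorded:
    // the check is skipped rather than panicking.
    let metas: HashMap<u32, MerkleRootMeta> = HashMap::new();
    assert!(backwards_chaining_ok(0, [7u8; 32], &metas));

    // With the meta present, a mismatched chained root is reported as inconsistent.
    let mut metas = metas;
    metas.insert(0, MerkleRootMeta { merkle_root: [1u8; 32] });
    assert!(!backwards_chaining_ok(0, [7u8; 32], &metas));
}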
