1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
### Fixed

- CheckQuorum now requires a quorum in every configuration (#7375)
- `read_ledger.py` validates the offsets table in committed ledger files, reporting an error if it is truncated (#7501).

## [6.0.16]

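For context, a minimal sketch of how the new validation surfaces through the `ccf.ledger` Python module. The `LedgerChunk` constructor and iteration pattern are taken from the tests further down in this diff; the file path is illustrative only:

import ccf.ledger

# Opening a .committed chunk whose offsets table has been truncated now fails
# fast with a descriptive ValueError, rather than failing obscurely (or not at
# all) during later parsing.
try:
    chunk = ccf.ledger.LedgerChunk("ledger/ledger_44-64.committed")
    for tx in chunk:
        tx.get_public_domain()
except ValueError as e:
    print(f"Rejected invalid chunk: {e}")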
6 changes: 2 additions & 4 deletions js/ccf-app/src/endpoints.ts
@@ -158,14 +158,12 @@ interface UserMemberCOSEAuthIdentityCommon {
 }

 export interface MemberCOSESign1AuthnIdentity
-  extends UserMemberAuthnIdentityCommon,
-    UserMemberCOSEAuthIdentityCommon {
+  extends UserMemberAuthnIdentityCommon, UserMemberCOSEAuthIdentityCommon {
   policy: "member_cose_sign1";
 }

 export interface UserCOSESign1AuthnIdentity
-  extends UserMemberAuthnIdentityCommon,
-    UserMemberCOSEAuthIdentityCommon {
+  extends UserMemberAuthnIdentityCommon, UserMemberCOSEAuthIdentityCommon {
   policy: "user_cose_sign1";
 }

14 changes: 14 additions & 0 deletions python/src/ccf/ledger.py
@@ -956,7 +956,13 @@ def __init__(self, name: str):

        # If the ledger chunk is not yet committed, the ledger header will be empty.
        # Default to reading the file size instead.
        full_file_size = os.path.getsize(name)
        if self._pos_offset > 0:
            if self._pos_offset > full_file_size:
                raise ValueError(
                    f"Invalid ledger chunk {name}: File header claims offset table is at {self._pos_offset}, yet file is only {full_file_size} bytes"
                )

            self._file_size = self._pos_offset

            positions_buffer = _peek_all(self._file, self._pos_offset)
@@ -978,6 +984,14 @@ def __init__(self, name: str):

        self.start_seqno, self.end_seqno = range_from_filename(name)

        if self.end_seqno is not None:
            tx_count_from_filename = self.end_seqno - self.start_seqno + 1
            tx_count_from_positions = len(self._positions)
            if tx_count_from_filename != tx_count_from_positions:
                raise ValueError(
                    f"Invalid ledger chunk {name}: Expected to contain {tx_count_from_filename} transactions due to filename, but found {tx_count_from_positions} by reading file"
                )

    def __getitem__(self, key):
        if isinstance(key, int):
            position = self._positions[key]
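For readers unfamiliar with the chunk format these checks rely on: a committed ledger file starts with a little-endian header giving the offset of the positions table, and the table itself is a packed array of uint32 transaction offsets (the uint32 width is implied by the "Expected positions to contain uint32s" error exercised in the tests below). A standalone sketch of the two validations added above; the 8-byte header width and the helper name are assumptions for illustration, not the `ccf.ledger` API:

import os
import struct

LEDGER_HEADER_SIZE = 8  # assumed header width; ccf.ledger.LEDGER_HEADER_SIZE in the real module

def read_chunk_positions(path):
    # Illustrative re-implementation of the validation added in ledger.py above;
    # not the ccf.ledger implementation itself.
    file_size = os.path.getsize(path)
    with open(path, "rb") as f:
        data = f.read()
    pos_offset = int.from_bytes(data[:LEDGER_HEADER_SIZE], byteorder="little")
    if pos_offset == 0:
        return None  # uncommitted chunk: offsets table not yet written
    if pos_offset > file_size:
        raise ValueError(
            f"Invalid ledger chunk {path}: File header claims offset table is at "
            f"{pos_offset}, yet file is only {file_size} bytes"
        )
    table = data[pos_offset:]
    if len(table) % 4 != 0:
        raise ValueError("Expected positions to contain uint32s")
    return list(struct.unpack(f"<{len(table) // 4}I", table))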
115 changes: 115 additions & 0 deletions tests/e2e_operations.py
@@ -1503,6 +1503,121 @@ def run_read_ledger_on_testdata(args):
f"Valid snapshot at {snapshot_file.path} with {len(tables)} tables"
)

# Corrupt a single chunk to confirm that read_ledger throws appropriate errors
source_chunk = os.path.join(
args.historical_testdata,
"double_sealed_service",
"ledger",
"ledger_44-64.committed",
)
good_chunk = ccf.ledger.LedgerChunk(source_chunk)
start_seqno, end_seqno = good_chunk.get_seqnos()
expected_range = end_seqno - start_seqno + 1
tx_count_error = (
f"Expected to contain {expected_range} transactions due to filename"
)
chunk_name = os.path.basename(source_chunk)
with open(source_chunk, "rb") as src_f:
good_data = src_f.read()
source_size = len(good_data)
header_size = ccf.ledger.LEDGER_HEADER_SIZE
source_offset = int.from_bytes(good_data[0:header_size], byteorder="little")
assert source_offset > 0
assert source_offset < source_size

# Create an alternate version of the chunk without the offsets table. Should be equivalent to the file immediately before it was marked `.committed`
no_offsets_table = (
int(0).to_bytes(header_size, byteorder="little")
+ good_data[header_size:source_offset]
)

null_block_size = source_size // 8

for name, corrupted_data, expected_parse_error in [
(
"truncate_pre_offsets",
good_data[: source_offset - 1],
f"File header claims offset table is at {source_offset}",
),
(
"truncate_at_offsets",
good_data[:source_offset],
tx_count_error,
),
(
"truncate_tx_no_offsets",
no_offsets_table[: source_size // 2],
tx_count_error,
),
(
"nulled_block",
good_data[: source_size // 2]
+ b"\00" * null_block_size
+ good_data[source_size // 2 + null_block_size :],
"index out of range",
),
(
"header_offset_too_large",
(source_size + 1).to_bytes(header_size, byteorder="little")
+ good_data[header_size:],
f"File header claims offset table is at {source_size + 1}",
),
(
"truncate_mid_offsets",
good_data[:-4],
tx_count_error,
),
(
"misaligned_offsets_too_small",
good_data[:-1],
"Expected positions to contain uint32s",
),
(
"misaligned_offsets_too_large",
good_data + b"\x00",
"Expected positions to contain uint32s",
),
(
"unread_data",
good_data + b"\x00" * 4,
tx_count_error,
),
(
"unread_data_no_offsets_misaligned",
no_offsets_table + b"\x00",
"Failed to read precise number of bytes",
),
(
"unread_data_no_offsets",
no_offsets_table + b"\x00" * 8,
tx_count_error,
),
]:
temp_dir = tempfile.TemporaryDirectory(
dir="workspace", prefix=name + "-", delete=False
)
corrupted_chunk_path = os.path.join(temp_dir.name, chunk_name)
LOG.info(f"Testing chunk corruption {name} in {corrupted_chunk_path}")
with open(corrupted_chunk_path, "wb") as dst_f:
dst_f.write(corrupted_data)

try:
chunk = ccf.ledger.LedgerChunk(corrupted_chunk_path)
for tx in chunk:
tx.get_public_domain()

assert (
False
), f"Expected to raise exception while parsing corrupted ledger chunk {name}"

except Exception as e:
assert expected_parse_error in str(
e
), f"Unexpected error message for corruption {name}: {e}"

# NB: cleanup() is deliberately skipped if an assertion fails, so the corrupted files can be inspected
temp_dir.cleanup()
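As a worked example of the tx_count_error cases above: the fixture ledger_44-64.committed covers seqnos 44 through 64 inclusive, so its name implies 64 - 44 + 1 = 21 transactions, and any corruption that leaves a parseable file with a different number of position entries trips that error. A hypothetical helper mirroring what range_from_filename enables; the parsing here is a sketch, not ccf.ledger's implementation:

def expected_tx_count(chunk_filename):
    # Hypothetical sketch: derive the transaction count a committed chunk's
    # filename promises, e.g. "ledger_44-64.committed" -> 21.
    stem = chunk_filename.removeprefix("ledger_").removesuffix(".committed")
    start_seqno, end_seqno = (int(n) for n in stem.split("-"))
    return end_seqno - start_seqno + 1

assert expected_tx_count("ledger_44-64.committed") == 21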


def test_error_message_on_failure_to_read_aci_sec_context(args):
with infra.network.network(