Skip to content

Commit 51f5707

Browse files
Frandomatheus23
authored and committed
docs: improve docs of verifiable stream methods
1 parent d46810d commit 51f5707

File tree

1 file changed

+16
-7
lines changed

1 file changed

+16
-7
lines changed

src/store/traits.rs

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -96,20 +96,25 @@ pub trait MapEntry: std::fmt::Debug + Clone + Send + Sync + 'static {
9696
/// A future that resolves to a reader that can be used to read the data
9797
fn data_reader(&self) -> impl Future<Output = io::Result<impl AsyncSliceReader>> + Send;
9898

99-
/// Encodes data and outboard into a stream which can be imported with [`Store::import_verifiable_stream`].
99+
/// Encodes data and outboard into an [`AsyncStreamWriter`].
100+
///
101+
/// Data and outboard parts will be interleaved.
102+
///
103+
/// `offset` is the byte offset in the blob to start the stream from. It will be rounded down to
104+
/// the next chunk group.
100105
///
101106
/// Returns immediately without error if `offset` is equal to or larger than the entry's size.
102107
fn write_verifiable_stream<'a>(
103108
&'a self,
104-
start: u64,
109+
offset: u64,
105110
writer: impl AsyncStreamWriter + 'a,
106111
) -> impl Future<Output = io::Result<()>> + 'a {
107112
async move {
108113
let size = self.size().value();
109-
if start >= size {
114+
if offset >= size {
110115
return Ok(());
111116
}
112-
let ranges = range_from_offset_and_length(start, size - start);
117+
let ranges = range_from_offset_and_length(offset, size - offset);
113118
let (outboard, data) = tokio::try_join!(self.outboard(), self.data_reader())?;
114119
encode_ranges_validated(data, outboard, &ranges, writer).await?;
115120
Ok(())
@@ -367,15 +372,19 @@ pub trait Store: ReadableStore + MapMut + std::fmt::Debug {
367372
}
368373

369374
/// Import a blob from a verified stream, as emitted by [`MapEntry::write_verifiable_stream`].
375+
///
376+
/// `total_size` is the total size of the blob as reported by the remote.
377+
/// `offset` is the byte offset in the blob where the stream starts. It will be rounded
378+
/// down to the next chunk group.
370379
fn import_verifiable_stream<'a>(
371380
&'a self,
372381
hash: Hash,
373382
total_size: u64,
374-
stream_offset: u64,
383+
offset: u64,
375384
reader: impl AsyncStreamReader + 'a,
376385
) -> impl Future<Output = io::Result<()>> + 'a {
377386
async move {
378-
if stream_offset >= total_size {
387+
if offset >= total_size {
379388
return Err(io::Error::new(
380389
io::ErrorKind::InvalidInput,
381390
"offset must not be greater than total_size",
@@ -384,7 +393,7 @@ pub trait Store: ReadableStore + MapMut + std::fmt::Debug {
384393
let entry = self.get_or_create(hash, total_size).await?;
385394
let mut bw = entry.batch_writer().await?;
386395

387-
let ranges = range_from_offset_and_length(stream_offset, total_size - stream_offset);
396+
let ranges = range_from_offset_and_length(offset, total_size - offset);
388397
let mut decoder = ResponseDecoder::new(
389398
hash.into(),
390399
ranges,

0 commit comments

Comments
 (0)