Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix seeking #133

Merged
merged 7 commits into from
Jun 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 16 additions & 25 deletions src/filesystem/files.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,10 @@ pub(crate) struct FileInfo {
pub(crate) file_id: RawFile,
/// The unique ID for the volume this directory is on
pub(crate) volume_id: RawVolume,
/// The current cluster, and how many bytes that short-cuts us
/// The last cluster we accessed, and how many bytes that short-cuts us.
///
/// This saves us walking from the very start of the FAT chain when we move
/// forward through a file.
pub(crate) current_cluster: (u32, ClusterId),
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this variable rather be called `last_cluster_accessed` or something similar?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I thought about it, but it changes a bunch of stuff. Maybe in a later PR?

/// How far through the file we've read (in bytes).
pub(crate) current_offset: u32,
Expand All @@ -236,41 +239,30 @@ impl FileInfo {

/// Seek to a new position in the file, relative to the start of the file.
pub fn seek_from_start(&mut self, offset: u32) -> Result<(), FileError> {
if offset <= self.entry.size {
self.current_offset = offset;
if offset < self.current_cluster.0 {
// Back to start
self.current_cluster = (0, self.entry.cluster);
}
Ok(())
} else {
Err(FileError::InvalidOffset)
if offset > self.entry.size {
return Err(FileError::InvalidOffset);
}
self.current_offset = offset;
Ok(())
}

/// Seek to a new position in the file, relative to the end of the file.
pub fn seek_from_end(&mut self, offset: u32) -> Result<(), FileError> {
if offset <= self.entry.size {
self.current_offset = self.entry.size - offset;
if offset < self.current_cluster.0 {
// Back to start
self.current_cluster = (0, self.entry.cluster);
}
Ok(())
} else {
Err(FileError::InvalidOffset)
if offset > self.entry.size {
return Err(FileError::InvalidOffset);
}
self.current_offset = self.entry.size - offset;
Ok(())
}

/// Seek to a new position in the file, relative to the current position.
pub fn seek_from_current(&mut self, offset: i32) -> Result<(), FileError> {
let new_offset = i64::from(self.current_offset) + i64::from(offset);
if new_offset >= 0 && new_offset <= i64::from(self.entry.size) {
self.current_offset = new_offset as u32;
Ok(())
} else {
Err(FileError::InvalidOffset)
if new_offset < 0 || new_offset > i64::from(self.entry.size) {
return Err(FileError::InvalidOffset);
}
self.current_offset = new_offset as u32;
Ok(())
}

/// Amount of file left to read.
Expand All @@ -281,7 +273,6 @@ impl FileInfo {
/// Update the file's length (in bytes) in the cached directory entry.
pub(crate) fn update_length(&mut self, new: u32) {
    self.entry.size = new;
}
}

Expand Down
91 changes: 54 additions & 37 deletions src/volume_mgr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -624,6 +624,7 @@ where
let (block_idx, block_offset, block_avail) = self.find_data_on_disk(
volume_idx,
&mut current_cluster,
self.open_files[file_idx].entry.cluster,
self.open_files[file_idx].current_offset,
)?;
self.open_files[file_idx].current_cluster = current_cluster;
Expand Down Expand Up @@ -701,44 +702,45 @@ where
written, bytes_to_write, current_cluster
);
let current_offset = self.open_files[file_idx].current_offset;
let (block_idx, block_offset, block_avail) =
match self.find_data_on_disk(volume_idx, &mut current_cluster, current_offset) {
Ok(vars) => {
debug!(
"Found block_idx={:?}, block_offset={:?}, block_avail={}",
vars.0, vars.1, vars.2
);
vars
}
Err(Error::EndOfFile) => {
debug!("Extending file");
match self.open_volumes[volume_idx].volume_type {
VolumeType::Fat(ref mut fat) => {
if fat
.alloc_cluster(
&self.block_device,
Some(current_cluster.1),
false,
)
.is_err()
{
return Err(Error::DiskFull);
}
debug!("Allocated new FAT cluster, finding offsets...");
let new_offset = self
.find_data_on_disk(
volume_idx,
&mut current_cluster,
self.open_files[file_idx].current_offset,
)
.map_err(|_| Error::AllocationError)?;
debug!("New offset {:?}", new_offset);
new_offset
let (block_idx, block_offset, block_avail) = match self.find_data_on_disk(
volume_idx,
&mut current_cluster,
self.open_files[file_idx].entry.cluster,
current_offset,
) {
Ok(vars) => {
debug!(
"Found block_idx={:?}, block_offset={:?}, block_avail={}",
vars.0, vars.1, vars.2
);
vars
}
Err(Error::EndOfFile) => {
debug!("Extending file");
match self.open_volumes[volume_idx].volume_type {
VolumeType::Fat(ref mut fat) => {
if fat
.alloc_cluster(&self.block_device, Some(current_cluster.1), false)
.is_err()
{
return Err(Error::DiskFull);
}
debug!("Allocated new FAT cluster, finding offsets...");
let new_offset = self
.find_data_on_disk(
volume_idx,
&mut current_cluster,
self.open_files[file_idx].entry.cluster,
self.open_files[file_idx].current_offset,
)
.map_err(|_| Error::AllocationError)?;
debug!("New offset {:?}", new_offset);
new_offset
}
}
Err(e) => return Err(e),
};
}
Err(e) => return Err(e),
};
let mut blocks = [Block::new()];
let to_copy = core::cmp::min(block_avail, bytes_to_write - written);
if block_offset != 0 {
Expand Down Expand Up @@ -1043,18 +1045,33 @@ where

/// This function turns `desired_offset` into an appropriate block to be
/// read. It either calculates this based on the start of the file, or
/// from the last cluster we read - whichever is better.
/// from the given start point - whichever is better.
///
/// Returns:
///
/// * the index for the block on the disk that contains the data we want,
/// * the byte offset into that block for the data we want, and
/// * how many bytes remain in that block.
fn find_data_on_disk(
&self,
volume_idx: usize,
start: &mut (u32, ClusterId),
file_start: ClusterId,
desired_offset: u32,
) -> Result<(BlockIdx, usize, usize), Error<D::Error>> {
let bytes_per_cluster = match &self.open_volumes[volume_idx].volume_type {
VolumeType::Fat(fat) => fat.bytes_per_cluster(),
};
// do we need to be before our start point?
if desired_offset < start.0 {
// user wants to go backwards - start from the beginning of the file
// because the FAT is only a singly-linked list.
start.0 = 0;
start.1 = file_start;
}
// How many clusters forward do we need to go?
let offset_from_cluster = desired_offset - start.0;
// walk through the FAT chain
let num_clusters = offset_from_cluster / bytes_per_cluster;
let mut block_cache = BlockCache::empty();
for _ in 0..num_clusters {
Expand All @@ -1065,7 +1082,7 @@ where
};
start.0 += bytes_per_cluster;
}
// How many blocks in are we?
// How many blocks in are we now?
let offset_from_cluster = desired_offset - start.0;
assert!(offset_from_cluster < bytes_per_cluster);
let num_blocks = BlockCount(offset_from_cluster / Block::LEN_U32);
Expand Down
37 changes: 31 additions & 6 deletions tests/read_file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,27 +150,31 @@ fn read_file_backwards() {

const CHUNK_SIZE: u32 = 100;
let length = volume_mgr.file_length(test_file).expect("file length");
let mut offset = length - CHUNK_SIZE;
let mut read = 0;

// go to end
volume_mgr.file_seek_from_end(test_file, 0).expect("seek");

// We're going to read the file backwards in chunks of 100 bytes. This
// checks we didn't make any assumptions about only going forwards.
while read < length {
// go to start of next chunk
volume_mgr
.file_seek_from_start(test_file, offset)
.file_seek_from_current(test_file, -(CHUNK_SIZE as i32))
.expect("seek");
// read chunk
let mut buffer = [0u8; CHUNK_SIZE as usize];
let len = volume_mgr.read(test_file, &mut buffer).expect("read");
assert_eq!(len, CHUNK_SIZE as usize);
contents.push_front(buffer.to_vec());
read += CHUNK_SIZE;
if offset >= CHUNK_SIZE {
offset -= CHUNK_SIZE;
}
// go to start of chunk we just read
volume_mgr
.file_seek_from_current(test_file, -(CHUNK_SIZE as i32))
.expect("seek");
}

assert_eq!(read, length);
assert_eq!(offset, 0);

let flat: Vec<u8> = contents.iter().flatten().copied().collect();

Expand All @@ -180,6 +184,27 @@ fn read_file_backwards() {
assert_eq!(&hash[..], TEST_DAT_SHA256_SUM);
}

#[test]
fn read_file_with_odd_seek() {
    // Exercise a read pattern that repeatedly jumps backwards part-way
    // through the data it just read, from a non-block-aligned starting
    // offset. This stresses seeking behind the last position we resolved.
    let clock = utils::make_time_source();
    let block_dev = utils::make_block_device(utils::DISK_SOURCE).unwrap();
    let mut volume_mgr = embedded_sdmmc::VolumeManager::new(block_dev, clock);

    let mut volume = volume_mgr
        .open_volume(embedded_sdmmc::VolumeIdx(0))
        .unwrap();
    let mut root_dir = volume.open_root_dir().unwrap();
    let mut file = root_dir
        .open_file_in_dir("64MB.DAT", embedded_sdmmc::Mode::ReadOnly)
        .unwrap();

    // Start somewhere that is not a block boundary.
    file.seek_from_start(0x2c).unwrap();
    // Read 2 KiB forward, then step 1 KiB back, until a megabyte in.
    let mut scratch = [0u8; 2048];
    while file.offset() < 1000000 {
        file.read(&mut scratch).unwrap();
        file.seek_from_current(-1024).unwrap();
    }
}

// ****************************************************************************
//
// End Of File
Expand Down