// file.rs
  1. use super::{ClusterIterator, FatFs};
  2. use crate::{
  3. kernel::mem::{AsMemoryBlock as _, Page},
  4. KResult,
  5. };
  6. pub trait ClusterReadIterator<'data>: Iterator<Item = KResult<&'data [u8]>> + 'data {}
  7. impl<'a, I> ClusterReadIterator<'a> for I where I: Iterator<Item = KResult<&'a [u8]>> + 'a {}
  8. pub(super) trait ClusterRead<'data> {
  9. fn read<'vfs>(self, vfs: &'vfs FatFs, offset: usize) -> impl ClusterReadIterator<'data>
  10. where
  11. Self: Sized,
  12. 'vfs: 'data;
  13. }
  14. impl<'data, 'fat: 'data> ClusterRead<'data> for ClusterIterator<'fat> {
  15. fn read<'vfs: 'data>(self, vfs: &'vfs FatFs, offset: usize) -> impl ClusterReadIterator<'data> {
  16. const SECTOR_SIZE: usize = 512;
  17. let cluster_size = vfs.sectors_per_cluster as usize * SECTOR_SIZE;
  18. assert!(cluster_size <= 0x1000, "Cluster size is too large");
  19. let skip_clusters = offset / cluster_size;
  20. let mut inner_offset = offset % cluster_size;
  21. // TODO: Use block cache.
  22. let buffer_page = Page::alloc();
  23. self.skip(skip_clusters).map(move |cluster| {
  24. vfs.read_cluster(cluster, &buffer_page)?;
  25. let data = unsafe {
  26. // SAFETY: No one could be writing to it.
  27. &buffer_page.as_memblk().as_bytes()[inner_offset..]
  28. };
  29. inner_offset = 0;
  30. Ok(data)
  31. })
  32. }
  33. }