//! A read-only FAT32 filesystem driver.
  1. mod dir;
  2. use alloc::sync::Arc;
  3. use core::ops::Deref;
  4. use async_trait::async_trait;
  5. use dir::{as_raw_dirents, ParseDirent};
  6. use eonix_mm::paging::PAGE_SIZE;
  7. use eonix_sync::RwLock;
  8. use itertools::Itertools;
  9. use crate::io::{Buffer, ByteBuffer, UninitBuffer};
  10. use crate::kernel::block::{BlockDevice, BlockDeviceRequest};
  11. use crate::kernel::constants::{EINVAL, EIO};
  12. use crate::kernel::mem::{CachePage, Folio, FolioOwned, PageOffset};
  13. use crate::kernel::timer::Instant;
  14. use crate::kernel::vfs::dentry::Dentry;
  15. use crate::kernel::vfs::inode::{Ino, InodeInfo, InodeOps, InodeUse};
  16. use crate::kernel::vfs::mount::{register_filesystem, Mount, MountCreator};
  17. use crate::kernel::vfs::types::{DeviceId, Format, Permission};
  18. use crate::kernel::vfs::{SbRef, SbUse, SuperBlock, SuperBlockInfo};
  19. use crate::prelude::*;
  20. use crate::KResult;
  21. #[repr(transparent)]
  22. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
  23. struct Cluster(u32);
  24. #[repr(transparent)]
  25. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
  26. struct RawCluster(pub u32);
  27. impl RawCluster {
  28. const START: u32 = 2;
  29. const EOC: u32 = 0x0FFF_FFF8;
  30. const INVL: u32 = 0xF000_0000;
  31. fn parse(self) -> Option<Cluster> {
  32. match self.0 {
  33. ..Self::START | Self::EOC..Self::INVL => None,
  34. Self::INVL.. => {
  35. unreachable!("invalid cluster number: RawCluster({:#08x})", self.0)
  36. }
  37. no => Some(Cluster(no)),
  38. }
  39. }
  40. }
  41. impl Cluster {
  42. pub fn as_ino(self) -> Ino {
  43. Ino::new(self.0 as _)
  44. }
  45. pub fn from_ino(ino: Ino) -> Self {
  46. Self(ino.as_raw() as u32)
  47. }
  48. fn normalized(self) -> Self {
  49. Self(self.0 - 2)
  50. }
  51. }
/// Disk sector size in bytes. The driver currently assumes 512-byte sectors
/// throughout; the `bytes_per_sector` BPB field is not consulted.
const SECTOR_SIZE: usize = 512;

/// On-disk layout of a FAT32 boot sector (BPB plus extended BPB), 512 bytes.
/// Field order/sizes mirror the on-disk format, hence `#[repr(C, packed)]`.
/// NOTE: reading a reference to a packed field is UB — fields must be copied
/// out by value (as the rest of this file does).
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
struct Bootsector {
    jmp: [u8; 3],            // x86 jump to boot code
    oem: [u8; 8],            // OEM name, space padded
    bytes_per_sector: u16,
    sectors_per_cluster: u8,
    reserved_sectors: u16,   // sectors before the first FAT
    fat_copies: u8,          // number of FATs (usually 2)
    root_entries: u16, // should be 0 for FAT32
    _total_sectors: u16, // outdated
    media: u8,
    _sectors_per_fat: u16, // outdated
    sectors_per_track: u16,
    heads: u16,
    hidden_sectors: u32,
    total_sectors: u32,
    sectors_per_fat: u32,    // FAT32 sectors-per-FAT (the valid field)
    flags: u16,
    fat_version: u16,
    root_cluster: RawCluster, // first cluster of the root directory
    fsinfo_sector: u16,
    backup_bootsector: u16,
    _reserved: [u8; 12],
    drive_number: u8,
    _reserved2: u8,
    ext_sig: u8,
    serial: u32,
    volume_label: [u8; 11],  // space padded
    fs_type: [u8; 8],        // "FAT32   " label, informational
    bootcode: [u8; 420],
    mbr_signature: u16,      // 0xAA55
}
/// FAT32 superblock state: geometry derived from the boot sector plus the
/// whole first FAT cached in memory.
///
/// # Lock order
/// 2. FatTable
/// 3. Inodes
///
// NOTE(review): the lock-order list starts at 2 — presumably slot 1 is a
// lock defined elsewhere in the kernel; confirm against the VFS docs.
struct FatFs {
    sectors_per_cluster: u8,
    // First sector of the data area (after reserved sectors and all FATs).
    data_start_sector: u64,
    _rootdir_cluster: Cluster,
    _volume_label: Box<str>,
    device: Arc<BlockDevice>,
    // In-memory copy of the first FAT; index by cluster number.
    fat: RwLock<Box<[RawCluster]>>,
}
  98. impl SuperBlock for FatFs {}
  99. impl FatFs {
  100. async fn read_cluster(&self, mut cluster: Cluster, buf: &Folio) -> KResult<()> {
  101. cluster = cluster.normalized();
  102. let rq = BlockDeviceRequest::Read {
  103. sector: self.data_start_sector as u64
  104. + cluster.0 as u64 * self.sectors_per_cluster as u64,
  105. count: self.sectors_per_cluster as u64,
  106. buffer: core::slice::from_ref(buf),
  107. };
  108. self.device.commit_request(rq).await?;
  109. Ok(())
  110. }
  111. }
  112. impl FatFs {
  113. pub async fn create(device: DeviceId) -> KResult<(SbUse<Self>, InodeUse)> {
  114. let device = BlockDevice::get(device)?;
  115. let mut info = UninitBuffer::<Bootsector>::new();
  116. device.read_some(0, &mut info).await?.ok_or(EIO)?;
  117. let info = info.assume_filled_ref()?;
  118. let mut fat = Box::new_uninit_slice(
  119. 512 * info.sectors_per_fat as usize / core::mem::size_of::<Cluster>(),
  120. );
  121. device
  122. .read_some(
  123. info.reserved_sectors as usize * 512,
  124. &mut ByteBuffer::from(fat.as_mut()),
  125. )
  126. .await?
  127. .ok_or(EIO)?;
  128. let sectors_per_cluster = info.sectors_per_cluster;
  129. let rootdir_cluster = info.root_cluster.parse().ok_or(EINVAL)?;
  130. let data_start_sector =
  131. info.reserved_sectors as u64 + info.fat_copies as u64 * info.sectors_per_fat as u64;
  132. let volume_label = {
  133. let end = info
  134. .volume_label
  135. .iter()
  136. .position(|&c| c == b' ')
  137. .unwrap_or(info.volume_label.len());
  138. String::from_utf8_lossy(&info.volume_label[..end])
  139. .into_owned()
  140. .into_boxed_str()
  141. };
  142. let fat = unsafe { fat.assume_init() };
  143. let rootdir_cluster_count = ClusterIterator::new(fat.as_ref(), rootdir_cluster).count();
  144. let rootdir_size = rootdir_cluster_count as u32 * sectors_per_cluster as u32 * 512;
  145. let fatfs = SbUse::new(
  146. SuperBlockInfo {
  147. io_blksize: 4096,
  148. device_id: device.devid(),
  149. read_only: true,
  150. },
  151. Self {
  152. device,
  153. sectors_per_cluster,
  154. _rootdir_cluster: rootdir_cluster,
  155. data_start_sector,
  156. fat: RwLock::new(fat),
  157. _volume_label: volume_label,
  158. },
  159. );
  160. let sbref = SbRef::from(&fatfs);
  161. Ok((fatfs, DirInode::new(rootdir_cluster, sbref, rootdir_size)))
  162. }
  163. }
  164. struct ClusterIterator<'a> {
  165. fat: &'a [RawCluster],
  166. cur: Option<Cluster>,
  167. }
  168. impl<'a> ClusterIterator<'a> {
  169. fn new(fat: &'a [RawCluster], start: Cluster) -> Self {
  170. Self {
  171. fat,
  172. cur: Some(start),
  173. }
  174. }
  175. }
  176. impl<'fat> Iterator for ClusterIterator<'fat> {
  177. type Item = Cluster;
  178. fn next(&mut self) -> Option<Self::Item> {
  179. self.cur.inspect(|&Cluster(no)| {
  180. self.cur = self.fat[no as usize].parse();
  181. })
  182. }
  183. }
  184. struct FileInode;
  185. impl FileInode {
  186. fn new(cluster: Cluster, sb: SbRef<FatFs>, size: u32) -> InodeUse {
  187. InodeUse::new(
  188. sb,
  189. cluster.as_ino(),
  190. Format::REG,
  191. InodeInfo {
  192. size: size as u64,
  193. nlink: 1,
  194. uid: 0,
  195. gid: 0,
  196. perm: Permission::new(0o777),
  197. atime: Instant::UNIX_EPOCH,
  198. ctime: Instant::UNIX_EPOCH,
  199. mtime: Instant::UNIX_EPOCH,
  200. },
  201. Self,
  202. )
  203. }
  204. }
  205. impl InodeOps for FileInode {
  206. type SuperBlock = FatFs;
  207. async fn read(
  208. &self,
  209. _: SbUse<Self::SuperBlock>,
  210. inode: &InodeUse,
  211. buffer: &mut dyn Buffer,
  212. offset: usize,
  213. ) -> KResult<usize> {
  214. inode.get_page_cache().read(buffer, offset).await
  215. }
  216. async fn read_page(
  217. &self,
  218. sb: SbUse<Self::SuperBlock>,
  219. inode: &InodeUse,
  220. page: &mut CachePage,
  221. offset: PageOffset,
  222. ) -> KResult<()> {
  223. let fs = &sb.backend;
  224. let fat = sb.backend.fat.read().await;
  225. if offset >= PageOffset::from_byte_ceil(inode.info.lock().size as usize) {
  226. unreachable!("read_page called with offset beyond file size");
  227. }
  228. let cluster_size = fs.sectors_per_cluster as usize * SECTOR_SIZE;
  229. if cluster_size != PAGE_SIZE {
  230. unimplemented!("cluster size != PAGE_SIZE");
  231. }
  232. // XXX: Ugly and inefficient O(n^2) algorithm for sequential file read.
  233. let cluster = ClusterIterator::new(fat.as_ref(), Cluster::from_ino(inode.ino))
  234. .skip(offset.page_count())
  235. .next()
  236. .ok_or(EIO)?;
  237. fs.read_cluster(cluster, &page).await?;
  238. let real_len = (inode.info.lock().size as usize) - offset.byte_count();
  239. if real_len < PAGE_SIZE {
  240. let mut page = page.lock();
  241. page.as_bytes_mut()[real_len..].fill(0);
  242. }
  243. Ok(())
  244. }
  245. }
/// Directory inode. The directory's raw contents are read from disk once
/// and cached in `dir_pages` for the lifetime of the inode.
struct DirInode {
    // TODO: Use the new PageCache...
    dir_pages: RwLock<Vec<FolioOwned>>,
}
impl DirInode {
    /// Build a directory inode for the chain starting at `cluster` with the
    /// given `size` in bytes (chain length, see `FatFs::create`).
    fn new(cluster: Cluster, sb: SbRef<FatFs>, size: u32) -> InodeUse {
        InodeUse::new(
            sb,
            cluster.as_ino(),
            Format::DIR,
            InodeInfo {
                size: size as u64,
                nlink: 2, // '.' and '..'
                uid: 0,
                gid: 0,
                perm: Permission::new(0o777),
                atime: Instant::UNIX_EPOCH,
                ctime: Instant::UNIX_EPOCH,
                mtime: Instant::UNIX_EPOCH,
            },
            Self {
                dir_pages: RwLock::new(Vec::new()),
            },
        )
    }

    /// Populate `dir_pages` from disk if it is still empty; a no-op when
    /// another task has already filled it.
    async fn read_dir_pages(&self, sb: &SbUse<FatFs>, inode: &InodeUse) -> KResult<()> {
        // Take the write lock first so concurrent callers serialize and the
        // emptiness check below stays valid for the whole fill.
        let mut dir_pages = self.dir_pages.write().await;
        if !dir_pages.is_empty() {
            return Ok(());
        }
        let fs = &sb.backend;
        let fat = fs.fat.read().await;
        // Walk the directory's cluster chain and cache each cluster.
        let clusters = ClusterIterator::new(fat.as_ref(), Cluster::from_ino(inode.ino));
        for cluster in clusters {
            let page = FolioOwned::alloc();
            fs.read_cluster(cluster, &page).await?;
            dir_pages.push(page);
        }
        Ok(())
    }

    /// Return a read guard over the cached directory pages, loading them
    /// from disk on first use (double-checked: fast read path, then fill
    /// under the write lock, then re-acquire for reading).
    async fn get_dir_pages(
        &self,
        sb: &SbUse<FatFs>,
        inode: &InodeUse,
    ) -> KResult<impl Deref<Target = Vec<FolioOwned>> + use<'_>> {
        {
            // Fast path: pages already cached.
            let dir_pages = self.dir_pages.read().await;
            if !dir_pages.is_empty() {
                return Ok(dir_pages);
            }
        }
        self.read_dir_pages(sb, inode).await?;
        // Prefer the non-blocking acquire; fall back to awaiting if a
        // writer currently holds the lock.
        if let Some(dir_pages) = self.dir_pages.try_read() {
            return Ok(dir_pages);
        }
        Ok(self.dir_pages.read().await)
    }
}
impl InodeOps for DirInode {
    type SuperBlock = FatFs;

    /// Look up `dentry`'s name in this directory, creating a new file or
    /// directory inode for the matching entry if found.
    async fn lookup(
        &self,
        sb: SbUse<Self::SuperBlock>,
        inode: &InodeUse,
        dentry: &Arc<Dentry>,
    ) -> KResult<Option<InodeUse>> {
        let dir_pages = self.get_dir_pages(&sb, inode).await?;
        let dir_data = dir_pages.iter().map(|pg| pg.as_bytes());
        // Reinterpret each cached cluster as raw dirents; stop after the
        // first cluster that fails to parse (take_while_inclusive keeps the
        // failing item so the error is surfaced via `result?` below).
        let raw_dirents = dir_data
            .map(as_raw_dirents)
            .take_while_inclusive(Result::is_ok)
            .flatten_ok();
        // `next_dirent` (from the `dir` module) assembles long-filename
        // sequences into complete entries.
        let mut dirents = futures::stream::iter(raw_dirents);
        while let Some(result) = dirents.next_dirent().await {
            let entry = result?;
            // NOTE(review): the deref chain unwraps guard/ref wrappers
            // around the dentry name down to the underlying byte slice —
            // comparison appears to be byte-exact (case-sensitive); confirm
            // against dir::ParseDirent's name normalization.
            if *entry.filename != ****dentry.name() {
                continue;
            }
            let sbref = SbRef::from(&sb);
            if entry.is_directory {
                return Ok(Some(DirInode::new(entry.cluster, sbref, entry.size) as _));
            } else {
                return Ok(Some(FileInode::new(entry.cluster, sbref, entry.size) as _));
            }
        }
        Ok(None)
    }

    /// Iterate directory entries starting at byte `offset`, invoking
    /// `callback(name, ino)` per entry until it returns `Ok(false)` or the
    /// entries are exhausted. The inner `KResult` carries callback errors;
    /// the outer one carries I/O errors.
    async fn readdir(
        &self,
        sb: SbUse<Self::SuperBlock>,
        inode: &InodeUse,
        offset: usize,
        callback: &mut (dyn FnMut(&[u8], Ino) -> KResult<bool> + Send),
    ) -> KResult<KResult<usize>> {
        let fs = &sb.backend;
        let dir_pages = self.get_dir_pages(&sb, inode).await?;
        // Split the byte offset into (cluster index, raw-dirent index
        // within that cluster).
        let cluster_size = fs.sectors_per_cluster as usize * SECTOR_SIZE;
        let cluster_offset = offset / cluster_size;
        let inner_offset = offset % cluster_size;
        let inner_raw_dirent_offset = inner_offset / core::mem::size_of::<dir::RawDirEntry>();
        let dir_data = dir_pages
            .iter()
            .skip(cluster_offset)
            .map(|pg| pg.as_bytes());
        let raw_dirents = dir_data
            .map(as_raw_dirents)
            .take_while_inclusive(Result::is_ok)
            .flatten_ok()
            .skip(inner_raw_dirent_offset);
        let mut dirents = futures::stream::iter(raw_dirents);
        let mut nread = 0;
        while let Some(result) = dirents.next_dirent().await {
            let entry = result?;
            match callback(&entry.filename, entry.cluster.as_ino()) {
                Err(err) => return Ok(Err(err)),
                // NOTE(review): accumulates `entry_offset` per consumed
                // entry — presumably the byte span of the raw dirents this
                // logical entry occupied; verify against dir::ParseDirent.
                Ok(true) => nread += entry.entry_offset as usize,
                Ok(false) => break,
            }
        }
        Ok(Ok(nread))
    }
}
  368. struct FatMountCreator;
  369. #[async_trait]
  370. impl MountCreator for FatMountCreator {
  371. fn check_signature(&self, mut first_block: &[u8]) -> KResult<bool> {
  372. match first_block.split_off(82..) {
  373. Some([b'F', b'A', b'T', b'3', b'2', b' ', b' ', b' ', ..]) => Ok(true),
  374. Some(..) => Ok(false),
  375. None => Err(EIO),
  376. }
  377. }
  378. async fn create_mount(&self, _source: &str, _flags: u64, mp: &Arc<Dentry>) -> KResult<Mount> {
  379. let (fatfs, root_inode) = FatFs::create(DeviceId::new(8, 1)).await?;
  380. Mount::new(mp, fatfs, root_inode)
  381. }
  382. }
  383. pub fn init() {
  384. register_filesystem("fat32", Arc::new(FatMountCreator)).unwrap();
  385. }