inode.rs

use super::{dentry::Dentry, s_isblk, s_ischr, vfs::Vfs, DevId};
use crate::io::Stream;
use crate::kernel::constants::{
    EINVAL, EISDIR, ENOTDIR, EPERM, STATX_ATIME, STATX_BLOCKS, STATX_CTIME, STATX_GID, STATX_INO,
    STATX_MODE, STATX_MTIME, STATX_NLINK, STATX_SIZE, STATX_TYPE, STATX_UID, S_IFDIR, S_IFMT,
};
use crate::kernel::timer::Instant;
use crate::{io::Buffer, prelude::*};
use alloc::sync::{Arc, Weak};
use core::{
    mem::MaybeUninit,
    ops::ControlFlow,
    ptr::addr_of_mut,
    sync::atomic::{AtomicU32, AtomicU64, Ordering},
};
use eonix_runtime::task::Task;
use eonix_sync::RwLock;
use posix_types::stat::StatX;

pub type Ino = u64;
pub type AtomicIno = AtomicU64;

#[allow(dead_code)]
pub type ISize = u64;
pub type AtomicISize = AtomicU64;

#[allow(dead_code)]
pub type Nlink = u64;
pub type AtomicNlink = AtomicU64;

#[allow(dead_code)]
pub type Uid = u32;
pub type AtomicUid = AtomicU32;

#[allow(dead_code)]
pub type Gid = u32;
pub type AtomicGid = AtomicU32;

pub type Mode = u32;
pub type AtomicMode = AtomicU32;
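
/// In-memory metadata shared by every inode implementation: the inode
/// number, size, link count, ownership and mode (stored as atomics), the
/// timestamps (behind spinlocks), a `rwsem` guarding structural updates,
/// and a weak back-reference to the owning [`Vfs`].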
pub struct InodeData {
    pub ino: Ino,
    pub size: AtomicISize,
    pub nlink: AtomicNlink,
    pub uid: AtomicUid,
    pub gid: AtomicGid,
    pub mode: AtomicMode,
    pub atime: Spin<Instant>,
    pub ctime: Spin<Instant>,
    pub mtime: Spin<Instant>,
    pub rwsem: RwLock<()>,
    pub vfs: Weak<dyn Vfs>,
}

impl InodeData {
    pub const fn new(ino: Ino, vfs: Weak<dyn Vfs>) -> Self {
        Self {
            ino,
            vfs,
            atime: Spin::new(Instant::default()),
            ctime: Spin::new(Instant::default()),
            mtime: Spin::new(Instant::default()),
            rwsem: RwLock::new(()),
            size: AtomicISize::new(0),
            nlink: AtomicNlink::new(0),
            uid: AtomicUid::new(0),
            gid: AtomicGid::new(0),
            mode: AtomicMode::new(0),
        }
    }
}
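
/// Access to the [`InodeData`] embedded in a concrete inode type.
/// Implementations are normally generated by the `define_struct_inode!`
/// macro at the bottom of this file.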
#[allow(dead_code)]
pub trait InodeInner:
    Send + Sync + core::ops::Deref<Target = InodeData> + core::ops::DerefMut
{
    fn data(&self) -> &InodeData;
    fn data_mut(&mut self) -> &mut InodeData;
}
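
/// Where a write starts: at an absolute byte `Position`, or at the `End`
/// of the file, in which case the mutable reference lets the
/// implementation report the offset at which the write finished.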
pub enum WriteOffset<'end> {
    Position(usize),
    End(&'end mut usize),
}
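
/// Parameters for [`Inode::rename`]: the source and destination dentries,
/// the destination's parent inode, the owning [`Vfs`], and flags selecting
/// exchange (swap the two entries) or no-replace (fail if the target
/// already exists) behavior.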
pub struct RenameData<'a, 'b> {
    pub old_dentry: &'a Arc<Dentry>,
    pub new_dentry: &'b Arc<Dentry>,
    pub new_parent: Arc<dyn Inode>,
    pub vfs: Arc<dyn Vfs>,
    pub is_exchange: bool,
    pub no_replace: bool,
}
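
/// The filesystem-independent inode interface.
///
/// Every operation has a default implementation that fails with a
/// suitable errno (`ENOTDIR`, `EISDIR`, `EINVAL` or `EPERM`), so a
/// concrete filesystem only needs to override the operations it
/// actually supports.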
#[allow(unused_variables)]
pub trait Inode: Send + Sync + InodeInner + Any {
    fn is_dir(&self) -> bool {
        // Compare the whole file-type field: a plain bit test against
        // `S_IFDIR` would also match block devices (`S_IFBLK`).
        self.mode.load(Ordering::SeqCst) & S_IFMT == S_IFDIR
    }

    fn lookup(&self, dentry: &Arc<Dentry>) -> KResult<Option<Arc<dyn Inode>>> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn creat(&self, at: &Arc<Dentry>, mode: Mode) -> KResult<()> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn mkdir(&self, at: &Dentry, mode: Mode) -> KResult<()> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn mknod(&self, at: &Dentry, mode: Mode, dev: DevId) -> KResult<()> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn unlink(&self, at: &Arc<Dentry>) -> KResult<()> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn symlink(&self, at: &Arc<Dentry>, target: &[u8]) -> KResult<()> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn read(&self, buffer: &mut dyn Buffer, offset: usize) -> KResult<usize> {
        Err(if self.is_dir() { EISDIR } else { EINVAL })
    }

    fn write(&self, stream: &mut dyn Stream, offset: WriteOffset) -> KResult<usize> {
        Err(if self.is_dir() { EISDIR } else { EINVAL })
    }

    fn devid(&self) -> KResult<DevId> {
        Err(if self.is_dir() { EISDIR } else { EINVAL })
    }

    fn readlink(&self, buffer: &mut dyn Buffer) -> KResult<usize> {
        Err(if self.is_dir() { EISDIR } else { EINVAL })
    }

    fn truncate(&self, length: usize) -> KResult<()> {
        Err(if self.is_dir() { EISDIR } else { EPERM })
    }

    fn rename(&self, rename_data: RenameData) -> KResult<()> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn do_readdir(
        &self,
        offset: usize,
        callback: &mut dyn FnMut(&[u8], Ino) -> KResult<ControlFlow<(), ()>>,
    ) -> KResult<usize> {
        Err(if !self.is_dir() { ENOTDIR } else { EPERM })
    }

    fn chmod(&self, mode: Mode) -> KResult<()> {
        Err(EPERM)
    }

    fn chown(&self, uid: u32, gid: u32) -> KResult<()> {
        Err(EPERM)
    }
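
    /// Default `statx` implementation: copies the fields requested in
    /// `mask` out of the inode's [`InodeData`] and the owning [`Vfs`],
    /// setting the corresponding bits in `stx_mask` as it goes.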
    fn statx(&self, stat: &mut StatX, mask: u32) -> KResult<()> {
        // The owning `Vfs` must outlive its inodes.
        let vfs = self.vfs.upgrade().expect("Vfs is dropped");
        let size = self.size.load(Ordering::Relaxed);
        let mode = self.mode.load(Ordering::Relaxed);

        if mask & STATX_NLINK != 0 {
            stat.stx_nlink = self.nlink.load(Ordering::Acquire) as _;
            stat.stx_mask |= STATX_NLINK;
        }

        if mask & STATX_ATIME != 0 {
            let atime = *self.atime.lock();
            stat.stx_atime = atime.into();
            stat.stx_mask |= STATX_ATIME;
        }

        if mask & STATX_MTIME != 0 {
            let mtime = *self.mtime.lock();
            stat.stx_mtime = mtime.into();
            stat.stx_mask |= STATX_MTIME;
        }

        if mask & STATX_CTIME != 0 {
            let ctime = *self.ctime.lock();
            stat.stx_ctime = ctime.into();
            stat.stx_mask |= STATX_CTIME;
        }

        if mask & STATX_SIZE != 0 {
            stat.stx_size = self.size.load(Ordering::Relaxed) as _;
            stat.stx_mask |= STATX_SIZE;
        }

        stat.stx_mode = 0;
        if mask & STATX_MODE != 0 {
            stat.stx_mode |= (mode & !S_IFMT) as u16;
            stat.stx_mask |= STATX_MODE;
        }

        if mask & STATX_TYPE != 0 {
            stat.stx_mode |= (mode & S_IFMT) as u16;
            if s_isblk(mode) || s_ischr(mode) {
                let devid = self.devid()?;
                stat.stx_rdev_major = (devid >> 8) & 0xff;
                stat.stx_rdev_minor = devid & 0xff;
            }
            stat.stx_mask |= STATX_TYPE;
        }

        if mask & STATX_INO != 0 {
            stat.stx_ino = self.ino as _;
            stat.stx_mask |= STATX_INO;
        }

        if mask & STATX_BLOCKS != 0 {
            stat.stx_blocks = (size + 512 - 1) / 512;
            stat.stx_blksize = vfs.io_blksize() as _;
            stat.stx_mask |= STATX_BLOCKS;
        }

        if mask & STATX_UID != 0 {
            stat.stx_uid = self.uid.load(Ordering::Relaxed) as _;
            stat.stx_mask |= STATX_UID;
        }

        if mask & STATX_GID != 0 {
            stat.stx_gid = self.gid.load(Ordering::Relaxed) as _;
            stat.stx_mask |= STATX_GID;
        }

        let fsdev = vfs.fs_devid();
        stat.stx_dev_major = (fsdev >> 8) & 0xff;
        stat.stx_dev_minor = fsdev & 0xff;

        // TODO: Support more attributes.
        stat.stx_attributes_mask = 0;

        Ok(())
    }
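
    /// Allocates the inode inside an `Arc`, writes the embedded
    /// [`InodeData`] in place, and then calls `f` with a raw pointer to
    /// the still partially-initialized inode while a read guard on its
    /// `rwsem` is held, so that `f` can initialize the remaining fields.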
    fn new_locked<F>(ino: Ino, vfs: Weak<dyn Vfs>, f: F) -> Arc<Self>
    where
        Self: Sized,
        F: FnOnce(*mut Self, &()),
    {
        let mut uninit = Arc::<Self>::new_uninit();
        let uninit_mut = Arc::get_mut(&mut uninit).unwrap();

        // SAFETY: `idata` is owned by `uninit`.
        let idata = unsafe {
            addr_of_mut!(*(*uninit_mut.as_mut_ptr()).data_mut())
                .cast::<MaybeUninit<InodeData>>()
                .as_mut()
                .unwrap()
        };
        idata.write(InodeData::new(ino, vfs));

        f(
            uninit_mut.as_mut_ptr(),
            // SAFETY: `idata` is initialized and we will never move the lock.
            &Task::block_on(unsafe { idata.assume_init_ref() }.rwsem.read()),
        );

        // SAFETY: `uninit` is initialized.
        unsafe { uninit.assume_init() }
    }
}

// TODO: Support defining multiple inode structs at a time.
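/// Defines an inode struct that embeds an [`InodeData`] field and
/// generates the `Deref`/`DerefMut` and [`InodeInner`] boilerplate for it.
/// Two forms are accepted: a struct with no extra fields, and a struct
/// with additional fields of its own.
///
/// Illustrative sketch only (the `PageCache`-backed field is a
/// hypothetical example, not part of this module):
///
/// ```ignore
/// define_struct_inode! {
///     pub(super) struct NullInode;
/// }
///
/// define_struct_inode! {
///     pub(super) struct FileInode {
///         pages: PageCache,
///     }
/// }
/// ```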
macro_rules! define_struct_inode {
    ($v:vis struct $inode_t:ident;) => {
        $v struct $inode_t {
            /// Do not use this directly.
            idata: $crate::kernel::vfs::inode::InodeData,
        }

        impl core::ops::Deref for $inode_t {
            type Target = $crate::kernel::vfs::inode::InodeData;

            fn deref(&self) -> &Self::Target {
                &self.idata
            }
        }

        impl core::ops::DerefMut for $inode_t {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.idata
            }
        }

        impl $crate::kernel::vfs::inode::InodeInner for $inode_t {
            fn data(&self) -> &$crate::kernel::vfs::inode::InodeData {
                &self.idata
            }

            fn data_mut(&mut self) -> &mut $crate::kernel::vfs::inode::InodeData {
                &mut self.idata
            }
        }
    };
    ($v:vis struct $inode_t:ident { $($vis:vis $name:ident: $type:ty,)* }) => {
        $v struct $inode_t {
            /// Do not use this directly.
            idata: $crate::kernel::vfs::inode::InodeData,
            $($vis $name: $type,)*
        }

        impl core::ops::Deref for $inode_t {
            type Target = $crate::kernel::vfs::inode::InodeData;

            fn deref(&self) -> &Self::Target {
                &self.idata
            }
        }

        impl core::ops::DerefMut for $inode_t {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.idata
            }
        }

        impl $crate::kernel::vfs::inode::InodeInner for $inode_t {
            fn data(&self) -> &$crate::kernel::vfs::inode::InodeData {
                &self.idata
            }

            fn data_mut(&mut self) -> &mut $crate::kernel::vfs::inode::InodeData {
                &mut self.idata
            }
        }
    };
}

pub(crate) use define_struct_inode;