inode.rs 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325
  1. use alloc::sync::{Arc, Weak};
  2. use bindings::{
  3. statx, EINVAL, EISDIR, ENOTDIR, EPERM, STATX_ATIME, STATX_BLOCKS, STATX_CTIME, STATX_GID,
  4. STATX_INO, STATX_MODE, STATX_MTIME, STATX_NLINK, STATX_SIZE, STATX_TYPE, STATX_UID, S_IFDIR,
  5. S_IFMT,
  6. };
  7. use core::{
  8. mem::MaybeUninit,
  9. ops::ControlFlow,
  10. ptr::addr_of_mut,
  11. sync::atomic::{AtomicU32, AtomicU64, Ordering},
  12. };
  13. use super::{dentry::Dentry, s_isblk, s_ischr, vfs::Vfs, DevId, TimeSpec};
  14. use crate::{io::Buffer, prelude::*};
/// Inode number: identifies an inode within a filesystem.
pub type Ino = u64;
pub type AtomicIno = AtomicU64;
/// File size in bytes.
#[allow(dead_code)]
pub type ISize = u64;
pub type AtomicISize = AtomicU64;
/// Hard-link count.
#[allow(dead_code)]
pub type Nlink = u64;
pub type AtomicNlink = AtomicU64;
/// Owner user id.
#[allow(dead_code)]
pub type Uid = u32;
pub type AtomicUid = AtomicU32;
/// Owner group id.
#[allow(dead_code)]
pub type Gid = u32;
pub type AtomicGid = AtomicU32;
/// File type and permission bits (`S_IF*` plus permissions).
pub type Mode = u32;
pub type AtomicMode = AtomicU32;
/// Metadata common to every inode, embedded by concrete inode structs
/// (see the `define_struct_inode!` macro in this module).
pub struct InodeData {
    /// Inode number; set once at construction and never changed.
    pub ino: Ino,
    /// File size in bytes.
    pub size: AtomicISize,
    /// Hard-link count.
    pub nlink: AtomicNlink,
    /// Owner user id.
    pub uid: AtomicUid,
    /// Owner group id.
    pub gid: AtomicGid,
    /// File type and permission bits.
    pub mode: AtomicMode,
    /// Last access time.
    pub atime: Spin<TimeSpec>,
    /// Last status-change time.
    pub ctime: Spin<TimeSpec>,
    /// Last modification time.
    pub mtime: Spin<TimeSpec>,
    // NOTE(review): taken shared in `Inode::new_locked` during construction;
    // presumably serializes inode read/write operations — confirm against users.
    pub rwsem: RwLock<()>,
    /// Back-reference to the owning filesystem (weak to avoid a cycle).
    pub vfs: Weak<dyn Vfs>,
}
  44. impl InodeData {
  45. pub fn new(ino: Ino, vfs: Weak<dyn Vfs>) -> Self {
  46. Self {
  47. ino,
  48. vfs,
  49. atime: Spin::new(TimeSpec::default()),
  50. ctime: Spin::new(TimeSpec::default()),
  51. mtime: Spin::new(TimeSpec::default()),
  52. rwsem: RwLock::new(()),
  53. size: Default::default(),
  54. nlink: Default::default(),
  55. uid: Default::default(),
  56. gid: Default::default(),
  57. mode: Default::default(),
  58. }
  59. }
  60. }
/// Glue trait implemented by concrete inode structs: exposes the embedded
/// [`InodeData`] both via `Deref`/`DerefMut` (supertrait bounds) and via
/// explicit accessors. Implementations are generated by
/// `define_struct_inode!` below.
#[allow(dead_code)]
pub trait InodeInner:
    Send + Sync + core::ops::Deref<Target = InodeData> + core::ops::DerefMut
{
    /// Borrow the embedded inode metadata.
    fn data(&self) -> &InodeData;
    /// Mutably borrow the embedded inode metadata.
    fn data_mut(&mut self) -> &mut InodeData;
}
/// Where a [`Inode::write`] should place its data.
pub enum WriteOffset<'end> {
    /// Write at this absolute byte offset.
    Position(usize),
    /// Append at end of file; the referenced `usize` is written back by the
    /// implementor (presumably with the resulting end offset — TODO confirm
    /// against `write` implementations, none are visible here).
    End(&'end mut usize),
}
  72. #[allow(unused_variables)]
  73. pub trait Inode: Send + Sync + InodeInner {
  74. fn is_dir(&self) -> bool {
  75. self.mode.load(Ordering::SeqCst) & S_IFDIR != 0
  76. }
  77. fn lookup(&self, dentry: &Arc<Dentry>) -> KResult<Option<Arc<dyn Inode>>> {
  78. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  79. }
  80. fn creat(&self, at: &Arc<Dentry>, mode: Mode) -> KResult<()> {
  81. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  82. }
  83. fn mkdir(&self, at: &Dentry, mode: Mode) -> KResult<()> {
  84. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  85. }
  86. fn mknod(&self, at: &Dentry, mode: Mode, dev: DevId) -> KResult<()> {
  87. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  88. }
  89. fn unlink(&self, at: &Arc<Dentry>) -> KResult<()> {
  90. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  91. }
  92. fn symlink(&self, at: &Arc<Dentry>, target: &[u8]) -> KResult<()> {
  93. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  94. }
  95. fn read(&self, buffer: &mut dyn Buffer, offset: usize) -> KResult<usize> {
  96. Err(if self.is_dir() { EISDIR } else { EINVAL })
  97. }
  98. fn write(&self, buffer: &[u8], offset: WriteOffset) -> KResult<usize> {
  99. Err(if self.is_dir() { EISDIR } else { EINVAL })
  100. }
  101. fn devid(&self) -> KResult<DevId> {
  102. Err(if self.is_dir() { EISDIR } else { EINVAL })
  103. }
  104. fn readlink(&self, buffer: &mut dyn Buffer) -> KResult<usize> {
  105. Err(if self.is_dir() { EISDIR } else { EINVAL })
  106. }
  107. fn truncate(&self, length: usize) -> KResult<()> {
  108. Err(if self.is_dir() { EISDIR } else { EPERM })
  109. }
  110. fn do_readdir(
  111. &self,
  112. offset: usize,
  113. callback: &mut dyn FnMut(&[u8], Ino) -> KResult<ControlFlow<(), ()>>,
  114. ) -> KResult<usize> {
  115. Err(if !self.is_dir() { ENOTDIR } else { EPERM })
  116. }
  117. fn chmod(&self, mode: Mode) -> KResult<()> {
  118. Err(EPERM)
  119. }
  120. fn statx(&self, stat: &mut statx, mask: u32) -> KResult<()> {
  121. // Safety: ffi should have checked reference
  122. let vfs = self.vfs.upgrade().expect("Vfs is dropped");
  123. let size = self.size.load(Ordering::Relaxed);
  124. let mode = self.mode.load(Ordering::Relaxed);
  125. if mask & STATX_NLINK != 0 {
  126. stat.stx_nlink = self.nlink.load(Ordering::Acquire) as _;
  127. stat.stx_mask |= STATX_NLINK;
  128. }
  129. if mask & STATX_ATIME != 0 {
  130. let atime = self.atime.lock();
  131. stat.stx_atime.tv_nsec = atime.nsec as _;
  132. stat.stx_atime.tv_sec = atime.sec as _;
  133. stat.stx_mask |= STATX_ATIME;
  134. }
  135. if mask & STATX_MTIME != 0 {
  136. let mtime = self.mtime.lock();
  137. stat.stx_mtime.tv_nsec = mtime.nsec as _;
  138. stat.stx_mtime.tv_sec = mtime.sec as _;
  139. stat.stx_mask |= STATX_MTIME;
  140. }
  141. if mask & STATX_CTIME != 0 {
  142. let ctime = self.ctime.lock();
  143. stat.stx_ctime.tv_nsec = ctime.nsec as _;
  144. stat.stx_ctime.tv_sec = ctime.sec as _;
  145. stat.stx_mask |= STATX_CTIME;
  146. }
  147. if mask & STATX_SIZE != 0 {
  148. stat.stx_size = self.size.load(Ordering::Relaxed) as _;
  149. stat.stx_mask |= STATX_SIZE;
  150. }
  151. stat.stx_mode = 0;
  152. if mask & STATX_MODE != 0 {
  153. stat.stx_mode |= (mode & !S_IFMT) as u16;
  154. stat.stx_mask |= STATX_MODE;
  155. }
  156. if mask & STATX_TYPE != 0 {
  157. stat.stx_mode |= (mode & S_IFMT) as u16;
  158. if s_isblk(mode) || s_ischr(mode) {
  159. let devid = self.devid();
  160. stat.stx_rdev_major = (devid? >> 8) & 0xff;
  161. stat.stx_rdev_minor = devid? & 0xff;
  162. }
  163. stat.stx_mask |= STATX_TYPE;
  164. }
  165. if mask & STATX_INO != 0 {
  166. stat.stx_ino = self.ino as _;
  167. stat.stx_mask |= STATX_INO;
  168. }
  169. if mask & STATX_BLOCKS != 0 {
  170. stat.stx_blocks = (size + 512 - 1) / 512;
  171. stat.stx_blksize = vfs.io_blksize() as _;
  172. stat.stx_mask |= STATX_BLOCKS;
  173. }
  174. if mask & STATX_UID != 0 {
  175. stat.stx_uid = self.uid.load(Ordering::Relaxed) as _;
  176. stat.stx_mask |= STATX_UID;
  177. }
  178. if mask & STATX_GID != 0 {
  179. stat.stx_gid = self.gid.load(Ordering::Relaxed) as _;
  180. stat.stx_mask |= STATX_GID;
  181. }
  182. let fsdev = vfs.fs_devid();
  183. stat.stx_dev_major = (fsdev >> 8) & 0xff;
  184. stat.stx_dev_minor = fsdev & 0xff;
  185. // TODO: support more attributes
  186. stat.stx_attributes_mask = 0;
  187. Ok(())
  188. }
  189. fn new_locked<F>(ino: Ino, vfs: Weak<dyn Vfs>, f: F) -> Arc<Self>
  190. where
  191. Self: Sized,
  192. F: FnOnce(*mut Self, &()),
  193. {
  194. let mut uninit = Arc::<Self>::new_uninit();
  195. let uninit_mut = Arc::get_mut(&mut uninit).unwrap();
  196. // Safety: `idata` is owned by `uninit`
  197. let idata = unsafe {
  198. addr_of_mut!(*(*uninit_mut.as_mut_ptr()).data_mut())
  199. .cast::<MaybeUninit<InodeData>>()
  200. .as_mut()
  201. .unwrap()
  202. };
  203. idata.write(InodeData::new(ino, vfs));
  204. f(
  205. uninit_mut.as_mut_ptr(),
  206. // SAFETY: `idata` is initialized and we will never move the lock.
  207. &unsafe { idata.assume_init_ref() }.rwsem.lock_shared(),
  208. );
  209. // Safety: `uninit` is initialized
  210. unsafe { uninit.assume_init() }
  211. }
  212. }
// TODO: define multiple inode structs a time
/// Define an inode struct embedding an [`InodeData`] and generate its
/// `Deref`, `DerefMut` and `InodeInner` boilerplate.
///
/// Two forms are accepted:
/// - `define_struct_inode!(pub struct Foo;)` — no extra fields;
/// - `define_struct_inode!(pub struct Foo { pub bar: Baz, })` — with extra
///   fields (note: each field needs a trailing comma to match the pattern).
macro_rules! define_struct_inode {
    // Unit form: the struct carries only the embedded `InodeData`.
    ($v:vis struct $inode_t:ident;) => {
        $v struct $inode_t {
            /// Do not use this directly
            idata: $crate::kernel::vfs::inode::InodeData,
        }

        // Deref/DerefMut give transparent access to the `InodeData` fields,
        // as required by the `InodeInner` supertrait bounds.
        impl core::ops::Deref for $inode_t {
            type Target = $crate::kernel::vfs::inode::InodeData;

            fn deref(&self) -> &Self::Target {
                &self.idata
            }
        }

        impl core::ops::DerefMut for $inode_t {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.idata
            }
        }

        impl $crate::kernel::vfs::inode::InodeInner for $inode_t {
            fn data(&self) -> &$crate::kernel::vfs::inode::InodeData {
                &self.idata
            }

            fn data_mut(&mut self) -> &mut $crate::kernel::vfs::inode::InodeData {
                &mut self.idata
            }
        }
    };
    // Field form: same boilerplate plus user-supplied fields.
    ($v:vis struct $inode_t:ident { $($vis:vis $name:ident: $type:ty,)* }) => {
        $v struct $inode_t {
            /// Do not use this directly
            idata: $crate::kernel::vfs::inode::InodeData,
            $($vis $name: $type,)*
        }

        impl core::ops::Deref for $inode_t {
            type Target = $crate::kernel::vfs::inode::InodeData;

            fn deref(&self) -> &Self::Target {
                &self.idata
            }
        }

        impl core::ops::DerefMut for $inode_t {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.idata
            }
        }

        impl $crate::kernel::vfs::inode::InodeInner for $inode_t {
            fn data(&self) -> &$crate::kernel::vfs::inode::InodeData {
                &self.idata
            }

            fn data_mut(&mut self) -> &mut $crate::kernel::vfs::inode::InodeData {
                &mut self.idata
            }
        }
    };
}
  267. pub(crate) use define_struct_inode;