filearray.rs 8.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. use core::sync::atomic::Ordering;
  2. use crate::{
  3. kernel::vfs::{dentry::Dentry, file::Pipe, s_isdir, s_isreg},
  4. path::Path,
  5. prelude::*,
  6. };
  7. use alloc::{
  8. collections::btree_map::{BTreeMap, Entry},
  9. sync::Arc,
  10. };
  11. use bindings::{
  12. current_process, kernel::tty::console, EBADF, EINVAL, EISDIR, ENOTDIR, FD_CLOEXEC, F_DUPFD,
  13. F_DUPFD_CLOEXEC, F_GETFD, F_SETFD, O_APPEND, O_CLOEXEC, O_DIRECTORY, O_RDWR, O_TRUNC, O_WRONLY,
  14. };
  15. use itertools::{
  16. FoldWhile::{Continue, Done},
  17. Itertools,
  18. };
  19. use super::{
  20. file::{File, InodeFile, TTYFile},
  21. inode::Mode,
  22. s_ischr, FsContext, Spin,
  23. };
/// A file descriptor number, matching the C side's descriptor type.
type FD = u32;

/// One open descriptor: per-descriptor flags plus a shared handle to
/// the underlying open file description.
#[derive(Clone)]
struct OpenFile {
    /// File descriptor flags, only for `FD_CLOEXEC`.
    flags: u64,
    // Shared open file description; cloning shares the same file state.
    file: Arc<File>,
}

/// The mutable contents of a descriptor table.
#[derive(Clone)]
struct FileArrayInner {
    // Map from descriptor number to its open-file entry; BTreeMap keeps
    // descriptors ordered, which the fd allocator relies on.
    files: BTreeMap<FD, OpenFile>,
    // Hint: the lowest descriptor number believed to be unused, so
    // allocation does not rescan from zero every time.
    fd_min_avail: FD,
}

/// A process's file descriptor table, guarded by a spinlock so that a
/// single table can be shared (e.g. across fork via `new_shared`).
pub struct FileArray {
    inner: Spin<FileArrayInner>,
}
  39. impl OpenFile {
  40. pub fn close_on_exec(&self) -> bool {
  41. self.flags & O_CLOEXEC as u64 != 0
  42. }
  43. }
  44. #[no_mangle]
  45. pub extern "C" fn r_filearray_new_for_init() -> *const FileArray {
  46. Arc::into_raw(Arc::new(FileArray {
  47. inner: Spin::new(FileArrayInner {
  48. files: BTreeMap::new(),
  49. fd_min_avail: 0,
  50. }),
  51. }))
  52. }
  53. #[no_mangle]
  54. pub extern "C" fn r_filearray_new_shared(other: *const FileArray) -> *const FileArray {
  55. let other = BorrowedArc::from_raw(other);
  56. Arc::into_raw(FileArray::new_shared(&other))
  57. }
  58. #[no_mangle]
  59. pub extern "C" fn r_filearray_new_cloned(other: *const FileArray) -> *const FileArray {
  60. let other = BorrowedArc::from_raw(other);
  61. Arc::into_raw(FileArray::new_cloned(&other))
  62. }
  63. #[no_mangle]
  64. pub extern "C" fn r_filearray_drop(other: *const FileArray) {
  65. // SAFETY: `other` is a valid pointer from `Arc::into_raw()`.
  66. unsafe { Arc::from_raw(other) };
  67. }
  68. impl FileArray {
  69. pub fn get_current<'lt>() -> BorrowedArc<'lt, Self> {
  70. // SAFETY: `current_process` is always valid.
  71. let current = unsafe { current_process.as_mut().unwrap() };
  72. BorrowedArc::from_raw(current.files.m_handle as *const _)
  73. }
  74. pub fn new_shared(other: &Arc<Self>) -> Arc<Self> {
  75. other.clone()
  76. }
  77. pub fn new_cloned(other: &Arc<Self>) -> Arc<Self> {
  78. Arc::new(Self {
  79. inner: Spin::clone(&other.inner),
  80. })
  81. }
  82. /// Acquires the file array lock.
  83. pub fn get(&self, fd: FD) -> Option<Arc<File>> {
  84. self.inner.lock().get(fd)
  85. }
  86. pub fn close_all(&self) {
  87. let mut inner = self.inner.lock();
  88. inner.fd_min_avail = 0;
  89. inner.files.clear();
  90. }
  91. pub fn close(&self, fd: FD) -> KResult<()> {
  92. let mut inner = self.inner.lock();
  93. inner.files.remove(&fd).ok_or(EBADF)?;
  94. inner.release_fd(fd);
  95. Ok(())
  96. }
  97. pub fn on_exec(&self) -> () {
  98. let mut inner = self.inner.lock();
  99. // TODO: This is not efficient. We should avoid cloning.
  100. let fds_to_close = inner
  101. .files
  102. .iter()
  103. .filter(|(_, ofile)| ofile.close_on_exec())
  104. .map(|(&fd, _)| fd)
  105. .collect::<Vec<_>>();
  106. inner.files.retain(|_, ofile| !ofile.close_on_exec());
  107. fds_to_close.into_iter().for_each(|fd| inner.release_fd(fd));
  108. }
  109. }
  110. impl FileArray {
  111. pub fn dup(&self, old_fd: FD) -> KResult<FD> {
  112. let mut inner = self.inner.lock();
  113. let old_file = inner.files.get(&old_fd).ok_or(EBADF)?;
  114. let new_file_data = old_file.file.clone();
  115. let new_file_flags = old_file.flags;
  116. let new_fd = inner.next_fd();
  117. inner.do_insert(new_fd, new_file_flags, new_file_data);
  118. Ok(new_fd)
  119. }
  120. pub fn dup_to(&self, old_fd: FD, new_fd: FD, flags: u64) -> KResult<FD> {
  121. let mut inner = self.inner.lock();
  122. let old_file = inner.files.get(&old_fd).ok_or(EBADF)?;
  123. let new_file_data = old_file.file.clone();
  124. match inner.files.entry(new_fd) {
  125. Entry::Vacant(_) => {}
  126. Entry::Occupied(entry) => {
  127. let new_file = entry.into_mut();
  128. new_file.flags = flags;
  129. new_file.file = new_file_data;
  130. return Ok(new_fd);
  131. }
  132. }
  133. assert_eq!(new_fd, inner.allocate_fd(new_fd));
  134. inner.do_insert(new_fd, flags, new_file_data);
  135. Ok(new_fd)
  136. }
  137. /// # Return
  138. /// `(read_fd, write_fd)`
  139. pub fn pipe(&self) -> KResult<(FD, FD)> {
  140. let mut inner = self.inner.lock();
  141. let read_fd = inner.next_fd();
  142. let write_fd = inner.next_fd();
  143. let pipe = Pipe::new();
  144. let (read_end, write_end) = pipe.split();
  145. inner.do_insert(read_fd, 0, read_end);
  146. inner.do_insert(write_fd, 0, write_end);
  147. Ok((read_fd, write_fd))
  148. }
  149. pub fn open(&self, fs_context: &FsContext, path: Path, flags: u32, mode: Mode) -> KResult<FD> {
  150. let dentry = Dentry::open(fs_context, path, true)?;
  151. dentry.open_check(flags, mode)?;
  152. let fdflag = if flags & O_CLOEXEC != 0 { FD_CLOEXEC } else { 0 };
  153. let can_read = flags & O_WRONLY == 0;
  154. let can_write = flags & (O_WRONLY | O_RDWR) != 0;
  155. let append = flags & O_APPEND != 0;
  156. let inode = dentry.get_inode()?;
  157. let filemode = inode.mode.load(Ordering::Relaxed);
  158. if flags & O_DIRECTORY != 0 {
  159. if !s_isdir(filemode) {
  160. return Err(ENOTDIR);
  161. }
  162. } else {
  163. if s_isdir(filemode) && can_write {
  164. return Err(EISDIR);
  165. }
  166. }
  167. if flags & O_TRUNC != 0 {
  168. if can_write && s_isreg(filemode) {
  169. inode.truncate(0)?;
  170. }
  171. }
  172. let mut inner = self.inner.lock();
  173. let fd = inner.next_fd();
  174. if s_ischr(filemode) && inode.devid()? == 0x0501 {
  175. inner.do_insert(fd, fdflag as u64, TTYFile::new(unsafe { console }));
  176. } else {
  177. inner.do_insert(
  178. fd,
  179. fdflag as u64,
  180. InodeFile::new(dentry, (can_read, can_write, append)),
  181. );
  182. }
  183. Ok(fd)
  184. }
  185. pub fn fcntl(&self, fd: FD, cmd: u32, arg: usize) -> KResult<usize> {
  186. let mut inner = self.inner.lock();
  187. let ofile = inner.files.get_mut(&fd).ok_or(EBADF)?;
  188. match cmd {
  189. F_DUPFD | F_DUPFD_CLOEXEC => {
  190. let cloexec = cmd == F_DUPFD_CLOEXEC || (ofile.flags & FD_CLOEXEC as u64 != 0);
  191. let flags = if cloexec { O_CLOEXEC } else { 0 };
  192. let new_file_data = ofile.file.clone();
  193. let new_fd = inner.allocate_fd(arg as FD);
  194. inner.do_insert(new_fd, flags as u64, new_file_data);
  195. Ok(new_fd as usize)
  196. }
  197. F_GETFD => Ok(ofile.flags as usize),
  198. F_SETFD => {
  199. ofile.flags = arg as u64;
  200. Ok(0)
  201. }
  202. _ => unimplemented!("fcntl: cmd={}", cmd),
  203. }
  204. }
  205. }
  206. impl FileArrayInner {
  207. fn get(&mut self, fd: FD) -> Option<Arc<File>> {
  208. self.files.get(&fd).map(|f| f.file.clone())
  209. }
  210. fn find_available(&mut self, from: FD) -> FD {
  211. self.files
  212. .range(&from..)
  213. .fold_while(from, |current, (&key, _)| {
  214. if current == key {
  215. Continue(current + 1)
  216. } else {
  217. Done(current)
  218. }
  219. })
  220. .into_inner()
  221. }
  222. /// Allocate a new file descriptor starting from `from`.
  223. ///
  224. /// Returned file descriptor should be used immediately.
  225. ///
  226. fn allocate_fd(&mut self, from: FD) -> FD {
  227. let from = FD::max(from, self.fd_min_avail);
  228. if from == self.fd_min_avail {
  229. let next_min_avail = self.find_available(from + 1);
  230. let allocated = self.fd_min_avail;
  231. self.fd_min_avail = next_min_avail;
  232. allocated
  233. } else {
  234. self.find_available(from)
  235. }
  236. }
  237. fn release_fd(&mut self, fd: FD) {
  238. if fd < self.fd_min_avail {
  239. self.fd_min_avail = fd;
  240. }
  241. }
  242. fn next_fd(&mut self) -> FD {
  243. self.allocate_fd(self.fd_min_avail)
  244. }
  245. /// Insert a file description to the file array.
  246. fn do_insert(&mut self, fd: FD, flags: u64, file: Arc<File>) {
  247. assert!(self.files.insert(fd, OpenFile { flags, file }).is_none());
  248. }
  249. }