thread.rs 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401
  1. use super::{
  2. signal::{RaiseResult, Signal, SignalList},
  3. Process, ProcessList,
  4. };
  5. use crate::{
  6. kernel::{
  7. cpu::local_cpu,
  8. user::dataflow::CheckedUserPointer,
  9. vfs::{filearray::FileArray, FsContext},
  10. },
  11. prelude::*,
  12. };
  13. use alloc::sync::Arc;
  14. use arch::{InterruptContext, UserTLS, _arch_fork_return};
  15. use core::{
  16. arch::asm,
  17. pin::Pin,
  18. ptr::NonNull,
  19. sync::atomic::{AtomicUsize, Ordering},
  20. task::Waker,
  21. };
  22. use eonix_mm::address::{Addr as _, VAddr};
  23. use eonix_runtime::{
  24. context::ExecutionContext,
  25. run::{Contexted, Run, RunState},
  26. };
  27. use eonix_sync::AsProofMut as _;
  28. use pointers::BorrowedArc;
/// Per-CPU record of the thread currently executing on this CPU.
///
/// Raw `NonNull` pointers are used instead of `Arc` clones; they are only
/// dereferenced while running in that thread's context (see `Thread::current`
/// and `Thread::exit`). NOTE(review): the pointees are presumably kept alive
/// by the scheduler owning the `ThreadRunnable` — confirm.
struct CurrentThread {
    /// The currently running `Thread`.
    thread: NonNull<Thread>,
    /// The `ThreadRunnable` driving that thread.
    runnable: NonNull<ThreadRunnable>,
}
/// Per-CPU slot for the current thread; `None` until a thread is first loaded
/// on this CPU. Written by `ThreadRunnable::load_running_context`, read by
/// `Thread::current` and `Thread::exit`.
#[eonix_percpu::define_percpu]
static CURRENT_THREAD: Option<CurrentThread> = None;
/// Builder for a [`Thread`].
///
/// All fields start unset; `tid`, `name` and `process` are mandatory at
/// `build` time (it panics via `expect` if they are missing), the rest fall
/// back to defaults (fresh file array, global fs context, fresh signal list,
/// no TLS, `set_child_tid == 0`).
pub struct ThreadBuilder {
    /// Thread id; required.
    tid: Option<u32>,
    /// Thread name; required.
    name: Option<Arc<[u8]>>,
    /// Owning process; required.
    process: Option<Arc<Process>>,
    /// Open-file table; defaults to a new `FileArray`.
    files: Option<Arc<FileArray>>,
    /// Filesystem context; defaults to the global one.
    fs_context: Option<Arc<FsContext>>,
    /// Signal list; defaults to a new `SignalList`.
    signal_list: Option<SignalList>,
    /// Optional user TLS descriptor.
    tls: Option<UserTLS>,
    /// User address to store the child's tid at; defaults to 0 (disabled).
    set_child_tid: Option<usize>,
}
/// Mutable, lock-protected state of a [`Thread`] (held in `Thread::inner`
/// behind a `Spin` lock).
#[derive(Debug)]
struct ThreadInner {
    /// Thread name
    name: Arc<[u8]>,
    /// Thread TLS
    tls: Option<UserTLS>,
    /// User pointer
    /// Store child thread's tid when child thread returns to user space.
    /// A value of 0 means the feature is unused (see `ThreadBuilder::build`).
    set_child_tid: usize,
}
/// A kernel thread: the schedulable unit belonging to a [`Process`].
pub struct Thread {
    /// Thread id, unique within the process list.
    pub tid: u32,
    /// The process this thread belongs to.
    pub process: Arc<Process>,
    /// Open-file table (cloned on fork, see `ThreadBuilder::fork_from`).
    pub files: Arc<FileArray>,
    /// Filesystem context (cwd/root; cloned on fork).
    pub fs_context: Arc<FsContext>,
    /// Per-thread signal state.
    pub signal_list: SignalList,
    /// Mutable state (name, TLS, set_child_tid) behind a spin lock.
    inner: Spin<ThreadInner>,
}
/// The schedulable wrapper around a [`Thread`], owning the contexts needed to
/// enter the thread and to come back out of it on exit.
pub struct ThreadRunnable {
    thread: Arc<Thread>,
    /// Interrupt context for the thread initialization.
    /// We store the kernel stack pointer in one of the fields for now.
    ///
    /// TODO: A better way to store the interrupt context.
    interrupt_context: InterruptContext,
    /// Interrupt stack pointer recorded by `run`; 0 means "not set yet"
    /// (checked in `load_running_context`).
    interrupt_stack_pointer: AtomicUsize,
    /// Context switched back to when the thread exits (`Thread::exit`).
    return_context: ExecutionContext,
}
/// Flag bits of a [`UserDescriptor`].
/// NOTE(review): the bit layout (32-bit segment, contents, read/exec-only,
/// limit-in-pages, not-present, usable — see the accessors) appears to mirror
/// the flag bitfields of Linux's `struct user_desc` — confirm against the ABI.
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct UserDescriptorFlags(u32);
/// A user-supplied TLS segment descriptor (`set_thread_area`-style).
/// `#[repr(C)]` keeps the field layout stable; NOTE(review): presumably this
/// is exchanged with user space via the syscall ABI — confirm at the caller.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct UserDescriptor {
    /// Descriptor entry index; written back by `Thread::set_thread_area`
    /// when a new entry is allocated.
    entry: u32,
    /// Segment base address.
    base: u32,
    /// Segment limit, in bytes or pages depending on `is_limit_in_pages`.
    limit: u32,
    /// Flag bits; see [`UserDescriptorFlags`].
    flags: UserDescriptorFlags,
}
  84. #[allow(dead_code)]
  85. impl UserDescriptorFlags {
  86. fn is_32bit_segment(&self) -> bool {
  87. self.0 & 0b1 != 0
  88. }
  89. fn contents(&self) -> u32 {
  90. self.0 & 0b110
  91. }
  92. fn is_read_exec_only(&self) -> bool {
  93. self.0 & 0b1000 != 0
  94. }
  95. fn is_limit_in_pages(&self) -> bool {
  96. self.0 & 0b10000 != 0
  97. }
  98. fn is_present(&self) -> bool {
  99. self.0 & 0b100000 == 0
  100. }
  101. fn is_usable(&self) -> bool {
  102. self.0 & 0b1000000 != 0
  103. }
  104. }
  105. impl ThreadBuilder {
  106. pub fn new() -> Self {
  107. Self {
  108. tid: None,
  109. name: None,
  110. process: None,
  111. files: None,
  112. fs_context: None,
  113. signal_list: None,
  114. tls: None,
  115. set_child_tid: None,
  116. }
  117. }
  118. pub fn tid(mut self, tid: u32) -> Self {
  119. self.tid = Some(tid);
  120. self
  121. }
  122. pub fn name(mut self, name: Arc<[u8]>) -> Self {
  123. self.name = Some(name);
  124. self
  125. }
  126. pub fn process(mut self, process: Arc<Process>) -> Self {
  127. self.process = Some(process);
  128. self
  129. }
  130. pub fn files(mut self, files: Arc<FileArray>) -> Self {
  131. self.files = Some(files);
  132. self
  133. }
  134. pub fn fs_context(mut self, fs_context: Arc<FsContext>) -> Self {
  135. self.fs_context = Some(fs_context);
  136. self
  137. }
  138. pub fn signal_list(mut self, signal_list: SignalList) -> Self {
  139. self.signal_list = Some(signal_list);
  140. self
  141. }
  142. pub fn tls(mut self, tls: Option<UserTLS>) -> Self {
  143. self.tls = tls;
  144. self
  145. }
  146. pub fn set_child_tid(mut self, set_child_tid: usize) -> Self {
  147. self.set_child_tid = Some(set_child_tid);
  148. self
  149. }
  150. /// Fork the thread from another thread.
  151. ///
  152. /// Sets the thread's files, fs_context, signal_list, name, tls, and set_child_tid
  153. pub fn fork_from(self, thread: &Thread) -> Self {
  154. let inner = thread.inner.lock();
  155. self.files(FileArray::new_cloned(&thread.files))
  156. .fs_context(FsContext::new_cloned(&thread.fs_context))
  157. .signal_list(thread.signal_list.clone())
  158. .name(inner.name.clone())
  159. .tls(inner.tls.clone())
  160. .set_child_tid(inner.set_child_tid)
  161. }
  162. pub fn build(self, process_list: &mut ProcessList) -> Arc<Thread> {
  163. let tid = self.tid.expect("TID is not set");
  164. let name = self.name.expect("Name is not set");
  165. let process = self.process.expect("Process is not set");
  166. let files = self.files.unwrap_or_else(|| FileArray::new());
  167. let fs_context = self
  168. .fs_context
  169. .unwrap_or_else(|| FsContext::global().clone());
  170. let signal_list = self.signal_list.unwrap_or_else(|| SignalList::new());
  171. let set_child_tid = self.set_child_tid.unwrap_or(0);
  172. signal_list.clear_pending();
  173. let thread = Arc::new(Thread {
  174. tid,
  175. process: process.clone(),
  176. files,
  177. fs_context,
  178. signal_list,
  179. inner: Spin::new(ThreadInner {
  180. name,
  181. tls: self.tls,
  182. set_child_tid,
  183. }),
  184. });
  185. process_list.add_thread(&thread);
  186. process.add_thread(&thread, process_list.prove_mut());
  187. thread
  188. }
  189. }
impl Thread {
    /// Return a borrowed reference to the thread currently running on this
    /// CPU, as published by `ThreadRunnable::load_running_context`.
    ///
    /// # Panics
    /// Panics if no thread has been loaded on this CPU yet.
    pub fn current<'lt>() -> BorrowedArc<'lt, Self> {
        // SAFETY: We won't change the thread pointer in the current CPU when
        // we return here after some preemption.
        let current: &Option<CurrentThread> = unsafe { CURRENT_THREAD.as_ref() };
        let current = current.as_ref().expect("Current thread is not set");
        // SAFETY: We can only use the returned value when we are in the context of the thread.
        unsafe { BorrowedArc::from_raw(current.thread) }
    }

    /// Raise `signal` on this thread by delegating to its signal list.
    pub fn raise(self: &Arc<Self>, signal: Signal) -> RaiseResult {
        self.signal_list.raise(signal)
    }

    /// Load this thread's 32-bit TLS descriptor into the current CPU,
    /// if one is set.
    ///
    /// # Safety
    /// This function is unsafe because it accesses the `current_cpu()`, which needs
    /// to be called in a preemption disabled context.
    pub unsafe fn load_thread_area32(&self) {
        if let Some(tls) = self.inner.lock().tls.as_ref() {
            // SAFETY: Preemption is disabled.
            tls.load(local_cpu());
        }
    }

    /// Install or clear a 32-bit TLS descriptor (`set_thread_area`-style).
    ///
    /// A read/exec-only descriptor marked "not present" is treated as a
    /// clear request: the user memory range it covers is zeroed (nothing is
    /// done when its base or limit is 0). Otherwise a new TLS entry is
    /// created and the allocated entry index is written back to `desc.entry`.
    ///
    /// # Errors
    /// Fails if the checked user pointer for the range to zero is invalid.
    pub fn set_thread_area(&self, desc: &mut UserDescriptor) -> KResult<()> {
        let mut inner = self.inner.lock();
        // Clear the TLS area if it is not present.
        if desc.flags.is_read_exec_only() && !desc.flags.is_present() {
            if desc.limit == 0 || desc.base == 0 {
                return Ok(());
            }
            // Limit may be expressed in 4 KiB pages; convert to a byte length.
            let len = if desc.flags.is_limit_in_pages() {
                (desc.limit as usize) << 12
            } else {
                desc.limit as usize
            };
            // NOTE(review): this zeroes the user TLS area but leaves
            // `inner.tls` unchanged — confirm that is intended.
            CheckedUserPointer::new(desc.base as _, len)?.zero()?;
            return Ok(());
        }
        let (tls, entry) = UserTLS::new32(desc.base, desc.limit, desc.flags.is_limit_in_pages());
        // Report the allocated entry index back to the caller.
        desc.entry = entry;
        inner.tls = Some(tls);
        Ok(())
    }

    /// Replace the thread's name.
    pub fn set_name(&self, name: Arc<[u8]>) {
        self.inner.lock().name = name;
    }

    /// Return a clone of the thread's name.
    pub fn get_name(&self) -> Arc<[u8]> {
        self.inner.lock().name.clone()
    }

    /// Terminate the current thread by switching back to the runnable's
    /// `return_context` (the point where `ThreadRunnable::run` switched in).
    /// Never returns.
    ///
    /// # Safety
    /// This function needs to be called with preempt count == 1.
    /// We won't return so clean all the resources before calling this.
    pub unsafe fn exit() -> ! {
        // SAFETY: We won't change the thread pointer in the current CPU when
        // we return here after some preemption.
        let current: &Option<CurrentThread> = unsafe { CURRENT_THREAD.as_ref() };
        let current = current.as_ref().expect("Current thread is not set");
        // SAFETY: We can only use the `run_context` when we are in the context of the thread.
        let runnable = unsafe { current.runnable.as_ref() };
        runnable.return_context.switch_noreturn()
    }
}
  250. impl ThreadRunnable {
  251. pub fn new(thread: Arc<Thread>, entry: VAddr, stack_pointer: VAddr) -> Self {
  252. let mut interrupt_context = InterruptContext::default();
  253. interrupt_context.set_return_address(entry.addr() as _, true);
  254. interrupt_context.set_stack_pointer(stack_pointer.addr() as _, true);
  255. interrupt_context.set_interrupt_enabled(true);
  256. Self {
  257. thread,
  258. interrupt_context,
  259. interrupt_stack_pointer: AtomicUsize::new(0),
  260. return_context: ExecutionContext::new(),
  261. }
  262. }
  263. pub fn from_context(thread: Arc<Thread>, interrupt_context: InterruptContext) -> Self {
  264. Self {
  265. thread,
  266. interrupt_context,
  267. interrupt_stack_pointer: AtomicUsize::new(0),
  268. return_context: ExecutionContext::new(),
  269. }
  270. }
  271. }
impl Contexted for ThreadRunnable {
    /// Prepare this CPU to run the thread: restore its interrupt stack,
    /// publish it as the CPU's current thread, activate its address space
    /// and load its 32-bit TLS. The SAFETY comments below rely on the caller
    /// invoking this with preemption disabled.
    fn load_running_context(&self) {
        let thread: &Thread = &self.thread;
        // Restore the interrupt stack recorded by `run`; 0 means it has not
        // been recorded yet, in which case nothing is loaded.
        match self.interrupt_stack_pointer.load(Ordering::Relaxed) {
            0 => {}
            sp => unsafe {
                // SAFETY: Preemption is disabled.
                arch::load_interrupt_stack(local_cpu(), sp as u64);
            },
        }
        // SAFETY: Preemption is disabled.
        unsafe {
            // SAFETY: `self` and `thread` are valid and non-null.
            let current_thread = CurrentThread {
                thread: NonNull::new_unchecked(thread as *const _ as *mut _),
                runnable: NonNull::new_unchecked(self as *const _ as *mut _),
            };
            // SAFETY: Preemption is disabled.
            CURRENT_THREAD.swap(Some(current_thread));
        }
        // Switch to the thread's address space before touching its TLS.
        thread.process.mm_list.activate();
        unsafe {
            // SAFETY: Preemption is disabled.
            thread.load_thread_area32();
        }
    }

    /// Undo `load_running_context`'s address-space activation when the
    /// thread is descheduled.
    fn restore_running_context(&self) {
        self.thread.process.mm_list.deactivate();
    }
}
impl Run for ThreadRunnable {
    type Output = ();
    /// Enter the thread: build an execution context whose instruction pointer
    /// is the fork-return trampoline and whose stack pointer is the saved
    /// interrupt frame, then switch to it. Control returns here when the
    /// thread calls `Thread::exit`, which switches back to `return_context`.
    fn run(self: Pin<&mut Self>, _waker: &Waker) -> RunState<Self::Output> {
        let mut task_context = ExecutionContext::new();
        task_context.set_interrupt(false);
        // `_arch_fork_return` restores the interrupt context found at the
        // stack pointer set just below.
        task_context.set_ip(_arch_fork_return as _);
        task_context.set_sp(&self.interrupt_context as *const _ as _);
        eonix_preempt::disable();
        // TODO!!!!!: CHANGE THIS
        // Carve an interrupt stack out of the current kernel stack:
        // 512 bytes below the current %rsp, rounded down to 16-byte alignment.
        let sp = unsafe {
            let mut sp: usize;
            asm!(
                "mov %rsp, {0}",
                out(reg) sp,
                options(nomem, preserves_flags, att_syntax),
            );
            sp -= 512;
            sp &= !0xf;
            sp
        };
        // Record it so `load_running_context` can restore it after preemption.
        self.interrupt_stack_pointer.store(sp, Ordering::Relaxed);
        unsafe {
            // SAFETY: Preemption is disabled.
            arch::load_interrupt_stack(local_cpu(), sp as u64);
        }
        eonix_preempt::enable();
        self.return_context.switch_to(&task_context);
        // We return here with preempt count == 1.
        eonix_preempt::enable();
        RunState::Finished(())
    }
}
  333. }