thread.rs 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152
  1. use core::{
  2. arch::{asm, naked_asm},
  3. cell::{RefCell, UnsafeCell},
  4. cmp,
  5. sync::atomic::{self, AtomicU32},
  6. };
  7. use crate::{
  8. kernel::{
  9. mem::{
  10. phys::{CachedPP, PhysPtr},
  11. MMList,
  12. },
  13. terminal::Terminal,
  14. user::dataflow::CheckedUserPointer,
  15. vfs::FsContext,
  16. },
  17. prelude::*,
  18. sync::{preempt, CondVar, SpinGuard},
  19. };
  20. use alloc::{
  21. collections::{btree_map::BTreeMap, vec_deque::VecDeque},
  22. sync::{Arc, Weak},
  23. };
  24. use bindings::{ECHILD, EINTR, EINVAL, EPERM, ESRCH};
  25. use lazy_static::lazy_static;
  26. use crate::kernel::vfs::filearray::FileArray;
  27. use super::{
  28. kstack, signal::{RaiseResult, Signal, SignalList}, KernelStack, Scheduler
  29. };
  30. use arch::{TaskContext, InterruptContext};
/// Scheduling state of a thread, maintained by the scheduler.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ThreadState {
    /// Created but not yet initialized for scheduling.
    Preparing,
    /// Currently executing on a CPU.
    Running,
    /// Runnable, waiting in the ready queue.
    Ready,
    /// Exited; awaiting reaping by the parent.
    Zombie,
    /// Interruptible sleep — a signal can wake it (see `Thread::raise` / `iwake`).
    ISleep,
    /// Uninterruptible sleep — only woken explicitly (see `do_stop` / `uwake`).
    USleep,
}
/// How a child's state change is reported to a waiting parent.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WaitType {
    /// Exited normally with the given exit code.
    Exited(u32),
    /// Terminated by a signal.
    Signaled(Signal),
    /// Stopped by a signal.
    Stopped(Signal),
    /// Resumed (SIGCONT-style continue).
    Continued,
}
/// A single wait event queued on a parent's `WaitList`.
#[derive(Debug, Clone, Copy)]
pub struct WaitObject {
    /// Pid of the child this event is about.
    pub pid: u32,
    /// What happened to the child.
    pub code: WaitType,
}
  52. impl WaitType {
  53. pub fn to_wstatus(self) -> u32 {
  54. match self {
  55. WaitType::Exited(status) => (status & 0xff) << 8,
  56. WaitType::Signaled(signal) if signal.is_coredump() => signal.to_signum() | 0x80,
  57. WaitType::Signaled(signal) => signal.to_signum(),
  58. WaitType::Stopped(signal) => 0x7f | (signal.to_signum() << 8),
  59. WaitType::Continued => 0xffff,
  60. }
  61. }
  62. }
  63. impl WaitObject {
  64. pub fn stopped(&self) -> Option<Signal> {
  65. if let WaitType::Stopped(signal) = self.code {
  66. Some(signal)
  67. } else {
  68. None
  69. }
  70. }
  71. pub fn is_continue(&self) -> bool {
  72. matches!(self.code, WaitType::Continued)
  73. }
  74. }
/// Mutable state of a [`Session`], protected by its spinlock.
#[derive(Debug)]
struct SessionInner {
    /// Foreground process group
    foreground: Weak<ProcessGroup>,
    /// The controlling terminal, if one has been attached.
    control_terminal: Option<Arc<Terminal>>,
    /// Member process groups, keyed by pgid.
    groups: BTreeMap<u32, Weak<ProcessGroup>>,
}
/// A session: a collection of process groups that share at most one
/// controlling terminal.
#[derive(Debug)]
pub struct Session {
    /// Session id; equals the leader's pid (see `Session::new` call sites).
    sid: u32,
    /// The session leader process.
    leader: Weak<Process>,
    inner: Spin<SessionInner>,
}
/// A process group within a session.
#[derive(Debug)]
pub struct ProcessGroup {
    /// Group id; equals the leader's pid (see `ProcessGroup::new`).
    pgid: u32,
    /// The group leader process.
    leader: Weak<Process>,
    /// The session this group belongs to.
    session: Weak<Session>,
    /// Member processes, keyed by pid.
    processes: Spin<BTreeMap<u32, Weak<Process>>>,
}
/// Mutable state of a [`Process`], protected by its spinlock.
#[derive(Debug)]
struct ProcessInner {
    /// Parent process
    ///
    /// Parent process must be valid during the whole life of the process.
    /// The only case that parent process may be `None` is when this is the init process
    /// or the process is kernel thread.
    parent: Option<Arc<Process>>,
    /// Process group
    pgroup: Arc<ProcessGroup>,
    /// Session
    session: Arc<Session>,
    /// Children list
    ///
    /// Keyed by the child thread's tid (see `ProcessInner::add_child`); for
    /// thread-group leaders the tid equals the child process' pid.
    children: BTreeMap<u32, Weak<Thread>>,
    /// Thread list
    ///
    /// Keyed by tid.
    threads: BTreeMap<u32, Weak<Thread>>,
}
/// Queue of child state-change events, consumed by `Process::wait`.
#[derive(Debug)]
pub struct WaitList {
    /// Pending wait events.
    wait_procs: Spin<VecDeque<WaitObject>>,
    /// Signaled whenever new events are queued.
    cv_wait_procs: CondVar,
    /// The process owning this list; it receives SIGCHLD on notify.
    process: Weak<Process>,
}
/// Batched notification over a locked `WaitList`.
///
/// Holds the `wait_procs` lock for its whole lifetime; waiters are woken (and
/// SIGCHLD raised) at most once, on drop.
pub struct NotifyBatch<'waitlist, 'cv, 'process> {
    wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
    cv: &'cv CondVar,
    process: &'process Weak<Process>,
    // Whether `Drop` should wake waiters and raise SIGCHLD.
    needs_notify: bool,
}
/// A `wait`-style consumer view over a locked `WaitList`.
pub struct Entry<'waitlist, 'cv> {
    wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
    cv: &'cv CondVar,
    // Whether the caller wants to receive stop events.
    want_stop: bool,
    // Whether the caller wants to receive continue events.
    want_continue: bool,
}
/// Drains a locked `WaitList`, yielding only exit/termination events.
pub struct DrainExited<'waitlist> {
    wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
}
#[derive(Debug)]
pub struct Process {
    /// Process id
    ///
    /// This should never change during the life of the process.
    pub pid: u32,
    /// Queue of state-change events reported by our children.
    pub wait_list: WaitList,
    /// The process' memory mappings.
    pub mm_list: Arc<MMList>,
    inner: Spin<ProcessInner>,
}
  143. impl PartialOrd for Process {
  144. fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
  145. self.pid.partial_cmp(&other.pid)
  146. }
  147. }
  148. impl Ord for Process {
  149. fn cmp(&self, other: &Self) -> cmp::Ordering {
  150. self.pid.cmp(&other.pid)
  151. }
  152. }
  153. impl PartialEq for Process {
  154. fn eq(&self, other: &Self) -> bool {
  155. self.pid == other.pid
  156. }
  157. }
  158. impl Eq for Process {}
/// Mutable state of a [`Thread`], protected by its spinlock.
#[derive(Debug)]
struct ThreadInner {
    /// Thread name
    name: Arc<[u8]>,
    /// Thread TLS descriptor 32-bit
    ///
    /// A packed x86 segment descriptor; 0 means "not set" (see
    /// `set_thread_area` / `load_thread_area32`).
    tls_desc32: u64,
    /// User pointer
    /// Store child thread's tid when child thread returns to user space.
    set_child_tid: usize,
}
pub struct Thread {
    pub tid: u32,
    pub process: Arc<Process>,
    /// Open file descriptor table (shared via `Arc`).
    pub files: Arc<FileArray>,
    /// Filesystem context (cwd/root), shared via `Arc`.
    pub fs_context: Arc<FsContext>,
    pub signal_list: SignalList,
    /// Thread state for scheduler use.
    pub state: Spin<ThreadState>,
    /// Thread context
    pub context: UnsafeCell<TaskContext>,
    /// Kernel stack
    /// Never access this directly.
    ///
    /// We can only touch kernel stack when the process is neither running nor sleeping.
    /// AKA, the process is in the ready queue and will return to `schedule` context.
    kstack: RefCell<KernelStack>,
    inner: Spin<ThreadInner>,
}
  187. impl PartialOrd for Thread {
  188. fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
  189. self.tid.partial_cmp(&other.tid)
  190. }
  191. }
  192. impl Ord for Thread {
  193. fn cmp(&self, other: &Self) -> cmp::Ordering {
  194. self.tid.cmp(&other.tid)
  195. }
  196. }
  197. impl PartialEq for Thread {
  198. fn eq(&self, other: &Self) -> bool {
  199. self.tid == other.tid
  200. }
  201. }
  202. impl Eq for Thread {}
/// Flag word of a `set_thread_area`-style user descriptor; see the accessor
/// methods on the `impl` for the individual bits.
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct UserDescriptorFlags(u32);
/// User-supplied TLS segment descriptor (`#[repr(C)]`: layout is part of the
/// userspace ABI).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct UserDescriptor {
    /// GDT entry index requested by the user (`u32::MAX` = allocate one).
    entry: u32,
    /// Segment base address.
    base: u32,
    /// Segment limit.
    limit: u32,
    flags: UserDescriptorFlags,
}
/// Global tables of all sessions, process groups, processes and threads.
pub struct ProcessList {
    /// The init process (pid 1); held strongly so it is never dropped.
    init: Arc<Process>,
    /// All live threads, keyed by tid. Holds the strong references that keep
    /// threads alive.
    threads: Spin<BTreeMap<u32, Arc<Thread>>>,
    /// All processes, keyed by pid.
    processes: Spin<BTreeMap<u32, Weak<Process>>>,
    /// All process groups, keyed by pgid.
    pgroups: Spin<BTreeMap<u32, Weak<ProcessGroup>>>,
    /// All sessions, keyed by sid.
    sessions: Spin<BTreeMap<u32, Weak<Session>>>,
}
  221. impl Session {
  222. fn new(sid: u32, leader: Weak<Process>) -> Arc<Self> {
  223. Arc::new(Self {
  224. sid,
  225. leader,
  226. inner: Spin::new(SessionInner {
  227. foreground: Weak::new(),
  228. control_terminal: None,
  229. groups: BTreeMap::new(),
  230. }),
  231. })
  232. }
  233. fn add_member(&self, pgroup: &Arc<ProcessGroup>) {
  234. self.inner
  235. .lock()
  236. .groups
  237. .insert(pgroup.pgid, Arc::downgrade(pgroup));
  238. }
  239. pub fn foreground_pgid(&self) -> Option<u32> {
  240. self.inner.lock().foreground.upgrade().map(|fg| fg.pgid)
  241. }
  242. /// Set the foreground process group.
  243. pub fn set_foreground_pgid(&self, pgid: u32) -> KResult<()> {
  244. let mut inner = self.inner.lock();
  245. let group = inner.groups.get(&pgid);
  246. if let Some(group) = group {
  247. inner.foreground = group.clone();
  248. Ok(())
  249. } else {
  250. // TODO!!!: Check if the process group is valid.
  251. // We assume that the process group is valid for now.
  252. Err(EPERM)
  253. }
  254. }
  255. /// Only session leaders can set the control terminal.
  256. /// Make sure we've checked that before calling this function.
  257. pub fn set_control_terminal(
  258. self: &Arc<Self>,
  259. terminal: &Arc<Terminal>,
  260. forced: bool,
  261. ) -> KResult<()> {
  262. let mut inner = self.inner.lock();
  263. if let Some(_) = inner.control_terminal.as_ref() {
  264. if let Some(session) = terminal.session().as_ref() {
  265. if session.sid == self.sid {
  266. return Ok(());
  267. }
  268. }
  269. return Err(EPERM);
  270. }
  271. terminal.set_session(self, forced)?;
  272. inner.control_terminal = Some(terminal.clone());
  273. inner.foreground = Arc::downgrade(&Thread::current().process.pgroup());
  274. Ok(())
  275. }
  276. /// Drop the control terminal reference inside the session.
  277. /// DO NOT TOUCH THE TERMINAL'S SESSION FIELD.
  278. pub fn drop_control_terminal(&self) -> Option<Arc<Terminal>> {
  279. let mut inner = self.inner.lock();
  280. inner.foreground = Weak::new();
  281. inner.control_terminal.take()
  282. }
  283. pub fn raise_foreground(&self, signal: Signal) {
  284. if let Some(fg) = self.inner.lock().foreground.upgrade() {
  285. fg.raise(signal);
  286. }
  287. }
  288. }
  289. impl ProcessGroup {
  290. fn new_for_init(pgid: u32, leader: Weak<Process>, session: Weak<Session>) -> Arc<Self> {
  291. Arc::new(Self {
  292. pgid,
  293. leader: leader.clone(),
  294. session,
  295. processes: Spin::new(BTreeMap::from([(pgid, leader)])),
  296. })
  297. }
  298. fn new(leader: &Arc<Process>, session: &Arc<Session>) -> Arc<Self> {
  299. let pgroup = Arc::new(Self {
  300. pgid: leader.pid,
  301. leader: Arc::downgrade(leader),
  302. session: Arc::downgrade(session),
  303. processes: Spin::new(BTreeMap::from([(leader.pid, Arc::downgrade(leader))])),
  304. });
  305. session.add_member(&pgroup);
  306. pgroup
  307. }
  308. }
impl Drop for Thread {
    fn drop(&mut self) {
        // Unlink this thread from its process' thread table and from the
        // parent's children table (both are keyed by tid).
        // NOTE(review): locks `self.process.inner` and then the parent's
        // `inner` — confirm this nesting order is consistent with the other
        // paths that take both locks (e.g. `do_kill_process`).
        let mut process = self.process.inner.lock();
        process.threads.remove(&self.tid);
        if let Some(parent) = &process.parent {
            parent.inner.lock().children.remove(&self.tid);
        }
    }
}
impl Drop for Process {
    fn drop(&mut self) {
        let inner = self.inner.lock();
        // All children must have been reparented (to init) or reaped before
        // the process itself may go away.
        assert!(inner.children.is_empty());
        // Unlink from our process group and from the global process table.
        inner.pgroup.processes.lock().remove(&self.pid);
        ProcessList::get().processes.lock().remove(&self.pid);
    }
}
  326. impl Drop for ProcessGroup {
  327. fn drop(&mut self) {
  328. if let Some(session) = self.session.upgrade() {
  329. session.inner.lock().groups.remove(&self.pgid);
  330. }
  331. }
  332. }
lazy_static! {
    // Global process/thread bookkeeping tables.
    //
    // First access bootstraps tasking: the init process (pid 1) and the idle
    // process (pid 0) are created with one thread each and handed to the
    // scheduler BEFORE the table itself is constructed, so merely touching
    // this global performs the initial setup (see `init_multitasking`).
    static ref GLOBAL_PROC_LIST: ProcessList = {
        let init_process = Process::new_for_init(1, None);
        let init_thread = Thread::new_for_init(b"[kernel kinit]".as_slice().into(), &init_process);
        Scheduler::set_current(init_thread.clone());
        let idle_process = Process::new_for_init(0, None);
        let idle_thread =
            Thread::new_for_init(b"[kernel idle#BS]".as_slice().into(), &idle_process);
        Scheduler::set_idle(idle_thread.clone());
        // `new_for_init` gave init its own session and pgroup, both with id 1.
        let init_session_weak = Arc::downgrade(&init_process.inner.lock().session);
        let init_pgroup_weak = Arc::downgrade(&init_process.inner.lock().pgroup);
        ProcessList {
            sessions: Spin::new(BTreeMap::from([(1, init_session_weak)])),
            pgroups: Spin::new(BTreeMap::from([(1, init_pgroup_weak)])),
            threads: Spin::new(BTreeMap::from([
                (1, init_thread.clone()),
                (0, idle_thread.clone()),
            ])),
            processes: Spin::new(BTreeMap::from([
                (1, Arc::downgrade(&init_process)),
                (0, Arc::downgrade(&idle_process)),
            ])),
            init: init_process,
        }
    };
}
  359. impl ProcessList {
  360. pub fn get() -> &'static Self {
  361. &GLOBAL_PROC_LIST
  362. }
  363. pub fn add_session(&self, session: &Arc<Session>) {
  364. self.sessions
  365. .lock()
  366. .insert(session.sid, Arc::downgrade(session));
  367. }
  368. pub fn add_pgroup(&self, pgroup: &Arc<ProcessGroup>) {
  369. self.pgroups
  370. .lock()
  371. .insert(pgroup.pgid, Arc::downgrade(pgroup));
  372. }
  373. pub fn add_process(&self, process: &Arc<Process>) {
  374. self.processes
  375. .lock()
  376. .insert(process.pid, Arc::downgrade(process));
  377. }
  378. pub fn add_thread(&self, thread: &Arc<Thread>) {
  379. self.threads.lock().insert(thread.tid, thread.clone());
  380. }
  381. pub fn kill_current(signal: Signal) -> ! {
  382. ProcessList::get().do_kill_process(&Thread::current().process, WaitType::Signaled(signal));
  383. Scheduler::schedule_noreturn()
  384. }
  385. // TODO!!!!!!: Reconsider this
  386. fn remove(&self, tid: u32) {
  387. if let None = self.threads.lock().remove(&tid) {
  388. panic!("Thread {} not found", tid);
  389. }
  390. }
  391. pub fn try_find_process(&self, pid: u32) -> Option<Arc<Process>> {
  392. self.processes.lock().get(&pid).and_then(Weak::upgrade)
  393. }
  394. pub fn try_find_thread(&self, tid: u32) -> Option<Arc<Thread>> {
  395. self.threads.lock().get(&tid).cloned()
  396. }
  397. pub fn try_find_pgroup(&self, pgid: u32) -> Option<Arc<ProcessGroup>> {
  398. self.pgroups.lock().get(&pgid).and_then(Weak::upgrade)
  399. }
  400. pub fn try_find_session(&self, sid: u32) -> Option<Arc<Session>> {
  401. self.sessions.lock().get(&sid).and_then(Weak::upgrade)
  402. }
  403. /// Make the process a zombie and notify the parent.
  404. pub fn do_kill_process(&self, process: &Arc<Process>, status: WaitType) {
  405. if &self.init == process {
  406. panic!("init exited");
  407. }
  408. preempt::disable();
  409. let mut inner = process.inner.lock();
  410. // TODO!!!!!!: When we are killing multiple threads, we need to wait until all
  411. // the threads are stopped then proceed.
  412. for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
  413. assert!(&thread == Thread::current());
  414. Scheduler::get().lock().set_zombie(&thread);
  415. thread.files.close_all();
  416. }
  417. // If we are the session leader, we should drop the control terminal.
  418. if inner.session.sid == process.pid {
  419. if let Some(terminal) = inner.session.drop_control_terminal() {
  420. terminal.drop_session();
  421. }
  422. }
  423. // Unmap all user memory areas
  424. process.mm_list.clear_user();
  425. // Make children orphans (adopted by init)
  426. {
  427. let mut init_inner = self.init.inner.lock();
  428. inner.children.retain(|_, child| {
  429. let child = child.upgrade().unwrap();
  430. let mut child_inner = child.process.inner.lock();
  431. if child_inner.parent.as_ref().unwrap() == &self.init {
  432. return false;
  433. }
  434. child_inner.parent = Some(self.init.clone());
  435. init_inner.add_child(&child);
  436. false
  437. });
  438. }
  439. let mut init_notify = self.init.wait_list.notify_batch();
  440. process
  441. .wait_list
  442. .drain_exited()
  443. .into_iter()
  444. .for_each(|item| init_notify.notify(item));
  445. init_notify.finish();
  446. inner.parent.as_ref().unwrap().wait_list.notify(WaitObject {
  447. pid: process.pid,
  448. code: status,
  449. });
  450. preempt::enable();
  451. }
  452. }
  453. impl ProcessGroup {
  454. fn add_member(&self, process: &Arc<Process>) {
  455. self.processes
  456. .lock()
  457. .insert(process.pid, Arc::downgrade(process));
  458. }
  459. fn remove_member(&self, pid: u32) {
  460. self.processes.lock().remove(&pid);
  461. }
  462. pub fn raise(&self, signal: Signal) {
  463. let processes = self.processes.lock();
  464. for process in processes.values().map(|p| p.upgrade().unwrap()) {
  465. process.raise(signal);
  466. }
  467. }
  468. }
  469. impl ProcessInner {
  470. fn add_child(&mut self, child: &Arc<Thread>) {
  471. self.children.insert(child.tid, Arc::downgrade(child));
  472. }
  473. fn add_thread(&mut self, thread: &Arc<Thread>) {
  474. self.threads.insert(thread.tid, Arc::downgrade(thread));
  475. }
  476. }
/// PID 0 and 1 is created manually so we start from 2.
/// Monotonically increasing, never reused. `Relaxed` ordering suffices in
/// `alloc_pid` because only uniqueness matters, not ordering with other memory.
static NEXT_PID: AtomicU32 = AtomicU32::new(2);
impl Process {
    /// Allocate a fresh, globally unique pid.
    fn alloc_pid() -> u32 {
        NEXT_PID.fetch_add(1, atomic::Ordering::Relaxed)
    }

    /// Fork-style clone: the new process inherits `other`'s pgroup and
    /// session, gets a cloned memory map, starts with no threads/children,
    /// and has `other` as its parent.
    pub fn new_cloned(other: &Arc<Self>) -> Arc<Self> {
        let other_inner = other.inner.lock();
        let process = Arc::new_cyclic(|weak| Self {
            pid: Self::alloc_pid(),
            wait_list: WaitList::new(weak.clone()),
            mm_list: MMList::new_cloned(&other.mm_list),
            inner: Spin::new(ProcessInner {
                pgroup: other_inner.pgroup.clone(),
                session: other_inner.session.clone(),
                children: BTreeMap::new(),
                threads: BTreeMap::new(),
                parent: Some(other.clone()),
            }),
        });
        // Register globally and in the inherited process group.
        ProcessList::get().add_process(&process);
        other_inner.pgroup.add_member(&process);
        process
    }

    /// Bootstrap constructor for init/idle: the process leads its own fresh
    /// session and process group (sid == pgid == pid).
    fn new_for_init(pid: u32, parent: Option<Arc<Self>>) -> Arc<Self> {
        let process = Arc::new_cyclic(|weak| {
            let session = Session::new(pid, weak.clone());
            let pgroup = ProcessGroup::new_for_init(pid, weak.clone(), Arc::downgrade(&session));
            session.add_member(&pgroup);
            Self {
                pid,
                wait_list: WaitList::new(weak.clone()),
                mm_list: MMList::new(),
                inner: Spin::new(ProcessInner {
                    parent,
                    pgroup,
                    session,
                    children: BTreeMap::new(),
                    threads: BTreeMap::new(),
                }),
            }
        });
        process.inner.lock().pgroup.add_member(&process);
        process
    }

    /// Deliver `signal` to the process: offer it to each thread in turn and
    /// stop at the first one that accepts it.
    pub fn raise(&self, signal: Signal) {
        let inner = self.inner.lock();
        for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
            if let RaiseResult::Finished = thread.raise(signal) {
                break;
            }
        }
    }

    /// Record `child` (a thread-group leader) in our children list.
    fn add_child(&self, child: &Arc<Thread>) {
        self.inner.lock().add_child(child);
    }

    /// Record a thread belonging to this process.
    fn add_thread(&self, thread: &Arc<Thread>) {
        self.inner.lock().add_thread(thread);
    }

    /// Wait for a child state change.
    ///
    /// Returns `Err(ECHILD)` when there is no child to wait for, `Ok(None)`
    /// when `no_block` is set and nothing is pending, and the wait object
    /// otherwise. Exited children are removed from the global thread table;
    /// stop/continue reports leave the child in place.
    pub fn wait(
        &self,
        no_block: bool,
        trace_stop: bool,
        trace_continue: bool,
    ) -> KResult<Option<WaitObject>> {
        let mut waits = self.wait_list.entry(trace_stop, trace_continue);
        let wait_object = loop {
            if let Some(object) = waits.get() {
                break object;
            }
            if self.inner.lock().children.is_empty() {
                return Err(ECHILD);
            }
            if no_block {
                return Ok(None);
            }
            // Sleeps on the wait list's condvar; `Err(EINTR)` on pending signal.
            waits.wait()?;
        };
        if wait_object.stopped().is_some() || wait_object.is_continue() {
            Ok(Some(wait_object))
        } else {
            // The child has exited: drop its last strong reference.
            ProcessList::get().remove(wait_object.pid);
            Ok(Some(wait_object))
        }
    }

    /// Create a new session for the process.
    pub fn setsid(self: &Arc<Self>) -> KResult<u32> {
        let mut inner = self.inner.lock();
        // If there exists a session that has the same sid as our pid, we can't create a new
        // session. The standard says that we should create a new process group and be the
        // only process in the new process group and session.
        if ProcessList::get().try_find_session(self.pid).is_some() {
            return Err(EPERM);
        }
        inner.session = Session::new(self.pid, Arc::downgrade(self));
        ProcessList::get().add_session(&inner.session);
        inner.pgroup.remove_member(self.pid);
        inner.pgroup = ProcessGroup::new(self, &inner.session);
        ProcessList::get().add_pgroup(&inner.pgroup);
        Ok(inner.pgroup.pgid)
    }

    /// Set the process group id of the process to `pgid`.
    ///
    /// This function does the actual work.
    fn do_setpgid(self: &Arc<Self>, pgid: u32) -> KResult<()> {
        let mut inner = self.inner.lock();
        // Changing the process group of a session leader is not allowed.
        if inner.session.sid == self.pid {
            return Err(EPERM);
        }
        // Move us to an existing process group.
        if let Some(pgroup) = ProcessList::get().try_find_pgroup(pgid) {
            // Moving the process to a process group in a different session is not allowed.
            if pgroup.session.upgrade().unwrap().sid != inner.session.sid {
                return Err(EPERM);
            }
            // If we are already in the process group, we are done.
            if pgroup.pgid == inner.pgroup.pgid {
                return Ok(());
            }
            inner.pgroup.remove_member(self.pid);
            inner.pgroup = pgroup;
        } else {
            // Create a new process group only if `pgid` matches our `pid`.
            if pgid != self.pid {
                return Err(EPERM);
            }
            inner.pgroup.remove_member(self.pid);
            inner.pgroup = ProcessGroup::new(self, &inner.session);
            ProcessList::get().add_pgroup(&inner.pgroup);
        }
        Ok(())
    }

    /// Set the process group id of the process `pid` to `pgid`.
    ///
    /// This function should be called on the process that issued the syscall in order to do
    /// permission checks.
    pub fn setpgid(self: &Arc<Self>, pid: u32, pgid: u32) -> KResult<()> {
        // We may set pgid of either the calling process or a child process.
        if pid == self.pid {
            self.do_setpgid(pgid)
        } else {
            let child = {
                // If `pid` refers to one of our children, the thread leaders must be
                // in our children list.
                let inner = self.inner.lock();
                let child = {
                    let child = inner.children.get(&pid);
                    child.and_then(Weak::upgrade).ok_or(ESRCH)?
                };
                // Changing the process group of a child is only allowed
                // if we are in the same session.
                if child.process.sid() != inner.session.sid {
                    return Err(EPERM);
                }
                child
            };
            // TODO: Check whether we, as a child, have already performed an `execve`.
            // If so, we should return `Err(EACCES)`.
            child.process.do_setpgid(pgid)
        }
    }

    /// The id of the session this process belongs to.
    pub fn sid(&self) -> u32 {
        self.inner.lock().session.sid
    }

    /// The id of the process group this process belongs to.
    pub fn pgid(&self) -> u32 {
        self.inner.lock().pgroup.pgid
    }

    /// A strong reference to our session.
    pub fn session(&self) -> Arc<Session> {
        self.inner.lock().session.clone()
    }

    /// A strong reference to our process group.
    pub fn pgroup(&self) -> Arc<ProcessGroup> {
        self.inner.lock().pgroup.clone()
    }
}
  652. impl UserDescriptorFlags {
  653. fn is_32bit_segment(&self) -> bool {
  654. self.0 & 0b1 != 0
  655. }
  656. fn contents(&self) -> u32 {
  657. self.0 & 0b110
  658. }
  659. fn is_read_exec_only(&self) -> bool {
  660. self.0 & 0b1000 != 0
  661. }
  662. fn is_limit_in_pages(&self) -> bool {
  663. self.0 & 0b10000 != 0
  664. }
  665. fn is_present(&self) -> bool {
  666. self.0 & 0b100000 == 0
  667. }
  668. fn is_usable(&self) -> bool {
  669. self.0 & 0b1000000 != 0
  670. }
  671. }
impl Thread {
    /// Create the initial thread of an init/idle process. The tid equals the
    /// owning process' pid and the thread starts in `Preparing` state with
    /// fresh file and filesystem contexts.
    fn new_for_init(name: Arc<[u8]>, process: &Arc<Process>) -> Arc<Self> {
        let thread = Arc::new(Self {
            tid: process.pid,
            process: process.clone(),
            files: FileArray::new_for_init(),
            fs_context: FsContext::new_for_init(),
            signal_list: SignalList::new(),
            kstack: RefCell::new(KernelStack::new()),
            context: UnsafeCell::new(TaskContext::new()),
            state: Spin::new(ThreadState::Preparing),
            inner: Spin::new(ThreadInner {
                name,
                tls_desc32: 0,
                set_child_tid: 0,
            }),
        });
        process.add_thread(&thread);
        thread
    }

    /// Fork: create the leader thread of a brand-new process cloned from
    /// `other`. Files, fs context, TLS descriptor and name are copied;
    /// pending signals are NOT inherited.
    pub fn new_cloned(other: &Self) -> Arc<Self> {
        let process = Process::new_cloned(&other.process);
        let other_state = other.state.lock();
        let other_inner = other.inner.lock();
        // Only a currently running thread may fork itself.
        assert!(matches!(*other_state, ThreadState::Running));
        let signal_list = other.signal_list.clone();
        signal_list.clear_pending();
        let thread = Arc::new(Self {
            tid: process.pid,
            process: process.clone(),
            files: FileArray::new_cloned(&other.files),
            fs_context: FsContext::new_cloned(&other.fs_context),
            signal_list,
            kstack: RefCell::new(KernelStack::new()),
            context: UnsafeCell::new(TaskContext::new()),
            state: Spin::new(ThreadState::Preparing),
            inner: Spin::new(ThreadInner {
                name: other_inner.name.clone(),
                tls_desc32: other_inner.tls_desc32,
                set_child_tid: other_inner.set_child_tid,
            }),
        });
        // Register globally, in the parent's children list, and in the new
        // process' thread list.
        ProcessList::get().add_thread(&thread);
        other.process.add_child(&thread);
        process.add_thread(&thread);
        thread
    }

    /// The thread currently running on this CPU.
    pub fn current<'lt>() -> &'lt Arc<Self> {
        Scheduler::current()
    }

    /// Stop this thread: report the stop to the parent, enter
    /// uninterruptible sleep and yield the CPU.
    pub fn do_stop(self: &Arc<Self>, signal: Signal) {
        if let Some(parent) = self.process.parent() {
            parent.wait_list.notify(WaitObject {
                pid: self.process.pid,
                code: WaitType::Stopped(signal),
            });
        }
        preempt::disable();
        // `SIGSTOP` can only be waken up by `SIGCONT` or `SIGKILL`.
        // SAFETY: Preempt disabled above.
        Scheduler::get().lock().usleep(self);
        Scheduler::schedule();
    }

    /// Report to the parent that this thread's process was continued.
    pub fn do_continue(self: &Arc<Self>) {
        if let Some(parent) = self.process.parent() {
            parent.wait_list.notify(WaitObject {
                pid: self.process.pid,
                code: WaitType::Continued,
            });
        }
    }

    /// Deliver `signal` to this thread, waking it from interruptible or
    /// uninterruptible sleep when the signal list requests it.
    pub fn raise(self: &Arc<Thread>, signal: Signal) -> RaiseResult {
        match self.signal_list.raise(signal) {
            RaiseResult::ShouldIWakeUp => {
                Scheduler::get().lock_irq().iwake(self);
                RaiseResult::Finished
            }
            RaiseResult::ShouldUWakeUp => {
                Scheduler::get().lock_irq().uwake(self);
                RaiseResult::Finished
            }
            result => result,
        }
    }

    /// Install this thread's 32-bit TLS descriptor into the descriptor table
    /// and reload `%gs` so the updated descriptor takes effect.
    pub fn load_thread_area32(&self) {
        let inner = self.inner.lock();
        if inner.tls_desc32 == 0 {
            return;
        }
        // SAFETY: `tls32` should be per cpu.
        // NOTE(review): writes descriptor slot 7 (byte offset 7 * 8) through
        // the cached physical mapping with base 0 — confirm the table really
        // lives at physical address 0 on every CPU.
        let tls32_addr = CachedPP::new(0x0 + 7 * 8);
        tls32_addr.as_mut::<u64>().clone_from(&inner.tls_desc32);
        // Reloading %gs with its current selector forces the CPU to re-read
        // the (just rewritten) descriptor.
        unsafe {
            asm!(
                "mov %gs, %ax",
                "mov %ax, %gs",
                out("ax") _,
                options(att_syntax)
            )
        };
    }

    /// Handle a `set_thread_area`-style request: pack the user-supplied
    /// base/limit/flags into an x86 segment descriptor stored in
    /// `tls_desc32`, reporting the table entry used (7) back through `desc`.
    pub fn set_thread_area(&self, desc: &mut UserDescriptor) -> KResult<()> {
        let mut inner = self.inner.lock();
        // Clear the TLS area if it is not present.
        if desc.flags.is_read_exec_only() && !desc.flags.is_present() {
            if desc.limit != 0 && desc.base != 0 {
                CheckedUserPointer::new(desc.base as _, desc.limit as _)?.zero()?;
            }
            return Ok(());
        }
        // Only "allocate an entry" (-1) requests for 32-bit segments are
        // supported; the single TLS slot is entry 7.
        if desc.entry != u32::MAX || !desc.flags.is_32bit_segment() {
            return Err(EINVAL);
        }
        desc.entry = 7;
        // limit[15:0] -> descriptor bits 0..16.
        inner.tls_desc32 = desc.limit as u64 & 0xffff;
        // base[23:0] -> descriptor bits 16..40.
        inner.tls_desc32 |= (desc.base as u64 & 0xffffff) << 16;
        // Access byte 0xf2 (present, DPL 3, writable data) plus the 32-bit
        // default-operand-size flag in the high flags nibble.
        inner.tls_desc32 |= 0x4_0_f2_000000_0000;
        // limit[19:16] -> descriptor bits 48..52.
        inner.tls_desc32 |= (desc.limit as u64 & 0xf_0000) << (48 - 16);
        if desc.flags.is_limit_in_pages() {
            // Granularity bit: limit is counted in pages.
            inner.tls_desc32 |= 1 << 55;
        }
        // base[31:24] -> descriptor bits 56..64.
        inner.tls_desc32 |= (desc.base as u64 & 0xff_000000) << (56 - 24);
        Ok(())
    }

    /// Prepare a forked thread: store the parent's interrupt frame on the
    /// new kernel stack and point the task context at `fork_return`.
    pub fn fork_init(&self, interrupt_context: InterruptContext) {
        // The thread sleeps until the scheduler explicitly wakes it.
        let mut state = self.state.lock();
        *state = ThreadState::USleep;
        let sp = self.kstack.borrow().init(interrupt_context);
        unsafe {
            (&mut(*self.get_context_mut_ptr())).init(fork_return as usize, sp);
        }
    }

    /// Prepare a kernel thread to start at `entry` on its own kernel stack.
    pub fn init(&self, entry: usize) {
        let mut state = self.state.lock();
        *state = ThreadState::USleep;
        unsafe {
            (&mut(*self.get_context_mut_ptr())).init(entry, self.get_kstack_bottom());
        }
    }

    /// Point the CPU's interrupt stack at this thread's kernel stack.
    pub fn load_interrupt_stack(&self) {
        self.kstack.borrow().load_interrupt_stack();
    }

    /// The stack-bottom address of this thread's kernel stack (as defined by
    /// `KernelStack::get_stack_bottom`).
    pub fn get_kstack_bottom(&self) -> usize {
        self.kstack.borrow().get_stack_bottom()
    }

    /// Raw pointer to the saved task context.
    ///
    /// # Safety
    /// The caller must guarantee exclusive access — i.e. the thread is not
    /// concurrently running or being context-switched (see the `kstack`
    /// field's doc for the same protocol).
    pub unsafe fn get_context_mut_ptr(&self) -> *mut TaskContext {
        self.context.get()
    }

    /// Replace the thread's display name.
    pub fn set_name(&self, name: Arc<[u8]>) {
        self.inner.lock().name = name;
    }

    /// A cheap (refcounted) handle to the thread's display name.
    pub fn get_name(&self) -> Arc<[u8]> {
        self.inner.lock().name.clone()
    }
}
#[naked]
/// Entry point of a freshly forked thread: restores the interrupt frame that
/// `fork_init` stored on the kernel stack and returns to user space.
unsafe extern "C" fn fork_return() {
    // We don't land on the typical `Scheduler::schedule()` function, so we need to
    // manually enable preemption.
    //
    // The pop sequence must mirror the register layout of the saved
    // `InterruptContext`; the trailing `add rsp, 16` skips two remaining
    // words (presumably the vector/error-code pair — confirm against the
    // `InterruptContext` definition) before `iretq`.
    naked_asm! {
        "
        call {preempt_enable}
        pop rax
        pop rbx
        pop rcx
        pop rdx
        pop rdi
        pop rsi
        pop r8
        pop r9
        pop r10
        pop r11
        pop r12
        pop r13
        pop r14
        pop r15
        pop rbp
        add rsp, 16
        iretq
        ",
        preempt_enable = sym preempt::enable,
    }
}
// TODO: Maybe we can find a better way instead of using `RefCell` for `KernelStack`?
// SAFETY: `kstack` is the only non-`Sync` field. Per its doc, it is only
// touched while the thread is neither running nor sleeping, so exclusion is
// enforced by the scheduler's protocol rather than by the type system —
// NOTE(review): confirm every `kstack.borrow()` call site honors that rule.
unsafe impl Sync for Thread {}
  857. impl WaitList {
  858. pub fn new(process: Weak<Process>) -> Self {
  859. Self {
  860. wait_procs: Spin::new(VecDeque::new()),
  861. cv_wait_procs: CondVar::new(),
  862. process,
  863. }
  864. }
  865. pub fn notify(&self, wait: WaitObject) {
  866. let mut wait_procs = self.wait_procs.lock();
  867. wait_procs.push_back(wait);
  868. self.cv_wait_procs.notify_all();
  869. self.process
  870. .upgrade()
  871. .expect("`process` must be valid if we are using `WaitList`")
  872. .raise(Signal::SIGCHLD);
  873. }
  874. /// Notify some processes in batch. The process is waken up if we have really notified
  875. /// some processes.
  876. ///
  877. /// # Lock
  878. /// This function locks the `wait_procs` and returns a `NotifyBatch` that
  879. /// will unlock it on dropped.
  880. pub fn notify_batch(&self) -> NotifyBatch {
  881. NotifyBatch {
  882. wait_procs: self.wait_procs.lock(),
  883. cv: &self.cv_wait_procs,
  884. needs_notify: false,
  885. process: &self.process,
  886. }
  887. }
  888. pub fn drain_exited(&self) -> DrainExited {
  889. DrainExited {
  890. wait_procs: self.wait_procs.lock(),
  891. }
  892. }
  893. pub fn entry(&self, want_stop: bool, want_continue: bool) -> Entry {
  894. Entry {
  895. wait_procs: self.wait_procs.lock(),
  896. cv: &self.cv_wait_procs,
  897. want_stop,
  898. want_continue,
  899. }
  900. }
  901. }
  902. impl Entry<'_, '_> {
  903. pub fn get(&mut self) -> Option<WaitObject> {
  904. if let Some(idx) = self
  905. .wait_procs
  906. .iter()
  907. .enumerate()
  908. .filter(|(_, item)| {
  909. if item.stopped().is_some() {
  910. self.want_stop
  911. } else if item.is_continue() {
  912. self.want_continue
  913. } else {
  914. true
  915. }
  916. })
  917. .map(|(idx, _)| idx)
  918. .next()
  919. {
  920. Some(self.wait_procs.remove(idx).unwrap())
  921. } else {
  922. None
  923. }
  924. }
  925. pub fn wait(&mut self) -> KResult<()> {
  926. self.cv.wait(&mut self.wait_procs);
  927. if Thread::current().signal_list.has_pending_signal() {
  928. return Err(EINTR);
  929. }
  930. Ok(())
  931. }
  932. }
  933. impl DrainExited<'_> {
  934. pub fn into_iter(&mut self) -> impl Iterator<Item = WaitObject> + '_ {
  935. // We don't propagate stop and continue to the new parent.
  936. self.wait_procs
  937. .drain(..)
  938. .filter(|item| item.stopped().is_none() && !item.is_continue())
  939. }
  940. }
  941. impl NotifyBatch<'_, '_, '_> {
  942. pub fn notify(&mut self, wait: WaitObject) {
  943. self.wait_procs.push_back(wait);
  944. }
  945. /// Finish the batch and notify all if we have notified some processes.
  946. pub fn finish(self) {}
  947. }
  948. impl Drop for NotifyBatch<'_, '_, '_> {
  949. fn drop(&mut self) {
  950. if self.needs_notify {
  951. self.cv.notify_all();
  952. self.process
  953. .upgrade()
  954. .expect("`process` must be valid if we are using `WaitList`")
  955. .raise(Signal::SIGCHLD);
  956. }
  957. }
  958. }
  959. impl Process {
  960. pub fn parent(&self) -> Option<Arc<Process>> {
  961. self.inner.lock().parent.clone()
  962. }
  963. }
  964. pub fn init_multitasking() {
  965. // Lazy init
  966. assert!(ProcessList::get().try_find_thread(1).is_some());
  967. Thread::current().load_interrupt_stack();
  968. Thread::current().process.mm_list.switch_page_table();
  969. }