//! thread.rs — thread, process, process-group and session management.
//!
//! (Extraction artifacts — file-size banner and fused line-number runs —
//! removed; they were not part of the source.)
  1. use core::{
  2. cell::RefCell,
  3. cmp,
  4. sync::atomic::{self, AtomicU32},
  5. };
  6. use crate::{
  7. kernel::{
  8. mem::{
  9. phys::{CachedPP, PhysPtr},
  10. MMList,
  11. },
  12. terminal::Terminal,
  13. user::dataflow::CheckedUserPointer,
  14. vfs::FsContext,
  15. },
  16. prelude::*,
  17. sync::{preempt, CondVar, SpinGuard},
  18. };
  19. use alloc::{
  20. collections::{btree_map::BTreeMap, vec_deque::VecDeque},
  21. sync::{Arc, Weak},
  22. };
  23. use bindings::{ECHILD, EINTR, EINVAL, EPERM, ESRCH};
  24. use lazy_static::lazy_static;
  25. use crate::kernel::vfs::filearray::FileArray;
  26. use super::{
  27. signal::{RaiseResult, Signal, SignalList},
  28. KernelStack, Scheduler,
  29. };
/// Scheduler-visible life-cycle state of a [`Thread`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ThreadState {
    /// Being constructed; kernel stack not yet prepared (see `prepare_kernel_stack`).
    Preparing,
    /// Currently executing on a CPU.
    Running,
    /// Runnable, waiting in the ready queue.
    Ready,
    /// Exited but not yet reaped by the parent.
    Zombie,
    /// Interruptible sleep: can be woken by a signal (`iwake`).
    ISleep,
    /// Uninterruptible sleep: only an explicit `uwake` resumes it.
    USleep,
}
/// Reason a child's state change is reported to a waiting parent.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WaitType {
    /// Normal exit with the given exit status.
    Exited(u32),
    /// Terminated by a signal.
    Signaled(Signal),
    /// Stopped by a signal (e.g. SIGSTOP/SIGTSTP).
    Stopped(Signal),
    /// Resumed by SIGCONT.
    Continued,
}
/// One queued child-state-change notification delivered via `wait()`.
#[derive(Debug, Clone, Copy)]
pub struct WaitObject {
    /// Pid of the child the event concerns.
    pub pid: u32,
    /// What happened to the child.
    pub code: WaitType,
}
impl WaitType {
    /// Encode this event into the POSIX `wstatus` integer layout:
    /// - exited: exit code in bits 8..16, low byte zero
    /// - signaled: signal number in the low 7 bits, bit 7 set on core dump
    /// - stopped: low byte 0x7f, stop signal in bits 8..16
    /// - continued: the distinguished value 0xffff
    pub fn to_wstatus(self) -> u32 {
        match self {
            WaitType::Exited(status) => (status & 0xff) << 8,
            WaitType::Signaled(signal) if signal.is_coredump() => signal.to_signum() | 0x80,
            WaitType::Signaled(signal) => signal.to_signum(),
            WaitType::Stopped(signal) => 0x7f | (signal.to_signum() << 8),
            WaitType::Continued => 0xffff,
        }
    }
}
  62. impl WaitObject {
  63. pub fn stopped(&self) -> Option<Signal> {
  64. if let WaitType::Stopped(signal) = self.code {
  65. Some(signal)
  66. } else {
  67. None
  68. }
  69. }
  70. pub fn is_continue(&self) -> bool {
  71. matches!(self.code, WaitType::Continued)
  72. }
  73. }
/// Mutable state of a [`Session`], guarded by `Session::inner`.
#[derive(Debug)]
struct SessionInner {
    /// Foreground process group
    foreground: Weak<ProcessGroup>,
    /// Controlling terminal, if the session leader acquired one.
    control_terminal: Option<Arc<Terminal>>,
    /// Member process groups, keyed by pgid (weak: groups own their lifetime).
    groups: BTreeMap<u32, Weak<ProcessGroup>>,
}
/// A POSIX session: a set of process groups sharing a controlling terminal.
#[derive(Debug)]
pub struct Session {
    /// Session id — equals the pid of the session leader.
    sid: u32,
    /// The session leader process (weak to avoid a reference cycle).
    leader: Weak<Process>,
    inner: Spin<SessionInner>,
}
/// A POSIX process group: the unit of job control and signal broadcast.
#[derive(Debug)]
pub struct ProcessGroup {
    /// Group id — equals the pid of the group leader.
    pgid: u32,
    /// Group leader (weak to avoid a reference cycle).
    leader: Weak<Process>,
    /// Owning session (weak: the session does not outlive through us).
    session: Weak<Session>,
    /// Member processes keyed by pid.
    processes: Spin<BTreeMap<u32, Weak<Process>>>,
}
/// Mutable state of a [`Process`], guarded by `Process::inner`.
#[derive(Debug)]
struct ProcessInner {
    /// Parent process
    ///
    /// Parent process must be valid during the whole life of the process.
    /// The only case that parent process may be `None` is when this is the init process
    /// or the process is kernel thread.
    parent: Option<Arc<Process>>,
    /// Process group
    pgroup: Arc<ProcessGroup>,
    /// Session
    session: Arc<Session>,
    /// Children list
    // NOTE(review): keyed by the child's *tid* (thread leader), not pid — see
    // `ProcessInner::add_child`, which inserts `child.tid`.
    children: BTreeMap<u32, Weak<Thread>>,
    /// Thread list
    threads: BTreeMap<u32, Weak<Thread>>,
}
/// Queue of child-state-change notifications for one process,
/// consumed by `Process::wait`.
#[derive(Debug)]
pub struct WaitList {
    /// Pending notifications in arrival order.
    wait_procs: Spin<VecDeque<WaitObject>>,
    /// Signaled whenever a notification is queued.
    cv_wait_procs: CondVar,
    /// Back-reference to the owning process (used to raise SIGCHLD).
    process: Weak<Process>,
}
/// Guard for queueing several notifications while holding the `wait_procs`
/// lock once; wakes the waiter / raises SIGCHLD at most once on drop.
pub struct NotifyBatch<'waitlist, 'cv, 'process> {
    wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
    cv: &'cv CondVar,
    process: &'process Weak<Process>,
    // True once something was queued, so `Drop` knows to notify.
    needs_notify: bool,
}
/// A waiter's view of the wait queue; holds the `wait_procs` lock and
/// filters events according to the caller's `waitpid`-style options.
pub struct Entry<'waitlist, 'cv> {
    wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
    cv: &'cv CondVar,
    /// Report stop events (WUNTRACED).
    want_stop: bool,
    /// Report continue events (WCONTINUED).
    want_continue: bool,
}
/// Guard that drains exit notifications out of a dying process's wait list
/// so they can be re-parented to init.
pub struct DrainExited<'waitlist> {
    wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
}
/// A process: pid, address space, wait queue and mutable relationship state.
#[derive(Debug)]
pub struct Process {
    /// Process id
    ///
    /// This should never change during the life of the process.
    pub pid: u32,
    /// Queue of child state changes for `wait()`.
    pub wait_list: WaitList,
    /// User address space (memory mappings).
    pub mm_list: Arc<MMList>,
    inner: Spin<ProcessInner>,
}
  142. impl PartialOrd for Process {
  143. fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
  144. self.pid.partial_cmp(&other.pid)
  145. }
  146. }
  147. impl Ord for Process {
  148. fn cmp(&self, other: &Self) -> cmp::Ordering {
  149. self.pid.cmp(&other.pid)
  150. }
  151. }
  152. impl PartialEq for Process {
  153. fn eq(&self, other: &Self) -> bool {
  154. self.pid == other.pid
  155. }
  156. }
  157. impl Eq for Process {}
/// Mutable state of a [`Thread`], guarded by `Thread::inner`.
#[derive(Debug)]
struct ThreadInner {
    /// Thread name
    name: Arc<[u8]>,
    /// Thread TLS descriptor 32-bit
    tls_desc32: Option<u64>,
    /// TLS base address loaded into IA32_KERNEL_GS_BASE (see `load_thread_area32`).
    tls_base: Option<u64>,
    /// User pointer
    /// Store child thread's tid when child thread returns to user space.
    set_child_tid: usize,
}
/// A kernel-visible thread: scheduling state, kernel stack and per-thread
/// resources (files, fs context, signals).
pub struct Thread {
    /// Thread id; for the thread-group leader this equals the pid.
    pub tid: u32,
    /// Owning process.
    pub process: Arc<Process>,
    /// Open file table (shared or cloned on thread creation).
    pub files: Arc<FileArray>,
    /// Filesystem context (cwd, root, umask).
    pub fs_context: Arc<FsContext>,
    /// Pending/blocked signal bookkeeping.
    pub signal_list: SignalList,
    /// Thread state for scheduler use.
    pub state: Spin<ThreadState>,
    /// Kernel stack
    /// Never access this directly.
    ///
    /// We can only touch kernel stack when the process is neither running nor sleeping.
    /// AKA, the process is in the ready queue and will return to `schedule` context.
    kstack: RefCell<KernelStack>,
    inner: Spin<ThreadInner>,
}
  185. impl PartialOrd for Thread {
  186. fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
  187. self.tid.partial_cmp(&other.tid)
  188. }
  189. }
  190. impl Ord for Thread {
  191. fn cmp(&self, other: &Self) -> cmp::Ordering {
  192. self.tid.cmp(&other.tid)
  193. }
  194. }
  195. impl PartialEq for Thread {
  196. fn eq(&self, other: &Self) -> bool {
  197. self.tid == other.tid
  198. }
  199. }
  200. impl Eq for Thread {}
/// Flag word of a `set_thread_area`-style user descriptor.
// NOTE(review): field layout mirrors Linux's `struct user_desc` flags —
// confirm against the syscall ABI this kernel implements.
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct UserDescriptorFlags(u32);

/// A GDT/LDT entry request passed from user space (cf. Linux `user_desc`).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct UserDescriptor {
    /// Requested entry number; `u32::MAX` means "allocate for me".
    entry: u32,
    /// Segment base address.
    base: u32,
    /// Segment limit (bytes or pages, per `is_limit_in_pages`).
    limit: u32,
    flags: UserDescriptorFlags,
}
/// Global registry of all threads, processes, groups and sessions.
///
/// Threads are held strongly (this map keeps them alive until reaped);
/// everything else is held weakly and owned elsewhere.
pub struct ProcessList {
    /// The init process (pid 1); adopts orphaned children.
    init: Arc<Process>,
    threads: Spin<BTreeMap<u32, Arc<Thread>>>,
    processes: Spin<BTreeMap<u32, Weak<Process>>>,
    pgroups: Spin<BTreeMap<u32, Weak<ProcessGroup>>>,
    sessions: Spin<BTreeMap<u32, Weak<Session>>>,
}
  219. impl Session {
  220. fn new(sid: u32, leader: Weak<Process>) -> Arc<Self> {
  221. Arc::new(Self {
  222. sid,
  223. leader,
  224. inner: Spin::new(SessionInner {
  225. foreground: Weak::new(),
  226. control_terminal: None,
  227. groups: BTreeMap::new(),
  228. }),
  229. })
  230. }
  231. fn add_member(&self, pgroup: &Arc<ProcessGroup>) {
  232. self.inner
  233. .lock()
  234. .groups
  235. .insert(pgroup.pgid, Arc::downgrade(pgroup));
  236. }
  237. pub fn foreground_pgid(&self) -> Option<u32> {
  238. self.inner.lock().foreground.upgrade().map(|fg| fg.pgid)
  239. }
  240. /// Set the foreground process group.
  241. pub fn set_foreground_pgid(&self, pgid: u32) -> KResult<()> {
  242. let mut inner = self.inner.lock();
  243. let group = inner.groups.get(&pgid);
  244. if let Some(group) = group {
  245. inner.foreground = group.clone();
  246. Ok(())
  247. } else {
  248. // TODO!!!: Check if the process group is valid.
  249. // We assume that the process group is valid for now.
  250. Err(EPERM)
  251. }
  252. }
  253. /// Only session leaders can set the control terminal.
  254. /// Make sure we've checked that before calling this function.
  255. pub fn set_control_terminal(
  256. self: &Arc<Self>,
  257. terminal: &Arc<Terminal>,
  258. forced: bool,
  259. ) -> KResult<()> {
  260. let mut inner = self.inner.lock();
  261. if let Some(_) = inner.control_terminal.as_ref() {
  262. if let Some(session) = terminal.session().as_ref() {
  263. if session.sid == self.sid {
  264. return Ok(());
  265. }
  266. }
  267. return Err(EPERM);
  268. }
  269. terminal.set_session(self, forced)?;
  270. inner.control_terminal = Some(terminal.clone());
  271. inner.foreground = Arc::downgrade(&Thread::current().process.pgroup());
  272. Ok(())
  273. }
  274. /// Drop the control terminal reference inside the session.
  275. /// DO NOT TOUCH THE TERMINAL'S SESSION FIELD.
  276. pub fn drop_control_terminal(&self) -> Option<Arc<Terminal>> {
  277. let mut inner = self.inner.lock();
  278. inner.foreground = Weak::new();
  279. inner.control_terminal.take()
  280. }
  281. pub fn raise_foreground(&self, signal: Signal) {
  282. if let Some(fg) = self.inner.lock().foreground.upgrade() {
  283. fg.raise(signal);
  284. }
  285. }
  286. }
impl ProcessGroup {
    /// Build the init process's group during bootstrap, before the leader
    /// `Arc` exists (we are inside `Arc::new_cyclic`, so only a `Weak` is
    /// available — the session membership is wired up by the caller).
    fn new_for_init(pgid: u32, leader: Weak<Process>, session: Weak<Session>) -> Arc<Self> {
        Arc::new(Self {
            pgid,
            leader: leader.clone(),
            session,
            processes: Spin::new(BTreeMap::from([(pgid, leader)])),
        })
    }

    /// Create a new group led by `leader` (pgid = leader pid) and register it
    /// with `session`.
    fn new(leader: &Arc<Process>, session: &Arc<Session>) -> Arc<Self> {
        let pgroup = Arc::new(Self {
            pgid: leader.pid,
            leader: Arc::downgrade(leader),
            session: Arc::downgrade(session),
            processes: Spin::new(BTreeMap::from([(leader.pid, Arc::downgrade(leader))])),
        });
        session.add_member(&pgroup);
        pgroup
    }
}
impl Drop for Thread {
    /// Unlink this thread from its process's thread list and from the
    /// parent's children list (children are keyed by tid).
    fn drop(&mut self) {
        let mut process = self.process.inner.lock();
        process.threads.remove(&self.tid);
        if let Some(parent) = &process.parent {
            parent.inner.lock().children.remove(&self.tid);
        }
    }
}
impl Drop for Process {
    /// Unregister from the process group and the global process list.
    /// By this point all children must have been reparented (see
    /// `do_kill_process`), hence the assertion.
    fn drop(&mut self) {
        let inner = self.inner.lock();
        assert!(inner.children.is_empty());
        inner.pgroup.processes.lock().remove(&self.pid);
        ProcessList::get().processes.lock().remove(&self.pid);
    }
}
impl Drop for ProcessGroup {
    /// Unregister from the owning session, if the session still exists.
    fn drop(&mut self) {
        if let Some(session) = self.session.upgrade() {
            session.inner.lock().groups.remove(&self.pgid);
        }
    }
}
lazy_static! {
    /// Global process registry, built lazily on first access.
    ///
    /// Bootstraps pid 1 (init) and pid 0 (idle) together with their threads,
    /// and installs them as the scheduler's current and idle threads.
    /// NOTE: init's session/pgroup were created inside `new_for_init`; here we
    /// only take weak handles to register them in the global maps.
    static ref GLOBAL_PROC_LIST: ProcessList = {
        let init_process = Process::new_for_init(1, None);
        let init_thread = Thread::new_for_init(b"[kernel kinit]".as_slice().into(), &init_process);
        Scheduler::set_current(init_thread.clone());
        let idle_process = Process::new_for_init(0, None);
        let idle_thread =
            Thread::new_for_init(b"[kernel idle#BS]".as_slice().into(), &idle_process);
        Scheduler::set_idle(idle_thread.clone());
        let init_session_weak = Arc::downgrade(&init_process.inner.lock().session);
        let init_pgroup_weak = Arc::downgrade(&init_process.inner.lock().pgroup);
        ProcessList {
            sessions: Spin::new(BTreeMap::from([(1, init_session_weak)])),
            pgroups: Spin::new(BTreeMap::from([(1, init_pgroup_weak)])),
            threads: Spin::new(BTreeMap::from([
                (1, init_thread.clone()),
                (0, idle_thread.clone()),
            ])),
            processes: Spin::new(BTreeMap::from([
                (1, Arc::downgrade(&init_process)),
                (0, Arc::downgrade(&idle_process)),
            ])),
            init: init_process,
        }
    };
}
  357. impl ProcessList {
  358. pub fn get() -> &'static Self {
  359. &GLOBAL_PROC_LIST
  360. }
  361. pub fn add_session(&self, session: &Arc<Session>) {
  362. self.sessions
  363. .lock()
  364. .insert(session.sid, Arc::downgrade(session));
  365. }
  366. pub fn add_pgroup(&self, pgroup: &Arc<ProcessGroup>) {
  367. self.pgroups
  368. .lock()
  369. .insert(pgroup.pgid, Arc::downgrade(pgroup));
  370. }
  371. pub fn add_process(&self, process: &Arc<Process>) {
  372. self.processes
  373. .lock()
  374. .insert(process.pid, Arc::downgrade(process));
  375. }
  376. pub fn add_thread(&self, thread: &Arc<Thread>) {
  377. self.threads.lock().insert(thread.tid, thread.clone());
  378. }
  379. pub fn kill_current(signal: Signal) -> ! {
  380. ProcessList::get().do_kill_process(&Thread::current().process, WaitType::Signaled(signal));
  381. Scheduler::schedule_noreturn()
  382. }
  383. // TODO!!!!!!: Reconsider this
  384. fn remove(&self, tid: u32) {
  385. if let None = self.threads.lock().remove(&tid) {
  386. panic!("Thread {} not found", tid);
  387. }
  388. }
  389. pub fn try_find_process(&self, pid: u32) -> Option<Arc<Process>> {
  390. self.processes.lock().get(&pid).and_then(Weak::upgrade)
  391. }
  392. pub fn try_find_thread(&self, tid: u32) -> Option<Arc<Thread>> {
  393. self.threads.lock().get(&tid).cloned()
  394. }
  395. pub fn try_find_pgroup(&self, pgid: u32) -> Option<Arc<ProcessGroup>> {
  396. self.pgroups.lock().get(&pgid).and_then(Weak::upgrade)
  397. }
  398. pub fn try_find_session(&self, sid: u32) -> Option<Arc<Session>> {
  399. self.sessions.lock().get(&sid).and_then(Weak::upgrade)
  400. }
  401. /// Make the process a zombie and notify the parent.
  402. pub fn do_kill_process(&self, process: &Arc<Process>, status: WaitType) {
  403. if &self.init == process {
  404. panic!("init exited");
  405. }
  406. preempt::disable();
  407. let mut inner = process.inner.lock();
  408. // TODO!!!!!!: When we are killing multiple threads, we need to wait until all
  409. // the threads are stopped then proceed.
  410. for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
  411. assert!(&thread == Thread::current());
  412. Scheduler::get().lock().set_zombie(&thread);
  413. thread.files.close_all();
  414. }
  415. // If we are the session leader, we should drop the control terminal.
  416. if inner.session.sid == process.pid {
  417. if let Some(terminal) = inner.session.drop_control_terminal() {
  418. terminal.drop_session();
  419. }
  420. }
  421. // Unmap all user memory areas
  422. process.mm_list.clear_user();
  423. // Make children orphans (adopted by init)
  424. {
  425. let mut init_inner = self.init.inner.lock();
  426. inner.children.retain(|_, child| {
  427. let child = child.upgrade().unwrap();
  428. let mut child_inner = child.process.inner.lock();
  429. if child_inner.parent.as_ref().unwrap() == &self.init {
  430. return false;
  431. }
  432. child_inner.parent = Some(self.init.clone());
  433. init_inner.add_child(&child);
  434. false
  435. });
  436. }
  437. let mut init_notify = self.init.wait_list.notify_batch();
  438. process
  439. .wait_list
  440. .drain_exited()
  441. .into_iter()
  442. .for_each(|item| init_notify.notify(item));
  443. init_notify.finish();
  444. inner.parent.as_ref().unwrap().wait_list.notify(WaitObject {
  445. pid: process.pid,
  446. code: status,
  447. });
  448. preempt::enable();
  449. }
  450. }
  451. impl ProcessGroup {
  452. fn add_member(&self, process: &Arc<Process>) {
  453. self.processes
  454. .lock()
  455. .insert(process.pid, Arc::downgrade(process));
  456. }
  457. fn remove_member(&self, pid: u32) {
  458. self.processes.lock().remove(&pid);
  459. }
  460. pub fn raise(&self, signal: Signal) {
  461. let processes = self.processes.lock();
  462. for process in processes.values().map(|p| p.upgrade().unwrap()) {
  463. process.raise(signal);
  464. }
  465. }
  466. }
impl ProcessInner {
    /// Track a child process by its thread leader's tid (weak reference).
    fn add_child(&mut self, child: &Arc<Thread>) {
        self.children.insert(child.tid, Arc::downgrade(child));
    }

    /// Track a thread belonging to this process (weak reference).
    fn add_thread(&mut self, thread: &Arc<Thread>) {
        self.threads.insert(thread.tid, Arc::downgrade(thread));
    }
}
/// PID 0 and 1 is created manually so we start from 2.
// Relaxed is sufficient: allocation only needs uniqueness, not ordering.
static NEXT_PID: AtomicU32 = AtomicU32::new(2);
impl Process {
    /// Hand out the next unused pid (monotonically increasing, never reused).
    fn alloc_pid() -> u32 {
        NEXT_PID.fetch_add(1, atomic::Ordering::Relaxed)
    }

    /// Fork-style clone: new pid, cloned address space, same pgroup/session,
    /// `other` becomes the parent. Registers the new process globally and in
    /// the process group.
    pub fn new_cloned(other: &Arc<Self>) -> Arc<Self> {
        let other_inner = other.inner.lock();
        let process = Arc::new_cyclic(|weak| Self {
            pid: Self::alloc_pid(),
            wait_list: WaitList::new(weak.clone()),
            mm_list: MMList::new_cloned(&other.mm_list),
            inner: Spin::new(ProcessInner {
                pgroup: other_inner.pgroup.clone(),
                session: other_inner.session.clone(),
                children: BTreeMap::new(),
                threads: BTreeMap::new(),
                parent: Some(other.clone()),
            }),
        });
        ProcessList::get().add_process(&process);
        other_inner.pgroup.add_member(&process);
        process
    }

    /// Bootstrap constructor for pid 0/1: creates a fresh session and process
    /// group led by this process (sid = pgid = pid).
    fn new_for_init(pid: u32, parent: Option<Arc<Self>>) -> Arc<Self> {
        let process = Arc::new_cyclic(|weak| {
            let session = Session::new(pid, weak.clone());
            let pgroup = ProcessGroup::new_for_init(pid, weak.clone(), Arc::downgrade(&session));
            session.add_member(&pgroup);
            Self {
                pid,
                wait_list: WaitList::new(weak.clone()),
                mm_list: MMList::new(),
                inner: Spin::new(ProcessInner {
                    parent,
                    pgroup,
                    session,
                    children: BTreeMap::new(),
                    threads: BTreeMap::new(),
                }),
            }
        });
        process.inner.lock().pgroup.add_member(&process);
        process
    }

    /// Deliver `signal` to the first thread that accepts it; stop once one
    /// reports `Finished`.
    pub fn raise(&self, signal: Signal) {
        let inner = self.inner.lock();
        for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
            if let RaiseResult::Finished = thread.raise(signal) {
                break;
            }
        }
    }

    /// Record `child` (a thread-group leader) as our child.
    fn add_child(&self, child: &Arc<Thread>) {
        self.inner.lock().add_child(child);
    }

    /// Record `thread` as belonging to this process.
    fn add_thread(&self, thread: &Arc<Thread>) {
        self.inner.lock().add_thread(thread);
    }

    /// `waitpid`-style wait for a child state change.
    ///
    /// - `no_block`: return `Ok(None)` instead of sleeping (WNOHANG).
    /// - `trace_stop` / `trace_continue`: also report stop / continue events
    ///   (WUNTRACED / WCONTINUED).
    ///
    /// Returns `ECHILD` if there are no children, `EINTR` if interrupted by a
    /// signal. Exit events additionally reap the child's thread entry from
    /// the global list; stop/continue events do not.
    pub fn wait(
        &self,
        no_block: bool,
        trace_stop: bool,
        trace_continue: bool,
    ) -> KResult<Option<WaitObject>> {
        let mut waits = self.wait_list.entry(trace_stop, trace_continue);
        let wait_object = loop {
            if let Some(object) = waits.get() {
                break object;
            }
            if self.inner.lock().children.is_empty() {
                return Err(ECHILD);
            }
            if no_block {
                return Ok(None);
            }
            waits.wait()?;
        };
        if wait_object.stopped().is_some() || wait_object.is_continue() {
            Ok(Some(wait_object))
        } else {
            // The child actually exited: drop its strong thread reference.
            ProcessList::get().remove(wait_object.pid);
            Ok(Some(wait_object))
        }
    }

    /// Create a new session for the process.
    pub fn setsid(self: &Arc<Self>) -> KResult<u32> {
        let mut inner = self.inner.lock();
        // If there exists a session that has the same sid as our pid, we can't create a new
        // session. The standard says that we should create a new process group and be the
        // only process in the new process group and session.
        if ProcessList::get().try_find_session(self.pid).is_some() {
            return Err(EPERM);
        }
        inner.session = Session::new(self.pid, Arc::downgrade(self));
        ProcessList::get().add_session(&inner.session);
        inner.pgroup.remove_member(self.pid);
        inner.pgroup = ProcessGroup::new(self, &inner.session);
        ProcessList::get().add_pgroup(&inner.pgroup);
        Ok(inner.pgroup.pgid)
    }

    /// Set the process group id of the process to `pgid`.
    ///
    /// This function does the actual work.
    fn do_setpgid(self: &Arc<Self>, pgid: u32) -> KResult<()> {
        let mut inner = self.inner.lock();
        // Changing the process group of a session leader is not allowed.
        if inner.session.sid == self.pid {
            return Err(EPERM);
        }
        // Move us to an existing process group.
        if let Some(pgroup) = ProcessList::get().try_find_pgroup(pgid) {
            // Moving the process to a process group in a different session is not allowed.
            if pgroup.session.upgrade().unwrap().sid != inner.session.sid {
                return Err(EPERM);
            }
            // If we are already in the process group, we are done.
            if pgroup.pgid == inner.pgroup.pgid {
                return Ok(());
            }
            inner.pgroup.remove_member(self.pid);
            inner.pgroup = pgroup;
        } else {
            // Create a new process group only if `pgid` matches our `pid`.
            if pgid != self.pid {
                return Err(EPERM);
            }
            inner.pgroup.remove_member(self.pid);
            inner.pgroup = ProcessGroup::new(self, &inner.session);
            ProcessList::get().add_pgroup(&inner.pgroup);
        }
        Ok(())
    }

    /// Set the process group id of the process `pid` to `pgid`.
    ///
    /// This function should be called on the process that issued the syscall in order to do
    /// permission checks.
    pub fn setpgid(self: &Arc<Self>, pid: u32, pgid: u32) -> KResult<()> {
        // We may set pgid of either the calling process or a child process.
        if pid == self.pid {
            self.do_setpgid(pgid)
        } else {
            let child = {
                // If `pid` refers to one of our children, the thread leaders must be
                // in our children list.
                let inner = self.inner.lock();
                let child = {
                    let child = inner.children.get(&pid);
                    child.and_then(Weak::upgrade).ok_or(ESRCH)?
                };
                // Changing the process group of a child is only allowed
                // if we are in the same session.
                if child.process.sid() != inner.session.sid {
                    return Err(EPERM);
                }
                child
            };
            // TODO: Check whether we, as a child, have already performed an `execve`.
            // If so, we should return `Err(EACCES)`.
            child.process.do_setpgid(pgid)
        }
    }

    /// Session id (pid of the session leader).
    pub fn sid(&self) -> u32 {
        self.inner.lock().session.sid
    }

    /// Process group id (pid of the group leader).
    pub fn pgid(&self) -> u32 {
        self.inner.lock().pgroup.pgid
    }

    /// Strong handle to the owning session.
    pub fn session(&self) -> Arc<Session> {
        self.inner.lock().session.clone()
    }

    /// Strong handle to the owning process group.
    pub fn pgroup(&self) -> Arc<ProcessGroup> {
        self.inner.lock().pgroup.clone()
    }
}
impl UserDescriptorFlags {
    /// Bit 0: segment is 32-bit.
    fn is_32bit_segment(&self) -> bool {
        self.0 & 0b1 != 0
    }

    /// Bits 1-2: segment contents type.
    fn contents(&self) -> u32 {
        self.0 & 0b110
    }

    /// Bit 3: read/execute-only (not writable).
    fn is_read_exec_only(&self) -> bool {
        self.0 & 0b1000 != 0
    }

    /// Bit 4: limit is counted in 4 KiB pages instead of bytes.
    fn is_limit_in_pages(&self) -> bool {
        self.0 & 0b10000 != 0
    }

    /// Bit 5 is "seg not present", so presence is the bit being *clear*.
    fn is_present(&self) -> bool {
        self.0 & 0b100000 == 0
    }

    /// Bit 6: entry is usable.
    fn is_usable(&self) -> bool {
        self.0 & 0b1000000 != 0
    }
}
impl Thread {
    /// Bootstrap constructor for the init/idle threads: tid = pid, fresh
    /// file table and fs context, state starts as `Preparing`.
    fn new_for_init(name: Arc<[u8]>, process: &Arc<Process>) -> Arc<Self> {
        let thread = Arc::new(Self {
            tid: process.pid,
            process: process.clone(),
            files: FileArray::new_for_init(),
            fs_context: FsContext::new_for_init(),
            signal_list: SignalList::new(),
            kstack: RefCell::new(KernelStack::new()),
            state: Spin::new(ThreadState::Preparing),
            inner: Spin::new(ThreadInner {
                name,
                tls_desc32: None,
                tls_base: None,
                set_child_tid: 0,
            }),
        });
        process.add_thread(&thread);
        thread
    }

    /// Fork: clone `other` into a brand-new process (tid = new pid), copying
    /// files, fs context, TLS settings and signal dispositions; pending
    /// signals are *not* inherited.
    pub fn new_cloned(other: &Self) -> Arc<Self> {
        let process = Process::new_cloned(&other.process);
        let other_state = other.state.lock();
        let other_inner = other.inner.lock();
        // Only a running thread may fork itself.
        assert!(matches!(*other_state, ThreadState::Running));
        let signal_list = other.signal_list.clone();
        signal_list.clear_pending();
        let thread = Arc::new(Self {
            tid: process.pid,
            process: process.clone(),
            files: FileArray::new_cloned(&other.files),
            fs_context: FsContext::new_cloned(&other.fs_context),
            signal_list,
            kstack: RefCell::new(KernelStack::new()),
            state: Spin::new(ThreadState::Preparing),
            inner: Spin::new(ThreadInner {
                name: other_inner.name.clone(),
                tls_desc32: other_inner.tls_desc32,
                tls_base: other_inner.tls_base,
                set_child_tid: other_inner.set_child_tid,
            }),
        });
        ProcessList::get().add_thread(&thread);
        other.process.add_child(&thread);
        process.add_thread(&thread);
        thread
    }

    /// The thread currently executing on this CPU (from the scheduler).
    pub fn current<'lt>() -> &'lt Arc<Self> {
        Scheduler::current()
    }

    /// Stop the current thread (job control): notify the parent, then go to
    /// uninterruptible sleep until explicitly continued.
    pub fn do_stop(self: &Arc<Self>, signal: Signal) {
        if let Some(parent) = self.process.parent() {
            parent.wait_list.notify(WaitObject {
                pid: self.process.pid,
                code: WaitType::Stopped(signal),
            });
        }
        preempt::disable();
        // `SIGSTOP` can only be waken up by `SIGCONT` or `SIGKILL`.
        // SAFETY: Preempt disabled above.
        Scheduler::get().lock().usleep(self);
        Scheduler::schedule();
    }

    /// Report a SIGCONT-style resume to the parent's wait list.
    pub fn do_continue(self: &Arc<Self>) {
        if let Some(parent) = self.process.parent() {
            parent.wait_list.notify(WaitObject {
                pid: self.process.pid,
                code: WaitType::Continued,
            });
        }
    }

    /// Queue `signal` on this thread; if the signal list asks us to, wake the
    /// thread from interruptible (`iwake`) or uninterruptible (`uwake`) sleep.
    pub fn raise(self: &Arc<Thread>, signal: Signal) -> RaiseResult {
        match self.signal_list.raise(signal) {
            RaiseResult::ShouldIWakeUp => {
                Scheduler::get().lock_irq().iwake(self);
                RaiseResult::Finished
            }
            RaiseResult::ShouldUWakeUp => {
                Scheduler::get().lock_irq().uwake(self);
                RaiseResult::Finished
            }
            result => result,
        }
    }

    /// Install this thread's 32-bit TLS state on the current CPU: write the
    /// saved GDT descriptor into entry 7 and load the TLS base into
    /// IA32_KERNEL_GS_BASE.
    pub fn load_thread_area32(&self) {
        const IA32_KERNEL_GS_BASE: u32 = 0xc0000102;
        let inner = self.inner.lock();
        if let Some(desc32) = inner.tls_desc32 {
            // SAFETY: `tls32` should be per cpu.
            // NOTE(review): `0x0 + 7 * 8` is GDT entry 7 at a base of 0 —
            // presumably the identity-mapped per-CPU GDT; confirm.
            let tls32_addr = CachedPP::new(0x0 + 7 * 8);
            tls32_addr.as_mut::<u64>().clone_from(&desc32);
        }
        if let Some(base) = inner.tls_base {
            arch::x86_64::task::wrmsr(IA32_KERNEL_GS_BASE, base);
        }
    }

    /// Handle a `set_thread_area`-style request: either clear the TLS area
    /// (read-exec-only + not-present encodes "free the entry") or build and
    /// store a GDT descriptor for entry 7, writing the entry number back to
    /// the caller.
    pub fn set_thread_area(&self, desc: &mut UserDescriptor) -> KResult<()> {
        let mut inner = self.inner.lock();
        // Clear the TLS area if it is not present.
        if desc.flags.is_read_exec_only() && !desc.flags.is_present() {
            if desc.limit == 0 || desc.base == 0 {
                return Ok(());
            }
            let len = if desc.flags.is_limit_in_pages() {
                (desc.limit as usize) << 12
            } else {
                desc.limit as usize
            };
            CheckedUserPointer::new(desc.base as _, len)?.zero()?;
            return Ok(());
        }
        // We only support "allocate an entry for me" (entry == -1) with a
        // 32-bit segment.
        if desc.entry != u32::MAX || !desc.flags.is_32bit_segment() {
            return Err(EINVAL);
        }
        desc.entry = 7;
        // Assemble an x86 segment descriptor:
        // limit[15:0], base[23:0] << 16, type/access bits, limit[19:16],
        // granularity, base[31:24].
        let mut desc32 = desc.limit as u64 & 0xffff;
        desc32 |= (desc.base as u64 & 0xffffff) << 16;
        desc32 |= 0x4_0_f2_000000_0000;
        desc32 |= (desc.limit as u64 & 0xf_0000) << (48 - 16);
        if desc.flags.is_limit_in_pages() {
            // Granularity bit: limit counted in 4 KiB pages.
            desc32 |= 1 << 55;
        }
        desc32 |= (desc.base as u64 & 0xff_000000) << (56 - 24);
        inner.tls_desc32 = Some(desc32);
        inner.tls_base = Some(desc.base as u64);
        Ok(())
    }

    /// This function is used to prepare the kernel stack for the thread in `Preparing` state.
    ///
    /// # Safety
    /// Calling this function on a thread that is not in `Preparing` state will panic.
    pub fn prepare_kernel_stack<F: FnOnce(&mut KernelStack)>(&self, func: F) {
        let mut state = self.state.lock();
        assert!(matches!(*state, ThreadState::Preparing));
        // SAFETY: We are in the preparing state with `state` locked.
        func(&mut self.kstack.borrow_mut());
        // Enter USleep state. Await for the thread to be scheduled manually.
        *state = ThreadState::USleep;
    }

    /// Point the CPU's interrupt stack at this thread's kernel stack.
    pub fn load_interrupt_stack(&self) {
        self.kstack.borrow().load_interrupt_stack();
    }

    /// Get a pointer to `self.sp` so we can use it in `context_switch()`.
    ///
    /// # Safety
    /// Saving the pointer somewhere or passing it to a function that will use it is UB.
    pub unsafe fn get_sp_ptr(&self) -> *mut usize {
        self.kstack.borrow().get_sp_ptr()
    }

    /// Replace the thread's name.
    pub fn set_name(&self, name: Arc<[u8]>) {
        self.inner.lock().name = name;
    }

    /// A shared handle to the thread's name bytes (O(1) clone).
    pub fn get_name(&self) -> Arc<[u8]> {
        self.inner.lock().name.clone()
    }
}
// TODO: Maybe we can find a better way instead of using `RefCell` for `KernelStack`?
// SAFETY(review): `kstack: RefCell<KernelStack>` is not `Sync` by itself; this impl
// asserts the invariant documented on `Thread::kstack` — the stack is only borrowed
// when the thread is neither running nor sleeping, so no concurrent borrows occur.
// Confirm all call sites (`prepare_kernel_stack`, `load_interrupt_stack`,
// `get_sp_ptr`) uphold this.
unsafe impl Sync for Thread {}
impl WaitList {
    /// Create an empty wait list owned by `process` (weak back-reference).
    pub fn new(process: Weak<Process>) -> Self {
        Self {
            wait_procs: Spin::new(VecDeque::new()),
            cv_wait_procs: CondVar::new(),
            process,
        }
    }

    /// Queue one notification, wake any waiters and raise SIGCHLD on the
    /// owning process.
    pub fn notify(&self, wait: WaitObject) {
        let mut wait_procs = self.wait_procs.lock();
        wait_procs.push_back(wait);
        self.cv_wait_procs.notify_all();
        self.process
            .upgrade()
            .expect("`process` must be valid if we are using `WaitList`")
            .raise(Signal::SIGCHLD);
    }

    /// Notify some processes in batch. The process is waken up if we have really notified
    /// some processes.
    ///
    /// # Lock
    /// This function locks the `wait_procs` and returns a `NotifyBatch` that
    /// will unlock it on dropped.
    pub fn notify_batch(&self) -> NotifyBatch {
        NotifyBatch {
            wait_procs: self.wait_procs.lock(),
            cv: &self.cv_wait_procs,
            needs_notify: false,
            process: &self.process,
        }
    }

    /// Lock the queue and return a guard that drains exit-type notifications
    /// (holds the lock until dropped).
    pub fn drain_exited(&self) -> DrainExited {
        DrainExited {
            wait_procs: self.wait_procs.lock(),
        }
    }

    /// Lock the queue and return a waiter's filtered view of it
    /// (holds the lock until dropped).
    pub fn entry(&self, want_stop: bool, want_continue: bool) -> Entry {
        Entry {
            wait_procs: self.wait_procs.lock(),
            cv: &self.cv_wait_procs,
            want_stop,
            want_continue,
        }
    }
}
  873. impl Entry<'_, '_> {
  874. pub fn get(&mut self) -> Option<WaitObject> {
  875. if let Some(idx) = self
  876. .wait_procs
  877. .iter()
  878. .enumerate()
  879. .filter(|(_, item)| {
  880. if item.stopped().is_some() {
  881. self.want_stop
  882. } else if item.is_continue() {
  883. self.want_continue
  884. } else {
  885. true
  886. }
  887. })
  888. .map(|(idx, _)| idx)
  889. .next()
  890. {
  891. Some(self.wait_procs.remove(idx).unwrap())
  892. } else {
  893. None
  894. }
  895. }
  896. pub fn wait(&mut self) -> KResult<()> {
  897. self.cv.wait(&mut self.wait_procs);
  898. if Thread::current().signal_list.has_pending_signal() {
  899. return Err(EINTR);
  900. }
  901. Ok(())
  902. }
  903. }
impl DrainExited<'_> {
    /// Drain the queue, yielding only exit/termination events.
    pub fn into_iter(&mut self) -> impl Iterator<Item = WaitObject> + '_ {
        // We don't propagate stop and continue to the new parent.
        self.wait_procs
            .drain(..)
            .filter(|item| item.stopped().is_none() && !item.is_continue())
    }
}
  912. impl NotifyBatch<'_, '_, '_> {
  913. pub fn notify(&mut self, wait: WaitObject) {
  914. self.wait_procs.push_back(wait);
  915. }
  916. /// Finish the batch and notify all if we have notified some processes.
  917. pub fn finish(self) {}
  918. }
impl Drop for NotifyBatch<'_, '_, '_> {
    /// If anything was queued, wake waiters and raise SIGCHLD exactly once.
    fn drop(&mut self) {
        if self.needs_notify {
            self.cv.notify_all();
            self.process
                .upgrade()
                .expect("`process` must be valid if we are using `WaitList`")
                .raise(Signal::SIGCHLD);
        }
    }
}
impl Process {
    /// The parent process, or `None` for init and kernel threads.
    pub fn parent(&self) -> Option<Arc<Process>> {
        self.inner.lock().parent.clone()
    }
}
/// Finish multitasking bring-up on the boot CPU: force the lazy global
/// process list to initialize (which creates init/idle), then install the
/// current thread's interrupt stack and page table.
pub fn init_multitasking() {
    // Lazy init: first access constructs `GLOBAL_PROC_LIST` and the
    // init/idle threads; the assert proves it happened.
    assert!(ProcessList::get().try_find_thread(1).is_some());
    Thread::current().load_interrupt_stack();
    Thread::current().process.mm_list.switch_page_table();
}