- use core::{
- cell::RefCell,
- cmp,
- sync::atomic::{self, AtomicU32},
- };
- use alloc::{
- collections::{btree_map::BTreeMap, vec_deque::VecDeque},
- sync::{Arc, Weak},
- };
- use bindings::{ECHILD, EINTR, EINVAL, EPERM, ESRCH};
- use lazy_static::lazy_static;
- use crate::{
- kernel::{
- mem::{
- phys::{CachedPP, PhysPtr},
- MMList,
- },
- terminal::Terminal,
- user::dataflow::CheckedUserPointer,
- vfs::{filearray::FileArray, FsContext},
- },
- prelude::*,
- sync::{preempt, CondVar, SpinGuard},
- };
- use super::{
- signal::{RaiseResult, Signal, SignalList},
- KernelStack, Scheduler,
- };
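- /// Scheduling state of a thread.
- ///
- /// `ISleep` is an interruptible sleep that signals may wake, while `USleep` is an
- /// uninterruptible sleep that ends only on an explicit wake-up (e.g. `SIGCONT`
- /// after a stop, or when a `Preparing` thread is made runnable).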
- #[derive(Debug, Clone, Copy, PartialEq, Eq)]
- pub enum ThreadState {
- Preparing,
- Running,
- Ready,
- Zombie,
- ISleep,
- USleep,
- }
- #[derive(Debug, Clone, Copy, PartialEq, Eq)]
- pub enum WaitType {
- Exited(u32),
- Signaled(Signal),
- Stopped(Signal),
- Continued,
- }
- #[derive(Debug, Clone, Copy)]
- pub struct WaitObject {
- pub pid: u32,
- pub code: WaitType,
- }
- impl WaitType {
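- /// Encode the wait result in the POSIX `wstatus` format returned by `wait(2)`:
- /// the exit code goes in bits 8-15, a terminating signal in the low 7 bits
- /// (with 0x80 or'ed in when a core dump was produced), stopped children are
- /// reported as 0x7f with the stopping signal in bits 8-15, and continued
- /// children as 0xffff.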
- pub fn to_wstatus(self) -> u32 {
- match self {
- WaitType::Exited(status) => (status & 0xff) << 8,
- WaitType::Signaled(signal) if signal.is_coredump() => signal.to_signum() | 0x80,
- WaitType::Signaled(signal) => signal.to_signum(),
- WaitType::Stopped(signal) => 0x7f | (signal.to_signum() << 8),
- WaitType::Continued => 0xffff,
- }
- }
- }
- impl WaitObject {
- pub fn stopped(&self) -> Option<Signal> {
- if let WaitType::Stopped(signal) = self.code {
- Some(signal)
- } else {
- None
- }
- }
- pub fn is_continue(&self) -> bool {
- matches!(self.code, WaitType::Continued)
- }
- }
- #[derive(Debug)]
- struct SessionInner {
- /// Foreground process group
- foreground: Weak<ProcessGroup>,
- control_terminal: Option<Arc<Terminal>>,
- groups: BTreeMap<u32, Weak<ProcessGroup>>,
- }
- #[derive(Debug)]
- pub struct Session {
- sid: u32,
- leader: Weak<Process>,
- inner: Spin<SessionInner>,
- }
- #[derive(Debug)]
- pub struct ProcessGroup {
- pgid: u32,
- leader: Weak<Process>,
- session: Weak<Session>,
- processes: Spin<BTreeMap<u32, Weak<Process>>>,
- }
- #[derive(Debug)]
- struct ProcessInner {
- /// Parent process
- ///
- /// The parent process must stay valid for the entire lifetime of this process.
- /// The only cases where the parent may be `None` are the init process and
- /// kernel threads.
- parent: Option<Arc<Process>>,
- /// Process group
- pgroup: Arc<ProcessGroup>,
- /// Session
- session: Arc<Session>,
- /// Children list
- children: BTreeMap<u32, Weak<Thread>>,
- /// Thread list
- threads: BTreeMap<u32, Weak<Thread>>,
- }
- #[derive(Debug)]
- pub struct WaitList {
- wait_procs: Spin<VecDeque<WaitObject>>,
- cv_wait_procs: CondVar,
- process: Weak<Process>,
- }
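- /// Queues several `WaitObject`s under a single `wait_procs` lock; the waiters
- /// are notified (and `SIGCHLD` is raised on the owning process) once, when the
- /// batch is dropped.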
- pub struct NotifyBatch<'waitlist, 'cv, 'process> {
- wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
- cv: &'cv CondVar,
- process: &'process Weak<Process>,
- needs_notify: bool,
- }
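- /// A waiter's view of the wait list: `get` pops the next matching `WaitObject`,
- /// honoring the `want_stop` and `want_continue` filters.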
- pub struct Entry<'waitlist, 'cv> {
- wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
- cv: &'cv CondVar,
- want_stop: bool,
- want_continue: bool,
- }
- pub struct DrainExited<'waitlist> {
- wait_procs: SpinGuard<'waitlist, VecDeque<WaitObject>>,
- }
- #[derive(Debug)]
- pub struct Process {
- /// Process id
- ///
- /// This should never change during the life of the process.
- pub pid: u32,
- pub wait_list: WaitList,
- pub mm_list: Arc<MMList>,
- inner: Spin<ProcessInner>,
- }
- impl PartialOrd for Process {
- fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
- self.pid.partial_cmp(&other.pid)
- }
- }
- impl Ord for Process {
- fn cmp(&self, other: &Self) -> cmp::Ordering {
- self.pid.cmp(&other.pid)
- }
- }
- impl PartialEq for Process {
- fn eq(&self, other: &Self) -> bool {
- self.pid == other.pid
- }
- }
- impl Eq for Process {}
- #[derive(Debug)]
- struct ThreadInner {
- /// Thread name
- name: Arc<[u8]>,
- /// 32-bit TLS segment descriptor for this thread
- tls_desc32: Option<u64>,
- tls_base: Option<u64>,
- /// User pointer to which the child thread's tid is written when the child
- /// thread returns to user space.
- set_child_tid: usize,
- }
- pub struct Thread {
- pub tid: u32,
- pub process: Arc<Process>,
- pub files: Arc<FileArray>,
- pub fs_context: Arc<FsContext>,
- pub signal_list: SignalList,
- /// Thread state for scheduler use.
- pub state: Spin<ThreadState>,
- /// Kernel stack
- /// Never access this directly.
- ///
- /// The kernel stack may only be touched when the thread is neither running nor
- /// sleeping, i.e. it sits in the ready queue and will return to the `schedule` context.
- kstack: RefCell<KernelStack>,
- inner: Spin<ThreadInner>,
- }
- impl PartialOrd for Thread {
- fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
- self.tid.partial_cmp(&other.tid)
- }
- }
- impl Ord for Thread {
- fn cmp(&self, other: &Self) -> cmp::Ordering {
- self.tid.cmp(&other.tid)
- }
- }
- impl PartialEq for Thread {
- fn eq(&self, other: &Self) -> bool {
- self.tid == other.tid
- }
- }
- impl Eq for Thread {}
- #[repr(transparent)]
- #[derive(Debug, Clone, Copy)]
- pub struct UserDescriptorFlags(u32);
- #[repr(C)]
- #[derive(Debug, Clone, Copy)]
- pub struct UserDescriptor {
- entry: u32,
- base: u32,
- limit: u32,
- flags: UserDescriptorFlags,
- }
- pub struct ProcessList {
- init: Arc<Process>,
- threads: Spin<BTreeMap<u32, Arc<Thread>>>,
- processes: Spin<BTreeMap<u32, Weak<Process>>>,
- pgroups: Spin<BTreeMap<u32, Weak<ProcessGroup>>>,
- sessions: Spin<BTreeMap<u32, Weak<Session>>>,
- }
- impl Session {
- fn new(sid: u32, leader: Weak<Process>) -> Arc<Self> {
- Arc::new(Self {
- sid,
- leader,
- inner: Spin::new(SessionInner {
- foreground: Weak::new(),
- control_terminal: None,
- groups: BTreeMap::new(),
- }),
- })
- }
- fn add_member(&self, pgroup: &Arc<ProcessGroup>) {
- self.inner
- .lock()
- .groups
- .insert(pgroup.pgid, Arc::downgrade(pgroup));
- }
- pub fn foreground_pgid(&self) -> Option<u32> {
- self.inner.lock().foreground.upgrade().map(|fg| fg.pgid)
- }
- /// Set the foreground process group.
- pub fn set_foreground_pgid(&self, pgid: u32) -> KResult<()> {
- let mut inner = self.inner.lock();
- let group = inner.groups.get(&pgid);
- if let Some(group) = group {
- inner.foreground = group.clone();
- Ok(())
- } else {
- // TODO!!!: The process group is not part of this session; check whether it
- // exists at all before deciding on the error code.
- Err(EPERM)
- }
- }
- /// Only session leaders can set the control terminal.
- /// Make sure we've checked that before calling this function.
- pub fn set_control_terminal(
- self: &Arc<Self>,
- terminal: &Arc<Terminal>,
- forced: bool,
- ) -> KResult<()> {
- let mut inner = self.inner.lock();
- if inner.control_terminal.is_some() {
- if let Some(session) = terminal.session().as_ref() {
- if session.sid == self.sid {
- return Ok(());
- }
- }
- return Err(EPERM);
- }
- terminal.set_session(self, forced)?;
- inner.control_terminal = Some(terminal.clone());
- inner.foreground = Arc::downgrade(&Thread::current().process.pgroup());
- Ok(())
- }
- /// Drop the control terminal reference inside the session.
- /// DO NOT TOUCH THE TERMINAL'S SESSION FIELD.
- pub fn drop_control_terminal(&self) -> Option<Arc<Terminal>> {
- let mut inner = self.inner.lock();
- inner.foreground = Weak::new();
- inner.control_terminal.take()
- }
- pub fn raise_foreground(&self, signal: Signal) {
- if let Some(fg) = self.inner.lock().foreground.upgrade() {
- fg.raise(signal);
- }
- }
- }
- impl ProcessGroup {
- fn new_for_init(pgid: u32, leader: Weak<Process>, session: Weak<Session>) -> Arc<Self> {
- Arc::new(Self {
- pgid,
- leader: leader.clone(),
- session,
- processes: Spin::new(BTreeMap::from([(pgid, leader)])),
- })
- }
- fn new(leader: &Arc<Process>, session: &Arc<Session>) -> Arc<Self> {
- let pgroup = Arc::new(Self {
- pgid: leader.pid,
- leader: Arc::downgrade(leader),
- session: Arc::downgrade(session),
- processes: Spin::new(BTreeMap::from([(leader.pid, Arc::downgrade(leader))])),
- });
- session.add_member(&pgroup);
- pgroup
- }
- }
- impl Drop for Thread {
- fn drop(&mut self) {
- let mut process = self.process.inner.lock();
- process.threads.remove(&self.tid);
- if let Some(parent) = &process.parent {
- parent.inner.lock().children.remove(&self.tid);
- }
- }
- }
- impl Drop for Process {
- fn drop(&mut self) {
- let inner = self.inner.lock();
- assert!(inner.children.is_empty());
- inner.pgroup.processes.lock().remove(&self.pid);
- ProcessList::get().processes.lock().remove(&self.pid);
- }
- }
- impl Drop for ProcessGroup {
- fn drop(&mut self) {
- if let Some(session) = self.session.upgrade() {
- session.inner.lock().groups.remove(&self.pgid);
- }
- }
- }
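- // The global process list is built lazily on first access: it creates the init
- // process (pid 1) with its kinit thread and the idle process (pid 0) with its
- // idle thread, and registers them with the scheduler.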
- lazy_static! {
- static ref GLOBAL_PROC_LIST: ProcessList = {
- let init_process = Process::new_for_init(1, None);
- let init_thread = Thread::new_for_init(b"[kernel kinit]".as_slice().into(), &init_process);
- Scheduler::set_current(init_thread.clone());
- let idle_process = Process::new_for_init(0, None);
- let idle_thread =
- Thread::new_for_init(b"[kernel idle#BS]".as_slice().into(), &idle_process);
- Scheduler::set_idle(idle_thread.clone());
- let init_session_weak = Arc::downgrade(&init_process.inner.lock().session);
- let init_pgroup_weak = Arc::downgrade(&init_process.inner.lock().pgroup);
- ProcessList {
- sessions: Spin::new(BTreeMap::from([(1, init_session_weak)])),
- pgroups: Spin::new(BTreeMap::from([(1, init_pgroup_weak)])),
- threads: Spin::new(BTreeMap::from([
- (1, init_thread.clone()),
- (0, idle_thread.clone()),
- ])),
- processes: Spin::new(BTreeMap::from([
- (1, Arc::downgrade(&init_process)),
- (0, Arc::downgrade(&idle_process)),
- ])),
- init: init_process,
- }
- };
- }
- impl ProcessList {
- pub fn get() -> &'static Self {
- &GLOBAL_PROC_LIST
- }
- pub fn add_session(&self, session: &Arc<Session>) {
- self.sessions
- .lock()
- .insert(session.sid, Arc::downgrade(session));
- }
- pub fn add_pgroup(&self, pgroup: &Arc<ProcessGroup>) {
- self.pgroups
- .lock()
- .insert(pgroup.pgid, Arc::downgrade(pgroup));
- }
- pub fn add_process(&self, process: &Arc<Process>) {
- self.processes
- .lock()
- .insert(process.pid, Arc::downgrade(process));
- }
- pub fn add_thread(&self, thread: &Arc<Thread>) {
- self.threads.lock().insert(thread.tid, thread.clone());
- }
- pub fn kill_current(signal: Signal) -> ! {
- ProcessList::get().do_kill_process(&Thread::current().process, WaitType::Signaled(signal));
- Scheduler::schedule_noreturn()
- }
- // TODO!!!!!!: Reconsider this
- fn remove(&self, tid: u32) {
- if self.threads.lock().remove(&tid).is_none() {
- panic!("Thread {} not found", tid);
- }
- }
- pub fn try_find_process(&self, pid: u32) -> Option<Arc<Process>> {
- self.processes.lock().get(&pid).and_then(Weak::upgrade)
- }
- pub fn try_find_thread(&self, tid: u32) -> Option<Arc<Thread>> {
- self.threads.lock().get(&tid).cloned()
- }
- pub fn try_find_pgroup(&self, pgid: u32) -> Option<Arc<ProcessGroup>> {
- self.pgroups.lock().get(&pgid).and_then(Weak::upgrade)
- }
- pub fn try_find_session(&self, sid: u32) -> Option<Arc<Session>> {
- self.sessions.lock().get(&sid).and_then(Weak::upgrade)
- }
- /// Make the process a zombie and notify the parent.
- pub fn do_kill_process(&self, process: &Arc<Process>, status: WaitType) {
- if &self.init == process {
- panic!("init exited");
- }
- preempt::disable();
- let mut inner = process.inner.lock();
- // TODO!!!!!!: When we are killing multiple threads, we need to wait until all
- // the threads have stopped before proceeding.
- for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
- assert!(&thread == Thread::current());
- Scheduler::get().lock().set_zombie(&thread);
- thread.files.close_all();
- }
- // If we are the session leader, we should drop the control terminal.
- if inner.session.sid == process.pid {
- if let Some(terminal) = inner.session.drop_control_terminal() {
- terminal.drop_session();
- }
- }
- // Unmap all user memory areas
- process.mm_list.clear_user();
- // Make children orphans (adopted by init)
- {
- let mut init_inner = self.init.inner.lock();
- inner.children.retain(|_, child| {
- let child = child.upgrade().unwrap();
- let mut child_inner = child.process.inner.lock();
- if child_inner.parent.as_ref().unwrap() == &self.init {
- return false;
- }
- child_inner.parent = Some(self.init.clone());
- init_inner.add_child(&child);
- false
- });
- }
- let mut init_notify = self.init.wait_list.notify_batch();
- process
- .wait_list
- .drain_exited()
- .into_iter()
- .for_each(|item| init_notify.notify(item));
- init_notify.finish();
- inner.parent.as_ref().unwrap().wait_list.notify(WaitObject {
- pid: process.pid,
- code: status,
- });
- preempt::enable();
- }
- }
- impl ProcessGroup {
- fn add_member(&self, process: &Arc<Process>) {
- self.processes
- .lock()
- .insert(process.pid, Arc::downgrade(process));
- }
- fn remove_member(&self, pid: u32) {
- self.processes.lock().remove(&pid);
- }
- pub fn raise(&self, signal: Signal) {
- let processes = self.processes.lock();
- for process in processes.values().map(|p| p.upgrade().unwrap()) {
- process.raise(signal);
- }
- }
- }
- impl ProcessInner {
- fn add_child(&mut self, child: &Arc<Thread>) {
- self.children.insert(child.tid, Arc::downgrade(child));
- }
- fn add_thread(&mut self, thread: &Arc<Thread>) {
- self.threads.insert(thread.tid, Arc::downgrade(thread));
- }
- }
- /// PIDs 0 and 1 are created manually, so dynamically allocated pids start from 2.
- static NEXT_PID: AtomicU32 = AtomicU32::new(2);
- impl Process {
- fn alloc_pid() -> u32 {
- NEXT_PID.fetch_add(1, atomic::Ordering::Relaxed)
- }
- pub fn new_cloned(other: &Arc<Self>) -> Arc<Self> {
- let other_inner = other.inner.lock();
- let process = Arc::new_cyclic(|weak| Self {
- pid: Self::alloc_pid(),
- wait_list: WaitList::new(weak.clone()),
- mm_list: MMList::new_cloned(&other.mm_list),
- inner: Spin::new(ProcessInner {
- pgroup: other_inner.pgroup.clone(),
- session: other_inner.session.clone(),
- children: BTreeMap::new(),
- threads: BTreeMap::new(),
- parent: Some(other.clone()),
- }),
- });
- ProcessList::get().add_process(&process);
- other_inner.pgroup.add_member(&process);
- process
- }
- fn new_for_init(pid: u32, parent: Option<Arc<Self>>) -> Arc<Self> {
- let process = Arc::new_cyclic(|weak| {
- let session = Session::new(pid, weak.clone());
- let pgroup = ProcessGroup::new_for_init(pid, weak.clone(), Arc::downgrade(&session));
- session.add_member(&pgroup);
- Self {
- pid,
- wait_list: WaitList::new(weak.clone()),
- mm_list: MMList::new(),
- inner: Spin::new(ProcessInner {
- parent,
- pgroup,
- session,
- children: BTreeMap::new(),
- threads: BTreeMap::new(),
- }),
- }
- });
- process.inner.lock().pgroup.add_member(&process);
- process
- }
- pub fn raise(&self, signal: Signal) {
- let inner = self.inner.lock();
- for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
- if let RaiseResult::Finished = thread.raise(signal) {
- break;
- }
- }
- }
- fn add_child(&self, child: &Arc<Thread>) {
- self.inner.lock().add_child(child);
- }
- fn add_thread(&self, thread: &Arc<Thread>) {
- self.inner.lock().add_thread(thread);
- }
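- /// Wait for a state change in one of our children.
- ///
- /// Returns `Err(ECHILD)` if there are no children left, `Ok(None)` if `no_block`
- /// is set and nothing has changed yet, and the reaped `WaitObject` otherwise.
- /// Exited or killed children are removed from the global thread list; stop and
- /// continue reports leave the child in place.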
- pub fn wait(
- &self,
- no_block: bool,
- trace_stop: bool,
- trace_continue: bool,
- ) -> KResult<Option<WaitObject>> {
- let mut waits = self.wait_list.entry(trace_stop, trace_continue);
- let wait_object = loop {
- if let Some(object) = waits.get() {
- break object;
- }
- if self.inner.lock().children.is_empty() {
- return Err(ECHILD);
- }
- if no_block {
- return Ok(None);
- }
- waits.wait()?;
- };
- if wait_object.stopped().is_none() && !wait_object.is_continue() {
- ProcessList::get().remove(wait_object.pid);
- }
- Ok(Some(wait_object))
- }
- /// Create a new session for the process.
- pub fn setsid(self: &Arc<Self>) -> KResult<u32> {
- let mut inner = self.inner.lock();
- // If a session with the same sid as our pid already exists, we cannot create a
- // new one. Otherwise, per the standard, we become the leader and sole member of
- // both a new process group and a new session.
- if ProcessList::get().try_find_session(self.pid).is_some() {
- return Err(EPERM);
- }
- inner.session = Session::new(self.pid, Arc::downgrade(self));
- ProcessList::get().add_session(&inner.session);
- inner.pgroup.remove_member(self.pid);
- inner.pgroup = ProcessGroup::new(self, &inner.session);
- ProcessList::get().add_pgroup(&inner.pgroup);
- Ok(inner.pgroup.pgid)
- }
- /// Set the process group id of the process to `pgid`.
- ///
- /// This function does the actual work.
- fn do_setpgid(self: &Arc<Self>, pgid: u32) -> KResult<()> {
- let mut inner = self.inner.lock();
- // Changing the process group of a session leader is not allowed.
- if inner.session.sid == self.pid {
- return Err(EPERM);
- }
- // Move us to an existing process group.
- if let Some(pgroup) = ProcessList::get().try_find_pgroup(pgid) {
- // Moving the process to a process group in a different session is not allowed.
- if pgroup.session.upgrade().unwrap().sid != inner.session.sid {
- return Err(EPERM);
- }
- // If we are already in the process group, we are done.
- if pgroup.pgid == inner.pgroup.pgid {
- return Ok(());
- }
- inner.pgroup.remove_member(self.pid);
- inner.pgroup = pgroup;
- } else {
- // Create a new process group only if `pgid` matches our `pid`.
- if pgid != self.pid {
- return Err(EPERM);
- }
- inner.pgroup.remove_member(self.pid);
- inner.pgroup = ProcessGroup::new(self, &inner.session);
- ProcessList::get().add_pgroup(&inner.pgroup);
- }
- Ok(())
- }
- /// Set the process group id of the process `pid` to `pgid`.
- ///
- /// This function should be called on the process that issued the syscall in order to do
- /// permission checks.
- pub fn setpgid(self: &Arc<Self>, pid: u32, pgid: u32) -> KResult<()> {
- // We may set pgid of either the calling process or a child process.
- if pid == self.pid {
- self.do_setpgid(pgid)
- } else {
- let child = {
- // If `pid` refers to one of our children, its thread group leader must be
- // in our children list.
- let inner = self.inner.lock();
- let child = {
- let child = inner.children.get(&pid);
- child.and_then(Weak::upgrade).ok_or(ESRCH)?
- };
- // Changing the process group of a child is only allowed
- // if we are in the same session.
- if child.process.sid() != inner.session.sid {
- return Err(EPERM);
- }
- child
- };
- // TODO: Check whether we, as a child, have already performed an `execve`.
- // If so, we should return `Err(EACCES)`.
- child.process.do_setpgid(pgid)
- }
- }
- pub fn sid(&self) -> u32 {
- self.inner.lock().session.sid
- }
- pub fn pgid(&self) -> u32 {
- self.inner.lock().pgroup.pgid
- }
- pub fn session(&self) -> Arc<Session> {
- self.inner.lock().session.clone()
- }
- pub fn pgroup(&self) -> Arc<ProcessGroup> {
- self.inner.lock().pgroup.clone()
- }
- }
- impl UserDescriptorFlags {
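- // Bit layout matches the `flags` field of Linux's `struct user_desc`: bit 0
- // seg_32bit, bits 1-2 contents, bit 3 read_exec_only, bit 4 limit_in_pages,
- // bit 5 seg_not_present, bit 6 useable.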
- fn is_32bit_segment(&self) -> bool {
- self.0 & 0b1 != 0
- }
- fn contents(&self) -> u32 {
- self.0 & 0b110
- }
- fn is_read_exec_only(&self) -> bool {
- self.0 & 0b1000 != 0
- }
- fn is_limit_in_pages(&self) -> bool {
- self.0 & 0b10000 != 0
- }
- fn is_present(&self) -> bool {
- self.0 & 0b100000 == 0
- }
- fn is_usable(&self) -> bool {
- self.0 & 0b1000000 != 0
- }
- }
- impl Thread {
- fn new_for_init(name: Arc<[u8]>, process: &Arc<Process>) -> Arc<Self> {
- let thread = Arc::new(Self {
- tid: process.pid,
- process: process.clone(),
- files: FileArray::new_for_init(),
- fs_context: FsContext::new_for_init(),
- signal_list: SignalList::new(),
- kstack: RefCell::new(KernelStack::new()),
- state: Spin::new(ThreadState::Preparing),
- inner: Spin::new(ThreadInner {
- name,
- tls_desc32: None,
- tls_base: None,
- set_child_tid: 0,
- }),
- });
- process.add_thread(&thread);
- thread
- }
- pub fn new_cloned(other: &Self) -> Arc<Self> {
- let process = Process::new_cloned(&other.process);
- let other_state = other.state.lock();
- let other_inner = other.inner.lock();
- assert!(matches!(*other_state, ThreadState::Running));
- let signal_list = other.signal_list.clone();
- signal_list.clear_pending();
- let thread = Arc::new(Self {
- tid: process.pid,
- process: process.clone(),
- files: FileArray::new_cloned(&other.files),
- fs_context: FsContext::new_cloned(&other.fs_context),
- signal_list,
- kstack: RefCell::new(KernelStack::new()),
- state: Spin::new(ThreadState::Preparing),
- inner: Spin::new(ThreadInner {
- name: other_inner.name.clone(),
- tls_desc32: other_inner.tls_desc32,
- tls_base: other_inner.tls_base,
- set_child_tid: other_inner.set_child_tid,
- }),
- });
- ProcessList::get().add_thread(&thread);
- other.process.add_child(&thread);
- process.add_thread(&thread);
- thread
- }
- pub fn current<'lt>() -> &'lt Arc<Self> {
- Scheduler::current()
- }
- pub fn do_stop(self: &Arc<Self>, signal: Signal) {
- if let Some(parent) = self.process.parent() {
- parent.wait_list.notify(WaitObject {
- pid: self.process.pid,
- code: WaitType::Stopped(signal),
- });
- }
- preempt::disable();
- // A thread stopped by `SIGSTOP` can only be woken by `SIGCONT` or `SIGKILL`.
- // SAFETY: Preempt disabled above.
- Scheduler::get().lock().usleep(self);
- Scheduler::schedule();
- }
- pub fn do_continue(self: &Arc<Self>) {
- if let Some(parent) = self.process.parent() {
- parent.wait_list.notify(WaitObject {
- pid: self.process.pid,
- code: WaitType::Continued,
- });
- }
- }
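- /// Raise `signal` on this thread, waking it from interruptible or
- /// uninterruptible sleep when the signal requires it.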
- pub fn raise(self: &Arc<Thread>, signal: Signal) -> RaiseResult {
- match self.signal_list.raise(signal) {
- RaiseResult::ShouldIWakeUp => {
- Scheduler::get().lock_irq().iwake(self);
- RaiseResult::Finished
- }
- RaiseResult::ShouldUWakeUp => {
- Scheduler::get().lock_irq().uwake(self);
- RaiseResult::Finished
- }
- result => result,
- }
- }
- pub fn load_thread_area32(&self) {
- const IA32_KERNEL_GS_BASE: u32 = 0xc0000102;
- let inner = self.inner.lock();
- if let Some(desc32) = inner.tls_desc32 {
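- // Entry 7 of the GDT holds the 32-bit TLS descriptor; the GDT appears to live
- // at physical address 0, hence the `0x0 + 7 * 8` offset below.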
- // SAFETY: `tls32` should be per cpu.
- let tls32_addr = CachedPP::new(0x0 + 7 * 8);
- tls32_addr.as_mut::<u64>().clone_from(&desc32);
- }
- if let Some(base) = inner.tls_base {
- arch::x86_64::task::wrmsr(IA32_KERNEL_GS_BASE, base);
- }
- }
- pub fn set_thread_area(&self, desc: &mut UserDescriptor) -> KResult<()> {
- let mut inner = self.inner.lock();
- // Clear the TLS area if it is not present.
- if desc.flags.is_read_exec_only() && !desc.flags.is_present() {
- if desc.limit == 0 || desc.base == 0 {
- return Ok(());
- }
- let len = if desc.flags.is_limit_in_pages() {
- (desc.limit as usize) << 12
- } else {
- desc.limit as usize
- };
- CheckedUserPointer::new(desc.base as _, len)?.zero()?;
- return Ok(());
- }
- if desc.entry != u32::MAX || !desc.flags.is_32bit_segment() {
- return Err(EINVAL);
- }
- desc.entry = 7;
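- // Assemble the 8-byte GDT descriptor: bits 0-15 limit[15:0], bits 16-39
- // base[23:0], bits 40-47 access byte (0xF2: present, DPL 3, writable data
- // segment), bits 48-51 limit[19:16], bit 54 the D/B "32-bit" flag (the 0x4
- // nibble below), bit 55 granularity (set when the limit is in pages), and
- // bits 56-63 base[31:24].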
- let mut desc32 = desc.limit as u64 & 0xffff;
- desc32 |= (desc.base as u64 & 0xffffff) << 16;
- desc32 |= 0x4_0_f2_000000_0000;
- desc32 |= (desc.limit as u64 & 0xf_0000) << (48 - 16);
- if desc.flags.is_limit_in_pages() {
- desc32 |= 1 << 55;
- }
- desc32 |= (desc.base as u64 & 0xff_000000) << (56 - 24);
- inner.tls_desc32 = Some(desc32);
- inner.tls_base = Some(desc.base as u64);
- Ok(())
- }
- /// Prepare the kernel stack for a thread in the `Preparing` state.
- ///
- /// # Panics
- /// Panics if the thread is not in the `Preparing` state.
- pub fn prepare_kernel_stack<F: FnOnce(&mut KernelStack)>(&self, func: F) {
- let mut state = self.state.lock();
- assert!(matches!(*state, ThreadState::Preparing));
- // SAFETY: We are in the preparing state with `state` locked.
- func(&mut self.kstack.borrow_mut());
- // Enter the USleep state and wait to be scheduled manually.
- *state = ThreadState::USleep;
- }
- pub fn load_interrupt_stack(&self) {
- self.kstack.borrow().load_interrupt_stack();
- }
- /// Get a pointer to `self.sp` so we can use it in `context_switch()`.
- ///
- /// # Safety
- /// Saving the returned pointer for later use, or passing it to a function that stores it, is undefined behavior.
- pub unsafe fn get_sp_ptr(&self) -> *mut usize {
- self.kstack.borrow().get_sp_ptr()
- }
- pub fn set_name(&self, name: Arc<[u8]>) {
- self.inner.lock().name = name;
- }
- pub fn get_name(&self) -> Arc<[u8]> {
- self.inner.lock().name.clone()
- }
- }
- // TODO: Maybe we can find a better way instead of using `RefCell` for `KernelStack`?
- unsafe impl Sync for Thread {}
- impl WaitList {
- pub fn new(process: Weak<Process>) -> Self {
- Self {
- wait_procs: Spin::new(VecDeque::new()),
- cv_wait_procs: CondVar::new(),
- process,
- }
- }
- pub fn notify(&self, wait: WaitObject) {
- let mut wait_procs = self.wait_procs.lock();
- wait_procs.push_back(wait);
- self.cv_wait_procs.notify_all();
- self.process
- .upgrade()
- .expect("`process` must be valid if we are using `WaitList`")
- .raise(Signal::SIGCHLD);
- }
- /// Notify several wait objects in a batch. The waiting process is only woken up
- /// if something was actually notified.
- ///
- /// # Lock
- /// This function locks `wait_procs` and returns a `NotifyBatch` that
- /// unlocks it when dropped.
- pub fn notify_batch(&self) -> NotifyBatch {
- NotifyBatch {
- wait_procs: self.wait_procs.lock(),
- cv: &self.cv_wait_procs,
- needs_notify: false,
- process: &self.process,
- }
- }
- pub fn drain_exited(&self) -> DrainExited {
- DrainExited {
- wait_procs: self.wait_procs.lock(),
- }
- }
- pub fn entry(&self, want_stop: bool, want_continue: bool) -> Entry {
- Entry {
- wait_procs: self.wait_procs.lock(),
- cv: &self.cv_wait_procs,
- want_stop,
- want_continue,
- }
- }
- }
- impl Entry<'_, '_> {
- pub fn get(&mut self) -> Option<WaitObject> {
- let idx = self.wait_procs.iter().position(|item| {
- if item.stopped().is_some() {
- self.want_stop
- } else if item.is_continue() {
- self.want_continue
- } else {
- true
- }
- })?;
- self.wait_procs.remove(idx)
- }
- pub fn wait(&mut self) -> KResult<()> {
- self.cv.wait(&mut self.wait_procs);
- if Thread::current().signal_list.has_pending_signal() {
- return Err(EINTR);
- }
- Ok(())
- }
- }
- impl DrainExited<'_> {
- pub fn into_iter(&mut self) -> impl Iterator<Item = WaitObject> + '_ {
- // We don't propagate stop and continue to the new parent.
- self.wait_procs
- .drain(..)
- .filter(|item| item.stopped().is_none() && !item.is_continue())
- }
- }
- impl NotifyBatch<'_, '_, '_> {
- pub fn notify(&mut self, wait: WaitObject) {
- self.wait_procs.push_back(wait);
- self.needs_notify = true;
- }
- /// Finish the batch. Dropping it performs the notification if anything was queued.
- pub fn finish(self) {}
- }
- impl Drop for NotifyBatch<'_, '_, '_> {
- fn drop(&mut self) {
- if self.needs_notify {
- self.cv.notify_all();
- self.process
- .upgrade()
- .expect("`process` must be valid if we are using `WaitList`")
- .raise(Signal::SIGCHLD);
- }
- }
- }
- impl Process {
- pub fn parent(&self) -> Option<Arc<Process>> {
- self.inner.lock().parent.clone()
- }
- }
- pub fn init_multitasking() {
- // Touch the global process list to force its lazy initialization.
- assert!(ProcessList::get().try_find_thread(1).is_some());
- Thread::current().load_interrupt_stack();
- Thread::current().process.mm_list.switch_page_table();
- }