feat: make CURRENT and IDLE_TASK per cpu

feat: percpu pointer variables
greatbridf · 4 months ago
commit 29a3461810

+ 10 - 2
arch/percpu/macros/src/lib.rs

@@ -18,8 +18,10 @@ pub fn define_percpu(attrs: TokenStream, item: TokenStream) -> TokenStream {
     let ty = &item.ty;
     let expr = &item.expr;
 
-    if !["bool", "u8", "u16", "u32", "u64", "usize"].contains(&quote!(#ty).to_string().as_str()) {
-        panic!("`define_percpu` only supports bool, u8, u16, u32, u64 and usize");
+    if !["bool", "u8", "u16", "u32", "u64", "usize"].contains(&quote!(#ty).to_string().as_str())
+        && !quote!(#ty).to_string().contains("NonNull")
+    {
+        panic!("`define_percpu` only supports bool, u8, u16, u32, u64, usize and pointers");
     }
 
     let inner_ident = format_ident!("_percpu_inner_{}", ident);
@@ -27,6 +29,7 @@ pub fn define_percpu(attrs: TokenStream, item: TokenStream) -> TokenStream {
 
     let integer_methods = match quote!(#ty).to_string().as_str() {
         "bool" => quote! {},
+        name if name.contains("NonNull") => quote! {},
         _ => quote! {
             pub fn add(&self, value: #ty) {
                 *unsafe { self.as_mut() } += value;
@@ -61,6 +64,11 @@ pub fn define_percpu(attrs: TokenStream, item: TokenStream) -> TokenStream {
                 unsafe { self.as_ptr().write(value) }
             }
 
+            pub fn swap(&self, mut value: #ty) -> #ty {
+                unsafe { self.as_ptr().swap(&mut value) }
+                value
+            }
+
             /// # Safety
             /// This function is unsafe because it allows for immutable aliasing of the percpu
             /// variable.
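
A hedged sketch of how the extended macro could now be used with a pointer-typed per-cpu variable and the new `swap` accessor. The variable name below is hypothetical; the `#[arch::define_percpu]` attribute and the generated `swap` method are taken from the hunks above and assume the code lives inside this kernel crate.

    use core::ptr::NonNull;

    // Hypothetical per-cpu slot holding an optional raw pointer, mirroring
    // the CURRENT/IDLE_TASK pattern introduced by this commit.
    #[arch::define_percpu]
    static LAST_FAULT_ADDR: Option<NonNull<u8>> = None;

    fn record_fault(addr: NonNull<u8>) -> Option<NonNull<u8>> {
        // `swap` writes the new value into this cpu's slot and hands back
        // the previous one, leaving its disposal to the caller.
        LAST_FAULT_ADDR.swap(Some(addr))
    }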

+ 1 - 1
src/kernel/syscall/procops.rs

@@ -470,7 +470,7 @@ global_asm!(
 );
 
 fn sys_fork(int_stack: &mut interrupt_stack, mmxregs: &mut mmx_registers) -> usize {
-    let new_thread = Thread::new_cloned(Thread::current());
+    let new_thread = Thread::new_cloned(&Thread::current());
 
     // TODO: We should make the preparation of the kernel stack more abstract.
     //       Currently, we can see that we are directly writing to the kernel stack,

+ 34 - 30
src/kernel/task/scheduler.rs

@@ -1,4 +1,7 @@
-use core::sync::atomic::{compiler_fence, Ordering};
+use core::{
+    ptr::NonNull,
+    sync::atomic::{compiler_fence, Ordering},
+};
 
 use crate::{prelude::*, sync::preempt};
 
@@ -15,20 +18,13 @@ pub struct Scheduler {
 }
 
 /// Idle task thread
-///
-/// # Safety
-/// This variable is per cpu. So no need to synchronize accesses to it.
-///
-/// TODO!!!: This should be per cpu in smp environment.
-static mut IDLE_TASK: Option<Arc<Thread>> = None;
+/// All the idle task threads belong to `pid 0` and are pinned to the current cpu.
+#[arch::define_percpu]
+static IDLE_TASK: Option<NonNull<Thread>> = None;
 
 /// Current thread
-///
-/// # Safety
-/// This variable is per cpu. So no need to synchronize accesses to it.
-///
-/// TODO!!!: This should be per cpu in smp environment.
-static mut CURRENT: Option<Arc<Thread>> = None;
+#[arch::define_percpu]
+static mut CURRENT: Option<NonNull<Thread>> = None;
 
 lazy_static! {
     static ref GLOBAL_SCHEDULER: Spin<Scheduler> = Spin::new(Scheduler {
@@ -48,17 +44,21 @@ impl Scheduler {
         &GLOBAL_SCHEDULER
     }
 
-    pub fn current<'lt>() -> &'lt Arc<Thread> {
-        // SAFETY: `CURRENT` is per cpu.
-        unsafe { CURRENT.as_ref().unwrap() }
+    /// # Safety
+    /// We should never observe `CURRENT` changing under us: it is only ever changed by
+    /// the scheduler, and even if we are preempted, `CURRENT` will be the same and still
+    /// valid when we eventually return here.
+    pub fn current<'lt>() -> BorrowedArc<'lt, Thread> {
+        BorrowedArc::from_raw(CURRENT.get().unwrap().as_ptr())
     }
 
-    pub fn idle_task() -> &'static Arc<Thread> {
-        // SAFETY: `IDLE_TASK` is per cpu.
-        unsafe { IDLE_TASK.as_ref().unwrap() }
+    /// # Safety
+    /// The idle task should never change, so we can borrow it without touching the refcount.
+    pub fn idle_task() -> BorrowedArc<'static, Thread> {
+        BorrowedArc::from_raw(IDLE_TASK.get().unwrap().as_ptr())
     }
 
-    pub(super) fn set_idle(thread: Arc<Thread>) {
+    pub(super) unsafe fn set_idle(thread: Arc<Thread>) {
         thread.prepare_kernel_stack(|kstack| {
             let mut writer = kstack.get_writer();
             writer.flags = 0x200;
@@ -67,13 +67,15 @@ impl Scheduler {
         });
         // We don't wake the idle thread here to prevent it from accidentally being scheduled.
 
-        // TODO!!!: Set per cpu variable.
-        unsafe { IDLE_TASK = Some(thread) };
+        let old = IDLE_TASK.swap(NonNull::new(Arc::into_raw(thread) as *mut _));
+        assert!(old.is_none(), "Idle task is already set");
     }
 
-    pub(super) fn set_current(thread: Arc<Thread>) {
-        // TODO!!!: Set per cpu variable.
-        unsafe { CURRENT = Some(thread) };
+    pub(super) unsafe fn set_current(thread: Arc<Thread>) {
+        let old = CURRENT.swap(NonNull::new(Arc::into_raw(thread) as *mut _));
+        if let Some(thread_pointer) = old {
+            Arc::from_raw(thread_pointer.as_ptr());
+        }
     }
 
     fn enqueue(&mut self, thread: &Arc<Thread>) {
@@ -161,7 +163,7 @@ impl Scheduler {
         //
         // Since we might never return to here, we can't take ownership of `current()`.
         // Is it safe to believe that `current()` will never change across calls?
-        context_switch_light(Thread::current(), Scheduler::idle_task());
+        context_switch_light(&Thread::current(), &Scheduler::idle_task());
         preempt::enable();
     }
 
@@ -192,11 +194,11 @@ extern "C" fn idle_task() {
             // No other thread to run, return to current running thread without changing its state.
             if scheduler.ready.is_empty() {
                 drop(scheduler);
-                context_switch_light(Scheduler::idle_task(), Thread::current());
+                context_switch_light(&Scheduler::idle_task(), &Thread::current());
                 continue;
             } else {
                 // Put it into `Ready` state
-                scheduler.put_ready(Thread::current());
+                scheduler.put_ready(&Thread::current());
             }
         }
 
@@ -217,7 +219,9 @@ extern "C" fn idle_task() {
         drop(scheduler);
 
         next_thread.process.mm_list.switch_page_table();
-        unsafe { CURRENT = Some(next_thread) };
+        unsafe {
+            Scheduler::set_current(next_thread);
+        }
 
         Thread::current().load_interrupt_stack();
         Thread::current().load_thread_area32();
@@ -226,6 +230,6 @@ extern "C" fn idle_task() {
         //
         // The other cpu should see the changes of kernel stack of the target thread
         // made in this cpu.
-        context_switch_light(Scheduler::idle_task(), Thread::current());
+        context_switch_light(&Scheduler::idle_task(), &Thread::current());
     }
 }
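
The ownership protocol behind `set_current` above: `Arc::into_raw` leaks one strong reference of the incoming `Arc<Thread>` into a raw pointer, the per-cpu `swap` stores it, and the previously stored pointer, if any, is revived with `Arc::from_raw` so its reference count is finally released; `Scheduler::current` then reads the slot through `BorrowedArc` without touching the refcount at all. Below is a standalone sketch of the same handoff, purely for illustration, using `std` types and a plain `Cell` in place of the per-cpu slot.

    use std::{cell::Cell, ptr::NonNull, sync::Arc};

    // Illustrative stand-in for the kernel's Thread type.
    struct Task(&'static str);

    // Stand-in for the per-cpu CURRENT slot; in the kernel, `swap` is the
    // accessor generated by `#[arch::define_percpu]`.
    struct Slot(Cell<Option<NonNull<Task>>>);

    fn set_current(slot: &Slot, task: Arc<Task>) {
        // Move one strong reference into a raw pointer and store it.
        let new = NonNull::new(Arc::into_raw(task) as *mut Task);
        // Whatever was stored before still owns a strong reference, so
        // rebuild the Arc and let it drop, releasing that reference.
        if let Some(old) = slot.0.replace(new) {
            unsafe { drop(Arc::from_raw(old.as_ptr())) };
        }
    }

    fn main() {
        let slot = Slot(Cell::new(None));
        set_current(&slot, Arc::new(Task("kinit")));
        // The first task's refcount is released here; the second stays
        // leaked in the slot, just as CURRENT does in the kernel.
        set_current(&slot, Arc::new(Task("idle")));
    }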

+ 4 - 4
src/kernel/task/thread.rs

@@ -395,12 +395,12 @@ lazy_static! {
     static ref GLOBAL_PROC_LIST: ProcessList = {
         let init_process = Process::new_for_init(1, None);
         let init_thread = Thread::new_for_init(b"[kernel kinit]".as_slice().into(), &init_process);
-        Scheduler::set_current(init_thread.clone());
+        unsafe { Scheduler::set_current(init_thread.clone()) };
 
         let idle_process = Process::new_for_init(0, None);
         let idle_thread =
             Thread::new_for_init(b"[kernel idle#BS]".as_slice().into(), &idle_process);
-        Scheduler::set_idle(idle_thread.clone());
+        unsafe { Scheduler::set_idle(idle_thread.clone()) };
 
         let init_session_weak = Arc::downgrade(&init_process.inner.lock().session);
         let init_pgroup_weak = Arc::downgrade(&init_process.inner.lock().pgroup);
@@ -487,7 +487,7 @@ impl ProcessList {
         // TODO!!!!!!: When we are killing multiple threads, we need to wait until all
         // the threads are stopped then proceed.
         for thread in inner.threads.values().map(|t| t.upgrade().unwrap()) {
-            assert!(&thread == Thread::current());
+            assert!(&thread == Thread::current().as_ref());
             Scheduler::get().lock().set_zombie(&thread);
             thread.files.close_all();
         }
@@ -854,7 +854,7 @@ impl Thread {
         thread
     }
 
-    pub fn current<'lt>() -> &'lt Arc<Self> {
+    pub fn current<'lt>() -> BorrowedArc<'lt, Self> {
         Scheduler::current()
     }
 

+ 1 - 1
src/kernel/vfs/filearray.rs

@@ -58,7 +58,7 @@ impl OpenFile {
 
 impl FileArray {
     pub fn get_current<'lt>() -> &'lt Arc<Self> {
-        &Thread::current().files
+        &Thread::current().borrow().files
     }
 
     pub fn new_for_init() -> Arc<Self> {

+ 1 - 2
src/kernel/vfs/mod.rs

@@ -52,8 +52,7 @@ pub struct FsContext {
 
 impl FsContext {
     pub fn get_current<'lt>() -> &'lt Arc<Self> {
-        let current = Thread::current();
-        &current.fs_context
+        &Thread::current().borrow().fs_context
     }
 
     pub fn new_for_init() -> Arc<Self> {

+ 1 - 1
src/lib.rs

@@ -175,7 +175,7 @@ pub extern "C" fn rust_kinit(early_kstack_pfn: usize) -> ! {
     // To satisfy the `Scheduler` "preempt count == 0" assertion.
     preempt::disable();
 
-    Scheduler::get().lock().uwake(Thread::current());
+    Scheduler::get().lock().uwake(&Thread::current());
 
     arch::task::context_switch_light(
         CachedPP::new(early_kstack_pfn).as_ptr(), // We will never come back

+ 11 - 1
src/prelude.rs

@@ -32,7 +32,7 @@ pub(crate) use alloc::{boxed::Box, string::String, vec, vec::Vec};
 pub(crate) use core::{any::Any, fmt::Write, marker::PhantomData, str};
 use core::{mem::ManuallyDrop, ops::Deref};
 
-pub use crate::sync::{Mutex, RwSemaphore, Semaphore, Spin, Locked};
+pub use crate::sync::{Locked, Mutex, RwSemaphore, Semaphore, Spin};
 
 pub struct BorrowedArc<'lt, T: ?Sized> {
     arc: ManuallyDrop<Arc<T>>,
@@ -55,6 +55,16 @@ impl<'lt, T: ?Sized> BorrowedArc<'lt, T> {
             _phantom: PhantomData,
         }
     }
+
+    pub fn borrow(&self) -> &'lt T {
+        let reference: &T = &self.arc;
+        let ptr = reference as *const T;
+
+        // SAFETY: `ptr` is a valid pointer to `T` because `reference` is a valid reference to `T`.
+        // `ptr` is also guaranteed to be valid for the lifetime `'lt` because it is derived from
+        // `self.arc` which is guaranteed to be valid for the lifetime `'lt`.
+        unsafe { ptr.as_ref().unwrap() }
+    }
 }
 
 impl<'lt, T: ?Sized> Deref for BorrowedArc<'lt, T> {
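
The new `borrow` method exists because `Deref` ties the resulting `&T` to the temporary `BorrowedArc`, which is dropped at the end of the expression, while `borrow` re-derives the reference from the underlying pointer so it carries the full `'lt` lifetime. That is what lets the `get_current` helpers above return a long-lived borrow from a temporary; the hypothetical helper below just spells out the shape of `FileArray::get_current` from this commit.

    // `Thread::current()` is a temporary BorrowedArc<'lt, Thread>; `.borrow()`
    // yields a &'lt Thread that outlives the temporary, so the field borrow
    // can be returned from the function.
    pub fn get_current_files<'lt>() -> &'lt Arc<FileArray> {
        &Thread::current().borrow().files
    }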

+ 3 - 3
src/sync/condvar.rs

@@ -38,9 +38,9 @@ impl<const I: bool> CondVar<I> {
 
     fn sleep(scheduler: &mut Scheduler) {
         if I {
-            scheduler.isleep(Thread::current());
+            scheduler.isleep(&Thread::current());
         } else {
-            scheduler.usleep(Thread::current());
+            scheduler.usleep(&Thread::current());
         }
     }
 
@@ -88,6 +88,6 @@ impl<const I: bool> CondVar<I> {
 
         self.waiters
             .lock_irq()
-            .retain(|waiter| waiter != Thread::current());
+            .retain(|waiter| waiter != Thread::current().as_ref());
     }
 }