Browse Source

Merge branch 'sched-rewrite' into shiai-master

greatbridf 10 months ago
parent
commit
a396b8add4
61 changed files with 2063 additions and 1328 deletions
  1. Cargo.lock (+45 -0)
  2. Cargo.toml (+18 -0)
  3. arch/percpu-macros/src/lib.rs (+2 -2)
  4. crates/eonix_log/Cargo.toml (+8 -0)
  5. crates/eonix_log/src/lib.rs (+112 -0)
  6. crates/eonix_preempt/Cargo.toml (+7 -0)
  7. crates/eonix_preempt/src/lib.rs (+67 -0)
  8. crates/eonix_runtime/Cargo.toml (+20 -0)
  9. crates/eonix_runtime/src/context.rs (+5 -5)
  10. crates/eonix_runtime/src/executor.rs (+137 -0)
  11. crates/eonix_runtime/src/executor/builder.rs (+67 -0)
  12. crates/eonix_runtime/src/executor/execute_status.rs (+4 -0)
  13. crates/eonix_runtime/src/executor/output_handle.rs (+64 -0)
  14. crates/eonix_runtime/src/executor/stack.rs (+4 -0)
  15. crates/eonix_runtime/src/lib.rs (+10 -0)
  16. crates/eonix_runtime/src/ready_queue.rs (+4 -6)
  17. crates/eonix_runtime/src/run.rs (+13 -6)
  18. crates/eonix_runtime/src/run/future_run.rs (+34 -0)
  19. crates/eonix_runtime/src/scheduler.rs (+299 -0)
  20. crates/eonix_runtime/src/task.rs (+171 -0)
  21. crates/eonix_runtime/src/task/adapter.rs (+13 -0)
  22. crates/eonix_runtime/src/task/task_state.rs (+28 -0)
  23. crates/eonix_sync/Cargo.toml (+7 -0)
  24. crates/eonix_sync/src/guard.rs (+159 -0)
  25. crates/eonix_sync/src/lib.rs (+13 -0)
  26. crates/eonix_sync/src/lock.rs (+139 -0)
  27. crates/eonix_sync/src/spin.rs (+44 -52)
  28. crates/eonix_sync/src/strategy.rs (+45 -0)
  29. crates/pointers/Cargo.toml (+6 -0)
  30. crates/pointers/src/lib.rs (+77 -0)
  31. src/driver/ahci/port.rs (+2 -1)
  32. src/driver/serial.rs (+102 -37)
  33. src/kernel.rs (+0 -1)
  34. src/kernel/chardev.rs (+10 -12)
  35. src/kernel/console.rs (+14 -32)
  36. src/kernel/constants.rs (+3 -0)
  37. src/kernel/mem/mm_list.rs (+5 -2)
  38. src/kernel/smp.rs (+10 -9)
  39. src/kernel/syscall/procops.rs (+17 -21)
  40. src/kernel/task.rs (+2 -6)
  41. src/kernel/task/kernel_stack.rs (+10 -2)
  42. src/kernel/task/process.rs (+9 -11)
  43. src/kernel/task/process_list.rs (+3 -3)
  44. src/kernel/task/scheduler.rs (+0 -218)
  45. src/kernel/task/signal.rs (+29 -33)
  46. src/kernel/task/task.rs (+0 -342)
  47. src/kernel/task/thread.rs (+78 -55)
  48. src/kernel/terminal.rs (+22 -16)
  49. src/kernel/timer.rs (+6 -9)
  50. src/kernel/user/dataflow.rs (+6 -5)
  51. src/kernel/vfs/filearray.rs (+8 -11)
  52. src/lib.rs (+10 -11)
  53. src/prelude.rs (+1 -60)
  54. src/rcu.rs (+16 -32)
  55. src/sync.rs (+16 -56)
  56. src/sync/arcswap.rs (+9 -5)
  57. src/sync/condvar.rs (+44 -14)
  58. src/sync/lock.rs (+0 -218)
  59. src/sync/locked.rs (+5 -4)
  60. src/sync/semaphore.rs (+4 -3)
  61. src/sync/strategy.rs (+0 -28)

+ 45 - 0
Cargo.lock

@@ -87,6 +87,42 @@ version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 
+[[package]]
+name = "eonix_log"
+version = "0.1.0"
+dependencies = [
+ "eonix_sync",
+ "lazy_static",
+]
+
+[[package]]
+name = "eonix_preempt"
+version = "0.1.0"
+dependencies = [
+ "arch",
+]
+
+[[package]]
+name = "eonix_runtime"
+version = "0.1.0"
+dependencies = [
+ "arch",
+ "atomic_unique_refcell",
+ "eonix_log",
+ "eonix_preempt",
+ "eonix_sync",
+ "intrusive-collections",
+ "lazy_static",
+ "pointers",
+]
+
+[[package]]
+name = "eonix_sync"
+version = "0.1.0"
+dependencies = [
+ "eonix_preempt",
+]
+
 [[package]]
 name = "gbos-rust-part"
 version = "0.1.0"
@@ -95,9 +131,14 @@ dependencies = [
  "atomic_unique_refcell",
  "bindgen",
  "bitflags",
+ "eonix_log",
+ "eonix_preempt",
+ "eonix_runtime",
+ "eonix_sync",
  "intrusive-collections",
  "itertools",
  "lazy_static",
+ "pointers",
  "spin",
 ]
 
@@ -206,6 +247,10 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "pointers"
+version = "0.1.0"
+
 [[package]]
 name = "prettyplease"
 version = "0.2.25"

+ 18 - 0
Cargo.toml

@@ -11,6 +11,12 @@ arch = { path = "./arch" }
 atomic_unique_refcell = { path = "./crates/atomic_unique_refcell", features = [
     "no_std",
 ] }
+eonix_preempt = { path = "./crates/eonix_preempt" }
+eonix_runtime = { path = "./crates/eonix_runtime" }
+eonix_sync = { path = "./crates/eonix_sync" }
+eonix_log = { path = "./crates/eonix_log" }
+pointers = { path = "./crates/pointers" }
+
 bitflags = "2.6.0"
 intrusive-collections = "0.9.7"
 itertools = { version = "0.13.0", default-features = false }
@@ -38,6 +44,18 @@ bindgen = "0.70.1"
 [profile.dev]
 panic = "abort"
 
+[profile.dev.package.arch]
+opt-level = 0
+
+[profile.dev.package.eonix_preempt]
+opt-level = 0
+
+[profile.dev.package.eonix_runtime]
+opt-level = 0
+
+[profile.dev.package.eonix_sync]
+opt-level = 0
+
 [profile.dev.package."*"]
 opt-level = 2
 debug = false

+ 2 - 2
arch/percpu-macros/src/lib.rs

@@ -42,13 +42,13 @@ pub fn define_percpu(attrs: TokenStream, item: TokenStream) -> TokenStream {
     };
 
     let preempt_disable = if !is_atomic_like {
-        quote! { crate::sync::preempt::disable(); }
+        quote! { eonix_preempt::disable(); }
     } else {
         quote! {}
     };
 
     let preempt_enable = if !is_atomic_like {
-        quote! { crate::sync::preempt::enable(); }
+        quote! { eonix_preempt::enable(); }
     } else {
         quote! {}
     };

+ 8 - 0
crates/eonix_log/Cargo.toml

@@ -0,0 +1,8 @@
+[package]
+name = "eonix_log"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+eonix_sync = { path = "../eonix_sync" }
+lazy_static = { version = "1.5.0", features = ["spin_no_std"] }

+ 112 - 0
crates/eonix_log/src/lib.rs

@@ -0,0 +1,112 @@
+#![no_std]
+
+use core::fmt::{self, Write};
+
+use alloc::sync::Arc;
+use eonix_sync::Spin;
+use lazy_static::lazy_static;
+
+extern crate alloc;
+
+pub trait ConsoleWrite: Send + Sync {
+    fn write(&self, s: &str);
+}
+
+struct Console {
+    console: Option<Arc<dyn ConsoleWrite>>,
+}
+
+// TODO!!!: We should use a `RwLock` here for better performance.
+lazy_static! {
+    static ref CONSOLE: Spin<Console> = Spin::new(Console::new());
+}
+
+impl Console {
+    const fn new() -> Self {
+        Self { console: None }
+    }
+}
+
+impl Write for Console {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        if let Some(console) = self.console.as_ref() {
+            console.write(s);
+        }
+        Ok(())
+    }
+}
+
+pub fn set_console(console: Arc<dyn ConsoleWrite>) {
+    CONSOLE.lock_irq().console.replace(console);
+}
+
+#[doc(hidden)]
+pub fn do_print(args: fmt::Arguments) {
+    let _ = CONSOLE.lock_irq().write_fmt(args);
+}
+
+#[macro_export]
+macro_rules! print {
+    ($($arg:tt)*) => {
+        $crate::do_print(format_args!($($arg)*))
+    };
+}
+
+#[macro_export]
+macro_rules! println {
+    () => {
+        $crate::print!("\n")
+    };
+    ($($arg:tt)*) => {
+        $crate::print!("{}\n", format_args!($($arg)*))
+    };
+}
+
+#[macro_export]
+macro_rules! println_warn {
+    ($($arg:tt)*) => {
+        $crate::println!("[kernel: warn] {}", format_args!($($arg)*))
+    };
+}
+
+#[macro_export]
+macro_rules! println_debug {
+    ($($arg:tt)*) => {
+        $crate::println!("[kernel:debug] {}", format_args!($($arg)*))
+    };
+}
+
+#[macro_export]
+macro_rules! println_info {
+    ($($arg:tt)*) => {
+        $crate::println!("[kernel: info] {}", format_args!($($arg)*))
+    };
+}
+
+#[macro_export]
+macro_rules! println_fatal {
+    () => {
+        $crate::println!("[kernel:fatal] ")
+    };
+    ($($arg:tt)*) => {
+        $crate::println!("[kernel:fatal] {}", format_args!($($arg)*))
+    };
+}
+
+#[macro_export]
+macro_rules! println_trace {
+    ($feat:literal) => {
+        #[deny(unexpected_cfgs)]
+        {
+            #[cfg(feature = $feat)]
+            $crate::println!("[kernel:trace] ")
+        }
+    };
+    ($feat:literal, $($arg:tt)*) => {{
+        #[deny(unexpected_cfgs)]
+        {
+            #[cfg(feature = $feat)]
+            $crate::println!("[kernel:trace] {}", format_args!($($arg)*))
+        }
+    }};
+}
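
The crate's output path is pluggable: nothing prints until the kernel registers a backend through `set_console`. Below is a minimal, hedged sketch of how a driver might wire itself in; `serial_put_byte` is a made-up stand-in for whatever low-level write routine the real console driver uses.

    use alloc::sync::Arc;
    use eonix_log::{println_info, set_console, ConsoleWrite};

    fn serial_put_byte(_byte: u8) {
        // Hypothetical hardware write; stands in for the real serial driver.
    }

    struct SerialConsole;

    impl ConsoleWrite for SerialConsole {
        fn write(&self, s: &str) {
            for byte in s.bytes() {
                serial_put_byte(byte);
            }
        }
    }

    fn init_logging() {
        // Registered once at boot; later `println!`-family calls go through it.
        set_console(Arc::new(SerialConsole));
        println_info!("console registered");
    }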

+ 7 - 0
crates/eonix_preempt/Cargo.toml

@@ -0,0 +1,7 @@
+[package]
+name = "eonix_preempt"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+arch = { path = "../../arch" }

+ 67 - 0
crates/eonix_preempt/src/lib.rs

@@ -0,0 +1,67 @@
+#![no_std]
+
+use core::sync::atomic::{compiler_fence, Ordering};
+
+#[arch::define_percpu]
+static PREEMPT_COUNT: usize = 0;
+
+#[inline(always)]
+pub fn disable() {
+    PREEMPT_COUNT.add(1);
+    compiler_fence(Ordering::AcqRel);
+}
+
+#[inline(always)]
+pub fn enable() {
+    compiler_fence(Ordering::AcqRel);
+    PREEMPT_COUNT.sub(1);
+}
+
+#[inline(always)]
+pub fn count() -> usize {
+    PREEMPT_COUNT.get()
+}
+
+#[macro_export]
+macro_rules! assert_preempt_enabled {
+    () => {{
+        assert_eq!($crate::count(), 0, "Preemption is not enabled",);
+    }};
+
+    ($msg:literal) => {{
+        assert_eq!($crate::count(), 0, "{}: Preemption is not enabled", $msg,);
+    }};
+}
+
+#[macro_export]
+macro_rules! assert_preempt_disabled {
+    () => {{
+        assert_ne!($crate::count(), 0, "Preemption is not disabled",);
+    }};
+
+    ($msg:literal) => {{
+        assert_ne!($crate::count(), 0, "{}: Preemption is not disabled", $msg,);
+    }};
+}
+
+#[macro_export]
+macro_rules! assert_preempt_count_eq {
+    ($n:expr) => {{
+        assert_eq!(
+            $crate::count(),
+            $n,
+            "Preemption count is not equal to {}",
+            $n,
+        );
+    }};
+
+    ($n:expr, $msg:literal) => {{
+        assert_eq!(
+            $crate::count(),
+            $n,
+            "{}: Preemption count is not equal to {}",
+            $msg,
+            $n,
+        );
+    }};
+}
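
A short usage sketch, based only on the API above: the counter is per-CPU, so `disable`/`enable` must stay balanced on the same CPU, and the assertion macros exist to catch imbalances early.

    fn touch_percpu_data() {
        eonix_preempt::disable();
        eonix_preempt::assert_preempt_disabled!("touch_percpu_data");

        // ... access per-CPU state that must not be migrated or preempted ...

        eonix_preempt::enable();
    }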

+ 20 - 0
crates/eonix_runtime/Cargo.toml

@@ -0,0 +1,20 @@
+[package]
+name = "eonix_runtime"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+arch = { path = "../../arch" }
+atomic_unique_refcell = { path = "../atomic_unique_refcell" }
+eonix_log = { path = "../eonix_log" }
+eonix_preempt = { path = "../eonix_preempt" }
+eonix_sync = { path = "../eonix_sync" }
+pointers = { path = "../pointers" }
+
+intrusive-collections = "0.9.7"
+lazy_static = { version = "1.5.0", features = ["spin_no_std"] }
+
+[features]
+default = []
+trace_scheduler = []
+log_trace = ["trace_scheduler"]

+ 5 - 5
src/kernel/task/task/context.rs → crates/eonix_runtime/src/context.rs

@@ -1,11 +1,11 @@
 use core::{cell::UnsafeCell, mem::transmute};
 
 #[derive(Debug)]
-pub struct TaskContext(UnsafeCell<arch::TaskContext>);
+pub struct ExecutionContext(UnsafeCell<arch::TaskContext>);
 
-unsafe impl Sync for TaskContext {}
+unsafe impl Sync for ExecutionContext {}
 
-impl TaskContext {
+impl ExecutionContext {
     pub const fn new() -> Self {
         Self(UnsafeCell::new(arch::TaskContext::new()))
     }
@@ -25,11 +25,11 @@ impl TaskContext {
         context.get_mut().interrupt(is_enabled);
     }
 
-    pub fn call2<T, U>(&mut self, func: unsafe extern "C" fn(T, U) -> !, args: [usize; 2]) {
+    pub fn call1<T>(&mut self, func: unsafe extern "C" fn(T) -> !, arg: usize) {
         let Self(context) = self;
         context
             .get_mut()
-            .call2(unsafe { transmute(func as *mut ()) }, args);
+            .call1(unsafe { transmute(func as *mut ()) }, [arg]);
     }
 
     pub fn switch_to(&self, to: &Self) {

+ 137 - 0
crates/eonix_runtime/src/executor.rs

@@ -0,0 +1,137 @@
+mod builder;
+mod execute_status;
+mod output_handle;
+mod stack;
+
+use crate::{
+    run::{Contexted, PinRun, RunState},
+    scheduler::Scheduler,
+    task::Task,
+};
+use alloc::sync::Weak;
+use core::{
+    pin::Pin,
+    sync::atomic::{compiler_fence, fence, AtomicBool, Ordering},
+    task::Waker,
+};
+use eonix_sync::Spin;
+
+pub use builder::ExecutorBuilder;
+pub use execute_status::ExecuteStatus;
+pub use output_handle::OutputHandle;
+pub use stack::Stack;
+
+/// An `Executor` executes a `PinRun` object in a separate thread of execution
+/// where we have a dedicated stack and context.
+pub trait Executor: Send {
+    fn progress(&self) -> ExecuteStatus;
+}
+
+struct RealExecutor<S, R>
+where
+    R: PinRun + Send + Contexted + 'static,
+    R::Output: Send,
+{
+    _stack: S,
+    runnable: R,
+    output_handle: Weak<Spin<OutputHandle<R::Output>>>,
+    finished: AtomicBool,
+}
+
+impl<S, R> RealExecutor<S, R>
+where
+    R: PinRun + Send + Contexted + 'static,
+    R::Output: Send,
+{
+    extern "C" fn execute(self: Pin<&Self>) -> ! {
+        // We get here with preempt count == 1.
+        eonix_preempt::enable();
+
+        {
+            let waker = Waker::from(Task::current().clone());
+
+            let output_data = loop {
+                // TODO!!!!!!: CHANGE THIS.
+                let runnable_pointer = &raw const self.get_ref().runnable;
+
+                // SAFETY: We don't move the runnable object and we MIGHT not be using the
+                //         part that is used in `pinned_run` in the runnable...?
+                let mut pinned_runnable =
+                    unsafe { Pin::new_unchecked(&mut *(runnable_pointer as *mut R)) };
+
+                match pinned_runnable.as_mut().pinned_run(&waker) {
+                    RunState::Finished(output) => break output,
+                    RunState::Running => {
+                        if Task::current().is_runnable() {
+                            continue;
+                        }
+
+                        // We need to set the preempt count to 0 to allow preemption.
+                        eonix_preempt::disable();
+
+                        // SAFETY: We are in the scheduler context and preemption is disabled.
+                        unsafe { Scheduler::goto_scheduler(&Task::current().execution_context) };
+
+                        eonix_preempt::enable();
+                    }
+                }
+            };
+
+            if let Some(output_handle) = self.output_handle.upgrade() {
+                output_handle.lock().commit_output(output_data);
+            }
+        }
+
+        // SAFETY: We are on the same CPU as the task.
+        self.finished.store(true, Ordering::Relaxed);
+
+        unsafe {
+            // SAFETY: `preempt::count()` == 1.
+            eonix_preempt::disable();
+            Scheduler::goto_scheduler_noreturn()
+        }
+    }
+}
+
+impl<S, R> Executor for RealExecutor<S, R>
+where
+    S: Send,
+    R: PinRun + Contexted + Send,
+    R::Output: Send,
+{
+    fn progress(&self) -> ExecuteStatus {
+        // TODO!!!: If the task comes from another cpu, we need to sync.
+        //
+        // The other cpu should see the changes of kernel stack of the target thread
+        // made in this cpu.
+        //
+        // Can we find a better way other than `fence`s?
+        //
+        // An alternative way is to use an atomic variable to store the cpu id of
+        // the current task. Then we can use acquire release swap to ensure that the
+        // other cpu sees the changes.
+        fence(Ordering::SeqCst);
+        compiler_fence(Ordering::SeqCst);
+
+        // TODO!!!: We should load the context only if the previous task is
+        // different from the current task.
+
+        self.runnable.load_running_context();
+
+        unsafe {
+            // SAFETY: We are in the scheduler context and we are not preempted.
+            Scheduler::go_from_scheduler(&Task::current().execution_context);
+        }
+
+        self.runnable.restore_running_context();
+
+        compiler_fence(Ordering::SeqCst);
+        fence(Ordering::SeqCst);
+
+        if self.finished.load(Ordering::Acquire) {
+            ExecuteStatus::Finished
+        } else {
+            ExecuteStatus::Executing
+        }
+    }
+}

+ 67 - 0
crates/eonix_runtime/src/executor/builder.rs

@@ -0,0 +1,67 @@
+use super::{Executor, OutputHandle, RealExecutor, Stack};
+use crate::{
+    context::ExecutionContext,
+    run::{Contexted, PinRun},
+};
+use alloc::{boxed::Box, sync::Arc};
+use core::{pin::Pin, sync::atomic::AtomicBool};
+use eonix_sync::Spin;
+
+pub struct ExecutorBuilder<S, R> {
+    stack: Option<S>,
+    runnable: Option<R>,
+}
+
+impl<S, R> ExecutorBuilder<S, R>
+where
+    S: Stack,
+    R: PinRun + Contexted + Send + 'static,
+    R::Output: Send,
+{
+    pub fn new() -> Self {
+        Self {
+            stack: None,
+            runnable: None,
+        }
+    }
+
+    pub fn stack(mut self, stack: S) -> Self {
+        self.stack.replace(stack);
+        self
+    }
+
+    pub fn runnable(mut self, runnable: R) -> Self {
+        self.runnable.replace(runnable);
+        self
+    }
+
+    pub fn build(
+        mut self,
+    ) -> (
+        Pin<Box<impl Executor>>,
+        ExecutionContext,
+        Arc<Spin<OutputHandle<R::Output>>>,
+    ) {
+        let stack = self.stack.take().expect("Stack is required");
+        let runnable = self.runnable.take().expect("Runnable is required");
+
+        let mut execution_context = ExecutionContext::new();
+        let output_handle = OutputHandle::new();
+
+        execution_context.set_sp(stack.get_bottom() as *const _ as _);
+
+        let executor = Box::pin(RealExecutor {
+            _stack: stack,
+            runnable,
+            output_handle: Arc::downgrade(&output_handle),
+            finished: AtomicBool::new(false),
+        });
+
+        execution_context.call1(
+            RealExecutor::<S, R>::execute,
+            executor.as_ref().get_ref() as *const _ as usize,
+        );
+
+        (executor, execution_context, output_handle)
+    }
+}

+ 4 - 0
crates/eonix_runtime/src/executor/execute_status.rs

@@ -0,0 +1,4 @@
+pub enum ExecuteStatus {
+    Executing,
+    Finished,
+}

+ 64 - 0
crates/eonix_runtime/src/executor/output_handle.rs

@@ -0,0 +1,64 @@
+use alloc::sync::Arc;
+use core::task::Waker;
+use eonix_sync::Spin;
+
+enum OutputState<Output>
+where
+    Output: Send,
+{
+    Waiting(Option<Waker>),
+    Finished(Option<Output>),
+    TakenOut,
+}
+
+pub struct OutputHandle<Output>
+where
+    Output: Send,
+{
+    inner: OutputState<Output>,
+}
+
+impl<Output> OutputHandle<Output>
+where
+    Output: Send,
+{
+    pub fn new() -> Arc<Spin<Self>> {
+        Arc::new(Spin::new(Self {
+            inner: OutputState::Waiting(None),
+        }))
+    }
+
+    pub fn try_resolve(&mut self) -> Option<Output> {
+        let output = match &mut self.inner {
+            OutputState::Waiting(_) => return None,
+            OutputState::Finished(output) => output.take(),
+            OutputState::TakenOut => panic!("Output already taken out"),
+        };
+
+        self.inner = OutputState::TakenOut;
+        if let Some(output) = output {
+            Some(output)
+        } else {
+            unreachable!("Output should be present")
+        }
+    }
+
+    pub fn register_waiter(&mut self, waker: Waker) {
+        if let OutputState::Waiting(inner_waker) = &mut self.inner {
+            inner_waker.replace(waker);
+        } else {
+            panic!("Output is not waiting");
+        }
+    }
+
+    pub fn commit_output(&mut self, output: Output) {
+        if let OutputState::Waiting(inner_waker) = &mut self.inner {
+            if let Some(waker) = inner_waker.take() {
+                waker.wake();
+            }
+            self.inner = OutputState::Finished(Some(output));
+        } else {
+            panic!("Output is not waiting");
+        }
+    }
+}
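
To make the state machine (`Waiting` → `Finished` → `TakenOut`) concrete, here is a small single-threaded sketch of the handshake; in the real runtime the producer side runs in `RealExecutor::execute` and the consumer side in `JoinHandle::join`.

    use eonix_runtime::executor::OutputHandle;

    fn handshake() {
        let handle = OutputHandle::new(); // Arc<Spin<OutputHandle<u32>>>

        // Consumer: nothing has been committed yet.
        assert!(handle.lock().try_resolve().is_none());

        // Producer: commit the result. This would also wake a registered waiter.
        handle.lock().commit_output(42u32);

        // Consumer: the output can be taken out exactly once.
        assert_eq!(handle.lock().try_resolve(), Some(42));
    }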

+ 4 - 0
crates/eonix_runtime/src/executor/stack.rs

@@ -0,0 +1,4 @@
+pub trait Stack: Sized + Send {
+    fn new() -> Self;
+    fn get_bottom(&self) -> &();
+}
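
`get_bottom` returns a `&()` that is only ever used as an address (the executor builder casts it into the initial stack pointer), so an implementation mostly has to own some memory and hand back its top end. A hypothetical heap-backed sketch, ignoring alignment and guard pages; the kernel presumably plugs in its own `KernelStack` here instead.

    use alloc::boxed::Box;
    use eonix_runtime::executor::Stack;

    struct BoxedStack(Box<[u8; 16 * 1024]>);

    impl Stack for BoxedStack {
        fn new() -> Self {
            BoxedStack(Box::new([0u8; 16 * 1024]))
        }

        fn get_bottom(&self) -> &() {
            // The "bottom" is the highest address of the buffer:
            // the stack grows downwards from here.
            let end = unsafe { self.0.as_ptr().add(self.0.len()) };
            unsafe { &*end.cast::<()>() }
        }
    }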

+ 10 - 0
crates/eonix_runtime/src/lib.rs

@@ -0,0 +1,10 @@
+#![no_std]
+
+pub mod context;
+pub mod executor;
+mod ready_queue;
+pub mod run;
+pub mod scheduler;
+pub mod task;
+
+extern crate alloc;

+ 4 - 6
src/kernel/task/readyqueue.rs → crates/eonix_runtime/src/ready_queue.rs

@@ -1,8 +1,6 @@
+use crate::task::Task;
 use alloc::{collections::VecDeque, sync::Arc};
-
-use crate::sync::Spin;
-
-use super::Task;
+use eonix_sync::Spin;
 
 #[arch::define_percpu]
 static READYQUEUE: Option<Spin<FifoReadyQueue>> = None;
@@ -34,7 +32,7 @@ impl ReadyQueue for FifoReadyQueue {
     }
 }
 
-pub fn rq_thiscpu() -> &'static Spin<dyn ReadyQueue> {
+pub fn local_rq() -> &'static Spin<dyn ReadyQueue> {
     // SAFETY: When we use ReadyQueue on this CPU, we will lock it with `lock_irq()`
     //         and if we use ReadyQueue on other CPU, we won't be able to touch it on this CPU.
     //         So no issue here.
@@ -43,6 +41,6 @@ pub fn rq_thiscpu() -> &'static Spin<dyn ReadyQueue> {
         .expect("ReadyQueue should be initialized")
 }
 
-pub fn init_rq_thiscpu() {
+pub fn init_local_rq() {
     READYQUEUE.set(Some(Spin::new(FifoReadyQueue::new())));
 }

+ 13 - 6
src/kernel/task/task/runnable.rs → crates/eonix_runtime/src/run.rs

@@ -1,4 +1,7 @@
+mod future_run;
+
 use core::{pin::Pin, task::Waker};
+pub use future_run::FutureRun;
 
 pub enum RunState<Output> {
     Running,
@@ -7,11 +10,15 @@ pub enum RunState<Output> {
 
 pub trait Contexted {
     /// # Safety
-    /// This function will be called in a preemption disabled context.
-    fn load_running_context(&mut self);
+    /// This function should be called in a preemption disabled context.
+    fn load_running_context(&self) {}
+
+    /// # Safety
+    /// This function should be called in a preemption disabled context.
+    fn restore_running_context(&self) {}
 }
 
-pub trait Runnable {
+pub trait Run {
     type Output;
 
     fn run(&mut self, waker: &Waker) -> RunState<Self::Output>;
@@ -26,7 +33,7 @@ pub trait Runnable {
     }
 }
 
-pub trait PinRunnable {
+pub trait PinRun {
     type Output;
 
     fn pinned_run(self: Pin<&mut Self>, waker: &Waker) -> RunState<Self::Output>;
@@ -41,9 +48,9 @@ pub trait PinRunnable {
     }
 }
 
-impl<R> Runnable for R
+impl<R> Run for R
 where
-    R: PinRunnable + Unpin,
+    R: PinRun + Unpin,
 {
     type Output = R::Output;
 

+ 34 - 0
crates/eonix_runtime/src/run/future_run.rs

@@ -0,0 +1,34 @@
+use super::{Contexted, PinRun, RunState};
+use core::{
+    pin::Pin,
+    task::{Context, Poll, Waker},
+};
+
+pub struct FutureRun<F: Future>(F);
+
+impl<F> FutureRun<F>
+where
+    F: Future,
+{
+    pub const fn new(future: F) -> Self {
+        Self(future)
+    }
+}
+
+impl<F> Contexted for FutureRun<F> where F: Future {}
+impl<F> PinRun for FutureRun<F>
+where
+    F: Future + 'static,
+{
+    type Output = F::Output;
+
+    fn pinned_run(self: Pin<&mut Self>, waker: &Waker) -> RunState<Self::Output> {
+        let mut future = unsafe { self.map_unchecked_mut(|me| &mut me.0) };
+        let mut context = Context::from_waker(waker);
+
+        match future.as_mut().poll(&mut context) {
+            Poll::Ready(output) => RunState::Finished(output),
+            Poll::Pending => RunState::Running,
+        }
+    }
+}
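
`FutureRun` is the bridge that lets an ordinary `async` block flow through the `PinRun`/`Contexted` machinery; wrapping one is a one-liner (a sketch, assuming a `'static` future):

    use core::future::Future;
    use eonix_runtime::run::FutureRun;

    async fn answer() -> u32 {
        40 + 2
    }

    fn as_runnable() -> FutureRun<impl Future<Output = u32>> {
        // The wrapper implements `PinRun` (and a no-op `Contexted`),
        // so it can be handed straight to the scheduler.
        FutureRun::new(answer())
    }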

+ 299 - 0
crates/eonix_runtime/src/scheduler.rs

@@ -0,0 +1,299 @@
+use crate::{
+    context::ExecutionContext,
+    executor::{ExecuteStatus, OutputHandle, Stack},
+    ready_queue::{init_local_rq, local_rq, ReadyQueue},
+    run::{Contexted, PinRun},
+    task::{Task, TaskAdapter, TaskHandle},
+};
+use alloc::sync::Arc;
+use core::{
+    future::Future,
+    mem::forget,
+    pin::Pin,
+    ptr::NonNull,
+    sync::atomic::{compiler_fence, Ordering},
+    task::{Context, Poll, Waker},
+};
+use eonix_log::println_trace;
+use eonix_preempt::assert_preempt_count_eq;
+use eonix_sync::Spin;
+use intrusive_collections::RBTree;
+use lazy_static::lazy_static;
+use pointers::BorrowedArc;
+
+#[arch::define_percpu]
+static CURRENT_TASK: Option<NonNull<Task>> = None;
+
+#[arch::define_percpu]
+static LOCAL_SCHEDULER_CONTEXT: ExecutionContext = ExecutionContext::new();
+
+lazy_static! {
+    static ref TASKS: Spin<RBTree<TaskAdapter>> = Spin::new(RBTree::new(TaskAdapter::new()));
+}
+
+pub struct Scheduler;
+
+pub struct JoinHandle<Output>(Arc<Spin<OutputHandle<Output>>>)
+where
+    Output: Send;
+
+impl Task {
+    pub fn current<'a>() -> BorrowedArc<'a, Task> {
+        unsafe {
+            // SAFETY:
+            // We should never "inspect" a change in `current`.
+            // The change of `CURRENT` will only happen in the scheduler. And if we are preempted,
+            // when we DO return, the `CURRENT` will be the same and remain valid.
+            BorrowedArc::from_raw(CURRENT_TASK.get().expect("Current task should be present"))
+        }
+    }
+}
+
+impl<O> JoinHandle<O>
+where
+    O: Send,
+{
+    pub fn join(self) -> O {
+        let Self(output) = self;
+        let mut waker = Some(Waker::from(Task::current().clone()));
+
+        loop {
+            let mut locked = output.lock();
+            match locked.try_resolve() {
+                Some(output) => break output,
+                None => {
+                    if let Some(waker) = waker.take() {
+                        locked.register_waiter(waker);
+                    }
+                }
+            }
+        }
+    }
+}
+
+impl Scheduler {
+    /// `Scheduler` might be used in various places. Do not hold it for a long time.
+    ///
+    /// # Safety
+    /// The lock returned by this function should be locked with `lock_irq` to prevent
+    /// rescheduling during access to the scheduler. Disabling preemption will do the same.
+    ///
+    /// Drop the lock before calling `schedule`.
+    pub fn get() -> &'static Self {
+        static GLOBAL_SCHEDULER: Scheduler = Scheduler;
+        &GLOBAL_SCHEDULER
+    }
+
+    pub fn init_local_scheduler<S>()
+    where
+        S: Stack,
+    {
+        init_local_rq();
+
+        let stack = S::new();
+
+        unsafe {
+            eonix_preempt::disable();
+            // SAFETY: Preemption is disabled.
+            let context: &mut ExecutionContext = LOCAL_SCHEDULER_CONTEXT.as_mut();
+            context.set_ip(local_scheduler as _);
+            context.set_sp(stack.get_bottom() as *const _ as usize);
+            eonix_preempt::enable();
+        }
+
+        // We don't need to keep the stack around.
+        forget(stack);
+    }
+
+    /// # Safety
+    /// This function must not be called inside the scheduler context.
+    ///
+    /// The caller must ensure that `preempt::count` == 1.
+    pub unsafe fn go_from_scheduler(to: &ExecutionContext) {
+        // SAFETY: Preemption is disabled.
+        unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref() }.switch_to(to);
+    }
+
+    /// # Safety
+    /// This function must not be called inside the scheduler context.
+    ///
+    /// The caller must ensure that `preempt::count` == 1.
+    pub unsafe fn goto_scheduler(from: &ExecutionContext) {
+        // SAFETY: Preemption is disabled.
+        from.switch_to(unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref() });
+    }
+
+    /// # Safety
+    /// This function must not be called inside the scheduler context.
+    ///
+    /// The caller must ensure that `preempt::count` == 1.
+    pub unsafe fn goto_scheduler_noreturn() -> ! {
+        // SAFETY: Preemption is disabled.
+        unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref().switch_noreturn() }
+    }
+
+    fn add_task(task: Arc<Task>) {
+        TASKS.lock().insert(task);
+    }
+
+    fn remove_task(task: &Task) {
+        unsafe { TASKS.lock().cursor_mut_from_ptr(task as *const _).remove() };
+    }
+
+    fn select_rq_for_task(&self, _task: &Task) -> &'static Spin<dyn ReadyQueue> {
+        // TODO: Select an appropriate ready queue.
+        local_rq()
+    }
+
+    pub fn activate(&self, task: &Arc<Task>) {
+        if !task.on_rq.swap(true, Ordering::AcqRel) {
+            let rq = self.select_rq_for_task(&task);
+            rq.lock_irq().put(task.clone());
+        }
+    }
+
+    pub fn spawn<S, R>(&self, runnable: R) -> JoinHandle<R::Output>
+    where
+        S: Stack + 'static,
+        R: PinRun + Contexted + Send + 'static,
+        R::Output: Send + 'static,
+    {
+        let TaskHandle {
+            task,
+            output_handle,
+        } = Task::new::<S, _>(runnable);
+
+        Self::add_task(task.clone());
+        self.activate(&task);
+
+        JoinHandle(output_handle)
+    }
+
+    /// Go to idle task. Call this with `preempt_count == 1`.
+    /// The preempt count will be decremented by this function.
+    ///
+    /// # Safety
+    /// We might never return from here.
+    /// Drop all variables that take ownership of some resource before calling this function.
+    pub fn schedule() {
+        assert_preempt_count_eq!(1, "Scheduler::schedule");
+
+        // Make sure all works are done before scheduling.
+        compiler_fence(Ordering::SeqCst);
+
+        // TODO!!!!!: Use of reference here needs further consideration.
+        //
+        // Since we might never return to here, we can't take ownership of `current()`.
+        // Is it safe to believe that `current()` will never change across calls?
+        unsafe {
+            // SAFETY: Preemption is disabled.
+            Scheduler::goto_scheduler(&Task::current().execution_context);
+        }
+        eonix_preempt::enable();
+    }
+
+    pub async fn yield_now() {
+        struct Yield(bool);
+
+        impl Future for Yield {
+            type Output = ();
+
+            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+                match *self {
+                    Yield(true) => Poll::Ready(()),
+                    Yield(false) => {
+                        self.set(Yield(true));
+                        cx.waker().wake_by_ref();
+                        Poll::Pending
+                    }
+                }
+            }
+        }
+
+        Yield(false).await
+    }
+
+    pub async fn sleep() {
+        struct Sleep(bool);
+
+        impl Future for Sleep {
+            type Output = ();
+
+            fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
+                match *self {
+                    Sleep(true) => Poll::Ready(()),
+                    Sleep(false) => {
+                        self.set(Sleep(true));
+                        Poll::Pending
+                    }
+                }
+            }
+        }
+
+        Sleep(false).await
+    }
+}
+
+extern "C" fn local_scheduler() -> ! {
+    loop {
+        assert_preempt_count_eq!(1, "Scheduler::idle_task");
+        let previous_task = CURRENT_TASK
+            .get()
+            .map(|ptr| unsafe { Arc::from_raw(ptr.as_ptr()) });
+        let next_task = local_rq().lock().get();
+
+        match (previous_task, next_task) {
+            (None, None) => {
+                // Nothing to do, halt the cpu and rerun the loop.
+                arch::halt();
+                continue;
+            }
+            (None, Some(next)) => {
+                CURRENT_TASK.set(NonNull::new(Arc::into_raw(next) as *mut _));
+            }
+            (Some(previous), None) if previous.is_runnable() => {
+                // Previous thread is `Running`, return to the current running thread.
+                println_trace!(
+                    "trace_scheduler",
+                    "Returning to task id({}) without doing a context switch",
+                    previous.id
+                );
+
+                CURRENT_TASK.set(NonNull::new(Arc::into_raw(previous) as *mut _));
+            }
+            (Some(previous), None) => {
+                // Nothing to do, halt the cpu and rerun the loop.
+                CURRENT_TASK.set(NonNull::new(Arc::into_raw(previous) as *mut _));
+                arch::halt();
+                continue;
+            }
+            (Some(previous), Some(next)) => {
+                println_trace!(
+                    "trace_scheduler",
+                    "Switching from task id({}) to task id({})",
+                    previous.id,
+                    next.id
+                );
+
+                debug_assert_ne!(previous.id, next.id, "Switching to the same task");
+
+                let mut rq = local_rq().lock();
+                if previous.is_runnable() {
+                    rq.put(previous);
+                } else {
+                    // TODO!!!!!!!!!: There is a race condition here: if we reach this point
+                    // while another thread is waking the task up, it might read `on_rq` == true,
+                    // so the task will never be woken up.
+                    previous.on_rq.store(false, Ordering::Release);
+                }
+
+                CURRENT_TASK.set(NonNull::new(Arc::into_raw(next) as *mut _));
+            }
+        }
+
+        // TODO: We can move the release of finished tasks to some worker thread.
+        if let ExecuteStatus::Finished = Task::current().run() {
+            Scheduler::remove_task(&Task::current());
+        }
+    }
+}
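
Putting the pieces together, spawning a future and waiting for its result might look like the sketch below. The stack type is left generic because the runtime only asks for something implementing `Stack`; the kernel side is expected to supply its own (its `KernelStack`, presumably).

    use eonix_runtime::{executor::Stack, run::FutureRun, scheduler::Scheduler};

    fn spawn_and_join<S: Stack + 'static>() -> u32 {
        let handle = Scheduler::get().spawn::<S, _>(FutureRun::new(async { 40 + 2 }));

        // `join` spins on the task's `OutputHandle` until the executor commits the output.
        handle.join()
    }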

+ 171 - 0
crates/eonix_runtime/src/task.rs

@@ -0,0 +1,171 @@
+mod adapter;
+mod task_state;
+
+use crate::{
+    context::ExecutionContext,
+    executor::{ExecuteStatus, Executor, ExecutorBuilder, OutputHandle, Stack},
+    run::{Contexted, PinRun},
+    scheduler::Scheduler,
+};
+use alloc::{boxed::Box, sync::Arc, task::Wake};
+use atomic_unique_refcell::AtomicUniqueRefCell;
+use core::{
+    pin::Pin,
+    sync::atomic::{AtomicBool, AtomicU32, Ordering},
+    task::Waker,
+};
+use eonix_sync::Spin;
+use intrusive_collections::RBTreeAtomicLink;
+use task_state::TaskState;
+
+pub use adapter::TaskAdapter;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct TaskId(u32);
+
+pub struct UniqueWaker(Arc<Task>);
+
+pub struct TaskHandle<Output>
+where
+    Output: Send,
+{
+    pub(crate) task: Arc<Task>,
+    pub(crate) output_handle: Arc<Spin<OutputHandle<Output>>>,
+}
+
+/// A `Task` represents a schedulable unit.
+pub struct Task {
+    /// Unique identifier of the task.
+    pub id: TaskId,
+    /// Whether the task is on some run queue.
+    pub(crate) on_rq: AtomicBool,
+    /// Task execution context.
+    pub(crate) execution_context: ExecutionContext,
+    /// Executor object.
+    executor: AtomicUniqueRefCell<Option<Pin<Box<dyn Executor>>>>,
+    /// Task state.
+    state: TaskState,
+    /// Link in the global task list.
+    link_task_list: RBTreeAtomicLink,
+}
+
+impl<Output> TaskHandle<Output>
+where
+    Output: Send,
+{
+    pub fn waker(&self) -> Waker {
+        Waker::from(self.task.clone())
+    }
+}
+
+impl Task {
+    pub fn new<S, R>(runnable: R) -> TaskHandle<R::Output>
+    where
+        S: Stack + 'static,
+        R: PinRun + Contexted + Send + 'static,
+        R::Output: Send + 'static,
+    {
+        static ID: AtomicU32 = AtomicU32::new(0);
+
+        let (executor, execution_context, output) = ExecutorBuilder::new()
+            .stack(S::new())
+            .runnable(runnable)
+            .build();
+
+        let task = Arc::new(Self {
+            id: TaskId(ID.fetch_add(1, Ordering::Relaxed)),
+            on_rq: AtomicBool::new(false),
+            executor: AtomicUniqueRefCell::new(Some(executor)),
+            execution_context,
+            state: TaskState::new(TaskState::RUNNING),
+            link_task_list: RBTreeAtomicLink::new(),
+        });
+
+        TaskHandle {
+            task,
+            output_handle: output,
+        }
+    }
+
+    pub fn is_runnable(&self) -> bool {
+        self.state.is_runnable()
+    }
+
+    pub(super) fn set_usleep(&self) {
+        let prev_state = self.state.swap(TaskState::USLEEP);
+        assert_eq!(
+            prev_state,
+            TaskState::RUNNING,
+            "Trying to set task {} to usleep while it is not running",
+            self.id.0
+        );
+    }
+
+    pub fn usleep(self: &Arc<Self>) -> Arc<UniqueWaker> {
+        // No need to dequeue. We have proved that the task is running so not in the queue.
+        self.set_usleep();
+
+        Arc::new(UniqueWaker(self.clone()))
+    }
+
+    pub fn isleep(self: &Arc<Self>) -> Arc<Self> {
+        // No need to dequeue. We have proved that the task is running so not in the queue.
+        let prev_state = self.state.cmpxchg(TaskState::RUNNING, TaskState::ISLEEP);
+
+        assert_eq!(
+            prev_state,
+            TaskState::RUNNING,
+            "Trying to sleep task {} that is not running",
+            self.id.0
+        );
+
+        self.clone()
+    }
+
+    pub fn run(&self) -> ExecuteStatus {
+        let mut executor_borrow = self.executor.borrow();
+
+        let executor = executor_borrow
+            .as_ref()
+            .expect("Executor should be present")
+            .as_ref()
+            .get_ref();
+
+        if let ExecuteStatus::Finished = executor.progress() {
+            executor_borrow.take();
+            self.set_usleep();
+            ExecuteStatus::Finished
+        } else {
+            ExecuteStatus::Executing
+        }
+    }
+}
+
+impl Wake for Task {
+    fn wake(self: Arc<Self>) {
+        self.wake_by_ref();
+    }
+
+    fn wake_by_ref(self: &Arc<Self>) {
+        match self.state.cmpxchg(TaskState::ISLEEP, TaskState::RUNNING) {
+            TaskState::RUNNING | TaskState::USLEEP => return,
+            TaskState::ISLEEP => Scheduler::get().activate(self),
+            state => panic!("Invalid transition from state {:?} to `Running`", state),
+        }
+    }
+}
+
+impl Wake for UniqueWaker {
+    fn wake(self: Arc<Self>) {
+        self.wake_by_ref();
+    }
+
+    fn wake_by_ref(self: &Arc<Self>) {
+        let Self(task) = &**self;
+
+        let prev_state = task.state.swap(TaskState::RUNNING);
+        assert_eq!(prev_state, TaskState::USLEEP);
+
+        Scheduler::get().activate(task);
+    }
+}

+ 13 - 0
crates/eonix_runtime/src/task/adapter.rs

@@ -0,0 +1,13 @@
+use alloc::sync::Arc;
+use intrusive_collections::{intrusive_adapter, KeyAdapter, RBTreeAtomicLink};
+
+use super::{Task, TaskId};
+
+intrusive_adapter!(pub TaskAdapter = Arc<Task>: Task { link_task_list: RBTreeAtomicLink });
+
+impl<'a> KeyAdapter<'a> for TaskAdapter {
+    type Key = TaskId;
+    fn get_key(&self, task: &'a Task) -> Self::Key {
+        task.id
+    }
+}

+ 28 - 0
crates/eonix_runtime/src/task/task_state.rs

@@ -0,0 +1,28 @@
+use core::sync::atomic::{AtomicU32, Ordering};
+
+#[derive(Debug)]
+pub struct TaskState(AtomicU32);
+
+impl TaskState {
+    pub const RUNNING: u32 = 0;
+    pub const ISLEEP: u32 = 1;
+    pub const USLEEP: u32 = 2;
+
+    pub const fn new(state: u32) -> Self {
+        Self(AtomicU32::new(state))
+    }
+
+    pub fn swap(&self, state: u32) -> u32 {
+        self.0.swap(state, Ordering::AcqRel)
+    }
+
+    pub fn cmpxchg(&self, current: u32, new: u32) -> u32 {
+        self.0
+            .compare_exchange(current, new, Ordering::AcqRel, Ordering::Relaxed)
+            .unwrap_or_else(|x| x)
+    }
+
+    pub fn is_runnable(&self) -> bool {
+        self.0.load(Ordering::Acquire) == Self::RUNNING
+    }
+}

+ 7 - 0
crates/eonix_sync/Cargo.toml

@@ -0,0 +1,7 @@
+[package]
+name = "eonix_sync"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+eonix_preempt = { path = "../eonix_preempt" }

+ 159 - 0
crates/eonix_sync/src/guard.rs

@@ -0,0 +1,159 @@
+use crate::{Lock, LockStrategy};
+use core::{
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+    ptr,
+};
+
+pub struct Guard<'a, T, S, L, const WRITE: bool = true>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    pub(crate) lock: &'a Lock<T, L>,
+    pub(crate) strategy_data: &'a S::StrategyData,
+    pub(crate) context: S::GuardContext,
+}
+
+pub struct UnlockedGuard<'a, T, S, L, const WRITE: bool = true>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    pub(crate) lock: &'a Lock<T, L>,
+    pub(crate) strategy_data: &'a S::StrategyData,
+    pub(crate) context: S::GuardContext,
+}
+
+impl<'a, T, S, L, const W: bool> Guard<'a, T, S, L, W>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    #[must_use = "The returned `UnlockedGuard` must be used to relock the lock."]
+    pub fn unlock(mut self) -> UnlockedGuard<'a, T, S, L, W> {
+        unsafe { S::do_temporary_unlock(&self.strategy_data, &mut self.context) }
+
+        UnlockedGuard {
+            lock: self.lock,
+            strategy_data: self.strategy_data,
+            context: {
+                let me = ManuallyDrop::new(self);
+                // SAFETY: We are using `ManuallyDrop` to prevent the destructor from running.
+                unsafe { ptr::read(&me.context) }
+            },
+        }
+    }
+
+    /// # Safety
+    /// This function is unsafe because it allows you to unlock the lock without
+    /// dropping the guard. Using the guard after calling this function is
+    /// undefined behavior.
+    pub unsafe fn force_unlock(&mut self) {
+        unsafe { S::do_temporary_unlock(&self.strategy_data, &mut self.context) }
+    }
+
+    /// # Safety
+    /// Calling this function twice on a force unlocked guard will cause deadlocks.
+    pub unsafe fn force_relock(&mut self) {
+        unsafe { S::do_relock(&self.strategy_data, &mut self.context) }
+    }
+}
+
+impl<'a, T, S, L, const W: bool> UnlockedGuard<'a, T, S, L, W>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    #[must_use = "Throwing away the relocked guard is pointless."]
+    pub fn relock(mut self) -> Guard<'a, T, S, L, W> {
+        unsafe { S::do_relock(&self.strategy_data, &mut self.context) }
+
+        Guard {
+            lock: self.lock,
+            strategy_data: self.strategy_data,
+            context: {
+                let me = ManuallyDrop::new(self);
+                // SAFETY: We are using `ManuallyDrop` to prevent the destructor from running.
+                unsafe { ptr::read(&me.context) }
+            },
+        }
+    }
+}
+
+impl<T, S, L, const W: bool> Deref for Guard<'_, T, S, L, W>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.lock.value.get() }
+    }
+}
+
+impl<T, S, L> DerefMut for Guard<'_, T, S, L, true>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        unsafe { &mut *self.lock.value.get() }
+    }
+}
+
+impl<T, S, L, const WRITE: bool> AsRef<T> for Guard<'_, T, S, L, WRITE>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    fn as_ref(&self) -> &T {
+        unsafe { &*self.lock.value.get() }
+    }
+}
+
+impl<T, S, L> AsMut<T> for Guard<'_, T, S, L, true>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    fn as_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.lock.value.get() }
+    }
+}
+
+impl<T, S, L, const WRITE: bool> Drop for UnlockedGuard<'_, T, S, L, WRITE>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    fn drop(&mut self) {
+        // SAFETY: If we are stubborn enough to drop the unlocked guard, relock it and
+        //         then unlock it again to prevent anything weird from happening.
+        unsafe {
+            S::do_relock(&self.strategy_data, &mut self.context);
+            S::do_unlock(&self.strategy_data, &mut self.context);
+        }
+    }
+}
+
+impl<T, S, L, const WRITE: bool> Drop for Guard<'_, T, S, L, WRITE>
+where
+    T: ?Sized,
+    S: LockStrategy,
+    L: LockStrategy,
+{
+    fn drop(&mut self) {
+        unsafe { S::do_unlock(&self.strategy_data, &mut self.context) }
+    }
+}

+ 13 - 0
crates/eonix_sync/src/lib.rs

@@ -0,0 +1,13 @@
+#![no_std]
+
+mod guard;
+mod lock;
+mod spin;
+mod strategy;
+
+pub use guard::Guard;
+pub use lock::Lock;
+pub use spin::{IrqStrategy, SpinStrategy};
+pub use strategy::LockStrategy;
+
+pub type Spin<T> = Lock<T, SpinStrategy>;

+ 139 - 0
crates/eonix_sync/src/lock.rs

@@ -0,0 +1,139 @@
+use super::{spin::IrqStrategy, strategy::LockStrategy};
+use crate::Guard;
+use core::{cell::UnsafeCell, fmt};
+
+pub struct Lock<T, S>
+where
+    T: ?Sized,
+    S: LockStrategy,
+{
+    pub(crate) strategy_data: S::StrategyData,
+    pub(crate) value: UnsafeCell<T>,
+}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         the lock itself is also able to be shared between threads.
+unsafe impl<T, S> Send for Lock<T, S>
+where
+    T: ?Sized + Send,
+    S: LockStrategy,
+{
+}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         the lock will provide synchronization between threads.
+unsafe impl<T, S> Sync for Lock<T, S>
+where
+    T: ?Sized + Send,
+    S: LockStrategy,
+{
+}
+
+impl<T, S> Lock<T, S>
+where
+    S: LockStrategy,
+{
+    #[inline(always)]
+    pub fn new(value: T) -> Self {
+        Self {
+            strategy_data: S::new_data(),
+            value: UnsafeCell::new(value),
+        }
+    }
+}
+
+impl<T, S> Lock<T, S>
+where
+    T: ?Sized,
+    S: LockStrategy,
+{
+    pub fn is_locked(&self) -> bool {
+        unsafe { S::is_locked(&self.strategy_data) }
+    }
+
+    pub fn try_lock(&self) -> Option<Guard<T, S, S>> {
+        if !unsafe { S::is_locked(&self.strategy_data) } {
+            unsafe { S::try_lock(&self.strategy_data) }.map(|context| Guard {
+                lock: self,
+                strategy_data: &self.strategy_data,
+                context,
+            })
+        } else {
+            None
+        }
+    }
+
+    pub fn lock(&self) -> Guard<T, S, S> {
+        Guard {
+            lock: self,
+            strategy_data: &self.strategy_data,
+            context: unsafe { S::do_lock(&self.strategy_data) },
+        }
+    }
+
+    pub fn lock_irq(&self) -> Guard<T, IrqStrategy<S>, S> {
+        Guard {
+            lock: self,
+            strategy_data: &self.strategy_data,
+            context: unsafe { IrqStrategy::<S>::do_lock(&self.strategy_data) },
+        }
+    }
+
+    pub fn lock_shared(&self) -> Guard<T, S, S, false> {
+        Guard {
+            lock: self,
+            strategy_data: &self.strategy_data,
+            context: unsafe { S::do_lock_shared(&self.strategy_data) },
+        }
+    }
+
+    pub fn lock_shared_irq(&self) -> Guard<T, IrqStrategy<S>, S, false> {
+        Guard {
+            lock: self,
+            strategy_data: &self.strategy_data,
+            context: unsafe { IrqStrategy::<S>::do_lock(&self.strategy_data) },
+        }
+    }
+
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.value.get() }
+    }
+}
+
+impl<T, S> fmt::Debug for Lock<T, S>
+where
+    T: fmt::Debug,
+    S: LockStrategy,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Lock")
+            .field("locked_value", &self.value)
+            .finish()
+    }
+}
+
+impl<T, S> Clone for Lock<T, S>
+where
+    T: Clone,
+    S: LockStrategy,
+{
+    fn clone(&self) -> Self {
+        Self {
+            strategy_data: S::new_data(),
+            value: UnsafeCell::new(self.lock_shared().clone()),
+        }
+    }
+}
+
+impl<T, S> Default for Lock<T, S>
+where
+    T: Default,
+    S: LockStrategy,
+{
+    fn default() -> Self {
+        Self {
+            strategy_data: S::new_data(),
+            value: Default::default(),
+        }
+    }
+}

+ 44 - 52
src/sync/spin.rs → crates/eonix_sync/src/spin.rs

@@ -1,14 +1,14 @@
+use super::strategy::LockStrategy;
 use core::{
     arch::asm,
+    marker::PhantomData,
     sync::atomic::{AtomicBool, Ordering},
 };
 
-use super::{preempt, strategy::LockStrategy};
-
 pub struct SpinStrategy;
+pub struct IrqStrategy<Strategy: LockStrategy>(PhantomData<Strategy>);
 
 impl SpinStrategy {
-    #[inline(always)]
     fn is_locked(data: &<Self as LockStrategy>::StrategyData) -> bool {
         data.load(Ordering::Relaxed)
     }
@@ -18,20 +18,17 @@ unsafe impl LockStrategy for SpinStrategy {
     type StrategyData = AtomicBool;
     type GuardContext = ();
 
-    #[inline(always)]
-    fn data() -> Self::StrategyData {
+    fn new_data() -> Self::StrategyData {
         AtomicBool::new(false)
     }
 
-    #[inline(always)]
     unsafe fn is_locked(data: &Self::StrategyData) -> bool {
         data.load(Ordering::Relaxed)
     }
 
-    #[inline(always)]
     unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
         use Ordering::{Acquire, Relaxed};
-        preempt::disable();
+        eonix_preempt::disable();
 
         if data.compare_exchange(false, true, Acquire, Relaxed).is_ok() {
             Some(())
@@ -40,10 +37,9 @@ unsafe impl LockStrategy for SpinStrategy {
         }
     }
 
-    #[inline(always)]
     unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
         use Ordering::{Acquire, Relaxed};
-        preempt::disable();
+        eonix_preempt::disable();
 
         while data
             .compare_exchange_weak(false, true, Acquire, Relaxed)
@@ -55,76 +51,72 @@ unsafe impl LockStrategy for SpinStrategy {
         }
     }
 
-    #[inline(always)]
     unsafe fn do_unlock(data: &Self::StrategyData, _: &mut Self::GuardContext) {
         data.store(false, Ordering::Release);
-        preempt::enable();
+        eonix_preempt::enable();
     }
 }
 
-pub struct IrqStrategy<Strategy: LockStrategy> {
-    _phantom: core::marker::PhantomData<Strategy>,
-}
-
 unsafe impl<Strategy: LockStrategy> LockStrategy for IrqStrategy<Strategy> {
     type StrategyData = Strategy::StrategyData;
     type GuardContext = (Strategy::GuardContext, usize);
 
-    #[inline(always)]
-    fn data() -> Self::StrategyData {
-        Strategy::data()
+    fn new_data() -> Self::StrategyData {
+        Strategy::new_data()
     }
 
-    #[inline(always)]
     unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
         let mut context: usize;
-        asm!(
-            "pushf",
-            "pop {context}",
-            "cli",
-            context = out(reg) context,
-        );
-
-        (Strategy::do_lock(data), context)
+
+        unsafe {
+            asm!(
+                "pushf",
+                "pop {context}",
+                "cli",
+                context = out(reg) context,
+            );
+        }
+
+        unsafe { (Strategy::do_lock(data), context) }
     }
 
-    #[inline(always)]
     unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        Strategy::do_unlock(data, &mut context.0);
-
-        asm!(
-            "push {context}",
-            "popf",
-            context = in(reg) context.1,
-            options(nomem),
-        )
+        unsafe {
+            Strategy::do_unlock(data, &mut context.0);
+
+            asm!(
+                "push {context}",
+                "popf",
+                context = in(reg) context.1,
+                options(nomem),
+            )
+        }
     }
 
-    #[inline(always)]
     unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        Strategy::do_unlock(data, &mut context.0)
+        unsafe { Strategy::do_unlock(data, &mut context.0) }
     }
 
-    #[inline(always)]
     unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        Strategy::do_relock(data, &mut context.0);
+        unsafe { Strategy::do_relock(data, &mut context.0) }
     }
 
-    #[inline(always)]
     unsafe fn is_locked(data: &Self::StrategyData) -> bool {
-        Strategy::is_locked(data)
+        unsafe { Strategy::is_locked(data) }
     }
 
-    #[inline(always)]
     unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
         let mut irq_context: usize;
-        asm!(
-            "pushf",
-            "pop {context}",
-            "cli",
-            context = out(reg) irq_context,
-        );
-
-        Strategy::try_lock(data).map(|lock_context| (lock_context, irq_context))
+        unsafe {
+            asm!(
+                "pushf",
+                "pop {context}",
+                "cli",
+                context = out(reg) irq_context,
+            );
+        }
+
+        let lock_context = unsafe { Strategy::try_lock(data) };
+        lock_context.map(|lock_context| (lock_context, irq_context))
     }
 }

+ 45 - 0
crates/eonix_sync/src/strategy.rs

@@ -0,0 +1,45 @@
+pub unsafe trait LockStrategy {
+    type StrategyData;
+    type GuardContext;
+
+    fn new_data() -> Self::StrategyData
+    where
+        Self: Sized;
+
+    unsafe fn is_locked(data: &Self::StrategyData) -> bool
+    where
+        Self: Sized;
+
+    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext>
+    where
+        Self: Sized;
+
+    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext
+    where
+        Self: Sized;
+
+    unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext)
+    where
+        Self: Sized;
+
+    unsafe fn do_lock_shared(data: &Self::StrategyData) -> Self::GuardContext
+    where
+        Self: Sized,
+    {
+        unsafe { Self::do_lock(data) }
+    }
+
+    unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext)
+    where
+        Self: Sized,
+    {
+        unsafe { Self::do_unlock(data, context) }
+    }
+
+    unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext)
+    where
+        Self: Sized,
+    {
+        *context = unsafe { Self::do_lock(data) };
+    }
+}
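
To make the contract concrete, here is a deliberately trivial, purely illustrative strategy that satisfies the trait shape but provides no real mutual exclusion (a real implementation must guarantee exclusivity between `do_lock` and `do_unlock`); the shared and temporary-unlock hooks fall back to the defaults above.

    use eonix_sync::LockStrategy;

    pub struct NoOpStrategy;

    // SAFETY: Illustration only. A real strategy must actually provide
    // mutual exclusion between `do_lock` and `do_unlock`.
    unsafe impl LockStrategy for NoOpStrategy {
        type StrategyData = ();
        type GuardContext = ();

        fn new_data() -> Self::StrategyData {}

        unsafe fn is_locked(_data: &Self::StrategyData) -> bool {
            false
        }

        unsafe fn try_lock(_data: &Self::StrategyData) -> Option<Self::GuardContext> {
            Some(())
        }

        unsafe fn do_lock(_data: &Self::StrategyData) -> Self::GuardContext {}

        unsafe fn do_unlock(_data: &Self::StrategyData, _context: &mut Self::GuardContext) {}
    }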

+ 6 - 0
crates/pointers/Cargo.toml

@@ -0,0 +1,6 @@
+[package]
+name = "pointers"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]

+ 77 - 0
crates/pointers/src/lib.rs

@@ -0,0 +1,77 @@
+#![no_std]
+
+use alloc::sync::Arc;
+use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref, ptr::NonNull};
+
+extern crate alloc;
+
+/// BorrowedArc is a wrapper around `Arc` that allows us to create an `Arc` from a raw pointer
+/// that was created by `Arc::into_raw` when we are confident that the original `Arc`
+/// will remain valid for the whole lifetime of the `BorrowedArc`.
+///
+/// # Example
+///
+/// ```
+/// extern crate alloc;
+///
+/// use alloc::sync::Arc;
+/// use core::ptr::NonNull;
+/// use pointers::BorrowedArc;
+///
+/// let arc = Arc::new(42);
+/// let ptr = NonNull::new(Arc::into_raw(arc.clone()) as *mut i32).unwrap();
+///
+/// // We know that the original `Arc` is still valid.
+/// let borrowed_arc = unsafe { BorrowedArc::from_raw(ptr) };
+///
+/// let arc_reference: &Arc<i32> = &borrowed_arc;
+/// assert_eq!(**arc_reference, 42);
+/// ```
+pub struct BorrowedArc<'a, T: ?Sized> {
+    arc: ManuallyDrop<Arc<T>>,
+    _phantom: PhantomData<&'a ()>,
+}
+
+impl<'a, T: ?Sized> BorrowedArc<'a, T> {
+    /// # Safety
+    /// If `ptr` is not a valid pointer to an `Arc<T>`, this will lead to undefined behavior.
+    ///
+    /// If the `Arc<T>` is dropped while `BorrowedArc` is still in use, this will lead
+    /// to undefined behavior.
+    pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+        Self {
+            arc: ManuallyDrop::new(unsafe { Arc::from_raw(ptr.as_ptr()) }),
+            _phantom: PhantomData,
+        }
+    }
+
+    #[allow(dead_code)]
+    pub fn new(ptr: &'a *const T) -> Self {
+        assert!(!ptr.is_null());
+        Self {
+            arc: ManuallyDrop::new(unsafe { Arc::from_raw(*ptr) }),
+            _phantom: PhantomData,
+        }
+    }
+
+    pub fn borrow(&self) -> &'a T {
+        let reference: &T = &self.arc;
+        let ptr = reference as *const T;
+
+        // SAFETY: `ptr` is a valid pointer to `T` because `reference` is a valid reference to `T`.
+        // `ptr` is also guaranteed to be valid for the lifetime `'lt` because it is derived from
+        // `self.arc` which is guaranteed to be valid for the lifetime `'lt`.
+        unsafe { ptr.as_ref().unwrap() }
+    }
+}
+
+impl<'a, T: ?Sized> Deref for BorrowedArc<'a, T> {
+    type Target = Arc<T>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.arc
+    }
+}
+
+impl<'a, T: ?Sized> AsRef<Arc<T>> for BorrowedArc<'a, T> {
+    fn as_ref(&self) -> &Arc<T> {
+        &self.arc
+    }
+}

+ 2 - 1
src/driver/ahci/port.rs

@@ -1,5 +1,6 @@
 use alloc::collections::vec_deque::VecDeque;
 use bindings::{EINVAL, EIO};
+use eonix_preempt::assert_preempt_enabled;
 
 use crate::prelude::*;
 
@@ -286,7 +287,7 @@ impl AdapterPort {
     /// # Might Sleep
     /// This function **might sleep**, so call it in a preemptible context
     fn send_command(&self, cmd: &impl Command) -> KResult<()> {
-        might_sleep!();
+        assert_preempt_enabled!("AdapterPort::send_command");
 
         let pages = cmd.pages();
         let cmdtable_page = Page::alloc_one();

+ 102 - 37
src/driver/serial.rs

@@ -1,22 +1,33 @@
-use alloc::{format, sync::Arc};
-use bindings::EIO;
-
+use super::Port8;
 use crate::{
     kernel::{
-        block::make_device, interrupt::register_irq_handler, CharDevice, CharDeviceType, Console,
-        Terminal, TerminalDevice,
+        block::make_device, console::set_console, constants::EIO, interrupt::register_irq_handler,
+        task::KernelStack, CharDevice, CharDeviceType, Terminal, TerminalDevice,
     },
     prelude::*,
+    sync::UCondVar,
 };
-
-use super::Port8;
+use alloc::{collections::vec_deque::VecDeque, format, sync::Arc};
+use bitflags::bitflags;
+use eonix_runtime::{run::FutureRun, scheduler::Scheduler};
+
+bitflags! {
+    struct LineStatus: u8 {
+        const RX_READY = 0x01;
+        const TX_READY = 0x20;
+    }
+}
 
 #[allow(dead_code)]
 struct Serial {
     id: u32,
     name: Arc<str>,
 
-    terminal: Option<Arc<Terminal>>,
+    terminal: Spin<Option<Arc<Terminal>>>,
+    cv_worker: UCondVar,
+
+    working: Spin<bool>,
+    tx_buffer: Spin<VecDeque<u8>>,
 
     tx_rx: Port8,
     int_ena: Port8,
@@ -37,14 +48,73 @@ impl Serial {
 
     fn enable_interrupts(&self) {
         // Enable interrupt #0: Received data available
-        self.int_ena.write(0x01);
+        self.int_ena.write(0x03);
+    }
+
+    fn disable_interrupts(&self) {
+        // Disable all serial interrupts.
+        self.int_ena.write(0x00);
+    }
+
+    fn line_status(&self) -> LineStatus {
+        LineStatus::from_bits_truncate(self.line_status.read())
+    }
+
+    async fn wait_for_interrupt(&self) {
+        let mut working = self.working.lock_irq();
+        self.enable_interrupts();
+        *working = false;
+
+        self.cv_worker.async_wait(&mut working).await;
+
+        *working = true;
+        self.disable_interrupts();
+    }
+
+    async fn worker(port: Arc<Self>) {
+        let terminal = port.terminal.lock().clone();
+
+        loop {
+            while port.line_status().contains(LineStatus::RX_READY) {
+                let ch = port.tx_rx.read();
+
+                if let Some(terminal) = terminal.as_ref() {
+                    terminal.commit_char(ch);
+                }
+            }
+
+            let should_wait = {
+                let mut tx_buffer = port.tx_buffer.lock();
+
+                // Drain at most 64 bytes per pass so received data still gets polled in between.
+                let count = tx_buffer.len().min(64);
+                for ch in tx_buffer.drain(..count) {
+                    if port.line_status().contains(LineStatus::TX_READY) {
+                        port.tx_rx.write(ch);
+                    } else {
+                        break;
+                    }
+                }
+
+                tx_buffer.is_empty()
+            };
+
+            if should_wait {
+                port.wait_for_interrupt().await;
+            } else {
+                Scheduler::yield_now().await;
+            }
+        }
     }
 
     pub fn new(id: u32, base_port: u16) -> KResult<Self> {
         let port = Self {
             id,
             name: Arc::from(format!("ttyS{id}")),
-            terminal: None,
+            terminal: Spin::new(None),
+            cv_worker: UCondVar::new(),
+            working: Spin::new(true),
+            tx_buffer: Spin::new(VecDeque::new()),
             tx_rx: Port8::new(base_port),
             int_ena: Port8::new(base_port + 1),
             int_ident: Port8::new(base_port + 2),
@@ -73,24 +143,24 @@ impl Serial {
         Ok(port)
     }
 
-    fn irq_handler(&self) {
-        let terminal = self.terminal.as_ref();
-        while self.line_status.read() & 0x01 != 0 {
-            let ch = self.tx_rx.read();
-
-            if let Some(terminal) = terminal {
-                terminal.commit_char(ch);
-            }
+    fn wakeup_worker(&self) {
+        let working = self.working.lock_irq();
+        if !*working {
+            self.cv_worker.notify_all();
         }
     }
 
+    fn irq_handler(&self) {
+        // Read the interrupt ID register to clear the interrupt.
+        self.int_ident.read();
+        self.wakeup_worker();
+    }
+
     fn register_char_device(port: Self) -> KResult<()> {
-        let mut port = Arc::new(port);
+        let port = Arc::new(port);
         let terminal = Terminal::new(port.clone());
 
-        // TODO!!!!!!: This is unsafe, we should find a way to avoid this.
-        //             Under smp, we should make the publish of terminal atomic.
-        unsafe { Arc::get_mut_unchecked(&mut port) }.terminal = Some(terminal.clone());
+        port.terminal.lock().replace(terminal.clone());
 
         {
             let port = port.clone();
@@ -104,8 +174,11 @@ impl Serial {
                 port.irq_handler();
             })?;
         }
-        port.enable_interrupts();
-        dont_check!(Console::register_terminal(&terminal));
+
+        Scheduler::get().spawn::<KernelStack, _>(FutureRun::new(Self::worker(port.clone())));
+
+        let _ = set_console(terminal.clone());
+        eonix_log::set_console(terminal.clone());
 
         CharDevice::register(
             make_device(4, 64 + port.id),
@@ -119,26 +192,18 @@ impl Serial {
 
 impl TerminalDevice for Serial {
     fn putchar(&self, ch: u8) {
-        loop {
-            // If we poll the status and get the corresponding bit, we should handle the action.
-            let status = self.line_status.read();
-            if status & 0x20 != 0 {
-                self.tx_rx.write(ch);
-                return;
-            }
-        }
+        let mut tx_buffer = self.tx_buffer.lock();
+        tx_buffer.push_back(ch);
+        self.wakeup_worker();
     }
 }
 
 pub fn init() -> KResult<()> {
-    let com0 = Serial::new(0, Serial::COM0_BASE);
-    let com1 = Serial::new(1, Serial::COM1_BASE);
-
-    if let Ok(port) = com0 {
+    if let Ok(port) = Serial::new(0, Serial::COM0_BASE) {
         Serial::register_char_device(port)?;
     }
 
-    if let Ok(port) = com1 {
+    if let Ok(port) = Serial::new(1, Serial::COM1_BASE) {
         Serial::register_char_device(port)?;
     }
 

+ 0 - 1
src/kernel.rs

@@ -18,5 +18,4 @@ mod terminal;
 
 #[allow(unused_imports)]
 pub use chardev::{CharDevice, CharDeviceType, VirtualCharDevice};
-pub use console::Console;
 pub use terminal::{Terminal, TerminalDevice};

+ 10 - 12
src/kernel/chardev.rs

@@ -1,14 +1,7 @@
-use alloc::{
-    boxed::Box,
-    collections::btree_map::{BTreeMap, Entry},
-    sync::Arc,
-};
-use bindings::{EEXIST, EIO};
-
-use crate::{io::Buffer, kernel::console::CONSOLE, prelude::*, sync::AsRefPosition as _};
-
 use super::{
     block::make_device,
+    console::get_console,
+    constants::{EEXIST, EIO},
     task::{ProcessList, Thread},
     terminal::Terminal,
     vfs::{
@@ -16,7 +9,12 @@ use super::{
         DevId,
     },
 };
-
+use crate::{io::Buffer, prelude::*, sync::AsRefPosition as _};
+use alloc::{
+    boxed::Box,
+    collections::btree_map::{BTreeMap, Entry},
+    sync::Arc,
+};
 use lazy_static::lazy_static;
 
 pub trait VirtualCharDevice: Send + Sync {
@@ -120,12 +118,12 @@ impl VirtualCharDevice for ZeroDevice {
 struct ConsoleDevice;
 impl VirtualCharDevice for ConsoleDevice {
     fn read(&self, buffer: &mut dyn Buffer) -> KResult<usize> {
-        let console_terminal = CONSOLE.lock_irq().get_terminal().ok_or(EIO)?;
+        let console_terminal = get_console().ok_or(EIO)?;
         console_terminal.read(buffer)
     }
 
     fn write(&self, data: &[u8]) -> KResult<usize> {
-        let console_terminal = CONSOLE.lock_irq().get_terminal().ok_or(EIO)?;
+        let console_terminal = get_console().ok_or(EIO)?;
         for &ch in data.iter() {
             console_terminal.show_char(ch);
         }

+ 14 - 32
src/kernel/console.rs

@@ -1,48 +1,30 @@
 use crate::prelude::*;
-
 use alloc::sync::Arc;
-use bindings::EEXIST;
 use lazy_static::lazy_static;
 
-pub struct Console {
-    terminal: Option<Arc<Terminal>>,
+lazy_static! {
+    pub static ref CONSOLE: Spin<Option<Arc<Terminal>>> = Spin::new(None);
 }
 
-impl Console {
-    pub fn get_terminal(&self) -> Option<Arc<Terminal>> {
-        self.terminal.clone()
-    }
-
-    pub fn register_terminal(terminal: &Arc<Terminal>) -> KResult<()> {
-        let mut console = CONSOLE.lock_irq();
-        if console.terminal.is_some() {
-            return Err(EEXIST);
-        }
-
-        console.terminal = Some(terminal.clone());
+pub fn set_console(terminal: Arc<Terminal>) -> KResult<()> {
+    let mut console = CONSOLE.lock();
+    if console.is_none() {
+        *console = Some(terminal);
         Ok(())
+    } else {
+        Err(EEXIST)
     }
 }
 
-impl Write for Console {
-    fn write_str(&mut self, s: &str) -> core::fmt::Result {
-        if let Some(console) = &self.terminal {
-            for &ch in s.as_bytes() {
-                console.show_char(ch)
-            }
-        }
-
-        Ok(())
-    }
+pub fn get_console() -> Option<Arc<Terminal>> {
+    let console = CONSOLE.lock();
+    console.clone()
 }
 
 #[doc(hidden)]
 pub fn _print(args: core::fmt::Arguments) {
-    dont_check!(CONSOLE.lock_irq().write_fmt(args))
-}
-
-lazy_static! {
-    pub static ref CONSOLE: Spin<Console> = Spin::new(Console { terminal: None });
+    // TODO!!!!!!!!!!!!!: REMOVE THIS AND USE `eonix_log`.
+    eonix_log::do_print(args);
 }
 
 macro_rules! print {
@@ -105,7 +87,7 @@ macro_rules! println_trace {
     }};
 }
 
-use super::terminal::Terminal;
+use super::{constants::EEXIST, terminal::Terminal};
 
 pub(crate) use {
     print, println, println_debug, println_fatal, println_info, println_trace, println_warn,
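
`set_console` and `get_console` form a set-once global: the first registration wins and later ones fail with `EEXIST`. Purely as an illustration of those semantics, here is the same behavior in user-space Rust with `std::sync::OnceLock` (the kernel keeps the `Spin<Option<_>>` form because it runs in `no_std`):

```rust
use std::sync::{Arc, OnceLock};

static CONSOLE: OnceLock<Arc<String>> = OnceLock::new();

// Analogous to `set_console`: the first caller wins, later calls report failure.
fn set_console(terminal: Arc<String>) -> Result<(), Arc<String>> {
    CONSOLE.set(terminal)
}

// Analogous to `get_console`: `None` until a console has been registered.
fn get_console() -> Option<Arc<String>> {
    CONSOLE.get().cloned()
}

fn main() {
    assert!(get_console().is_none());
    assert!(set_console(Arc::new("ttyS0".into())).is_ok());
    assert!(set_console(Arc::new("ttyS1".into())).is_err());
    assert_eq!(get_console().unwrap().as_str(), "ttyS0");
}
```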

+ 3 - 0
src/kernel/constants.rs

@@ -19,8 +19,11 @@ pub const SA_RESTORER: u32 = 0x04000000;
 pub const CLOCK_REALTIME: u32 = 0;
 pub const CLOCK_MONOTONIC: u32 = 1;
 
+pub const ENOENT: u32 = 2;
+pub const EIO: u32 = 5;
 pub const ENXIO: u32 = 6;
 pub const ENOEXEC: u32 = 8;
+pub const EEXIST: u32 = 17;
 pub const ENOSYS: u32 = 38;
 
 #[allow(dead_code)]

+ 5 - 2
src/kernel/mem/mm_list.rs

@@ -1,6 +1,9 @@
 mod page_fault;
 
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{
+    ops::Sub as _,
+    sync::atomic::{AtomicUsize, Ordering},
+};
 
 use crate::{prelude::*, sync::ArcSwap};
 
@@ -362,7 +365,7 @@ impl MMList {
             .get(&break_start)
             .expect("Program break area should be valid");
 
-        let len = pos - current_break;
+        let len: usize = pos.sub(current_break);
         inner.page_table.set_anonymous(
             VRange::from(program_break.range().end()).grow(len),
             Permission {

+ 10 - 9
src/kernel/smp.rs

@@ -1,16 +1,14 @@
-use arch::define_smp_bootstrap;
-
+use super::cpu::init_thiscpu;
 use crate::{
     kernel::{
         cpu::current_cpu,
         mem::{paging::Page, phys::PhysPtr as _},
-        task::Task,
+        task::KernelStack,
     },
     println_debug,
-    sync::preempt,
 };
-
-use super::{cpu::init_thiscpu, task::Scheduler};
+use arch::define_smp_bootstrap;
+use eonix_runtime::scheduler::Scheduler;
 
 define_smp_bootstrap!(4, ap_entry, {
     let page = Page::alloc_many(9);
@@ -21,14 +19,17 @@ define_smp_bootstrap!(4, ap_entry, {
 
 unsafe extern "C" fn ap_entry() -> ! {
     init_thiscpu();
-    Scheduler::init_scheduler_thiscpu();
+    Scheduler::init_local_scheduler::<KernelStack>();
     println_debug!("AP{} started", current_cpu().cpuid());
 
-    preempt::disable();
+    eonix_preempt::disable();
     arch::enable_irqs();
 
     // TODO!!!!!: Free the stack after having switched to idle task.
-    Task::switch_noreturn(&Task::idle());
+    unsafe {
+        // SAFETY: `preempt::count()` == 1.
+        Scheduler::goto_scheduler_noreturn()
+    }
 }
 
 pub unsafe fn bootstrap_smp() {

+ 17 - 21
src/kernel/syscall/procops.rs

@@ -1,9 +1,5 @@
-use alloc::borrow::ToOwned;
-use alloc::ffi::CString;
-use arch::{ExtendedContext, InterruptContext};
-use bindings::{EINVAL, ENOENT, ENOTDIR, ERANGE, ESRCH};
-use bitflags::bitflags;
-
+use super::sysinfo::TimeVal;
+use super::{define_syscall32, register_syscall};
 use crate::elf::ParsedElf32;
 use crate::io::Buffer;
 use crate::kernel::constants::{
@@ -11,20 +7,22 @@ use crate::kernel::constants::{
 };
 use crate::kernel::mem::{Page, PageBuffer, VAddr};
 use crate::kernel::task::{
-    ProcessBuilder, ProcessList, Scheduler, Signal, SignalAction, Task, Thread, ThreadBuilder,
+    KernelStack, ProcessBuilder, ProcessList, Signal, SignalAction, Thread, ThreadBuilder,
     ThreadRunnable, UserDescriptor, WaitObject, WaitType,
 };
 use crate::kernel::user::dataflow::UserString;
 use crate::kernel::user::{UserPointer, UserPointerMut};
 use crate::kernel::vfs::dentry::Dentry;
+use crate::kernel::vfs::{self, FsContext};
 use crate::path::Path;
-use crate::sync::{preempt, AsRefPosition as _};
+use crate::sync::AsRefPosition as _;
 use crate::{kernel::user::dataflow::UserBuffer, prelude::*};
-
-use crate::kernel::vfs::{self, FsContext};
-
-use super::sysinfo::TimeVal;
-use super::{define_syscall32, register_syscall};
+use alloc::borrow::ToOwned;
+use alloc::ffi::CString;
+use arch::{ExtendedContext, InterruptContext};
+use bindings::{EINVAL, ENOENT, ENOTDIR, ERANGE, ESRCH};
+use bitflags::bitflags;
+use eonix_runtime::scheduler::Scheduler;
 
 fn do_umask(mask: u32) -> KResult<u32> {
     let context = FsContext::get_current();
@@ -162,7 +160,7 @@ fn sys_exit(int_stack: &mut InterruptContext, _: &mut ExtendedContext) -> usize
 
     unsafe {
         let mut procs = ProcessList::get().lock();
-        preempt::disable();
+        eonix_preempt::disable();
 
         // SAFETY: Preemption is disabled.
         procs.do_kill_process(&Thread::current().process, WaitType::Exited(status));
@@ -170,7 +168,7 @@ fn sys_exit(int_stack: &mut InterruptContext, _: &mut ExtendedContext) -> usize
 
     unsafe {
         // SAFETY: Preempt count == 1.
-        Thread::runnable().exit();
+        Thread::exit();
     }
 }
 
@@ -292,9 +290,9 @@ fn do_set_thread_area(desc: *mut UserDescriptor) -> KResult<()> {
 
     // SAFETY: Preemption is disabled on calling `load_thread_area32()`.
     unsafe {
-        preempt::disable();
+        eonix_preempt::disable();
         Thread::current().load_thread_area32();
-        preempt::enable();
+        eonix_preempt::enable();
     }
 
     Ok(())
@@ -603,10 +601,8 @@ fn sys_fork(int_stack: &mut InterruptContext, _: &mut ExtendedContext) -> usize
         .thread_builder(thread_builder)
         .build(&mut procs);
 
-    Scheduler::get().spawn(Task::new(ThreadRunnable::from_context(
-        new_thread,
-        new_int_context,
-    )));
+    Scheduler::get()
+        .spawn::<KernelStack, _>(ThreadRunnable::from_context(new_thread, new_int_context));
 
     new_process.pid as usize
 }

+ 2 - 6
src/kernel/task.rs

@@ -1,19 +1,15 @@
+mod kernel_stack;
 mod process;
 mod process_group;
 mod process_list;
-mod readyqueue;
-mod scheduler;
 mod session;
 mod signal;
-mod task;
 mod thread;
 
+pub use kernel_stack::KernelStack;
 pub use process::{Process, ProcessBuilder, WaitObject, WaitType};
 pub use process_group::ProcessGroup;
 pub use process_list::ProcessList;
-pub use readyqueue::init_rq_thiscpu;
-pub use scheduler::Scheduler;
 pub use session::Session;
 pub use signal::{Signal, SignalAction};
-pub use task::{FutureRunnable, Task, TaskContext};
 pub use thread::{Thread, ThreadBuilder, ThreadRunnable, UserDescriptor};

+ 10 - 2
src/kernel/task/task/kstack.rs → src/kernel/task/kernel_stack.rs

@@ -1,4 +1,5 @@
 use crate::kernel::mem::{paging::Page, phys::PhysPtr};
+use eonix_runtime::executor::Stack;
 
 #[derive(Debug)]
 pub struct KernelStack {
@@ -20,8 +21,15 @@ impl KernelStack {
             bottom,
         }
     }
+}
+
+impl Stack for KernelStack {
+    fn new() -> Self {
+        Self::new()
+    }
 
-    pub fn get_stack_bottom(&self) -> usize {
-        self.bottom
+    fn get_bottom(&self) -> &() {
+        // SAFETY: We own a valid stack, so `bottom` points into valid memory.
+        unsafe { &*(self.bottom as *const ()) }
     }
 }

+ 9 - 11
src/kernel/task/process.rs

@@ -1,11 +1,7 @@
-use core::sync::atomic::{AtomicU32, Ordering};
-
-use alloc::{
-    collections::{btree_map::BTreeMap, vec_deque::VecDeque},
-    sync::{Arc, Weak},
+use super::{
+    process_group::ProcessGroupBuilder, signal::RaiseResult, thread::ThreadBuilder, ProcessGroup,
+    ProcessList, Session, Signal, Thread,
 };
-use bindings::{ECHILD, EINTR, EPERM, ESRCH};
-
 use crate::{
     kernel::mem::MMList,
     prelude::*,
@@ -15,11 +11,13 @@ use crate::{
         RwSemReadGuard, SpinGuard,
     },
 };
-
-use super::{
-    process_group::ProcessGroupBuilder, signal::RaiseResult, thread::ThreadBuilder, ProcessGroup,
-    ProcessList, Session, Signal, Thread,
+use alloc::{
+    collections::{btree_map::BTreeMap, vec_deque::VecDeque},
+    sync::{Arc, Weak},
 };
+use bindings::{ECHILD, EINTR, EPERM, ESRCH};
+use core::sync::atomic::{AtomicU32, Ordering};
+use pointers::BorrowedArc;
 
 pub struct ProcessBuilder {
     mm_list: Option<MMList>,

+ 3 - 3
src/kernel/task/process_list.rs

@@ -7,7 +7,7 @@ use bindings::KERNEL_PML4;
 use crate::{
     prelude::*,
     rcu::rcu_sync,
-    sync::{preempt, AsRefMutPosition as _, AsRefPosition as _},
+    sync::{AsRefMutPosition as _, AsRefPosition as _},
 };
 
 use lazy_static::lazy_static;
@@ -63,7 +63,7 @@ impl ProcessList {
     pub fn kill_current(signal: Signal) -> ! {
         unsafe {
             let mut process_list = ProcessList::get().lock();
-            preempt::disable();
+            eonix_preempt::disable();
 
             // SAFETY: Preemption disabled.
             process_list.do_kill_process(&Thread::current().process, WaitType::Signaled(signal));
@@ -71,7 +71,7 @@ impl ProcessList {
 
         unsafe {
             // SAFETY: Preempt count == 1.
-            Thread::runnable().exit();
+            Thread::exit();
         }
     }
 

+ 0 - 218
src/kernel/task/scheduler.rs

@@ -1,218 +0,0 @@
-use core::{
-    future::Future,
-    pin::Pin,
-    ptr::NonNull,
-    sync::atomic::{compiler_fence, Ordering},
-    task::{Context, Poll, Waker},
-};
-
-use crate::{kernel::console::println_trace, prelude::*, sync::preempt};
-
-use alloc::sync::Arc;
-
-use intrusive_collections::RBTree;
-use lazy_static::lazy_static;
-
-use super::{
-    init_rq_thiscpu,
-    readyqueue::rq_thiscpu,
-    task::{FutureRunnable, TaskAdapter, TaskHandle, TaskOutput},
-    Task,
-};
-
-pub struct Scheduler;
-
-pub struct JoinHandle<Output>(Arc<Spin<TaskOutput<Output>>>)
-where
-    Output: Send;
-
-/// Idle task
-/// All the idle tasks are pinned to the current cpu.
-#[arch::define_percpu]
-static IDLE_TASK: Option<NonNull<Task>> = None;
-
-/// Current running task
-#[arch::define_percpu]
-static CURRENT: Option<NonNull<Task>> = None;
-
-lazy_static! {
-    static ref TASKS: Spin<RBTree<TaskAdapter>> = Spin::new(RBTree::new(TaskAdapter::new()));
-}
-
-impl Task {
-    /// # Safety
-    /// We should never "inspect" a change in `current`.
-    /// The change of `CURRENT` will only happen in the scheduler. And if we are preempted,
-    /// when we DO return, the `CURRENT` will be the same and remain valid.
-    pub fn current<'a>() -> BorrowedArc<'a, Task> {
-        BorrowedArc::from_raw(CURRENT.get().unwrap().as_ptr())
-    }
-
-    /// # Safety
-    /// Idle task should never change so we can borrow it without touching the refcount.
-    pub fn idle() -> BorrowedArc<'static, Task> {
-        BorrowedArc::from_raw(IDLE_TASK.get().unwrap().as_ptr())
-    }
-
-    pub fn add(task: Arc<Self>) {
-        TASKS.lock().insert(task);
-    }
-
-    pub fn remove(&self) {
-        unsafe { TASKS.lock().cursor_mut_from_ptr(self as *const _) }.remove();
-    }
-}
-
-impl Scheduler {
-    /// `Scheduler` might be used in various places. Do not hold it for a long time.
-    ///
-    /// # Safety
-    /// The locked returned by this function should be locked with `lock_irq` to prevent from
-    /// rescheduling during access to the scheduler. Disabling preemption will do the same.
-    ///
-    /// Drop the lock before calling `schedule`.
-    pub fn get() -> &'static Self {
-        static GLOBAL_SCHEDULER: Scheduler = Scheduler;
-        &GLOBAL_SCHEDULER
-    }
-
-    pub fn init_scheduler_thiscpu() {
-        let runnable = FutureRunnable::new(idle_task());
-        let (init_task, _) = Self::extract_handle(Task::new(runnable));
-        TASKS.lock().insert(init_task.clone());
-
-        init_rq_thiscpu();
-        Self::set_idle_and_current(init_task);
-    }
-
-    pub fn set_idle_and_current(task: Arc<Task>) {
-        task.set_usleep();
-
-        let old = IDLE_TASK.swap(NonNull::new(Arc::into_raw(task.clone()) as *mut _));
-        assert!(old.is_none(), "Idle task is already set");
-
-        let old = CURRENT.swap(NonNull::new(Arc::into_raw(task) as *mut _));
-        assert!(old.is_none(), "Current is already set");
-    }
-
-    pub fn activate(&self, task: &Arc<Task>) {
-        // TODO: Select an appropriate ready queue to enqueue.
-        if !task.on_rq.swap(true, Ordering::AcqRel) {
-            rq_thiscpu().lock_irq().put(task.clone());
-        }
-    }
-
-    pub fn spawn<O>(&self, task: TaskHandle<O>) -> JoinHandle<O>
-    where
-        O: Send,
-    {
-        let (task, output) = Self::extract_handle(task);
-        TASKS.lock().insert(task.clone());
-        self.activate(&task);
-
-        JoinHandle(output)
-    }
-
-    /// Go to idle task. Call this with `preempt_count == 1`.
-    /// The preempt count will be decremented by this function.
-    ///
-    /// # Safety
-    /// We might never return from here.
-    /// Drop all variables that take ownership of some resource before calling this function.
-    pub fn schedule() {
-        might_sleep!(1);
-
-        // Make sure all works are done before scheduling.
-        compiler_fence(Ordering::SeqCst);
-
-        // TODO!!!!!: Use of reference here needs further consideration.
-        //
-        // Since we might never return to here, we can't take ownership of `current()`.
-        // Is it safe to believe that `current()` will never change across calls?
-        Task::switch(&Task::current(), &Task::idle());
-        preempt::enable();
-    }
-
-    pub fn schedule_noreturn() -> ! {
-        preempt::disable();
-        Self::schedule();
-        panic!("Scheduler::schedule_noreturn(): Should never return")
-    }
-
-    pub async fn yield_now() {
-        struct Yield(bool);
-
-        impl Future for Yield {
-            type Output = ();
-
-            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-                match *self {
-                    Yield(true) => Poll::Ready(()),
-                    Yield(false) => {
-                        self.set(Yield(true));
-                        cx.waker().wake_by_ref();
-                        Poll::Pending
-                    }
-                }
-            }
-        }
-
-        Yield(false).await
-    }
-}
-
-async fn idle_task() {
-    preempt::disable();
-    let mut cx = Context::from_waker(Waker::noop());
-
-    loop {
-        debug_assert_eq!(
-            preempt::count(),
-            1,
-            "Scheduler::idle_task() preempt count != 1"
-        );
-
-        let next = rq_thiscpu().lock().get();
-        match next {
-            None if Task::current().is_runnable() => {
-                println_trace!(
-                    "trace_scheduler",
-                    "Returning to task id({}) without doing context switch",
-                    Task::current().id
-                );
-
-                // Previous thread is `Running`, return to the current running thread.
-                Task::current().run(&mut cx);
-            }
-            None => {
-                // Halt the cpu and rerun the loop.
-                arch::halt();
-            }
-            Some(next) => {
-                println_trace!(
-                    "trace_scheduler",
-                    "Switching from task id({}) to task id({})",
-                    Task::current().id,
-                    next.id
-                );
-
-                debug_assert_ne!(next.id, Task::current().id, "Switching to the same task");
-
-                if let Some(task_pointer) =
-                    CURRENT.swap(NonNull::new(Arc::into_raw(next) as *mut _))
-                {
-                    let task = unsafe { Arc::from_raw(task_pointer.as_ptr()) };
-                    let mut rq = rq_thiscpu().lock();
-
-                    if task.is_runnable() {
-                        rq.put(task);
-                    } else {
-                        task.on_rq.store(false, Ordering::Release);
-                    }
-                }
-
-                Task::current().run(&mut cx);
-            }
-        }
-    }
-}
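
The `yield_now` helper removed here (it now lives in `eonix_runtime`'s `Scheduler`) is the classic "pending once, then ready" future. A self-contained sketch of the same idea, polled by hand with the no-op waker the idle loop above also uses:

```rust
use std::future::Future;
use std::pin::{pin, Pin};
use std::task::{Context, Poll, Waker};

/// Completes on the second poll; the first poll wakes itself and returns `Pending`.
struct Yield(bool);

impl Future for Yield {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.0 {
            Poll::Ready(())
        } else {
            self.0 = true;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

fn main() {
    // `Waker::noop()` does nothing on wake, which is fine here: we poll again ourselves.
    let mut cx = Context::from_waker(Waker::noop());
    let mut fut = pin!(Yield(false));

    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Pending);
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Ready(()));
}
```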

+ 29 - 33
src/kernel/task/signal.rs

@@ -1,5 +1,4 @@
-use core::{cmp::Reverse, task::Waker};
-
+use super::{ProcessList, Thread, WaitObject, WaitType};
 use crate::{
     io::BufferFill,
     kernel::{
@@ -7,14 +6,13 @@ use crate::{
         user::{dataflow::UserBuffer, UserPointer},
     },
     prelude::*,
-    sync::{preempt, AsRefPosition as _},
+    sync::AsRefPosition as _,
 };
-
 use alloc::collections::{binary_heap::BinaryHeap, btree_map::BTreeMap};
 use arch::{ExtendedContext, InterruptContext};
 use bindings::{EFAULT, EINVAL};
-
-use super::{ProcessList, Scheduler, Task, Thread, WaitObject, WaitType};
+use core::{cmp::Reverse, task::Waker};
+use eonix_runtime::{scheduler::Scheduler, task::Task};
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Signal(u32);
@@ -78,7 +76,6 @@ struct SignalListInner {
 
 #[derive(Debug)]
 pub struct SignalList {
-    /// We might use this inside interrupt handler, so we need to use `lock_irq`.
     inner: Spin<SignalListInner>,
 }
 
@@ -266,12 +263,11 @@ impl SignalListInner {
                 self.stop_waker.take().map(|waker| waker.wake());
             }
             _ => {
-                let waker = self
-                    .signal_waker
+                // If we don't have a waker here, we might still be in the initialization step.
+                // The signal will be picked up once the thread starts running anyway.
+                self.signal_waker
                     .as_ref()
-                    .expect("We should have a signal waker");
-
-                waker.wake_by_ref();
+                    .inspect(|waker| waker.wake_by_ref());
             }
         }
 
@@ -293,19 +289,19 @@ impl SignalList {
     }
 
     pub fn get_mask(&self) -> u64 {
-        self.inner.lock_irq().get_mask()
+        self.inner.lock().get_mask()
     }
 
     pub fn set_mask(&self, mask: u64) {
-        self.inner.lock_irq().set_mask(mask)
+        self.inner.lock().set_mask(mask)
     }
 
     pub fn mask(&self, mask: u64) {
-        self.inner.lock_irq().set_mask(mask)
+        self.inner.lock().set_mask(mask)
     }
 
     pub fn unmask(&self, mask: u64) {
-        self.inner.lock_irq().unmask(mask)
+        self.inner.lock().unmask(mask)
     }
 
     pub fn set_handler(&self, signal: Signal, action: &SignalAction) -> KResult<()> {
@@ -313,7 +309,7 @@ impl SignalList {
             return Err(EINVAL);
         }
 
-        let mut inner = self.inner.lock_irq();
+        let mut inner = self.inner.lock();
         if action.is_default() {
             inner.handlers.remove(&signal);
         } else {
@@ -325,7 +321,7 @@ impl SignalList {
 
     pub fn get_handler(&self, signal: Signal) -> SignalAction {
         self.inner
-            .lock_irq()
+            .lock()
             .handlers
             .get(&signal)
             .cloned()
@@ -334,7 +330,7 @@ impl SignalList {
 
     // TODO!!!: Find a better way.
     pub fn set_signal_waker(&self, waker: Waker) {
-        let mut inner = self.inner.lock_irq();
+        let mut inner = self.inner.lock();
         let old_waker = inner.signal_waker.replace(waker);
         assert!(old_waker.is_none(), "We should not have a waker here");
     }
@@ -343,7 +339,7 @@ impl SignalList {
     /// This is used when `execve` is called.
     pub fn clear_non_ignore(&self) {
         self.inner
-            .lock_irq()
+            .lock()
             .handlers
             .retain(|_, action| action.is_ignore());
     }
@@ -351,16 +347,16 @@ impl SignalList {
     /// Clear all pending signals.
     /// This is used when `fork` is called.
     pub fn clear_pending(&self) {
-        self.inner.lock_irq().pending.clear()
+        self.inner.lock().pending.clear()
     }
 
     pub fn has_pending_signal(&self) -> bool {
-        !self.inner.lock_irq().pending.is_empty()
+        !self.inner.lock().pending.is_empty()
     }
 
     /// Do not use this, use `Thread::raise` instead.
     pub(super) fn raise(&self, signal: Signal) -> RaiseResult {
-        self.inner.lock_irq().raise(signal)
+        self.inner.lock().raise(signal)
     }
 
     /// Handle signals in the context of `Thread::current()`.
@@ -371,27 +367,27 @@ impl SignalList {
     pub fn handle(&self, int_stack: &mut InterruptContext, ext_ctx: &mut ExtendedContext) {
         loop {
             let signal = {
-                let signal = match self.inner.lock_irq().pop() {
+                let signal = match self.inner.lock().pop() {
                     Some(signal) => signal,
                     None => return,
                 };
 
-                let handler = self.inner.lock_irq().handlers.get(&signal).cloned();
+                let handler = self.inner.lock().handlers.get(&signal).cloned();
                 if let Some(handler) = handler {
                     if !signal.is_now() {
                         let old_mask = {
-                            let mut inner = self.inner.lock_irq();
+                            let mut inner = self.inner.lock();
                             let old_mask = inner.mask;
                             inner.mask(handler.sa_mask as u64);
                             old_mask
                         };
                         let result = handler.handle(signal, old_mask, int_stack, ext_ctx);
                         if result.is_err() {
-                            self.inner.lock_irq().set_mask(old_mask);
+                            self.inner.lock().set_mask(old_mask);
                         }
                         match result {
-                            Err(EFAULT) => self.inner.lock_irq().raise(Signal::SIGSEGV),
-                            Err(_) => self.inner.lock_irq().raise(Signal::SIGSYS),
+                            Err(EFAULT) => self.inner.lock().raise(Signal::SIGSEGV),
+                            Err(_) => self.inner.lock().raise(Signal::SIGSYS),
                             Ok(()) => return,
                         };
                         continue;
@@ -403,7 +399,7 @@ impl SignalList {
                 // Default actions include stopping the thread, continuing the thread and
                 // terminating the process. All these actions will block the thread or return
                 // to the thread immediately. So we can unmask these signals now.
-                self.inner.lock_irq().unmask(signal.to_mask());
+                self.inner.lock().unmask(signal.to_mask());
                 signal
             };
 
@@ -421,12 +417,12 @@ impl SignalList {
                         );
                     }
 
-                    preempt::disable();
+                    eonix_preempt::disable();
 
                     // `SIGSTOP` can only be waken up by `SIGCONT` or `SIGKILL`.
                     // SAFETY: Preempt disabled above.
                     {
-                        let mut inner = self.inner.lock_irq();
+                        let mut inner = self.inner.lock();
                         let waker = Waker::from(Task::current().usleep());
                         let old_waker = inner.stop_waker.replace(waker);
                         assert!(old_waker.is_none(), "We should not have a waker here");
@@ -471,7 +467,7 @@ impl SignalList {
         *ext_ctx = UserPointer::<ExtendedContext>::new_vaddr(old_mmxregs_vaddr)?.read()?;
         *int_stack = UserPointer::<InterruptContext>::new_vaddr(old_int_stack_vaddr)?.read()?;
 
-        self.inner.lock_irq().set_mask(old_mask);
+        self.inner.lock().set_mask(old_mask);
         Ok(int_stack.rax as usize)
     }
 }

+ 0 - 342
src/kernel/task/task.rs

@@ -1,342 +0,0 @@
-mod context;
-mod kstack;
-mod runnable;
-
-pub use context::TaskContext;
-pub use runnable::{Contexted, PinRunnable, RunState};
-
-use atomic_unique_refcell::AtomicUniqueRefCell;
-use kstack::KernelStack;
-
-use core::{
-    future::Future,
-    pin::Pin,
-    sync::atomic::{fence, AtomicBool, AtomicU32, Ordering},
-    task::{Context, Poll, Waker},
-};
-
-use alloc::{
-    boxed::Box,
-    sync::{Arc, Weak},
-    task::Wake,
-};
-use intrusive_collections::{intrusive_adapter, KeyAdapter, RBTreeAtomicLink};
-
-use crate::{kernel::task::Scheduler, sync::preempt, Spin};
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
-pub struct TaskId(u32);
-
-#[derive(Debug)]
-pub struct TaskState(AtomicU32);
-
-pub struct UniqueWaker(Arc<Task>);
-
-pub struct TaskHandle<Output: Send> {
-    /// The task itself.
-    task: Arc<Task>,
-    /// The output of the task.
-    output: Arc<Spin<TaskOutput<Output>>>,
-}
-
-pub struct TaskOutput<Output: Send> {
-    output: Option<Output>,
-    waker: Option<Waker>,
-}
-
-/// A `Task` represents a schedulable unit.
-pub struct Task {
-    /// Unique identifier of the task.
-    pub id: TaskId,
-    /// Whether the task is on some run queue.
-    pub(super) on_rq: AtomicBool,
-    /// Executor object.
-    executor: AtomicUniqueRefCell<Option<Pin<Box<dyn Future<Output = ()> + Send>>>>,
-    /// Task execution context.
-    task_context: TaskContext,
-    /// Task state.
-    state: TaskState,
-    /// Link in the global task list.
-    link_task_list: RBTreeAtomicLink,
-}
-
-intrusive_adapter!(pub TaskAdapter = Arc<Task>: Task { link_task_list: RBTreeAtomicLink });
-impl<'a> KeyAdapter<'a> for TaskAdapter {
-    type Key = TaskId;
-    fn get_key(&self, task: &'a Task) -> Self::Key {
-        task.id
-    }
-}
-
-impl Scheduler {
-    pub(super) fn extract_handle<O>(handle: TaskHandle<O>) -> (Arc<Task>, Arc<Spin<TaskOutput<O>>>)
-    where
-        O: Send,
-    {
-        let TaskHandle { task, output } = handle;
-        (task, output)
-    }
-}
-
-impl TaskState {
-    pub const RUNNING: u32 = 0;
-    pub const ISLEEP: u32 = 1;
-    pub const USLEEP: u32 = 2;
-
-    pub const fn new(state: u32) -> Self {
-        Self(AtomicU32::new(state))
-    }
-
-    pub fn swap(&self, state: u32) -> u32 {
-        self.0.swap(state, Ordering::AcqRel)
-    }
-
-    pub fn cmpxchg(&self, current: u32, new: u32) -> u32 {
-        self.0
-            .compare_exchange(current, new, Ordering::AcqRel, Ordering::Acquire)
-            .unwrap_or_else(|x| x)
-    }
-
-    pub fn is_runnable(&self) -> bool {
-        self.0.load(Ordering::Acquire) == Self::RUNNING
-    }
-}
-
-impl Task {
-    pub fn new<R, O>(runnable: R) -> TaskHandle<R::Output>
-    where
-        O: Send,
-        R: PinRunnable<Output = O> + Contexted + Send + 'static,
-    {
-        static ID: AtomicU32 = AtomicU32::new(0);
-
-        let output = Arc::new(Spin::new(TaskOutput {
-            output: None,
-            waker: None,
-        }));
-
-        let kernel_stack = KernelStack::new();
-        let mut task_context = TaskContext::new();
-        task_context.set_sp(kernel_stack.get_stack_bottom());
-
-        let mut executor = Box::pin(Executor::new(kernel_stack, runnable));
-
-        task_context.call2(
-            Self::_executor::<O, R>,
-            [
-                unsafe { executor.as_mut().get_unchecked_mut() } as *mut _ as _,
-                Weak::into_raw(Arc::downgrade(&output)) as usize,
-            ],
-        );
-
-        let task = Arc::new(Self {
-            id: TaskId(ID.fetch_add(1, Ordering::Relaxed)),
-            on_rq: AtomicBool::new(false),
-            executor: AtomicUniqueRefCell::new(Some(executor)),
-            task_context,
-            state: TaskState::new(TaskState::RUNNING),
-            link_task_list: RBTreeAtomicLink::new(),
-        });
-
-        TaskHandle { task, output }
-    }
-
-    pub fn is_runnable(&self) -> bool {
-        self.state.is_runnable()
-    }
-
-    pub(super) fn set_usleep(&self) {
-        let prev_state = self.state.swap(TaskState::USLEEP);
-        assert_eq!(prev_state, TaskState::RUNNING);
-    }
-
-    pub fn usleep(self: &Arc<Self>) -> Arc<UniqueWaker> {
-        // No need to dequeue. We have proved that the task is running so not in the queue.
-        self.set_usleep();
-
-        Arc::new(UniqueWaker(self.clone()))
-    }
-
-    pub fn isleep(self: &Arc<Self>) -> Arc<Self> {
-        // No need to dequeue. We have proved that the task is running so not in the queue.
-        let prev_state = self.state.swap(TaskState::ISLEEP);
-        assert_eq!(prev_state, TaskState::RUNNING);
-
-        self.clone()
-    }
-
-    pub fn switch(from: &Self, to: &Self) {
-        from.task_context.switch_to(&to.task_context);
-    }
-
-    pub fn switch_noreturn(to: &Self) -> ! {
-        to.task_context.switch_noreturn();
-    }
-
-    unsafe extern "C" fn _executor<O, R>(
-        executor: Pin<&mut Executor<R>>,
-        output: *const Spin<TaskOutput<R::Output>>,
-    ) -> !
-    where
-        O: Send,
-        R: PinRunnable<Output = O> + Send + Contexted,
-    {
-        // We get here with preempt count == 1.
-        preempt::enable();
-
-        let output = Weak::from_raw(output);
-        let executor = unsafe { executor.get_unchecked_mut() };
-        let runnable = unsafe { Pin::new_unchecked(&mut executor.runnable) };
-
-        {
-            let waker = Waker::from(Task::current().clone());
-            let output_data = runnable.pinned_join(&waker);
-
-            if let Some(output) = output.upgrade() {
-                let mut output = output.lock();
-                let old = output.output.replace(output_data);
-                debug_assert!(old.is_none(), "Output should be empty");
-
-                if let Some(waker) = output.waker.take() {
-                    waker.wake();
-                }
-            }
-        }
-
-        // SAFETY: We are on the same CPU as the task.
-        executor.finished.store(true, Ordering::Relaxed);
-
-        // Idle task needs preempt count == 1.
-        preempt::disable();
-        Task::switch_noreturn(&Task::idle());
-    }
-
-    pub fn run(&self, cx: &mut Context) {
-        let mut executor = self.executor.borrow();
-        let real_executor = executor.as_mut().expect("Executor should be present");
-
-        if let Poll::Ready(_) = real_executor.as_mut().poll(cx) {
-            executor.take();
-            self.set_usleep();
-            Self::remove(self);
-        }
-    }
-}
-
-impl Wake for Task {
-    fn wake(self: Arc<Self>) {
-        self.wake_by_ref();
-    }
-
-    fn wake_by_ref(self: &Arc<Self>) {
-        match self.state.cmpxchg(TaskState::ISLEEP, TaskState::RUNNING) {
-            TaskState::RUNNING | TaskState::USLEEP => return,
-            TaskState::ISLEEP => Scheduler::get().activate(self),
-            state => panic!("Invalid transition from state {:?} to `Running`", state),
-        }
-    }
-}
-
-impl Wake for UniqueWaker {
-    fn wake(self: Arc<Self>) {
-        self.wake_by_ref();
-    }
-
-    fn wake_by_ref(self: &Arc<Self>) {
-        let Self(task) = &**self;
-
-        let prev_state = task.state.swap(TaskState::RUNNING);
-        assert_eq!(prev_state, TaskState::USLEEP);
-
-        Scheduler::get().activate(task);
-    }
-}
-
-struct Executor<R>
-where
-    R: PinRunnable + Send + Contexted + 'static,
-{
-    _kernel_stack: KernelStack,
-    runnable: R,
-    finished: AtomicBool,
-}
-
-impl<R> Executor<R>
-where
-    R: PinRunnable + Send + Contexted + 'static,
-{
-    pub fn new(kernel_stack: KernelStack, runnable: R) -> Self {
-        Self {
-            _kernel_stack: kernel_stack,
-            runnable,
-            finished: AtomicBool::new(false),
-        }
-    }
-}
-
-impl<R> Future for Executor<R>
-where
-    R: PinRunnable + Send + Contexted + 'static,
-{
-    type Output = ();
-
-    fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
-        // TODO!!!: We should load the context only if the previous task is
-        // different from the current task.
-
-        // SAFETY: We don't move the runnable object.
-        let executor = unsafe { self.get_unchecked_mut() };
-        executor.runnable.load_running_context();
-
-        // TODO!!!: If the task comes from another cpu, we need to sync.
-        //
-        // The other cpu should see the changes of kernel stack of the target thread
-        // made in this cpu.
-        //
-        // Can we find a better way other than `fence`s?
-        //
-        // An alternative way is to use an atomic variable to store the cpu id of
-        // the current task. Then we can use acquire release swap to ensure that the
-        // other cpu sees the changes.
-        fence(Ordering::SeqCst);
-
-        Task::switch(&Task::idle(), &Task::current());
-
-        fence(Ordering::SeqCst);
-
-        if executor.finished.load(Ordering::Relaxed) {
-            return Poll::Ready(());
-        }
-
-        return Poll::Pending;
-    }
-}
-
-pub struct FutureRunnable<F: Future>(F);
-
-impl<F> FutureRunnable<F>
-where
-    F: Future,
-{
-    pub const fn new(future: F) -> Self {
-        Self(future)
-    }
-}
-
-impl<F: Future + 'static> Contexted for FutureRunnable<F> {
-    fn load_running_context(&mut self) {}
-}
-
-impl<F: Future + 'static> PinRunnable for FutureRunnable<F> {
-    type Output = F::Output;
-
-    fn pinned_run(self: Pin<&mut Self>, waker: &Waker) -> RunState<Self::Output> {
-        let mut future = unsafe { self.map_unchecked_mut(|me| &mut me.0) };
-        let mut context = Context::from_waker(waker);
-
-        match future.as_mut().poll(&mut context) {
-            Poll::Ready(output) => RunState::Finished(output),
-            Poll::Pending => RunState::Running,
-        }
-    }
-}

+ 78 - 55
src/kernel/task/thread.rs

@@ -1,5 +1,7 @@
-use core::{arch::asm, pin::Pin, ptr::NonNull, task::Waker};
-
+use super::{
+    signal::{RaiseResult, Signal, SignalList},
+    Process, ProcessList,
+};
 use crate::{
     kernel::{
         cpu::current_cpu,
@@ -8,18 +10,23 @@ use crate::{
         vfs::{filearray::FileArray, FsContext},
     },
     prelude::*,
-    sync::{preempt, AsRefMutPosition as _},
+    sync::AsRefMutPosition as _,
 };
-
 use alloc::sync::Arc;
-
-use super::{
-    signal::{RaiseResult, Signal, SignalList},
-    task::{Contexted, PinRunnable, RunState},
-    Process, ProcessList, TaskContext,
-};
-
 use arch::{InterruptContext, UserTLS, _arch_fork_return};
+use bindings::KERNEL_PML4;
+use core::{
+    arch::asm,
+    pin::Pin,
+    ptr::NonNull,
+    sync::atomic::{AtomicUsize, Ordering},
+    task::Waker,
+};
+use eonix_runtime::{
+    context::ExecutionContext,
+    run::{Contexted, PinRun, RunState},
+};
+use pointers::BorrowedArc;
 
 struct CurrentThread {
     thread: NonNull<Thread>,
@@ -64,6 +71,17 @@ pub struct Thread {
     inner: Spin<ThreadInner>,
 }
 
+pub struct ThreadRunnable {
+    thread: Arc<Thread>,
+    /// Interrupt context for the thread initialization.
+    /// We store the kernel stack pointer in one of the fields for now.
+    ///
+    /// TODO: A better way to store the interrupt context.
+    interrupt_context: InterruptContext,
+    interrupt_stack_pointer: AtomicUsize,
+    return_context: ExecutionContext,
+}
+
 #[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct UserDescriptorFlags(u32);
@@ -210,17 +228,9 @@ impl Thread {
         // we return here after some preemption.
         let current: &Option<CurrentThread> = unsafe { CURRENT_THREAD.as_ref() };
         let current = current.as_ref().expect("Current thread is not set");
-        BorrowedArc::from_raw(current.thread.as_ptr())
-    }
-
-    pub fn runnable<'lt>() -> &'lt ThreadRunnable {
-        // SAFETY: We won't change the thread pointer in the current CPU when
-        // we return here after some preemption.
-        let current: &Option<CurrentThread> = unsafe { CURRENT_THREAD.as_ref() };
-        let current = current.as_ref().expect("Current thread is not set");
 
         // SAFETY: We can only use the returned value when we are in the context of the thread.
-        unsafe { &*current.runnable.as_ptr() }
+        unsafe { BorrowedArc::from_raw(current.thread) }
     }
 
     pub fn raise(self: &Arc<Self>, signal: Signal) -> RaiseResult {
@@ -269,16 +279,21 @@ impl Thread {
     pub fn get_name(&self) -> Arc<[u8]> {
         self.inner.lock().name.clone()
     }
-}
 
-pub struct ThreadRunnable {
-    thread: Arc<Thread>,
-    /// Interrupt context for the thread initialization.
-    /// We store the kernel stack pointer in one of the fields for now.
-    ///
-    /// TODO: A better way to store the interrupt context.
-    interrupt_context: InterruptContext,
-    return_context: TaskContext,
+    /// # Safety
+    /// This function needs to be called with preempt count == 1.
+    /// This function never returns, so release all held resources before calling it.
+    pub unsafe fn exit() -> ! {
+        // SAFETY: We won't change the thread pointer in the current CPU when
+        // we return here after some preemption.
+        let current: &Option<CurrentThread> = unsafe { CURRENT_THREAD.as_ref() };
+        let current = current.as_ref().expect("Current thread is not set");
+
+        // SAFETY: We can only use `return_context` when we are in the context of the thread.
+        let runnable = unsafe { current.runnable.as_ref() };
+
+        runnable.return_context.switch_noreturn()
+    }
 }
 
 impl ThreadRunnable {
@@ -293,7 +308,8 @@ impl ThreadRunnable {
         Self {
             thread,
             interrupt_context,
-            return_context: TaskContext::new(),
+            interrupt_stack_pointer: AtomicUsize::new(0),
+            return_context: ExecutionContext::new(),
         }
     }
 
@@ -301,24 +317,22 @@ impl ThreadRunnable {
         Self {
             thread,
             interrupt_context,
-            return_context: TaskContext::new(),
+            interrupt_stack_pointer: AtomicUsize::new(0),
+            return_context: ExecutionContext::new(),
         }
     }
-
-    /// # Safety
-    /// This function needs to be called with preempt count == 1.
-    pub unsafe fn exit(&self) -> ! {
-        self.return_context.switch_noreturn();
-    }
 }
 
 impl Contexted for ThreadRunnable {
-    fn load_running_context(&mut self) {
-        let thread = self.thread.as_ref();
-
-        unsafe {
-            // SAFETY: Preemption is disabled.
-            arch::load_interrupt_stack(current_cpu(), self.interrupt_context.int_no as u64);
+    fn load_running_context(&self) {
+        let thread: &Thread = &self.thread;
+
+        match self.interrupt_stack_pointer.load(Ordering::Relaxed) {
+            0 => {}
+            sp => unsafe {
+                // SAFETY: Preemption is disabled.
+                arch::load_interrupt_stack(current_cpu(), sp as u64);
+            },
         }
 
         // SAFETY: Preemption is disabled.
@@ -340,43 +354,52 @@ impl Contexted for ThreadRunnable {
             thread.load_thread_area32();
         }
     }
+
+    fn restore_running_context(&self) {
+        arch::set_root_page_table(KERNEL_PML4 as usize);
+    }
 }
 
-impl PinRunnable for ThreadRunnable {
+impl PinRun for ThreadRunnable {
     type Output = ();
 
-    fn pinned_run(mut self: Pin<&mut Self>, waker: &Waker) -> RunState<Self::Output> {
-        let mut task_context = TaskContext::new();
+    fn pinned_run(self: Pin<&mut Self>, waker: &Waker) -> RunState<Self::Output> {
+        let mut task_context = ExecutionContext::new();
         task_context.set_interrupt(false);
         task_context.set_ip(_arch_fork_return as _);
-        task_context.set_sp(&mut self.interrupt_context as *mut _ as _);
+        task_context.set_sp(&self.interrupt_context as *const _ as _);
 
         self.thread.signal_list.set_signal_waker(waker.clone());
 
-        preempt::disable();
+        eonix_preempt::disable();
 
         // TODO!!!!!: CHANGE THIS
-        unsafe {
+        let sp = unsafe {
+            let mut sp: usize;
             asm!(
                 "mov %rsp, {0}",
-                out(reg) self.interrupt_context.int_no,
+                out(reg) sp,
                 options(nomem, preserves_flags, att_syntax),
             );
-            self.interrupt_context.int_no -= 512;
-            self.interrupt_context.int_no &= !0xf;
+            sp -= 512;
+            sp &= !0xf;
+
+            sp
         };
 
+        self.interrupt_stack_pointer.store(sp, Ordering::Relaxed);
+
         unsafe {
             // SAFETY: Preemption is disabled.
-            arch::load_interrupt_stack(current_cpu(), self.interrupt_context.int_no as u64);
+            arch::load_interrupt_stack(current_cpu(), sp as u64);
         }
 
-        preempt::enable();
+        eonix_preempt::enable();
 
         self.return_context.switch_to(&task_context);
 
         // We return here with preempt count == 1.
-        preempt::enable();
+        eonix_preempt::enable();
 
         RunState::Finished(())
     }

+ 22 - 16
src/kernel/terminal.rs

@@ -1,20 +1,19 @@
-use alloc::{
-    collections::vec_deque::VecDeque,
-    sync::{Arc, Weak},
+use super::{
+    task::{ProcessList, Session, Signal, Thread},
+    user::{UserPointer, UserPointerMut},
 };
-use bindings::{EINTR, ENOTTY, EPERM};
-use bitflags::bitflags;
-
 use crate::{
     io::Buffer,
     prelude::*,
     sync::{AsRefPosition as _, CondVar},
 };
-
-use super::{
-    task::{ProcessList, Session, Signal, Thread},
-    user::{UserPointer, UserPointerMut},
+use alloc::{
+    collections::vec_deque::VecDeque,
+    sync::{Arc, Weak},
 };
+use bindings::{EINTR, ENOTTY, EPERM};
+use bitflags::bitflags;
+use eonix_log::ConsoleWrite;
 
 const BUFFER_SIZE: usize = 4096;
 
@@ -358,8 +357,7 @@ struct TerminalInner {
 }
 
 pub struct Terminal {
-    /// Lock with IRQ disabled. We might use this in IRQ context.
-    inner: Spin<TerminalInner>,
+    inner: Mutex<TerminalInner>,
     device: Arc<dyn TerminalDevice>,
     cv: CondVar,
 }
@@ -401,7 +399,7 @@ impl core::fmt::Debug for Terminal {
 impl Terminal {
     pub fn new(device: Arc<dyn TerminalDevice>) -> Arc<Self> {
         Arc::new(Self {
-            inner: Spin::new(TerminalInner {
+            inner: Mutex::new(TerminalInner {
                 termio: Termios::new_standard(),
                 session: Weak::new(),
                 buffer: VecDeque::with_capacity(BUFFER_SIZE),
@@ -486,7 +484,7 @@ impl Terminal {
 
     // TODO: Find a better way to handle this.
     pub fn commit_char(&self, ch: u8) {
-        let mut inner = self.inner.lock_irq();
+        let mut inner = self.inner.lock();
         if inner.termio.isig() {
             match ch {
                 0xff => {}
@@ -534,7 +532,7 @@ impl Terminal {
     }
 
     pub fn poll_in(&self) -> KResult<()> {
-        let mut inner = self.inner.lock_irq();
+        let mut inner = self.inner.lock();
         if inner.buffer.is_empty() {
             self.cv.wait(&mut inner);
 
@@ -553,7 +551,7 @@ impl Terminal {
                 break 'block &tmp_buffer[..0];
             }
 
-            let mut inner = self.inner.lock_irq();
+            let mut inner = self.inner.lock();
             if inner.buffer.is_empty() {
                 self.cv.wait(&mut inner);
 
@@ -679,3 +677,11 @@ impl Terminal {
         self.inner.lock().session.upgrade()
     }
 }
+
+impl ConsoleWrite for Terminal {
+    fn write(&self, s: &str) {
+        for &ch in s.as_bytes() {
+            self.show_char(ch);
+        }
+    }
+}

+ 6 - 9
src/kernel/timer.rs

@@ -1,8 +1,6 @@
+use super::interrupt::end_of_interrupt;
 use core::sync::atomic::{AtomicUsize, Ordering};
-
-use crate::sync::preempt;
-
-use super::{interrupt::end_of_interrupt, task::Scheduler};
+use eonix_runtime::scheduler::Scheduler;
 
 static TICKS: AtomicUsize = AtomicUsize::new(0);
 
@@ -28,14 +26,13 @@ impl Ticks {
 }
 
 pub fn timer_interrupt() {
+    end_of_interrupt();
     TICKS.fetch_add(1, Ordering::Relaxed);
-    if preempt::count() == 0 {
+
+    if eonix_preempt::count() == 0 {
         // To make scheduler satisfied.
-        preempt::disable();
-        end_of_interrupt();
+        eonix_preempt::disable();
         Scheduler::schedule();
-    } else {
-        end_of_interrupt();
     }
 }
 

+ 6 - 5
src/kernel/user/dataflow.rs

@@ -1,6 +1,7 @@
 use core::{arch::asm, ffi::CStr};
 
 use bindings::{EFAULT, EINVAL};
+use eonix_preempt::assert_preempt_enabled;
 
 use crate::{
     io::{Buffer, FillResult},
@@ -93,7 +94,7 @@ impl CheckedUserPointer {
 
     /// # Might Sleep
     pub fn read(&self, buffer: *mut (), total: usize) -> KResult<()> {
-        might_sleep!();
+        assert_preempt_enabled!("UserPointer::read");
 
         if total > self.len {
             return Err(EINVAL);
@@ -128,7 +129,7 @@ impl CheckedUserPointer {
 
     /// # Might Sleep
     pub fn write(&self, data: *mut (), total: usize) -> KResult<()> {
-        might_sleep!();
+        assert_preempt_enabled!("UserPointer::write");
 
         if total > self.len {
             return Err(EINVAL);
@@ -164,7 +165,7 @@ impl CheckedUserPointer {
 
     /// # Might Sleep
     pub fn zero(&self) -> KResult<()> {
-        might_sleep!();
+        assert_preempt_enabled!("CheckedUserPointer::zero");
 
         if self.len == 0 {
             return Ok(());
@@ -228,7 +229,7 @@ impl<'lt> Buffer for UserBuffer<'lt> {
 
     /// # Might Sleep
     fn fill(&mut self, data: &[u8]) -> KResult<FillResult> {
-        might_sleep!();
+        assert_preempt_enabled!("UserBuffer::fill");
 
         let to_write = data.len().min(self.remaining());
         if to_write == 0 {
@@ -250,7 +251,7 @@ impl<'lt> Buffer for UserBuffer<'lt> {
 impl<'lt> UserString<'lt> {
     /// # Might Sleep
     pub fn new(ptr: *const u8) -> KResult<Self> {
-        might_sleep!();
+        assert_preempt_enabled!("UserString::new");
 
         const MAX_LEN: usize = 4096;
         // TODO

+ 8 - 11
src/kernel/vfs/filearray.rs

@@ -1,8 +1,11 @@
-use core::sync::atomic::Ordering;
-
+use super::{
+    file::{File, InodeFile, TerminalFile},
+    inode::Mode,
+    s_ischr, FsContext, Spin,
+};
 use crate::{
     kernel::{
-        console::CONSOLE,
+        console::get_console,
         constants::ENXIO,
         task::Thread,
         vfs::{dentry::Dentry, file::Pipe, s_isdir, s_isreg},
@@ -11,7 +14,6 @@ use crate::{
     path::Path,
     prelude::*,
 };
-
 use alloc::{
     collections::btree_map::{BTreeMap, Entry},
     sync::Arc,
@@ -20,17 +22,12 @@ use bindings::{
     EBADF, EISDIR, ENOTDIR, FD_CLOEXEC, F_DUPFD, F_DUPFD_CLOEXEC, F_GETFD, F_SETFD, O_APPEND,
     O_CLOEXEC, O_DIRECTORY, O_RDWR, O_TRUNC, O_WRONLY,
 };
+use core::sync::atomic::Ordering;
 use itertools::{
     FoldWhile::{Continue, Done},
     Itertools,
 };
 
-use super::{
-    file::{File, InodeFile, TerminalFile},
-    inode::Mode,
-    s_ischr, FsContext, Spin,
-};
-
 type FD = u32;
 
 #[derive(Clone)]
@@ -246,7 +243,7 @@ impl FileArray {
     pub fn open_console(&self) {
         let mut inner = self.inner.lock();
         let (stdin, stdout, stderr) = (inner.next_fd(), inner.next_fd(), inner.next_fd());
-        let console_terminal = CONSOLE.lock_irq().get_terminal().unwrap();
+        let console_terminal = get_console().expect("No console terminal");
 
         inner.do_insert(
             stdin,

+ 10 - 11
src/lib.rs

@@ -28,12 +28,11 @@ mod sync;
 use alloc::{ffi::CString, sync::Arc};
 use core::alloc::{GlobalAlloc, Layout};
 use elf::ParsedElf32;
+use eonix_runtime::{run::FutureRun, scheduler::Scheduler};
 use kernel::{
     cpu::init_thiscpu,
     mem::Page,
-    task::{
-        FutureRunnable, ProcessBuilder, ProcessList, Scheduler, Task, ThreadBuilder, ThreadRunnable,
-    },
+    task::{KernelStack, ProcessBuilder, ProcessList, ThreadBuilder, ThreadRunnable},
     vfs::{
         dentry::Dentry,
         mount::{do_mount, MS_NOATIME, MS_NODEV, MS_NOSUID, MS_RDONLY},
@@ -43,7 +42,6 @@ use kernel::{
 };
 use path::Path;
 use prelude::*;
-use sync::preempt;
 
 #[panic_handler]
 fn panic(info: &core::panic::PanicInfo) -> ! {
@@ -112,16 +110,18 @@ pub extern "C" fn rust_kinit(early_kstack_pfn: usize) -> ! {
     kernel::vfs::mount::init_vfs().unwrap();
 
     // To satisfy the `Scheduler` "preempt count == 0" assertion.
-    preempt::disable();
+    eonix_preempt::disable();
 
     // We need root dentry to be present in constructor of `FsContext`.
     // So call `init_vfs` first, then `init_multitasking`.
-    Scheduler::init_scheduler_thiscpu();
+    Scheduler::init_local_scheduler::<KernelStack>();
 
-    let runnable = FutureRunnable::new(init_process(early_kstack_pfn));
-    Scheduler::get().spawn(Task::new(runnable));
+    Scheduler::get().spawn::<KernelStack, _>(FutureRun::new(init_process(early_kstack_pfn)));
 
-    Task::switch_noreturn(&Task::idle());
+    unsafe {
+        // SAFETY: `preempt::count()` == 1.
+        Scheduler::goto_scheduler_noreturn()
+    }
 }
 
 async fn init_process(early_kstack_pfn: usize) {
@@ -190,6 +190,5 @@ async fn init_process(early_kstack_pfn: usize) {
     // TODO!!!: Remove this.
     thread.files.open_console();
 
-    let task = Task::new(ThreadRunnable::new(thread, ip, sp));
-    Scheduler::get().spawn(task);
+    Scheduler::get().spawn::<KernelStack, _>(ThreadRunnable::new(thread, ip, sp));
 }
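
The init path now spawns work through the generic `eonix_runtime` scheduler: the async body is wrapped in `FutureRun` and the stack type (`KernelStack`) is chosen at the spawn site. A rough sketch of the shape of `FutureRun` as inferred from these call sites; the actual definition in crates/eonix_runtime/src/run/future_run.rs may carry more state and trait impls:

    // Inferred sketch only: an adapter that turns a future into something the
    // scheduler can spawn; the runtime's real `Run`-style trait impls are omitted.
    use core::future::Future;

    pub struct FutureRun<F> {
        future: F,
    }

    impl<F> FutureRun<F>
    where
        F: Future<Output = ()> + Send + 'static,
    {
        pub fn new(future: F) -> Self {
            Self { future }
        }
    }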

+ 1 - 60
src/prelude.rs

@@ -10,78 +10,19 @@ macro_rules! dont_check {
     };
 }
 
-use alloc::sync::Arc;
-#[allow(unused_imports)]
 pub(crate) use dont_check;
 
-#[allow(unused_imports)]
 pub use crate::bindings::root as bindings;
 
-#[allow(unused_imports)]
 pub(crate) use crate::kernel::console::{
     print, println, println_debug, println_fatal, println_info, println_trace, println_warn,
 };
 
-#[allow(unused_imports)]
-pub(crate) use crate::sync::might_sleep;
-
-#[allow(unused_imports)]
 pub(crate) use alloc::{boxed::Box, string::String, vec, vec::Vec};
 
-#[allow(unused_imports)]
 pub(crate) use core::{any::Any, fmt::Write, marker::PhantomData, str};
-use core::{mem::ManuallyDrop, ops::Deref};
-
-#[allow(unused_imports)]
-pub use crate::sync::{Locked, Mutex, RwSemaphore, Semaphore, Spin};
-
-pub struct BorrowedArc<'lt, T: ?Sized> {
-    arc: ManuallyDrop<Arc<T>>,
-    _phantom: PhantomData<&'lt ()>,
-}
-
-impl<'lt, T: ?Sized> BorrowedArc<'lt, T> {
-    pub fn from_raw(ptr: *const T) -> Self {
-        assert!(!ptr.is_null());
-        Self {
-            arc: ManuallyDrop::new(unsafe { Arc::from_raw(ptr) }),
-            _phantom: PhantomData,
-        }
-    }
-
-    #[allow(dead_code)]
-    pub fn new(ptr: &'lt *const T) -> Self {
-        assert!(!ptr.is_null());
-        Self {
-            arc: ManuallyDrop::new(unsafe { Arc::from_raw(*ptr) }),
-            _phantom: PhantomData,
-        }
-    }
 
-    pub fn borrow(&self) -> &'lt T {
-        let reference: &T = &self.arc;
-        let ptr = reference as *const T;
-
-        // SAFETY: `ptr` is a valid pointer to `T` because `reference` is a valid reference to `T`.
-        // `ptr` is also guaranteed to be valid for the lifetime `'lt` because it is derived from
-        // `self.arc` which is guaranteed to be valid for the lifetime `'lt`.
-        unsafe { ptr.as_ref().unwrap() }
-    }
-}
-
-impl<'lt, T: ?Sized> Deref for BorrowedArc<'lt, T> {
-    type Target = Arc<T>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.arc
-    }
-}
-
-impl<'lt, T: ?Sized> AsRef<Arc<T>> for BorrowedArc<'lt, T> {
-    fn as_ref(&self) -> &Arc<T> {
-        &self.arc
-    }
-}
+pub use crate::sync::{Locked, Mutex, RwSemaphore, Spin};
 
 #[allow(dead_code)]
 pub trait AsAny: Send + Sync {

+ 16 - 32
src/rcu.rs

@@ -1,22 +1,16 @@
+use crate::{prelude::*, sync::RwSemReadGuard};
+use alloc::sync::Arc;
 use core::{
     ops::Deref,
     ptr::NonNull,
     sync::atomic::{AtomicPtr, Ordering},
 };
-
-use crate::{
-    prelude::*,
-    sync::{lock::Guard, semaphore::RwSemaphoreStrategy},
-};
-
-use alloc::sync::Arc;
-
 use lazy_static::lazy_static;
+use pointers::BorrowedArc;
 
-#[allow(dead_code)]
 pub struct RCUReadGuard<'data, T: 'data> {
     value: T,
-    guard: Guard<'data, (), RwSemaphoreStrategy, false>,
+    guard: RwSemReadGuard<'data, ()>,
     _phantom: PhantomData<&'data T>,
 }
 
@@ -156,29 +150,29 @@ impl<T: RCUNode<T>> RCUList<T> {
 
         RCUIterator {
             // SAFETY: We have a read lock, so the node is still alive.
-            cur: self.head.load(Ordering::SeqCst),
+            cur: NonNull::new(self.head.load(Ordering::SeqCst)),
             _lock: _lck,
         }
     }
 }
 
 pub struct RCUIterator<'lt, T: RCUNode<T>> {
-    cur: *const T,
-    _lock: Guard<'lt, (), RwSemaphoreStrategy, false>,
+    cur: Option<NonNull<T>>,
+    _lock: RwSemReadGuard<'lt, ()>,
 }
 
 impl<'lt, T: RCUNode<T>> Iterator for RCUIterator<'lt, T> {
     type Item = BorrowedArc<'lt, T>;
 
     fn next(&mut self) -> Option<Self::Item> {
-        match unsafe { self.cur.as_ref() } {
+        match self.cur {
             None => None,
-            Some(real) => {
+            Some(pointer) => {
                 // SAFETY: We have a read lock, so the node is still alive.
-                let ret = self.cur;
-                self.cur = real.rcu_next().load(Ordering::SeqCst);
+                let reference = unsafe { pointer.as_ref() };
 
-                Some(BorrowedArc::from_raw(ret))
+                self.cur = NonNull::new(reference.rcu_next().load(Ordering::SeqCst));
+                Some(unsafe { BorrowedArc::from_raw(pointer) })
             }
         }
     }
@@ -190,7 +184,7 @@ impl<T: core::fmt::Debug> core::fmt::Debug for RCUPointer<T> {
     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
         match NonNull::new(self.0.load(Ordering::Acquire)) {
             Some(pointer) => {
-                let borrowed = BorrowedArc::from_raw(pointer.as_ptr());
+                let borrowed = unsafe { BorrowedArc::from_raw(pointer) };
                 f.write_str("RCUPointer of ")?;
                 borrowed.fmt(f)
             }
@@ -209,24 +203,14 @@ impl<T> RCUPointer<T> {
     }
 
     pub fn load<'lt>(&self) -> Option<RCUReadGuard<'lt, BorrowedArc<'lt, T>>> {
-        let ptr = self.0.load(Ordering::Acquire);
-
-        if ptr.is_null() {
-            None
-        } else {
-            Some(RCUReadGuard::lock(BorrowedArc::from_raw(ptr)))
-        }
+        NonNull::new(self.0.load(Ordering::Acquire))
+            .map(|p| RCUReadGuard::lock(unsafe { BorrowedArc::from_raw(p) }))
     }
 
     /// # Safety
     /// Caller must ensure no writers are updating the pointer.
     pub unsafe fn load_locked<'lt>(&self) -> Option<BorrowedArc<'lt, T>> {
-        let ptr = self.0.load(Ordering::Acquire);
-        if ptr.is_null() {
-            None
-        } else {
-            Some(BorrowedArc::from_raw(ptr))
-        }
+        NonNull::new(self.0.load(Ordering::Acquire)).map(|p| unsafe { BorrowedArc::from_raw(p) })
     }
 
     /// # Safety

+ 16 - 56
src/sync.rs

@@ -1,91 +1,51 @@
 mod arcswap;
 mod condvar;
-pub mod lock;
 mod locked;
 pub mod semaphore;
-pub mod spin;
-pub mod strategy;
 
-pub mod preempt {
-    use core::sync::atomic::{compiler_fence, Ordering};
-
-    #[arch::define_percpu]
-    static PREEMPT_COUNT: usize = 0;
-
-    #[inline(always)]
-    pub fn disable() {
-        PREEMPT_COUNT.add(1);
-        compiler_fence(Ordering::SeqCst);
-    }
-
-    #[inline(always)]
-    pub fn enable() {
-        compiler_fence(Ordering::SeqCst);
-        PREEMPT_COUNT.sub(1);
-    }
-
-    #[inline(always)]
-    pub fn count() -> usize {
-        PREEMPT_COUNT.get()
-    }
-}
+pub use eonix_sync::{Guard, Lock, Spin, SpinStrategy};
 
 #[no_mangle]
 pub extern "C" fn r_preempt_disable() {
-    preempt::disable();
+    eonix_preempt::disable();
 }
 
 #[no_mangle]
 pub extern "C" fn r_preempt_enable() {
-    preempt::enable();
+    eonix_preempt::enable();
 }
 
 #[no_mangle]
 pub extern "C" fn r_preempt_count() -> usize {
-    preempt::count()
+    eonix_preempt::count()
 }
 
-pub type Spin<T> = lock::Lock<T, spin::SpinStrategy>;
-pub type Mutex<T> = lock::Lock<T, semaphore::SemaphoreStrategy<1>>;
+pub type Mutex<T> = Lock<T, semaphore::SemaphoreStrategy<1>>;
 #[allow(dead_code)]
-pub type Semaphore<T> = lock::Lock<T, semaphore::SemaphoreStrategy>;
-pub type RwSemaphore<T> = lock::Lock<T, semaphore::RwSemaphoreStrategy>;
+pub type Semaphore<T> = Lock<T, semaphore::SemaphoreStrategy>;
+pub type RwSemaphore<T> = Lock<T, semaphore::RwSemaphoreStrategy>;
 
 #[allow(dead_code)]
-pub type SpinGuard<'lock, T> = lock::Guard<'lock, T, spin::SpinStrategy, true>;
+pub type SpinGuard<'lock, T> = Guard<'lock, T, SpinStrategy, SpinStrategy, true>;
 
 #[allow(dead_code)]
-pub type MutexGuard<'lock, T> = lock::Guard<'lock, T, semaphore::SemaphoreStrategy<1>, true>;
+pub type MutexGuard<'lock, T> =
+    Guard<'lock, T, semaphore::SemaphoreStrategy<1>, semaphore::SemaphoreStrategy<1>, true>;
 
 #[allow(dead_code)]
-pub type SemGuard<'lock, T> = lock::Guard<'lock, T, semaphore::SemaphoreStrategy, true>;
+pub type SemGuard<'lock, T> =
+    Guard<'lock, T, semaphore::SemaphoreStrategy, semaphore::SemaphoreStrategy, true>;
 
 #[allow(dead_code)]
-pub type RwSemReadGuard<'lock, T> = lock::Guard<'lock, T, semaphore::RwSemaphoreStrategy, false>;
+pub type RwSemReadGuard<'lock, T> =
+    Guard<'lock, T, semaphore::RwSemaphoreStrategy, semaphore::RwSemaphoreStrategy, false>;
 
 #[allow(dead_code)]
-pub type RwSemWriteGuard<'lock, T> = lock::Guard<'lock, T, semaphore::RwSemaphoreStrategy, true>;
+pub type RwSemWriteGuard<'lock, T> =
+    Guard<'lock, T, semaphore::RwSemaphoreStrategy, semaphore::RwSemaphoreStrategy, true>;
 
 pub type CondVar = condvar::CondVar<true>;
 pub type UCondVar = condvar::CondVar<false>;
 
-macro_rules! might_sleep {
-    () => {
-        assert_eq!(
-            $crate::sync::preempt::count(),
-            0,
-            "a might_sleep function called with preempt disabled"
-        );
-    };
-    ($n:expr) => {
-        assert_eq!(
-            $crate::sync::preempt::count(),
-            $n,
-            "a might_sleep function called with the preempt count not satisfying its requirement",
-        );
-    };
-}
-
 pub use arcswap::ArcSwap;
 pub use locked::{AsRefMutPosition, AsRefPosition, Locked, RefMutPosition, RefPosition};
-pub(crate) use might_sleep;
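
The locking primitives themselves now come from the `eonix_sync` crate; `Guard` gains a second strategy parameter (every alias above instantiates both with the same strategy), presumably so that the strategy used to acquire a lock can differ from the lock's native one (the old `lock_irq` path). Call sites keep working unchanged; an illustrative usage sketch, not taken from the tree:

    // Illustrative only: spin-lock usage through the re-exported eonix_sync types.
    // Only the guard alias changes shape; the lock API itself stays the same.
    fn bump(counter: &Spin<usize>) {
        let mut guard: SpinGuard<'_, usize> = counter.lock();
        *guard += 1;
        // The guard drops here, releasing the spin lock (and the preempt-disable
        // it presumably holds).
    }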

+ 9 - 5
src/sync/arcswap.rs

@@ -1,11 +1,10 @@
+use alloc::sync::Arc;
 use core::{
     fmt::{self, Debug, Formatter},
+    ptr::NonNull,
     sync::atomic::{AtomicPtr, Ordering},
 };
-
-use alloc::sync::Arc;
-
-use crate::BorrowedArc;
+use pointers::BorrowedArc;
 
 unsafe impl<T> Send for ArcSwap<T> where T: Send + Sync {}
 unsafe impl<T> Sync for ArcSwap<T> where T: Send + Sync {}
@@ -35,7 +34,12 @@ impl<T> ArcSwap<T> {
     }
 
     pub fn borrow(&self) -> BorrowedArc<T> {
-        BorrowedArc::from_raw(self.pointer.load(Ordering::Relaxed))
+        unsafe {
+            BorrowedArc::from_raw(
+                NonNull::new(self.pointer.load(Ordering::Relaxed))
+                    .expect("ArcSwap: pointer should not be null."),
+            )
+        }
     }
 }
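
`BorrowedArc` has moved to the new `pointers` crate, and its constructor now takes a `NonNull` and is `unsafe`, which is why the null check moves to the caller here. A sketch of the assumed relocated type, adapted from the definition removed from src/prelude.rs above and the call sites in this diff; crates/pointers/src/lib.rs may differ in detail:

    use alloc::sync::Arc;
    use core::{marker::PhantomData, mem::ManuallyDrop, ptr::NonNull};

    pub struct BorrowedArc<'lt, T: ?Sized> {
        // Holds the Arc without touching its reference count on drop.
        arc: ManuallyDrop<Arc<T>>,
        _phantom: PhantomData<&'lt ()>,
    }

    impl<'lt, T: ?Sized> BorrowedArc<'lt, T> {
        /// # Safety
        /// `ptr` must come from an `Arc<T>` that stays alive for `'lt`.
        pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
            Self {
                arc: ManuallyDrop::new(unsafe { Arc::from_raw(ptr.as_ptr()) }),
                _phantom: PhantomData,
            }
        }
    }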
 

+ 44 - 14
src/sync/condvar.rs

@@ -1,16 +1,9 @@
-use core::task::Waker;
-
-use crate::{
-    kernel::{
-        console::println_trace,
-        task::{Scheduler, Task},
-    },
-    prelude::*,
-    sync::preempt,
-};
-
-use super::{lock::Guard, strategy::LockStrategy};
+use crate::prelude::*;
 use alloc::collections::vec_deque::VecDeque;
+use core::task::Waker;
+use eonix_preempt::{assert_preempt_count_eq, assert_preempt_enabled};
+use eonix_runtime::{scheduler::Scheduler, task::Task};
+use eonix_sync::{Guard, LockStrategy};
 
 pub struct CondVar<const INTERRUPTIBLE: bool> {
     waiters: Spin<VecDeque<Waker>>,
@@ -71,8 +64,13 @@ impl<const I: bool> CondVar<I> {
     ///
     /// # Might Sleep
     /// This function **might sleep**, so call it in a preemptible context.
-    pub fn wait<'a, T, S: LockStrategy, const W: bool>(&self, guard: &mut Guard<'a, T, S, W>) {
-        preempt::disable();
+    pub fn wait<'a, T, S, L, const W: bool>(&self, guard: &mut Guard<'a, T, S, L, W>)
+    where
+        T: ?Sized,
+        S: LockStrategy,
+        L: LockStrategy,
+    {
+        eonix_preempt::disable();
         let waker = Self::sleep();
         self.waiters.lock().push_back(waker);
 
@@ -83,7 +81,39 @@ impl<const I: bool> CondVar<I> {
         // If the flag is already set, we don't need to sleep.
 
         unsafe { guard.force_unlock() };
+
+        assert_preempt_count_eq!(1, "CondVar::wait");
         Scheduler::schedule();
+
+        unsafe { guard.force_relock() };
+
+        assert!(Task::current().is_runnable());
+    }
+
+    /// Unlock the `guard`. Then wait until woken up. Relock the `guard` before returning.
+    ///
+    /// # Might Sleep
+    /// This function **might sleep**, so call it in a preemptible context.
+    pub async fn async_wait<'a, T, S, L, const W: bool>(&self, guard: &mut Guard<'a, T, S, L, W>)
+    where
+        T: ?Sized,
+        S: LockStrategy,
+        L: LockStrategy,
+    {
+        let waker = Self::sleep();
+        self.waiters.lock().push_back(waker);
+
+        // TODO!!!: Another way to do this:
+        //
+        // Store a flag in our entry in the waiting list.
+        // Check the flag before doing `schedule()` but after we've unlocked the `guard`.
+        // If the flag is already set, we don't need to sleep.
+
+        unsafe { guard.force_unlock() };
+
+        assert_preempt_enabled!("CondVar::async_wait");
+        Scheduler::sleep().await;
+
         unsafe { guard.force_relock() };
 
         assert!(Task::current().is_runnable());
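
`CondVar` now has both the blocking `wait` and an `async_wait` that parks the task via `Scheduler::sleep()` instead of a synchronous reschedule. A hypothetical consumer (not from the tree; the queue and function names are made up) showing the two call shapes, both of which unlock the guard, sleep until notified, and relock before returning:

    // Hypothetical usage sketch; the wait loop re-checks its predicate because
    // both variants relock the guard before they return.
    fn take_blocking(items: &Spin<Vec<u32>>, not_empty: &CondVar) -> u32 {
        let mut items = items.lock();
        while items.is_empty() {
            not_empty.wait(&mut items);
        }
        items.pop().unwrap()
    }

    async fn take_async(items: &Spin<Vec<u32>>, not_empty: &CondVar) -> u32 {
        let mut items = items.lock();
        while items.is_empty() {
            not_empty.async_wait(&mut items).await;
        }
        items.pop().unwrap()
    }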

+ 0 - 218
src/sync/lock.rs

@@ -1,218 +0,0 @@
-use core::{
-    cell::UnsafeCell,
-    ops::{Deref, DerefMut},
-};
-
-use super::{
-    semaphore::{RwSemaphoreStrategy, SemaphoreStrategy},
-    spin::IrqStrategy,
-    strategy::LockStrategy,
-    RwSemWriteGuard, SemGuard,
-};
-
-pub struct Lock<Value: ?Sized, Strategy: LockStrategy> {
-    strategy_data: Strategy::StrategyData,
-    value: UnsafeCell<Value>,
-}
-
-unsafe impl<T: ?Sized + Send, S: LockStrategy> Send for Lock<T, S> {}
-unsafe impl<T: ?Sized + Send, S: LockStrategy> Sync for Lock<T, S> {}
-
-impl<Value, Strategy: LockStrategy> Lock<Value, Strategy> {
-    #[inline(always)]
-    pub fn new(value: Value) -> Self {
-        Self {
-            strategy_data: Strategy::data(),
-            value: UnsafeCell::new(value),
-        }
-    }
-}
-
-impl<Value: core::fmt::Debug, Strategy: LockStrategy> core::fmt::Debug for Lock<Value, Strategy> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        f.debug_struct("Lock")
-            .field("locked_value", &self.value)
-            .finish()
-    }
-}
-
-impl<Value: Clone, Strategy: LockStrategy> Clone for Lock<Value, Strategy> {
-    fn clone(&self) -> Self {
-        Self {
-            strategy_data: Strategy::data(),
-            value: UnsafeCell::new(self.lock_shared().clone()),
-        }
-    }
-}
-
-impl<Value: Default, Strategy: LockStrategy> Default for Lock<Value, Strategy> {
-    fn default() -> Self {
-        Self {
-            strategy_data: Strategy::data(),
-            value: Default::default(),
-        }
-    }
-}
-
-#[allow(dead_code)]
-impl<Value: ?Sized> Lock<Value, SemaphoreStrategy> {
-    #[inline(always)]
-    pub fn lock_nosleep(&self) -> SemGuard<'_, Value> {
-        loop {
-            if !self.is_locked() {
-                if let Some(guard) = self.try_lock() {
-                    return guard;
-                }
-            }
-
-            arch::pause();
-        }
-    }
-}
-
-impl<Value: ?Sized> Lock<Value, RwSemaphoreStrategy> {
-    #[inline(always)]
-    pub fn lock_nosleep(&self) -> RwSemWriteGuard<'_, Value> {
-        loop {
-            if self.is_locked() {
-                if let Some(guard) = self.try_lock() {
-                    return guard;
-                }
-            }
-
-            arch::pause();
-        }
-    }
-}
-
-#[allow(dead_code)]
-impl<Value: ?Sized, Strategy: LockStrategy> Lock<Value, Strategy> {
-    #[inline(always)]
-    pub fn is_locked(&self) -> bool {
-        unsafe { Strategy::is_locked(&self.strategy_data) }
-    }
-
-    #[inline(always)]
-    pub fn try_lock<'lt>(&'lt self) -> Option<Guard<'lt, Value, Strategy>> {
-        if unsafe { Strategy::is_locked(&self.strategy_data) } {
-            return None;
-        }
-
-        unsafe { Strategy::try_lock(&self.strategy_data) }.map(|context| Guard {
-            _phantom: core::marker::PhantomData,
-            value: &self.value,
-            strategy_data: &self.strategy_data,
-            context,
-        })
-    }
-
-    #[inline(always)]
-    pub fn lock<'lt>(&'lt self) -> Guard<'lt, Value, Strategy> {
-        Guard {
-            _phantom: core::marker::PhantomData,
-            value: &self.value,
-            strategy_data: &self.strategy_data,
-            context: unsafe { Strategy::do_lock(&self.strategy_data) },
-        }
-    }
-
-    #[inline(always)]
-    pub fn lock_irq<'lt>(&'lt self) -> Guard<'lt, Value, IrqStrategy<Strategy>> {
-        Guard {
-            _phantom: core::marker::PhantomData,
-            value: &self.value,
-            strategy_data: &self.strategy_data,
-            context: unsafe { IrqStrategy::<Strategy>::do_lock(&self.strategy_data) },
-        }
-    }
-
-    #[inline(always)]
-    pub fn lock_shared<'lt>(&'lt self) -> Guard<'lt, Value, Strategy, false> {
-        Guard {
-            _phantom: core::marker::PhantomData,
-            value: &self.value,
-            strategy_data: &self.strategy_data,
-            context: unsafe { Strategy::do_lock_shared(&self.strategy_data) },
-        }
-    }
-
-    #[inline(always)]
-    pub fn lock_shared_irq<'lt>(&'lt self) -> Guard<'lt, Value, IrqStrategy<Strategy>, false> {
-        Guard {
-            _phantom: core::marker::PhantomData,
-            value: &self.value,
-            strategy_data: &self.strategy_data,
-            context: unsafe { IrqStrategy::<Strategy>::do_lock(&self.strategy_data) },
-        }
-    }
-
-    #[inline(always)]
-    pub fn get_mut(&mut self) -> &mut Value {
-        unsafe { &mut *self.value.get() }
-    }
-}
-
-pub struct Guard<'lock, Value: ?Sized, Strategy: LockStrategy, const WRITE: bool = true> {
-    _phantom: core::marker::PhantomData<Strategy>,
-    value: &'lock UnsafeCell<Value>,
-    strategy_data: &'lock Strategy::StrategyData,
-    context: Strategy::GuardContext,
-}
-
-impl<'lock, Value: ?Sized, Strategy: LockStrategy, const W: bool> Guard<'lock, Value, Strategy, W> {
-    /// # Safety
-    /// Use of the lock after calling this function without relocking is undefined behavior.
-    #[inline(always)]
-    pub unsafe fn force_unlock(&mut self) {
-        Strategy::do_temporary_unlock(&self.strategy_data, &mut self.context)
-    }
-
-    /// # Safety
-    /// Calling this function more than once will cause deadlocks.
-    #[inline(always)]
-    pub unsafe fn force_relock(&mut self) {
-        Strategy::do_relock(&self.strategy_data, &mut self.context)
-    }
-}
-
-impl<'lock, Value: ?Sized, Strategy: LockStrategy, const WRITE: bool> Deref
-    for Guard<'lock, Value, Strategy, WRITE>
-{
-    type Target = Value;
-
-    fn deref(&self) -> &Self::Target {
-        unsafe { &*self.value.get() }
-    }
-}
-
-impl<'lock, Value: ?Sized, Strategy: LockStrategy> DerefMut
-    for Guard<'lock, Value, Strategy, true>
-{
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *self.value.get() }
-    }
-}
-
-impl<'lock, Value: ?Sized, Strategy: LockStrategy, const WRITE: bool> AsRef<Value>
-    for Guard<'lock, Value, Strategy, WRITE>
-{
-    fn as_ref(&self) -> &Value {
-        unsafe { &*self.value.get() }
-    }
-}
-
-impl<'lock, Value: ?Sized, Strategy: LockStrategy> AsMut<Value>
-    for Guard<'lock, Value, Strategy, true>
-{
-    fn as_mut(&mut self) -> &mut Value {
-        unsafe { &mut *self.value.get() }
-    }
-}
-
-impl<'lock, Value: ?Sized, Strategy: LockStrategy, const WRITE: bool> Drop
-    for Guard<'lock, Value, Strategy, WRITE>
-{
-    fn drop(&mut self) {
-        unsafe { Strategy::do_unlock(&self.strategy_data, &mut self.context) }
-    }
-}

+ 5 - 4
src/sync/locked.rs

@@ -1,6 +1,5 @@
 use core::{cell::UnsafeCell, marker::PhantomData};
-
-use super::{lock::Guard, strategy::LockStrategy};
+use eonix_sync::{Guard, LockStrategy};
 
 pub struct RefMutPosition<'pos, T: ?Sized> {
     address: *const T,
@@ -63,10 +62,11 @@ impl<'lock, 'pos, T: ?Sized> AsRefMutPosition<'lock, 'pos, T> for &'lock mut T {
     }
 }
 
-impl<'lock, 'pos, T, S> AsRefMutPosition<'lock, 'pos, T> for Guard<'lock, T, S, true>
+impl<'lock, 'pos, T, S, L> AsRefMutPosition<'lock, 'pos, T> for Guard<'lock, T, S, L, true>
 where
     T: ?Sized,
     S: LockStrategy + 'lock,
+    L: LockStrategy + 'lock,
 {
     fn as_pos_mut(&self) -> RefMutPosition<'pos, T>
     where
@@ -103,10 +103,11 @@ impl<'lock, 'pos, T: ?Sized> AsRefPosition<'lock, 'pos, T> for &'lock mut T {
     }
 }
 
-impl<'lock, 'pos, T, S, const B: bool> AsRefPosition<'lock, 'pos, T> for Guard<'lock, T, S, B>
+impl<'lock, 'pos, T, S, L, const B: bool> AsRefPosition<'lock, 'pos, T> for Guard<'lock, T, S, L, B>
 where
     T: ?Sized,
     S: LockStrategy + 'lock,
+    L: LockStrategy + 'lock,
 {
     fn as_pos(&self) -> RefPosition<'pos, T>
     where

+ 4 - 3
src/sync/semaphore.rs

@@ -1,4 +1,5 @@
-use super::{strategy::LockStrategy, Spin, UCondVar};
+use super::{Spin, UCondVar};
+use eonix_sync::LockStrategy;
 
 pub struct SemaphoreStrategy<const MAX: usize = { core::usize::MAX }>;
 
@@ -21,7 +22,7 @@ unsafe impl<const MAX: usize> LockStrategy for SemaphoreStrategy<MAX> {
     type GuardContext = ();
 
     #[inline(always)]
-    fn data() -> Self::StrategyData {
+    fn new_data() -> Self::StrategyData {
         SemaphoreData {
             counter: Spin::new(0),
             cv: UCondVar::new(),
@@ -123,7 +124,7 @@ unsafe impl<const READ_MAX: isize> LockStrategy for RwSemaphoreStrategy<READ_MAX
     }
 
     #[inline(always)]
-    fn data() -> Self::StrategyData {
+    fn new_data() -> Self::StrategyData {
         RwSemaphoreData {
             counter: Spin::new(0),
             read_cv: UCondVar::new(),

+ 0 - 28
src/sync/strategy.rs

@@ -1,28 +0,0 @@
-pub unsafe trait LockStrategy {
-    type StrategyData;
-    type GuardContext;
-
-    fn data() -> Self::StrategyData;
-
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool;
-
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext>;
-
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext;
-
-    unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext);
-
-    unsafe fn do_lock_shared(data: &Self::StrategyData) -> Self::GuardContext {
-        Self::do_lock(data)
-    }
-
-    #[inline(always)]
-    unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        Self::do_unlock(data, context);
-    }
-
-    #[inline(always)]
-    unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        *context = Self::do_lock(data);
-    }
-}
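
The `LockStrategy` trait moves into `eonix_sync` as well (crates/eonix_sync/src/strategy.rs in the file list), with `data()` renamed to `new_data()` as the semaphore strategies above now implement. A sketch of the assumed relocated trait, i.e. the trait deleted here with only the constructor renamed; the real version is larger and may have grown further methods:

    // Assumed shape after the move; grounded in the removed trait plus the
    // `new_data` rename visible in src/sync/semaphore.rs.
    pub unsafe trait LockStrategy {
        type StrategyData;
        type GuardContext;

        fn new_data() -> Self::StrategyData;

        unsafe fn is_locked(data: &Self::StrategyData) -> bool;

        unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext>;

        unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext;

        unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext);

        unsafe fn do_lock_shared(data: &Self::StrategyData) -> Self::GuardContext {
            Self::do_lock(data)
        }

        unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
            Self::do_unlock(data, context);
        }

        unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
            *context = Self::do_lock(data);
        }
    }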