Browse source

lazy_lock: replace old lazy_static

separate `lock_irq` from `Spin`
greatbridf 10 months ago
parent
commit
5b315d7831

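In short: the commit drops the external `lazy_static` and `spin` crates in favor of an in-tree `eonix_sync::LazyLock`, and moves `lock_irq` out of `Spin` into a new `eonix_spin_irq::SpinIrq` extension trait. A condensed before/after sketch, using the `TASKS` static from the scheduler hunk below:

```rust
// Before: every non-const global went through the lazy_static! macro.
// lazy_static! {
//     static ref TASKS: Spin<RBTree<TaskAdapter>> =
//         Spin::new(RBTree::new(TaskAdapter::new()));
// }

// After: LazyLock::new is a `const fn`, so this is a plain static item and
// the closure runs on first access.
static TASKS: LazyLock<Spin<RBTree<TaskAdapter>>> =
    LazyLock::new(|| Spin::new(RBTree::new(TaskAdapter::new())));

// `lock_irq` is no longer an inherent method of Spin; callers import the
// extension trait instead:
use eonix_spin_irq::SpinIrq as _;
```
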
+ 11 - 39
Cargo.lock

@@ -91,8 +91,8 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 name = "eonix_log"
 version = "0.1.0"
 dependencies = [
+ "eonix_spin_irq",
  "eonix_sync",
- "lazy_static",
 ]
 
 [[package]]
@@ -110,17 +110,24 @@ dependencies = [
  "atomic_unique_refcell",
  "eonix_log",
  "eonix_preempt",
+ "eonix_spin_irq",
  "eonix_sync",
  "intrusive-collections",
- "lazy_static",
  "pointers",
 ]
 
 [[package]]
-name = "eonix_sync"
+name = "eonix_spin_irq"
 version = "0.1.0"
 dependencies = [
  "arch",
+ "eonix_sync",
+]
+
+[[package]]
+name = "eonix_sync"
+version = "0.1.0"
+dependencies = [
  "eonix_preempt",
 ]
 
@@ -135,12 +142,11 @@ dependencies = [
  "eonix_log",
  "eonix_preempt",
  "eonix_runtime",
+ "eonix_spin_irq",
  "eonix_sync",
  "intrusive-collections",
  "itertools",
- "lazy_static",
  "pointers",
- "spin",
 ]
 
 [[package]]
@@ -167,15 +173,6 @@ dependencies = [
  "either",
 ]
 
-[[package]]
-name = "lazy_static"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
-dependencies = [
- "spin",
-]
-
 [[package]]
 name = "libc"
 version = "0.2.164"
@@ -192,16 +189,6 @@ dependencies = [
  "windows-targets",
 ]
 
-[[package]]
-name = "lock_api"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
-dependencies = [
- "autocfg",
- "scopeguard",
-]
-
 [[package]]
 name = "log"
 version = "0.4.22"
@@ -315,27 +302,12 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
 
-[[package]]
-name = "scopeguard"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
-
 [[package]]
 name = "shlex"
 version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
 
-[[package]]
-name = "spin"
-version = "0.9.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
-dependencies = [
- "lock_api",
-]
-
 [[package]]
 name = "syn"
 version = "2.0.89"

+ 1 - 2
Cargo.toml

@@ -13,6 +13,7 @@ atomic_unique_refcell = { path = "./crates/atomic_unique_refcell", features = [
 ] }
 eonix_preempt = { path = "./crates/eonix_preempt" }
 eonix_runtime = { path = "./crates/eonix_runtime" }
+eonix_spin_irq = { path = "./crates/eonix_spin_irq" }
 eonix_sync = { path = "./crates/eonix_sync" }
 eonix_log = { path = "./crates/eonix_log" }
 pointers = { path = "./crates/pointers" }
@@ -20,8 +21,6 @@ pointers = { path = "./crates/pointers" }
 bitflags = "2.6.0"
 intrusive-collections = "0.9.7"
 itertools = { version = "0.13.0", default-features = false }
-lazy_static = { version = "1.5.0", features = ["spin_no_std"] }
-spin = "0.9.8"
 
 [features]
 default = ["smp"]

+ 1 - 1
crates/eonix_log/Cargo.toml

@@ -4,5 +4,5 @@ version = "0.1.0"
 edition = "2024"
 
 [dependencies]
+eonix_spin_irq = { path = "../eonix_spin_irq" }
 eonix_sync = { path = "../eonix_sync" }
-lazy_static = { version = "1.5.0", features = ["spin_no_std"] }

+ 2 - 4
crates/eonix_log/src/lib.rs

@@ -3,8 +3,8 @@
 use core::fmt::{self, Write};
 
 use alloc::sync::Arc;
+use eonix_spin_irq::SpinIrq as _;
 use eonix_sync::Spin;
-use lazy_static::lazy_static;
 
 extern crate alloc;
 
@@ -17,9 +17,7 @@ struct Console {
 }
 
 // TODO!!!: We should use a `RwLock` here for better performance.
-lazy_static! {
-    static ref CONSOLE: Spin<Console> = Spin::new(Console::new());
-}
+static CONSOLE: Spin<Console> = Spin::new(Console::new());
 
 impl Console {
     const fn new() -> Self {

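`CONSOLE` needs no lazy initialization at all: both `Console::new` and `Spin::new` are `const fn`, so the static is built at compile time. A minimal standalone sketch of the pattern (the field is hypothetical; the real `Console` is truncated in the hunk above):

```rust
use eonix_sync::Spin;

struct Console {
    bytes_written: usize, // hypothetical field, for illustration only
}

impl Console {
    // A `const fn` constructor is what lets the static below be evaluated
    // at compile time instead of hiding behind lazy_static!/LazyLock.
    const fn new() -> Self {
        Self { bytes_written: 0 }
    }
}

static CONSOLE: Spin<Console> = Spin::new(Console::new());
```
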
+ 1 - 1
crates/eonix_runtime/Cargo.toml

@@ -8,11 +8,11 @@ arch = { path = "../../arch" }
 atomic_unique_refcell = { path = "../atomic_unique_refcell" }
 eonix_log = { path = "../eonix_log" }
 eonix_preempt = { path = "../eonix_preempt" }
+eonix_spin_irq = { path = "../eonix_spin_irq" }
 eonix_sync = { path = "../eonix_sync" }
 pointers = { path = "../pointers" }
 
 intrusive-collections = "0.9.7"
-lazy_static = { version = "1.5.0", features = ["spin_no_std"] }
 
 [features]
 default = []

+ 5 - 12
crates/eonix_runtime/src/ready_queue.rs

@@ -1,9 +1,10 @@
 use crate::task::Task;
 use alloc::{collections::VecDeque, sync::Arc};
-use eonix_sync::Spin;
+use eonix_sync::{LazyLock, Spin};
 
 #[arch::define_percpu]
-static READYQUEUE: Option<Spin<FifoReadyQueue>> = None;
+static READYQUEUE: LazyLock<Spin<FifoReadyQueue>> =
+    LazyLock::new(|| Spin::new(FifoReadyQueue::new()));
 
 pub trait ReadyQueue {
     fn get(&mut self) -> Option<Arc<Task>>;
@@ -33,14 +34,6 @@ impl ReadyQueue for FifoReadyQueue {
 }
 
 pub fn local_rq() -> &'static Spin<dyn ReadyQueue> {
-    // SAFETY: When we use ReadyQueue on this CPU, we will lock it with `lock_irq()`
-    //         and if we use ReadyQueue on other CPU, we won't be able to touch it on this CPU.
-    //         So no issue here.
-    unsafe { READYQUEUE.as_ref() }
-        .as_ref()
-        .expect("ReadyQueue should be initialized")
-}
-
-pub fn init_local_rq() {
-    READYQUEUE.set(Some(Spin::new(FifoReadyQueue::new())));
+    // SAFETY: The inner rq is protected by `Spin`.
+    unsafe { &**READYQUEUE.as_ref() }
 }

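With the per-CPU static holding a `LazyLock`, each CPU's queue is created on first access, so the explicit `init_local_rq()` call (and the risk of forgetting it) disappears. A usage sketch, assuming a `put` method on `ReadyQueue` that the hunk truncates:

```rust
use eonix_spin_irq::SpinIrq as _;

fn enqueue(task: Arc<Task>) {
    // The first access on this CPU runs the LazyLock closure and builds the
    // FifoReadyQueue; later accesses just dereference the initialized value.
    local_rq().lock_irq().put(task);
}
```
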
+ 5 - 8
crates/eonix_runtime/src/scheduler.rs

@@ -1,7 +1,7 @@
 use crate::{
     context::ExecutionContext,
     executor::{ExecuteStatus, OutputHandle, Stack},
-    ready_queue::{init_local_rq, local_rq, ReadyQueue},
+    ready_queue::{local_rq, ReadyQueue},
     run::{Contexted, PinRun},
     task::{Task, TaskAdapter, TaskHandle},
 };
@@ -16,9 +16,9 @@ use core::{
 };
 use eonix_log::println_trace;
 use eonix_preempt::assert_preempt_count_eq;
-use eonix_sync::Spin;
+use eonix_spin_irq::SpinIrq as _;
+use eonix_sync::{LazyLock, Spin};
 use intrusive_collections::RBTree;
-use lazy_static::lazy_static;
 use pointers::BorrowedArc;
 
 #[arch::define_percpu]
@@ -27,9 +27,8 @@ static CURRENT_TASK: Option<NonNull<Task>> = None;
 #[arch::define_percpu]
 static LOCAL_SCHEDULER_CONTEXT: ExecutionContext = ExecutionContext::new();
 
-lazy_static! {
-    static ref TASKS: Spin<RBTree<TaskAdapter>> = Spin::new(RBTree::new(TaskAdapter::new()));
-}
+static TASKS: LazyLock<Spin<RBTree<TaskAdapter>>> =
+    LazyLock::new(|| Spin::new(RBTree::new(TaskAdapter::new())));
 
 pub struct Scheduler;
 
@@ -88,8 +87,6 @@ impl Scheduler {
     where
         S: Stack,
     {
-        init_local_rq();
-
         let stack = S::new();
 
         unsafe {

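The `use eonix_spin_irq::SpinIrq as _;` line is the anonymous trait import idiom: it brings the trait's `lock_irq` method into scope without binding the name `SpinIrq`. A minimal sketch of why it is needed:

```rust
use eonix_spin_irq::SpinIrq as _; // methods in scope, name not bound
use eonix_sync::Spin;

fn bump(counter: &Spin<usize>) {
    // Without the import above this would not resolve: `lock_irq` is no
    // longer an inherent method of `Spin`.
    *counter.lock_irq() += 1;
}
```
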
+ 8 - 0
crates/eonix_spin_irq/Cargo.toml

@@ -0,0 +1,8 @@
+[package]
+name = "eonix_spin_irq"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+arch = { path = "../../arch" }
+eonix_sync = { path = "../eonix_sync" }

+ 170 - 0
crates/eonix_spin_irq/src/lib.rs

@@ -0,0 +1,170 @@
+#![no_std]
+
+use core::{
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+};
+use eonix_sync::{
+    marker::NotSend, ForceUnlockableGuard, Relax, Spin, SpinGuard, SpinRelax, UnlockableGuard,
+    UnlockedGuard, UnlockedSpinGuard,
+};
+
+pub trait SpinIrq<T, R = SpinRelax>
+where
+    T: ?Sized,
+{
+    fn lock_irq(&self) -> SpinIrqGuard<'_, T, R>;
+}
+
+struct IrqStateGuard(ManuallyDrop<arch::IrqState>);
+
+pub struct SpinIrqGuard<'a, T, R = SpinRelax>
+where
+    T: ?Sized,
+{
+    guard: SpinGuard<'a, T, R>,
+    irq_state: IrqStateGuard,
+    /// We don't want this to be `Send` because we don't want to allow the guard to be
+    /// transferred to another thread since we have disabled the preemption and saved
+    /// IRQ states on the local cpu.
+    _not_send: PhantomData<NotSend>,
+}
+
+pub struct UnlockedSpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+{
+    unlocked_guard: UnlockedSpinGuard<'a, T, R>,
+    irq_state: IrqStateGuard,
+}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         we can access the guard from multiple threads.
+unsafe impl<T, R> Sync for SpinIrqGuard<'_, T, R> where T: ?Sized + Sync {}
+
+impl<T, R> SpinIrq<T, R> for Spin<T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    fn lock_irq(&self) -> SpinIrqGuard<'_, T, R> {
+        let irq_state = arch::disable_irqs_save();
+        let guard = self.lock();
+
+        SpinIrqGuard {
+            guard,
+            irq_state: IrqStateGuard::new(irq_state),
+            _not_send: PhantomData,
+        }
+    }
+}
+
+impl IrqStateGuard {
+    pub const fn new(irq_state: arch::IrqState) -> Self {
+        Self(ManuallyDrop::new(irq_state))
+    }
+}
+
+impl Drop for IrqStateGuard {
+    fn drop(&mut self) {
+        let Self(irq_state) = self;
+
+        unsafe {
+            // SAFETY: We are dropping the guard, so we are never going to access the value.
+            ManuallyDrop::take(irq_state).restore();
+        }
+    }
+}
+
+impl<T, R> Deref for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.guard.deref()
+    }
+}
+
+impl<T, R> DerefMut for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.guard.deref_mut()
+    }
+}
+
+impl<T, U, R> AsRef<U> for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsRef<U>,
+{
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
+    }
+}
+
+impl<T, U, R> AsMut<U> for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsMut<U>,
+{
+    fn as_mut(&mut self) -> &mut U {
+        self.deref_mut().as_mut()
+    }
+}
+
+impl<'a, T, R> UnlockableGuard for SpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    type Unlocked = UnlockedSpinIrqGuard<'a, T, R>;
+
+    fn unlock(self) -> Self::Unlocked {
+        UnlockedSpinIrqGuard {
+            unlocked_guard: self.guard.unlock(),
+            irq_state: self.irq_state,
+        }
+    }
+}
+
+// SAFETY: The guard is stateless, so no further processing is needed.
+unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    type Guard = SpinIrqGuard<'a, T, R>;
+
+    fn relock(self) -> Self::Guard {
+        SpinIrqGuard {
+            guard: self.unlocked_guard.relock(),
+            irq_state: self.irq_state,
+            _not_send: PhantomData,
+        }
+    }
+}
+
+impl<'a, T, R> ForceUnlockableGuard for SpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    unsafe fn force_unlock(&mut self) {
+        unsafe {
+            self.guard.force_unlock();
+        }
+    }
+
+    unsafe fn force_relock(&mut self) {
+        unsafe {
+            self.guard.force_relock();
+        }
+    }
+}

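One detail carries the whole soundness argument here: Rust drops struct fields in declaration order, so `SpinIrqGuard` releases the spinlock (`guard`) before `IrqStateGuard` restores the saved IRQ state. The critical section therefore can never be interrupted on the local CPU, exactly as with the old inherent `lock_irq`. A usage sketch:

```rust
use eonix_spin_irq::SpinIrq as _;
use eonix_sync::Spin;

static TICKS: Spin<u64> = Spin::new(0);

fn on_timer() {
    let mut ticks = TICKS.lock_irq(); // IRQs saved + disabled, then lock taken
    *ticks += 1;
} // fields drop in order: lock released first, then IRQ state restored
```
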
+ 0 - 1
crates/eonix_sync/Cargo.toml

@@ -4,7 +4,6 @@ version = "0.1.0"
 edition = "2024"
 
 [dependencies]
-arch = { path = "../../arch" }
 eonix_preempt = { path = "../eonix_preempt" }
 
 [features]

+ 1 - 1
crates/eonix_sync/src/guard.rs

@@ -7,7 +7,7 @@ pub trait UnlockableGuard {
 
 /// # Safety
 /// Implementors of this trait MUST ensure that the lock is correctly unlocked if
-/// dropped accidentally.
+/// the lock is stateful and dropped accidentally.
 pub unsafe trait UnlockedGuard {
     type Guard: UnlockableGuard;
 

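These two traits encode a release-then-reacquire protocol in the type system: `unlock` consumes the guard and returns an `Unlocked` token that grants no access to the data, and `relock` is the only way back. A sketch using the `Spin` impls from this crate:

```rust
use eonix_sync::{Spin, UnlockableGuard as _, UnlockedGuard as _};

fn wait_with_lock_released(state: &Spin<u32>) -> u32 {
    let guard = state.lock();
    // Give the lock up across a blocking region; the protected value is
    // unreachable through `unlocked`, by construction.
    let unlocked = guard.unlock();
    // ... block, reschedule, or spin on something else here ...
    let guard = unlocked.relock();
    *guard
}
```
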
+ 150 - 0
crates/eonix_sync/src/lazy_lock.rs

@@ -0,0 +1,150 @@
+use crate::{Relax, SpinRelax};
+use core::{
+    cell::UnsafeCell,
+    marker::PhantomData,
+    ops::Deref,
+    sync::atomic::{AtomicU8, Ordering},
+};
+
+enum LazyState<T, F>
+where
+    F: FnOnce() -> T,
+{
+    Uninitialized(F),
+    Initializing,
+    Initialized(T),
+}
+
+pub struct LazyLock<T, F = fn() -> T, R = SpinRelax>
+where
+    F: FnOnce() -> T,
+    R: Relax,
+{
+    value: UnsafeCell<LazyState<T, F>>,
+    state: AtomicU8,
+    _phantom: PhantomData<R>,
+}
+
+unsafe impl<T, F, R> Sync for LazyLock<T, F, R>
+where
+    T: Send + Sync,
+    F: FnOnce() -> T,
+    F: Send,
+    R: Relax,
+{
+}
+
+impl<T, F, R> LazyLock<T, F, R>
+where
+    F: FnOnce() -> T,
+    R: Relax,
+{
+    const UNINITIALIZED: u8 = 0;
+    const INITIALIZING: u8 = 1;
+    const INITIALIZED: u8 = 2;
+
+    pub const fn new(init: F) -> Self {
+        Self {
+            value: UnsafeCell::new(LazyState::Uninitialized(init)),
+            state: AtomicU8::new(Self::UNINITIALIZED),
+            _phantom: PhantomData,
+        }
+    }
+
+    /// # Safety
+    /// We should sync with the writer when calling this function or we could read stale data.
+    unsafe fn get_initialized_value(&self) -> Option<&T> {
+        // SAFETY: We're synced with the cpu that initialized it.
+        if let LazyState::Initialized(value) = unsafe { &*self.value.get() } {
+            Some(value)
+        } else {
+            None
+        }
+    }
+
+    /// Performs the initialization of the value and leaves `self.state` untouched. The caller
+    /// should set the state to `INITIALIZED` after calling this function.
+    ///
+    /// # Safety
+    /// This function is unsafe because concurrent calls would result in undefined behavior.
+    /// We should call this function exactly once with `self.state == INITIALIZING`.
+    unsafe fn do_initialization(&self) {
+        // SAFETY: We are the only thread that can access the value initializer.
+        let stateref = unsafe { &mut *self.value.get() };
+        let mut state = LazyState::Initializing;
+        core::mem::swap(stateref, &mut state);
+
+        if let LazyState::Uninitialized(init_func) = state {
+            state = LazyState::Initialized(init_func());
+        } else {
+            unreachable!("Invalid LazyLock state.");
+        };
+
+        core::mem::swap(stateref, &mut state);
+    }
+
+    /// Spin until the value is initialized. Guarantees that the initialized value is
+    /// visible to the caller cpu.
+    fn spin_until_initialized(&self) {
+        while self.state.load(Ordering::Acquire) != Self::INITIALIZED {
+            R::relax();
+        }
+    }
+
+    /// Get immutable reference to the wrapped value if initialized. Block until
+    /// the value is initialized by someone (including the caller itself) otherwise.
+    pub fn get(&self) -> &T {
+        match self.state.load(Ordering::Acquire) {
+            Self::UNINITIALIZED => match self.state.compare_exchange(
+                Self::UNINITIALIZED,
+                Self::INITIALIZING,
+                Ordering::Acquire,
+                Ordering::Acquire,
+            ) {
+                Ok(_) => unsafe {
+                    // SAFETY: We are the only thread doing initialization.
+                    self.do_initialization();
+                    self.state.store(Self::INITIALIZED, Ordering::Release);
+                },
+                Err(Self::INITIALIZING) => self.spin_until_initialized(),
+                Err(Self::INITIALIZED) => {}
+                Err(_) => unreachable!("Invalid LazyLock state."),
+            },
+            Self::INITIALIZING => self.spin_until_initialized(),
+            Self::INITIALIZED => {}
+            _ => unreachable!("Invalid LazyLock state."),
+        }
+
+        unsafe {
+            // SAFETY: If we're the spin waiter, we're synced with the cpu that initialized
+            //         it using `Acquire`. If we're the one that initialized it, no
+            //         synchronization is needed.
+            self.get_initialized_value()
+                .expect("Value should be initialized.")
+        }
+    }
+}
+
+impl<T, F, R> Deref for LazyLock<T, F, R>
+where
+    F: FnOnce() -> T,
+    R: Relax,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.get()
+    }
+}
+
+impl<T, U, F, R> AsRef<U> for LazyLock<T, F, R>
+where
+    U: ?Sized,
+    F: FnOnce() -> T,
+    R: Relax,
+    <Self as Deref>::Target: AsRef<U>,
+{
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
+    }
+}

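The state machine is the classic once-init triple: one thread wins the `UNINITIALIZED -> INITIALIZING` compare-exchange and runs the closure; every other thread spins (via `R: Relax`) on an `Acquire` load until the `Release` store of `INITIALIZED` publishes the value. Usage mirrors `std::sync::LazyLock`, which is unavailable in `no_std`; a sketch:

```rust
use eonix_sync::LazyLock;

// The closure runs at most once, on the first access from any CPU.
static FIB: LazyLock<[u64; 16]> = LazyLock::new(|| {
    let mut t = [0u64; 16];
    t[1] = 1;
    for i in 2..16 {
        t[i] = t[i - 1] + t[i - 2];
    }
    t
});

fn fib(n: usize) -> u64 {
    FIB[n] // Deref to the initialized array, then index
}
```
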
+ 4 - 2
crates/eonix_sync/src/lib.rs

@@ -1,14 +1,16 @@
 #![no_std]
 
 mod guard;
+mod lazy_lock;
 mod locked;
-mod marker;
+pub mod marker;
 mod mutex;
 mod rwlock;
 mod spin;
 
 pub use guard::{ForceUnlockableGuard, UnlockableGuard, UnlockedGuard};
+pub use lazy_lock::LazyLock;
 pub use locked::{AsProof, AsProofMut, Locked, Proof, ProofMut};
 pub use mutex::{Mutex, MutexGuard, Wait as MutexWait};
 pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard, Wait as RwLockWait};
-pub use spin::{LoopRelax, Relax, Spin, SpinGuard, SpinIrqGuard, SpinRelax};
+pub use spin::{LoopRelax, Relax, Spin, SpinGuard, SpinRelax, UnlockedSpinGuard};

+ 1 - 15
crates/eonix_sync/src/spin.rs

@@ -4,11 +4,10 @@ mod relax;
 use core::{
     cell::UnsafeCell,
     marker::PhantomData,
-    mem::ManuallyDrop,
     sync::atomic::{AtomicBool, Ordering},
 };
 
-pub use guard::{SpinGuard, SpinIrqGuard};
+pub use guard::{SpinGuard, UnlockedSpinGuard};
 pub use relax::{LoopRelax, Relax, SpinRelax};
 
 /// A spinlock is a lock that uses busy-waiting to acquire the lock.
@@ -67,19 +66,6 @@ where
         }
     }
 
-    pub fn lock_irq(&self) -> SpinIrqGuard<'_, T, R> {
-        let irq_state = arch::disable_irqs_save();
-        self.do_lock();
-
-        SpinIrqGuard {
-            lock: self,
-            // SAFETY: We are holding the lock, so we can safely access the value.
-            value: unsafe { &mut *self.value.get() },
-            irq_state: ManuallyDrop::new(irq_state),
-            _not_send: PhantomData,
-        }
-    }
-
     pub fn get_mut(&mut self) -> &mut T {
         // SAFETY: The exclusive access to the lock is guaranteed by the borrow checker.
         unsafe { &mut *self.value.get() }

+ 0 - 141
crates/eonix_sync/src/spin/guard.rs

@@ -17,39 +17,14 @@ where
     pub(super) _not_send: PhantomData<NotSend>,
 }
 
-pub struct SpinIrqGuard<'a, T, R = SpinRelax>
-where
-    T: ?Sized,
-{
-    pub(super) lock: &'a Spin<T, R>,
-    pub(super) value: &'a mut T,
-    pub(super) irq_state: ManuallyDrop<arch::IrqState>,
-    /// We don't want this to be `Send` because we don't want to allow the guard to be
-    /// transferred to another thread since we have disabled the preemption and saved
-    /// IRQ states on the local cpu.
-    pub(super) _not_send: PhantomData<NotSend>,
-}
-
 pub struct UnlockedSpinGuard<'a, T, R>(&'a Spin<T, R>)
 where
     T: ?Sized;
 
-pub struct UnlockedSpinIrqGuard<'a, T, R>
-where
-    T: ?Sized,
-{
-    lock: &'a Spin<T, R>,
-    irq_state: arch::IrqState,
-}
-
 // SAFETY: As long as the value protected by the lock is able to be shared between threads,
 //         we can access the guard from multiple threads.
 unsafe impl<T, R> Sync for SpinGuard<'_, T, R> where T: ?Sized + Sync {}
 
-// SAFETY: As long as the value protected by the lock is able to be shared between threads,
-//         we can access the guard from multiple threads.
-unsafe impl<T, R> Sync for SpinIrqGuard<'_, T, R> where T: ?Sized + Sync {}
-
 impl<T, R> Drop for SpinGuard<'_, T, R>
 where
     T: ?Sized,
@@ -62,21 +37,6 @@ where
     }
 }
 
-impl<T, R> Drop for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-{
-    fn drop(&mut self) {
-        unsafe {
-            // SAFETY: We are dropping the guard, so we are not holding the lock anymore.
-            self.lock.do_unlock();
-
-            // SAFETY: We are dropping the guard, so we are never going to access the value.
-            ManuallyDrop::take(&mut self.irq_state).restore();
-        }
-    }
-}
-
 impl<T, R> Deref for SpinGuard<'_, T, R>
 where
     T: ?Sized,
@@ -121,50 +81,6 @@ where
     }
 }
 
-impl<T, R> Deref for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-{
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        // SAFETY: We are holding the lock, so we can safely access the value.
-        self.value
-    }
-}
-
-impl<T, R> DerefMut for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-{
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        // SAFETY: We are holding the lock, so we can safely access the value.
-        self.value
-    }
-}
-
-impl<T, U, R> AsRef<U> for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-    U: ?Sized,
-    <Self as Deref>::Target: AsRef<U>,
-{
-    fn as_ref(&self) -> &U {
-        self.deref().as_ref()
-    }
-}
-
-impl<T, U, R> AsMut<U> for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-    U: ?Sized,
-    <Self as Deref>::Target: AsMut<U>,
-{
-    fn as_mut(&mut self) -> &mut U {
-        self.deref_mut().as_mut()
-    }
-}
-
 impl<'a, T, R> UnlockableGuard for SpinGuard<'a, T, R>
 where
     T: ?Sized,
@@ -183,28 +99,6 @@ where
     }
 }
 
-impl<'a, T, R> UnlockableGuard for SpinIrqGuard<'a, T, R>
-where
-    T: ?Sized,
-    R: Relax,
-{
-    type Unlocked = UnlockedSpinIrqGuard<'a, T, R>;
-
-    fn unlock(self) -> Self::Unlocked {
-        let mut me = ManuallyDrop::new(self);
-        unsafe {
-            // SAFETY: No access is possible after unlocking.
-            me.lock.do_unlock();
-        }
-
-        UnlockedSpinIrqGuard {
-            lock: me.lock,
-            // SAFETY: `me` is going to be dropped so never used again.
-            irq_state: unsafe { ManuallyDrop::take(&mut me.irq_state) },
-        }
-    }
-}
-
 // SAFETY: The guard is stateless so no more process needed.
 unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinGuard<'a, T, R>
 where
@@ -219,22 +113,6 @@ where
     }
 }
 
-// SAFETY: The guard is stateless so no more process needed.
-unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinIrqGuard<'a, T, R>
-where
-    T: ?Sized,
-    R: Relax,
-{
-    type Guard = SpinIrqGuard<'a, T, R>;
-
-    fn relock(self) -> Self::Guard {
-        let mut guard = self.lock.lock_irq();
-
-        guard.irq_state = ManuallyDrop::new(self.irq_state);
-        guard
-    }
-}
-
 impl<'a, T, R> ForceUnlockableGuard for SpinGuard<'a, T, R>
 where
     T: ?Sized,
@@ -251,22 +129,3 @@ where
         self.lock.do_lock();
     }
 }
-
-impl<'a, T, R> ForceUnlockableGuard for SpinIrqGuard<'a, T, R>
-where
-    T: ?Sized,
-    R: Relax,
-{
-    unsafe fn force_unlock(&mut self) {
-        unsafe {
-            // SAFETY: The caller assures that the value is no longer accessed.
-            self.lock.do_unlock();
-        }
-
-        // IRQ state is not restored.
-    }
-
-    unsafe fn force_relock(&mut self) {
-        self.lock.do_lock();
-    }
-}

+ 1 - 1
rust-toolchain

@@ -1 +1 @@
-nightly
+nightly-2025-03-22

+ 1 - 1
src/driver/ahci/mod.rs

@@ -7,7 +7,6 @@ use crate::{
     },
     prelude::*,
 };
-
 use alloc::{format, sync::Arc};
 use bindings::{
     kernel::hw::pci::{self, pci_device},
@@ -15,6 +14,7 @@ use bindings::{
 };
 use control::AdapterControl;
 use defs::*;
+use eonix_spin_irq::SpinIrq as _;
 use port::AdapterPort;
 
 mod command;

+ 9 - 12
src/driver/ahci/port.rs

@@ -1,20 +1,17 @@
-use alloc::collections::vec_deque::VecDeque;
-use bindings::{EINVAL, EIO};
-use eonix_preempt::assert_preempt_enabled;
-
-use crate::prelude::*;
-
-use crate::kernel::block::{BlockDeviceRequest, BlockRequestQueue};
-use crate::kernel::mem::paging::Page;
-
-use crate::kernel::mem::phys::{NoCachePP, PhysPtr};
-use crate::sync::UCondVar;
-
 use super::command::{Command, IdentifyCommand, ReadLBACommand};
 use super::{
     vread, vwrite, CommandHeader, PRDTEntry, FISH2D, PORT_CMD_CR, PORT_CMD_FR, PORT_CMD_FRE,
     PORT_CMD_ST, PORT_IE_DEFAULT,
 };
+use crate::kernel::block::{BlockDeviceRequest, BlockRequestQueue};
+use crate::kernel::mem::paging::Page;
+use crate::kernel::mem::phys::{NoCachePP, PhysPtr};
+use crate::prelude::*;
+use crate::sync::UCondVar;
+use alloc::collections::vec_deque::VecDeque;
+use bindings::{EINVAL, EIO};
+use eonix_preempt::assert_preempt_enabled;
+use eonix_spin_irq::SpinIrq as _;
 
 fn spinwait_clear(refval: *const u32, mask: u32) -> KResult<()> {
     const SPINWAIT_MAX: usize = 1000;

+ 1 - 0
src/driver/serial.rs

@@ -10,6 +10,7 @@ use crate::{
 use alloc::{collections::vec_deque::VecDeque, format, sync::Arc};
 use bitflags::bitflags;
 use eonix_runtime::{run::FutureRun, scheduler::Scheduler};
+use eonix_spin_irq::SpinIrq as _;
 
 bitflags! {
     struct LineStatus: u8 {

+ 8 - 17
src/fs/procfs.rs

@@ -13,15 +13,11 @@ use crate::{
     },
     prelude::*,
 };
-use alloc::{
-    collections::btree_map::BTreeMap,
-    sync::{Arc, Weak},
-};
+use alloc::sync::{Arc, Weak};
 use bindings::{EACCES, ENOTDIR};
 use core::{ops::ControlFlow, sync::atomic::Ordering};
-use eonix_sync::{AsProof as _, AsProofMut as _, Locked};
+use eonix_sync::{AsProof as _, AsProofMut as _, LazyLock, Locked};
 use itertools::Itertools;
-use lazy_static::lazy_static;
 
 fn split_len_offset(data: &[u8], len: usize, offset: usize) -> Option<&[u8]> {
     let real_data = data.split_at_checked(len).map(|(data, _)| data)?;
@@ -182,17 +178,12 @@ impl Vfs for ProcFs {
     }
 }
 
-lazy_static! {
-    static ref ICACHE: Spin<BTreeMap<Ino, ProcFsNode>> = Spin::new(BTreeMap::new());
-    static ref GLOBAL_PROCFS: Arc<ProcFs> = {
-        let fs: Arc<ProcFs> = Arc::new_cyclic(|weak: &Weak<ProcFs>| ProcFs {
-            root_node: DirInode::new(0, weak.clone()),
-            next_ino: AtomicIno::new(1),
-        });
-
-        fs
-    };
-}
+static GLOBAL_PROCFS: LazyLock<Arc<ProcFs>> = LazyLock::new(|| {
+    Arc::new_cyclic(|weak: &Weak<ProcFs>| ProcFs {
+        root_node: DirInode::new(0, weak.clone()),
+        next_ino: AtomicIno::new(1),
+    })
+});
 
 struct ProcFsMountCreator;
 

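`Arc::new_cyclic` is what lets the `LazyLock` closure build a filesystem holding a `Weak` back-reference to itself in a single step, with no post-construction fixup. The general shape:

```rust
use alloc::sync::{Arc, Weak};

struct Fs {
    self_ref: Weak<Fs>,
}

fn new_fs() -> Arc<Fs> {
    // The closure receives a Weak pointing at the allocation being built;
    // upgrading it inside the closure would still return None.
    Arc::new_cyclic(|weak: &Weak<Fs>| Fs {
        self_ref: weak.clone(),
    })
}
```
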
+ 4 - 12
src/kernel/block.rs

@@ -1,19 +1,14 @@
-use core::cmp::Ordering;
-
+use super::{constants::ENOENT, mem::paging::Page, vfs::DevId};
 use crate::{
     io::{Buffer, FillResult, UninitBuffer},
     prelude::*,
 };
-
 use alloc::{
     collections::btree_map::{BTreeMap, Entry},
     sync::Arc,
 };
-use bindings::{EEXIST, EINVAL, EIO, ENOENT};
-
-use lazy_static::lazy_static;
-
-use super::{mem::paging::Page, vfs::DevId};
+use bindings::{EEXIST, EINVAL, EIO};
+use core::cmp::Ordering;
 
 pub fn make_device(major: u32, minor: u32) -> DevId {
     (major << 8) & 0xff00u32 | minor & 0xffu32
@@ -72,10 +67,7 @@ impl Ord for BlockDevice {
     }
 }
 
-lazy_static! {
-    static ref BLOCK_DEVICE_LIST: Spin<BTreeMap<DevId, Arc<BlockDevice>>> =
-        Spin::new(BTreeMap::new());
-}
+static BLOCK_DEVICE_LIST: Spin<BTreeMap<DevId, Arc<BlockDevice>>> = Spin::new(BTreeMap::new());
 
 #[derive(Debug, Clone, Copy)]
 #[repr(C)]

+ 1 - 5
src/kernel/chardev.rs

@@ -16,7 +16,6 @@ use alloc::{
     sync::Arc,
 };
 use eonix_sync::AsProof as _;
-use lazy_static::lazy_static;
 
 pub trait VirtualCharDevice: Send + Sync {
     fn read(&self, buffer: &mut dyn Buffer) -> KResult<usize>;
@@ -34,10 +33,7 @@ pub struct CharDevice {
     device: CharDeviceType,
 }
 
-lazy_static! {
-    pub static ref CHAR_DEVICES: Spin<BTreeMap<DevId, Arc<CharDevice>>> =
-        Spin::new(BTreeMap::new());
-}
+static CHAR_DEVICES: Spin<BTreeMap<DevId, Arc<CharDevice>>> = Spin::new(BTreeMap::new());
 
 impl CharDevice {
     pub fn read(&self, buffer: &mut dyn Buffer) -> KResult<usize> {

+ 1 - 4
src/kernel/console.rs

@@ -1,10 +1,7 @@
 use crate::prelude::*;
 use alloc::sync::Arc;
-use lazy_static::lazy_static;
 
-lazy_static! {
-    pub static ref CONSOLE: Spin<Option<Arc<Terminal>>> = Spin::new(None);
-}
+static CONSOLE: Spin<Option<Arc<Terminal>>> = Spin::new(None);
 
 pub fn set_console(terminal: Arc<Terminal>) -> KResult<()> {
     let mut console = CONSOLE.lock();

+ 7 - 12
src/kernel/interrupt.rs

@@ -1,26 +1,21 @@
-use alloc::sync::Arc;
-
-use arch::{ExtendedContext, InterruptContext};
-use lazy_static::lazy_static;
-
-use crate::bindings::root::EINVAL;
-use crate::{driver::Port8, prelude::*};
-
 use super::cpu::current_cpu;
 use super::mem::handle_page_fault;
 use super::syscall::handle_syscall32;
 use super::task::{ProcessList, Signal};
 use super::timer::timer_interrupt;
+use crate::bindings::root::EINVAL;
+use crate::{driver::Port8, prelude::*};
+use alloc::sync::Arc;
+use arch::{ExtendedContext, InterruptContext};
+use eonix_spin_irq::SpinIrq as _;
 
 const PIC1_COMMAND: Port8 = Port8::new(0x20);
 const PIC1_DATA: Port8 = Port8::new(0x21);
 const PIC2_COMMAND: Port8 = Port8::new(0xA0);
 const PIC2_DATA: Port8 = Port8::new(0xA1);
 
-lazy_static! {
-    static ref IRQ_HANDLERS: Spin<[Option<Arc<dyn Fn() + Send + Sync>>; 16]> =
-        Spin::new([const { None }; 16]);
-}
+static IRQ_HANDLERS: Spin<[Option<Arc<dyn Fn() + Send + Sync>>; 16]> =
+    Spin::new([const { None }; 16]);
 
 fn irq_handler(irqno: usize) {
     assert!(irqno < 16);

+ 3 - 6
src/kernel/mem/page_alloc.rs

@@ -4,7 +4,6 @@ use crate::{container_of, prelude::*};
 use bitflags::bitflags;
 use core::sync::atomic::Ordering;
 use core::{ptr::NonNull, sync::atomic::AtomicU32};
-use lazy_static::lazy_static;
 
 const MAX_PAGE_ORDER: u32 = 10;
 const PAGE_ALLOC_COSTLY_ORDER: u32 = 3;
@@ -142,7 +141,7 @@ struct Zone {
 
 struct PerCpuPages {
     batch: u32,
-    high: u32,  // TODO: use in future
+    _high: u32, // TODO: use in future
     free_areas: [FreeArea; PAGE_ALLOC_COSTLY_ORDER as usize + 1],
 }
 
@@ -150,7 +149,7 @@ impl PerCpuPages {
     const fn new() -> Self {
         Self {
             batch: BATCH_SIZE,
-            high: 0,
+            _high: 0,
             free_areas: [const { FreeArea::new() }; PAGE_ALLOC_COSTLY_ORDER as usize + 1],
         }
     }
@@ -386,9 +385,7 @@ impl Zone {
 #[arch::define_percpu]
 static PER_CPU_PAGES: PerCpuPages = PerCpuPages::new();
 
-lazy_static! {
-    static ref ZONE: Spin<Zone> = Spin::new(Zone::new());
-}
+static ZONE: Spin<Zone> = Spin::new(Zone::new());
 
 fn __alloc_pages(order: u32) -> PagePtr {
     let pages_ptr;

+ 8 - 13
src/kernel/mem/page_table.rs

@@ -1,15 +1,12 @@
-use lazy_static::lazy_static;
-
-use crate::prelude::*;
-
-use crate::bindings::root::{EINVAL, KERNEL_PML4};
-
 use super::{
     paging::Page,
     phys::{CachedPP, PhysPtr as _},
     VAddr, VRange,
 };
 use super::{MMArea, Permission};
+use crate::bindings::root::{EINVAL, KERNEL_PML4};
+use crate::prelude::*;
+use eonix_sync::LazyLock;
 
 const PA_P: usize = 0x001;
 const PA_RW: usize = 0x002;
@@ -55,13 +52,11 @@ pub struct PTEIterator<'lt, const KERNEL: bool> {
     _phantom: core::marker::PhantomData<&'lt ()>,
 }
 
-lazy_static! {
-    static ref EMPTY_PAGE: Page = {
-        let page = Page::alloc_one();
-        page.zero();
-        page
-    };
-}
+static EMPTY_PAGE: LazyLock<Page> = LazyLock::new(|| {
+    let page = Page::alloc_one();
+    page.zero();
+    page
+});
 
 impl PTE {
     pub fn is_user(&self) -> bool {

+ 2 - 2
src/kernel/task/process_group.rs

@@ -14,7 +14,7 @@ pub struct ProcessGroupBuilder {
 #[derive(Debug)]
 pub struct ProcessGroup {
     pub pgid: u32,
-    pub leader: Weak<Process>,
+    pub _leader: Weak<Process>,
     pub session: Weak<Session>,
 
     pub processes: Locked<BTreeMap<u32, Weak<Process>>, ProcessList>,
@@ -49,7 +49,7 @@ impl ProcessGroupBuilder {
             pgid,
             session: Arc::downgrade(&session),
             processes: Locked::new(BTreeMap::from([(pgid, leader.clone())]), process_list),
-            leader,
+            _leader: leader,
         });
 
         process_list.add_pgroup(&pgroup);

+ 7 - 12
src/kernel/task/process_list.rs

@@ -6,7 +6,6 @@ use alloc::{
 };
 use bindings::KERNEL_PML4;
 use eonix_sync::{AsProof as _, AsProofMut as _};
-use lazy_static::lazy_static;
 
 pub struct ProcessList {
     /// The init process.
@@ -21,17 +20,13 @@ pub struct ProcessList {
     sessions: BTreeMap<u32, Weak<Session>>,
 }
 
-lazy_static! {
-    static ref GLOBAL_PROC_LIST: RwLock<ProcessList> = {
-        rwlock_new(ProcessList {
-            init: None,
-            threads: BTreeMap::new(),
-            processes: BTreeMap::new(),
-            pgroups: BTreeMap::new(),
-            sessions: BTreeMap::new(),
-        })
-    };
-}
+static GLOBAL_PROC_LIST: RwLock<ProcessList> = rwlock_new(ProcessList {
+    init: None,
+    threads: BTreeMap::new(),
+    processes: BTreeMap::new(),
+    pgroups: BTreeMap::new(),
+    sessions: BTreeMap::new(),
+});
 
 impl ProcessList {
     pub fn get() -> &'static RwLock<Self> {

+ 27 - 12
src/kernel/vfs/dentry.rs

@@ -1,11 +1,9 @@
 pub mod dcache;
 
-use core::{
-    hash::{BuildHasher, BuildHasherDefault, Hasher},
-    ops::ControlFlow,
-    sync::atomic::{AtomicPtr, Ordering},
+use super::{
+    inode::{Ino, Inode, Mode, WriteOffset},
+    s_isblk, s_ischr, s_isdir, s_isreg, DevId, FsContext,
 };
-
 use crate::{
     hash::KernelHasher,
     io::{Buffer, ByteBuffer},
@@ -14,16 +12,17 @@ use crate::{
     prelude::*,
     rcu::{RCUNode, RCUPointer},
 };
-
 use alloc::sync::Arc;
 use bindings::{
     statx, EEXIST, EINVAL, EISDIR, ELOOP, ENOENT, ENOTDIR, EPERM, ERANGE, O_CREAT, O_EXCL,
 };
-
-use super::{
-    inode::{Ino, Inode, Mode, WriteOffset},
-    s_isblk, s_ischr, s_isdir, s_isreg, DevId, FsContext,
+use core::{
+    fmt,
+    hash::{BuildHasher, BuildHasherDefault, Hasher},
+    ops::ControlFlow,
+    sync::atomic::{AtomicPtr, Ordering},
 };
+use eonix_sync::LazyLock;
 
 struct DentryData {
     inode: Arc<dyn Inode>,
@@ -50,8 +49,24 @@ pub struct Dentry {
     data: RCUPointer<DentryData>,
 }
 
-impl core::fmt::Debug for Dentry {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+pub(super) static DROOT: LazyLock<Arc<Dentry>> = LazyLock::new(|| unsafe {
+    let mut dentry = Arc::new_uninit();
+    let parent = dentry.clone().assume_init();
+
+    Arc::get_mut_unchecked(&mut dentry).write(Dentry {
+        parent,
+        name: Arc::from("[root]".as_ref()),
+        hash: 0,
+        prev: AtomicPtr::default(),
+        next: AtomicPtr::default(),
+        data: RCUPointer::empty(),
+    });
+
+    dentry.assume_init()
+});
+
+impl fmt::Debug for Dentry {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Dentry")
             .field("name", &String::from_utf8_lossy(&self.name))
             .field("parent", &String::from_utf8_lossy(&self.parent.name))

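`DROOT` is a dentry whose parent is itself. The early `clone().assume_init()` is sound because cloning only bumps the reference count and never reads the still-uninitialized payload, which is written exactly once (via the nightly `Arc::get_mut_unchecked`) before the final `assume_init`. The resulting self-cycle keeps the allocation alive forever, which is exactly right for a global root. The pattern in isolation, with a hypothetical `Node` type:

```rust
use alloc::sync::Arc;

struct Node {
    parent: Arc<Node>,
    name: &'static str,
}

fn self_parented(name: &'static str) -> Arc<Node> {
    unsafe {
        let mut node = Arc::<Node>::new_uninit();
        // Refcount becomes 2; the payload is untouched by the clone.
        let parent = node.clone().assume_init();
        Arc::get_mut_unchecked(&mut node).write(Node { parent, name });
        node.assume_init()
    }
}
```
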
+ 7 - 36
src/kernel/vfs/dentry/dcache.rs

@@ -1,46 +1,17 @@
-use core::{
-    mem::MaybeUninit,
-    sync::atomic::{AtomicPtr, Ordering},
-};
-
-use alloc::sync::Arc;
-use bindings::ENOENT;
-
+use super::{Dentry, Inode};
 use crate::{
     kernel::vfs::{s_isdir, s_islnk},
     prelude::*,
-    rcu::{RCUIterator, RCUList, RCUPointer},
+    rcu::{RCUIterator, RCUList},
 };
-
-use super::{Dentry, Inode};
-
-use lazy_static::lazy_static;
+use alloc::sync::Arc;
+use bindings::ENOENT;
+use core::sync::atomic::Ordering;
 
 const DCACHE_HASH_BITS: u32 = 8;
 
-lazy_static! {
-    static ref DCACHE: [RCUList<Dentry>; 1 << DCACHE_HASH_BITS] =
-        core::array::from_fn(|_| RCUList::new());
-    static ref DROOT: Arc<Dentry> = {
-        let dentry = Arc::new_uninit();
-        let fake_parent = unsafe { dentry.clone().assume_init() };
-
-        unsafe { &mut *(Arc::as_ptr(&dentry) as *mut MaybeUninit<Dentry>) }.write(Dentry {
-            parent: fake_parent,
-            name: b"[root]".as_slice().into(),
-            hash: 0,
-            prev: AtomicPtr::default(),
-            next: AtomicPtr::default(),
-            data: RCUPointer::empty(),
-        });
-
-        unsafe { dentry.assume_init() }
-    };
-}
-
-pub fn _looped_droot() -> &'static Arc<Dentry> {
-    &DROOT
-}
+static DCACHE: [RCUList<Dentry>; 1 << DCACHE_HASH_BITS] =
+    [const { RCUList::new() }; 1 << DCACHE_HASH_BITS];
 
 pub fn d_hinted(hash: u64) -> &'static RCUList<Dentry> {
     let hash = hash as usize & ((1 << DCACHE_HASH_BITS) - 1);

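Two things turn the `DCACHE` `lazy_static` block into a plain static: `RCUList::new` becomes `const fn` (see the `src/rcu.rs` hunk below), and the `[const { ... }; N]` repeat form duplicates a compile-time value without requiring `Copy`. A standalone sketch of the idiom:

```rust
use core::ptr::null_mut;
use core::sync::atomic::AtomicPtr;

struct Bucket {
    head: AtomicPtr<u8>,
}

impl Bucket {
    const fn new() -> Self {
        Self { head: AtomicPtr::new(null_mut()) }
    }
}

// `[Bucket::new(); 256]` would not compile because AtomicPtr is not Copy;
// the inline-const block is evaluated at compile time and duplicated.
static TABLE: [Bucket; 256] = [const { Bucket::new() }; 256];
```
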
+ 8 - 11
src/kernel/vfs/mod.rs

@@ -1,14 +1,11 @@
+use super::task::Thread;
 use crate::prelude::*;
-
 use alloc::sync::Arc;
 use bindings::{dev_t, S_IFBLK, S_IFCHR, S_IFDIR, S_IFLNK, S_IFMT, S_IFREG};
 use dentry::Dentry;
+use eonix_sync::LazyLock;
 use inode::Mode;
 
-use super::task::Thread;
-
-use lazy_static::lazy_static;
-
 pub mod dentry;
 pub mod file;
 pub mod filearray;
@@ -52,13 +49,13 @@ pub struct FsContext {
     pub umask: Spin<Mode>,
 }
 
-lazy_static! {
-    static ref GLOBAL_FS_CONTEXT: Arc<FsContext> = Arc::new(FsContext {
-        fsroot: Dentry::kernel_root_dentry(),
-        cwd: Spin::new(Dentry::kernel_root_dentry()),
+static GLOBAL_FS_CONTEXT: LazyLock<Arc<FsContext>> = LazyLock::new(|| {
+    Arc::new(FsContext {
+        fsroot: Dentry::root().clone(),
+        cwd: Spin::new(Dentry::root().clone()),
         umask: Spin::new(0o022),
-    });
-}
+    })
+});
 
 impl TimeSpec {
     pub const fn default() -> Self {

+ 49 - 66
src/kernel/vfs/mount.rs

@@ -1,18 +1,12 @@
-use crate::{fs::tmpfs, prelude::*};
-
-use alloc::{
-    collections::btree_map::{BTreeMap, Entry},
-    sync::Arc,
-};
-use bindings::{EEXIST, ENODEV, ENOTDIR};
-
-use lazy_static::lazy_static;
-
 use super::{
-    dentry::{dcache, Dentry},
+    dentry::{dcache, Dentry, DROOT},
     inode::Inode,
     vfs::Vfs,
 };
+use crate::prelude::*;
+use alloc::{collections::btree_map::BTreeMap, string::ToString as _, sync::Arc};
+use bindings::{EEXIST, ENODEV, ENOTDIR};
+use eonix_sync::LazyLock;
 
 pub const MS_RDONLY: u64 = 1 << 0;
 pub const MS_NOSUID: u64 = 1 << 1;
@@ -32,17 +26,11 @@ const MOUNT_FLAGS: [(u64, &str); 6] = [
     (MS_LAZYTIME, ",lazytime"),
 ];
 
-lazy_static! {
-    static ref MOUNT_CREATORS: Spin<BTreeMap<String, Arc<dyn MountCreator>>> =
-        Spin::new(BTreeMap::new());
-    static ref MOUNTS: Spin<Vec<(Arc<Dentry>, MountPointData)>> = Spin::new(vec![]);
-}
-
-static mut ROOTFS: Option<Arc<Dentry>> = None;
+static MOUNT_CREATORS: Spin<BTreeMap<String, Arc<dyn MountCreator>>> = Spin::new(BTreeMap::new());
+static MOUNTS: Spin<Vec<(Arc<Dentry>, MountPointData)>> = Spin::new(vec![]);
 
-#[allow(dead_code)]
 pub struct Mount {
-    vfs: Arc<dyn Vfs>,
+    _vfs: Arc<dyn Vfs>,
     root: Arc<Dentry>,
 }
 
@@ -52,7 +40,7 @@ impl Mount {
         root_dentry.save_dir(root_inode)?;
 
         Ok(Self {
-            vfs,
+            _vfs: vfs,
             root: root_dentry,
         })
     }
@@ -71,12 +59,11 @@ pub trait MountCreator: Send + Sync {
 
 pub fn register_filesystem(fstype: &str, creator: Arc<dyn MountCreator>) -> KResult<()> {
     let mut creators = MOUNT_CREATORS.lock();
-    match creators.entry(String::from(fstype)) {
-        Entry::Occupied(_) => Err(EEXIST),
-        Entry::Vacant(entry) => {
-            entry.insert(creator);
-            Ok(())
-        }
+    if !creators.contains_key(fstype) {
+        creators.insert(fstype.to_string(), creator);
+        Ok(())
+    } else {
+        Err(EEXIST)
     }
 }
 
@@ -164,45 +151,41 @@ pub fn dump_mounts(buffer: &mut dyn core::fmt::Write) {
     }
 }
 
-pub fn init_vfs() -> KResult<()> {
-    tmpfs::init();
-
-    let source = String::from("rootfs");
-    let fstype = String::from("tmpfs");
-    let flags = MS_NOATIME;
-
-    let mount = {
-        let creators = MOUNT_CREATORS.lock();
-        let creator = creators.get(&fstype).ok_or(ENODEV)?;
-
-        creator.create_mount(&source, flags, dcache::_looped_droot())?
-    };
-
-    let root_dentry = mount.root().clone();
-    dcache::d_add(&root_dentry);
-
-    unsafe { ROOTFS = Some(root_dentry) };
-
-    let mpdata = MountPointData {
-        mount,
-        source,
-        mountpoint: String::from("/"),
-        fstype,
-        flags,
-    };
-
-    MOUNTS
-        .lock()
-        .push((dcache::_looped_droot().clone(), mpdata));
-
-    Ok(())
-}
-
 impl Dentry {
-    pub fn kernel_root_dentry() -> Arc<Dentry> {
-        #[allow(static_mut_refs)]
-        unsafe {
-            ROOTFS.as_ref().cloned().unwrap()
-        }
+    pub fn root() -> &'static Arc<Dentry> {
+        static ROOT: LazyLock<Arc<Dentry>> = LazyLock::new(|| {
+            let source = String::from("(rootfs)");
+            let fstype = String::from("tmpfs");
+            let mount_flags = MS_NOATIME;
+
+            let creator = MOUNT_CREATORS
+                .lock()
+                .get(&fstype)
+                .cloned()
+                .expect("tmpfs not registered.");
+
+            let mount = creator
+                .create_mount(&source, mount_flags, &DROOT)
+                .expect("Failed to create root mount.");
+
+            let root_dentry = mount.root().clone();
+
+            dcache::d_add(&root_dentry);
+
+            MOUNTS.lock().push((
+                DROOT.clone(),
+                MountPointData {
+                    mount,
+                    source,
+                    mountpoint: String::from("/"),
+                    fstype,
+                    flags: mount_flags,
+                },
+            ));
+
+            root_dentry
+        });
+
+        &ROOT
     }
 }

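Hiding `ROOT` inside `Dentry::root()` replaces the old `static mut ROOTFS` plus an `init_vfs()` that had to be sequenced by hand: the rootfs is now mounted on the first call, whenever that happens. The same shape with `std` types for illustration (the kernel uses `eonix_sync::LazyLock`, which works in `no_std`):

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, LazyLock};

fn device_names() -> &'static Arc<BTreeMap<u32, &'static str>> {
    // Function-local static: initialized once, on first call, with no
    // separate init function to order against the rest of boot.
    static NAMES: LazyLock<Arc<BTreeMap<u32, &'static str>>> =
        LazyLock::new(|| Arc::new(BTreeMap::from([(0, "rootfs")])));
    &NAMES
}
```
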
+ 3 - 4
src/lib.rs

@@ -67,7 +67,7 @@ extern "C" {
     fn init_pci();
 }
 
-struct Allocator {}
+struct Allocator;
 unsafe impl GlobalAlloc for Allocator {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         let result = _do_allocate(layout.size());
@@ -88,7 +88,7 @@ unsafe impl GlobalAlloc for Allocator {
 }
 
 #[global_allocator]
-static ALLOCATOR: Allocator = Allocator {};
+static ALLOCATOR: Allocator = Allocator;
 
 extern "C" {
     fn init_allocator();
@@ -107,8 +107,6 @@ pub extern "C" fn rust_kinit(early_kstack_pfn: usize) -> ! {
     // TODO: Move this to rust.
     unsafe { init_pci() };
 
-    kernel::vfs::mount::init_vfs().unwrap();
-
     // To satisfy the `Scheduler` "preempt count == 0" assertion.
     eonix_preempt::disable();
 
@@ -136,6 +134,7 @@ async fn init_process(early_kstack_pfn: usize) {
     driver::e1000e::register_e1000e_driver();
     driver::ahci::register_ahci_driver();
 
+    fs::tmpfs::init();
     fs::procfs::init();
     fs::fat32::init();
 

+ 1 - 4
src/path.rs

@@ -1,7 +1,4 @@
-use crate::prelude::*;
-
-use bindings::ENOENT;
-
+use crate::{kernel::constants::ENOENT, prelude::*};
 use core::fmt::{self, Debug, Formatter};
 
 pub struct Path<'lt> {

+ 1 - 5
src/rcu.rs

@@ -54,7 +54,7 @@ pub struct RCUList<T: RCUNode<T>> {
 }
 
 impl<T: RCUNode<T>> RCUList<T> {
-    pub fn new() -> Self {
+    pub const fn new() -> Self {
         Self {
             head: AtomicPtr::new(core::ptr::null_mut()),
             reader_lock: rwlock_new(()),
@@ -195,10 +195,6 @@ impl<T: core::fmt::Debug> core::fmt::Debug for RCUPointer<T> {
 }
 
 impl<T> RCUPointer<T> {
-    pub fn new_with(value: Arc<T>) -> Self {
-        Self(AtomicPtr::new(Arc::into_raw(value) as *mut _))
-    }
-
     pub fn empty() -> Self {
         Self(AtomicPtr::new(core::ptr::null_mut()))
     }

+ 2 - 1
src/sync.rs

@@ -1,9 +1,10 @@
 mod arcswap;
 mod condvar;
 
-pub use eonix_sync::Spin;
 use eonix_sync::{MutexWait, RwLockWait};
 
+pub use eonix_sync::Spin;
+
 #[doc(hidden)]
 #[derive(Debug)]
 pub struct RwLockWaitImpl {