Browse Source

Merge branch 'master' into riscv64-support

Heinz 8 tháng trước
mục cha
commit
61dd83dec7

+ 0 - 1
.gitignore

@@ -1,6 +1,5 @@
 build/
 
-.vscode/
 .idea/
 
 test/

+ 58 - 0
.vscode/launch.json

@@ -0,0 +1,58 @@
+{
+    "configurations": [
+        {
+            "type": "cppdbg",
+            "request": "launch",
+            "name": "Launch Kernel",
+            "program": "${workspaceFolder}/build/kernel.out",
+            "args": [],
+            "stopAtEntry": false,
+            "cwd": "${workspaceFolder}",
+            "environment": [],
+            "externalConsole": false,
+            "MIMode": "gdb",
+            "miDebuggerPath": "x86_64-elf-gdb",
+            "miDebuggerServerAddress": "127.0.0.1:1234",
+            "setupCommands": [
+                // {
+                //     "text": "source ${env:HOME}/.rustup/toolchains/nightly-aarch64-apple-darwin/lib/rustlib/etc/gdb_load_rust_pretty_printers.py",
+                //     "description": "Load Rust pretty printers",
+                //     "ignoreFailures": false
+                // },
+                {
+                    "text": "-enable-pretty-printing",
+                    "description": "Enable GDB pretty printing",
+                    "ignoreFailures": true
+                },
+                {
+                    "text": "source ${workspaceFolder}/pretty-print.py",
+                    "description": "Load GDB pretty printers",
+                    "ignoreFailures": false
+                },
+            ],
+            "preLaunchTask": "debug run",
+            "postDebugTask": "kill qemu"
+        },
+        {
+            "type": "cppdbg",
+            "request": "launch",
+            "name": "Attach Kernel",
+            "program": "${workspaceFolder}/build/kernel.out",
+            "args": [],
+            "stopAtEntry": false,
+            "cwd": "${workspaceFolder}",
+            "environment": [],
+            "externalConsole": false,
+            "MIMode": "gdb",
+            "miDebuggerPath": "x86_64-elf-gdb",
+            "miDebuggerServerAddress": "127.0.0.1:1234",
+            "setupCommands": [
+                {
+                    "text": "-enable-pretty-printing",
+                    "description": "Enable GDB pretty printing",
+                    "ignoreFailures": true
+                }
+            ]
+        }
+    ]
+}

+ 4 - 0
.vscode/settings.json

@@ -0,0 +1,4 @@
+{
+    "makefile.configureOnOpen": false,
+    "rust-analyzer.check.allTargets": false,
+}

+ 63 - 0
.vscode/tasks.json

@@ -0,0 +1,63 @@
+{
+    // See https://go.microsoft.com/fwlink/?LinkId=733558
+    // for the documentation about the tasks.json format
+    "version": "2.0.0",
+    "tasks": [
+        {
+            "label": "debug run",
+            "type": "shell",
+            "command": "make srun",
+            "isBackground": true,
+            "problemMatcher": [
+                {
+                    "owner": "cpp",
+                    "fileLocation": [
+                        "relative",
+                        "${workspaceFolder}"
+                    ],
+                    "pattern": {
+                        "regexp": "^(.*):(\\d+):(\\d+):\\s+(warning|error):\\s+(.*)$",
+                        "file": 1,
+                        "line": 2,
+                        "column": 3,
+                        "severity": 4,
+                        "message": 5
+                    },
+                    "background": {
+                        "activeOnStart": true,
+                        "beginsPattern": "cmake --build",
+                        "endsPattern": "qemu"
+                    }
+                }
+            ],
+            "presentation": {
+                "echo": false,
+                "reveal": "always",
+                "focus": false,
+                "panel": "shared",
+                "showReuseMessage": false,
+                "clear": true
+            },
+            "group": {
+                "kind": "build",
+                "isDefault": true
+            }
+        },
+        {
+            "label": "kill qemu",
+            "type": "shell",
+            "command": "killall qemu-system-x86_64",
+            "presentation": {
+                "echo": false,
+                "reveal": "never",
+                "focus": false,
+                "panel": "shared",
+                "showReuseMessage": false,
+                "clear": true
+            },
+            "group": {
+                "kind": "none",
+            }
+        }
+    ]
+}

+ 2 - 3
crates/eonix_log/src/lib.rs

@@ -1,9 +1,8 @@
 #![no_std]
 
-use core::fmt::{self, Write};
-
 use alloc::sync::Arc;
-use eonix_sync::Spin;
+use core::fmt::{self, Write};
+use eonix_sync::{Spin, SpinIrq as _};
 
 extern crate alloc;
 

+ 1 - 1
crates/eonix_runtime/src/scheduler.rs

@@ -14,7 +14,7 @@ use core::{
 };
 use eonix_log::println_trace;
 use eonix_preempt::assert_preempt_count_eq;
-use eonix_sync::{LazyLock, Spin};
+use eonix_sync::{LazyLock, Spin, SpinIrq as _};
 use intrusive_collections::RBTree;
 use pointers::BorrowedArc;
 

+ 2 - 2
crates/eonix_sync/src/lib.rs

@@ -15,8 +15,8 @@ pub use locked::{AsProof, AsProofMut, Locked, Proof, ProofMut};
 pub use mutex::{Mutex, MutexGuard};
 pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 pub use spin::{
-    LoopRelax, Relax, Spin, SpinGuard, SpinIrqGuard, SpinRelax, UnlockedSpinGuard,
-    UnlockedSpinIrqGuard,
+    ContextUnlock, DisablePreemption, LoopRelax, NoContext, Relax, Spin, SpinContext, SpinGuard,
+    SpinIrq, SpinRelax, UnlockedContext, UnlockedSpinGuard,
 };
 pub use wait_list::WaitList;
 

+ 89 - 31
crates/eonix_sync/src/spin.rs

@@ -7,11 +7,31 @@ use core::{
     marker::PhantomData,
     sync::atomic::{AtomicBool, Ordering},
 };
-use spin_irq::IrqStateGuard;
 
 pub use guard::{SpinGuard, UnlockedSpinGuard};
 pub use relax::{LoopRelax, Relax, SpinRelax};
-pub use spin_irq::{SpinIrqGuard, UnlockedSpinIrqGuard};
+pub use spin_irq::SpinIrq;
+
+pub trait SpinContext {
+    fn save() -> Self;
+    fn restore(self);
+}
+
+pub trait ContextUnlock: SpinContext {
+    type Unlocked: UnlockedContext<Relocked = Self>;
+
+    fn unlock(self) -> Self::Unlocked;
+}
+
+pub trait UnlockedContext {
+    type Relocked: ContextUnlock<Unlocked = Self>;
+
+    fn relock(self) -> Self::Relocked;
+}
+
+pub struct NoContext;
+
+pub struct DisablePreemption();
 
 //// A spinlock is a lock that uses busy-waiting to acquire the lock.
 /// It is useful for short critical sections where the overhead of a context switch
@@ -49,7 +69,6 @@ where
     unsafe fn do_unlock(&self) {
         let locked = self.locked.swap(false, Ordering::Release);
         debug_assert!(locked, "Spin::unlock(): Unlocking an unlocked lock");
-        eonix_preempt::enable();
     }
 }
 
@@ -58,26 +77,24 @@ where
     T: ?Sized,
     R: Relax,
 {
-    pub fn lock(&self) -> SpinGuard<'_, T, R> {
+    pub fn lock_with_context<C>(&self, context: C) -> SpinGuard<T, C, R>
+    where
+        C: SpinContext,
+    {
         self.do_lock();
 
-        SpinGuard {
-            lock: self,
-            // SAFETY: We are holding the lock, so we can safely access the value.
-            value: unsafe { &mut *self.value.get() },
-            _not_send: PhantomData,
-        }
+        SpinGuard::new(
+            self,
+            unsafe {
+                // SAFETY: We are holding the lock, so we can safely access the value.
+                &mut *self.value.get()
+            },
+            context,
+        )
     }
 
-    pub fn lock_irq(&self) -> SpinIrqGuard<'_, T, R> {
-        let irq_state = arch::disable_irqs_save();
-        let guard = self.lock();
-
-        SpinIrqGuard {
-            guard,
-            irq_state: IrqStateGuard::new(irq_state),
-            _not_send: PhantomData,
-        }
+    pub fn lock(&self) -> SpinGuard<T, DisablePreemption, R> {
+        self.lock_with_context(DisablePreemption::save())
     }
 
     pub fn get_mut(&mut self) -> &mut T {
@@ -86,8 +103,6 @@ where
     }
 
     fn do_lock(&self) {
-        eonix_preempt::disable();
-
         while let Err(_) =
             self.locked
                 .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
@@ -97,16 +112,6 @@ where
     }
 }
 
-impl<T, R> Clone for Spin<T, R>
-where
-    T: ?Sized + Clone,
-    R: Relax,
-{
-    fn clone(&self) -> Self {
-        Self::new(self.lock().clone())
-    }
-}
-
 // SAFETY: As long as the value protected by the lock is able to be shared between threads,
 //         we can send the lock between threads.
 unsafe impl<T, R> Send for Spin<T, R> where T: ?Sized + Send {}
@@ -114,3 +119,56 @@ unsafe impl<T, R> Send for Spin<T, R> where T: ?Sized + Send {}
 // SAFETY: As long as the value protected by the lock is able to be shared between threads,
 //         we can provide exclusive access guarantees to the lock.
 unsafe impl<T, R> Sync for Spin<T, R> where T: ?Sized + Send {}
+
+impl SpinContext for NoContext {
+    fn save() -> Self {
+        Self
+    }
+
+    fn restore(self) {}
+}
+
+impl ContextUnlock for NoContext {
+    type Unlocked = NoContext;
+
+    fn unlock(self) -> Self::Unlocked {
+        self
+    }
+}
+
+impl UnlockedContext for NoContext {
+    type Relocked = NoContext;
+
+    fn relock(self) -> Self::Relocked {
+        self
+    }
+}
+
+impl SpinContext for DisablePreemption {
+    fn save() -> Self {
+        eonix_preempt::disable();
+        Self()
+    }
+
+    fn restore(self) {
+        eonix_preempt::enable();
+    }
+}
+
+impl ContextUnlock for DisablePreemption {
+    type Unlocked = DisablePreemption;
+
+    fn unlock(self) -> Self::Unlocked {
+        eonix_preempt::enable();
+        self
+    }
+}
+
+impl UnlockedContext for DisablePreemption {
+    type Relocked = DisablePreemption;
+
+    fn relock(self) -> Self::Relocked {
+        eonix_preempt::disable();
+        self
+    }
+}

+ 70 - 21
crates/eonix_sync/src/spin/guard.rs

@@ -1,4 +1,6 @@
-use super::{Relax, Spin, SpinRelax};
+use super::{
+    ContextUnlock, DisablePreemption, Relax, Spin, SpinContext, SpinRelax, UnlockedContext,
+};
 use crate::{marker::NotSend, UnlockableGuard, UnlockedGuard};
 use core::{
     marker::PhantomData,
@@ -6,40 +8,70 @@ use core::{
     ops::{Deref, DerefMut},
 };
 
-pub struct SpinGuard<'a, T, R = SpinRelax>
+pub struct SpinGuard<'a, T, C = DisablePreemption, R = SpinRelax>
 where
     T: ?Sized,
+    C: SpinContext,
 {
-    pub(super) lock: &'a Spin<T, R>,
-    pub(super) value: &'a mut T,
+    lock: &'a Spin<T, R>,
+    value: &'a mut T,
+    context: Option<C>,
     /// We don't want this to be `Send` because we don't want to allow the guard to be
     /// transferred to another thread since we have disabled the preemption on the local cpu.
-    pub(super) _not_send: PhantomData<NotSend>,
+    _not_send: PhantomData<NotSend>,
 }
 
-pub struct UnlockedSpinGuard<'a, T, R>(&'a Spin<T, R>)
+pub struct UnlockedSpinGuard<'a, T, C, R>(&'a Spin<T, R>, C::Unlocked)
 where
-    T: ?Sized;
+    T: ?Sized,
+    C: ContextUnlock;
 
 // SAFETY: As long as the value protected by the lock is able to be shared between threads,
 //         we can access the guard from multiple threads.
-unsafe impl<T, R> Sync for SpinGuard<'_, T, R> where T: ?Sized + Sync {}
+unsafe impl<T, C, R> Sync for SpinGuard<'_, T, C, R>
+where
+    T: ?Sized + Sync,
+    C: SpinContext,
+{
+}
+
+impl<'a, T, C, R> SpinGuard<'a, T, C, R>
+where
+    T: ?Sized,
+    C: SpinContext,
+{
+    pub(super) fn new(lock: &'a Spin<T, R>, value: &'a mut T, context: C) -> Self {
+        Self {
+            lock,
+            value,
+            context: Some(context),
+            _not_send: PhantomData,
+        }
+    }
+}
 
-impl<T, R> Drop for SpinGuard<'_, T, R>
+impl<T, C, R> Drop for SpinGuard<'_, T, C, R>
 where
     T: ?Sized,
+    C: SpinContext,
 {
     fn drop(&mut self) {
         unsafe {
             // SAFETY: We are dropping the guard, so we are not holding the lock anymore.
             self.lock.do_unlock();
+
+            self.context
+                .take()
+                .expect("We should have a context here")
+                .restore();
         }
     }
 }
 
-impl<T, R> Deref for SpinGuard<'_, T, R>
+impl<T, C, R> Deref for SpinGuard<'_, T, C, R>
 where
     T: ?Sized,
+    C: SpinContext,
 {
     type Target = T;
 
@@ -49,9 +81,10 @@ where
     }
 }
 
-impl<T, R> DerefMut for SpinGuard<'_, T, R>
+impl<T, C, R> DerefMut for SpinGuard<'_, T, C, R>
 where
     T: ?Sized,
+    C: SpinContext,
 {
     fn deref_mut(&mut self) -> &mut Self::Target {
         // SAFETY: We are holding the lock, so we can safely access the value.
@@ -59,9 +92,10 @@ where
     }
 }
 
-impl<T, U, R> AsRef<U> for SpinGuard<'_, T, R>
+impl<T, U, C, R> AsRef<U> for SpinGuard<'_, T, C, R>
 where
     T: ?Sized,
+    C: SpinContext,
     U: ?Sized,
     <Self as Deref>::Target: AsRef<U>,
 {
@@ -70,9 +104,10 @@ where
     }
 }
 
-impl<T, U, R> AsMut<U> for SpinGuard<'_, T, R>
+impl<T, U, C, R> AsMut<U> for SpinGuard<'_, T, C, R>
 where
     T: ?Sized,
+    C: SpinContext,
     U: ?Sized,
     <Self as Deref>::Target: AsMut<U>,
 {
@@ -81,34 +116,48 @@ where
     }
 }
 
-impl<'a, T, R> UnlockableGuard for SpinGuard<'a, T, R>
+impl<'a, T, C, R> UnlockableGuard for SpinGuard<'a, T, C, R>
 where
     T: ?Sized + Send,
+    C: ContextUnlock,
+    C::Unlocked: Send,
     R: Relax,
 {
-    type Unlocked = UnlockedSpinGuard<'a, T, R>;
+    type Unlocked = UnlockedSpinGuard<'a, T, C, R>;
 
     fn unlock(self) -> Self::Unlocked {
-        let me = ManuallyDrop::new(self);
+        let mut me = ManuallyDrop::new(self);
         unsafe {
             // SAFETY: No access is possible after unlocking.
             me.lock.do_unlock();
         }
 
-        UnlockedSpinGuard(me.lock)
+        let unlocked_context = me
+            .context
+            .take()
+            .expect("We should have a context here")
+            .unlock();
+
+        UnlockedSpinGuard(me.lock, unlocked_context)
     }
 }
 
 // SAFETY: The guard is stateless so no more process needed.
-unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinGuard<'a, T, R>
+unsafe impl<'a, T, C, R> UnlockedGuard for UnlockedSpinGuard<'a, T, C, R>
 where
     T: ?Sized + Send,
+    C: ContextUnlock,
+    C::Unlocked: Send,
     R: Relax,
 {
-    type Guard = SpinGuard<'a, T, R>;
+    type Guard = SpinGuard<'a, T, C, R>;
 
     async fn relock(self) -> Self::Guard {
-        let Self(lock) = self;
-        lock.lock()
+        let Self(lock, context) = self;
+
+        let context = context.relock();
+        lock.do_lock();
+
+        SpinGuard::new(lock, unsafe { &mut *lock.value.get() }, context)
     }
 }

+ 28 - 99
crates/eonix_sync/src/spin/spin_irq.rs

@@ -1,124 +1,53 @@
-use super::{Relax, SpinGuard, SpinRelax, UnlockedSpinGuard};
-use crate::{marker::NotSend, UnlockableGuard, UnlockedGuard};
-use core::{
-    marker::PhantomData,
-    mem::ManuallyDrop,
-    ops::{Deref, DerefMut},
-};
+use super::{ContextUnlock, Relax, Spin, SpinContext, SpinGuard, UnlockedContext};
 
-pub(super) struct IrqStateGuard(ManuallyDrop<arch::IrqState>);
+pub struct IrqContext(arch::IrqState);
 
-pub struct SpinIrqGuard<'a, T, R = SpinRelax>
-where
-    T: ?Sized,
-{
-    pub(super) guard: SpinGuard<'a, T, R>,
-    pub(super) irq_state: IrqStateGuard,
-    /// We don't want this to be `Send` because we don't want to allow the guard to be
-    /// transferred to another thread since we have disabled the preemption and saved
-    /// IRQ states on the local cpu.
-    pub(super) _not_send: PhantomData<NotSend>,
-}
-
-pub struct UnlockedSpinIrqGuard<'a, T, R>
-where
-    T: ?Sized,
-{
-    unlocked_guard: UnlockedSpinGuard<'a, T, R>,
-    irq_state: IrqStateGuard,
-}
+pub struct UnlockedIrqContext(arch::IrqState);
 
-// SAFETY: As long as the value protected by the lock is able to be shared between threads,
-//         we can access the guard from multiple threads.
-unsafe impl<T, R> Sync for SpinIrqGuard<'_, T, R> where T: ?Sized + Sync {}
+pub trait SpinIrq {
+    type Value: ?Sized;
+    type Context: SpinContext;
+    type Relax;
 
-impl IrqStateGuard {
-    pub const fn new(irq_state: arch::IrqState) -> Self {
-        Self(ManuallyDrop::new(irq_state))
-    }
-}
-
-impl Drop for IrqStateGuard {
-    fn drop(&mut self) {
-        let Self(irq_state) = self;
-
-        unsafe {
-            // SAFETY: We are dropping the guard, so we are never going to access the value.
-            ManuallyDrop::take(irq_state).restore();
-        }
-    }
+    fn lock_irq(&self) -> SpinGuard<Self::Value, Self::Context, Self::Relax>;
 }
 
-impl<T, R> Deref for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-{
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        self.guard.deref()
+impl SpinContext for IrqContext {
+    fn save() -> Self {
+        IrqContext(arch::disable_irqs_save())
     }
-}
 
-impl<T, R> DerefMut for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-{
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        self.guard.deref_mut()
+    fn restore(self) {
+        self.0.restore();
     }
 }
 
-impl<T, U, R> AsRef<U> for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-    U: ?Sized,
-    <Self as Deref>::Target: AsRef<U>,
-{
-    fn as_ref(&self) -> &U {
-        self.deref().as_ref()
-    }
-}
+impl ContextUnlock for IrqContext {
+    type Unlocked = UnlockedIrqContext;
 
-impl<T, U, R> AsMut<U> for SpinIrqGuard<'_, T, R>
-where
-    T: ?Sized,
-    U: ?Sized,
-    <Self as Deref>::Target: AsMut<U>,
-{
-    fn as_mut(&mut self) -> &mut U {
-        self.deref_mut().as_mut()
+    fn unlock(self) -> Self::Unlocked {
+        UnlockedIrqContext(self.0)
     }
 }
 
-impl<'a, T, R> UnlockableGuard for SpinIrqGuard<'a, T, R>
-where
-    T: ?Sized + Send,
-    R: Relax,
-{
-    type Unlocked = UnlockedSpinIrqGuard<'a, T, R>;
+impl UnlockedContext for UnlockedIrqContext {
+    type Relocked = IrqContext;
 
-    fn unlock(self) -> Self::Unlocked {
-        UnlockedSpinIrqGuard {
-            unlocked_guard: self.guard.unlock(),
-            irq_state: self.irq_state,
-        }
+    fn relock(self) -> Self::Relocked {
+        IrqContext(self.0)
     }
 }
 
-// SAFETY: The guard is stateless so no more process needed.
-unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinIrqGuard<'a, T, R>
+impl<T, R> SpinIrq for Spin<T, R>
 where
-    T: ?Sized + Send,
+    T: ?Sized,
     R: Relax,
 {
-    type Guard = SpinIrqGuard<'a, T, R>;
+    type Value = T;
+    type Context = IrqContext;
+    type Relax = R;
 
-    async fn relock(self) -> Self::Guard {
-        SpinIrqGuard {
-            guard: self.unlocked_guard.relock().await,
-            irq_state: self.irq_state,
-            _not_send: PhantomData,
-        }
+    fn lock_irq(&self) -> SpinGuard<Self::Value, Self::Context, Self::Relax> {
+        self.lock_with_context(IrqContext::save())
     }
 }

+ 1 - 1
crates/eonix_sync/src/wait_list.rs

@@ -1,7 +1,7 @@
 mod wait_handle;
 mod wait_object;
 
-use crate::{LazyLock, Spin};
+use crate::{LazyLock, Spin, SpinIrq as _};
 use core::fmt;
 use intrusive_collections::{linked_list::CursorMut, LinkedList};
 use wait_object::{WaitObject, WaitObjectAdapter};

+ 1 - 0
crates/eonix_sync/src/wait_list/wait_handle.rs

@@ -1,4 +1,5 @@
 use super::{wait_object::WaitObject, WaitList};
+use crate::SpinIrq as _;
 use core::{
     cell::UnsafeCell,
     hint::spin_loop,

+ 1 - 1
crates/eonix_sync/src/wait_list/wait_object.rs

@@ -1,5 +1,5 @@
 use super::WaitList;
-use crate::Spin;
+use crate::{Spin, SpinIrq as _};
 use core::{
     cell::UnsafeCell,
     marker::PhantomPinned,

+ 1 - 0
src/driver/ahci/mod.rs

@@ -16,6 +16,7 @@ use control::AdapterControl;
 use core::ptr::NonNull;
 use defs::*;
 use eonix_mm::address::{AddrOps as _, PAddr};
+use eonix_sync::SpinIrq as _;
 use port::AdapterPort;
 
 pub(self) use register::Register;

+ 1 - 1
src/driver/ahci/port.rs

@@ -14,7 +14,7 @@ use bindings::{EINVAL, EIO};
 use core::pin::pin;
 use eonix_mm::address::{Addr as _, PAddr};
 use eonix_runtime::task::Task;
-use eonix_sync::WaitList;
+use eonix_sync::{SpinIrq as _, WaitList};
 
 /// An `AdapterPort` is an HBA device in AHCI mode.
 ///

+ 1 - 1
src/driver/ahci/slot.rs

@@ -2,7 +2,7 @@ use super::{command_table::CommandTable, CommandHeader};
 use crate::KResult;
 use core::pin::pin;
 use eonix_mm::address::Addr as _;
-use eonix_sync::{Spin, WaitList};
+use eonix_sync::{Spin, SpinIrq as _, WaitList};
 
 pub struct CommandSlot<'a> {
     /// # Usage

+ 1 - 1
src/driver/serial.rs

@@ -10,7 +10,7 @@ use alloc::{collections::vec_deque::VecDeque, format, sync::Arc};
 use bitflags::bitflags;
 use core::pin::pin;
 use eonix_runtime::{run::FutureRun, scheduler::Scheduler};
-use eonix_sync::WaitList;
+use eonix_sync::{SpinIrq as _, WaitList};
 
 bitflags! {
     struct LineStatus: u8 {

+ 1 - 0
src/kernel/interrupt.rs

@@ -8,6 +8,7 @@ use crate::{driver::Port8, prelude::*};
 use alloc::sync::Arc;
 use arch::{ExtendedContext, InterruptContext};
 use eonix_runtime::task::Task;
+use eonix_sync::SpinIrq as _;
 
 const PIC1_COMMAND: Port8 = Port8::new(0x20);
 const PIC1_DATA: Port8 = Port8::new(0x21);

+ 1 - 1
src/kernel/vfs/filearray.rs

@@ -74,7 +74,7 @@ impl FileArray {
 
     pub fn new_cloned(other: &Self) -> Arc<Self> {
         Arc::new(Self {
-            inner: Spin::clone(&other.inner),
+            inner: Spin::new(other.inner.lock().clone()),
         })
     }
 

+ 2 - 3
src/kernel/vfs/mod.rs

@@ -42,7 +42,6 @@ pub struct TimeSpec {
     pub nsec: u64,
 }
 
-#[derive(Clone)]
 pub struct FsContext {
     pub fsroot: Arc<Dentry>,
     pub cwd: Spin<Arc<Dentry>>,
@@ -75,8 +74,8 @@ impl FsContext {
     pub fn new_cloned(other: &Self) -> Arc<Self> {
         Arc::new(Self {
             fsroot: other.fsroot.clone(),
-            cwd: other.cwd.clone(),
-            umask: other.umask.clone(),
+            cwd: Spin::new(other.cwd.lock().clone()),
+            umask: Spin::new(other.umask.lock().clone()),
         })
     }