Ver Fonte

spin: rewrite spinlock, add traits for guards

greatbridf há 10 meses
pai
commit
3009c12592

+ 1 - 0
Cargo.lock

@@ -120,6 +120,7 @@ dependencies = [
 name = "eonix_sync"
 version = "0.1.0"
 dependencies = [
+ "arch",
  "eonix_preempt",
 ]
 

+ 2 - 10
Cargo.toml

@@ -24,17 +24,10 @@ lazy_static = { version = "1.5.0", features = ["spin_no_std"] }
 spin = "0.9.8"
 
 [features]
-default = ["smp", "trace_future"]
-trace_condvar = []
+default = ["smp"]
 trace_syscall = []
 trace_scheduler = []
-trace_future = []
-log_trace = [
-    "trace_condvar",
-    "trace_syscall",
-    "trace_scheduler",
-    "trace_future",
-]
+log_trace = ["trace_syscall", "trace_scheduler"]
 log_debug = []
 smp = []
 
@@ -58,7 +51,6 @@ opt-level = 0
 
 [profile.dev.package."*"]
 opt-level = 2
-debug = false
 
 [profile.dev.build-override]
 opt-level = 0

+ 35 - 2
arch/src/x86_64/interrupt.rs

@@ -306,6 +306,9 @@ pub struct InterruptControl {
     apic_base: APICRegs,
 }
 
+/// State of the interrupt flag.
+pub struct IrqState(u64);
+
 impl InterruptContext {
     pub fn set_return_value(&mut self, value: u64) {
         // The return value is stored in rax.
@@ -491,18 +494,48 @@ impl InterruptControl {
     }
 }
 
+impl IrqState {
+    pub fn restore(self) {
+        let Self(state) = self;
+
+        unsafe {
+            asm!(
+                "push {state}",
+                "popf",
+                state = in(reg) state,
+                options(att_syntax, nomem)
+            );
+        }
+    }
+}
+
 pub fn enable_irqs() {
     unsafe {
-        asm!("sti");
+        asm!("sti", options(att_syntax, nomem, nostack));
     }
 }
 
 pub fn disable_irqs() {
     unsafe {
-        asm!("cli");
+        asm!("cli", options(att_syntax, nomem, nostack));
     }
 }
 
+pub fn disable_irqs_save() -> IrqState {
+    let state: u64;
+    unsafe {
+        asm!(
+            "pushf",
+            "pop {state}",
+            "cli",
+            state = out(reg) state,
+            options(att_syntax, nomem)
+        );
+    }
+
+    IrqState(state)
+}
+
 extern "C" {
     pub fn _arch_fork_return();
 }

+ 1 - 0
crates/eonix_sync/Cargo.toml

@@ -4,6 +4,7 @@ version = "0.1.0"
 edition = "2024"
 
 [dependencies]
+arch = { path = "../../arch" }
 eonix_preempt = { path = "../eonix_preempt" }
 
 [features]

+ 39 - 85
crates/eonix_sync/src/guard.rs

@@ -1,9 +1,5 @@
 use crate::{Lock, LockStrategy};
-use core::{
-    mem::ManuallyDrop,
-    ops::{Deref, DerefMut},
-    ptr,
-};
+use core::ops::{Deref, DerefMut};
 
 pub struct Guard<'a, T, S, L, const WRITE: bool = true>
 where
@@ -16,75 +12,6 @@ where
     pub(crate) context: S::GuardContext,
 }
 
-pub struct UnlockedGuard<'a, T, S, L, const WRITE: bool = true>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    pub(crate) lock: &'a Lock<T, L>,
-    pub(crate) strategy_data: &'a S::StrategyData,
-    pub(crate) context: S::GuardContext,
-}
-
-impl<'a, T, S, L, const W: bool> Guard<'a, T, S, L, W>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    #[must_use = "The returned `UnlockedGuard` must be used to relock the lock."]
-    pub fn unlock(mut self) -> UnlockedGuard<'a, T, S, L, W> {
-        unsafe { S::do_temporary_unlock(&self.strategy_data, &mut self.context) }
-
-        UnlockedGuard {
-            lock: self.lock,
-            strategy_data: self.strategy_data,
-            context: {
-                let me = ManuallyDrop::new(self);
-                // SAFETY: We are using `ManuallyDrop` to prevent the destructor from running.
-                unsafe { ptr::read(&me.context) }
-            },
-        }
-    }
-
-    /// # Safety
-    /// This function is unsafe because it allows you to unlock the lock without
-    /// dropping the guard. Using the guard after calling this function is
-    /// undefined behavior.
-    pub unsafe fn force_unlock(&mut self) {
-        unsafe { S::do_temporary_unlock(&self.strategy_data, &mut self.context) }
-    }
-
-    /// # Safety
-    /// Calling this function twice on a force unlocked guard will cause deadlocks.
-    pub unsafe fn force_relock(&mut self) {
-        unsafe { S::do_relock(&self.strategy_data, &mut self.context) }
-    }
-}
-
-impl<'a, T, S, L, const W: bool> UnlockedGuard<'a, T, S, L, W>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    #[must_use = "Throwing away the relocked guard is pointless."]
-    pub fn relock(mut self) -> Guard<'a, T, S, L, W> {
-        unsafe { S::do_relock(&self.strategy_data, &mut self.context) }
-
-        Guard {
-            lock: self.lock,
-            strategy_data: self.strategy_data,
-            context: {
-                let me = ManuallyDrop::new(self);
-                // SAFETY: We are using `ManuallyDrop` to prevent the destructor from running.
-                unsafe { ptr::read(&me.context) }
-            },
-        }
-    }
-}
-
 impl<T, S, L, const W: bool> Deref for Guard<'_, T, S, L, W>
 where
     T: ?Sized,
@@ -131,29 +58,56 @@ where
     }
 }
 
-impl<T, S, L, const WRITE: bool> Drop for UnlockedGuard<'_, T, S, L, WRITE>
+impl<T, S, L, const WRITE: bool> Drop for Guard<'_, T, S, L, WRITE>
 where
     T: ?Sized,
     S: LockStrategy,
     L: LockStrategy,
 {
     fn drop(&mut self) {
-        // SAFETY: If we are stubborn enough to drop the unlocked guard, relock it and
-        //         then unlock it again to prevent anything weird from happening.
-        unsafe {
-            S::do_relock(&self.strategy_data, &mut self.context);
-            S::do_unlock(&self.strategy_data, &mut self.context);
-        }
+        unsafe { S::do_unlock(&self.strategy_data, &mut self.context) }
     }
 }
 
-impl<T, S, L, const WRITE: bool> Drop for Guard<'_, T, S, L, WRITE>
+pub trait UnlockableGuard {
+    type Unlocked: UnlockedGuard<Guard = Self>;
+
+    #[must_use = "The returned `UnlockedGuard` must be used to relock the lock."]
+    fn unlock(self) -> Self::Unlocked;
+}
+
+/// # Safety
+/// Implementors of this trait MUST ensure that the lock is correctly unlocked if
+/// dropped accidentally.
+pub unsafe trait UnlockedGuard {
+    type Guard: UnlockableGuard;
+
+    #[must_use = "Throwing away the relocked guard is pointless."]
+    fn relock(self) -> Self::Guard;
+}
+
+pub trait ForceUnlockableGuard {
+    /// # Safety
+    /// This function is unsafe because it allows you to unlock the lock without
+    /// dropping the guard. Using the guard after calling this function is
+    /// undefined behavior.
+    unsafe fn force_unlock(&mut self);
+
+    /// # Safety
+    /// Calling this function twice on a force unlocked guard will cause deadlocks.
+    unsafe fn force_relock(&mut self);
+}
+
+impl<'a, T, S, L, const W: bool> ForceUnlockableGuard for Guard<'a, T, S, L, W>
 where
-    T: ?Sized,
     S: LockStrategy,
     L: LockStrategy,
 {
-    fn drop(&mut self) {
-        unsafe { S::do_unlock(&self.strategy_data, &mut self.context) }
+    unsafe fn force_unlock(&mut self) {
+        unsafe { S::do_temporary_unlock(&self.strategy_data, &mut self.context) }
+    }
+
+    unsafe fn force_relock(&mut self) {
+        unsafe { S::do_relock(&self.strategy_data, &mut self.context) }
     }
 }

+ 3 - 4
crates/eonix_sync/src/lib.rs

@@ -3,13 +3,12 @@
 mod guard;
 mod lock;
 mod locked;
+mod marker;
 mod spin;
 mod strategy;
 
-pub use guard::Guard;
+pub use guard::{ForceUnlockableGuard, Guard, UnlockableGuard, UnlockedGuard};
 pub use lock::Lock;
 pub use locked::{AsProof, AsProofMut, Locked, Proof, ProofMut};
-pub use spin::{IrqStrategy, SpinStrategy};
+pub use spin::{LoopRelax, Relax, Spin, SpinGuard, SpinIrqGuard, SpinRelax};
 pub use strategy::LockStrategy;
-
-pub type Spin<T> = Lock<T, SpinStrategy>;

+ 68 - 10
crates/eonix_sync/src/lock.rs

@@ -1,6 +1,8 @@
-use super::{spin::IrqStrategy, strategy::LockStrategy};
+use super::strategy::LockStrategy;
 use crate::Guard;
-use core::{cell::UnsafeCell, fmt};
+use core::{arch::asm, cell::UnsafeCell, fmt, marker::PhantomData};
+
+pub struct IrqStrategy<Strategy: LockStrategy>(PhantomData<Strategy>);
 
 pub struct Lock<T, S>
 where
@@ -87,14 +89,6 @@ where
         }
     }
 
-    pub fn lock_shared_irq(&self) -> Guard<T, IrqStrategy<S>, S, false> {
-        Guard {
-            lock: self,
-            strategy_data: &self.strategy_data,
-            context: unsafe { IrqStrategy::<S>::do_lock(&self.strategy_data) },
-        }
-    }
-
     pub fn get_mut(&mut self) -> &mut T {
         unsafe { &mut *self.value.get() }
     }
@@ -137,3 +131,67 @@ where
         }
     }
 }
+
+unsafe impl<Strategy: LockStrategy> LockStrategy for IrqStrategy<Strategy> {
+    type StrategyData = Strategy::StrategyData;
+    type GuardContext = (Strategy::GuardContext, usize);
+
+    fn new_data() -> Self::StrategyData {
+        Strategy::new_data()
+    }
+
+    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
+        let mut context: usize;
+
+        unsafe {
+            asm!(
+                "pushf",
+                "pop {context}",
+                "cli",
+                context = out(reg) context,
+            );
+        }
+
+        unsafe { (Strategy::do_lock(data), context) }
+    }
+
+    unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
+        unsafe {
+            Strategy::do_unlock(data, &mut context.0);
+
+            asm!(
+                "push {context}",
+                "popf",
+                context = in(reg) context.1,
+                options(nomem),
+            )
+        }
+    }
+
+    unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
+        unsafe { Strategy::do_unlock(data, &mut context.0) }
+    }
+
+    unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
+        unsafe { Strategy::do_relock(data, &mut context.0) }
+    }
+
+    unsafe fn is_locked(data: &Self::StrategyData) -> bool {
+        unsafe { Strategy::is_locked(data) }
+    }
+
+    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
+        let mut irq_context: usize;
+        unsafe {
+            asm!(
+                "pushf",
+                "pop {context}",
+                "cli",
+                context = out(reg) irq_context,
+            );
+        }
+
+        let lock_context = unsafe { Strategy::try_lock(data) };
+        lock_context.map(|lock_context| (lock_context, irq_context))
+    }
+}

+ 12 - 0
crates/eonix_sync/src/marker.rs

@@ -0,0 +1,12 @@
+use core::{cell::UnsafeCell, marker::PhantomData};
+
+/// A marker type that indicates that the type is not `Send`.
+pub struct NotSend(PhantomData<*const ()>);
+
+/// A marker type that indicates that the type is not `Sync`.
+#[allow(dead_code)]
+pub struct NotSync(PhantomData<UnsafeCell<()>>);
+
+// SAFETY: This is a marker type that indicates that the type is not `Send`.
+//         So no restrictions on `Sync` are needed.
+unsafe impl Sync for NotSend {}

+ 88 - 93
crates/eonix_sync/src/spin.rs

@@ -1,122 +1,117 @@
-use super::strategy::LockStrategy;
+mod guard;
+mod relax;
+
 use core::{
-    arch::asm,
+    cell::UnsafeCell,
     marker::PhantomData,
+    mem::ManuallyDrop,
     sync::atomic::{AtomicBool, Ordering},
 };
 
-pub struct SpinStrategy;
-pub struct IrqStrategy<Strategy: LockStrategy>(PhantomData<Strategy>);
-
-impl SpinStrategy {
-    fn is_locked(data: &<Self as LockStrategy>::StrategyData) -> bool {
-        data.load(Ordering::Relaxed)
-    }
+pub use guard::{SpinGuard, SpinIrqGuard};
+pub use relax::{LoopRelax, Relax, SpinRelax};
+
+#[derive(Debug, Default)]
+pub struct Spin<T, R = SpinRelax>
+where
+    T: ?Sized,
+{
+    _phantom: PhantomData<R>,
+    locked: AtomicBool,
+    value: UnsafeCell<T>,
 }
 
-unsafe impl LockStrategy for SpinStrategy {
-    type StrategyData = AtomicBool;
-    type GuardContext = ();
-
-    fn new_data() -> Self::StrategyData {
-        AtomicBool::new(false)
-    }
-
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool {
-        data.load(Ordering::Relaxed)
-    }
-
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
-        use Ordering::{Acquire, Relaxed};
-        eonix_preempt::disable();
-
-        if data.compare_exchange(false, true, Acquire, Relaxed).is_ok() {
-            Some(())
-        } else {
-            None
-        }
-    }
-
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
-        use Ordering::{Acquire, Relaxed};
-        eonix_preempt::disable();
-
-        while data
-            .compare_exchange_weak(false, true, Acquire, Relaxed)
-            .is_err()
-        {
-            while Self::is_locked(data) {
-                core::hint::spin_loop();
-            }
+impl<T, R> Spin<T, R>
+where
+    R: Relax,
+{
+    pub const fn new(value: T) -> Self {
+        Self {
+            locked: AtomicBool::new(false),
+            value: UnsafeCell::new(value),
+            _phantom: PhantomData,
         }
     }
+}
 
-    unsafe fn do_unlock(data: &Self::StrategyData, _: &mut Self::GuardContext) {
-        data.store(false, Ordering::Release);
+impl<T, R> Spin<T, R>
+where
+    T: ?Sized,
+{
+    /// # Safety
+    /// This function is unsafe because the caller MUST ensure that the protected
+    /// value is no longer accessed after calling this function.
+    unsafe fn do_unlock(&self) {
+        let locked = self.locked.swap(false, Ordering::Release);
+        debug_assert!(locked, "Spin::unlock(): Unlocking an unlocked lock");
         eonix_preempt::enable();
     }
 }
 
-unsafe impl<Strategy: LockStrategy> LockStrategy for IrqStrategy<Strategy> {
-    type StrategyData = Strategy::StrategyData;
-    type GuardContext = (Strategy::GuardContext, usize);
-
-    fn new_data() -> Self::StrategyData {
-        Strategy::new_data()
-    }
-
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
-        let mut context: usize;
-
-        unsafe {
-            asm!(
-                "pushf",
-                "pop {context}",
-                "cli",
-                context = out(reg) context,
-            );
+impl<T, R> Spin<T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    pub fn lock(&self) -> SpinGuard<'_, T, R> {
+        self.do_lock();
+
+        SpinGuard {
+            lock: self,
+            // SAFETY: We are holding the lock, so we can safely access the value.
+            value: unsafe { &mut *self.value.get() },
+            _not_send: PhantomData,
         }
-
-        unsafe { (Strategy::do_lock(data), context) }
     }
 
-    unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        unsafe {
-            Strategy::do_unlock(data, &mut context.0);
+    pub fn lock_irq(&self) -> SpinIrqGuard<'_, T, R> {
+        let irq_state = arch::disable_irqs_save();
+        self.do_lock();
 
-            asm!(
-                "push {context}",
-                "popf",
-                context = in(reg) context.1,
-                options(nomem),
-            )
+        SpinIrqGuard {
+            lock: self,
+            // SAFETY: We are holding the lock, so we can safely access the value.
+            value: unsafe { &mut *self.value.get() },
+            irq_state: ManuallyDrop::new(irq_state),
+            _not_send: PhantomData,
         }
     }
 
-    unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        unsafe { Strategy::do_unlock(data, &mut context.0) }
+    pub fn get_mut(&mut self) -> &mut T {
+        // SAFETY: The exclusive access to the lock is guaranteed by the borrow checker.
+        unsafe { &mut *self.value.get() }
     }
 
-    unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        unsafe { Strategy::do_relock(data, &mut context.0) }
-    }
+    fn do_lock(&self) {
+        eonix_preempt::disable();
 
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool {
-        unsafe { Strategy::is_locked(data) }
+        while let Err(_) =
+            self.locked
+                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
+        {
+            R::relax();
+        }
     }
+}
 
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
-        let mut irq_context: usize;
-        unsafe {
-            asm!(
-                "pushf",
-                "pop {context}",
-                "cli",
-                context = out(reg) irq_context,
-            );
+impl<T, R> Clone for Spin<T, R>
+where
+    T: ?Sized + Clone,
+    R: Relax,
+{
+    fn clone(&self) -> Self {
+        Self {
+            locked: AtomicBool::new(false),
+            value: UnsafeCell::new(self.lock().clone()),
+            _phantom: PhantomData,
         }
-
-        let lock_context = unsafe { Strategy::try_lock(data) };
-        lock_context.map(|lock_context| (lock_context, irq_context))
     }
 }
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         we can send the lock between threads.
+unsafe impl<T, R> Send for Spin<T, R> where T: ?Sized + Send {}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         we can provide exclusive access guarantees to the lock.
+unsafe impl<T, R> Sync for Spin<T, R> where T: ?Sized + Send {}

+ 267 - 0
crates/eonix_sync/src/spin/guard.rs

@@ -0,0 +1,267 @@
+use super::{Relax, Spin, SpinRelax};
+use crate::{marker::NotSend, ForceUnlockableGuard, UnlockableGuard, UnlockedGuard};
+use core::{
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+};
+
+pub struct SpinGuard<'a, T, R = SpinRelax>
+where
+    T: ?Sized,
+{
+    pub(super) lock: &'a Spin<T, R>,
+    pub(super) value: &'a mut T,
+    /// We don't want this to be `Send` because we don't want to allow the guard to be
+    /// transferred to another thread since we have disabled the preemption on the local cpu.
+    pub(super) _not_send: PhantomData<NotSend>,
+}
+
+pub struct SpinIrqGuard<'a, T, R = SpinRelax>
+where
+    T: ?Sized,
+{
+    pub(super) lock: &'a Spin<T, R>,
+    pub(super) value: &'a mut T,
+    pub(super) irq_state: ManuallyDrop<arch::IrqState>,
+    /// We don't want this to be `Send` because we don't want to allow the guard to be
+    /// transferred to another thread since we have disabled the preemption and saved
+    /// IRQ states on the local cpu.
+    pub(super) _not_send: PhantomData<NotSend>,
+}
+
+pub struct UnlockedSpinGuard<'a, T, R>(&'a Spin<T, R>)
+where
+    T: ?Sized;
+
+pub struct UnlockedSpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+{
+    lock: &'a Spin<T, R>,
+    irq_state: arch::IrqState,
+}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         we can access the guard from multiple threads.
+unsafe impl<T, R> Sync for SpinGuard<'_, T, R> where T: ?Sized + Sync {}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         we can access the guard from multiple threads.
+unsafe impl<T, R> Sync for SpinIrqGuard<'_, T, R> where T: ?Sized + Sync {}
+
+impl<T, R> Drop for SpinGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn drop(&mut self) {
+        unsafe {
+            // SAFETY: We are dropping the guard, so we are not holding the lock anymore.
+            self.lock.do_unlock();
+        }
+    }
+}
+
+impl<T, R> Drop for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn drop(&mut self) {
+        unsafe {
+            // SAFETY: We are dropping the guard, so we are not holding the lock anymore.
+            self.lock.do_unlock();
+
+            // SAFETY: We are dropping the guard, so we are never going to access the value.
+            ManuallyDrop::take(&mut self.irq_state).restore();
+        }
+    }
+}
+
+impl<T, R> Deref for SpinGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<T, R> DerefMut for SpinGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<T, R> AsRef<T> for SpinGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn as_ref(&self) -> &T {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<T, R> AsMut<T> for SpinGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn as_mut(&mut self) -> &mut T {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+impl<T, R> Deref for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<T, R> DerefMut for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<T, R> AsRef<T> for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn as_ref(&self) -> &T {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<T, R> AsMut<T> for SpinIrqGuard<'_, T, R>
+where
+    T: ?Sized,
+{
+    fn as_mut(&mut self) -> &mut T {
+        // SAFETY: We are holding the lock, so we can safely access the value.
+        self.value
+    }
+}
+
+impl<'a, T, R> UnlockableGuard for SpinGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    type Unlocked = UnlockedSpinGuard<'a, T, R>;
+
+    fn unlock(self) -> Self::Unlocked {
+        let me = ManuallyDrop::new(self);
+        unsafe {
+            // SAFETY: No access is possible after unlocking.
+            me.lock.do_unlock();
+        }
+
+        UnlockedSpinGuard(me.lock)
+    }
+}
+
+impl<'a, T, R> UnlockableGuard for SpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    type Unlocked = UnlockedSpinIrqGuard<'a, T, R>;
+
+    fn unlock(self) -> Self::Unlocked {
+        let mut me = ManuallyDrop::new(self);
+        unsafe {
+            // SAFETY: No access is possible after unlocking.
+            me.lock.do_unlock();
+        }
+
+        UnlockedSpinIrqGuard {
+            lock: me.lock,
+            // SAFETY: `me` is going to be dropped so never used again.
+            irq_state: unsafe { ManuallyDrop::take(&mut me.irq_state) },
+        }
+    }
+}
+
+// SAFETY: The guard is stateless so no more process needed.
+unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    type Guard = SpinGuard<'a, T, R>;
+
+    fn relock(self) -> Self::Guard {
+        let Self(lock) = self;
+        lock.lock()
+    }
+}
+
+// SAFETY: The guard is stateless so no more process needed.
+unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    type Guard = SpinIrqGuard<'a, T, R>;
+
+    fn relock(self) -> Self::Guard {
+        let mut guard = self.lock.lock_irq();
+
+        guard.irq_state = ManuallyDrop::new(self.irq_state);
+        guard
+    }
+}
+
+impl<'a, T, R> ForceUnlockableGuard for SpinGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    unsafe fn force_unlock(&mut self) {
+        unsafe {
+            // SAFETY: The caller assures that the value is no longer accessed.
+            self.lock.do_unlock();
+        }
+    }
+
+    unsafe fn force_relock(&mut self) {
+        self.lock.do_lock();
+    }
+}
+
+impl<'a, T, R> ForceUnlockableGuard for SpinIrqGuard<'a, T, R>
+where
+    T: ?Sized,
+    R: Relax,
+{
+    unsafe fn force_unlock(&mut self) {
+        unsafe {
+            // SAFETY: The caller assures that the value is no longer accessed.
+            self.lock.do_unlock();
+        }
+
+        // IRQ state is not restored.
+    }
+
+    unsafe fn force_relock(&mut self) {
+        self.lock.do_lock();
+    }
+}

+ 17 - 0
crates/eonix_sync/src/spin/relax.rs

@@ -0,0 +1,17 @@
+pub trait Relax {
+    fn relax();
+}
+
+#[derive(Default, Debug, Clone, Copy)]
+pub struct LoopRelax;
+impl Relax for LoopRelax {
+    fn relax() {}
+}
+
+#[derive(Default, Debug, Clone, Copy)]
+pub struct SpinRelax;
+impl Relax for SpinRelax {
+    fn relax() {
+        core::hint::spin_loop();
+    }
+}

+ 7 - 4
src/driver/serial.rs

@@ -61,11 +61,14 @@ impl Serial {
     }
 
     async fn wait_for_interrupt(&self) {
-        let mut working = self.working.lock_irq();
-        self.enable_interrupts();
-        *working = false;
+        let mut working = {
+            let mut working = self.working.lock_irq();
+            self.enable_interrupts();
+            *working = false;
 
-        self.cv_worker.async_wait(&mut working).await;
+            self.cv_worker.async_wait(working)
+        }
+        .await;
 
         *working = true;
         self.disable_interrupts();

+ 4 - 2
src/kernel/task/process.rs

@@ -6,7 +6,7 @@ use crate::{
     kernel::mem::MMList,
     prelude::*,
     rcu::{rcu_sync, RCUPointer, RCUReadGuard},
-    sync::{CondVar, RwSemReadGuard, SpinGuard},
+    sync::{CondVar, RwSemReadGuard},
 };
 use alloc::{
     collections::{btree_map::BTreeMap, vec_deque::VecDeque},
@@ -14,7 +14,9 @@ use alloc::{
 };
 use bindings::{ECHILD, EINTR, EPERM, ESRCH};
 use core::sync::atomic::{AtomicU32, Ordering};
-use eonix_sync::{AsProof as _, AsProofMut as _, Locked, Proof, ProofMut};
+use eonix_sync::{
+    AsProof as _, AsProofMut as _, ForceUnlockableGuard as _, Locked, Proof, ProofMut, SpinGuard,
+};
 use pointers::BorrowedArc;
 
 pub struct ProcessBuilder {

+ 1 - 2
src/sync.rs

@@ -2,12 +2,11 @@ mod arcswap;
 mod condvar;
 pub mod semaphore;
 
-pub use eonix_sync::{Guard, Lock, Spin, SpinStrategy};
+pub use eonix_sync::{Guard, Lock, Spin};
 
 pub type Mutex<T> = Lock<T, semaphore::SemaphoreStrategy<1>>;
 pub type RwSemaphore<T> = Lock<T, semaphore::RwSemaphoreStrategy>;
 
-pub type SpinGuard<'lock, T> = Guard<'lock, T, SpinStrategy, SpinStrategy, true>;
 pub type RwSemReadGuard<'lock, T> =
     Guard<'lock, T, semaphore::RwSemaphoreStrategy, semaphore::RwSemaphoreStrategy, false>;
 

+ 13 - 27
src/sync/condvar.rs

@@ -1,9 +1,9 @@
 use crate::prelude::*;
 use alloc::collections::vec_deque::VecDeque;
-use core::task::Waker;
+use core::{future::Future, task::Waker};
 use eonix_preempt::{assert_preempt_count_eq, assert_preempt_enabled};
 use eonix_runtime::{scheduler::Scheduler, task::Task};
-use eonix_sync::{Guard, LockStrategy};
+use eonix_sync::{ForceUnlockableGuard, UnlockableGuard, UnlockedGuard as _};
 
 pub struct CondVar<const INTERRUPTIBLE: bool> {
     waiters: Spin<VecDeque<Waker>>,
@@ -27,24 +27,18 @@ impl<const I: bool> CondVar<I> {
     }
 
     fn wake(waker: Waker) {
-        println_trace!("trace_condvar", "tid({}) is trying to wake", thread.tid);
         waker.wake();
-        println_trace!("trace_condvar", "tid({}) is awake", thread.tid);
     }
 
     fn sleep() -> Waker {
         let task = Task::current();
 
-        println_trace!("trace_condvar", "tid({}) is trying to sleep", task.id);
-
         let waker = if I {
             Waker::from(task.isleep())
         } else {
             Waker::from(task.usleep())
         };
 
-        println_trace!("trace_condvar", "tid({}) is sleeping", task.id);
-
         waker
     }
 
@@ -64,12 +58,7 @@ impl<const I: bool> CondVar<I> {
     ///
     /// # Might Sleep
     /// This function **might sleep**, so call it in a preemptible context.
-    pub fn wait<'a, T, S, L, const W: bool>(&self, guard: &mut Guard<'a, T, S, L, W>)
-    where
-        T: ?Sized,
-        S: LockStrategy,
-        L: LockStrategy,
-    {
+    pub fn wait(&self, guard: &mut impl ForceUnlockableGuard) {
         eonix_preempt::disable();
         let waker = Self::sleep();
         self.waiters.lock().push_back(waker);
@@ -90,15 +79,11 @@ impl<const I: bool> CondVar<I> {
         assert!(Task::current().is_runnable());
     }
 
-    /// Unlock the `guard`. Then wait until being waken up. Relock the `guard` before returning.
-    ///
-    /// # Might Sleep
-    /// This function **might sleep**, so call it in a preemptible context.
-    pub async fn async_wait<'a, T, S, L, const W: bool>(&self, guard: &mut Guard<'a, T, S, L, W>)
+    /// Unlock the `guard`. Then wait until being waken up. Relock the `guard` and return it.
+    pub fn async_wait<G>(&self, guard: G) -> impl Future<Output = G> + Send
     where
-        T: ?Sized,
-        S: LockStrategy,
-        L: LockStrategy,
+        G: UnlockableGuard,
+        G::Unlocked: Send,
     {
         let waker = Self::sleep();
         self.waiters.lock().push_back(waker);
@@ -109,13 +94,14 @@ impl<const I: bool> CondVar<I> {
         // Check the flag before doing `schedule()` but after we've unlocked the `guard`.
         // If the flag is already set, we don't need to sleep.
 
-        unsafe { guard.force_unlock() };
-
+        let guard = guard.unlock();
         assert_preempt_enabled!("CondVar::async_wait");
-        Scheduler::sleep().await;
 
-        unsafe { guard.force_relock() };
+        async {
+            Scheduler::sleep().await;
 
-        assert!(Task::current().is_runnable());
+            assert!(Task::current().is_runnable());
+            guard.relock()
+        }
     }
 }