Prechádzať zdrojové kódy

mutex: refactor to get rid of `Lock`

`Lock` is now completely removed from the eonix_sync crate.
greatbridf 10 mesiacov pred
rodič
commit
3d55507589

+ 0 - 97
crates/eonix_sync/src/guard.rs

@@ -1,78 +1,3 @@
-use crate::{Lock, LockStrategy};
-use core::ops::{Deref, DerefMut};
-
-pub struct Guard<'a, T, S, L, const WRITE: bool = true>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    pub(crate) lock: &'a Lock<T, L>,
-    pub(crate) strategy_data: &'a S::StrategyData,
-    pub(crate) context: S::GuardContext,
-}
-
-impl<T, S, L, const W: bool> Deref for Guard<'_, T, S, L, W>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        unsafe { &*self.lock.value.get() }
-    }
-}
-
-impl<T, S, L> DerefMut for Guard<'_, T, S, L, true>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *self.lock.value.get() }
-    }
-}
-
-impl<T, S, L, const WRITE: bool> AsRef<T> for Guard<'_, T, S, L, WRITE>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    fn as_ref(&self) -> &T {
-        unsafe { &*self.lock.value.get() }
-    }
-}
-
-impl<T, S, L> AsMut<T> for Guard<'_, T, S, L, true>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    fn as_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.lock.value.get() }
-    }
-}
-
-impl<T, S, L, const WRITE: bool> Drop for Guard<'_, T, S, L, WRITE>
-where
-    T: ?Sized,
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    fn drop(&mut self) {
-        if WRITE {
-            unsafe { S::do_unlock(&self.strategy_data, &mut self.context) }
-        } else {
-            unsafe { S::do_unlock_shared(&self.strategy_data, &mut self.context) }
-        }
-    }
-}
-
 pub trait UnlockableGuard {
     type Unlocked: UnlockedGuard<Guard = Self>;
 
@@ -101,25 +26,3 @@ pub trait ForceUnlockableGuard {
     /// Calling this function twice on a force unlocked guard will cause deadlocks.
     unsafe fn force_relock(&mut self);
 }
-
-impl<'a, T, S, L, const W: bool> ForceUnlockableGuard for Guard<'a, T, S, L, W>
-where
-    S: LockStrategy,
-    L: LockStrategy,
-{
-    unsafe fn force_unlock(&mut self) {
-        if W {
-            unsafe { S::do_temporary_unlock(&self.strategy_data, &mut self.context) }
-        } else {
-            unsafe { S::do_temporary_unlock_shared(&self.strategy_data, &mut self.context) }
-        }
-    }
-
-    unsafe fn force_relock(&mut self) {
-        if W {
-            unsafe { S::do_relock(&self.strategy_data, &mut self.context) }
-        } else {
-            unsafe { S::do_relock_shared(&self.strategy_data, &mut self.context) }
-        }
-    }
-}

+ 3 - 5
crates/eonix_sync/src/lib.rs

@@ -1,16 +1,14 @@
 #![no_std]
 
 mod guard;
-mod lock;
 mod locked;
 mod marker;
+mod mutex;
 mod rwlock;
 mod spin;
-mod strategy;
 
-pub use guard::{ForceUnlockableGuard, Guard, UnlockableGuard, UnlockedGuard};
-pub use lock::Lock;
+pub use guard::{ForceUnlockableGuard, UnlockableGuard, UnlockedGuard};
 pub use locked::{AsProof, AsProofMut, Locked, Proof, ProofMut};
+pub use mutex::{Mutex, MutexGuard, Wait as MutexWait};
 pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard, Wait as RwLockWait};
 pub use spin::{LoopRelax, Relax, Spin, SpinGuard, SpinIrqGuard, SpinRelax};
-pub use strategy::LockStrategy;

+ 0 - 196
crates/eonix_sync/src/lock.rs

@@ -1,196 +0,0 @@
-use super::strategy::LockStrategy;
-use crate::Guard;
-use core::{arch::asm, cell::UnsafeCell, fmt, marker::PhantomData};
-
-pub struct IrqStrategy<Strategy: LockStrategy>(PhantomData<Strategy>);
-
-pub struct Lock<T, S>
-where
-    T: ?Sized,
-    S: LockStrategy,
-{
-    pub(crate) strategy_data: S::StrategyData,
-    pub(crate) value: UnsafeCell<T>,
-}
-
-// SAFETY: As long as the value protected by the lock is able to be shared between threads,
-//         the lock itself is also able to be shared between threads.
-unsafe impl<T, S> Send for Lock<T, S>
-where
-    T: ?Sized + Send,
-    S: LockStrategy,
-{
-}
-
-// SAFETY: As long as the value protected by the lock is able to be shared between threads,
-//         the lock will provide synchronization between threads.
-unsafe impl<T, S> Sync for Lock<T, S>
-where
-    T: ?Sized + Send,
-    S: LockStrategy,
-{
-}
-
-impl<T, S> Lock<T, S>
-where
-    S: LockStrategy,
-{
-    pub fn new(value: T) -> Self {
-        Self {
-            strategy_data: S::new_data(),
-            value: UnsafeCell::new(value),
-        }
-    }
-}
-
-impl<T, S> Lock<T, S>
-where
-    T: ?Sized,
-    S: LockStrategy,
-{
-    pub fn is_locked(&self) -> bool {
-        unsafe { S::is_locked(&self.strategy_data) }
-    }
-
-    pub fn try_lock(&self) -> Option<Guard<T, S, S>> {
-        if !unsafe { S::is_locked(&self.strategy_data) } {
-            unsafe { S::try_lock(&self.strategy_data) }.map(|context| Guard {
-                lock: self,
-                strategy_data: &self.strategy_data,
-                context,
-            })
-        } else {
-            None
-        }
-    }
-
-    pub fn lock(&self) -> Guard<T, S, S> {
-        Guard {
-            lock: self,
-            strategy_data: &self.strategy_data,
-            context: unsafe { S::do_lock(&self.strategy_data) },
-        }
-    }
-
-    pub fn lock_irq(&self) -> Guard<T, IrqStrategy<S>, S> {
-        Guard {
-            lock: self,
-            strategy_data: &self.strategy_data,
-            context: unsafe { IrqStrategy::<S>::do_lock(&self.strategy_data) },
-        }
-    }
-
-    pub fn lock_shared(&self) -> Guard<T, S, S, false> {
-        Guard {
-            lock: self,
-            strategy_data: &self.strategy_data,
-            context: unsafe { S::do_lock_shared(&self.strategy_data) },
-        }
-    }
-
-    pub fn get_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.value.get() }
-    }
-}
-
-impl<T, S> fmt::Debug for Lock<T, S>
-where
-    T: fmt::Debug,
-    S: LockStrategy,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Lock")
-            .field("locked_value", &self.value)
-            .finish()
-    }
-}
-
-impl<T, S> Clone for Lock<T, S>
-where
-    T: Clone,
-    S: LockStrategy,
-{
-    fn clone(&self) -> Self {
-        Self {
-            strategy_data: S::new_data(),
-            value: UnsafeCell::new(self.lock_shared().clone()),
-        }
-    }
-}
-
-impl<T, S> Default for Lock<T, S>
-where
-    T: Default,
-    S: LockStrategy,
-{
-    fn default() -> Self {
-        Self {
-            strategy_data: S::new_data(),
-            value: Default::default(),
-        }
-    }
-}
-
-unsafe impl<Strategy: LockStrategy> LockStrategy for IrqStrategy<Strategy> {
-    type StrategyData = Strategy::StrategyData;
-    type GuardContext = (Strategy::GuardContext, usize);
-
-    fn new_data() -> Self::StrategyData {
-        Strategy::new_data()
-    }
-
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
-        let mut context: usize;
-
-        unsafe {
-            asm!(
-                "pushf",
-                "pop {context}",
-                "cli",
-                context = out(reg) context,
-            );
-        }
-
-        unsafe { (Strategy::do_lock(data), context) }
-    }
-
-    unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        unsafe {
-            Strategy::do_unlock(data, &mut context.0);
-
-            asm!(
-                "push {context}",
-                "popf",
-                context = in(reg) context.1,
-                options(nomem),
-            )
-        }
-    }
-
-    unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        unsafe { Strategy::do_unlock(data, &mut context.0) }
-    }
-
-    unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext) {
-        unsafe { Strategy::do_relock(data, &mut context.0) }
-    }
-
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool {
-        unsafe { Strategy::is_locked(data) }
-    }
-
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
-        let mut irq_context: usize;
-        unsafe {
-            asm!(
-                "pushf",
-                "pop {context}",
-                "cli",
-                context = out(reg) irq_context,
-            );
-        }
-
-        let lock_context = unsafe { Strategy::try_lock(data) };
-        lock_context.map(|lock_context| (lock_context, irq_context))
-    }
-}

+ 0 - 40
crates/eonix_sync/src/locked/proof.rs

@@ -1,4 +1,3 @@
-use crate::{Guard, LockStrategy};
 use core::{marker::PhantomData, ptr::NonNull};
 
 /// A proof of mutable access to a position in memory with lifetime `'pos`.
@@ -148,25 +147,6 @@ where
     }
 }
 
-/// SAFETY: The lock is held for the lifetime `'guard`. So the access must be
-/// valid for the lifetime `'pos` that is shorter than `'guard`.
-unsafe impl<'lock, 'pos, T, S, L> AsProofMut<'lock, 'pos, T> for Guard<'lock, T, S, L, true>
-where
-    T: ?Sized,
-    S: LockStrategy + 'lock,
-    L: LockStrategy + 'lock,
-{
-    fn prove_mut(&self) -> ProofMut<'pos, T>
-    where
-        'lock: 'pos,
-    {
-        ProofMut {
-            address: unsafe { NonNull::new_unchecked(&raw const **self as *mut _) },
-            _phantom: PhantomData,
-        }
-    }
-}
-
 /// SAFETY: The reference is valid for the lifetime `'guard`. So the access must be
 /// valid for the lifetime `'pos` that is shorter than `'guard`.
 unsafe impl<'guard, 'pos, T> AsProof<'guard, 'pos, T> for &'guard T
@@ -200,23 +180,3 @@ where
         }
     }
 }
-
-/// SAFETY: The lock is held for the lifetime `'guard`. So the access must be
-/// valid for the lifetime `'pos` that is shorter than `'guard`.
-unsafe impl<'lock, 'pos, T, S, L, const B: bool> AsProof<'lock, 'pos, T>
-    for Guard<'lock, T, S, L, B>
-where
-    T: ?Sized,
-    S: LockStrategy + 'lock,
-    L: LockStrategy + 'lock,
-{
-    fn prove(&self) -> Proof<'pos, T>
-    where
-        'lock: 'pos,
-    {
-        Proof {
-            address: unsafe { NonNull::new_unchecked(&raw const **self as *mut _) },
-            _phantom: PhantomData,
-        }
-    }
-}

+ 118 - 0
crates/eonix_sync/src/mutex.rs

@@ -0,0 +1,118 @@
+mod guard;
+mod wait;
+
+use core::{
+    cell::UnsafeCell,
+    sync::atomic::{AtomicBool, Ordering},
+};
+
+pub use guard::MutexGuard;
+pub use wait::Wait;
+
+#[derive(Debug, Default)]
+pub struct Mutex<T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    locked: AtomicBool,
+    wait: W,
+    value: UnsafeCell<T>,
+}
+
+impl<T, W> Mutex<T, W>
+where
+    W: Wait,
+{
+    pub const fn new(value: T, wait: W) -> Self {
+        Self {
+            locked: AtomicBool::new(false),
+            wait,
+            value: UnsafeCell::new(value),
+        }
+    }
+}
+
+impl<T, W> Mutex<T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    /// # Safety
+    /// This function is unsafe because the caller MUST ensure that we've got the
+    /// exclusive access before calling this function.
+    unsafe fn get_lock(&self) -> MutexGuard<'_, T, W> {
+        MutexGuard {
+            lock: self,
+            // SAFETY: We are holding the lock, so we can safely access the value.
+            value: unsafe { &mut *self.value.get() },
+        }
+    }
+
+    pub fn try_lock(&self) -> Option<MutexGuard<'_, T, W>> {
+        self.locked
+            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .ok()
+            .map(|_| unsafe { self.get_lock() })
+    }
+
+    fn try_lock_weak(&self) -> Option<MutexGuard<'_, T, W>> {
+        self.locked
+            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .ok()
+            .map(|_| unsafe { self.get_lock() })
+    }
+
+    #[cold]
+    fn lock_slow_path(&self) -> MutexGuard<'_, T, W> {
+        loop {
+            if let Some(guard) = self.try_lock_weak() {
+                return guard;
+            }
+
+            self.wait.wait(|| !self.locked.load(Ordering::Relaxed));
+        }
+    }
+
+    pub fn lock(&self) -> MutexGuard<'_, T, W> {
+        if let Some(guard) = self.try_lock() {
+            // Quick path
+            guard
+        } else {
+            self.lock_slow_path()
+        }
+    }
+
+    pub fn get_mut(&mut self) -> &mut T {
+        // SAFETY: The exclusive access to the lock is guaranteed by the borrow checker.
+        unsafe { &mut *self.value.get() }
+    }
+}
+
+impl<T, W> Clone for Mutex<T, W>
+where
+    T: ?Sized + Clone,
+    W: Wait,
+{
+    fn clone(&self) -> Self {
+        Self::new(self.lock().clone(), W::new())
+    }
+}
+
+// SAFETY: As long as the value protected by the lock is able to be shared between threads,
+//         we can send the lock between threads.
+unsafe impl<T, W> Send for Mutex<T, W>
+where
+    T: ?Sized + Send,
+    W: Wait,
+{
+}
+
+// SAFETY: `Mutex` provides exclusive access to the value it protects, so it is safe to
+//         implement `Sync` for it as long as the protected value is `Send`.
+unsafe impl<T, W> Sync for Mutex<T, W>
+where
+    T: ?Sized + Send,
+    W: Wait,
+{
+}

+ 101 - 0
crates/eonix_sync/src/mutex/guard.rs

@@ -0,0 +1,101 @@
+use crate::ForceUnlockableGuard;
+
+use super::{Mutex, Wait};
+use core::{
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+    sync::atomic::Ordering,
+};
+
+pub struct MutexGuard<'a, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    pub(super) lock: &'a Mutex<T, W>,
+    pub(super) value: &'a mut T,
+}
+
+impl<T, W> Drop for MutexGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn drop(&mut self) {
+        let locked = self.lock.locked.swap(false, Ordering::Release);
+        debug_assert!(
+            locked,
+            "MutexGuard::drop(): unlock() called on an unlocked mutex.",
+        );
+        self.lock.wait.notify();
+    }
+}
+
+impl<T, W> Deref for MutexGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.value
+    }
+}
+
+impl<T, W> DerefMut for MutexGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.value
+    }
+}
+
+impl<T, U, W> AsRef<U> for MutexGuard<'_, T, W>
+where
+    T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsRef<U>,
+    W: Wait,
+{
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
+    }
+}
+
+impl<T, U, W> AsMut<U> for MutexGuard<'_, T, W>
+where
+    T: ?Sized + AsMut<U>,
+    U: ?Sized,
+    <Self as Deref>::Target: AsMut<U>,
+    W: Wait,
+{
+    fn as_mut(&mut self) -> &mut U {
+        self.deref_mut().as_mut()
+    }
+}
+
+impl<T, W> ForceUnlockableGuard for MutexGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    unsafe fn force_unlock(&mut self) {
+        let locked = self.lock.locked.swap(false, Ordering::Release);
+        debug_assert!(
+            locked,
+            "MutexGuard::force_unlock(): force_unlock() called on an unlocked mutex.",
+        );
+        self.lock.wait.notify();
+    }
+
+    unsafe fn force_relock(&mut self) {
+        let _ = ManuallyDrop::new(if let Some(guard) = self.lock.try_lock() {
+            guard
+        } else {
+            self.lock.lock_slow_path()
+        });
+    }
+}

+ 17 - 0
crates/eonix_sync/src/mutex/wait.rs

@@ -0,0 +1,17 @@
+pub trait Wait {
+    fn new() -> Self
+    where
+        Self: Sized;
+
+    fn has_waiting(&self) -> bool
+    where
+        Self: Sized;
+
+    fn wait(&self, check: impl Fn() -> bool)
+    where
+        Self: Sized;
+
+    fn notify(&self)
+    where
+        Self: Sized;
+}

+ 15 - 9
crates/eonix_sync/src/rwlock/guard.rs

@@ -77,23 +77,27 @@ where
     }
 }
 
-impl<T, W> AsRef<T> for RwLockReadGuard<'_, T, W>
+impl<T, U, W> AsRef<U> for RwLockWriteGuard<'_, T, W>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsRef<U>,
     W: Wait,
 {
-    fn as_ref(&self) -> &T {
-        self.value
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
     }
 }
 
-impl<T, W> AsMut<T> for RwLockWriteGuard<'_, T, W>
+impl<T, U, W> AsMut<U> for RwLockWriteGuard<'_, T, W>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsMut<U>,
     W: Wait,
 {
-    fn as_mut(&mut self) -> &mut T {
-        self.value
+    fn as_mut(&mut self) -> &mut U {
+        self.deref_mut().as_mut()
     }
 }
 
@@ -109,13 +113,15 @@ where
     }
 }
 
-impl<T, W> AsRef<T> for RwLockWriteGuard<'_, T, W>
+impl<T, U, W> AsRef<U> for RwLockReadGuard<'_, T, W>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsRef<U>,
     W: Wait,
 {
-    fn as_ref(&self) -> &T {
-        self.value
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
     }
 }
 

+ 21 - 16
crates/eonix_sync/src/spin/guard.rs

@@ -99,25 +99,28 @@ where
     }
 }
 
-impl<T, R> AsRef<T> for SpinGuard<'_, T, R>
+impl<T, U, R> AsRef<U> for SpinGuard<'_, T, R>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsRef<U>,
 {
-    fn as_ref(&self) -> &T {
-        // SAFETY: We are holding the lock, so we can safely access the value.
-        self.value
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
     }
 }
 
-impl<T, R> AsMut<T> for SpinGuard<'_, T, R>
+impl<T, U, R> AsMut<U> for SpinGuard<'_, T, R>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsMut<U>,
 {
-    fn as_mut(&mut self) -> &mut T {
-        // SAFETY: We are holding the lock, so we can safely access the value.
-        self.value
+    fn as_mut(&mut self) -> &mut U {
+        self.deref_mut().as_mut()
     }
 }
+
 impl<T, R> Deref for SpinIrqGuard<'_, T, R>
 where
     T: ?Sized,
@@ -140,23 +143,25 @@ where
     }
 }
 
-impl<T, R> AsRef<T> for SpinIrqGuard<'_, T, R>
+impl<T, U, R> AsRef<U> for SpinIrqGuard<'_, T, R>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsRef<U>,
 {
-    fn as_ref(&self) -> &T {
-        // SAFETY: We are holding the lock, so we can safely access the value.
-        self.value
+    fn as_ref(&self) -> &U {
+        self.deref().as_ref()
     }
 }
 
-impl<T, R> AsMut<T> for SpinIrqGuard<'_, T, R>
+impl<T, U, R> AsMut<U> for SpinIrqGuard<'_, T, R>
 where
     T: ?Sized,
+    U: ?Sized,
+    <Self as Deref>::Target: AsMut<U>,
 {
-    fn as_mut(&mut self) -> &mut T {
-        // SAFETY: We are holding the lock, so we can safely access the value.
-        self.value
+    fn as_mut(&mut self) -> &mut U {
+        self.deref_mut().as_mut()
     }
 }
 

+ 0 - 75
crates/eonix_sync/src/strategy.rs

@@ -1,75 +0,0 @@
-pub unsafe trait LockStrategy {
-    type StrategyData;
-    type GuardContext;
-
-    fn new_data() -> Self::StrategyData
-    where
-        Self: Sized;
-
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool
-    where
-        Self: Sized;
-
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext>
-    where
-        Self: Sized;
-
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext
-    where
-        Self: Sized;
-
-    unsafe fn do_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext)
-    where
-        Self: Sized;
-
-    unsafe fn try_lock_shared(data: &Self::StrategyData) -> Option<Self::GuardContext>
-    where
-        Self: Sized,
-    {
-        unsafe { Self::try_lock(data) }
-    }
-
-    unsafe fn do_lock_shared(data: &Self::StrategyData) -> Self::GuardContext
-    where
-        Self: Sized,
-    {
-        unsafe { Self::do_lock(data) }
-    }
-
-    unsafe fn do_unlock_shared(data: &Self::StrategyData, context: &mut Self::GuardContext)
-    where
-        Self: Sized,
-    {
-        unsafe { Self::do_unlock(data, context) }
-    }
-
-    unsafe fn do_temporary_unlock(data: &Self::StrategyData, context: &mut Self::GuardContext)
-    where
-        Self: Sized,
-    {
-        unsafe { Self::do_unlock(data, context) }
-    }
-
-    unsafe fn do_temporary_unlock_shared(
-        data: &Self::StrategyData,
-        context: &mut Self::GuardContext,
-    ) where
-        Self: Sized,
-    {
-        unsafe { Self::do_unlock_shared(data, context) }
-    }
-
-    unsafe fn do_relock(data: &Self::StrategyData, context: &mut Self::GuardContext)
-    where
-        Self: Sized,
-    {
-        *context = unsafe { Self::do_lock(data) };
-    }
-
-    unsafe fn do_relock_shared(data: &Self::StrategyData, context: &mut Self::GuardContext)
-    where
-        Self: Sized,
-    {
-        *context = unsafe { Self::do_lock_shared(data) };
-    }
-}

+ 1 - 1
src/driver/ahci/mod.rs

@@ -94,7 +94,7 @@ impl Device {
                     let port = port.clone();
                     let name = format!("ahci-p{}-stats", port.nport);
                     procfs::populate_root(name.into_bytes().into(), move |buffer| {
-                        writeln!(&mut buffer.get_writer(), "{:?}", port.stats.lock().as_ref())
+                        writeln!(&mut buffer.get_writer(), "{:?}", &*port.stats.lock())
                             .map_err(|_| EIO)
                     })?;
                 }

+ 10 - 11
src/kernel/mem/mm_list.rs

@@ -1,19 +1,18 @@
 mod page_fault;
 
+use super::{MMArea, Page, PageTable, VAddr, VRange};
+use crate::kernel::vfs::dentry::Dentry;
+use crate::{
+    prelude::*,
+    sync::{mutex_new, ArcSwap},
+};
+use alloc::{collections::btree_set::BTreeSet, sync::Arc};
+use bindings::{EEXIST, EFAULT, EINVAL, ENOMEM, KERNEL_PML4};
 use core::{
     ops::Sub as _,
     sync::atomic::{AtomicUsize, Ordering},
 };
 
-use crate::{prelude::*, sync::ArcSwap};
-
-use alloc::{collections::btree_set::BTreeSet, sync::Arc};
-use bindings::{EEXIST, EFAULT, EINVAL, ENOMEM, KERNEL_PML4};
-
-use crate::kernel::vfs::dentry::Dentry;
-
-use super::{MMArea, Page, PageTable, VAddr, VRange};
-
 pub use page_fault::handle_page_fault;
 
 #[derive(Debug, Clone)]
@@ -198,7 +197,7 @@ impl MMList {
         let page_table = PageTable::new();
         Self {
             root_page_table: AtomicUsize::from(page_table.root_page_table()),
-            inner: ArcSwap::new(Mutex::new(MMListInner {
+            inner: ArcSwap::new(mutex_new(MMListInner {
                 areas: BTreeSet::new(),
                 page_table,
                 break_start: None,
@@ -214,7 +213,7 @@ impl MMList {
         let page_table = PageTable::new();
         let list = Self {
             root_page_table: AtomicUsize::from(page_table.root_page_table()),
-            inner: ArcSwap::new(Mutex::new(MMListInner {
+            inner: ArcSwap::new(mutex_new(MMListInner {
                 areas: inner.areas.clone(),
                 page_table,
                 break_start: inner.break_start,

+ 2 - 2
src/kernel/task/process.rs

@@ -386,7 +386,7 @@ impl Process {
         let mut procs = ProcessList::get().write();
         // We may set pgid of either the calling process or a child process.
         if pid == self.pid {
-            self.do_setpgid(pgid, procs.as_mut())
+            self.do_setpgid(pgid, &mut procs)
         } else {
             let child = {
                 // If `pid` refers to one of our children, the thread leaders must be
@@ -408,7 +408,7 @@ impl Process {
 
             // TODO: Check whether we, as a child, have already performed an `execve`.
             //       If so, we should return `Err(EACCES)`.
-            child.do_setpgid(pgid, procs.as_mut())
+            child.do_setpgid(pgid, &mut procs)
         }
     }
 

+ 6 - 2
src/kernel/terminal.rs

@@ -2,7 +2,11 @@ use super::{
     task::{ProcessList, Session, Signal, Thread},
     user::{UserPointer, UserPointerMut},
 };
-use crate::{io::Buffer, prelude::*, sync::CondVar};
+use crate::{
+    io::Buffer,
+    prelude::*,
+    sync::{mutex_new, CondVar},
+};
 use alloc::{
     collections::vec_deque::VecDeque,
     sync::{Arc, Weak},
@@ -396,7 +400,7 @@ impl core::fmt::Debug for Terminal {
 impl Terminal {
     pub fn new(device: Arc<dyn TerminalDevice>) -> Arc<Self> {
         Arc::new(Self {
-            inner: Mutex::new(TerminalInner {
+            inner: mutex_new(TerminalInner {
                 termio: Termios::new_standard(),
                 session: Weak::new(),
                 buffer: VecDeque::with_capacity(BUFFER_SIZE),

+ 10 - 15
src/kernel/vfs/file.rs

@@ -1,5 +1,8 @@
-use core::{ops::ControlFlow, sync::atomic::Ordering};
-
+use super::{
+    dentry::Dentry,
+    inode::{Mode, WriteOffset},
+    s_isblk, s_isdir, s_isreg,
+};
 use crate::{
     io::{Buffer, BufferFill, ByteBuffer},
     kernel::{
@@ -11,20 +14,14 @@ use crate::{
         CharDevice,
     },
     prelude::*,
-    sync::CondVar,
+    sync::{mutex_new, CondVar},
 };
-
 use alloc::{collections::vec_deque::VecDeque, sync::Arc};
 use bindings::{
     statx, EBADF, EFAULT, EINTR, EINVAL, ENOTDIR, ENOTTY, EOVERFLOW, EPIPE, ESPIPE, S_IFMT,
 };
 use bitflags::bitflags;
-
-use super::{
-    dentry::Dentry,
-    inode::{Mode, WriteOffset},
-    s_isblk, s_isdir, s_isreg,
-};
+use core::{ops::ControlFlow, sync::atomic::Ordering};
 
 pub struct InodeFile {
     read: bool,
@@ -110,7 +107,7 @@ impl Pipe {
 
     pub fn new() -> Arc<Self> {
         Arc::new(Self {
-            inner: Mutex::new(PipeInner {
+            inner: mutex_new(PipeInner {
                 buffer: VecDeque::with_capacity(Self::PIPE_SIZE),
                 read_closed: false,
                 write_closed: false,
@@ -290,7 +287,7 @@ impl InodeFile {
             write: rwa.1,
             append: rwa.2,
             mode: cached_mode,
-            cursor: Mutex::new(0),
+            cursor: mutex_new(0),
         }))
     }
 
@@ -320,9 +317,7 @@ impl InodeFile {
 
         // TODO!!!: use `UserBuffer`
         if self.append {
-            let nwrote = self
-                .dentry
-                .write(buffer, WriteOffset::End(cursor.as_mut()))?;
+            let nwrote = self.dentry.write(buffer, WriteOffset::End(&mut cursor))?;
 
             Ok(nwrote)
         } else {

+ 4 - 9
src/net/netdev.rs

@@ -1,9 +1,6 @@
+use crate::{bindings::root::EFAULT, prelude::*, sync::mutex_new};
 use alloc::{collections::btree_map::BTreeMap, sync::Arc};
 
-use crate::{bindings::root::EFAULT, prelude::*};
-
-use lazy_static::lazy_static;
-
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum LinkStatus {
     Up,
@@ -53,10 +50,8 @@ impl Ord for dyn Netdev {
     }
 }
 
-lazy_static! {
-    static ref NETDEVS_ID: Spin<u32> = Spin::new(0);
-    static ref NETDEVS: Spin<BTreeMap<u32, Arc<Mutex<dyn Netdev>>>> = Spin::new(BTreeMap::new());
-}
+static NETDEVS_ID: Spin<u32> = Spin::new(0);
+static NETDEVS: Spin<BTreeMap<u32, Arc<Mutex<dyn Netdev>>>> = Spin::new(BTreeMap::new());
 
 pub fn alloc_id() -> u32 {
     let mut id = NETDEVS_ID.lock();
@@ -74,7 +69,7 @@ pub fn register_netdev(netdev: impl Netdev + 'static) -> Result<Arc<Mutex<dyn Ne
     use alloc::collections::btree_map::Entry;
     match netdevs.entry(devid) {
         Entry::Vacant(entry) => {
-            let netdev = Arc::new(Mutex::new(netdev));
+            let netdev = Arc::new(mutex_new(netdev));
             entry.insert(netdev.clone());
             Ok(netdev)
         }

+ 2 - 2
src/rcu.rs

@@ -1,6 +1,6 @@
 use crate::{
     prelude::*,
-    sync::{rwlock_new, RwLockReadGuard},
+    sync::{mutex_new, rwlock_new, RwLockReadGuard},
 };
 use alloc::sync::Arc;
 use core::{
@@ -58,7 +58,7 @@ impl<T: RCUNode<T>> RCUList<T> {
         Self {
             head: AtomicPtr::new(core::ptr::null_mut()),
             reader_lock: rwlock_new(()),
-            update_lock: Mutex::new(()),
+            update_lock: mutex_new(()),
         }
     }
 

+ 56 - 12
src/sync.rs

@@ -1,19 +1,25 @@
 mod arcswap;
 mod condvar;
-pub mod semaphore;
 
-use eonix_sync::RwLockWait;
-pub use eonix_sync::{Lock, Spin};
+pub use eonix_sync::Spin;
+use eonix_sync::{MutexWait, RwLockWait};
 
 #[doc(hidden)]
 #[derive(Debug)]
-pub struct Wait {
+pub struct RwLockWaitImpl {
     lock: Spin<()>,
     cv_read: UCondVar,
     cv_write: UCondVar,
 }
 
-impl Wait {
+#[doc(hidden)]
+#[derive(Debug)]
+pub struct MutexWaitImpl {
+    lock: Spin<()>,
+    cv: UCondVar,
+}
+
+impl RwLockWaitImpl {
     const fn new() -> Self {
         Self {
             lock: Spin::new(()),
@@ -23,7 +29,16 @@ impl Wait {
     }
 }
 
-impl RwLockWait for Wait {
+impl MutexWaitImpl {
+    const fn new() -> Self {
+        Self {
+            lock: Spin::new(()),
+            cv: UCondVar::new(),
+        }
+    }
+}
+
+impl RwLockWait for RwLockWaitImpl {
     fn new() -> Self {
         Self::new()
     }
@@ -76,16 +91,45 @@ impl RwLockWait for Wait {
     }
 }
 
+impl MutexWait for MutexWaitImpl {
+    fn new() -> Self {
+        Self::new()
+    }
+
+    fn has_waiting(&self) -> bool {
+        self.cv.has_waiters()
+    }
+
+    fn wait(&self, check: impl Fn() -> bool) {
+        let mut lock = self.lock.lock();
+        loop {
+            if check() {
+                break;
+            }
+            self.cv.wait(&mut lock);
+        }
+    }
+
+    fn notify(&self) {
+        let _lock = self.lock.lock();
+        if self.has_waiting() {
+            self.cv.notify_one();
+        }
+    }
+}
+
 pub const fn rwlock_new<T>(value: T) -> RwLock<T> {
-    RwLock::new(value, Wait::new())
+    RwLock::new(value, RwLockWaitImpl::new())
+}
+
+pub const fn mutex_new<T>(value: T) -> Mutex<T> {
+    Mutex::new(value, MutexWaitImpl::new())
 }
 
-pub type Mutex<T> = Lock<T, semaphore::SemaphoreStrategy<1>>;
-pub type RwLock<T> = eonix_sync::RwLock<T, Wait>;
+pub type RwLock<T> = eonix_sync::RwLock<T, RwLockWaitImpl>;
+pub type Mutex<T> = eonix_sync::Mutex<T, MutexWaitImpl>;
 
-pub type RwLockReadGuard<'a, T> = eonix_sync::RwLockReadGuard<'a, T, Wait>;
-#[allow(dead_code)]
-pub type RwLockWriteGuard<'a, T> = eonix_sync::RwLockWriteGuard<'a, T, Wait>;
+pub type RwLockReadGuard<'a, T> = eonix_sync::RwLockReadGuard<'a, T, RwLockWaitImpl>;
 
 pub type CondVar = condvar::CondVar<true>;
 pub type UCondVar = condvar::CondVar<false>;

+ 0 - 71
src/sync/semaphore.rs

@@ -1,71 +0,0 @@
-use super::{Spin, UCondVar};
-use eonix_sync::LockStrategy;
-
-pub struct SemaphoreStrategy<const MAX: usize = { core::usize::MAX }>;
-
-pub struct SemaphoreData {
-    counter: Spin<usize>,
-    cv: UCondVar,
-}
-
-unsafe impl<const MAX: usize> LockStrategy for SemaphoreStrategy<MAX> {
-    type StrategyData = SemaphoreData;
-    type GuardContext = ();
-
-    #[inline(always)]
-    fn new_data() -> Self::StrategyData {
-        SemaphoreData {
-            counter: Spin::new(0),
-            cv: UCondVar::new(),
-        }
-    }
-
-    #[inline(always)]
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool {
-        *data.counter.lock() == MAX
-    }
-
-    #[inline(always)]
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
-        let mut counter = data.counter.lock();
-        assert!(*counter <= MAX);
-        if *counter < MAX {
-            *counter += 1;
-            Some(())
-        } else {
-            None
-        }
-    }
-
-    #[inline(always)]
-    /// Acquire the semaphore in write mode
-    ///
-    /// # Might Sleep
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
-        loop {
-            let mut counter = data.counter.lock();
-            assert!(*counter <= MAX);
-
-            if *counter < MAX {
-                *counter += 1;
-                return;
-            }
-
-            data.cv.wait(&mut counter);
-        }
-    }
-
-    #[inline(always)]
-    unsafe fn do_unlock(data: &Self::StrategyData, _: &mut Self::GuardContext) {
-        let mut counter = data.counter.lock();
-        assert!(*counter <= MAX);
-
-        match *counter {
-            n if n > 0 => {
-                *counter -= 1;
-                data.cv.notify_one();
-            }
-            _ => panic!("Semaphore in inconsistent state"),
-        }
-    }
-}