소스 검색

eonix_sync: make sleeping functions `async`

greatbridf 9 달 전
부모
커밋
c5a6a24ea8

+ 22 - 1
crates/eonix_runtime/src/task.rs

@@ -10,9 +10,11 @@ use crate::{
 use alloc::{boxed::Box, sync::Arc, task::Wake};
 use atomic_unique_refcell::AtomicUniqueRefCell;
 use core::{
-    pin::Pin,
+    pin::{pin, Pin},
     sync::atomic::{AtomicBool, AtomicU32, Ordering},
+    task::{Context, Poll, Waker},
 };
+use eonix_preempt::assert_preempt_enabled;
 use eonix_sync::Spin;
 use intrusive_collections::RBTreeAtomicLink;
 use task_state::TaskState;
@@ -162,6 +164,25 @@ impl Task {
 
         eonix_preempt::enable();
     }
+
+    pub fn block_on<F>(future: F) -> F::Output
+    where
+        F: Future,
+    {
+        assert_preempt_enabled!("block_on() must be called with preemption enabled");
+
+        let waker = Waker::from(Task::current().clone());
+        let mut context = Context::from_waker(&waker);
+        let mut future = pin!(future);
+
+        loop {
+            if let Poll::Ready(output) = future.as_mut().poll(&mut context) {
+                break output;
+            }
+
+            Task::park();
+        }
+    }
 }
 
 impl Wake for Task {

+ 6 - 24
crates/eonix_spin_irq/src/lib.rs

@@ -6,8 +6,8 @@ use core::{
     ops::{Deref, DerefMut},
 };
 use eonix_sync::{
-    marker::NotSend, ForceUnlockableGuard, Relax, Spin, SpinGuard, SpinRelax, UnlockableGuard,
-    UnlockedGuard, UnlockedSpinGuard,
+    marker::NotSend, Relax, Spin, SpinGuard, SpinRelax, UnlockableGuard, UnlockedGuard,
+    UnlockedSpinGuard,
 };
 
 pub trait SpinIrq<T, R = SpinRelax>
@@ -121,7 +121,7 @@ where
 
 impl<'a, T, R> UnlockableGuard for SpinIrqGuard<'a, T, R>
 where
-    T: ?Sized,
+    T: ?Sized + Send,
     R: Relax,
 {
     type Unlocked = UnlockedSpinIrqGuard<'a, T, R>;
@@ -137,34 +137,16 @@ where
 // SAFETY: The guard is stateless so no more process needed.
 unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinIrqGuard<'a, T, R>
 where
-    T: ?Sized,
+    T: ?Sized + Send,
     R: Relax,
 {
     type Guard = SpinIrqGuard<'a, T, R>;
 
-    fn relock(self) -> Self::Guard {
+    async fn relock(self) -> Self::Guard {
         SpinIrqGuard {
-            guard: self.unlocked_guard.relock(),
+            guard: self.unlocked_guard.relock().await,
             irq_state: self.irq_state,
             _not_send: PhantomData,
         }
     }
 }
-
-impl<'a, T, R> ForceUnlockableGuard for SpinIrqGuard<'a, T, R>
-where
-    T: ?Sized,
-    R: Relax,
-{
-    unsafe fn force_unlock(&mut self) {
-        unsafe {
-            self.guard.force_unlock();
-        }
-    }
-
-    unsafe fn force_relock(&mut self) {
-        unsafe {
-            self.guard.force_relock();
-        }
-    }
-}

+ 2 - 27
crates/eonix_sync/src/guard.rs

@@ -8,34 +8,9 @@ pub trait UnlockableGuard {
 /// # Safety
 /// Implementors of this trait MUST ensure that the lock is correctly unlocked if
 /// the lock is stateful and dropped accidentally.
-pub unsafe trait UnlockedGuard {
+pub unsafe trait UnlockedGuard: Send {
     type Guard: UnlockableGuard;
 
     #[must_use = "Throwing away the relocked guard is pointless."]
-    fn relock(self) -> Self::Guard;
-}
-
-pub trait ForceUnlockableGuard {
-    /// # Safety
-    /// This function is unsafe because it allows you to unlock the lock without
-    /// dropping the guard. Using the guard after calling this function is
-    /// undefined behavior.
-    unsafe fn force_unlock(&mut self);
-
-    /// # Safety
-    /// Calling this function twice on a force unlocked guard will cause deadlocks.
-    unsafe fn force_relock(&mut self);
-
-    fn do_unlocked(&mut self, f: impl FnOnce())
-    where
-        Self: Sized,
-    {
-        // SAFETY: We unlock the lock before calling the function and relock it after
-        // calling the function. So we will end up with the lock being held again.
-        unsafe {
-            self.force_unlock();
-            f();
-            self.force_relock();
-        }
-    }
+    fn relock(self) -> impl Future<Output = Self::Guard> + Send;
 }

+ 3 - 3
crates/eonix_sync/src/lib.rs

@@ -9,11 +9,11 @@ mod rwlock;
 mod spin;
 mod wait_list;
 
-pub use guard::{ForceUnlockableGuard, UnlockableGuard, UnlockedGuard};
+pub use guard::{UnlockableGuard, UnlockedGuard};
 pub use lazy_lock::LazyLock;
 pub use locked::{AsProof, AsProofMut, Locked, Proof, ProofMut};
-pub use mutex::{Mutex, MutexGuard, Wait as MutexWait};
-pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard, Wait as RwLockWait};
+pub use mutex::{Mutex, MutexGuard};
+pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 pub use spin::{LoopRelax, Relax, Spin, SpinGuard, SpinRelax, UnlockedSpinGuard};
 pub use wait_list::WaitList;
 

+ 20 - 42
crates/eonix_sync/src/mutex.rs

@@ -1,47 +1,42 @@
 mod guard;
-mod wait;
 
+use crate::WaitList;
 use core::{
     cell::UnsafeCell,
+    pin::pin,
     sync::atomic::{AtomicBool, Ordering},
 };
 
 pub use guard::MutexGuard;
-pub use wait::Wait;
 
 #[derive(Debug, Default)]
-pub struct Mutex<T, W>
+pub struct Mutex<T>
 where
     T: ?Sized,
-    W: Wait,
 {
     locked: AtomicBool,
-    wait: W,
+    wait_list: WaitList,
     value: UnsafeCell<T>,
 }
 
-impl<T, W> Mutex<T, W>
-where
-    W: Wait,
-{
-    pub const fn new(value: T, wait: W) -> Self {
+impl<T> Mutex<T> {
+    pub const fn new(value: T) -> Self {
         Self {
             locked: AtomicBool::new(false),
-            wait,
+            wait_list: WaitList::new(),
             value: UnsafeCell::new(value),
         }
     }
 }
 
-impl<T, W> Mutex<T, W>
+impl<T> Mutex<T>
 where
     T: ?Sized,
-    W: Wait,
 {
     /// # Safety
     /// This function is unsafe because the caller MUST ensure that we've got the
     /// exclusive access before calling this function.
-    unsafe fn get_lock(&self) -> MutexGuard<'_, T, W> {
+    unsafe fn get_lock(&self) -> MutexGuard<'_, T> {
         MutexGuard {
             lock: self,
             // SAFETY: We are holding the lock, so we can safely access the value.
@@ -49,14 +44,14 @@ where
         }
     }
 
-    pub fn try_lock(&self) -> Option<MutexGuard<'_, T, W>> {
+    pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
         self.locked
             .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
             .ok()
             .map(|_| unsafe { self.get_lock() })
     }
 
-    fn try_lock_weak(&self) -> Option<MutexGuard<'_, T, W>> {
+    fn try_lock_weak(&self) -> Option<MutexGuard<'_, T>> {
         self.locked
             .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
             .ok()
@@ -64,22 +59,25 @@ where
     }
 
     #[cold]
-    fn lock_slow_path(&self) -> MutexGuard<'_, T, W> {
+    async fn lock_slow_path(&self) -> MutexGuard<'_, T> {
         loop {
+            let mut wait = pin!(self.wait_list.prepare_to_wait());
+            wait.as_mut().add_to_wait_list();
+
             if let Some(guard) = self.try_lock_weak() {
                 return guard;
             }
 
-            self.wait.wait(|| !self.locked.load(Ordering::Relaxed));
+            wait.await;
         }
     }
 
-    pub fn lock(&self) -> MutexGuard<'_, T, W> {
+    pub async fn lock(&self) -> MutexGuard<'_, T> {
         if let Some(guard) = self.try_lock() {
             // Quick path
             guard
         } else {
-            self.lock_slow_path()
+            self.lock_slow_path().await
         }
     }
 
@@ -89,30 +87,10 @@ where
     }
 }
 
-impl<T, W> Clone for Mutex<T, W>
-where
-    T: ?Sized + Clone,
-    W: Wait,
-{
-    fn clone(&self) -> Self {
-        Self::new(self.lock().clone(), W::new())
-    }
-}
-
 // SAFETY: As long as the value protected by the lock is able to be sent between threads,
 //         we can send the lock between threads.
-unsafe impl<T, W> Send for Mutex<T, W>
-where
-    T: ?Sized + Send,
-    W: Wait,
-{
-}
+unsafe impl<T> Send for Mutex<T> where T: ?Sized + Send {}
 
 // SAFETY: `Mutex` can provide exclusive access to the value it protects, so it is safe to
 //         implement `Sync` for it as long as the protected value is `Send`.
-unsafe impl<T, W> Sync for Mutex<T, W>
-where
-    T: ?Sized + Send,
-    W: Wait,
-{
-}
+unsafe impl<T> Sync for Mutex<T> where T: ?Sized + Send {}

+ 31 - 34
crates/eonix_sync/src/mutex/guard.rs

@@ -1,25 +1,25 @@
-use crate::ForceUnlockableGuard;
-
-use super::{Mutex, Wait};
+use super::Mutex;
+use crate::{UnlockableGuard, UnlockedGuard};
 use core::{
-    mem::ManuallyDrop,
     ops::{Deref, DerefMut},
     sync::atomic::Ordering,
 };
 
-pub struct MutexGuard<'a, T, W>
+pub struct MutexGuard<'a, T>
 where
     T: ?Sized,
-    W: Wait,
 {
-    pub(super) lock: &'a Mutex<T, W>,
+    pub(super) lock: &'a Mutex<T>,
     pub(super) value: &'a mut T,
 }
 
-impl<T, W> Drop for MutexGuard<'_, T, W>
+pub struct UnlockedMutexGuard<'a, T>(&'a Mutex<T>)
+where
+    T: ?Sized;
+
+impl<T> Drop for MutexGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn drop(&mut self) {
         let locked = self.lock.locked.swap(false, Ordering::Release);
@@ -27,14 +27,13 @@ where
             locked,
             "MutexGuard::drop(): unlock() called on an unlocked mutex.",
         );
-        self.lock.wait.notify();
+        self.lock.wait_list.notify_one();
     }
 }
 
-impl<T, W> Deref for MutexGuard<'_, T, W>
+impl<T> Deref for MutexGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     type Target = T;
 
@@ -43,59 +42,57 @@ where
     }
 }
 
-impl<T, W> DerefMut for MutexGuard<'_, T, W>
+impl<T> DerefMut for MutexGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn deref_mut(&mut self) -> &mut Self::Target {
         self.value
     }
 }
 
-impl<T, U, W> AsRef<U> for MutexGuard<'_, T, W>
+impl<T, U> AsRef<U> for MutexGuard<'_, T>
 where
     T: ?Sized,
     U: ?Sized,
     <Self as Deref>::Target: AsRef<U>,
-    W: Wait,
 {
     fn as_ref(&self) -> &U {
         self.deref().as_ref()
     }
 }
 
-impl<T, U, W> AsMut<U> for MutexGuard<'_, T, W>
+impl<T, U> AsMut<U> for MutexGuard<'_, T>
 where
     T: ?Sized + AsMut<U>,
     U: ?Sized,
     <Self as Deref>::Target: AsMut<U>,
-    W: Wait,
 {
     fn as_mut(&mut self) -> &mut U {
         self.deref_mut().as_mut()
     }
 }
 
-impl<T, W> ForceUnlockableGuard for MutexGuard<'_, T, W>
+impl<'a, T> UnlockableGuard for MutexGuard<'a, T>
 where
-    T: ?Sized,
-    W: Wait,
+    T: ?Sized + Send,
 {
-    unsafe fn force_unlock(&mut self) {
-        let locked = self.lock.locked.swap(false, Ordering::Release);
-        debug_assert!(
-            locked,
-            "MutexGuard::drop(): unlock() called on an unlocked mutex.",
-        );
-        self.lock.wait.notify();
+    type Unlocked = UnlockedMutexGuard<'a, T>;
+
+    fn unlock(self) -> Self::Unlocked {
+        // The lock will be unlocked when the guard is dropped.
+        UnlockedMutexGuard(self.lock)
     }
+}
+
+unsafe impl<'a, T> UnlockedGuard for UnlockedMutexGuard<'a, T>
+where
+    T: ?Sized + Send,
+{
+    type Guard = MutexGuard<'a, T>;
 
-    unsafe fn force_relock(&mut self) {
-        let _ = ManuallyDrop::new(if let Some(guard) = self.lock.try_lock() {
-            guard
-        } else {
-            self.lock.lock_slow_path()
-        });
+    async fn relock(self) -> Self::Guard {
+        let Self(lock) = self;
+        lock.lock().await
     }
 }

+ 0 - 17
crates/eonix_sync/src/mutex/wait.rs

@@ -1,17 +0,0 @@
-pub trait Wait {
-    fn new() -> Self
-    where
-        Self: Sized;
-
-    fn has_waiting(&self) -> bool
-    where
-        Self: Sized;
-
-    fn wait(&self, check: impl Fn() -> bool)
-    where
-        Self: Sized;
-
-    fn notify(&self)
-    where
-        Self: Sized;
-}

+ 63 - 52
crates/eonix_sync/src/rwlock.rs

@@ -1,47 +1,44 @@
 mod guard;
-mod wait;
 
+use crate::WaitList;
 use core::{
     cell::UnsafeCell,
+    pin::pin,
     sync::atomic::{AtomicIsize, Ordering},
 };
 
 pub use guard::{RwLockReadGuard, RwLockWriteGuard};
-pub use wait::Wait;
 
 #[derive(Debug, Default)]
-pub struct RwLock<T, W>
+pub struct RwLock<T>
 where
     T: ?Sized,
-    W: Wait,
 {
     counter: AtomicIsize,
-    wait: W,
+    read_wait: WaitList,
+    write_wait: WaitList,
     value: UnsafeCell<T>,
 }
 
-impl<T, W> RwLock<T, W>
-where
-    W: Wait,
-{
-    pub const fn new(value: T, wait: W) -> Self {
+impl<T> RwLock<T> {
+    pub const fn new(value: T) -> Self {
         Self {
             counter: AtomicIsize::new(0),
-            wait,
+            read_wait: WaitList::new(),
+            write_wait: WaitList::new(),
             value: UnsafeCell::new(value),
         }
     }
 }
 
-impl<T, W> RwLock<T, W>
+impl<T> RwLock<T>
 where
     T: ?Sized,
-    W: Wait,
 {
     /// # Safety
     /// This function is unsafe because the caller MUST ensure that we've got the
     /// write access before calling this function.
-    unsafe fn write_lock(&self) -> RwLockWriteGuard<'_, T, W> {
+    unsafe fn write_lock(&self) -> RwLockWriteGuard<'_, T> {
         RwLockWriteGuard {
             lock: self,
             // SAFETY: We are holding the write lock, so we can safely access the value.
@@ -52,7 +49,7 @@ where
     /// # Safety
     /// This function is unsafe because the caller MUST ensure that we've got the
     /// read access before calling this function.
-    unsafe fn read_lock(&self) -> RwLockReadGuard<'_, T, W> {
+    unsafe fn read_lock(&self) -> RwLockReadGuard<'_, T> {
         RwLockReadGuard {
             lock: self,
             // SAFETY: We are holding the read lock, so we can safely access the value.
@@ -60,23 +57,53 @@ where
         }
     }
 
-    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T, W>> {
+    /// # Safety
+    /// This function is unsafe because the caller MUST ensure that we won't hold any
+    /// references to the value after calling this function.
+    pub(self) unsafe fn write_unlock(&self) {
+        let old = self.counter.swap(0, Ordering::Release);
+        debug_assert_eq!(
+            old, -1,
+            "RwLock::write_unlock(): erroneous counter value: {}",
+            old
+        );
+        if !self.write_wait.notify_one() {
+            self.read_wait.notify_all();
+        }
+    }
+
+    /// # Safety
+    /// This function is unsafe because the caller MUST ensure that we won't hold any
+    /// references to the value after calling this function.
+    pub(self) unsafe fn read_unlock(&self) {
+        match self.counter.fetch_sub(1, Ordering::Release) {
+            2.. => {}
+            1 => {
+                if !self.write_wait.notify_one() {
+                    self.read_wait.notify_all();
+                }
+            }
+            val => unreachable!("RwLock::read_unlock(): erroneous counter value: {}", val),
+        }
+    }
+
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T>> {
         self.counter
             .compare_exchange(0, -1, Ordering::Acquire, Ordering::Relaxed)
             .ok()
             .map(|_| unsafe { self.write_lock() })
     }
 
-    fn try_write_weak(&self) -> Option<RwLockWriteGuard<'_, T, W>> {
+    fn try_write_weak(&self) -> Option<RwLockWriteGuard<'_, T>> {
         self.counter
             .compare_exchange_weak(0, -1, Ordering::Acquire, Ordering::Relaxed)
             .ok()
             .map(|_| unsafe { self.write_lock() })
     }
 
-    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T, W>> {
+    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> {
         // We'll spin if we fail here anyway.
-        if self.wait.has_write_waiting() {
+        if self.write_wait.has_waiters() {
             return None;
         }
 
@@ -91,7 +118,7 @@ where
         }
     }
 
-    fn try_read_weak(&self) -> Option<RwLockReadGuard<'_, T, W>> {
+    fn try_read_weak(&self) -> Option<RwLockReadGuard<'_, T>> {
         // TODO: If we check write waiters here, we would lose wakeups.
         //       Try locking the wait lists to prevent this.
 
@@ -107,44 +134,48 @@ where
     }
 
     #[cold]
-    fn write_slow_path(&self) -> RwLockWriteGuard<'_, T, W> {
+    async fn write_slow_path(&self) -> RwLockWriteGuard<'_, T> {
         loop {
+            let mut wait = pin!(self.write_wait.prepare_to_wait());
+            wait.as_mut().add_to_wait_list();
+
             if let Some(guard) = self.try_write_weak() {
                 return guard;
             }
 
-            self.wait
-                .write_wait(|| self.counter.load(Ordering::Relaxed) == 0);
+            wait.await;
         }
     }
 
     #[cold]
-    fn read_slow_path(&self) -> RwLockReadGuard<'_, T, W> {
+    async fn read_slow_path(&self) -> RwLockReadGuard<'_, T> {
         loop {
+            let mut wait = pin!(self.read_wait.prepare_to_wait());
+            wait.as_mut().add_to_wait_list();
+
             if let Some(guard) = self.try_read_weak() {
                 return guard;
             }
 
-            self.wait
-                .read_wait(|| self.counter.load(Ordering::Relaxed) >= 0);
+            wait.await;
         }
     }
 
-    pub fn write(&self) -> RwLockWriteGuard<'_, T, W> {
+    pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
         if let Some(guard) = self.try_write() {
             // Quick path
             guard
         } else {
-            self.write_slow_path()
+            self.write_slow_path().await
         }
     }
 
-    pub fn read(&self) -> RwLockReadGuard<'_, T, W> {
+    pub async fn read(&self) -> RwLockReadGuard<'_, T> {
         if let Some(guard) = self.try_read() {
             // Quick path
             guard
         } else {
-            self.read_slow_path()
+            self.read_slow_path().await
         }
     }
 
@@ -154,30 +185,10 @@ where
     }
 }
 
-impl<T, W> Clone for RwLock<T, W>
-where
-    T: ?Sized + Clone,
-    W: Wait,
-{
-    fn clone(&self) -> Self {
-        Self::new(self.read().clone(), W::new())
-    }
-}
-
 // SAFETY: As long as the value protected by the lock is able to be sent between threads,
 //         we can send the lock between threads.
-unsafe impl<T, W> Send for RwLock<T, W>
-where
-    T: ?Sized + Send,
-    W: Wait,
-{
-}
+unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {}
 
 // SAFETY: `RwLock` can provide shared access to the value it protects, so it is safe to
 //         implement `Sync` for it. However, this is only true if the value itself is `Sync`.
-unsafe impl<T, W> Sync for RwLock<T, W>
-where
-    T: ?Sized + Send + Sync,
-    W: Wait,
-{
-}
+unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}

+ 76 - 63
crates/eonix_sync/src/rwlock/guard.rs

@@ -1,64 +1,58 @@
-use crate::{AsProof, AsProofMut, ForceUnlockableGuard, Proof, ProofMut};
+use super::RwLock;
+use crate::{AsProof, AsProofMut, Proof, ProofMut, UnlockableGuard, UnlockedGuard};
+use core::ops::{Deref, DerefMut};
 
-use super::{RwLock, Wait};
-use core::{
-    mem::ManuallyDrop,
-    ops::{Deref, DerefMut},
-    sync::atomic::Ordering,
-};
-
-pub struct RwLockWriteGuard<'a, T, W>
+pub struct RwLockWriteGuard<'a, T>
 where
     T: ?Sized,
-    W: Wait,
 {
-    pub(super) lock: &'a RwLock<T, W>,
+    pub(super) lock: &'a RwLock<T>,
     pub(super) value: &'a mut T,
 }
 
-pub struct RwLockReadGuard<'a, T, W>
+pub struct RwLockReadGuard<'a, T>
 where
     T: ?Sized,
-    W: Wait,
 {
-    pub(super) lock: &'a RwLock<T, W>,
+    pub(super) lock: &'a RwLock<T>,
     pub(super) value: &'a T,
 }
 
-impl<T, W> Drop for RwLockWriteGuard<'_, T, W>
+pub struct UnlockedRwLockReadGuard<'a, T>(&'a RwLock<T>)
+where
+    T: ?Sized;
+
+pub struct UnlockedRwLockWriteGuard<'a, T>(&'a RwLock<T>)
+where
+    T: ?Sized;
+
+impl<T> Drop for RwLockWriteGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn drop(&mut self) {
-        let old = self.lock.counter.swap(0, Ordering::Release);
-        assert_eq!(
-            old, -1,
-            "RwLockWriteGuard::drop(): erroneous counter value: {}",
-            old
-        );
-        self.lock.wait.write_notify();
+        unsafe {
+            // SAFETY: We are dropping the guard.
+            self.lock.write_unlock();
+        }
     }
 }
 
-impl<T, W> Drop for RwLockReadGuard<'_, T, W>
+impl<T> Drop for RwLockReadGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn drop(&mut self) {
-        match self.lock.counter.fetch_sub(1, Ordering::Release) {
-            2.. => {}
-            1 => self.lock.wait.read_notify(),
-            val => unreachable!("RwLockReadGuard::drop(): erroneous counter value: {}", val),
+        unsafe {
+            // SAFETY: We are dropping the guard.
+            self.lock.read_unlock();
         }
     }
 }
 
-impl<T, W> Deref for RwLockWriteGuard<'_, T, W>
+impl<T> Deref for RwLockWriteGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     type Target = T;
 
@@ -67,44 +61,40 @@ where
     }
 }
 
-impl<T, W> DerefMut for RwLockWriteGuard<'_, T, W>
+impl<T> DerefMut for RwLockWriteGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn deref_mut(&mut self) -> &mut Self::Target {
         self.value
     }
 }
 
-impl<T, U, W> AsRef<U> for RwLockWriteGuard<'_, T, W>
+impl<T, U> AsRef<U> for RwLockWriteGuard<'_, T>
 where
     T: ?Sized,
     U: ?Sized,
     <Self as Deref>::Target: AsRef<U>,
-    W: Wait,
 {
     fn as_ref(&self) -> &U {
         self.deref().as_ref()
     }
 }
 
-impl<T, U, W> AsMut<U> for RwLockWriteGuard<'_, T, W>
+impl<T, U> AsMut<U> for RwLockWriteGuard<'_, T>
 where
     T: ?Sized,
     U: ?Sized,
     <Self as Deref>::Target: AsMut<U>,
-    W: Wait,
 {
     fn as_mut(&mut self) -> &mut U {
         self.deref_mut().as_mut()
     }
 }
 
-impl<T, W> Deref for RwLockReadGuard<'_, T, W>
+impl<T> Deref for RwLockReadGuard<'_, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     type Target = T;
 
@@ -113,67 +103,90 @@ where
     }
 }
 
-impl<T, U, W> AsRef<U> for RwLockReadGuard<'_, T, W>
+impl<T, U> AsRef<U> for RwLockReadGuard<'_, T>
 where
     T: ?Sized,
     U: ?Sized,
     <Self as Deref>::Target: AsRef<U>,
-    W: Wait,
 {
     fn as_ref(&self) -> &U {
         self.deref().as_ref()
     }
 }
 
-unsafe impl<'guard, 'pos, T, W> AsProof<'guard, 'pos, T> for RwLockWriteGuard<'guard, T, W>
+unsafe impl<'guard, 'pos, T> AsProof<'guard, 'pos, T> for RwLockWriteGuard<'guard, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn prove(&self) -> Proof<'pos, T> {
         unsafe { Proof::new(&raw const *self.value) }
     }
 }
 
-unsafe impl<'guard, 'pos, T, W> AsProofMut<'guard, 'pos, T> for RwLockWriteGuard<'guard, T, W>
+unsafe impl<'guard, 'pos, T> AsProofMut<'guard, 'pos, T> for RwLockWriteGuard<'guard, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn prove_mut(&self) -> ProofMut<'pos, T> {
         unsafe { ProofMut::new(&raw const *self.value as *mut _) }
     }
 }
 
-unsafe impl<'guard, 'pos, T, W> AsProof<'guard, 'pos, T> for RwLockReadGuard<'guard, T, W>
+unsafe impl<'guard, 'pos, T> AsProof<'guard, 'pos, T> for RwLockReadGuard<'guard, T>
 where
     T: ?Sized,
-    W: Wait,
 {
     fn prove(&self) -> Proof<'pos, T> {
         unsafe { Proof::new(&raw const *self.value) }
     }
 }
 
-impl<'a, T, W> ForceUnlockableGuard for RwLockReadGuard<'_, T, W>
+impl<'a, T> UnlockableGuard for RwLockReadGuard<'a, T>
 where
-    T: ?Sized,
-    W: Wait,
+    T: ?Sized + Send + Sync,
 {
-    unsafe fn force_unlock(&mut self) {
-        match self.lock.counter.fetch_sub(1, Ordering::Release) {
-            2.. => {}
-            1 => self.lock.wait.read_notify(),
-            val => unreachable!("RwLockReadGuard::drop(): erroneous counter value: {}", val),
-        }
+    type Unlocked = UnlockedRwLockReadGuard<'a, T>;
+
+    fn unlock(self) -> Self::Unlocked {
+        // The lock will be unlocked when the guard is dropped.
+        UnlockedRwLockReadGuard(self.lock)
     }
+}
+
+// SAFETY: `UnlockedRwLockReadGuard` is stateless.
+unsafe impl<'a, T> UnlockedGuard for UnlockedRwLockReadGuard<'a, T>
+where
+    T: ?Sized + Send + Sync,
+{
+    type Guard = RwLockReadGuard<'a, T>;
+
+    async fn relock(self) -> Self::Guard {
+        let Self(lock) = self;
+        lock.read().await
+    }
+}
+
+impl<'a, T> UnlockableGuard for RwLockWriteGuard<'a, T>
+where
+    T: ?Sized + Send + Sync,
+{
+    type Unlocked = UnlockedRwLockWriteGuard<'a, T>;
+
+    fn unlock(self) -> Self::Unlocked {
+        // The lock will be unlocked when the guard is dropped.
+        UnlockedRwLockWriteGuard(self.lock)
+    }
+}
+
+// SAFETY: `UnlockedRwLockWriteGuard` is stateless.
+unsafe impl<'a, T> UnlockedGuard for UnlockedRwLockWriteGuard<'a, T>
+where
+    T: ?Sized + Send + Sync,
+{
+    type Guard = RwLockWriteGuard<'a, T>;
 
-    unsafe fn force_relock(&mut self) {
-        let _ = ManuallyDrop::new(if let Some(guard) = self.lock.try_read() {
-            // Quick path
-            guard
-        } else {
-            self.lock.read_slow_path()
-        });
+    async fn relock(self) -> Self::Guard {
+        let Self(lock) = self;
+        lock.write().await
     }
 }

+ 0 - 26
crates/eonix_sync/src/rwlock/wait.rs

@@ -1,26 +0,0 @@
-pub trait Wait {
-    fn new() -> Self
-    where
-        Self: Sized;
-
-    fn has_write_waiting(&self) -> bool
-    where
-        Self: Sized;
-    fn has_read_waiting(&self) -> bool
-    where
-        Self: Sized;
-
-    fn write_wait(&self, check: impl Fn() -> bool)
-    where
-        Self: Sized;
-    fn read_wait(&self, check: impl Fn() -> bool)
-    where
-        Self: Sized;
-
-    fn write_notify(&self)
-    where
-        Self: Sized;
-    fn read_notify(&self)
-    where
-        Self: Sized;
-}

+ 4 - 21
crates/eonix_sync/src/spin/guard.rs

@@ -1,5 +1,5 @@
 use super::{Relax, Spin, SpinRelax};
-use crate::{marker::NotSend, ForceUnlockableGuard, UnlockableGuard, UnlockedGuard};
+use crate::{marker::NotSend, UnlockableGuard, UnlockedGuard};
 use core::{
     marker::PhantomData,
     mem::ManuallyDrop,
@@ -83,7 +83,7 @@ where
 
 impl<'a, T, R> UnlockableGuard for SpinGuard<'a, T, R>
 where
-    T: ?Sized,
+    T: ?Sized + Send,
     R: Relax,
 {
     type Unlocked = UnlockedSpinGuard<'a, T, R>;
@@ -102,30 +102,13 @@ where
 // SAFETY: The guard is stateless so no more process needed.
 unsafe impl<'a, T, R> UnlockedGuard for UnlockedSpinGuard<'a, T, R>
 where
-    T: ?Sized,
+    T: ?Sized + Send,
     R: Relax,
 {
     type Guard = SpinGuard<'a, T, R>;
 
-    fn relock(self) -> Self::Guard {
+    async fn relock(self) -> Self::Guard {
         let Self(lock) = self;
         lock.lock()
     }
 }
-
-impl<'a, T, R> ForceUnlockableGuard for SpinGuard<'a, T, R>
-where
-    T: ?Sized,
-    R: Relax,
-{
-    unsafe fn force_unlock(&mut self) {
-        unsafe {
-            // SAFETY: The caller assures that the value is no longer accessed.
-            self.lock.do_unlock();
-        }
-    }
-
-    unsafe fn force_relock(&mut self) {
-        self.lock.do_lock();
-    }
-}

+ 1 - 1
src/prelude.rs

@@ -22,7 +22,7 @@ pub(crate) use alloc::{boxed::Box, string::String, vec, vec::Vec};
 
 pub(crate) use core::{any::Any, fmt::Write, marker::PhantomData, str};
 
-pub use crate::sync::{Mutex, RwLock, Spin};
+pub use crate::sync::Spin;
 
 #[allow(dead_code)]
 pub trait AsAny: Send + Sync {

+ 1 - 132
src/sync.rs

@@ -1,138 +1,7 @@
 mod arcswap;
 mod condvar;
 
-use eonix_sync::{MutexWait, RwLockWait};
-
+pub use arcswap::ArcSwap;
 pub use eonix_sync::Spin;
 
-#[doc(hidden)]
-#[derive(Debug)]
-pub struct RwLockWaitImpl {
-    lock: Spin<()>,
-    cv_read: UCondVar,
-    cv_write: UCondVar,
-}
-
-#[doc(hidden)]
-#[derive(Debug)]
-pub struct MutexWaitImpl {
-    lock: Spin<()>,
-    cv: UCondVar,
-}
-
-impl RwLockWaitImpl {
-    const fn new() -> Self {
-        Self {
-            lock: Spin::new(()),
-            cv_read: UCondVar::new(),
-            cv_write: UCondVar::new(),
-        }
-    }
-}
-
-impl MutexWaitImpl {
-    const fn new() -> Self {
-        Self {
-            lock: Spin::new(()),
-            cv: UCondVar::new(),
-        }
-    }
-}
-
-impl RwLockWait for RwLockWaitImpl {
-    fn new() -> Self {
-        Self::new()
-    }
-
-    fn has_write_waiting(&self) -> bool {
-        self.cv_write.has_waiters()
-    }
-
-    fn has_read_waiting(&self) -> bool {
-        self.cv_read.has_waiters()
-    }
-
-    fn write_wait(&self, check: impl Fn() -> bool) {
-        let mut lock = self.lock.lock();
-
-        loop {
-            if check() {
-                break;
-            }
-            self.cv_write.wait(&mut lock);
-        }
-    }
-
-    fn read_wait(&self, check: impl Fn() -> bool) {
-        let mut lock = self.lock.lock();
-        loop {
-            if check() {
-                break;
-            }
-            self.cv_read.wait(&mut lock);
-        }
-    }
-
-    fn write_notify(&self) {
-        let _lock = self.lock.lock();
-        if self.has_write_waiting() {
-            self.cv_write.notify_one();
-        } else if self.has_read_waiting() {
-            self.cv_read.notify_all();
-        }
-    }
-
-    fn read_notify(&self) {
-        let _lock = self.lock.lock();
-        if self.has_write_waiting() {
-            self.cv_write.notify_one();
-        } else if self.has_read_waiting() {
-            self.cv_read.notify_all();
-        }
-    }
-}
-
-impl MutexWait for MutexWaitImpl {
-    fn new() -> Self {
-        Self::new()
-    }
-
-    fn has_waiting(&self) -> bool {
-        self.cv.has_waiters()
-    }
-
-    fn wait(&self, check: impl Fn() -> bool) {
-        let mut lock = self.lock.lock();
-        loop {
-            if check() {
-                break;
-            }
-            self.cv.wait(&mut lock);
-        }
-    }
-
-    fn notify(&self) {
-        let _lock = self.lock.lock();
-        if self.has_waiting() {
-            self.cv.notify_one();
-        }
-    }
-}
-
-pub const fn rwlock_new<T>(value: T) -> RwLock<T> {
-    RwLock::new(value, RwLockWaitImpl::new())
-}
-
-pub const fn mutex_new<T>(value: T) -> Mutex<T> {
-    Mutex::new(value, MutexWaitImpl::new())
-}
-
-pub type RwLock<T> = eonix_sync::RwLock<T, RwLockWaitImpl>;
-pub type Mutex<T> = eonix_sync::Mutex<T, MutexWaitImpl>;
-
-pub type RwLockReadGuard<'a, T> = eonix_sync::RwLockReadGuard<'a, T, RwLockWaitImpl>;
-
 pub type CondVar = condvar::CondVar<true>;
-pub type UCondVar = condvar::CondVar<false>;
-
-pub use arcswap::ArcSwap;

+ 24 - 46
src/sync/condvar.rs

@@ -1,12 +1,10 @@
-use crate::{kernel::task::Thread, prelude::*};
-use alloc::collections::vec_deque::VecDeque;
-use core::task::Waker;
-use eonix_preempt::assert_preempt_count_eq;
-use eonix_runtime::task::Task;
-use eonix_sync::ForceUnlockableGuard;
+use crate::kernel::task::Thread;
+use core::pin::pin;
+use eonix_sync::{UnlockableGuard, UnlockedGuard as _, WaitList};
+use intrusive_collections::UnsafeRef;
 
 pub struct CondVar<const INTERRUPTIBLE: bool> {
-    waiters: Spin<VecDeque<Waker>>,
+    wait_list: WaitList,
 }
 
 impl<const I: bool> core::fmt::Debug for CondVar<I> {
@@ -22,62 +20,42 @@ impl<const I: bool> core::fmt::Debug for CondVar<I> {
 impl<const I: bool> CondVar<I> {
     pub const fn new() -> Self {
         Self {
-            waiters: Spin::new(VecDeque::new()),
+            wait_list: WaitList::new(),
         }
     }
 
-    pub fn has_waiters(&self) -> bool {
-        !self.waiters.lock().is_empty()
-    }
-
-    fn wake(waker: Waker) {
-        waker.wake();
+    pub fn notify_all(&self) {
+        self.wait_list.notify_all();
     }
 
-    pub fn notify_one(&self) {
-        if let Some(waker) = self.waiters.lock().pop_front() {
-            Self::wake(waker);
-        }
-    }
+    /// Unlock the `guard`. Then wait until being woken up.
+    /// Return the relocked `guard`.
+    pub async fn wait<G>(&self, guard: G) -> G
+    where
+        G: UnlockableGuard + Send,
+        G::Unlocked: Send,
+    {
+        let mut wait_handle = pin!(self.wait_list.prepare_to_wait());
+        wait_handle.as_mut().add_to_wait_list();
 
-    pub fn notify_all(&self) {
-        for waker in self.waiters.lock().drain(..) {
-            Self::wake(waker);
-        }
-    }
+        let interrupt_waker = pin!(|| {});
 
-    /// Unlock the `guard`. Then wait until being waken up. Relock the `guard` before returning.
-    ///
-    /// # Might Sleep
-    /// This function **might sleep**, so call it in a preemptible context.
-    pub fn wait(&self, guard: &mut impl ForceUnlockableGuard) {
-        eonix_preempt::disable();
-        let waker = Waker::from(Task::current().clone());
         if I {
             // Prohibit the thread from being woken up by a signal.
-            Thread::current()
-                .signal_list
-                .set_signal_waker(Some(waker.clone()));
+            Thread::current().signal_list.set_signal_waker(Some(unsafe {
+                UnsafeRef::from_raw(interrupt_waker.as_ref().get_ref())
+            }));
         }
 
-        self.waiters.lock().push_back(waker.clone());
-
-        // TODO!!!: Another way to do this:
-        //
-        // Store a flag in our entry in the waiting list.
-        // Check the flag before doing `schedule()` but after we've unlocked the `guard`.
-        // If the flag is already set, we don't need to sleep.
-
-        unsafe { guard.force_unlock() };
+        let unlocked_guard = guard.unlock();
 
-        assert_preempt_count_eq!(1, "CondVar::wait");
-        Task::park_preempt_disabled();
+        wait_handle.await;
 
         if I {
             // Allow the thread to be woken up by a signal again.
             Thread::current().signal_list.set_signal_waker(None);
         }
 
-        unsafe { guard.force_relock() };
+        unlocked_guard.relock().await
     }
 }