rwlock: refactor to get rid of `Lock`

greatbridf · 10 months ago
parent commit 1dc9996b90

+ 2 - 4
crates/eonix_sync/src/lib.rs

@@ -11,8 +11,6 @@ mod strategy;
 pub use guard::{ForceUnlockableGuard, Guard, UnlockableGuard, UnlockedGuard};
 pub use lock::Lock;
 pub use locked::{AsProof, AsProofMut, Locked, Proof, ProofMut};
-pub use rwlock::RwLockStrategy;
+pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard, Wait as RwLockWait};
 pub use spin::{LoopRelax, Relax, Spin, SpinGuard, SpinIrqGuard, SpinRelax};
-pub use strategy::{LockStrategy, WaitStrategy};
-
-pub type RwLock<T, W> = Lock<T, RwLockStrategy<W>>;
+pub use strategy::LockStrategy;

+ 30 - 0
crates/eonix_sync/src/locked/proof.rs

@@ -68,6 +68,36 @@ where
         'guard: 'pos;
 }
 
+impl<'pos, T> Proof<'pos, T>
+where
+    T: ?Sized,
+{
+    /// # Safety
+    /// The caller must ensure valid access for at least the lifetime `'pos`.
+    pub const unsafe fn new(address: *const T) -> Self {
+        Self {
+            // SAFETY: The validity of the reference is guaranteed by the caller.
+            address: unsafe { NonNull::new_unchecked(address as *mut _) },
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<'pos, T> ProofMut<'pos, T>
+where
+    T: ?Sized,
+{
+    /// # Safety
+    /// The caller must ensure valid access for at least the lifetime `'pos`.
+    pub const unsafe fn new(address: *mut T) -> Self {
+        Self {
+            // SAFETY: The validity of the reference is guaranteed by the caller.
+            address: unsafe { NonNull::new_unchecked(address as *mut _) },
+            _phantom: PhantomData,
+        }
+    }
+}
+
 /// Proof of mutable access to a position in memory can be duplicated.
 impl<T> Copy for ProofMut<'_, T> where T: ?Sized {}
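
These raw-pointer constructors exist so the new guards (see `rwlock/guard.rs` below) can mint proofs. A minimal, hedged sketch of the safety contract — the local `value` here is purely illustrative:

```rust
use eonix_sync::Proof;

fn demo() {
    let value: u32 = 42;
    // SAFETY: `value` outlives `proof` and is not mutated while it exists.
    let proof: Proof<'_, u32> = unsafe { Proof::new(&raw const value) };
    let _ = proof;
}
```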
 

+ 144 - 105
crates/eonix_sync/src/rwlock.rs

@@ -1,154 +1,193 @@
-use crate::{LockStrategy, WaitStrategy};
+mod guard;
+mod wait;
+
 use core::{
-    marker::PhantomData,
+    cell::UnsafeCell,
     sync::atomic::{AtomicIsize, Ordering},
 };
 
-pub struct RwLockStrategy<W>(PhantomData<W>)
-where
-    W: WaitStrategy;
+pub use guard::{RwLockReadGuard, RwLockWriteGuard};
+pub use wait::Wait;
 
-pub struct RwLockData<W>
+#[derive(Debug, Default)]
+pub struct RwLock<T, W>
 where
-    W: WaitStrategy,
+    T: ?Sized,
+    W: Wait,
 {
     counter: AtomicIsize,
-    wait_data: W::Data,
+    wait: W,
+    value: UnsafeCell<T>,
 }
 
-impl<W> RwLockStrategy<W>
+impl<T, W> RwLock<T, W>
 where
-    W: WaitStrategy,
+    W: Wait,
 {
-    #[cold]
-    fn lock_slow_path(
-        data: &<Self as LockStrategy>::StrategyData,
-    ) -> <Self as LockStrategy>::GuardContext {
-        loop {
-            if let Ok(_) =
-                data.counter
-                    .compare_exchange_weak(0, -1, Ordering::Acquire, Ordering::Relaxed)
-            {
-                return ();
-            }
-
-            W::write_wait(&data.wait_data, || {
-                data.counter.load(Ordering::Relaxed) == 0
-            });
-        }
-    }
-
-    #[cold]
-    fn lock_shared_slow_path(
-        data: &<Self as LockStrategy>::StrategyData,
-    ) -> <Self as LockStrategy>::GuardContext {
-        loop {
-            let mut counter = data.counter.load(Ordering::Relaxed);
-            while counter >= 0 {
-                match data.counter.compare_exchange_weak(
-                    counter,
-                    counter + 1,
-                    Ordering::Acquire,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return (),
-                    Err(previous) => counter = previous,
-                }
-            }
-
-            W::read_wait(&data.wait_data, || {
-                data.counter.load(Ordering::Relaxed) >= 0
-            });
+    pub const fn new(value: T, wait: W) -> Self {
+        Self {
+            counter: AtomicIsize::new(0),
+            wait,
+            value: UnsafeCell::new(value),
         }
     }
 }
 
-unsafe impl<W> LockStrategy for RwLockStrategy<W>
+impl<T, W> RwLock<T, W>
 where
-    W: WaitStrategy,
+    T: ?Sized,
+    W: Wait,
 {
-    type StrategyData = RwLockData<W>;
-    type GuardContext = ();
-
-    fn new_data() -> Self::StrategyData {
-        Self::StrategyData {
-            counter: AtomicIsize::new(0),
-            wait_data: W::new_data(),
+    /// # Safety
+    /// The caller MUST ensure that write access has been acquired before
+    /// calling this function.
+    unsafe fn write_lock(&self) -> RwLockWriteGuard<'_, T, W> {
+        RwLockWriteGuard {
+            lock: self,
+            // SAFETY: We are holding the write lock, so we can safely access the value.
+            value: unsafe { &mut *self.value.get() },
         }
     }
 
-    unsafe fn is_locked(data: &Self::StrategyData) -> bool {
-        data.counter.load(Ordering::Relaxed) == 1
+    /// # Safety
+    /// The caller MUST ensure that read access has been acquired before
+    /// calling this function.
+    unsafe fn read_lock(&self) -> RwLockReadGuard<'_, T, W> {
+        RwLockReadGuard {
+            lock: self,
+            // SAFETY: We are holding the read lock, so we can safely access the value.
+            value: unsafe { &*self.value.get() },
+        }
     }
 
-    unsafe fn try_lock(data: &Self::StrategyData) -> Option<Self::GuardContext> {
-        data.counter
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T, W>> {
+        self.counter
             .compare_exchange(0, -1, Ordering::Acquire, Ordering::Relaxed)
-            .map(|_| ())
             .ok()
+            .map(|_| unsafe { self.write_lock() })
+    }
+
+    fn try_write_weak(&self) -> Option<RwLockWriteGuard<'_, T, W>> {
+        self.counter
+            .compare_exchange_weak(0, -1, Ordering::Acquire, Ordering::Relaxed)
+            .ok()
+            .map(|_| unsafe { self.write_lock() })
     }
 
-    unsafe fn try_lock_shared(data: &Self::StrategyData) -> Option<Self::GuardContext>
-    where
-        Self: Sized,
-    {
-        if W::has_write_waiting(&data.wait_data) {
+    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T, W>> {
+        if self.wait.has_write_waiting() {
             return None;
         }
 
-        let counter = data.counter.load(Ordering::Relaxed);
-        match counter {
-            0.. => data
-                .counter
+        let counter = self.counter.load(Ordering::Relaxed);
+        if counter >= 0 {
+            self.counter
                 .compare_exchange(counter, counter + 1, Ordering::Acquire, Ordering::Relaxed)
                 .ok()
-                .map(|_| ()),
-            _ => None,
+                .map(|_| unsafe { self.read_lock() })
+        } else {
+            None
         }
     }
 
-    unsafe fn do_lock(data: &Self::StrategyData) -> Self::GuardContext {
-        if let Some(context) = unsafe { Self::try_lock(data) } {
+    fn try_read_weak(&self) -> Option<RwLockReadGuard<'_, T, W>> {
+        if self.wait.has_write_waiting() {
+            return None;
+        }
+
+        let counter = self.counter.load(Ordering::Relaxed);
+        if counter >= 0 {
+            self.counter
+                .compare_exchange_weak(counter, counter + 1, Ordering::Acquire, Ordering::Relaxed)
+                .ok()
+                .map(|_| unsafe { self.read_lock() })
+        } else {
+            None
+        }
+    }
+
+    #[cold]
+    fn write_slow_path(&self) -> RwLockWriteGuard<'_, T, W> {
+        loop {
+            if let Some(guard) = self.try_write_weak() {
+                return guard;
+            }
+
+            self.wait
+                .write_wait(|| self.counter.load(Ordering::Relaxed) == 0);
+        }
+    }
+
+    #[cold]
+    fn read_slow_path(&self) -> RwLockReadGuard<'_, T, W> {
+        loop {
+            // TODO: can we use `try_read_weak` here?
+            let mut counter = self.counter.load(Ordering::Relaxed);
+            while counter >= 0 {
+                match self.counter.compare_exchange_weak(
+                    counter,
+                    counter + 1,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return unsafe { self.read_lock() },
+                    Err(previous) => counter = previous,
+                }
+            }
+
+            self.wait
+                .read_wait(|| self.counter.load(Ordering::Relaxed) >= 0);
+        }
+    }
+
+    pub fn write(&self) -> RwLockWriteGuard<'_, T, W> {
+        if let Some(guard) = self.try_write() {
             // Quick path
-            context
+            guard
         } else {
-            Self::lock_slow_path(data)
+            self.write_slow_path()
         }
     }
 
-    unsafe fn do_lock_shared(data: &Self::StrategyData) -> Self::GuardContext {
-        if let Some(context) = unsafe { Self::try_lock_shared(data) } {
+    pub fn read(&self) -> RwLockReadGuard<'_, T, W> {
+        if let Some(guard) = self.try_read() {
             // Quick path
-            context
+            guard
         } else {
-            Self::lock_shared_slow_path(data)
+            self.read_slow_path()
         }
     }
 
-    unsafe fn do_unlock(data: &Self::StrategyData, _: &mut Self::GuardContext)
-    where
-        Self: Sized,
-    {
-        let old = data.counter.fetch_add(1, Ordering::Release);
-        assert_eq!(
-            old, -1,
-            "RwLockStrategy::do_unlock: erroneous counter value: {}",
-            old
-        );
-        W::write_notify(&data.wait_data);
+    pub fn get_mut(&mut self) -> &mut T {
+        // SAFETY: The exclusive access to the lock is guaranteed by the borrow checker.
+        unsafe { &mut *self.value.get() }
     }
+}
 
-    unsafe fn do_unlock_shared(data: &Self::StrategyData, _: &mut Self::GuardContext)
-    where
-        Self: Sized,
-    {
-        match data.counter.fetch_sub(1, Ordering::Release) {
-            2.. => {}
-            1 => W::read_notify(&data.wait_data),
-            val => unreachable!(
-                "RwLockStrategy::do_unlock_shared: erroneous counter value: {}",
-                val
-            ),
-        }
+impl<T, W> Clone for RwLock<T, W>
+where
+    T: ?Sized + Clone,
+    W: Wait,
+{
+    fn clone(&self) -> Self {
+        Self::new(self.read().clone(), W::new())
     }
 }
+
+// SAFETY: As long as the value protected by the lock can be sent between threads,
+//         the lock itself can be sent between threads as well.
+unsafe impl<T, W> Send for RwLock<T, W>
+where
+    T: ?Sized + Send,
+    W: Wait,
+{
+}
+
+// SAFETY: `RwLock` can provide shared access to the value it protects, so it is safe to
+//         implement `Sync` for it. However, this is only true if the value itself is `Sync`.
+unsafe impl<T, W> Sync for RwLock<T, W>
+where
+    T: ?Sized + Send + Sync,
+    W: Wait,
+{
+}
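
For orientation, a hedged usage sketch of the refactored type. It works for any wait strategy (`RwLockWait` is the crate's re-export of the `Wait` trait shown in `rwlock/wait.rs` below); the counter comments reflect the atomics above:

```rust
use eonix_sync::{RwLock, RwLockWait};

fn demo<W: RwLockWait>(lock: &RwLock<Vec<u32>, W>) {
    {
        let mut guard = lock.write(); // exclusive: counter goes 0 -> -1
        guard.push(1);
    } // guard drops: counter resets to 0 and waiting readers are notified

    let r1 = lock.read(); // shared: counter 0 -> 1
    let r2 = lock.read(); // shared: counter 1 -> 2
    assert_eq!(r1.len() + r2.len(), 2);

    // A writer cannot enter while read guards are alive.
    assert!(lock.try_write().is_none());
}
```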

+ 173 - 0
crates/eonix_sync/src/rwlock/guard.rs

@@ -0,0 +1,173 @@
+use crate::{AsProof, AsProofMut, ForceUnlockableGuard, Proof, ProofMut};
+
+use super::{RwLock, Wait};
+use core::{
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+    sync::atomic::Ordering,
+};
+
+pub struct RwLockWriteGuard<'a, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    pub(super) lock: &'a RwLock<T, W>,
+    pub(super) value: &'a mut T,
+}
+
+pub struct RwLockReadGuard<'a, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    pub(super) lock: &'a RwLock<T, W>,
+    pub(super) value: &'a T,
+}
+
+impl<T, W> Drop for RwLockWriteGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn drop(&mut self) {
+        let old = self.lock.counter.swap(0, Ordering::Release);
+        assert_eq!(
+            old, -1,
+            "RwLockWriteGuard::drop(): erroneous counter value: {}",
+            old
+        );
+        self.lock.wait.write_notify();
+    }
+}
+
+impl<T, W> Drop for RwLockReadGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn drop(&mut self) {
+        match self.lock.counter.fetch_sub(1, Ordering::Release) {
+            2.. => {}
+            1 => self.lock.wait.read_notify(),
+            val => unreachable!("RwLockReadGuard::drop(): erroneous counter value: {}", val),
+        }
+    }
+}
+
+impl<T, W> Deref for RwLockWriteGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.value
+    }
+}
+
+impl<T, W> DerefMut for RwLockWriteGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.value
+    }
+}
+
+impl<T, W> AsRef<T> for RwLockReadGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn as_ref(&self) -> &T {
+        self.value
+    }
+}
+
+impl<T, W> AsMut<T> for RwLockWriteGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn as_mut(&mut self) -> &mut T {
+        self.value
+    }
+}
+
+impl<T, W> Deref for RwLockReadGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.value
+    }
+}
+
+impl<T, W> AsRef<T> for RwLockWriteGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn as_ref(&self) -> &T {
+        self.value
+    }
+}
+
+unsafe impl<'guard, 'pos, T, W> AsProof<'guard, 'pos, T> for RwLockWriteGuard<'guard, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn prove(&self) -> Proof<'pos, T> {
+        unsafe { Proof::new(&raw const *self.value) }
+    }
+}
+
+unsafe impl<'guard, 'pos, T, W> AsProofMut<'guard, 'pos, T> for RwLockWriteGuard<'guard, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn prove_mut(&self) -> ProofMut<'pos, T> {
+        unsafe { ProofMut::new(&raw const *self.value as *mut _) }
+    }
+}
+
+unsafe impl<'guard, 'pos, T, W> AsProof<'guard, 'pos, T> for RwLockReadGuard<'guard, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    fn prove(&self) -> Proof<'pos, T> {
+        unsafe { Proof::new(&raw const *self.value) }
+    }
+}
+
+impl<T, W> ForceUnlockableGuard for RwLockReadGuard<'_, T, W>
+where
+    T: ?Sized,
+    W: Wait,
+{
+    unsafe fn force_unlock(&mut self) {
+        match self.lock.counter.fetch_sub(1, Ordering::Release) {
+            2.. => {}
+            1 => self.lock.wait.read_notify(),
+            val => unreachable!(
+                "RwLockReadGuard::force_unlock(): erroneous counter value: {}",
+                val
+            ),
+        }
+    }
+
+    unsafe fn force_relock(&mut self) {
+        let _ = ManuallyDrop::new(if let Some(guard) = self.lock.try_read() {
+            // Quick path
+            guard
+        } else {
+            self.lock.read_slow_path()
+        });
+    }
+}
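
The `AsProof`/`AsProofMut` impls are what let the call sites further down replace `lock_shared().prove()` with `read().prove()`. A hedged sketch of the pattern, with the `Locked` signatures inferred from the procfs and tmpfs hunks below:

```rust
use eonix_sync::{AsProof, AsProofMut, Locked, RwLock, RwLockWait};

fn demo<W: RwLockWait>(rwsem: &RwLock<(), W>, entries: &Locked<Vec<u8>, ()>) {
    let guard = rwsem.read();
    // A read guard proves shared access to everything `rwsem` protects.
    let _len = entries.access(guard.prove()).len();
    drop(guard); // release shared access before taking the write lock

    let wguard = rwsem.write();
    // A write guard additionally proves exclusive access.
    entries.access_mut(wguard.prove_mut()).push(0);
}
```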

+ 26 - 0
crates/eonix_sync/src/rwlock/wait.rs

@@ -0,0 +1,26 @@
+pub trait Wait {
+    fn new() -> Self
+    where
+        Self: Sized;
+
+    fn has_write_waiting(&self) -> bool
+    where
+        Self: Sized;
+    fn has_read_waiting(&self) -> bool
+    where
+        Self: Sized;
+
+    fn write_wait(&self, check: impl Fn() -> bool)
+    where
+        Self: Sized;
+    fn read_wait(&self, check: impl Fn() -> bool)
+    where
+        Self: Sized;
+
+    fn write_notify(&self)
+    where
+        Self: Sized;
+    fn read_notify(&self)
+    where
+        Self: Sized;
+}
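
A minimal, hypothetical `Wait` implementation that never parks and simply busy-waits; the kernel's real implementation in `src/sync.rs` (further down) parks waiters on condition variables instead:

```rust
use core::hint::spin_loop;

struct SpinWait;

impl Wait for SpinWait {
    fn new() -> Self {
        SpinWait
    }

    // A pure spinner never has parked waiters to report.
    fn has_write_waiting(&self) -> bool {
        false
    }

    fn has_read_waiting(&self) -> bool {
        false
    }

    // Spin until the lock says another acquisition attempt may succeed.
    fn write_wait(&self, check: impl Fn() -> bool) {
        while !check() {
            spin_loop();
        }
    }

    fn read_wait(&self, check: impl Fn() -> bool) {
        while !check() {
            spin_loop();
        }
    }

    // Nothing is parked, so notifications are no-ops.
    fn write_notify(&self) {}

    fn read_notify(&self) {}
}
```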

+ 1 - 5
crates/eonix_sync/src/spin.rs

@@ -100,11 +100,7 @@ where
     R: Relax,
 {
     fn clone(&self) -> Self {
-        Self {
-            locked: AtomicBool::new(false),
-            value: UnsafeCell::new(self.lock().clone()),
-            _phantom: PhantomData,
-        }
+        Self::new(self.lock().clone())
     }
 }
 

+ 0 - 32
crates/eonix_sync/src/strategy.rs

@@ -73,35 +73,3 @@ pub unsafe trait LockStrategy {
         *context = unsafe { Self::do_lock_shared(data) };
     }
 }
-
-pub trait WaitStrategy {
-    type Data;
-
-    fn new_data() -> Self::Data
-    where
-        Self: Sized;
-
-    fn has_write_waiting(data: &Self::Data) -> bool
-    where
-        Self: Sized;
-
-    fn has_read_waiting(data: &Self::Data) -> bool
-    where
-        Self: Sized;
-
-    fn write_wait(data: &Self::Data, check: impl Fn() -> bool)
-    where
-        Self: Sized;
-
-    fn read_wait(data: &Self::Data, check: impl Fn() -> bool)
-    where
-        Self: Sized;
-
-    fn write_notify(data: &Self::Data)
-    where
-        Self: Sized;
-
-    fn read_notify(data: &Self::Data)
-    where
-        Self: Sized;
-}

+ 16 - 18
src/fs/fat32.rs

@@ -1,14 +1,5 @@
-use core::{ops::ControlFlow, sync::atomic::Ordering};
-
-use alloc::{
-    collections::btree_map::BTreeMap,
-    sync::{Arc, Weak},
-    vec::Vec,
-};
-use bindings::EIO;
-
-use dir::Dirs as _;
-use file::ClusterRead;
+mod dir;
+mod file;
 
 use crate::{
     io::{Buffer, ByteBuffer, UninitBuffer},
@@ -25,11 +16,18 @@ use crate::{
         },
     },
     prelude::*,
+    sync::rwlock_new,
     KResult,
 };
-
-mod dir;
-mod file;
+use alloc::{
+    collections::btree_map::BTreeMap,
+    sync::{Arc, Weak},
+    vec::Vec,
+};
+use bindings::EIO;
+use core::{ops::ControlFlow, sync::atomic::Ordering};
+use dir::Dirs as _;
+use file::ClusterRead;
 
 type ClusterNo = u32;
 
@@ -135,7 +133,7 @@ impl FatFs {
             sectors_per_cluster: 0,
             rootdir_cluster: 0,
             data_start: 0,
-            fat: RwLock::new(Vec::new()),
+            fat: rwlock_new(Vec::new()),
             weak: weak.clone(),
             icache: BTreeMap::new(),
             volume_label: [0; 11],
@@ -247,7 +245,7 @@ impl Inode for FileInode {
     fn read(&self, buffer: &mut dyn Buffer, offset: usize) -> KResult<usize> {
         let vfs = self.vfs.upgrade().ok_or(EIO)?;
         let vfs = vfs.as_any().downcast_ref::<FatFs>().unwrap();
-        let fat = vfs.fat.lock_shared();
+        let fat = vfs.fat.read();
 
         if self.size.load(Ordering::Relaxed) as usize == 0 {
             return Ok(0);
@@ -288,7 +286,7 @@ impl Inode for DirInode {
     fn lookup(&self, dentry: &Arc<Dentry>) -> KResult<Option<Arc<dyn Inode>>> {
         let vfs = self.vfs.upgrade().ok_or(EIO)?;
         let vfs = vfs.as_any().downcast_ref::<FatFs>().unwrap();
-        let fat = vfs.fat.lock_shared();
+        let fat = vfs.fat.read();
 
         let mut entries = ClusterIterator::new(fat.as_ref(), self.ino as ClusterNo)
             .read(vfs, 0)
@@ -319,7 +317,7 @@ impl Inode for DirInode {
     ) -> KResult<usize> {
         let vfs = self.vfs.upgrade().ok_or(EIO)?;
         let vfs = vfs.as_any().downcast_ref::<FatFs>().unwrap();
-        let fat = vfs.fat.lock_shared();
+        let fat = vfs.fat.read();
 
         let cluster_iter = ClusterIterator::new(fat.as_ref(), self.ino as ClusterNo)
             .read(vfs, offset)

+ 4 - 4
src/fs/procfs.rs

@@ -133,7 +133,7 @@ impl DirInode {
 
 impl Inode for DirInode {
     fn lookup(&self, dentry: &Arc<Dentry>) -> KResult<Option<Arc<dyn Inode>>> {
-        let lock = self.rwsem.lock_shared();
+        let lock = self.rwsem.read();
         Ok(self
             .entries
             .access(lock.prove())
@@ -150,7 +150,7 @@ impl Inode for DirInode {
         offset: usize,
         callback: &mut dyn FnMut(&[u8], Ino) -> KResult<ControlFlow<(), ()>>,
     ) -> KResult<usize> {
-        let lock = self.rwsem.lock_shared();
+        let lock = self.rwsem.read();
         self.entries
             .access(lock.prove())
             .iter()
@@ -238,7 +238,7 @@ pub fn creat(
     let inode = FileInode::new(ino, Arc::downgrade(&fs), file);
 
     {
-        let lock = parent.idata.rwsem.lock();
+        let lock = parent.idata.rwsem.write();
         parent
             .entries
             .access_mut(lock.prove_mut())
@@ -262,7 +262,7 @@ pub fn mkdir(parent: &ProcFsNode, name: &[u8]) -> KResult<ProcFsNode> {
 
     parent
         .entries
-        .access_mut(inode.rwsem.lock().prove_mut())
+        .access_mut(inode.rwsem.write().prove_mut())
         .push((Arc::from(name), ProcFsNode::Dir(inode.clone())));
 
     Ok(ProcFsNode::Dir(inode))

+ 12 - 12
src/fs/tmpfs.rs

@@ -86,7 +86,7 @@ impl Inode for DirectoryInode {
         offset: usize,
         callback: &mut dyn FnMut(&[u8], Ino) -> KResult<ControlFlow<(), ()>>,
     ) -> KResult<usize> {
-        let lock = self.rwsem.lock_shared();
+        let lock = self.rwsem.read();
         self.entries
             .access(lock.prove())
             .iter()
@@ -101,7 +101,7 @@ impl Inode for DirectoryInode {
         let vfs = acquire(&self.vfs)?;
         let vfs = astmp(&vfs);
 
-        let rwsem = self.rwsem.lock();
+        let rwsem = self.rwsem.write();
 
         let ino = vfs.assign_ino();
         let file = FileInode::new(ino, self.vfs.clone(), mode);
@@ -118,7 +118,7 @@ impl Inode for DirectoryInode {
         let vfs = acquire(&self.vfs)?;
         let vfs = astmp(&vfs);
 
-        let rwsem = self.rwsem.lock();
+        let rwsem = self.rwsem.write();
 
         let ino = vfs.assign_ino();
         let file = NodeInode::new(
@@ -136,7 +136,7 @@ impl Inode for DirectoryInode {
         let vfs = acquire(&self.vfs)?;
         let vfs = astmp(&vfs);
 
-        let rwsem = self.rwsem.lock();
+        let rwsem = self.rwsem.write();
 
         let ino = vfs.assign_ino();
         let file = SymlinkInode::new(ino, self.vfs.clone(), target.into());
@@ -149,7 +149,7 @@ impl Inode for DirectoryInode {
         let vfs = acquire(&self.vfs)?;
         let vfs = astmp(&vfs);
 
-        let rwsem = self.rwsem.lock();
+        let rwsem = self.rwsem.write();
 
         let ino = vfs.assign_ino();
         let newdir = DirectoryInode::new(ino, self.vfs.clone(), mode);
@@ -161,10 +161,10 @@ impl Inode for DirectoryInode {
     fn unlink(&self, at: &Arc<Dentry>) -> KResult<()> {
         let _vfs = acquire(&self.vfs)?;
 
-        let dlock = self.rwsem.lock();
+        let dlock = self.rwsem.write();
 
         let file = at.get_inode()?;
-        let _flock = file.rwsem.lock();
+        let _flock = file.rwsem.write();
 
         // SAFETY: `flock` has done the synchronization
         if file.mode.load(Ordering::Relaxed) & S_IFDIR != 0 {
@@ -205,7 +205,7 @@ impl Inode for DirectoryInode {
 
     fn chmod(&self, mode: Mode) -> KResult<()> {
         let _vfs = acquire(&self.vfs)?;
-        let _lock = self.rwsem.lock();
+        let _lock = self.rwsem.write();
 
         // SAFETY: `rwsem` has done the synchronization
         let old = self.mode.load(Ordering::Relaxed);
@@ -265,7 +265,7 @@ impl FileInode {
 impl Inode for FileInode {
     fn read(&self, buffer: &mut dyn Buffer, offset: usize) -> KResult<usize> {
         // TODO: We don't need that strong guarantee, find some way to avoid locks
-        let lock = self.rwsem.lock_shared();
+        let lock = self.rwsem.read();
 
         match self.filedata.access(lock.prove()).split_at_checked(offset) {
             Some((_, data)) => buffer.fill(data).map(|result| result.allow_partial()),
@@ -275,7 +275,7 @@ impl Inode for FileInode {
 
     fn write(&self, buffer: &[u8], offset: WriteOffset) -> KResult<usize> {
         // TODO: We don't need that strong guarantee, find some way to avoid locks
-        let lock = self.rwsem.lock();
+        let lock = self.rwsem.write();
         let filedata = self.filedata.access_mut(lock.prove_mut());
 
         let offset = match offset {
@@ -303,7 +303,7 @@ impl Inode for FileInode {
 
     fn truncate(&self, length: usize) -> KResult<()> {
         // TODO: We don't need that strong guarantee, find some way to avoid locks
-        let lock = self.rwsem.lock();
+        let lock = self.rwsem.write();
         let filedata = self.filedata.access_mut(lock.prove_mut());
 
         // SAFETY: `lock` has done the synchronization
@@ -315,7 +315,7 @@ impl Inode for FileInode {
 
     fn chmod(&self, mode: Mode) -> KResult<()> {
         let _vfs = acquire(&self.vfs)?;
-        let _lock = self.rwsem.lock();
+        let _lock = self.rwsem.write();
 
         // SAFETY: `rwsem` has done the synchronization
         let old = self.mode.load(Ordering::Relaxed);

+ 1 - 1
src/kernel/chardev.rs

@@ -76,7 +76,7 @@ impl CharDevice {
     pub fn open(self: &Arc<Self>) -> KResult<Arc<File>> {
         Ok(match &self.device {
             CharDeviceType::Terminal(terminal) => {
-                let procs = ProcessList::get().lock_shared();
+                let procs = ProcessList::get().read();
                 let current = Thread::current();
                 let session = current.process.session(procs.prove());
                 // We only set the control terminal if the process is the session leader.

+ 7 - 7
src/kernel/syscall/procops.rs

@@ -159,7 +159,7 @@ fn sys_exit(int_stack: &mut InterruptContext, _: &mut ExtendedContext) -> usize
     let status = int_stack.rbx as u32;
 
     unsafe {
-        let mut procs = ProcessList::get().lock();
+        let mut procs = ProcessList::get().write();
         eonix_preempt::disable();
 
         // SAFETY: Preemption is disabled.
@@ -180,7 +180,7 @@ bitflags! {
     }
 }
 
-fn do_waitpid(waitpid: u32, arg1: *mut u32, options: u32) -> KResult<u32> {
+fn do_waitpid(_waitpid: u32, arg1: *mut u32, options: u32) -> KResult<u32> {
     // if waitpid != u32::MAX {
     //     unimplemented!("waitpid with pid {waitpid}")
     // }
@@ -234,7 +234,7 @@ fn do_getsid(pid: u32) -> KResult<u32> {
     if pid == 0 {
         Ok(Thread::current().process.session_rcu().sid)
     } else {
-        let procs = ProcessList::get().lock_shared();
+        let procs = ProcessList::get().read();
         procs
             .try_find_process(pid)
             .map(|proc| proc.session(procs.prove()).sid)
@@ -246,7 +246,7 @@ fn do_getpgid(pid: u32) -> KResult<u32> {
     if pid == 0 {
         Ok(Thread::current().process.pgroup_rcu().pgid)
     } else {
-        let procs = ProcessList::get().lock_shared();
+        let procs = ProcessList::get().read();
         procs
             .try_find_process(pid)
             .map(|proc| proc.pgroup(procs.prove()).pgid)
@@ -324,7 +324,7 @@ fn do_prctl(option: u32, arg2: usize) -> KResult<()> {
 }
 
 fn do_kill(pid: i32, sig: u32) -> KResult<()> {
-    let procs = ProcessList::get().lock_shared();
+    let procs = ProcessList::get().read();
     match pid {
         // Send signal to every process for which the calling process has
         // permission to send signals.
@@ -351,7 +351,7 @@ fn do_kill(pid: i32, sig: u32) -> KResult<()> {
 
 fn do_tkill(tid: u32, sig: u32) -> KResult<()> {
     ProcessList::get()
-        .lock_shared()
+        .read()
         .try_find_thread(tid)
         .ok_or(ESRCH)?
         .raise(Signal::try_from(sig)?);
@@ -582,7 +582,7 @@ fn sys_vfork(int_stack: &mut InterruptContext, ext: &mut ExtendedContext) -> usi
 }
 
 fn sys_fork(int_stack: &mut InterruptContext, _: &mut ExtendedContext) -> usize {
-    let mut procs = ProcessList::get().lock();
+    let mut procs = ProcessList::get().write();
 
     let current = Thread::current();
     let current_process = current.process.clone();

+ 4 - 4
src/kernel/task/process.rs

@@ -291,7 +291,7 @@ impl Process {
         if wait_object.stopped().is_some() || wait_object.is_continue() {
             Ok(Some(wait_object))
         } else {
-            let mut procs = ProcessList::get().lock();
+            let mut procs = ProcessList::get().write();
             procs.remove_process(wait_object.pid);
             assert!(self
                 .inner
@@ -306,7 +306,7 @@ impl Process {
 
     /// Create a new session for the process.
     pub fn setsid(self: &Arc<Self>) -> KResult<u32> {
-        let mut process_list = ProcessList::get().lock();
+        let mut process_list = ProcessList::get().write();
         // If there exists a session that has the same sid as our pid, we can't create a new
         // session. The standard says that we should create a new process group and be the
         // only process in the new process group and session.
@@ -383,7 +383,7 @@ impl Process {
     /// This function should be called on the process that issued the syscall in order to do
     /// permission checks.
     pub fn setpgid(self: &Arc<Self>, pid: u32, pgid: u32) -> KResult<()> {
-        let mut procs = ProcessList::get().lock();
+        let mut procs = ProcessList::get().write();
         // We may set pgid of either the calling process or a child process.
         if pid == self.pid {
             self.do_setpgid(pgid, procs.as_mut())
@@ -485,7 +485,7 @@ impl WaitList {
     /// releases the lock on `ProcessList` and `WaitList` and waits on `cv_wait_procs`.
     pub fn entry(&self, want_stop: bool, want_continue: bool) -> Entry {
         Entry {
-            process_list: ProcessList::get().lock_shared(),
+            process_list: ProcessList::get().read(),
             wait_procs: self.wait_procs.lock(),
             cv: &self.cv_wait_procs,
             want_stop,

+ 3 - 3
src/kernel/task/process_list.rs

@@ -1,5 +1,5 @@
 use super::{Process, ProcessGroup, Session, Signal, Thread, WaitObject, WaitType};
-use crate::{prelude::*, rcu::rcu_sync};
+use crate::{prelude::*, rcu::rcu_sync, sync::rwlock_new};
 use alloc::{
     collections::btree_map::BTreeMap,
     sync::{Arc, Weak},
@@ -23,7 +23,7 @@ pub struct ProcessList {
 
 lazy_static! {
     static ref GLOBAL_PROC_LIST: RwLock<ProcessList> = {
-        RwLock::new(ProcessList {
+        rwlock_new(ProcessList {
             init: None,
             threads: BTreeMap::new(),
             processes: BTreeMap::new(),
@@ -56,7 +56,7 @@ impl ProcessList {
 
     pub fn kill_current(signal: Signal) -> ! {
         unsafe {
-            let mut process_list = ProcessList::get().lock();
+            let mut process_list = ProcessList::get().write();
             eonix_preempt::disable();
 
             // SAFETY: Preemption disabled.

+ 7 - 7
src/kernel/task/session.rs

@@ -1,5 +1,5 @@
 use super::{Process, ProcessGroup, ProcessList, Signal, Thread};
-use crate::{kernel::Terminal, prelude::*};
+use crate::{kernel::Terminal, prelude::*, sync::rwlock_new};
 use alloc::{
     collections::btree_map::BTreeMap,
     sync::{Arc, Weak},
@@ -30,7 +30,7 @@ impl Session {
         let session = Arc::new(Self {
             sid: leader.pid,
             leader: Arc::downgrade(leader),
-            job_control: RwLock::new(SessionJobControl {
+            job_control: rwlock_new(SessionJobControl {
                 foreground: Weak::new(),
                 control_terminal: None,
             }),
@@ -56,14 +56,14 @@ impl Session {
     }
 
     pub fn foreground(&self) -> Option<Arc<ProcessGroup>> {
-        self.job_control.lock_shared().foreground.upgrade()
+        self.job_control.read().foreground.upgrade()
     }
 
     /// Set the foreground process group identified by `pgid`.
     /// The process group must belong to the session.
     pub fn set_foreground_pgid(&self, pgid: u32, procs: Proof<'_, ProcessList>) -> KResult<()> {
         if let Some(group) = self.groups.access(procs).get(&pgid) {
-            self.job_control.lock().foreground = group.clone();
+            self.job_control.write().foreground = group.clone();
             Ok(())
         } else {
             // TODO: Check if the process group refers to an existing process group.
@@ -80,7 +80,7 @@ impl Session {
         forced: bool,
         procs: Proof<'_, ProcessList>,
     ) -> KResult<()> {
-        let mut job_control = self.job_control.lock();
+        let mut job_control = self.job_control.write();
         if let Some(_) = job_control.control_terminal.as_ref() {
             if let Some(session) = terminal.session().as_ref() {
                 if session.sid == self.sid {
@@ -98,14 +98,14 @@ impl Session {
     /// Drop the control terminal reference inside the session.
     /// DO NOT TOUCH THE TERMINAL'S SESSION FIELD.
     pub fn drop_control_terminal(&self) -> Option<Arc<Terminal>> {
-        let mut inner = self.job_control.lock();
+        let mut inner = self.job_control.write();
         inner.foreground = Weak::new();
         inner.control_terminal.take()
     }
 
     pub fn raise_foreground(&self, signal: Signal) {
         if let Some(fg) = self.foreground() {
-            let procs = ProcessList::get().lock_shared();
+            let procs = ProcessList::get().read();
             fg.raise(signal, procs.prove());
         }
     }

+ 2 - 2
src/kernel/task/signal.rs

@@ -413,7 +413,7 @@ impl SignalList {
                                 pid: thread.process.pid,
                                 code: WaitType::Stopped(signal),
                             },
-                            ProcessList::get().lock_shared().prove(),
+                            ProcessList::get().read().prove(),
                         );
                     }
 
@@ -436,7 +436,7 @@ impl SignalList {
                                 pid: thread.process.pid,
                                 code: WaitType::Continued,
                             },
-                            ProcessList::get().lock_shared().prove(),
+                            ProcessList::get().read().prove(),
                         );
                     }
                 }

+ 1 - 1
src/kernel/terminal.rs

@@ -606,7 +606,7 @@ impl Terminal {
             TerminalIORequest::SetProcessGroup(pgid) => {
                 let pgid = pgid.read()?;
 
-                let procs = ProcessList::get().lock_shared();
+                let procs = ProcessList::get().read();
                 let inner = self.inner.lock();
                 let session = inner.session.upgrade();
 

+ 10 - 11
src/kernel/vfs/inode.rs

@@ -1,3 +1,5 @@
+use super::{dentry::Dentry, s_isblk, s_ischr, vfs::Vfs, DevId, TimeSpec};
+use crate::{io::Buffer, prelude::*, sync::rwlock_new};
 use alloc::sync::{Arc, Weak};
 use bindings::{
     statx, EINVAL, EISDIR, ENOTDIR, EPERM, STATX_ATIME, STATX_BLOCKS, STATX_CTIME, STATX_GID,
@@ -11,9 +13,6 @@ use core::{
     sync::atomic::{AtomicU32, AtomicU64, Ordering},
 };
 
-use super::{dentry::Dentry, s_isblk, s_ischr, vfs::Vfs, DevId, TimeSpec};
-use crate::{io::Buffer, prelude::*};
-
 pub type Ino = u64;
 pub type AtomicIno = AtomicU64;
 #[allow(dead_code)]
@@ -50,19 +49,19 @@ pub struct InodeData {
 }
 
 impl InodeData {
-    pub fn new(ino: Ino, vfs: Weak<dyn Vfs>) -> Self {
+    pub const fn new(ino: Ino, vfs: Weak<dyn Vfs>) -> Self {
         Self {
             ino,
             vfs,
             atime: Spin::new(TimeSpec::default()),
             ctime: Spin::new(TimeSpec::default()),
             mtime: Spin::new(TimeSpec::default()),
-            rwsem: RwLock::new(()),
-            size: Default::default(),
-            nlink: Default::default(),
-            uid: Default::default(),
-            gid: Default::default(),
-            mode: Default::default(),
+            rwsem: rwlock_new(()),
+            size: AtomicU64::new(0),
+            nlink: AtomicNlink::new(0),
+            uid: AtomicUid::new(0),
+            gid: AtomicGid::new(0),
+            mode: AtomicMode::new(0),
         }
     }
 }
@@ -249,7 +248,7 @@ pub trait Inode: Send + Sync + InodeInner {
         f(
             uninit_mut.as_mut_ptr(),
             // SAFETY: `idata` is initialized and we will never move the lock.
-            &unsafe { idata.assume_init_ref() }.rwsem.lock_shared(),
+            &unsafe { idata.assume_init_ref() }.rwsem.read(),
         );
 
         // Safety: `uninit` is initialized
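
The switch from `Default::default()` to explicit constructors is what allows `new` to become `const`: trait methods such as `Default::default` cannot be called in a `const fn` on stable Rust, while the inherent atomic constructors can. A minimal illustration:

```rust
use core::sync::atomic::AtomicU64;

// `AtomicU64::new` is an inherent `const fn`, so it is usable here;
// a `Default::default()` call would not compile in this const context.
const fn zero_counter() -> AtomicU64 {
    AtomicU64::new(0)
}
```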

+ 6 - 0
src/kernel/vfs/mod.rs

@@ -60,6 +60,12 @@ lazy_static! {
     });
 }
 
+impl TimeSpec {
+    pub const fn default() -> Self {
+        Self { sec: 0, nsec: 0 }
+    }
+}
+
 impl FsContext {
     pub fn get_current<'lt>() -> &'lt Arc<Self> {
         &Thread::current().borrow().fs_context

+ 1 - 1
src/lib.rs

@@ -179,7 +179,7 @@ async fn init_process(early_kstack_pfn: usize) {
 
     let thread_builder = ThreadBuilder::new().name(Arc::from(*b"busybox"));
 
-    let mut process_list = ProcessList::get().lock();
+    let mut process_list = ProcessList::get().write();
     let (thread, process) = ProcessBuilder::new()
         .mm_list(mm_list)
         .thread_builder(thread_builder)

+ 13 - 12
src/rcu.rs

@@ -1,28 +1,28 @@
-use crate::{prelude::*, sync::RwLockReadGuard};
+use crate::{
+    prelude::*,
+    sync::{rwlock_new, RwLockReadGuard},
+};
 use alloc::sync::Arc;
 use core::{
     ops::Deref,
     ptr::NonNull,
     sync::atomic::{AtomicPtr, Ordering},
 };
-use lazy_static::lazy_static;
 use pointers::BorrowedArc;
 
 pub struct RCUReadGuard<'data, T: 'data> {
     value: T,
-    guard: RwLockReadGuard<'data, ()>,
+    _guard: RwLockReadGuard<'data, ()>,
     _phantom: PhantomData<&'data T>,
 }
 
-lazy_static! {
-    static ref GLOBAL_RCU_SEM: RwLock<()> = RwLock::new(());
-}
+static GLOBAL_RCU_SEM: RwLock<()> = rwlock_new(());
 
 impl<'data, T: 'data> RCUReadGuard<'data, T> {
     fn lock(value: T) -> Self {
         Self {
             value,
-            guard: GLOBAL_RCU_SEM.lock_shared(),
+            _guard: GLOBAL_RCU_SEM.read(),
             _phantom: PhantomData,
         }
     }
@@ -37,7 +37,8 @@ impl<'data, T: 'data> Deref for RCUReadGuard<'data, T> {
 }
 
 pub fn rcu_sync() {
-    GLOBAL_RCU_SEM.lock();
+    // Lock the global RCU semaphore to ensure that all readers are done.
+    let _ = GLOBAL_RCU_SEM.write();
 }
 
 pub trait RCUNode<MySelf> {
@@ -56,7 +57,7 @@ impl<T: RCUNode<T>> RCUList<T> {
     pub fn new() -> Self {
         Self {
             head: AtomicPtr::new(core::ptr::null_mut()),
-            reader_lock: RwLock::new(()),
+            reader_lock: rwlock_new(()),
             update_lock: Mutex::new(()),
         }
     }
@@ -101,7 +102,7 @@ impl<T: RCUNode<T>> RCUList<T> {
             unsafe { Arc::from_raw(me) };
         }
 
-        let _lck = self.reader_lock.lock();
+        let _lck = self.reader_lock.write();
         node.rcu_prev()
             .store(core::ptr::null_mut(), Ordering::Release);
         node.rcu_next()
@@ -136,7 +137,7 @@ impl<T: RCUNode<T>> RCUList<T> {
             unsafe { Arc::from_raw(old) };
         }
 
-        let _lck = self.reader_lock.lock();
+        let _lck = self.reader_lock.write();
         old_node
             .rcu_prev()
             .store(core::ptr::null_mut(), Ordering::Release);
@@ -146,7 +147,7 @@ impl<T: RCUNode<T>> RCUList<T> {
     }
 
     pub fn iter(&self) -> RCUIterator<T> {
-        let _lck = self.reader_lock.lock_shared();
+        let _lck = self.reader_lock.read();
 
         RCUIterator {
             // SAFETY: We have a read lock, so the node is still alive.
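
For clarity, a hedged sketch of the grace-period argument behind `rcu_sync` above: every `RCUReadGuard` pins a read guard on `GLOBAL_RCU_SEM`, so the write guard is granted only once the reader count reaches zero, and returning from the call proves all pre-existing readers are gone.

```rust
// Mirrors `rcu_sync`: block until every read guard has been dropped, then
// release immediately so new readers can proceed.
fn wait_for_readers(sem: &RwLock<()>) {
    drop(sem.write());
}
```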

+ 40 - 31
src/sync.rs

@@ -2,81 +2,90 @@ mod arcswap;
 mod condvar;
 pub mod semaphore;
 
-use eonix_sync::WaitStrategy;
-
-pub use eonix_sync::{Guard, Lock, Spin};
+use eonix_sync::RwLockWait;
+pub use eonix_sync::{Lock, Spin};
 
 #[doc(hidden)]
+#[derive(Debug)]
 pub struct Wait {
     lock: Spin<()>,
     cv_read: UCondVar,
     cv_write: UCondVar,
 }
 
-impl WaitStrategy for Wait {
-    type Data = Self;
-
-    fn new_data() -> Self::Data {
+impl Wait {
+    const fn new() -> Self {
         Self {
             lock: Spin::new(()),
             cv_read: UCondVar::new(),
             cv_write: UCondVar::new(),
         }
     }
+}
 
-    fn has_write_waiting(data: &Self::Data) -> bool {
-        data.cv_write.has_waiters()
+impl RwLockWait for Wait {
+    fn new() -> Self {
+        Self::new()
     }
 
-    fn has_read_waiting(data: &Self::Data) -> bool {
-        data.cv_read.has_waiters()
+    fn has_write_waiting(&self) -> bool {
+        self.cv_write.has_waiters()
     }
 
-    fn write_wait(data: &Self::Data, check: impl Fn() -> bool) {
-        let mut lock = data.lock.lock();
+    fn has_read_waiting(&self) -> bool {
+        self.cv_read.has_waiters()
+    }
+
+    fn write_wait(&self, check: impl Fn() -> bool) {
+        let mut lock = self.lock.lock();
 
         loop {
             if check() {
                 break;
             }
-            data.cv_write.wait(&mut lock);
+            self.cv_write.wait(&mut lock);
         }
     }
 
-    fn read_wait(data: &Self::Data, check: impl Fn() -> bool) {
-        let mut lock = data.lock.lock();
+    fn read_wait(&self, check: impl Fn() -> bool) {
+        let mut lock = self.lock.lock();
         loop {
             if check() {
                 break;
             }
-            data.cv_read.wait(&mut lock);
+            self.cv_read.wait(&mut lock);
         }
     }
 
-    fn write_notify(data: &Self::Data) {
-        let _lock = data.lock.lock();
-        if Self::has_write_waiting(data) {
-            data.cv_write.notify_one();
-        } else if Self::has_read_waiting(data) {
-            data.cv_read.notify_all();
+    fn write_notify(&self) {
+        let _lock = self.lock.lock();
+        if self.has_write_waiting() {
+            self.cv_write.notify_one();
+        } else if self.has_read_waiting() {
+            self.cv_read.notify_all();
         }
     }
 
-    fn read_notify(data: &Self::Data) {
-        let _lock = data.lock.lock();
-        if Self::has_write_waiting(data) {
-            data.cv_write.notify_one();
-        } else if Self::has_read_waiting(data) {
-            data.cv_read.notify_all();
+    fn read_notify(&self) {
+        let _lock = self.lock.lock();
+        if self.has_write_waiting() {
+            self.cv_write.notify_one();
+        } else if self.has_read_waiting() {
+            self.cv_read.notify_all();
         }
     }
 }
 
+pub const fn rwlock_new<T>(value: T) -> RwLock<T> {
+    RwLock::new(value, Wait::new())
+}
+
 pub type Mutex<T> = Lock<T, semaphore::SemaphoreStrategy<1>>;
 pub type RwLock<T> = eonix_sync::RwLock<T, Wait>;
 
-pub type RwLockReadGuard<'lock, T> =
-    Guard<'lock, T, eonix_sync::RwLockStrategy<Wait>, eonix_sync::RwLockStrategy<Wait>, false>;
+pub type RwLockReadGuard<'a, T> = eonix_sync::RwLockReadGuard<'a, T, Wait>;
+#[allow(dead_code)]
+pub type RwLockWriteGuard<'a, T> = eonix_sync::RwLockWriteGuard<'a, T, Wait>;
 
 pub type CondVar = condvar::CondVar<true>;
 pub type UCondVar = condvar::CondVar<false>;
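
Since `rwlock_new` and `Wait::new` are both `const`, kernel locks can now be plain statics with no `lazy_static!` wrapper; `GLOBAL_RCU_SEM` in `src/rcu.rs` above uses exactly this pattern. A hypothetical example:

```rust
use crate::sync::{rwlock_new, RwLock};

// Hypothetical static, named for illustration only.
static BOOT_FLAGS: RwLock<u32> = rwlock_new(0);

fn set_flag(bit: u32) {
    *BOOT_FLAGS.write() |= 1 << bit;
}
```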

+ 1 - 1
src/sync/condvar.rs

@@ -20,7 +20,7 @@ impl<const I: bool> core::fmt::Debug for CondVar<I> {
 }
 
 impl<const I: bool> CondVar<I> {
-    pub fn new() -> Self {
+    pub const fn new() -> Self {
         Self {
             waiters: Spin::new(VecDeque::new()),
         }