
rcu: provide call_rcu() to drop RCU-protected data asynchronously

We can pass a function to be called after a successful rcu_sync() call.
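
A minimal usage sketch of the intended pattern (the field name `pointer` and
value `new_value` are hypothetical, mirroring the process.rs changes below):
swap the new value in, then hand the old one to call_rcu() so it is dropped
only after all current readers have left their read-side critical sections.

    let old = unsafe { self.pointer.swap(Some(new_value)) };
    call_rcu(move || drop(old));

Internally, call_rcu() spawns a task on the runtime that awaits rcu_sync()
before invoking the callback, so the caller no longer has to block on the
grace period itself.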

Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf committed 6 months ago
commit 21dd5ea1c7
2 files changed, 29 insertions and 16 deletions
  1. src/kernel/task/process.rs  +13 −11
  2. src/rcu.rs  +16 −5

+ 13 - 11
src/kernel/task/process.rs

@@ -4,10 +4,11 @@ use super::{
 };
 use crate::kernel::constants::{ECHILD, EINTR, EINVAL, EPERM, ESRCH};
 use crate::kernel::task::{CloneArgs, CloneFlags};
+use crate::rcu::call_rcu;
 use crate::{
     kernel::mem::MMList,
     prelude::*,
-    rcu::{rcu_sync, RCUPointer, RCUReadGuard},
+    rcu::{RCUPointer, RCUReadGuard},
     sync::CondVar,
 };
 use alloc::{
@@ -408,12 +409,14 @@ impl Process {
             .session(session.clone())
             .build(&mut process_list);
 
-        {
-            let _old_session = unsafe { self.session.swap(Some(session.clone())) }.unwrap();
-            let old_pgroup = unsafe { self.pgroup.swap(Some(pgroup.clone())) }.unwrap();
-            old_pgroup.remove_member(self.pid, process_list.prove_mut());
-            Task::block_on(rcu_sync());
-        }
+        let old_session = unsafe { self.session.swap(Some(session.clone())) }.unwrap();
+        let old_pgroup = unsafe { self.pgroup.swap(Some(pgroup.clone())) }.unwrap();
+        old_pgroup.remove_member(self.pid, process_list.prove_mut());
+
+        call_rcu(move || {
+            drop(old_session);
+            drop(old_pgroup);
+        });
 
         Ok(pgroup.pgid)
     }
@@ -459,10 +462,9 @@ impl Process {
         };
 
         pgroup.remove_member(self.pid, procs.prove_mut());
-        {
-            let _old_pgroup = unsafe { self.pgroup.swap(Some(new_pgroup)) }.unwrap();
-            Task::block_on(rcu_sync());
-        }
+
+        let old_pgroup = unsafe { self.pgroup.swap(Some(new_pgroup)) }.unwrap();
+        call_rcu(move || drop(old_pgroup));
 
         Ok(())
     }

+ 16 - 5
src/rcu.rs

@@ -1,11 +1,11 @@
-use crate::prelude::*;
+use crate::{kernel::task::block_on, prelude::*};
 use alloc::sync::Arc;
 use core::{
     ops::Deref,
     ptr::NonNull,
     sync::atomic::{AtomicPtr, Ordering},
 };
-use eonix_runtime::task::Task;
+use eonix_runtime::scheduler::RUNTIME;
 use eonix_sync::{Mutex, RwLock, RwLockReadGuard};
 use pointers::BorrowedArc;
 
@@ -21,7 +21,7 @@ impl<'data, T> RCUReadGuard<'data, BorrowedArc<'data, T>> {
     fn lock(value: BorrowedArc<'data, T>) -> Self {
         Self {
             value,
-            _guard: Task::block_on(GLOBAL_RCU_SEM.read()),
+            _guard: block_on(GLOBAL_RCU_SEM.read()),
             _phantom: PhantomData,
         }
     }
@@ -48,6 +48,14 @@ pub async fn rcu_sync() {
     let _ = GLOBAL_RCU_SEM.write().await;
 }
 
+pub fn call_rcu(func: impl FnOnce() + Send + 'static) {
+    RUNTIME.spawn(async move {
+        // Wait for all readers to finish.
+        rcu_sync().await;
+        func();
+    });
+}
+
 pub trait RCUNode<MySelf> {
     fn rcu_prev(&self) -> &AtomicPtr<MySelf>;
     fn rcu_next(&self) -> &AtomicPtr<MySelf>;
@@ -154,7 +162,7 @@ impl<T: RCUNode<T>> RCUList<T> {
     }
 
     pub fn iter(&self) -> RCUIterator<T> {
-        let _lck = Task::block_on(self.reader_lock.read());
+        let _lck = block_on(self.reader_lock.read());
 
         RCUIterator {
             // SAFETY: We have a read lock, so the node is still alive.
@@ -264,7 +272,10 @@ impl<T> Drop for RCUPointer<T> {
         if let Some(arc) = unsafe { self.swap(None) } {
             // We only need to wait if we hold the last reference; RCU readers may still be using the data.
             if Arc::strong_count(&arc) == 1 {
-                Task::block_on(rcu_sync());
+                call_rcu(move || {
+                    // Drop the last reference once the grace period has elapsed.
+                    drop(arc);
+                });
             }
         }
     }