rcu.rs

use core::{
    marker::PhantomData,
    ops::Deref,
    sync::atomic::{AtomicPtr, Ordering},
};
use crate::{
    prelude::*,
    sync::{lock::Guard, semaphore::RwSemaphoreStrategy},
};
use alloc::sync::Arc;
use lazy_static::lazy_static;

/// Read-side guard for an RCU-protected value. Holding it keeps the global
/// RCU semaphore locked shared, so `rcu_sync()` (and thus writers waiting to
/// reclaim memory) blocks until the guard is dropped.
pub struct RCUReadGuard<'data, T: 'data> {
    value: T,
    guard: Guard<'data, (), RwSemaphoreStrategy, false>,
    _phantom: PhantomData<&'data T>,
}

lazy_static! {
    static ref GLOBAL_RCU_SEM: RwSemaphore<()> = RwSemaphore::new(());
}

impl<'data, T: 'data> RCUReadGuard<'data, T> {
    fn lock(value: T) -> Self {
        Self {
            value,
            guard: GLOBAL_RCU_SEM.lock_shared(),
            _phantom: PhantomData,
        }
    }
}

impl<'data, T: 'data> Deref for RCUReadGuard<'data, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

/// Wait for all current RCU readers to finish by briefly acquiring the global
/// semaphore exclusively; the write guard is dropped immediately afterwards.
fn rcu_sync() {
    GLOBAL_RCU_SEM.lock();
}

/// Intrusive links a type must expose so it can be chained into an [`RCUList`].
pub trait RCUNode<MySelf> {
    fn rcu_prev(&self) -> &AtomicPtr<MySelf>;
    fn rcu_next(&self) -> &AtomicPtr<MySelf>;
}
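
// Usage sketch (illustrative only; `ExampleEntry` is not part of this module).
// A type joins an `RCUList` by embedding the two intrusive link pointers and
// exposing them through `RCUNode`; the links start out null and are managed
// entirely by the list.
#[allow(dead_code)]
pub struct ExampleEntry {
    pub value: usize,
    prev: AtomicPtr<ExampleEntry>,
    next: AtomicPtr<ExampleEntry>,
}

#[allow(dead_code)]
impl ExampleEntry {
    fn new(value: usize) -> Arc<Self> {
        Arc::new(Self {
            value,
            prev: AtomicPtr::new(core::ptr::null_mut()),
            next: AtomicPtr::new(core::ptr::null_mut()),
        })
    }
}

impl RCUNode<ExampleEntry> for ExampleEntry {
    fn rcu_prev(&self) -> &AtomicPtr<ExampleEntry> {
        &self.prev
    }

    fn rcu_next(&self) -> &AtomicPtr<ExampleEntry> {
        &self.next
    }
}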

/// An intrusive doubly linked list with RCU-style access: readers traverse
/// under the shared `reader_lock`, while writers serialize on `update_lock`
/// and only take `reader_lock` exclusively when they must wait for readers.
pub struct RCUList<T: RCUNode<T>> {
    head: AtomicPtr<T>,
    reader_lock: RwSemaphore<()>,
    update_lock: Mutex<()>,
}

impl<T: RCUNode<T>> RCUList<T> {
    pub fn new() -> Self {
        Self {
            head: AtomicPtr::new(core::ptr::null_mut()),
            reader_lock: RwSemaphore::new(()),
            update_lock: Mutex::new(()),
        }
    }

    pub fn insert(&self, new_node: Arc<T>) {
        let _lck = self.update_lock.lock();
        let old_head = self.head.load(Ordering::Acquire);

        new_node
            .rcu_prev()
            .store(core::ptr::null_mut(), Ordering::Release);
        new_node.rcu_next().store(old_head, Ordering::Release);

        // Each raw pointer stored in the list owns one strong reference,
        // produced by `Arc::into_raw`.
        if let Some(old_head) = unsafe { old_head.as_ref() } {
            old_head
                .rcu_prev()
                .store(Arc::into_raw(new_node.clone()) as *mut _, Ordering::Release);
        }

        self.head
            .store(Arc::into_raw(new_node) as *mut _, Ordering::Release);
    }

    pub fn remove(&self, node: &Arc<T>) {
        let _lck = self.update_lock.lock();

        let prev = node.rcu_prev().load(Ordering::Acquire);
        let next = node.rcu_next().load(Ordering::Acquire);

        // Unlink `node` from both neighbors (or from `head`). Every pointer we
        // swap out was a strong reference to `node`, so reconstruct and drop it.
        if let Some(next) = unsafe { next.as_ref() } {
            let me = next.rcu_prev().swap(prev, Ordering::AcqRel);
            debug_assert!(me == Arc::as_ptr(node) as *mut _);
            unsafe { Arc::from_raw(me) };
        }

        {
            let prev_next =
                unsafe { prev.as_ref().map(|rcu| rcu.rcu_next()) }.unwrap_or(&self.head);

            let me = prev_next.swap(next, Ordering::AcqRel);
            debug_assert!(me == Arc::as_ptr(node) as *mut _);
            unsafe { Arc::from_raw(me) };
        }

        // Wait for all readers of this list to finish before tearing down the
        // removed node's own links.
        let _lck = self.reader_lock.lock();
        node.rcu_prev()
            .store(core::ptr::null_mut(), Ordering::Release);
        node.rcu_next()
            .store(core::ptr::null_mut(), Ordering::Release);
    }

    pub fn replace(&self, old_node: &Arc<T>, new_node: Arc<T>) {
        let _lck = self.update_lock.lock();

        let prev = old_node.rcu_prev().load(Ordering::Acquire);
        let next = old_node.rcu_next().load(Ordering::Acquire);

        new_node.rcu_prev().store(prev, Ordering::Release);
        new_node.rcu_next().store(next, Ordering::Release);

        // Point the predecessor (or `head`) and the successor at `new_node`,
        // dropping the strong references they held on `old_node`.
        {
            let prev_next =
                unsafe { prev.as_ref().map(|rcu| rcu.rcu_next()) }.unwrap_or(&self.head);

            let old = prev_next.swap(Arc::into_raw(new_node.clone()) as *mut _, Ordering::AcqRel);
            debug_assert!(old == Arc::as_ptr(old_node) as *mut _);
            unsafe { Arc::from_raw(old) };
        }

        if let Some(next) = unsafe { next.as_ref() } {
            let old = next
                .rcu_prev()
                .swap(Arc::into_raw(new_node.clone()) as *mut _, Ordering::AcqRel);
            debug_assert!(old == Arc::as_ptr(old_node) as *mut _);
            unsafe { Arc::from_raw(old) };
        }

        // Wait for readers before clearing the replaced node's links.
        let _lck = self.reader_lock.lock();
        old_node
            .rcu_prev()
            .store(core::ptr::null_mut(), Ordering::Release);
        old_node
            .rcu_next()
            .store(core::ptr::null_mut(), Ordering::Release);
    }

    pub fn iter(&self) -> RCUIterator<T> {
        let _lck = self.reader_lock.lock_shared();

        RCUIterator {
            // SAFETY: We have a read lock, so the node is still alive.
            cur: self.head.load(Ordering::SeqCst),
            _lock: _lck,
        }
    }
}

pub struct RCUIterator<'lt, T: RCUNode<T>> {
    cur: *const T,
    _lock: Guard<'lt, (), RwSemaphoreStrategy, false>,
}

impl<'lt, T: RCUNode<T>> Iterator for RCUIterator<'lt, T> {
    type Item = BorrowedArc<'lt, T>;

    fn next(&mut self) -> Option<Self::Item> {
        match unsafe { self.cur.as_ref() } {
            None => None,
            Some(real) => {
                // SAFETY: We have a read lock, so the node is still alive.
                let ret = self.cur;
                self.cur = real.rcu_next().load(Ordering::SeqCst);
                Some(BorrowedArc::from_raw(ret))
            }
        }
    }
}

/// A single RCU-protected pointer to an `Arc`-managed value.
pub struct RCUPointer<T>(AtomicPtr<T>);

impl<T> RCUPointer<T> {
    pub fn new_with(value: Arc<T>) -> Self {
        Self(AtomicPtr::new(Arc::into_raw(value) as *mut _))
    }

    pub fn empty() -> Self {
        Self(AtomicPtr::new(core::ptr::null_mut()))
    }

    pub fn load<'lt>(&self) -> Option<RCUReadGuard<'lt, BorrowedArc<'lt, T>>> {
        let ptr = self.0.load(Ordering::Acquire);
        if ptr.is_null() {
            None
        } else {
            Some(RCUReadGuard::lock(BorrowedArc::from_raw(ptr)))
        }
    }

    /// # Safety
    /// Caller must ensure that the old pointer is freed only after all readers are done.
    pub unsafe fn swap(&self, new: Option<Arc<T>>) -> Option<Arc<T>> {
        let new = new
            .map(|arc| Arc::into_raw(arc) as *mut T)
            .unwrap_or(core::ptr::null_mut());
        let old = self.0.swap(new, Ordering::AcqRel);

        if old.is_null() {
            None
        } else {
            // Wait for in-flight readers before handing the old value back
            // for reclamation.
            rcu_sync();
            Some(unsafe { Arc::from_raw(old) })
        }
    }
}
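
// Usage sketch (illustrative only; not part of the original file). Publishing
// a new value through `RCUPointer`: `swap` already calls `rcu_sync()` before
// returning the old `Arc`, so, assuming every reader goes through `load()`,
// dropping the returned value here happens after those readers have finished.
#[allow(dead_code)]
fn example_publish(ptr: &RCUPointer<u32>, value: u32) {
    // SAFETY: the old value is dropped only after `swap`'s internal
    // `rcu_sync()` has waited for readers holding the global RCU semaphore.
    let old = unsafe { ptr.swap(Some(Arc::new(value))) };
    drop(old);
}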