//! An intrusive wait object: a pinned, list-linkable node that stores a saved
//! [`Waker`] together with its wake-up state.
  1. use super::WaitList;
  2. use crate::Spin;
  3. use core::{
  4. cell::UnsafeCell,
  5. marker::PhantomPinned,
  6. pin::Pin,
  7. ptr::null_mut,
  8. sync::atomic::{AtomicBool, AtomicPtr, Ordering},
  9. task::Waker,
  10. };
  11. use intrusive_collections::{intrusive_adapter, LinkedListAtomicLink, UnsafeRef};
// Adapter that lets an intrusive `LinkedList` hold `Pin<UnsafeRef<WaitObject>>`
// entries, threading them through the object's embedded `link` field.
intrusive_adapter!(
    pub WaitObjectAdapter = Pin<UnsafeRef<WaitObject>>:
    WaitObject { link: LinkedListAtomicLink }
);
/// A per-waiter node that can be linked into a [`WaitList`].
///
/// The object is intrusive (it embeds its own list `link`) and address-
/// sensitive while linked, hence the `PhantomPinned` marker.
pub struct WaitObject {
    // `true` once someone has woken this object up; see `set_woken_up`.
    woken_up: AtomicBool,
    /// Separation of the field `waker` from its lock is basically due to the
    /// consideration of space. We hope that the object can fit into a cacheline
    /// and `woken_up` takes only 1 byte where the rest 7 bytes can accommodate 1
    /// extra byte required for a spinlock.
    waker_lock: Spin<()>,
    // The saved waker. Guarded by `waker_lock`, hence the `UnsafeCell`.
    waker: UnsafeCell<Option<Waker>>,
    // Pointer to the owning wait list; null once `clear_wait_list` runs.
    wait_list: AtomicPtr<WaitList>,
    // Intrusive linked-list hook used by `WaitObjectAdapter`.
    link: LinkedListAtomicLink,
    // Keeps the object `!Unpin` so it cannot move while possibly linked.
    _pinned: PhantomPinned,
}
// SAFETY: `WaitObject` is `Sync` because we sync the `waker` access with a spinlock.
// (`UnsafeCell<Option<Waker>>` suppresses the auto impl; every access to `waker`
// in this file happens while `waker_lock` is held, and the remaining fields are
// atomics or the intrusive link, which manage their own synchronization.)
unsafe impl Sync for WaitObject {}
  30. impl WaitObject {
  31. pub const fn new(wait_list: &WaitList) -> Self {
  32. Self {
  33. woken_up: AtomicBool::new(false),
  34. waker_lock: Spin::new(()),
  35. waker: UnsafeCell::new(None),
  36. wait_list: AtomicPtr::new(wait_list as *const _ as *mut _),
  37. link: LinkedListAtomicLink::new(),
  38. _pinned: PhantomPinned,
  39. }
  40. }
  41. pub fn save_waker(&self, waker: Waker) {
  42. let _lock = self.waker_lock.lock_irq();
  43. unsafe {
  44. // SAFETY: We're holding the waker lock.
  45. let old_waker = (*self.waker.get()).replace(waker);
  46. assert!(old_waker.is_none(), "Waker already set.");
  47. }
  48. }
  49. /// Save the waker if the wait object was not woken up atomically.
  50. ///
  51. /// # Returns
  52. /// Whether the waker was saved.
  53. pub fn save_waker_if_not_woken_up(&self, waker: &Waker) -> bool {
  54. let _lock = self.waker_lock.lock_irq();
  55. if self.woken_up() {
  56. return false;
  57. }
  58. unsafe {
  59. // SAFETY: We're holding the waker lock.
  60. let old_waker = (*self.waker.get()).replace(waker.clone());
  61. assert!(old_waker.is_none(), "Waker already set.");
  62. }
  63. true
  64. }
  65. pub fn take_waker(&self) -> Option<Waker> {
  66. let _lock = self.waker_lock.lock_irq();
  67. unsafe {
  68. // SAFETY: We're holding the waker lock.
  69. self.waker.get().as_mut().unwrap().take()
  70. }
  71. }
  72. /// Check whether someone had woken up the wait object.
  73. ///
  74. /// Does an `Acquire` operation.
  75. pub fn woken_up(&self) -> bool {
  76. self.woken_up.load(Ordering::Acquire)
  77. }
  78. /// Set the wait object as woken up.
  79. ///
  80. /// Does a `Release` operation.
  81. pub fn set_woken_up(&self) {
  82. self.woken_up.store(true, Ordering::Release);
  83. }
  84. pub fn wait_list(&self) -> *const WaitList {
  85. self.wait_list.load(Ordering::Acquire)
  86. }
  87. pub fn clear_wait_list(&self) {
  88. self.wait_list.store(null_mut(), Ordering::Release);
  89. }
  90. pub fn on_list(&self) -> bool {
  91. !self.wait_list.load(Ordering::Acquire).is_null()
  92. }
  93. }