//! sync.rs — synchronization primitives: lock type aliases (spin, mutex,
//! semaphore, rw-semaphore), condition variables, guard-checked `Locked`
//! storage, and preemption-count bookkeeping.
// Submodules supplying the building blocks combined by the aliases below.
mod condvar;
pub mod lock;
pub mod semaphore;
pub mod spin;
pub mod strategy;
pub mod preempt {
    //! Preemption-count bookkeeping: a nesting counter that is non-zero
    //! while preemption is disabled.

    use core::sync::atomic::{compiler_fence, AtomicUsize, Ordering};

    /// Nesting depth of outstanding `disable()` calls.
    /// TODO: This should be per cpu.
    static PREEMPT_COUNT: AtomicUsize = AtomicUsize::new(0);

    /// Disables preemption. Nests; every call must be balanced by one
    /// `enable()`.
    #[inline(always)]
    pub fn disable() {
        PREEMPT_COUNT.fetch_add(1, Ordering::Relaxed);
        // Compiler-only fence: keeps the compiler from hoisting code of the
        // protected region above the increment. `Relaxed` suffices since the
        // count is not used for cross-CPU ordering (see per-cpu TODO above).
        compiler_fence(Ordering::SeqCst);
    }

    /// Re-enables preemption by undoing one `disable()`.
    ///
    /// NOTE(review): an unbalanced call wraps the counter to `usize::MAX`
    /// (atomic sub wraps); callers must keep disable/enable paired.
    #[inline(always)]
    pub fn enable() {
        // Mirror of `disable()`: keep protected-region code from sinking
        // below the decrement.
        compiler_fence(Ordering::SeqCst);
        PREEMPT_COUNT.fetch_sub(1, Ordering::Relaxed);
    }

    /// Current disable-nesting depth; `0` means preemption is allowed.
    #[inline(always)]
    pub fn count() -> usize {
        PREEMPT_COUNT.load(Ordering::Relaxed)
    }
}
/// C-callable wrapper around [`preempt::disable`] (unmangled symbol for
/// non-Rust callers).
#[no_mangle]
pub extern "C" fn r_preempt_disable() {
    preempt::disable();
}
/// C-callable wrapper around [`preempt::enable`] (unmangled symbol for
/// non-Rust callers).
#[no_mangle]
pub extern "C" fn r_preempt_enable() {
    preempt::enable();
}
/// C-callable wrapper around [`preempt::count`] (unmangled symbol for
/// non-Rust callers).
#[no_mangle]
pub extern "C" fn r_preempt_count() -> usize {
    preempt::count()
}
/// Busy-waiting lock built on [`spin::SpinStrategy`].
pub type Spin<T> = lock::Lock<T, spin::SpinStrategy>;
/// Mutual exclusion as a binary (`count = 1`) semaphore lock.
pub type Mutex<T> = lock::Lock<T, semaphore::SemaphoreStrategy<1>>;
/// Counting-semaphore lock (default count of `SemaphoreStrategy`).
#[allow(dead_code)] // presumably kept until an in-tree user appears — TODO confirm
pub type Semaphore<T> = lock::Lock<T, semaphore::SemaphoreStrategy>;
/// Reader/writer semaphore lock.
pub type RwSemaphore<T> = lock::Lock<T, semaphore::RwSemaphoreStrategy>;
// Guard aliases below: judging by the Read/Write pair, the final `bool`
// const parameter of `lock::Guard` marks write (exclusive) access.
#[allow(dead_code)]
pub type SpinGuard<'lock, T> = lock::Guard<'lock, T, spin::SpinStrategy, true>;
#[allow(dead_code)]
pub type MutexGuard<'lock, T> = lock::Guard<'lock, T, semaphore::SemaphoreStrategy<1>, true>;
#[allow(dead_code)]
pub type SemGuard<'lock, T> = lock::Guard<'lock, T, semaphore::SemaphoreStrategy, true>;
#[allow(dead_code)]
pub type RwSemReadGuard<'lock, T> = lock::Guard<'lock, T, semaphore::RwSemaphoreStrategy, false>;
#[allow(dead_code)]
pub type RwSemWriteGuard<'lock, T> = lock::Guard<'lock, T, semaphore::RwSemaphoreStrategy, true>;
// NOTE(review): the const parameter presumably selects the kernel (`true`)
// vs user (`false`) condvar flavor — confirm against `condvar`.
pub type CondVar = condvar::CondVar<true>;
pub type UCondVar = condvar::CondVar<false>;
  54. pub struct Locked<T: Sized, U: ?Sized> {
  55. inner: UnsafeCell<T>,
  56. guard: *const U,
  57. }
  58. unsafe impl<T: Sized + Send, U: ?Sized> Send for Locked<T, U> {}
  59. unsafe impl<T: Sized + Send + Sync, U: ?Sized> Sync for Locked<T, U> {}
  60. impl<T: Sized + Sync, U: ?Sized> Locked<T, U> {
  61. pub fn new(value: T, from: &U) -> Self {
  62. Self {
  63. inner: UnsafeCell::new(value),
  64. guard: from,
  65. }
  66. }
  67. pub fn access<'lt>(&'lt self, guard: &'lt U) -> &'lt T {
  68. assert_eq!(self.guard, guard as *const U, "wrong guard");
  69. // SAFETY: The guard protects the shared access to the inner value.
  70. unsafe { self.inner.get().as_ref() }.unwrap()
  71. }
  72. pub fn access_mut<'lt>(&'lt self, guard: &'lt mut U) -> &'lt mut T {
  73. assert_eq!(self.guard, guard as *const U, "wrong guard");
  74. // SAFETY: The guard protects the exclusive access to the inner value.
  75. unsafe { self.inner.get().as_mut() }.unwrap()
  76. }
  77. }
/// Asserts that the current preemption count permits sleeping.
///
/// `might_sleep!()` requires the count to be exactly `0`;
/// `might_sleep!(n)` requires it to be exactly `n`, for callers that
/// legitimately hold `n` levels of preempt-disable at the call site.
macro_rules! might_sleep {
    () => {
        assert_eq!(
            $crate::sync::preempt::count(),
            0,
            "a might_sleep function called with preempt disabled"
        );
    };
    ($n:expr) => {
        assert_eq!(
            $crate::sync::preempt::count(),
            $n,
            "a might_sleep function called with the preempt count not satisfying its requirement",
        );
    };
}
use core::cell::UnsafeCell;
// Re-export so the rest of the crate can invoke `might_sleep!`.
pub(crate) use might_sleep;