lib.rs 4.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179
  1. #![no_std]
  2. mod guard;
  3. use core::cell::UnsafeCell;
  4. use core::marker::PhantomData;
  5. use core::sync::atomic::{AtomicBool, Ordering};
  6. use eonix_sync_base::{Relax, SpinRelax};
  7. pub use guard::{SpinGuard, UnlockedSpinGuard};
/// Execution context captured when a spinlock is acquired and restored when it
/// is released (e.g. the preemption state — see [`DisablePreemption`]).
pub trait SpinContext {
    /// Save/establish the context before entering the critical section.
    fn save() -> Self;
    /// Restore the context after the critical section is over.
    fn restore(self);
}
/// A [`SpinContext`] that can be temporarily given up while the lock is
/// released, then re-established. `unlock` followed by
/// [`UnlockedContext::relock`] round-trips back to `Self`.
pub trait ContextUnlock: SpinContext {
    /// State representing this context while the lock is released.
    type Unlocked: UnlockedContext<Relocked = Self>;
    /// Give up the context (e.g. re-enable preemption) for an unlocked period.
    fn unlock(self) -> Self::Unlocked;
}
/// The released counterpart of a [`ContextUnlock`] context; re-acquiring it
/// yields the original context type again.
pub trait UnlockedContext {
    /// Context type produced when this state is re-established.
    type Relocked: ContextUnlock<Unlocked = Self>;
    /// Re-establish the context (e.g. disable preemption again) before the
    /// lock is re-taken.
    fn relock(self) -> Self::Relocked;
}
/// A no-op context: nothing is saved or restored around the critical section.
pub struct NoContext;
/// Context that keeps preemption disabled (via `eonix_preempt`) while the
/// lock is held.
pub struct DisablePreemption();
/// A spinlock is a lock that uses busy-waiting to acquire the lock.
/// It is useful for short critical sections where the overhead of a context switch
/// is too high.
///
/// `R` selects the relax strategy used while spinning (defaults to `SpinRelax`).
// NOTE: the first line used to start with `////`, which is a plain comment,
// so this doc comment never attached to the struct in rustdoc; fixed to `///`.
#[derive(Debug, Default)]
pub struct Spin<T, R = SpinRelax>
where
    T: ?Sized,
{
    // Zero-sized marker tying the relax strategy `R` to the lock type.
    _phantom: PhantomData<R>,
    // `true` while the lock is held.
    locked: AtomicBool,
    // The protected value; `UnsafeCell` allows handing out `&mut T` through
    // `&self` once the lock is held. Must stay the last field (`T: ?Sized`).
    value: UnsafeCell<T>,
}
  34. impl<T, R> Spin<T, R>
  35. where
  36. R: Relax,
  37. {
  38. pub const fn new(value: T) -> Self {
  39. Self {
  40. locked: AtomicBool::new(false),
  41. value: UnsafeCell::new(value),
  42. _phantom: PhantomData,
  43. }
  44. }
  45. pub fn into_inner(mut self) -> T {
  46. assert!(
  47. !*self.locked.get_mut(),
  48. "Spin::take(): Cannot take a locked Spin"
  49. );
  50. self.value.into_inner()
  51. }
  52. }
impl<T, R> Spin<T, R>
where
    T: ?Sized,
{
    /// Release the lock.
    ///
    /// # Safety
    /// This function is unsafe because the caller MUST ensure that the protected
    /// value is no longer accessed after calling this function.
    unsafe fn do_unlock(&self) {
        // `Release` pairs with the `Acquire` in `do_lock`, making all writes
        // to the protected value visible to the next thread that acquires.
        let locked = self.locked.swap(false, Ordering::Release);
        debug_assert!(locked, "Spin::unlock(): Unlocking an unlocked lock");
    }
}
  65. impl<T, R> Spin<T, R>
  66. where
  67. T: ?Sized,
  68. R: Relax,
  69. {
  70. pub fn lock_with_context<C>(&self, context: C) -> SpinGuard<'_, T, C, R>
  71. where
  72. C: SpinContext,
  73. {
  74. self.do_lock();
  75. SpinGuard::new(
  76. self,
  77. unsafe {
  78. // SAFETY: We are holding the lock, so we can safely access the value.
  79. &mut *self.value.get()
  80. },
  81. context,
  82. )
  83. }
  84. pub fn lock(&self) -> SpinGuard<'_, T, DisablePreemption, R> {
  85. self.lock_with_context(DisablePreemption::save())
  86. }
  87. pub fn get_mut(&mut self) -> &mut T {
  88. // SAFETY: The exclusive access to the lock is guaranteed by the borrow checker.
  89. unsafe { &mut *self.value.get() }
  90. }
  91. fn do_lock(&self) {
  92. while let Err(_) =
  93. self.locked
  94. .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
  95. {
  96. R::relax();
  97. }
  98. }
  99. }
// SAFETY: Sending the lock to another thread moves the protected value with
// it, so the lock is `Send` whenever `T` is `Send`. (The previous comment was
// a copy of the `Sync` rationale and did not justify `Send`.)
unsafe impl<T, R> Send for Spin<T, R> where T: ?Sized + Send {}
// SAFETY: The lock serializes all access to the protected value, handing out
// `&mut T` to at most one thread at a time, so sharing `&Spin` across threads
// only requires `T: Send` (no `T: Sync` needed).
unsafe impl<T, R> Sync for Spin<T, R> where T: ?Sized + Send {}
impl SpinContext for NoContext {
    /// Nothing to save.
    fn save() -> Self {
        Self
    }
    /// Nothing to restore.
    fn restore(self) {}
}
impl ContextUnlock for NoContext {
    type Unlocked = NoContext;
    /// No-op: there is no context to give up.
    fn unlock(self) -> Self::Unlocked {
        self
    }
}
impl UnlockedContext for NoContext {
    type Relocked = NoContext;
    /// No-op: there is no context to re-establish.
    fn relock(self) -> Self::Relocked {
        self
    }
}
impl SpinContext for DisablePreemption {
    /// Disable preemption for the duration of the critical section.
    fn save() -> Self {
        eonix_preempt::disable();
        Self()
    }
    /// Re-enable preemption once the critical section is over.
    fn restore(self) {
        eonix_preempt::enable();
    }
}
impl ContextUnlock for DisablePreemption {
    type Unlocked = DisablePreemption;
    /// Re-enable preemption while the lock is temporarily released.
    fn unlock(self) -> Self::Unlocked {
        eonix_preempt::enable();
        self
    }
}
impl UnlockedContext for DisablePreemption {
    type Relocked = DisablePreemption;
    /// Disable preemption again before the lock is re-acquired.
    fn relock(self) -> Self::Relocked {
        eonix_preempt::disable();
        self
    }
}