spin.rs

mod guard;
mod relax;
mod spin_irq;

use core::{
    cell::UnsafeCell,
    marker::PhantomData,
    sync::atomic::{AtomicBool, Ordering},
};
use spin_irq::IrqStateGuard;

pub use guard::{SpinGuard, UnlockedSpinGuard};
pub use relax::{LoopRelax, Relax, SpinRelax};
pub use spin_irq::{SpinIrqGuard, UnlockedSpinIrqGuard};

/// A spinlock is a lock that busy-waits until it becomes available.
/// It is useful for short critical sections where the overhead of a
/// context switch would be too high.
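///
/// # Example
///
/// A minimal usage sketch, assuming `SpinGuard` dereferences to the protected
/// value; the doctest is `ignore`d since this crate targets a freestanding
/// kernel environment:
///
/// ```ignore
/// let counter = Spin::new(0u32);
/// *counter.lock() += 1;
/// assert_eq!(*counter.lock(), 1);
/// ```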
#[derive(Debug, Default)]
pub struct Spin<T, R = SpinRelax>
where
    T: ?Sized,
{
    _phantom: PhantomData<R>,
    locked: AtomicBool,
    value: UnsafeCell<T>,
}

impl<T, R> Spin<T, R>
where
    R: Relax,
{
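    /// Create a new spinlock in the unlocked state, protecting `value`.
    ///
    /// Because this is a `const fn`, it can initialize a `static`. A hedged
    /// sketch (the static's name is illustrative, not from this crate):
    ///
    /// ```ignore
    /// static TICKS: Spin<u64> = Spin::new(0);
    /// ```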
    pub const fn new(value: T) -> Self {
        Self {
            locked: AtomicBool::new(false),
            value: UnsafeCell::new(value),
            _phantom: PhantomData,
        }
    }
}

impl<T, R> Spin<T, R>
where
    T: ?Sized,
{
    /// # Safety
    /// This function is unsafe because the caller MUST ensure that the
    /// protected value is no longer accessed after calling this function.
    unsafe fn do_unlock(&self) {
        let locked = self.locked.swap(false, Ordering::Release);
        debug_assert!(locked, "Spin::unlock(): Unlocking an unlocked lock");
        // Re-enable preemption, pairing with the disable in `do_lock()`.
        eonix_preempt::enable();
    }
}

impl<T, R> Spin<T, R>
where
    T: ?Sized,
    R: Relax,
{
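    /// Acquire the lock, spinning (via `R::relax()`) until it is available,
    /// and return a guard that releases the lock when dropped.
    ///
    /// A minimal sketch, `ignore`d as a doctest since the crate targets a
    /// freestanding kernel:
    ///
    /// ```ignore
    /// let data = Spin::new(0u8);
    /// {
    ///     let mut guard = data.lock();
    ///     *guard = 42;
    /// } // `guard` is dropped here and the lock is released.
    /// ```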
    pub fn lock(&self) -> SpinGuard<'_, T, R> {
        self.do_lock();
        SpinGuard {
            lock: self,
            // SAFETY: We are holding the lock, so we can safely access the value.
            value: unsafe { &mut *self.value.get() },
            _not_send: PhantomData,
        }
    }
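
    /// Acquire the lock with local IRQs disabled, restoring the saved IRQ
    /// state when the returned guard is dropped. Use this when the lock may
    /// also be taken from an interrupt handler, which could otherwise
    /// deadlock against the current CPU.
    ///
    /// A hedged sketch (the static's name is illustrative):
    ///
    /// ```ignore
    /// static IRQ_COUNT: Spin<u64> = Spin::new(0);
    /// *IRQ_COUNT.lock_irq() += 1;
    /// ```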
    pub fn lock_irq(&self) -> SpinIrqGuard<'_, T, R> {
        let irq_state = arch::disable_irqs_save();
        let guard = self.lock();
        SpinIrqGuard {
            guard,
            irq_state: IrqStateGuard::new(irq_state),
            _not_send: PhantomData,
        }
    }

    /// Get a mutable reference to the protected value without locking.
    pub fn get_mut(&mut self) -> &mut T {
        // SAFETY: Exclusive access to the lock is guaranteed by the borrow checker.
        unsafe { &mut *self.value.get() }
    }

    fn do_lock(&self) {
        eonix_preempt::disable();
        // Spin until we win the race to flip `locked` from `false` to `true`.
        // `compare_exchange_weak` may fail spuriously, which only costs us one
        // more iteration of the loop.
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            R::relax();
        }
    }
}

impl<T, R> Clone for Spin<T, R>
where
    T: ?Sized + Clone,
    R: Relax,
{
    fn clone(&self) -> Self {
        // Take the lock so that we clone a consistent snapshot of the value.
        Self::new(self.lock().clone())
    }
}

// SAFETY: As long as the protected value can be sent between threads,
// the lock as a whole can be sent between threads.
unsafe impl<T, R> Send for Spin<T, R> where T: ?Sized + Send {}

// SAFETY: As long as the protected value can be sent between threads, the lock
// can be shared between threads, since it only ever hands out exclusive access.
unsafe impl<T, R> Sync for Spin<T, R> where T: ?Sized + Send {}
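
// A hedged smoke-test sketch: it assumes `SpinGuard` implements `DerefMut`
// and that host-side stubs exist for `eonix_preempt` and `arch`, which this
// kernel crate may not provide. Treat it as illustrative only.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn lock_guards_value() {
        let lock: Spin<u32, LoopRelax> = Spin::new(0);
        // Mutate through one guard, then observe the result through another.
        *lock.lock() += 1;
        assert_eq!(*lock.lock(), 1);
    }
}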