// lock.cc — spinlock, interrupt-state save/restore, and mutex primitives
// for the kernel::async namespace (x86 inline assembly).
  1. #include <assert.h>
  2. #include <kernel/async/lock.hpp>
  3. namespace kernel::async {
  4. static inline void _raw_spin_lock(spinlock_t* lock_addr) {
  5. asm volatile(
  6. "%=:\n\t\
  7. mov $1, %%eax\n\t\
  8. xchg %%eax, (%0)\n\t\
  9. cmp $0, %%eax\n\t\
  10. jne %=b\n\t\
  11. "
  12. :
  13. : "r"(lock_addr)
  14. : "eax", "memory");
  15. }
// Release the lock by atomically storing 0 into the lock word.
//
// `xchg` is implicitly locked on x86, and the "memory" clobber keeps the
// compiler from sinking critical-section stores below the release.
// NOTE(review): on x86 a plain store (`mov $0, (%0)`) already has release
// semantics and would avoid the locked bus cycle — verify before changing.
//
// @param lock_addr  address of the lock word previously taken by
//                   _raw_spin_lock()
static inline void _raw_spin_unlock(spinlock_t* lock_addr) {
    asm volatile(
        "mov $0, %%eax\n\
xchg %%eax, (%0)"
        :
        : "r"(lock_addr)
        : "eax", "memory");
}
  24. static inline lock_context_t _save_interrupt_state() {
  25. lock_context_t retval;
  26. asm volatile(
  27. "pushf\n\t"
  28. "pop %0\n\t"
  29. "cli"
  30. : "=g"(retval)
  31. :
  32. :);
  33. return retval;
  34. }
  35. static inline void _restore_interrupt_state(lock_context_t context) {
  36. asm volatile(
  37. "push %0\n\t"
  38. "popf"
  39. :
  40. : "g"(context)
  41. :);
  42. }
  43. extern "C" void r_preempt_disable();
  44. extern "C" void r_preempt_enable();
  45. extern "C" unsigned long r_preempt_count();
  46. void preempt_disable() {
  47. r_preempt_disable();
  48. }
  49. void preempt_enable() {
  50. r_preempt_enable();
  51. }
  52. unsigned long preempt_count() {
  53. return r_preempt_count();
  54. }
// Acquire `lock`, spinning until it becomes free.
//
// Preemption is disabled *before* the raw acquire so the holder cannot
// be scheduled off-CPU while others spin waiting for the lock.
void spin_lock(spinlock_t& lock) {
    preempt_disable();
    _raw_spin_lock(&lock);
}
// Release `lock`, then re-enable preemption (exact mirror of spin_lock()).
void spin_unlock(spinlock_t& lock) {
    _raw_spin_unlock(&lock);
    preempt_enable();
}
  63. lock_context_t spin_lock_irqsave(spinlock_t& lock) {
  64. auto state = _save_interrupt_state();
  65. preempt_disable();
  66. _raw_spin_lock(&lock);
  67. return state;
  68. }
// Release `lock`, re-enable preemption, then restore the interrupt state
// captured by the matching spin_lock_irqsave().
// NOTE(review): preemption is re-enabled while interrupts are still
// masked; any pending reschedule is presumably deferred until the flags
// restore — confirm this matches the scheduler's expectations.
void spin_unlock_irqrestore(spinlock_t& lock, lock_context_t state) {
    _raw_spin_unlock(&lock);
    preempt_enable();
    _restore_interrupt_state(state);
}
// A mutex must not be destroyed while held: a nonzero m_lock means a
// holder still exists (or the lock word was corrupted).
mutex::~mutex() {
    assert(m_lock == 0);
}
// Acquire the mutex with interrupts masked.
// @return the saved interrupt state for the matching unlock_irq().
lock_context_t mutex::lock_irq() {
    return spin_lock_irqsave(m_lock);
}
// Release the mutex and restore the interrupt state returned by lock_irq().
// @param state  value previously returned by lock_irq()
void mutex::unlock_irq(lock_context_t state) {
    spin_unlock_irqrestore(m_lock, state);
}
  83. } // namespace kernel::async