// page_fault.rs

use core::mem::size_of;

use eonix_hal::mm::flush_tlb;
use eonix_hal::traits::fault::PageFaultErrorCode;
use eonix_mm::address::{Addr as _, AddrOps as _, VRange};
use eonix_mm::paging::PAGE_SIZE;
use posix_types::signal::Signal;

use super::{MMList, VAddr};
use crate::kernel::task::Thread;

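/// One entry of the kernel page fault fix table located between `FIX_START`
/// and `FIX_END`: if a kernel fault happens while executing an instruction in
/// `[start, start + length)`, execution resumes at `jump_address`. `op_type`
/// is currently unused (see the TODO in `try_page_fault_fix`).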
#[repr(C)]
struct FixEntry {
    start: u64,
    length: u64,
    jump_address: u64,
    op_type: u64,
}

impl FixEntry {
    fn start(&self) -> VAddr {
        VAddr::from(self.start as usize)
    }

    fn end(&self) -> VAddr {
        VAddr::from((self.start + self.length) as usize)
    }

    #[allow(dead_code)]
    fn range(&self) -> VRange {
        VRange::new(self.start(), self.end())
    }

    fn jump_address(&self) -> VAddr {
        VAddr::from(self.jump_address as usize)
    }

    fn entries() -> &'static [FixEntry] {
        extern "C" {
            fn FIX_START();
            fn FIX_END();
        }

        unsafe {
            // SAFETY: `FIX_START` and `FIX_END` are defined in the
            // linker script in `.rodata` section.
            core::slice::from_raw_parts(
                FIX_START as usize as *const FixEntry,
                (FIX_END as usize - FIX_START as usize) / size_of::<FixEntry>(),
            )
        }
    }
}
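
// How fix entries get into `[FIX_START, FIX_END)` is up to the assembly and
// linker script. A hypothetical emission site could look like the sketch
// below (section and label names are illustrative assumptions, not the
// project's actual conventions):
//
//     .pushsection .fix_table, "a"
//     .quad 1f        // start: first instruction that may fault
//     .quad 2f - 1f   // length of the guarded instruction range
//     .quad 3f        // jump_address: where to resume after a fault
//     .quad 0         // op_type: unused for now
//     .popsection
// 1:  <instruction that may fault>
// 2:
//     ...
// 3:  <fixup / error path>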

impl MMList {
    /// Handle a user page fault.
    pub async fn handle_user_page_fault(
        &self,
        addr: VAddr,
        error: PageFaultErrorCode,
    ) -> Result<(), Signal> {
        debug_assert!(
            error.contains(PageFaultErrorCode::UserAccess),
            "Kernel mode page fault happened in user space."
        );

        let inner = self.inner.borrow();
        let inner = inner.lock().await;

        let area = inner.areas.get(&VRange::from(addr)).ok_or(Signal::SIGBUS)?;

        // Check user access permission.
        if error.contains(PageFaultErrorCode::Read) && !area.permission.read {
            // Under x86_64, we don't have a way to distinguish between a read
            // fault and a non-present fault. That should be fine, though: x86
            // can't map a page as present but non-readable, so if the area
            // lacks read permission, any access to it is invalid anyway.
            Err(Signal::SIGSEGV)?
        }
        if error.contains(PageFaultErrorCode::Write) && !area.permission.write {
            Err(Signal::SIGSEGV)?
        }
        if error.contains(PageFaultErrorCode::InstructionFetch) && !area.permission.execute {
            Err(Signal::SIGSEGV)?
        }

        let pte = inner
            .page_table
            .iter_user(VRange::from(addr.floor()).grow(PAGE_SIZE))
            .next()
            .expect("If we can find the mapped area, we should be able to find the PTE");

        area.handle(
            pte,
            addr.floor() - area.range().start(),
            error.contains(PageFaultErrorCode::Write),
        )
        .await
        .map_err(|_| Signal::SIGBUS)?;

        flush_tlb(addr.floor().addr());
        Ok(())
    }
}

/// Try to fix the page fault by jumping to the matching fix entry's jump
/// address.
///
/// # Returns
/// The new program counter after fixing.
///
/// # Panics
/// Panics if we can't find the instruction causing the fault in the fix list.
fn try_page_fault_fix(pc: VAddr, addr: VAddr) -> VAddr {
    // TODO: Use `op_type` to fix.
    for entry in FixEntry::entries().iter() {
        if pc >= entry.start() && pc < entry.end() {
            return entry.jump_address();
        }
    }

    kernel_page_fault_die(addr, pc)
}

#[cold]
fn kernel_page_fault_die(vaddr: VAddr, pc: VAddr) -> ! {
    panic!(
        "Invalid kernel mode memory access to {:?} while executing the instruction at {:?}",
        vaddr, pc
    )
}

pub async fn handle_kernel_page_fault(
    fault_pc: VAddr,
    addr: VAddr,
    error: PageFaultErrorCode,
) -> Option<VAddr> {
    debug_assert!(
        !error.contains(PageFaultErrorCode::UserAccess),
        "User mode page fault happened in kernel space."
    );
    debug_assert!(
        !error.contains(PageFaultErrorCode::InstructionFetch),
        "Kernel mode instruction fetch fault."
    );

    // TODO: Move this to `UserBuffer` handler since we shouldn't get any page fault
    // in the kernel except for the instructions in the fix list.
    let mms = &Thread::current().process.mm_list;
    let inner = mms.inner.borrow();
    let inner = inner.lock().await;

    let area = match inner.areas.get(&VRange::from(addr)) {
        Some(area) => area,
        None => {
            return Some(try_page_fault_fix(fault_pc, addr));
        }
    };

    let pte = inner
        .page_table
        .iter_user(VRange::from(addr.floor()).grow(PAGE_SIZE))
        .next()
        .expect("If we can find the mapped area, we should be able to find the PTE");

    if let Err(_) = area
        .handle(
            pte,
            addr.floor() - area.range().start(),
            error.contains(PageFaultErrorCode::Write),
        )
        .await
    {
        return Some(try_page_fault_fix(fault_pc, addr));
    }

    flush_tlb(addr.floor().addr());
    None
}
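
// Illustrative only: how an architecture's fault handler might dispatch to the
// two handlers above. The trap-frame accessors and signal delivery below are
// assumptions for the sketch, not this kernel's actual API:
//
//     if error.contains(PageFaultErrorCode::UserAccess) {
//         if let Err(signal) = mm_list.handle_user_page_fault(addr, error).await {
//             // deliver `signal` to the faulting thread
//         }
//     } else if let Some(new_pc) = handle_kernel_page_fault(pc, addr, error).await {
//         // resume kernel execution at `new_pc`
//     }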