//! Page-fault handling: user-space demand faulting through [`MMList`]
//! and kernel-mode fault fixup via the linker-provided fix table.
  1. use super::{MMList, VAddr};
  2. use crate::kernel::task::{Signal, Thread};
  3. use arch::flush_tlb;
  4. use eonix_hal::traits::fault::PageFaultErrorCode;
  5. use eonix_mm::address::{Addr as _, AddrOps as _, VRange};
  6. use eonix_mm::paging::PAGE_SIZE;
  7. use eonix_runtime::task::Task;
/// One entry of the kernel page-fault fixup table.
///
/// `#[repr(C)]` because the layout must match the records placed between
/// the `FIX_START` and `FIX_END` linker symbols (see `FixEntry::entries`).
#[repr(C)]
struct FixEntry {
    // Virtual address of the first instruction covered by this entry.
    start: u64,
    // Length in bytes of the covered instruction range.
    length: u64,
    // Address to jump to when a fault occurs inside the range.
    jump_address: u64,
    // Kind of fixup to apply; currently unread (see the TODO in
    // `try_page_fault_fix`).
    op_type: u64,
}
impl FixEntry {
    /// Start of the covered instruction range as a virtual address.
    fn start(&self) -> VAddr {
        VAddr::from(self.start as usize)
    }

    /// One-past-the-end of the covered instruction range.
    fn end(&self) -> VAddr {
        VAddr::from((self.start + self.length) as usize)
    }

    /// The covered range `[start, end)` as a `VRange`.
    fn range(&self) -> VRange {
        VRange::new(self.start(), self.end())
    }

    /// Program counter to resume at when a fault hits this entry's range.
    fn jump_address(&self) -> VAddr {
        VAddr::from(self.jump_address as usize)
    }

    /// All fixup entries laid out by the linker between `FIX_START` and
    /// `FIX_END`.
    fn entries() -> &'static [FixEntry] {
        extern "C" {
            static FIX_START: *const FixEntry;
            static FIX_END: *const FixEntry;
        }
        unsafe {
            // SAFETY: `FIX_START` and `FIX_END` are defined in the
            // linker script in `.rodata` section.
            // NOTE(review): declaring the externs as `*const FixEntry`
            // statics means `FIX_START` *loads a pointer value stored at*
            // the symbol, rather than taking the symbol's own address
            // (`&FIX_START as *const FixEntry`). Confirm against the linker
            // script that pointers are actually stored there.
            core::slice::from_raw_parts(
                FIX_START,
                (FIX_END as usize - FIX_START as usize) / size_of::<FixEntry>(),
            )
        }
    }
}
  43. impl MMList {
  44. /// Handle a user page fault.
  45. pub async fn handle_user_page_fault(
  46. &self,
  47. addr: VAddr,
  48. error: PageFaultErrorCode,
  49. ) -> Result<(), Signal> {
  50. debug_assert!(
  51. error.contains(PageFaultErrorCode::UserAccess),
  52. "Kernel mode page fault happened in user space."
  53. );
  54. let inner = self.inner.borrow();
  55. let inner = inner.lock().await;
  56. let area = inner.areas.get(&VRange::from(addr)).ok_or(Signal::SIGBUS)?;
  57. // Check user access permission.
  58. if error.contains(PageFaultErrorCode::Write) && !area.permission.write {
  59. Err(Signal::SIGSEGV)?
  60. }
  61. if error.contains(PageFaultErrorCode::InstructionFetch) && !area.permission.execute {
  62. Err(Signal::SIGSEGV)?
  63. }
  64. let pte = inner
  65. .page_table
  66. .iter_user(VRange::from(addr.floor()).grow(PAGE_SIZE))
  67. .next()
  68. .expect("If we can find the mapped area, we should be able to find the PTE");
  69. area.handle(pte, addr.floor() - area.range().start())
  70. .map_err(|_| Signal::SIGBUS)?;
  71. #[cfg(not(target_arch = "x86_64"))]
  72. {
  73. // Flush the TLB for the affected address range.
  74. // x86 CPUs will try to retrieve the PTE again for non-present entries.
  75. // So we don't need to flush TLB.
  76. flush_tlb(addr.floor().addr());
  77. }
  78. Ok(())
  79. }
  80. }
  81. /// Try to fix the page fault by jumping to the `error` address.
  82. ///
  83. /// # Return
  84. /// Returns the new program counter after fixing.
  85. ///
  86. /// # Panic
  87. /// Panics if we can't find the instruction causing the fault in the fix list.
  88. fn try_page_fault_fix(pc: VAddr, addr: VAddr) -> VAddr {
  89. // TODO: Use `op_type` to fix.
  90. for entry in FixEntry::entries().iter() {
  91. if pc >= entry.start() && pc < entry.end() {
  92. return entry.jump_address();
  93. }
  94. }
  95. kernel_page_fault_die(addr, pc)
  96. }
/// Terminal handler for an unfixable kernel-mode page fault.
///
/// Reached from `try_page_fault_fix` when no fixup entry covers the
/// faulting instruction; never returns.
#[cold]
fn kernel_page_fault_die(vaddr: VAddr, pc: VAddr) -> ! {
    panic!(
        "Invalid kernel mode memory access to {:?} while executing the instruction at {:?}",
        vaddr, pc
    )
}
  104. pub fn handle_kernel_page_fault(
  105. fault_pc: VAddr,
  106. addr: VAddr,
  107. error: PageFaultErrorCode,
  108. ) -> Option<VAddr> {
  109. debug_assert!(
  110. !error.contains(PageFaultErrorCode::UserAccess),
  111. "User mode page fault happened in kernel space."
  112. );
  113. debug_assert!(
  114. !error.contains(PageFaultErrorCode::InstructionFetch),
  115. "Kernel mode instruction fetch fault."
  116. );
  117. // TODO: Move this to `UserBuffer` handler since we shouldn'e get any page fault
  118. // in the kernel except for the instructions in the fix list.
  119. let mms = &Thread::current().process.mm_list;
  120. let inner = mms.inner.borrow();
  121. let inner = Task::block_on(inner.lock());
  122. let area = match inner.areas.get(&VRange::from(addr)) {
  123. Some(area) => area,
  124. None => {
  125. return Some(try_page_fault_fix(fault_pc, addr));
  126. }
  127. };
  128. let pte = inner
  129. .page_table
  130. .iter_user(VRange::from(addr.floor()).grow(PAGE_SIZE))
  131. .next()
  132. .expect("If we can find the mapped area, we should be able to find the PTE");
  133. if let Err(_) = area.handle(pte, addr.floor() - area.range().start()) {
  134. return Some(try_page_fault_fix(fault_pc, addr));
  135. }
  136. None
  137. }