// page_fault.rs
  1. use bindings::kernel::mem::paging::pfn_to_page;
  2. use bindings::{PA_A, PA_ANON, PA_COW, PA_MMAP, PA_P, PA_RW};
  3. use bitflags::bitflags;
  4. use crate::bindings::root::interrupt_stack;
  5. use crate::kernel::mem::paging::{Page, PageBuffer};
  6. use crate::kernel::mem::phys::{CachedPP, PhysPtr};
  7. use crate::kernel::mem::{Mapping, VRange};
  8. use crate::kernel::task::{ProcessList, Signal, Thread};
  9. use crate::prelude::*;
  10. use super::{MMList, VAddr};
bitflags! {
    /// x86 page-fault error code bits, as delivered by the CPU in the
    /// exception frame (`int_stack.error_code`).
    pub struct PageFaultError: u64 {
        /// Set: protection violation on a present page.
        /// Clear: the page was not present.
        const Present = 0x0001;
        /// The faulting access was a write.
        const Write = 0x0002;
        /// The fault occurred while in user mode.
        const User = 0x0004;
        /// A reserved bit was set in a paging-structure entry.
        const ReservedSet = 0x0008;
        /// The fault was caused by an instruction fetch.
        const InstructionFetch = 0x0010;
        /// The fault was caused by a protection-key violation.
        const ProtectionKey = 0x0020;
        /// The fault was related to SGX access control.
        const SGX = 0x8000;
    }
}
/// One entry of the kernel fix (exception) table consulted by
/// `try_page_fault_fix`.
///
/// `repr(C)` so the layout matches the entries the build places between
/// the `FIX_START`/`FIX_END` linker symbols.
#[repr(C)]
struct FixEntry {
    // First instruction address covered by this entry.
    start: u64,
    // Length in bytes of the covered instruction range.
    length: u64,
    // Address execution resumes at when a fault hits the range.
    jump_address: u64,
    // Kind of fixup to apply — currently unused (see the TODO in
    // `try_page_fault_fix`).
    op_type: u64,
}
impl MMList {
    /// Handle a page fault at `addr` within this address space.
    ///
    /// Returns `Ok(())` when the fault was resolved (or fixed up for a
    /// kernel-mode access via the fix table), and `Err(signal)` when the
    /// faulting user process should be delivered `signal` instead.
    ///
    /// Two lazy-mapping cases are resolved here: copy-on-write pages
    /// (`PA_COW`) and not-yet-loaded file-backed mmap pages (`PA_MMAP`).
    fn handle_page_fault(
        &self,
        int_stack: &mut interrupt_stack,
        addr: VAddr,
        error: PageFaultError,
    ) -> Result<(), Signal> {
        let inner = self.inner.lock();

        // Find the mapped area covering the faulting address. A miss in
        // user mode is a hard error; a miss in kernel mode may still be
        // recoverable through the fix-entry table.
        let area = match inner.areas.get(&VRange::from(addr)) {
            Some(area) => area,
            None => {
                if error.contains(PageFaultError::User) {
                    return Err(Signal::SIGBUS);
                } else {
                    try_page_fault_fix(int_stack, addr);
                    return Ok(());
                }
            }
        };

        // User access permission violation, check user access permission.
        // A write to a non-writable area or an instruction fetch from a
        // non-executable area kills the current process outright.
        if error.contains(PageFaultError::User | PageFaultError::Present) {
            if error.contains(PageFaultError::Write) && !area.permission.write {
                ProcessList::kill_current(Signal::SIGSEGV)
            }
            if error.contains(PageFaultError::InstructionFetch) && !area.permission.execute {
                ProcessList::kill_current(Signal::SIGSEGV)
            }
        }

        // The area lookup succeeded, so a PTE for the faulting page must
        // exist in the user page table.
        let pte = self
            .page_table
            .iter_user(VRange::new(addr.floor(), addr.floor() + 0x1000))
            .unwrap()
            .next()
            .expect("If we can find the mapped area, we should be able to find the PTE");

        // NOTE(review): `is_mapped` is true only for *file-backed* areas,
        // so a non-present fault on an anonymous area falls through to the
        // kernel fix path below — confirm this is the intended policy.
        let is_mapped = matches!(&area.mapping, Mapping::File(_));
        if !is_mapped && !error.contains(PageFaultError::Present) {
            try_page_fault_fix(int_stack, addr);
            return Ok(());
        }

        let mut pfn = pte.pfn();
        let mut attributes = pte.attributes();

        // Copy-on-write resolution: drop the COW marker and restore the
        // area's real write permission.
        if attributes & PA_COW as usize != 0 {
            attributes &= !PA_COW as usize;
            if area.permission.write {
                attributes |= PA_RW as usize;
            } else {
                attributes &= !PA_RW as usize;
            }

            // TODO!!!: Change this.
            let page = unsafe { pfn_to_page(pfn).as_mut().unwrap() };

            // Sole owner of the frame: no copy needed, just fix the
            // attributes in place.
            if page.refcount == 1 {
                pte.set_attributes(attributes);
                return Ok(());
            }

            // Shared frame: make a private copy. Anonymous COW pages are
            // zero-filled; otherwise the old frame's contents are copied.
            let new_page = Page::alloc_one();
            if attributes & PA_ANON as usize != 0 {
                new_page.zero();
            } else {
                new_page
                    .as_cached()
                    .as_mut_slice::<u8>(0x1000)
                    .copy_from_slice(CachedPP::new(pfn).as_slice(0x1000));
            }

            // Clear the accessed and anonymous markers on the private
            // copy, release our reference to the shared frame and point
            // the PTE at the new one.
            attributes &= !(PA_A | PA_ANON) as usize;
            page.refcount -= 1;
            pfn = new_page.into_pfn();
            pte.set(pfn, attributes);
        }

        // TODO: shared mapping
        // Demand paging for file-backed mappings: the PTE still carries
        // PA_MMAP until the page's contents are read in from the file.
        if attributes & PA_MMAP as usize != 0 {
            attributes |= PA_P as usize;

            if let Mapping::File(mapping) = &area.mapping {
                let load_offset = addr.floor() - area.range().start();
                if load_offset < mapping.length {
                    // SAFETY: Since we are here, the `pfn` must refer to a valid buddy page.
                    let page = unsafe { Page::from_pfn(pfn, 0) };
                    let nread = mapping
                        .file
                        .read(
                            &mut PageBuffer::new(page.clone()),
                            mapping.offset + load_offset,
                        )
                        .map_err(|_| Signal::SIGBUS)?;

                    // Zero the tail the file read did not fill...
                    if nread < page.len() {
                        page.as_cached().as_mut_slice::<u8>(0x1000)[nread..].fill(0);
                    }

                    // ...and everything past the end of the mapping.
                    if mapping.length - load_offset < 0x1000 {
                        let length_to_end = mapping.length - load_offset;
                        page.as_cached().as_mut_slice::<u8>(0x1000)[length_to_end..].fill(0);
                    }
                }
                // Otherwise, the page is kept zero emptied.

                attributes &= !PA_MMAP as usize;
                pte.set_attributes(attributes);
            } else {
                panic!("Anonymous mapping should not be PA_MMAP");
            }
        }

        Ok(())
    }
}
extern "C" {
    /// Bounds of the fix-entry table placed by the linker script.
    ///
    /// NOTE(review): these are declared as pointer-*typed* statics, so
    /// using `FIX_START` reads the value stored at the symbol's address,
    /// not the symbol address itself. That is only correct if the linker
    /// script stores the table's start/end addresses at these symbols —
    /// verify against the linker script; the usual idiom for boundary
    /// symbols is a zero-sized/`u8` static whose *address* is taken.
    static FIX_START: *const FixEntry;
    static FIX_END: *const FixEntry;
}
  134. /// Try to fix the page fault by jumping to the `error` address.
  135. ///
  136. /// Panic if we can't find the `ip` in the fix list.
  137. fn try_page_fault_fix(int_stack: &mut interrupt_stack, addr: VAddr) {
  138. let ip = int_stack.v_rip as u64;
  139. // TODO: Use `op_type` to fix.
  140. // SAFETY: `FIX_START` and `FIX_END` are defined in the linker script in `.rodata` section.
  141. let entries = unsafe {
  142. core::slice::from_raw_parts(
  143. FIX_START,
  144. (FIX_END as usize - FIX_START as usize) / size_of::<FixEntry>(),
  145. )
  146. };
  147. for entry in entries.iter() {
  148. if ip >= entry.start && ip < entry.start + entry.length {
  149. int_stack.v_rip = entry.jump_address as usize;
  150. return;
  151. }
  152. }
  153. kernel_page_fault_die(addr, ip as usize)
  154. }
  155. fn kernel_page_fault_die(vaddr: VAddr, ip: usize) -> ! {
  156. panic!(
  157. "Invalid kernel mode memory access to {:#8x} while executing the instruction at {:#8x}",
  158. vaddr.0, ip
  159. )
  160. }
  161. pub fn handle_page_fault(int_stack: &mut interrupt_stack) {
  162. let error = PageFaultError::from_bits_truncate(int_stack.error_code);
  163. let vaddr = VAddr(arch::x86_64::vm::get_cr2());
  164. let result = Thread::current()
  165. .process
  166. .mm_list
  167. .handle_page_fault(int_stack, vaddr, error);
  168. if let Err(signal) = result {
  169. println_debug!(
  170. "Page fault on {:#x} in user space at {:#x}",
  171. vaddr.0,
  172. int_stack.v_rip
  173. );
  174. ProcessList::kill_current(signal)
  175. }
  176. }