page_table.rs

use lazy_static::lazy_static;

use crate::bindings::root::{EINVAL, KERNEL_PML4};
use crate::prelude::*;

use super::{
    paging::Page,
    phys::{CachedPP, PhysPtr as _},
    VAddr, VRange,
};
use super::{MMArea, Permission};

// Hardware-defined x86-64 page table entry bits.
const PA_P: usize = 0x001; // Present
const PA_RW: usize = 0x002; // Read/write
const PA_US: usize = 0x004; // User/supervisor
const PA_PWT: usize = 0x008; // Page-level write-through
const PA_PCD: usize = 0x010; // Page-level cache disable
const PA_A: usize = 0x020; // Accessed
const PA_D: usize = 0x040; // Dirty
const PA_PS: usize = 0x080; // Page size (huge page)
const PA_G: usize = 0x100; // Global

// Bits 9-11 are ignored by the MMU and free for software use.
const PA_COW: usize = 0x200; // Software: copy-on-write
const PA_MMAP: usize = 0x400; // Software: mmap'ed area
const PA_ANON: usize = 0x800; // Software: anonymous mapping

const PA_NXE: usize = 0x8000_0000_0000_0000; // No-execute (bit 63)
const PA_MASK: usize = 0xfff0_0000_0000_0fff; // All attribute (non-address) bits
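
/// A single page table entry: a physical frame address combined with the
/// attribute bits defined above.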
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct PTE(usize);
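
/// An owned top-level (PML4) page table. The upper 256 entries are shared
/// with the kernel; user mappings live in the lower half.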
#[derive(Debug)]
pub struct PageTable {
    page: Page,
}
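
/// Walks the PTEs covering a virtual address range, allocating intermediate
/// page tables on demand. `KERNEL` selects the attributes given to newly
/// allocated tables (global kernel mappings vs. user mappings).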
pub struct PTEIterator<'lt, const KERNEL: bool> {
    // Number of PTEs left to yield.
    count: usize,
    // Current index at each of the four paging levels.
    i4: u16,
    i3: u16,
    i2: u16,
    i1: u16,
    // Cached pointers to the table currently being walked at each level.
    p4: CachedPP,
    p3: CachedPP,
    p2: CachedPP,
    p1: CachedPP,
    start: VAddr,
    end: VAddr,
    _phantom: core::marker::PhantomData<&'lt ()>,
}
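
// A shared, zero-filled page used as the initial copy-on-write backing for
// anonymous and mmap'ed mappings.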
lazy_static! {
    static ref EMPTY_PAGE: Page = {
        let page = Page::alloc_one();
        page.zero();
        page
    };
}

impl PTE {
    pub fn is_user(&self) -> bool {
        self.0 & PA_US != 0
    }

    pub fn is_present(&self) -> bool {
        self.0 & PA_P != 0
    }

    /// The physical address bits of this entry: the frame base address, not
    /// a frame index.
    pub fn pfn(&self) -> usize {
        self.0 & !PA_MASK
    }

    pub fn attributes(&self) -> usize {
        self.0 & PA_MASK
    }

    pub fn set(&mut self, pfn: usize, attributes: usize) {
        self.0 = pfn | attributes;
    }

    pub fn set_pfn(&mut self, pfn: usize) {
        self.set(pfn, self.attributes())
    }

    pub fn set_attributes(&mut self, attributes: usize) {
        self.set(self.pfn(), attributes)
    }

    /// Return a cached pointer to the next-level page table, allocating and
    /// installing a zeroed one if this entry is not yet present.
    fn parse_page_table(&mut self, kernel: bool) -> CachedPP {
        let attributes = if kernel {
            PA_P | PA_RW | PA_G
        } else {
            PA_P | PA_RW | PA_US
        };

        if self.is_present() {
            CachedPP::new(self.pfn())
        } else {
            let page = Page::alloc_one();
            let pp = page.as_cached();
            page.zero();
            self.set(page.into_pfn(), attributes);
            pp
        }
    }
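
    /// Make `self` a copy-on-write reference to the page mapped by `from`:
    /// both entries lose write access and gain PA_COW, so a later write
    /// fault can give each side its own copy.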
    pub fn setup_cow(&mut self, from: &mut Self) {
        self.set(
            unsafe { Page::from_pfn(from.pfn(), 0) }.into_pfn(),
            (from.attributes() & !(PA_RW | PA_A | PA_D)) | PA_COW,
        );

        from.set_attributes((from.attributes() & !PA_RW) | PA_COW);
    }

    pub fn clear(&mut self) {
        self.set(0, 0)
    }

    /// Take ownership of the page referenced by this PTE, clear the PTE, and
    /// return the page.
    pub fn take(&mut self) -> Page {
        // SAFETY: We acquire ownership of the page from the page table and then
        // clear the PTE so that no one can access the page through it afterwards.
        let page = unsafe { Page::take_pfn(self.pfn(), 0) };
        self.clear();
        page
    }
}

impl<'lt, const KERNEL: bool> PTEIterator<'lt, KERNEL> {
    fn new(pt: &'lt Page, start: VAddr, end: VAddr) -> KResult<Self> {
        if start > end {
            return Err(EINVAL);
        }

        let p4 = pt.as_cached();
        let p3 = p4.as_mut_slice::<PTE>(512)[Self::index(4, start)].parse_page_table(KERNEL);
        let p2 = p3.as_mut_slice::<PTE>(512)[Self::index(3, start)].parse_page_table(KERNEL);
        let p1 = p2.as_mut_slice::<PTE>(512)[Self::index(2, start)].parse_page_table(KERNEL);

        Ok(Self {
            count: (end.0 - start.0) >> 12,
            i4: Self::index(4, start) as u16,
            i3: Self::index(3, start) as u16,
            i2: Self::index(2, start) as u16,
            i1: Self::index(1, start) as u16,
            p4,
            p3,
            p2,
            p1,
            start,
            end,
            _phantom: core::marker::PhantomData,
        })
    }

    /// Bit offset of the table index for the given paging level: 12 for
    /// level 1, then 9 more bits per level.
    fn offset(level: u32) -> usize {
        12 + (level as usize - 1) * 9
    }

    /// The 9-bit table index of `vaddr` at the given paging level.
    fn index(level: u32, vaddr: VAddr) -> usize {
        (vaddr.0 >> Self::offset(level)) & 0x1ff
    }
}
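
// Yields a mutable reference to every PTE in the range. The four indices act
// as a carry chain: whenever a lower-level index wraps to zero, the next level
// advances and the cached table pointers are reloaded.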
impl<'lt, const KERNEL: bool> Iterator for PTEIterator<'lt, KERNEL> {
    type Item = &'lt mut PTE;

    fn next(&mut self) -> Option<Self::Item> {
        if self.count != 0 {
            self.count -= 1;
        } else {
            return None;
        }

        let retval = &mut self.p1.as_mut_slice::<PTE>(512)[self.i1 as usize];

        self.i1 = (self.i1 + 1) % 512;
        if self.i1 == 0 {
            self.i2 = (self.i2 + 1) % 512;
            if self.i2 == 0 {
                self.i3 = (self.i3 + 1) % 512;
                if self.i3 == 0 {
                    self.i4 = (self.i4 + 1) % 512;
                    if self.i4 == 0 {
                        panic!("PTEIterator: out of range");
                    }
                }
                self.p3 =
                    self.p4.as_mut_slice::<PTE>(512)[self.i4 as usize].parse_page_table(KERNEL);
            }
            self.p2 = self.p3.as_mut_slice::<PTE>(512)[self.i3 as usize].parse_page_table(KERNEL);
        }
        self.p1 = self.p2.as_mut_slice::<PTE>(512)[self.i2 as usize].parse_page_table(KERNEL);

        Some(retval)
    }
}

impl PageTable {
    pub fn new() -> Self {
        let page = Page::alloc_one();
        page.zero();

        // TODO: Copy only the kernel space mappings.
        let kernel_space_page_table = CachedPP::new(KERNEL_PML4 as usize);
        page.as_cached().as_mut_slice::<u64>(512)[256..]
            .copy_from_slice(&kernel_space_page_table.as_mut_slice(512)[256..]);

        Self { page }
    }

    pub fn iter_user(&self, range: VRange) -> KResult<PTEIterator<'_, false>> {
        PTEIterator::new(&self.page, range.start().floor(), range.end().ceil())
    }

    pub fn iter_kernel(&self, range: VRange) -> KResult<PTEIterator<'_, true>> {
        PTEIterator::new(&self.page, range.start().floor(), range.end().ceil())
    }

    pub fn switch(&self) {
        arch::vm::switch_page_table(self.page.as_phys())
    }
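
    /// Unmap `area` and release its pages. If this page table is not the
    /// active one, TLB invalidation is skipped; otherwise small ranges are
    /// flushed page by page with `invlpg` and larger ones with a full flush.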
    pub fn unmap(&self, area: &MMArea) {
        let range = area.range();
        let use_invlpg = range.len() / 4096 < 4;
        let iter = self.iter_user(range).unwrap();

        if self.page.as_phys() != arch::vm::current_page_table() {
            // Not the active page table: nothing to invalidate in the TLB.
            for pte in iter {
                pte.take();
            }
            return;
        }

        if use_invlpg {
            for (offset_pages, pte) in iter.enumerate() {
                pte.take();

                let vaddr = range.start().floor().0 + offset_pages * 4096;
                arch::vm::invlpg(vaddr);
            }
        } else {
            for pte in iter {
                pte.take();
            }
            arch::vm::invlpg_all();
        }
    }
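
    /// Flush the TLB, but only if this page table is currently the active one.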
    pub fn lazy_invalidate_tlb_all(&self) {
        if self.page.as_phys() == arch::vm::current_page_table() {
            arch::vm::invlpg_all();
        }
    }
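
    /// Point every PTE in `range` at the shared empty page for an mmap'ed
    /// mapping. The entries are left non-present (no PA_P), so the first
    /// access takes a page fault.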
    pub fn set_mmapped(&self, range: VRange, permission: Permission) {
        // PA_RW is set during page fault handling.
        // PA_NXE is preserved across page faults, so we set PA_NXE now.
        let attributes = if permission.execute {
            PA_US | PA_COW | PA_ANON | PA_MMAP
        } else {
            PA_US | PA_COW | PA_ANON | PA_MMAP | PA_NXE
        };

        for pte in self.iter_user(range).unwrap() {
            pte.set(EMPTY_PAGE.clone().into_pfn(), attributes);
        }
    }
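
    /// Like `set_mmapped`, but for anonymous memory: the entries are made
    /// present immediately, mapping the shared zero page copy-on-write.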
    pub fn set_anonymous(&self, range: VRange, permission: Permission) {
        // PA_RW is set during page fault handling.
        // PA_NXE is preserved across page faults, so we set PA_NXE now.
        let attributes = if permission.execute {
            PA_P | PA_US | PA_COW | PA_ANON
        } else {
            PA_P | PA_US | PA_COW | PA_ANON | PA_NXE
        };

        for pte in self.iter_user(range).unwrap() {
            pte.set(EMPTY_PAGE.clone().into_pfn(), attributes);
        }
    }
}
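
// Recursively release every user page and intermediate table reachable from
// `pt`. Kernel mappings are not marked PA_US and are therefore skipped: they
// are shared across all page tables.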
fn drop_page_table_recursive(pt: &Page, level: usize) {
    for pte in pt
        .as_cached()
        .as_mut_slice::<PTE>(512)
        .iter_mut()
        .filter(|pte| pte.is_present() && pte.is_user())
    {
        let page = pte.take();
        if level > 1 {
            drop_page_table_recursive(&page, level - 1);
        }
    }
}

impl Drop for PageTable {
    fn drop(&mut self) {
        drop_page_table_recursive(&self.page, 4);
    }
}