mm_list.rs

mod mapping;
mod page_fault;

use super::address::{VAddrExt as _, VRangeExt as _};
use super::page_alloc::GlobalPageAlloc;
use super::paging::AllocZeroed as _;
use super::{AsMemoryBlock, MMArea, Page};
use crate::kernel::constants::{EEXIST, EFAULT, EINVAL, ENOMEM};
use crate::kernel::mem::page_alloc::RawPagePtr;
use crate::{prelude::*, sync::ArcSwap};
use alloc::collections::btree_set::BTreeSet;
use core::fmt;
use core::sync::atomic::{AtomicUsize, Ordering};
use eonix_hal::mm::{
    flush_tlb_all, get_root_page_table_pfn, set_root_page_table_pfn, ArchPagingMode,
    ArchPhysAccess, GLOBAL_PAGE_TABLE,
};
use eonix_mm::address::{Addr as _, PAddr};
use eonix_mm::page_table::PageAttribute;
use eonix_mm::paging::PFN;
use eonix_mm::{
    address::{AddrOps as _, VAddr, VRange},
    page_table::{PageTable, RawAttribute, PTE},
    paging::PAGE_SIZE,
};
use eonix_sync::{LazyLock, Mutex};

pub use mapping::{FileMapping, Mapping};
pub use page_fault::handle_kernel_page_fault;

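/// A single shared, zero-filled page that newly created user mappings initially
/// point their PTEs at; real backing pages are installed later by the page-fault
/// handler.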
pub static EMPTY_PAGE: LazyLock<Page> = LazyLock::new(|| Page::zeroed());

#[derive(Debug, Clone, Copy)]
pub struct Permission {
    pub read: bool,
    pub write: bool,
    pub execute: bool,
}

pub type KernelPageTable<'a> = PageTable<'a, ArchPagingMode, GlobalPageAlloc, ArchPhysAccess>;

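/// The mutable state of an [`MMList`]: the set of mapped areas, the page table
/// that backs them, and the program break bookkeeping.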
struct MMListInner<'a> {
    areas: BTreeSet<MMArea>,
    page_table: KernelPageTable<'a>,
    break_start: Option<VRange>,
    break_pos: Option<VAddr>,
}

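/// A user address space: the set of [`MMArea`]s and the page table that backs
/// them, plus bookkeeping for how many users currently have it active and which
/// root page table to load on context switch.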
pub struct MMList {
    inner: ArcSwap<Mutex<MMListInner<'static>>>,
    user_count: AtomicUsize,
    /// Only used in kernel space to switch page tables on context switch.
    root_page_table: AtomicUsize,
}

impl MMListInner<'_> {
    fn overlapping_addr(&self, addr: VAddr) -> Option<&MMArea> {
        self.areas.get(&VRange::from(addr))
    }

    fn check_overlapping_addr(&self, addr: VAddr) -> bool {
        addr.is_user() && self.overlapping_addr(addr).is_none()
    }

    fn overlapping_range(&self, range: VRange) -> impl DoubleEndedIterator<Item = &MMArea> + '_ {
        self.areas.range(range.into_bounds())
    }

    fn check_overlapping_range(&self, range: VRange) -> bool {
        range.is_user() && self.overlapping_range(range).next().is_none()
    }

    fn random_start(&self) -> VAddr {
        VAddr::from(0x1234000)
    }

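    /// Find a free, page-aligned user range of at least `len` bytes, starting the
    /// search at `hint` (or at a fixed default start when `hint` is `VAddr::NULL`).
    /// Returns `None` if no such range exists in user space.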
    fn find_available(&self, mut hint: VAddr, len: usize) -> Option<VAddr> {
        let len = len.div_ceil(PAGE_SIZE) * PAGE_SIZE;

        if hint == VAddr::NULL {
            hint = self.random_start();
        } else {
            hint = hint.floor();
        }

        let mut range = VRange::from(hint).grow(len);
        loop {
            if !range.is_user() {
                return None;
            }

            match self.overlapping_range(range).next_back() {
                None => return Some(range.start()),
                Some(area) => {
                    range = VRange::from(area.range().end().ceil()).grow(len);
                }
            }
        }
    }

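    /// Remove all mappings that intersect `[start, start + len)`, splitting any
    /// partially covered areas and taking the backing pages out of the page table.
    /// Returns the unmapped pages so the caller can free them after the TLBs have
    /// been flushed.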
    fn unmap(&mut self, start: VAddr, len: usize) -> KResult<Vec<Page>> {
        assert_eq!(start.floor(), start);
        let end = (start + len).ceil();
        let range_to_unmap = VRange::new(start, end);

        if !range_to_unmap.is_user() {
            return Err(EINVAL);
        }

        let mut left_remaining = None;
        let mut right_remaining = None;
        let mut pages_to_free = Vec::new();

        // TODO: Write back dirty pages.
        self.areas.retain(|area| {
            let Some((left, mid, right)) = area.range().mask_with_checked(&range_to_unmap) else {
                return true;
            };

            for pte in self.page_table.iter_user(mid) {
                let (pfn, _) = pte.take();
                pages_to_free.push(unsafe {
                    // SAFETY: We got the pfn from a valid page table entry, so it should be valid.
                    Page::from_raw(pfn)
                });
            }

            match (left, right) {
                (None, None) => {}
                (Some(left), None) => {
                    assert!(left_remaining.is_none());
                    let (Some(left), _) = area.clone().split(left.end()) else {
                        unreachable!("`left.end()` is within the area");
                    };
                    left_remaining = Some(left);
                }
                (None, Some(right)) => {
                    assert!(right_remaining.is_none());
                    let (_, Some(right)) = area.clone().split(right.start()) else {
                        unreachable!("`right.start()` is within the area");
                    };
                    right_remaining = Some(right);
                }
                (Some(left), Some(right)) => {
                    assert!(left_remaining.is_none());
                    assert!(right_remaining.is_none());
                    let (Some(left), Some(mid)) = area.clone().split(left.end()) else {
                        unreachable!("`left.end()` is within the area");
                    };
                    let (_, Some(right)) = mid.split(right.start()) else {
                        unreachable!("`right.start()` is within the area");
                    };
                    left_remaining = Some(left);
                    right_remaining = Some(right);
                }
            }

            false
        });

        if let Some(front) = left_remaining {
            self.areas.insert(front);
        }
        if let Some(back) = right_remaining {
            self.areas.insert(back);
        }

        Ok(pages_to_free)
    }

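    /// Change the protection of every mapping that intersects `[start, start + len)`,
    /// splitting partially covered areas and rewriting the PTE attributes in place.
    /// Copy-on-write pages never get the write bit here; it is applied later by the
    /// page-fault handler. Returns `ENOMEM` if no area intersects the range.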
    fn protect(&mut self, start: VAddr, len: usize, permission: Permission) -> KResult<()> {
        assert_eq!(start.floor(), start);
        assert!(len != 0);
        let end = (start + len).ceil();
        let range_to_protect = VRange::new(start, end);

        if !range_to_protect.is_user() {
            return Err(EINVAL);
        }

        let mut found = false;
        let old_areas = core::mem::take(&mut self.areas);
        for mut area in old_areas {
            let Some((left, mid, right)) = area.range().mask_with_checked(&range_to_protect) else {
                self.areas.insert(area);
                continue;
            };
            found = true;

            if let Some(left) = left {
                let (Some(left), Some(right)) = area.split(left.end()) else {
                    unreachable!("`left.end()` is within the area");
                };
                self.areas.insert(left);
                area = right;
            }

            if let Some(right) = right {
                let (Some(left), Some(right)) = area.split(right.start()) else {
                    unreachable!("`right.start()` is within the area");
                };
                self.areas.insert(right);
                area = left;
            }

            for pte in self.page_table.iter_user(mid) {
                let mut page_attr = pte.get_attr().as_page_attr().expect("Not a page attribute");

                if !permission.read && !permission.write && !permission.execute {
                    // If no permissions are requested, just mark the page as inaccessible.
                    page_attr.remove(
                        PageAttribute::PRESENT
                            | PageAttribute::READ
                            | PageAttribute::WRITE
                            | PageAttribute::EXECUTE,
                    );
                    pte.set_attr(page_attr.into());
                    continue;
                }

                page_attr.set(PageAttribute::READ, permission.read);
                if !page_attr.contains(PageAttribute::COPY_ON_WRITE) {
                    page_attr.set(PageAttribute::WRITE, permission.write);
                }
                page_attr.set(PageAttribute::EXECUTE, permission.execute);

                pte.set_attr(page_attr.into());
            }

            area.permission = permission;
            self.areas.insert(area);
        }

        if !found {
            return Err(ENOMEM);
        }

        Ok(())
    }

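    /// Insert a new mapping of `len` bytes at `at`. `at` must be page aligned and
    /// `len` a multiple of the page size. Fails with `EEXIST` if the range overlaps
    /// an existing area; a zero-length request only inserts an area marker.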
    fn mmap(
        &mut self,
        at: VAddr,
        len: usize,
        mapping: Mapping,
        permission: Permission,
        is_shared: bool,
    ) -> KResult<()> {
        assert_eq!(at.floor(), at);
        assert_eq!(len & (PAGE_SIZE - 1), 0);
        let range = VRange::new(at, at + len);

        // A zero-length request is an area marker insertion.
        if len == 0 && !self.check_overlapping_addr(at) || !self.check_overlapping_range(range) {
            return Err(EEXIST);
        }

        match &mapping {
            Mapping::Anonymous => self.page_table.set_anonymous(range, permission),
            Mapping::File(_) => self.page_table.set_mmapped(range, permission),
        }

        self.areas
            .insert(MMArea::new(range, mapping, permission, is_shared));
        Ok(())
    }
}

impl Drop for MMListInner<'_> {
    fn drop(&mut self) {
        // FIXME: This may be buggy.
        for area in &self.areas {
            if area.is_shared {
                for pte in self.page_table.iter_user(area.range()) {
                    let (pfn, _) = pte.take();
                    let raw_page = RawPagePtr::from(pfn);
                    if raw_page.refcount().fetch_sub(1, Ordering::Relaxed) == 1 {
                        // FIXME: This is wrong.
                        // unsafe { Page::from_raw(pfn) };
                    }
                }
            } else {
                for pte in self.page_table.iter_user(area.range()) {
                    let (pfn, _) = pte.take();
                    unsafe { Page::from_raw(pfn) };
                }
            }
        }
        // TODO: Recycle all pages in the page table.
    }
}

impl MMList {
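    /// Flush the TLB entries of every CPU that is currently using this address
    /// space. With no users this is a no-op; with a single user that is the
    /// current CPU, a local flush suffices. Remote and multi-user flushes are
    /// not implemented yet.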
    async fn flush_user_tlbs(&self) {
        match self.user_count.load(Ordering::Relaxed) {
            0 => {
                // If there are currently no users, we don't need to do anything.
            }
            1 => {
                if PAddr::from(get_root_page_table_pfn()).addr()
                    == self.root_page_table.load(Ordering::Relaxed)
                {
                    // If there is only one user and we are the one using the page table,
                    // flushing the TLB on the local CPU is enough.
                    flush_tlb_all();
                } else {
                    // Send the TLB flush request to the core that is using it.
                    todo!();
                }
            }
            _ => {
                // If there is more than one user, we broadcast the TLB flush
                // to all cores.
                todo!()
            }
        }
    }

    pub fn new() -> Self {
        let page_table = GLOBAL_PAGE_TABLE.clone_global();

        Self {
            root_page_table: AtomicUsize::from(page_table.addr().addr()),
            user_count: AtomicUsize::new(0),
            inner: ArcSwap::new(Mutex::new(MMListInner {
                areas: BTreeSet::new(),
                page_table,
                break_start: None,
                break_pos: None,
            })),
        }
    }

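    /// Create a copy of this address space. The areas and program break are copied,
    /// private areas are marked copy-on-write in both page tables, and the PTEs of
    /// shared areas are copied as-is. The users of the old address space get their
    /// TLBs flushed afterwards because of the newly set CoW entries.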
    pub async fn new_cloned(&self) -> Self {
        let inner = self.inner.borrow();
        let mut inner = inner.lock().await;

        let page_table = GLOBAL_PAGE_TABLE.clone_global();
        let list = Self {
            root_page_table: AtomicUsize::from(page_table.addr().addr()),
            user_count: AtomicUsize::new(0),
            inner: ArcSwap::new(Mutex::new(MMListInner {
                areas: inner.areas.clone(),
                page_table,
                break_start: inner.break_start,
                break_pos: inner.break_pos,
            })),
        };

        {
            let list_inner = list.inner.borrow();
            let list_inner = list_inner.lock().await;

            for area in list_inner.areas.iter() {
                if !area.is_shared {
                    list_inner
                        .page_table
                        .set_copy_on_write(&mut inner.page_table, area.range());
                } else {
                    list_inner
                        .page_table
                        .set_copied(&mut inner.page_table, area.range());
                }
            }
        }

        // We've set some pages as CoW, so we need to invalidate all our users' TLBs.
        self.flush_user_tlbs().await;

        list
    }

    pub async fn new_shared(&self) -> Self {
        todo!()
    }

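    /// Switch the current CPU to this address space's root page table and count it
    /// as a user. Undone by `deactivate`.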
    pub fn activate(&self) {
        self.user_count.fetch_add(1, Ordering::Acquire);

        let root_page_table = self.root_page_table.load(Ordering::Relaxed);
        assert_ne!(root_page_table, 0);
        set_root_page_table_pfn(PFN::from(PAddr::from(root_page_table)));
    }

    pub fn deactivate(&self) {
        set_root_page_table_pfn(PFN::from(GLOBAL_PAGE_TABLE.addr()));

        let old_user_count = self.user_count.fetch_sub(1, Ordering::Release);
        assert_ne!(old_user_count, 0);
    }

    /// Deactivate `self` and activate `to` with the root page table changed only once.
    /// This might reduce the overhead of switching page tables twice.
    #[allow(dead_code)]
    pub fn switch(&self, to: &Self) {
        to.user_count.fetch_add(1, Ordering::Acquire);

        let root_page_table = to.root_page_table.load(Ordering::Relaxed);
        assert_ne!(root_page_table, 0);
        set_root_page_table_pfn(PFN::from(PAddr::from(root_page_table)));

        let old_user_count = self.user_count.fetch_sub(1, Ordering::Release);
        assert_ne!(old_user_count, 0);
    }

    /// Replace the current page table with a new one.
    ///
    /// # Safety
    /// This function should be called only when we are sure that the `MMList` is not
    /// being used by any other thread.
    pub unsafe fn replace(&self, new: Option<Self>) {
        eonix_preempt::disable();

        assert_eq!(
            self.user_count.load(Ordering::Relaxed),
            1,
            "We should be the only user"
        );
        assert_eq!(
            new.as_ref()
                .map(|new_mm| new_mm.user_count.load(Ordering::Relaxed))
                .unwrap_or(0),
            0,
            "`new` must not be used by anyone"
        );

        let old_root_page_table = self.root_page_table.load(Ordering::Relaxed);
        let current_root_page_table = get_root_page_table_pfn();
        assert_eq!(
            PAddr::from(current_root_page_table).addr(),
            old_root_page_table,
            "We should be the only user"
        );

        let new_root_page_table = match &new {
            Some(new_mm) => new_mm.root_page_table.load(Ordering::Relaxed),
            None => GLOBAL_PAGE_TABLE.addr().addr(),
        };

        set_root_page_table_pfn(PFN::from(PAddr::from(new_root_page_table)));
        self.root_page_table
            .store(new_root_page_table, Ordering::Relaxed);

        // TODO: Check whether we should wake someone up if they've been put
        // to sleep when calling `vfork`.
        self.inner
            .swap(new.map(|new_mm| new_mm.inner.swap(None)).flatten());

        eonix_preempt::enable();
    }

    /// No need to do invalidation manually; `PageTable` already does it.
    pub async fn unmap(&self, start: VAddr, len: usize) -> KResult<()> {
        let pages_to_free = self.inner.borrow().lock().await.unmap(start, len)?;

        // We need to ensure that the pages are not accessed anymore.
        // Anyone who still has these pages in their TLB could read from or write to them,
        // so flush the TLBs of all our users first.
        self.flush_user_tlbs().await;

        // Then free the pages.
        drop(pages_to_free);

        Ok(())
    }

    pub async fn protect(&self, start: VAddr, len: usize, prot: Permission) -> KResult<()> {
        self.inner.borrow().lock().await.protect(start, len, prot)?;

        // Flush the TLBs since the PTE attributes have changed.
        self.flush_user_tlbs().await;

        Ok(())
    }

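    /// Map the single vDSO page into this address space at a fixed user address,
    /// readable and executable but not writable.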
    pub async fn map_vdso(&self) -> KResult<()> {
        unsafe extern "C" {
            fn VDSO_PADDR();
        }

        static VDSO_PADDR_VALUE: &'static unsafe extern "C" fn() =
            &(VDSO_PADDR as unsafe extern "C" fn());

        let vdso_paddr = unsafe {
            // SAFETY: To prevent the compiler from optimizing this into `la` instructions
            // and causing a linking error.
            (VDSO_PADDR_VALUE as *const _ as *const usize).read_volatile()
        };
        let vdso_pfn = PFN::from(PAddr::from(vdso_paddr));

        const VDSO_START: VAddr = VAddr::from(0x7f00_0000_0000);
        const VDSO_SIZE: usize = 0x1000;

        let inner = self.inner.borrow();
        let inner = inner.lock().await;

        let mut pte_iter = inner
            .page_table
            .iter_user(VRange::from(VDSO_START).grow(VDSO_SIZE));

        let pte = pte_iter.next().expect("There should be at least one PTE");
        pte.set(
            vdso_pfn,
            (PageAttribute::PRESENT
                | PageAttribute::READ
                | PageAttribute::EXECUTE
                | PageAttribute::USER
                | PageAttribute::ACCESSED)
                .into(),
        );
        assert!(pte_iter.next().is_none(), "There should be only one PTE");

        Ok(())
    }

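    /// Map `len` bytes of `mapping` at `hint` if possible. If `hint` is `VAddr::NULL`
    /// or the hinted range is already occupied, fall back to the first available user
    /// range instead. Returns the address that was actually mapped.
    ///
    /// A minimal usage sketch (hypothetical caller, assuming an async context):
    /// ```ignore
    /// let addr = mm_list
    ///     .mmap_hint(
    ///         VAddr::NULL,
    ///         PAGE_SIZE,
    ///         Mapping::Anonymous,
    ///         Permission { read: true, write: true, execute: false },
    ///         false,
    ///     )
    ///     .await?;
    /// ```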
    pub async fn mmap_hint(
        &self,
        hint: VAddr,
        len: usize,
        mapping: Mapping,
        permission: Permission,
        is_shared: bool,
    ) -> KResult<VAddr> {
        let inner = self.inner.borrow();
        let mut inner = inner.lock().await;

        if hint == VAddr::NULL {
            let at = inner.find_available(hint, len).ok_or(ENOMEM)?;
            inner.mmap(at, len, mapping, permission, is_shared)?;
            return Ok(at);
        }

        match inner.mmap(hint, len, mapping.clone(), permission, is_shared) {
            Ok(()) => Ok(hint),
            Err(EEXIST) => {
                let at = inner.find_available(hint, len).ok_or(ENOMEM)?;
                inner.mmap(at, len, mapping, permission, is_shared)?;
                Ok(at)
            }
            Err(err) => Err(err),
        }
    }

    pub async fn mmap_fixed(
        &self,
        at: VAddr,
        len: usize,
        mapping: Mapping,
        permission: Permission,
        is_shared: bool,
    ) -> KResult<VAddr> {
        self.inner
            .borrow()
            .lock()
            .await
            .mmap(at, len, mapping.clone(), permission, is_shared)
            .map(|_| at)
    }

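    /// Get or move the program break. With `pos == None` the current break is
    /// returned unchanged; otherwise the break area is grown up to the page-aligned
    /// `pos` and the new range is mapped as anonymous, read-write memory. Requests
    /// that would shrink the break or collide with an existing mapping return the
    /// current break unchanged.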
    pub async fn set_break(&self, pos: Option<VAddr>) -> VAddr {
        let inner = self.inner.borrow();
        let mut inner = inner.lock().await;

        // SAFETY: `set_break` is only called in syscalls, where the program break should be valid.
        assert!(inner.break_start.is_some() && inner.break_pos.is_some());
        let break_start = inner.break_start.unwrap();
        let current_break = inner.break_pos.unwrap();

        let pos = match pos {
            None => return current_break,
            Some(pos) => pos.ceil(),
        };

        if current_break > pos {
            return current_break;
        }

        let range = VRange::new(current_break, pos);
        if !inner.check_overlapping_range(range) {
            return current_break;
        }

        if !inner.areas.contains(&break_start) {
            inner.areas.insert(MMArea::new(
                break_start,
                Mapping::Anonymous,
                Permission {
                    read: true,
                    write: true,
                    execute: false,
                },
                false,
            ));
        }

        let program_break = inner
            .areas
            .get(&break_start)
            .expect("Program break area should be valid");

        let len = pos - current_break;
        let range_to_grow = VRange::from(program_break.range().end()).grow(len);

        program_break.grow(len);

        inner.page_table.set_anonymous(
            range_to_grow,
            Permission {
                read: true,
                write: true,
                execute: false,
            },
        );

        inner.break_pos = Some(pos);
        pos
    }

    /// This should be called only **once** for every thread.
    pub async fn register_break(&self, start: VAddr) {
        let inner = self.inner.borrow();
        let mut inner = inner.lock().await;

        assert!(inner.break_start.is_none() && inner.break_pos.is_none());
        inner.break_start = Some(start.into());
        inner.break_pos = Some(start);
    }

    /// Access the user memory in `[start, start + len)` with the given function.
    /// The function is called with an offset into the accessed range and a mutable
    /// slice of the corresponding page data.
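    ///
    /// A minimal usage sketch (hypothetical caller, assuming an async context):
    /// ```ignore
    /// // Zero the first 16 bytes at some user address `addr`.
    /// mm_list.access_mut(addr, 16, |_offset, bytes| bytes.fill(0)).await?;
    /// ```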
    pub async fn access_mut<F>(&self, start: VAddr, len: usize, func: F) -> KResult<()>
    where
        F: Fn(usize, &mut [u8]),
    {
        // First, validate the address range.
        let end = start + len;
        if !start.is_user() || !end.is_user() {
            return Err(EINVAL);
        }

        let inner = self.inner.borrow();
        let inner = inner.lock().await;

        let mut offset = 0;
        let mut remaining = len;
        let mut current = start;

        while remaining > 0 {
            let area = inner.overlapping_addr(current).ok_or(EFAULT)?;
            let area_start = area.range().start();
            let area_end = area.range().end();

            let area_remaining = area_end - current;
            let access_len = remaining.min(area_remaining);
            let access_end = current + access_len;

            for (idx, pte) in inner
                .page_table
                .iter_user(VRange::new(current, access_end))
                .enumerate()
            {
                let page_start = current.floor() + idx * 0x1000;
                let page_end = page_start + 0x1000;

                // Prepare for the worst case that we might write to the page...
                area.handle(pte, page_start - area_start, true).await?;

                let start_offset = if page_start < current {
                    current - page_start
                } else {
                    0
                };
                let end_offset = if page_end > access_end {
                    access_end - page_start
                } else {
                    0x1000
                };

                unsafe {
                    // SAFETY: We are sure that the page is valid and we have the right to access it.
                    Page::with_raw(pte.get_pfn(), |page| {
                        // SAFETY: The caller guarantees that no one else is using the page.
                        let page_data = page.as_memblk().as_bytes_mut();
                        func(
                            offset + idx * 0x1000,
                            &mut page_data[start_offset..end_offset],
                        );
                    });
                }
            }

            offset += access_len;
            remaining -= access_len;
            current = access_end;
        }

        Ok(())
    }
}

impl fmt::Debug for MMList {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MMList").finish()
    }
}

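/// Convenience helpers for initializing or copying all user PTEs within a range.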
trait PageTableExt {
    fn set_anonymous(&self, range: VRange, permission: Permission);
    fn set_mmapped(&self, range: VRange, permission: Permission);
    fn set_copy_on_write(&self, from: &Self, range: VRange);
    fn set_copied(&self, from: &Self, range: VRange);
}

impl PageTableExt for KernelPageTable<'_> {
    fn set_anonymous(&self, range: VRange, permission: Permission) {
        for pte in self.iter_user(range) {
            pte.set_anonymous(permission.execute);
        }
    }

    fn set_mmapped(&self, range: VRange, permission: Permission) {
        for pte in self.iter_user(range) {
            pte.set_mapped(permission.execute);
        }
    }

    fn set_copy_on_write(&self, from: &Self, range: VRange) {
        let to_iter = self.iter_user(range);
        let from_iter = from.iter_user(range);

        for (to, from) in to_iter.zip(from_iter) {
            to.set_copy_on_write(from);
        }
    }

    fn set_copied(&self, from: &Self, range: VRange) {
        let to_iter = self.iter_user(range);
        let from_iter = from.iter_user(range);

        for (to, from) in to_iter.zip(from_iter) {
            let (pfn, attr) = from.get();
            to.set(pfn, attr);
        }
    }
}

trait PTEExt {
    /// Private anonymous mapping.
    fn set_anonymous(&mut self, execute: bool);
    /// File-mapped or shared anonymous mapping.
    fn set_mapped(&mut self, execute: bool);
    fn set_copy_on_write(&mut self, from: &mut Self);
}

impl<T> PTEExt for T
where
    T: PTE,
{
    fn set_anonymous(&mut self, execute: bool) {
        // The writable flag is set during page fault handling, while the executable flag
        // is preserved across page faults, so we set the executable flag now.
        let mut attr = PageAttribute::PRESENT
            | PageAttribute::READ
            | PageAttribute::USER
            | PageAttribute::COPY_ON_WRITE;
        attr.set(PageAttribute::EXECUTE, execute);

        self.set(EMPTY_PAGE.clone().into_raw(), T::Attr::from(attr));
    }

    fn set_mapped(&mut self, execute: bool) {
        // The writable flag is set during page fault handling, while the executable flag
        // is preserved across page faults, so we set the executable flag now.
        let mut attr = PageAttribute::READ | PageAttribute::USER | PageAttribute::MAPPED;
        attr.set(PageAttribute::EXECUTE, execute);

        self.set(EMPTY_PAGE.clone().into_raw(), T::Attr::from(attr));
    }

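    /// Share the page referenced by `from` with `self` and mark both entries
    /// copy-on-write: the write and dirty bits are dropped so that the next write
    /// triggers a page fault, and the page's reference count is bumped by cloning it.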
    fn set_copy_on_write(&mut self, from: &mut Self) {
        let mut from_attr = from
            .get_attr()
            .as_page_attr()
            .expect("Not a page attribute");

        if !from_attr.intersects(PageAttribute::PRESENT | PageAttribute::MAPPED) {
            return;
        }

        from_attr.remove(PageAttribute::WRITE | PageAttribute::DIRTY);
        from_attr.insert(PageAttribute::COPY_ON_WRITE);

        let pfn = unsafe {
            // SAFETY: We get the pfn from a valid page table entry, so it should be valid as well.
            Page::with_raw(from.get_pfn(), |page| page.clone().into_raw())
        };

        self.set(pfn, T::Attr::from(from_attr & !PageAttribute::ACCESSED));
        from.set_attr(T::Attr::from(from_attr));
    }
}