// mm_list.rs

mod mapping;
mod page_fault;

use super::address::{VAddrExt as _, VRangeExt as _};
use super::page_alloc::GlobalPageAlloc;
use super::paging::AllocZeroed as _;
use super::{AsMemoryBlock, MMArea, Page};
use crate::kernel::constants::{EEXIST, EFAULT, EINVAL, ENOMEM};
use crate::kernel::mem::page_alloc::RawPagePtr;
use crate::{prelude::*, sync::ArcSwap};
use alloc::collections::btree_set::BTreeSet;
use core::fmt;
use core::sync::atomic::{AtomicUsize, Ordering};
use eonix_hal::mm::{
    flush_tlb_all, get_root_page_table_pfn, set_root_page_table_pfn, ArchPagingMode,
    ArchPhysAccess, GLOBAL_PAGE_TABLE,
};
use eonix_mm::address::{Addr as _, PAddr};
use eonix_mm::page_table::PageAttribute;
use eonix_mm::paging::PFN;
use eonix_mm::{
    address::{AddrOps as _, VAddr, VRange},
    page_table::{PageTable, RawAttribute, PTE},
    paging::PAGE_SIZE,
};
use eonix_runtime::task::Task;
use eonix_sync::{LazyLock, Mutex};

pub use mapping::{FileMapping, Mapping};
pub use page_fault::handle_kernel_page_fault;

pub static EMPTY_PAGE: LazyLock<Page> = LazyLock::new(|| Page::zeroed());
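/// Access permissions (read/write/execute) requested for a mapping.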
#[derive(Debug, Clone, Copy)]
pub struct Permission {
    pub read: bool,
    pub write: bool,
    pub execute: bool,
}

pub type KernelPageTable<'a> = PageTable<'a, ArchPagingMode, GlobalPageAlloc, ArchPhysAccess>;
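/// The lock-protected part of an [`MMList`]: the set of mapped areas, the
/// user page table they live in, and the program break bookkeeping.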
struct MMListInner<'a> {
    areas: BTreeSet<MMArea>,
    page_table: KernelPageTable<'a>,
    break_start: Option<VRange>,
    break_pos: Option<VAddr>,
}
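/// A user address space: the mapped areas together with the user page table
/// backing them. `user_count` tracks how many CPUs currently have this
/// address space activated, which decides how TLB flushes are propagated
/// (see `flush_user_tlbs`).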
pub struct MMList {
    inner: ArcSwap<Mutex<MMListInner<'static>>>,
    user_count: AtomicUsize,
    /// Only used in kernel space to switch page tables on context switch.
    root_page_table: AtomicUsize,
}
impl MMListInner<'_> {
    fn overlapping_addr(&self, addr: VAddr) -> Option<&MMArea> {
        self.areas.get(&VRange::from(addr))
    }

    fn check_overlapping_addr(&self, addr: VAddr) -> bool {
        addr.is_user() && self.overlapping_addr(addr).is_none()
    }

    fn overlapping_range(&self, range: VRange) -> impl DoubleEndedIterator<Item = &MMArea> + '_ {
        self.areas.range(range.into_bounds())
    }

    fn check_overlapping_range(&self, range: VRange) -> bool {
        range.is_user() && self.overlapping_range(range).next().is_none()
    }

    fn random_start(&self) -> VAddr {
        VAddr::from(0x1234000)
    }

    fn find_available(&self, mut hint: VAddr, len: usize) -> Option<VAddr> {
        let len = len.div_ceil(PAGE_SIZE) * PAGE_SIZE;
        if hint == VAddr::NULL {
            hint = self.random_start();
        } else {
            hint = hint.floor();
        }

        let mut range = VRange::from(hint).grow(len);
        loop {
            if !range.is_user() {
                return None;
            }

            match self.overlapping_range(range).next_back() {
                None => return Some(range.start()),
                Some(area) => {
                    range = VRange::from(area.range().end().ceil()).grow(len);
                }
            }
        }
    }
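    /// Unmap `[start, start + len)`, splitting any partially covered areas
    /// and returning the pages that were unmapped so that the caller can free
    /// them once the TLBs have been flushed.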
    fn unmap(&mut self, start: VAddr, len: usize) -> KResult<Vec<Page>> {
        assert_eq!(start.floor(), start);
        let end = (start + len).ceil();
        let range_to_unmap = VRange::new(start, end);
        if !range_to_unmap.is_user() {
            return Err(EINVAL);
        }

        let mut left_remaining = None;
        let mut right_remaining = None;
        let mut pages_to_free = Vec::new();

        // TODO: Write back dirty pages.
        self.areas.retain(|area| {
            let Some((left, mid, right)) = area.range().mask_with_checked(&range_to_unmap) else {
                return true;
            };

            for pte in self.page_table.iter_user(mid) {
                let (pfn, _) = pte.take();
                pages_to_free.push(unsafe {
                    // SAFETY: We got the pfn from a valid page table entry, so it should be valid.
                    Page::from_raw(pfn)
                });
            }

            match (left, right) {
                (None, None) => {}
                (Some(left), None) => {
                    assert!(left_remaining.is_none());
                    let (Some(left), _) = area.clone().split(left.end()) else {
                        unreachable!("`left.end()` is within the area");
                    };
                    left_remaining = Some(left);
                }
                (None, Some(right)) => {
                    assert!(right_remaining.is_none());
                    let (_, Some(right)) = area.clone().split(right.start()) else {
                        unreachable!("`right.start()` is within the area");
                    };
                    right_remaining = Some(right);
                }
                (Some(left), Some(right)) => {
                    assert!(left_remaining.is_none());
                    assert!(right_remaining.is_none());
                    let (Some(left), Some(mid)) = area.clone().split(left.end()) else {
                        unreachable!("`left.end()` is within the area");
                    };
                    let (_, Some(right)) = mid.split(right.start()) else {
                        unreachable!("`right.start()` is within the area");
                    };
                    left_remaining = Some(left);
                    right_remaining = Some(right);
                }
            }

            false
        });

        if let Some(front) = left_remaining {
            self.areas.insert(front);
        }
        if let Some(back) = right_remaining {
            self.areas.insert(back);
        }

        Ok(pages_to_free)
    }
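    /// Change the permissions of every mapping intersecting
    /// `[start, start + len)`, splitting partially covered areas and updating
    /// the PTE attributes in place. Returns `ENOMEM` if nothing in the range
    /// is mapped.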
    fn protect(&mut self, start: VAddr, len: usize, permission: Permission) -> KResult<()> {
        assert_eq!(start.floor(), start);
        assert!(len != 0);
        let end = (start + len).ceil();
        let range_to_protect = VRange::new(start, end);
        if !range_to_protect.is_user() {
            return Err(EINVAL);
        }

        let mut found = false;
        let old_areas = core::mem::take(&mut self.areas);
        for mut area in old_areas {
            let Some((left, mid, right)) = area.range().mask_with_checked(&range_to_protect) else {
                self.areas.insert(area);
                continue;
            };
            found = true;

            if let Some(left) = left {
                let (Some(left), Some(right)) = area.split(left.end()) else {
                    unreachable!("`left.end()` is within the area");
                };
                self.areas.insert(left);
                area = right;
            }

            if let Some(right) = right {
                let (Some(left), Some(right)) = area.split(right.start()) else {
                    unreachable!("`right.start()` is within the area");
                };
                self.areas.insert(right);
                area = left;
            }

            for pte in self.page_table.iter_user(mid) {
                let mut page_attr = pte.get_attr().as_page_attr().expect("Not a page attribute");

                if !permission.read && !permission.write && !permission.execute {
                    // If no permissions are set, we just remove the page.
                    page_attr.remove(
                        PageAttribute::PRESENT
                            | PageAttribute::READ
                            | PageAttribute::WRITE
                            | PageAttribute::EXECUTE,
                    );
                    pte.set_attr(page_attr.into());
                    continue;
                }

                page_attr.set(PageAttribute::READ, permission.read);
                if !page_attr.contains(PageAttribute::COPY_ON_WRITE) {
                    page_attr.set(PageAttribute::WRITE, permission.write);
                }
                page_attr.set(PageAttribute::EXECUTE, permission.execute);
                pte.set_attr(page_attr.into());
            }

            area.permission = permission;
            self.areas.insert(area);
        }

        if !found {
            return Err(ENOMEM);
        }
        Ok(())
    }
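    /// Insert a new area at exactly `at` and initialize its PTEs as anonymous
    /// or file-backed. Fails with `EEXIST` if the range overlaps an existing
    /// area.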
    fn mmap(
        &mut self,
        at: VAddr,
        len: usize,
        mapping: Mapping,
        permission: Permission,
        is_shared: bool,
    ) -> KResult<()> {
        assert_eq!(at.floor(), at);
        assert_eq!(len & (PAGE_SIZE - 1), 0);
        let range = VRange::new(at, at + len);

        // A zero-length mapping is just an area marker insertion, so only the
        // address itself needs to be checked in that case.
        if len == 0 && !self.check_overlapping_addr(at) || !self.check_overlapping_range(range) {
            return Err(EEXIST);
        }

        match &mapping {
            Mapping::Anonymous => self.page_table.set_anonymous(range, permission),
            Mapping::File(_) => self.page_table.set_mmapped(range, permission),
        }

        self.areas
            .insert(MMArea::new(range, mapping, permission, is_shared));
        Ok(())
    }
}
impl Drop for MMListInner<'_> {
    fn drop(&mut self) {
        // FIXME: This cleanup may be buggy.
        for area in &self.areas {
            if area.is_shared {
                for pte in self.page_table.iter_user(area.range()) {
                    let (pfn, _) = pte.take();
                    let raw_page = RawPagePtr::from(pfn);
                    if raw_page.refcount().fetch_sub(1, Ordering::Relaxed) == 1 {
                        // FIXME: Freeing the page here is wrong, so it is left disabled.
                        // unsafe { Page::from_raw(pfn) };
                    }
                }
            } else {
                for pte in self.page_table.iter_user(area.range()) {
                    let (pfn, _) = pte.take();
                    unsafe { Page::from_raw(pfn) };
                }
            }
        }
        // TODO: Recycle all pages in the page table.
    }
}
impl MMList {
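    /// Flush the TLB entries of every CPU that currently has this address
    /// space activated. Remote shootdowns are not implemented yet; only the
    /// single, local-user case is handled.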
    async fn flush_user_tlbs(&self) {
        match self.user_count.load(Ordering::Relaxed) {
            0 => {
                // If there are currently no users, we don't need to do anything.
            }
            1 => {
                if PAddr::from(get_root_page_table_pfn()).addr()
                    == self.root_page_table.load(Ordering::Relaxed)
                {
                    // If there is only one user and we are the one using the page
                    // table, flushing the TLB on the local CPU is enough.
                    flush_tlb_all();
                } else {
                    // Send the TLB flush request to the core using this page table.
                    todo!();
                }
            }
            _ => {
                // If there is more than one user, we broadcast the TLB flush
                // to all cores.
                todo!()
            }
        }
    }
    pub fn new() -> Self {
        let page_table = GLOBAL_PAGE_TABLE.clone_global();
        Self {
            root_page_table: AtomicUsize::from(page_table.addr().addr()),
            user_count: AtomicUsize::new(0),
            inner: ArcSwap::new(Mutex::new(MMListInner {
                areas: BTreeSet::new(),
                page_table,
                break_start: None,
                break_pos: None,
            })),
        }
    }
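    /// Create a copy of this address space. Private areas are marked
    /// copy-on-write in both the old and the new page table, while shared
    /// areas have their PTEs copied so that both spaces reference the same
    /// frames. The users' TLBs are flushed afterwards because of the CoW
    /// changes.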
    pub async fn new_cloned(&self) -> Self {
        let inner = self.inner.borrow();
        let mut inner = inner.lock().await;

        let page_table = GLOBAL_PAGE_TABLE.clone_global();
        let list = Self {
            root_page_table: AtomicUsize::from(page_table.addr().addr()),
            user_count: AtomicUsize::new(0),
            inner: ArcSwap::new(Mutex::new(MMListInner {
                areas: inner.areas.clone(),
                page_table,
                break_start: inner.break_start,
                break_pos: inner.break_pos,
            })),
        };

        {
            let list_inner = list.inner.borrow();
            let list_inner = list_inner.lock().await;

            for area in list_inner.areas.iter() {
                if !area.is_shared {
                    list_inner
                        .page_table
                        .set_copy_on_write(&mut inner.page_table, area.range());
                } else {
                    list_inner
                        .page_table
                        .set_copied(&mut inner.page_table, area.range());
                }
            }
        }

        // We've set some pages as CoW, so we need to invalidate all our users' TLB.
        self.flush_user_tlbs().await;

        list
    }

    pub async fn new_shared(&self) -> Self {
        todo!()
    }
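    /// Activate this address space on the current CPU by loading its root
    /// page table and bumping the user count.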
    pub fn activate(&self) {
        self.user_count.fetch_add(1, Ordering::Acquire);
        let root_page_table = self.root_page_table.load(Ordering::Relaxed);
        assert_ne!(root_page_table, 0);
        set_root_page_table_pfn(PFN::from(PAddr::from(root_page_table)));
    }

    pub fn deactivate(&self) {
        set_root_page_table_pfn(PFN::from(GLOBAL_PAGE_TABLE.addr()));
        let old_user_count = self.user_count.fetch_sub(1, Ordering::Release);
        assert_ne!(old_user_count, 0);
    }
    /// Activate `self` and deactivate `to` with the root page table changed only once.
    /// This might reduce the overhead of switching page tables twice.
    #[allow(dead_code)]
    pub fn switch(&self, to: &Self) {
        self.user_count.fetch_add(1, Ordering::Acquire);
        let root_page_table = self.root_page_table.load(Ordering::Relaxed);
        assert_ne!(root_page_table, 0);
        set_root_page_table_pfn(PFN::from(PAddr::from(root_page_table)));

        let old_user_count = to.user_count.fetch_sub(1, Ordering::Release);
        assert_ne!(old_user_count, 0);
    }
    /// Replace the current page table with a new one.
    ///
    /// # Safety
    /// This function should be called only when we are sure that the `MMList` is not
    /// being used by any other thread.
    pub unsafe fn replace(&self, new: Option<Self>) {
        eonix_preempt::disable();

        assert_eq!(
            self.user_count.load(Ordering::Relaxed),
            1,
            "We should be the only user"
        );
        assert_eq!(
            new.as_ref()
                .map(|new_mm| new_mm.user_count.load(Ordering::Relaxed))
                .unwrap_or(0),
            0,
            "`new` must not be used by anyone"
        );

        let old_root_page_table = self.root_page_table.load(Ordering::Relaxed);
        let current_root_page_table = get_root_page_table_pfn();
        assert_eq!(
            PAddr::from(current_root_page_table).addr(),
            old_root_page_table,
            "We should be the only user"
        );

        let new_root_page_table = match &new {
            Some(new_mm) => new_mm.root_page_table.load(Ordering::Relaxed),
            None => GLOBAL_PAGE_TABLE.addr().addr(),
        };
        set_root_page_table_pfn(PFN::from(PAddr::from(new_root_page_table)));
        self.root_page_table
            .store(new_root_page_table, Ordering::Relaxed);

        // TODO: Check whether we should wake someone up if they've been put
        // to sleep when calling `vfork`.
        self.inner
            .swap(new.map(|new_mm| new_mm.inner.swap(None)).flatten());

        eonix_preempt::enable();
    }
    /// No need to do invalidation manually; `PageTable` already does it.
    pub async fn unmap(&self, start: VAddr, len: usize) -> KResult<()> {
        let pages_to_free = self.inner.borrow().lock().await.unmap(start, len)?;

        // We need to ensure that the pages are not accessed anymore. Users that
        // still have these pages in their TLBs could read from or write to them,
        // so flush the TLBs of all our users first.
        self.flush_user_tlbs().await;

        // Then free the pages.
        drop(pages_to_free);

        Ok(())
    }
    pub async fn protect(&self, start: VAddr, len: usize, prot: Permission) -> KResult<()> {
        self.inner.borrow().lock().await.protect(start, len, prot)?;

        // Flush the TLBs since the PTE attributes have changed.
        self.flush_user_tlbs().await;

        Ok(())
    }
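    /// Map the single vDSO page at a fixed user address as present, readable,
    /// executable and user-accessible.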
    pub fn map_vdso(&self) -> KResult<()> {
        unsafe extern "C" {
            fn VDSO_PADDR();
        }
        static VDSO_PADDR_VALUE: &'static unsafe extern "C" fn() =
            &(VDSO_PADDR as unsafe extern "C" fn());

        let vdso_paddr = unsafe {
            // SAFETY: We read through a volatile pointer to prevent the compiler from
            // optimizing this into `la` instructions and causing a linking error.
            (VDSO_PADDR_VALUE as *const _ as *const usize).read_volatile()
        };
        let vdso_pfn = PFN::from(PAddr::from(vdso_paddr));

        const VDSO_START: VAddr = VAddr::from(0x7f00_0000_0000);
        const VDSO_SIZE: usize = 0x1000;

        let inner = self.inner.borrow();
        let inner = Task::block_on(inner.lock());

        let mut pte_iter = inner
            .page_table
            .iter_user(VRange::from(VDSO_START).grow(VDSO_SIZE));

        let pte = pte_iter.next().expect("There should be at least one PTE");
        pte.set(
            vdso_pfn,
            (PageAttribute::PRESENT
                | PageAttribute::READ
                | PageAttribute::EXECUTE
                | PageAttribute::USER
                | PageAttribute::ACCESSED)
                .into(),
        );
        assert!(pte_iter.next().is_none(), "There should be only one PTE");

        Ok(())
    }
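    /// Map `len` bytes at `hint`, falling back to the next available user
    /// address if `hint` is `VAddr::NULL` or the hinted range is already
    /// occupied. Returns the address that was actually used.
    ///
    /// A minimal usage sketch (the length and flags are illustrative only):
    ///
    /// ```ignore
    /// let addr = mm_list.mmap_hint(
    ///     VAddr::NULL,
    ///     4 * PAGE_SIZE,
    ///     Mapping::Anonymous,
    ///     Permission { read: true, write: true, execute: false },
    ///     false,
    /// )?;
    /// ```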
    pub fn mmap_hint(
        &self,
        hint: VAddr,
        len: usize,
        mapping: Mapping,
        permission: Permission,
        is_shared: bool,
    ) -> KResult<VAddr> {
        let inner = self.inner.borrow();
        let mut inner = Task::block_on(inner.lock());

        if hint == VAddr::NULL {
            let at = inner.find_available(hint, len).ok_or(ENOMEM)?;
            inner.mmap(at, len, mapping, permission, is_shared)?;
            return Ok(at);
        }

        match inner.mmap(hint, len, mapping.clone(), permission, is_shared) {
            Ok(()) => Ok(hint),
            Err(EEXIST) => {
                let at = inner.find_available(hint, len).ok_or(ENOMEM)?;
                inner.mmap(at, len, mapping, permission, is_shared)?;
                Ok(at)
            }
            Err(err) => Err(err),
        }
    }

    pub fn mmap_fixed(
        &self,
        at: VAddr,
        len: usize,
        mapping: Mapping,
        permission: Permission,
        is_shared: bool,
    ) -> KResult<VAddr> {
        Task::block_on(self.inner.borrow().lock())
            .mmap(at, len, mapping.clone(), permission, is_shared)
            .map(|_| at)
    }
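    /// Query or grow the program break. With `None`, the current break is
    /// returned; with `Some(pos)`, the break grows to the page-aligned `pos`.
    /// Requests that would shrink the break or collide with an existing
    /// mapping return the current break unchanged.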
    pub fn set_break(&self, pos: Option<VAddr>) -> VAddr {
        let inner = self.inner.borrow();
        let mut inner = Task::block_on(inner.lock());

        // SAFETY: `set_break` is only called in syscalls, where the program break
        // should be valid.
        assert!(inner.break_start.is_some() && inner.break_pos.is_some());
        let break_start = inner.break_start.unwrap();
        let current_break = inner.break_pos.unwrap();

        let pos = match pos {
            None => return current_break,
            Some(pos) => pos.ceil(),
        };

        if current_break > pos {
            return current_break;
        }

        let range = VRange::new(current_break, pos);
        if !inner.check_overlapping_range(range) {
            return current_break;
        }

        if !inner.areas.contains(&break_start) {
            inner.areas.insert(MMArea::new(
                break_start,
                Mapping::Anonymous,
                Permission {
                    read: true,
                    write: true,
                    execute: false,
                },
                false,
            ));
        }

        let program_break = inner
            .areas
            .get(&break_start)
            .expect("Program break area should be valid");

        let len = pos - current_break;
        let range_to_grow = VRange::from(program_break.range().end()).grow(len);

        program_break.grow(len);

        inner.page_table.set_anonymous(
            range_to_grow,
            Permission {
                read: true,
                write: true,
                execute: false,
            },
        );

        inner.break_pos = Some(pos);
        pos
    }
    /// This should be called only **once** for every thread.
    pub fn register_break(&self, start: VAddr) {
        let inner = self.inner.borrow();
        let mut inner = Task::block_on(inner.lock());

        assert!(inner.break_start.is_none() && inner.break_pos.is_none());
        inner.break_start = Some(start.into());
        inner.break_pos = Some(start);
    }
    /// Access the memory area with the given function.
    /// The function is called once per touched page with an offset and a
    /// mutable slice of that page's bytes within the requested range.
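    ///
    /// A minimal usage sketch, assuming `mm_list` is an `MMList` and the range
    /// is mapped (the address and length are illustrative only):
    ///
    /// ```ignore
    /// mm_list.access_mut(VAddr::from(0x1000_0000), 64, |_, bytes| bytes.fill(0))?;
    /// ```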
    pub fn access_mut<F>(&self, start: VAddr, len: usize, func: F) -> KResult<()>
    where
        F: Fn(usize, &mut [u8]),
    {
        // First, validate the address range.
        let end = start + len;
        if !start.is_user() || !end.is_user() {
            return Err(EINVAL);
        }

        let inner = self.inner.borrow();
        let inner = Task::block_on(inner.lock());

        let mut offset = 0;
        let mut remaining = len;
        let mut current = start;

        while remaining > 0 {
            let area = inner.overlapping_addr(current).ok_or(EFAULT)?;

            let area_start = area.range().start();
            let area_end = area.range().end();
            let area_remaining = area_end - current;
            let access_len = remaining.min(area_remaining);
            let access_end = current + access_len;

            for (idx, pte) in inner
                .page_table
                .iter_user(VRange::new(current, access_end))
                .enumerate()
            {
                let page_start = current.floor() + idx * 0x1000;
                let page_end = page_start + 0x1000;

                area.handle(pte, page_start - area_start, None)?;

                let start_offset = if page_start < current {
                    current - page_start
                } else {
                    0
                };
                let end_offset = if page_end > access_end {
                    access_end - page_start
                } else {
                    0x1000
                };

                unsafe {
                    // SAFETY: We are sure that the page is valid and we have the right to access it.
                    Page::with_raw(pte.get_pfn(), |page| {
                        // SAFETY: The caller guarantees that no one else is using the page.
                        let page_data = page.as_memblk().as_bytes_mut();
                        func(
                            offset + idx * 0x1000,
                            &mut page_data[start_offset..end_offset],
                        );
                    });
                }
            }

            offset += access_len;
            remaining -= access_len;
            current = access_end;
        }

        Ok(())
    }
}
impl fmt::Debug for MMList {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MMList").finish()
    }
}
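/// Bulk helpers over a user page table, applying a PTE-level operation to
/// every entry in a [`VRange`].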
trait PageTableExt {
    fn set_anonymous(&self, range: VRange, permission: Permission);
    fn set_mmapped(&self, range: VRange, permission: Permission);
    fn set_copy_on_write(&self, from: &Self, range: VRange);
    fn set_copied(&self, from: &Self, range: VRange);
}
impl PageTableExt for KernelPageTable<'_> {
    fn set_anonymous(&self, range: VRange, permission: Permission) {
        for pte in self.iter_user(range) {
            pte.set_anonymous(permission.execute);
        }
    }

    fn set_mmapped(&self, range: VRange, permission: Permission) {
        for pte in self.iter_user(range) {
            pte.set_mapped(permission.execute);
        }
    }

    fn set_copy_on_write(&self, from: &Self, range: VRange) {
        let to_iter = self.iter_user(range);
        let from_iter = from.iter_user(range);

        for (to, from) in to_iter.zip(from_iter) {
            to.set_copy_on_write(from);
        }
    }

    fn set_copied(&self, from: &Self, range: VRange) {
        let to_iter = self.iter_user(range);
        let from_iter = from.iter_user(range);

        for (to, from) in to_iter.zip(from_iter) {
            let (pfn, attr) = from.get();
            to.set(pfn, attr);
        }
    }
}
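/// Per-PTE helpers for setting up anonymous, file-mapped and copy-on-write
/// entries.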
trait PTEExt {
    // Private anonymous mapping.
    fn set_anonymous(&mut self, execute: bool);
    // File-mapped or shared anonymous mapping.
    fn set_mapped(&mut self, execute: bool);
    fn set_copy_on_write(&mut self, from: &mut Self);
}

impl<T> PTEExt for T
where
    T: PTE,
{
    fn set_anonymous(&mut self, execute: bool) {
        // The writable flag is set during page fault handling, while the executable
        // flag is preserved across page faults, so we set the executable flag now.
        let mut attr = PageAttribute::PRESENT
            | PageAttribute::READ
            | PageAttribute::USER
            | PageAttribute::COPY_ON_WRITE;
        attr.set(PageAttribute::EXECUTE, execute);

        self.set(EMPTY_PAGE.clone().into_raw(), T::Attr::from(attr));
    }

    fn set_mapped(&mut self, execute: bool) {
        // The writable flag is set during page fault handling, while the executable
        // flag is preserved across page faults, so we set the executable flag now.
        let mut attr = PageAttribute::READ | PageAttribute::USER | PageAttribute::MAPPED;
        attr.set(PageAttribute::EXECUTE, execute);

        self.set(EMPTY_PAGE.clone().into_raw(), T::Attr::from(attr));
    }

    fn set_copy_on_write(&mut self, from: &mut Self) {
        let mut from_attr = from
            .get_attr()
            .as_page_attr()
            .expect("Not a page attribute");
        if !from_attr.intersects(PageAttribute::PRESENT | PageAttribute::MAPPED) {
            return;
        }

        from_attr.remove(PageAttribute::WRITE);
        from_attr.insert(PageAttribute::COPY_ON_WRITE);

        let pfn = unsafe {
            // SAFETY: We get the pfn from a valid page table entry, so it should be valid as well.
            Page::with_raw(from.get_pfn(), |page| page.clone().into_raw())
        };

        self.set(pfn, T::Attr::from(from_attr & !PageAttribute::ACCESSED));
        from.set_attr(T::Attr::from(from_attr));
    }
}