mm_area.rs

use super::mm_list::EMPTY_PAGE;
use super::paging::AllocZeroed as _;
use super::{AsMemoryBlock, Mapping, Page, Permission};
use crate::kernel::constants::EINVAL;
use crate::prelude::KResult;
use core::borrow::Borrow;
use core::cell::UnsafeCell;
use core::cmp;
use eonix_mm::address::{AddrOps as _, VAddr, VRange};
use eonix_mm::page_table::{PageAttribute, RawAttribute, PTE};
use eonix_mm::paging::{PAGE_SIZE, PFN};
use eonix_runtime::task::Task;
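
/// A virtual memory area (VMA): a contiguous range of virtual addresses
/// together with the mapping that backs it and its access permissions.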
#[derive(Debug)]
pub struct MMArea {
    range: UnsafeCell<VRange>,
    pub(super) mapping: Mapping,
    pub(super) permission: Permission,
    pub is_shared: bool,
}
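
// `Clone` is implemented by hand because `UnsafeCell<VRange>` is not
// `Clone`; we copy the current range out and re-wrap it.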
impl Clone for MMArea {
    fn clone(&self) -> Self {
        Self {
            range: UnsafeCell::new(self.range()),
            mapping: self.mapping.clone(),
            permission: self.permission,
            is_shared: self.is_shared,
        }
    }
}

impl MMArea {
    pub fn new(range: VRange, mapping: Mapping, permission: Permission, is_shared: bool) -> Self {
        Self {
            range: range.into(),
            mapping,
            permission,
            is_shared,
        }
    }

    fn range_borrow(&self) -> &VRange {
        // SAFETY: The only way to get a reference to an `MMArea` is through
        // `MMListInner`, which is locked with IRQs disabled, so nobody else
        // can mutate the range while we hold this reference.
        unsafe { self.range.get().as_ref().unwrap() }
    }

    pub fn range(&self) -> VRange {
        *self.range_borrow()
    }

    /// # Safety
    /// The caller must ensure that the grown range cannot overlap any other
    /// range in the containing `MMList`.
    pub fn grow(&self, count: usize) {
        let range = unsafe { self.range.get().as_mut().unwrap() };
        range.clone_from(&self.range_borrow().grow(count));
    }
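
    /// Split the area at the page-aligned address `at`, returning the parts
    /// below and above `at` as `(left, right)`. For a file mapping, the right
    /// half's file offset is advanced by the size of the left half.
    ///
    /// A minimal sketch of the boundary behavior (not a compiled doctest;
    /// `area` and `base` are hypothetical):
    ///
    /// ```ignore
    /// // Assuming `area` covers [base, base + 2 * PAGE_SIZE):
    /// let (left, right) = area.split(base + PAGE_SIZE);
    /// assert!(left.is_some() && right.is_some()); // split strictly inside
    /// // Splitting exactly at the start yields (None, Some(area));
    /// // splitting at or beyond the end yields (Some(area), None).
    /// ```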
    pub fn split(mut self, at: VAddr) -> (Option<Self>, Option<Self>) {
        assert!(at.is_page_aligned());

        match self.range_borrow().cmp(&VRange::from(at)) {
            cmp::Ordering::Less => (Some(self), None),
            cmp::Ordering::Greater => (None, Some(self)),
            cmp::Ordering::Equal => {
                let diff = at - self.range_borrow().start();
                if diff == 0 {
                    return (None, Some(self));
                }

                let right = Self {
                    range: VRange::new(at, self.range_borrow().end()).into(),
                    permission: self.permission,
                    mapping: match &self.mapping {
                        Mapping::Anonymous => Mapping::Anonymous,
                        Mapping::File(mapping) => Mapping::File(mapping.offset(diff)),
                    },
                    is_shared: self.is_shared,
                };

                let new_range = self.range_borrow().shrink(self.range_borrow().end() - at);
                *self.range.get_mut() = new_range;
                (Some(self), Some(right))
            }
        }
    }
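
    /// Resolve a copy-on-write fault on the page at `*pfn`: the COW flag is
    /// cleared and the writable bit restored according to the area's
    /// permission. If we hold the only reference to the page it is reused in
    /// place; otherwise its contents are copied into a freshly allocated page
    /// (a zeroed page when the source is the shared empty page) and `*pfn` is
    /// redirected to the private copy.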
    pub fn handle_cow(&self, pfn: &mut PFN, attr: &mut PageAttribute) {
        assert!(attr.contains(PageAttribute::COPY_ON_WRITE));
        attr.remove(PageAttribute::COPY_ON_WRITE);
        attr.set(PageAttribute::WRITE, self.permission.write);

        let page = unsafe { Page::from_raw(*pfn) };
        if page.is_exclusive() {
            // SAFETY: This is actually safe. If we read `1` here while holding
            // the `MMList` lock, no other process can be sharing the page and
            // no other thread can be making it COW at the same time.
            core::mem::forget(page);
            return;
        }

        let new_page = if *pfn == EMPTY_PAGE.pfn() {
            Page::zeroed()
        } else {
            let new_page = Page::alloc();
            unsafe {
                // SAFETY: `page` is CoW, which means that others won't write to it.
                let old_page_data = page.as_memblk().as_bytes();
                // SAFETY: `new_page` is exclusively owned by us.
                let new_page_data = new_page.as_memblk().as_bytes_mut();
                new_page_data.copy_from_slice(old_page_data);
            }
            new_page
        };

        attr.remove(PageAttribute::ACCESSED);
        *pfn = new_page.into_raw();
    }
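
    /// Handle a fault on a file-backed page (`PageAttribute::MAPPED`): look
    /// the page up in the file's page cache and map it, copying it into a
    /// private page where the mapping semantics require one.
    ///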
    /// # Arguments
    /// * `offset`: The offset from the start of the mapping, aligned to a 4KB boundary.
    pub async fn handle_mmap(
        &self,
        pfn: &mut PFN,
        attr: &mut PageAttribute,
        offset: usize,
        write: bool,
    ) -> KResult<()> {
        let Mapping::File(file_mapping) = &self.mapping else {
            panic!("Anonymous mapping should not be PA_MMAP");
        };
        assert!(offset < file_mapping.length, "Offset out of range");

        let Some(page_cache) = file_mapping.file.page_cache() else {
            panic!("A mapped file should have a page cache");
        };

        let file_offset = file_mapping.offset + offset;
        let cnt_to_read = (file_mapping.length - offset).min(0x1000);

        page_cache
            .with_page(file_offset, |page, cache_page| {
                // Non-write fault: map the page found in the page cache directly.
                // Write fault: behavior depends on whether the mapping is shared
                // or private.
                if !write {
                    // The tail of a mapping (e.g. `.bss`) is only partially
                    // backed by the file. If less than a full page remains,
                    // copy the file-backed part into a zeroed private page.
                    if cnt_to_read < PAGE_SIZE {
                        let new_page = Page::zeroed();
                        unsafe {
                            let page_data = new_page.as_memblk().as_bytes_mut();
                            page_data[..cnt_to_read]
                                .copy_from_slice(&page.as_memblk().as_bytes()[..cnt_to_read]);
                        }
                        *pfn = new_page.into_raw();
                    } else {
                        *pfn = page.clone().into_raw();
                    }

                    if self.permission.write {
                        if self.is_shared {
                            // The page may never actually be written, but we
                            // conservatively assume it will become dirty.
                            cache_page.set_dirty();
                            attr.insert(PageAttribute::WRITE);
                        } else {
                            attr.insert(PageAttribute::COPY_ON_WRITE);
                        }
                    }
                } else {
                    if self.is_shared {
                        cache_page.set_dirty();
                        *pfn = page.clone().into_raw();
                    } else {
                        let new_page = Page::zeroed();
                        unsafe {
                            let page_data = new_page.as_memblk().as_bytes_mut();
                            page_data[..cnt_to_read]
                                .copy_from_slice(&page.as_memblk().as_bytes()[..cnt_to_read]);
                        }
                        *pfn = new_page.into_raw();
                    }
                    attr.insert(PageAttribute::WRITE);
                }
            })
            .await?
            .ok_or(EINVAL)?;

        attr.insert(PageAttribute::PRESENT);
        attr.remove(PageAttribute::MAPPED);
        Ok(())
    }
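
    /// Page fault entry point for this area: resolves copy-on-write and
    /// file-backed (`MAPPED`) faults, marks the page accessed (and dirty on a
    /// write), then writes the updated PFN and attributes back to the PTE.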
    pub fn handle(&self, pte: &mut impl PTE, offset: usize, write: bool) -> KResult<()> {
        let mut attr = pte.get_attr().as_page_attr().expect("Not a page attribute");
        let mut pfn = pte.get_pfn();

        if attr.contains(PageAttribute::COPY_ON_WRITE) {
            self.handle_cow(&mut pfn, &mut attr);
        }

        if attr.contains(PageAttribute::MAPPED) {
            Task::block_on(self.handle_mmap(&mut pfn, &mut attr, offset, write))?;
        }

        attr.insert(PageAttribute::ACCESSED);
        if write {
            attr.insert(PageAttribute::DIRTY);
        }

        pte.set(pfn, attr.into());
        Ok(())
    }
}
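
// Equality and ordering delegate to the underlying `VRange`, and
// `Borrow<VRange>` allows an `MMArea` to be looked up by range in an ordered
// collection such as a `BTreeSet<MMArea>`.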
impl Eq for MMArea {}

impl PartialEq for MMArea {
    fn eq(&self, other: &Self) -> bool {
        self.range_borrow().eq(other.range_borrow())
    }
}

impl PartialOrd for MMArea {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.range_borrow().partial_cmp(other.range_borrow())
    }
}

impl Ord for MMArea {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.range_borrow().cmp(other.range_borrow())
    }
}

impl Borrow<VRange> for MMArea {
    fn borrow(&self) -> &VRange {
        self.range_borrow()
    }
}