// page.rs
use super::{GlobalPageAlloc, PageAlloc, RawPage as _, PFN};
use crate::address::{AddrRange, PAddr};
use core::{fmt, mem::ManuallyDrop, ptr::NonNull, sync::atomic::Ordering};

pub const PAGE_SIZE: usize = 4096;

// Page sizes at each translation level: 4 KiB, 2 MiB and 1 GiB.
pub const LEVEL0_PAGE_SIZE: usize = 4096;
pub const LEVEL1_PAGE_SIZE: usize = 2 * 1024 * 1024;
pub const LEVEL2_PAGE_SIZE: usize = 1024 * 1024 * 1024;

/// log2 of `PAGE_SIZE`.
pub const PAGE_SIZE_BITS: u32 = PAGE_SIZE.trailing_zeros();
/// A block of memory that is aligned to the page size, used for page-aligned
/// allocations and accesses.
#[allow(dead_code)]
#[repr(align(4096))]
pub struct PageBlock([u8; PAGE_SIZE]);
/// A trait that provides the kernel access to pages.
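///
/// # Example
///
/// A minimal sketch of an implementor, assuming a kernel that direct-maps all
/// of physical memory at a fixed offset; `DIRECT_MAP_BASE` and the `usize`
/// conversion from `PFN` are hypothetical:
///
/// ```ignore
/// struct DirectMapAccess;
///
/// impl PageAccess for DirectMapAccess {
///     unsafe fn get_ptr_for_pfn(pfn: PFN) -> NonNull<PageBlock> {
///         const DIRECT_MAP_BASE: usize = 0xffff_8000_0000_0000;
///         let vaddr = DIRECT_MAP_BASE + (usize::from(pfn) << PAGE_SIZE_BITS);
///         // SAFETY: the caller guarantees that `pfn` refers to an existing
///         // page, so the direct-mapped address is non-null and valid.
///         unsafe { NonNull::new_unchecked(vaddr as *mut PageBlock) }
///     }
/// }
/// ```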
pub trait PageAccess {
    /// Returns a kernel-accessible pointer to the page referenced by the given
    /// physical frame number.
    ///
    /// # Safety
    /// Calling this function with a PFN that does not refer to an existing
    /// page causes undefined behavior.
    unsafe fn get_ptr_for_pfn(pfn: PFN) -> NonNull<PageBlock>;

    /// Returns a kernel-accessible pointer to the given page.
    fn get_ptr_for_page<A: PageAlloc>(page: &Page<A>) -> NonNull<PageBlock> {
        // SAFETY: `page.pfn()` is guaranteed to be valid while `page` is alive.
        unsafe { Self::get_ptr_for_pfn(page.pfn()) }
    }
}
/// A page allocated in allocator `A`.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct Page<A: PageAlloc> {
    raw_page: A::RawPage,
    alloc: A,
}

// SAFETY: the reference count is maintained atomically, so `Page` handles can
// be moved to and shared between threads.
unsafe impl<A: PageAlloc> Send for Page<A> {}
unsafe impl<A: PageAlloc> Sync for Page<A> {}
impl<A> Page<A>
where
    A: GlobalPageAlloc,
{
    /// Allocate a page of the given *order*.
    pub fn alloc_order(order: u32) -> Self {
        Self::alloc_order_in(order, A::global())
    }

    /// Allocate exactly one page.
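    ///
    /// # Example
    ///
    /// A usage sketch, assuming some `KernelAlloc` type implementing
    /// `GlobalPageAlloc` (the name is hypothetical):
    ///
    /// ```ignore
    /// let page = Page::<KernelAlloc>::alloc();
    /// assert_eq!(page.order(), 0);
    /// assert_eq!(page.len(), PAGE_SIZE);
    /// ```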
    pub fn alloc() -> Self {
        Self::alloc_in(A::global())
    }

    /// Allocate a contiguous block of pages that can contain at least `count` pages.
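    ///
    /// # Example
    ///
    /// Since a `Page` always covers a power-of-two number of base pages, the
    /// requested count is effectively rounded up (a sketch; `KernelAlloc` is
    /// hypothetical):
    ///
    /// ```ignore
    /// let page = Page::<KernelAlloc>::alloc_at_least(3);
    /// assert!(page.len() >= 3 * PAGE_SIZE); // order 2, i.e. 4 pages
    /// ```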
    pub fn alloc_at_least(count: usize) -> Self {
        Self::alloc_at_least_in(count, A::global())
    }
    /// Acquire ownership of the page pointed to by `pfn`, leaving `refcount`
    /// untouched.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to a valid page allocated
    /// through `alloc_order()` and that the page has not been freed or
    /// deallocated yet.
    ///
    /// No checks are done. Any violation of this assumption may lead to
    /// undefined behavior.
    pub unsafe fn from_raw_unchecked(pfn: PFN) -> Self {
        unsafe { Self::from_raw_unchecked_in(pfn, A::global()) }
    }
    /// Acquire ownership of the page pointed to by `pfn`, leaving `refcount`
    /// untouched.
    ///
    /// This function is a checked wrapper around `from_raw_unchecked()` that
    /// asserts that the page is valid and managed by the allocator.
    ///
    /// # Panics
    /// This function panics if the page is not valid or is not managed by the
    /// allocator.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to an existing page (i.e. one
    /// inside the global page array) and that the page will not be freed or
    /// deallocated during the call.
    pub unsafe fn from_raw(pfn: PFN) -> Self {
        unsafe { Self::from_raw_in(pfn, A::global()) }
    }
    /// Do some work with the page without touching the reference count, under
    /// the same restrictions as `from_raw()`.
    ///
    /// # Safety
    /// See `from_raw()` for the safety requirements.
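    ///
    /// # Example
    ///
    /// A sketch of inspecting a page without affecting its refcount
    /// (`KernelAlloc` and `some_pfn` are hypothetical):
    ///
    /// ```ignore
    /// let exclusive = unsafe {
    ///     Page::<KernelAlloc>::with_raw(some_pfn, |page| page.is_exclusive())
    /// };
    /// ```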
    pub unsafe fn with_raw<F, O>(pfn: PFN, func: F) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        unsafe { Self::with_raw_in(pfn, func, A::global()) }
    }
    /// Do some work with the page without touching the reference count, under
    /// the same restrictions as `from_raw_unchecked()`.
    ///
    /// # Safety
    /// See `from_raw_unchecked()` for the safety requirements.
    pub unsafe fn with_raw_unchecked<F, O>(pfn: PFN, func: F) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        unsafe { Self::with_raw_unchecked_in(pfn, func, A::global()) }
    }
}
impl<A> Page<A>
where
    A: PageAlloc,
{
    /// Allocate a page of the given *order* in `alloc`.
    pub fn alloc_order_in(order: u32, alloc: A) -> Self {
        Self {
            raw_page: alloc.alloc_order(order).expect("Out of memory"),
            alloc,
        }
    }

    /// Allocate exactly one page in `alloc`.
    pub fn alloc_in(alloc: A) -> Self {
        Self {
            raw_page: alloc.alloc().expect("Out of memory"),
            alloc,
        }
    }

    /// Allocate a contiguous block of pages that can contain at least `count`
    /// pages in `alloc`.
    pub fn alloc_at_least_in(count: usize, alloc: A) -> Self {
        Self {
            raw_page: alloc.alloc_at_least(count).expect("Out of memory"),
            alloc,
        }
    }
    /// Acquire ownership of the page pointed to by `pfn`, leaving `refcount`
    /// untouched.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to a valid page managed by
    /// `alloc` and that the page has not been freed or deallocated yet.
    ///
    /// No checks are done. Any violation of this assumption may lead to
    /// undefined behavior.
    pub unsafe fn from_raw_unchecked_in(pfn: PFN, alloc: A) -> Self {
        Self {
            raw_page: A::RawPage::from(pfn),
            alloc,
        }
    }
    /// Acquire ownership of the page pointed to by `pfn`, leaving `refcount`
    /// untouched.
    ///
    /// This function is a checked wrapper around `from_raw_unchecked_in()`
    /// that asserts that the page is valid and managed by the allocator.
    ///
    /// # Panics
    /// This function panics if the page is not valid or is not managed by the
    /// allocator.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to an existing page (i.e. one
    /// inside the global page array) and that the page will not be freed or
    /// deallocated during the call.
    pub unsafe fn from_raw_in(pfn: PFN, alloc: A) -> Self {
        unsafe {
            // The caller guarantees that the page is inside the global page
            // array, so we can check whether `alloc` manages it.
            assert!(alloc.has_management_over(A::RawPage::from(pfn)));
            // SAFETY: We've checked the validity of the page, and the caller
            // guarantees that it will not be freed or deallocated during the
            // call.
            Self::from_raw_unchecked_in(pfn, alloc)
        }
    }
    /// Do some work with the page without touching the reference count, under
    /// the same restrictions as `from_raw_in()`.
    ///
    /// # Safety
    /// See `from_raw_in()` for the safety requirements.
    pub unsafe fn with_raw_in<F, O>(pfn: PFN, func: F, alloc: A) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        unsafe {
            // Wrap in `ManuallyDrop` so the borrowed page's refcount is not
            // decremented when `me` goes out of scope.
            let me = ManuallyDrop::new(Self::from_raw_in(pfn, alloc));
            func(&me)
        }
    }
    /// Do some work with the page without touching the reference count, under
    /// the same restrictions as `from_raw_unchecked_in()`.
    ///
    /// # Safety
    /// See `from_raw_unchecked_in()` for the safety requirements.
    pub unsafe fn with_raw_unchecked_in<F, O>(pfn: PFN, func: F, alloc: A) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        unsafe {
            let me = ManuallyDrop::new(Self::from_raw_unchecked_in(pfn, alloc));
            func(&me)
        }
    }
    /// Whether we are the only owner of the page.
    pub fn is_exclusive(&self) -> bool {
        self.raw_page.refcount().load(Ordering::Acquire) == 1
    }

    /// Returns the *order* of the page, which is the log2 of the number of
    /// base pages contained in the page object.
    pub fn order(&self) -> u32 {
        self.raw_page.order()
    }

    /// Returns the total size of the page in bytes, i.e. `PAGE_SIZE << order`.
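    ///
    /// # Example
    ///
    /// An order-2 page covers four base pages (`KernelAlloc` is hypothetical):
    ///
    /// ```ignore
    /// let page = Page::<KernelAlloc>::alloc_order(2);
    /// assert_eq!(page.len(), 4 * PAGE_SIZE); // 16 KiB
    /// ```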
    pub fn len(&self) -> usize {
        1 << (self.order() + PAGE_SIZE_BITS)
    }
    /// Consumes the `Page` and returns the physical frame number without
    /// releasing the reference the page holds.
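    ///
    /// # Example
    ///
    /// A round trip through a raw PFN, e.g. for stashing a page in a page
    /// table entry (`KernelAlloc` is hypothetical):
    ///
    /// ```ignore
    /// let pfn = Page::<KernelAlloc>::alloc().into_raw();
    /// // ... later, reacquire ownership without touching the refcount.
    /// let page = unsafe { Page::<KernelAlloc>::from_raw_unchecked(pfn) };
    /// ```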
    pub fn into_raw(self) -> PFN {
        // Prevent `drop()` from releasing the reference we are handing out.
        let me = ManuallyDrop::new(self);
        me.pfn()
    }
    /// Returns the physical frame number of the page, which is guaranteed to
    /// be valid.
    pub fn pfn(&self) -> PFN {
        self.raw_page.into()
    }

    /// Returns the start physical address of the page, which is guaranteed to
    /// be aligned to the page size and valid.
    pub fn start(&self) -> PAddr {
        PAddr::from(self.pfn())
    }

    /// Returns the physical address range of the page, which is guaranteed to
    /// be aligned to the page size and valid.
    pub fn range(&self) -> AddrRange<PAddr> {
        AddrRange::from(self.start()).grow(self.len())
    }

    /// Get the allocator that manages this page.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }
}
impl<A> Clone for Page<A>
where
    A: PageAlloc,
{
    fn clone(&self) -> Self {
        // `Relaxed` is sufficient here for the same reason as in the copy
        // constructor of `std::shared_ptr`: we already hold a reference, so
        // the count cannot concurrently drop to zero.
        self.raw_page.refcount().fetch_add(1, Ordering::Relaxed);
        Self {
            raw_page: self.raw_page,
            alloc: self.alloc.clone(),
        }
    }
}
impl<A> Drop for Page<A>
where
    A: PageAlloc,
{
    fn drop(&mut self) {
        // `AcqRel` makes the final decrement synchronize with all earlier
        // decrements, so deallocation cannot observe a stale page state.
        match self.raw_page.refcount().fetch_sub(1, Ordering::AcqRel) {
            0 => panic!("Refcount for an in-use page is 0"),
            1 => unsafe {
                // `self.raw_page` points to a valid page inside the global
                // page array; check that it is managed by our allocator.
                assert!(self.alloc.has_management_over(self.raw_page));
                // SAFETY: `self.raw_page` is managed by the allocator and the
                // last reference is being dropped.
                self.alloc.dealloc(self.raw_page)
            },
            _ => {}
        }
    }
}
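
// A refcounting sketch: cloning bumps the count; dropping the last handle
// returns the page to its allocator (`KernelAlloc` is hypothetical).
//
//     let a = Page::<KernelAlloc>::alloc();
//     let b = a.clone();      // refcount: 2
//     assert!(!a.is_exclusive());
//     drop(b);                // refcount: 1
//     assert!(a.is_exclusive());
//     drop(a);                // refcount: 0, page returned to the allocator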
impl<A: PageAlloc> fmt::Debug for Page<A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Page({:?}, order={})", self.pfn(), self.order())
    }
}