use super::{GlobalPageAlloc, PageAlloc, RawPage as _, PFN};
use crate::address::{AddrRange, PAddr};
use core::{fmt, mem::ManuallyDrop, ptr::NonNull, sync::atomic::Ordering};

/// The size of a base page in bytes.
pub const PAGE_SIZE: usize = 4096;

/// Supported page sizes, in bytes.
#[derive(Clone, Copy)]
pub enum PageSize {
    _4KbPage = 4096,
    _2MbPage = 2 * 1024 * 1024,
    _1GbPage = 1024 * 1024 * 1024,
}

/// Log2 of `PAGE_SIZE`: the number of bits in a page offset.
pub const PAGE_SIZE_BITS: u32 = PAGE_SIZE.trailing_zeros();

/// A block of memory that is aligned to the page size, used for
/// page-aligned allocations.
#[allow(dead_code)]
#[repr(align(4096))]
pub struct PageBlock([u8; PAGE_SIZE]);

/// A trait that gives the kernel access to pages by physical frame number.
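///
/// # Example
///
/// An illustrative sketch, not part of the original code: an implementation
/// for a kernel that maps all physical memory at a fixed direct-map offset.
/// `DirectMap`, `PHYS_MAP_BASE`, and the `PFN`-to-`usize` conversion are
/// assumptions made for this example.
///
/// ```ignore
/// struct DirectMap;
///
/// impl PageAccess for DirectMap {
///     unsafe fn get_ptr_for_pfn(pfn: PFN) -> NonNull<PageBlock> {
///         // Hypothetical virtual base of the kernel's physical direct map.
///         const PHYS_MAP_BASE: usize = 0xffff_8000_0000_0000;
///         let offset = usize::from(pfn) << PAGE_SIZE_BITS;
///         // SAFETY (assumed): the direct map covers every valid PFN, so the
///         // resulting pointer is non-null and refers to mapped memory.
///         unsafe { NonNull::new_unchecked((PHYS_MAP_BASE + offset) as *mut PageBlock) }
///     }
/// }
/// ```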
pub trait PageAccess {
    /// Returns a kernel-accessible pointer to the page referenced by the given
    /// physical frame number.
    ///
    /// # Safety
    /// Calling this function with a PFN that does not refer to an existing
    /// page is undefined behavior.
    unsafe fn get_ptr_for_pfn(pfn: PFN) -> NonNull<PageBlock>;

    /// Returns a kernel-accessible pointer to the given page.
    fn get_ptr_for_page<A: PageAlloc>(page: &Page<A>) -> NonNull<PageBlock> {
        // SAFETY: `page.pfn()` is guaranteed to be valid.
        unsafe { Self::get_ptr_for_pfn(page.pfn()) }
    }
}

/// A `Page` allocated in allocator `A`.
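///
/// Pages are reference counted: `clone` increments the refcount and `drop`
/// decrements it, deallocating the page once the count reaches zero.
///
/// # Example
///
/// An illustrative sketch; `KernelAlloc` is a hypothetical `GlobalPageAlloc`
/// implementation standing in for whatever allocator the kernel provides.
///
/// ```ignore
/// let page = Page::<KernelAlloc>::alloc();
/// assert!(page.is_exclusive());
///
/// let shared = page.clone(); // refcount: 1 -> 2
/// assert!(!page.is_exclusive());
///
/// drop(shared); // refcount: 2 -> 1; the page is not freed yet
/// assert!(page.is_exclusive());
/// ```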
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct Page<A: PageAlloc> {
    raw_page: A::RawPage,
    alloc: A,
}

unsafe impl<A: PageAlloc> Send for Page<A> {}
unsafe impl<A: PageAlloc> Sync for Page<A> {}

impl<A> Page<A>
where
    A: GlobalPageAlloc,
{
    /// Allocate a page of the given *order*.
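    ///
    /// An order-`n` allocation covers `2^n` contiguous base pages.
    ///
    /// # Example
    ///
    /// Illustrative only; `KernelAlloc` is a hypothetical global allocator.
    ///
    /// ```ignore
    /// let page = Page::<KernelAlloc>::alloc_order(2); // 2^2 = 4 pages
    /// assert_eq!(page.order(), 2);
    /// assert_eq!(page.len(), 4 * PAGE_SIZE); // 16384 bytes
    /// ```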
    pub fn alloc_order(order: u32) -> Self {
        Self::alloc_order_in(order, A::global())
    }

    /// Allocate exactly one page.
    pub fn alloc() -> Self {
        Self::alloc_in(A::global())
    }

    /// Allocate a contiguous block containing at least `count` pages.
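    ///
    /// # Example
    ///
    /// Illustrative only; `KernelAlloc` is a hypothetical global allocator,
    /// and the allocator may round the request up (e.g. to a power-of-two
    /// order), so only a lower bound on the size is guaranteed.
    ///
    /// ```ignore
    /// let page = Page::<KernelAlloc>::alloc_at_least(3);
    /// assert!(page.len() >= 3 * PAGE_SIZE);
    /// ```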
    pub fn alloc_at_least(count: usize) -> Self {
        Self::alloc_at_least_in(count, A::global())
    }

    /// Acquire ownership of the page pointed to by `pfn`, leaving the
    /// refcount untouched.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to a valid page allocated
    /// through `alloc_order()` and that the page has not been freed or
    /// deallocated yet.
    ///
    /// No checks are done. Any violation of this assumption may lead to undefined behavior.
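    ///
    /// # Example
    ///
    /// A typical pairing with `into_raw()` (illustrative only; `KernelAlloc`
    /// is a hypothetical global allocator):
    ///
    /// ```ignore
    /// let pfn = Page::<KernelAlloc>::alloc().into_raw(); // refcount stays at 1
    /// // ... store `pfn` somewhere, e.g. in a page-table entry ...
    /// let page = unsafe { Page::<KernelAlloc>::from_raw_unchecked(pfn) };
    /// // Dropping `page` releases the reference taken by `alloc()`.
    /// ```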
    pub unsafe fn from_raw_unchecked(pfn: PFN) -> Self {
        unsafe { Self::from_raw_unchecked_in(pfn, A::global()) }
    }

    /// Acquire ownership of the page pointed to by `pfn`, leaving the
    /// refcount untouched.
    ///
    /// This is a wrapper around `from_raw_unchecked()` that performs a sanity
    /// check to ensure that the page is valid and managed by the allocator.
    ///
    /// # Panics
    /// This function panics if the page is not valid or is not managed by
    /// the allocator.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to an existing page (i.e.,
    /// one inside the global page array) and that the page will not be freed
    /// or deallocated during the call.
    pub unsafe fn from_raw(pfn: PFN) -> Self {
        unsafe { Self::from_raw_in(pfn, A::global()) }
    }

    /// Do some work with the page without touching the reference count, with
    /// the same restrictions as `from_raw()`.
    ///
    /// # Safety
    /// See `from_raw()` for the safety requirements.
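    ///
    /// # Example
    ///
    /// Illustrative only; `KernelAlloc` is a hypothetical global allocator
    /// and `pfn` is assumed to satisfy the safety requirements above.
    ///
    /// ```ignore
    /// // Inspect the page without changing its reference count.
    /// let order = unsafe { Page::<KernelAlloc>::with_raw(pfn, |page| page.order()) };
    /// ```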
    pub unsafe fn with_raw<F, O>(pfn: PFN, func: F) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        unsafe { Self::with_raw_in(pfn, func, A::global()) }
    }

    /// Do some work with the page without touching the reference count, with
    /// the same restrictions as `from_raw_unchecked()`.
    ///
    /// # Safety
    /// See `from_raw_unchecked()` for the safety requirements.
    pub unsafe fn with_raw_unchecked<F, O>(pfn: PFN, func: F) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        unsafe { Self::with_raw_unchecked_in(pfn, func, A::global()) }
    }
}

impl<A> Page<A>
where
    A: PageAlloc,
{
    /// Allocate a page of the given *order*.
    pub fn alloc_order_in(order: u32, alloc: A) -> Self {
        Self {
            raw_page: alloc.alloc_order(order).expect("Out of memory"),
            alloc,
        }
    }

    /// Allocate exactly one page.
    pub fn alloc_in(alloc: A) -> Self {
        Self {
            raw_page: alloc.alloc().expect("Out of memory"),
            alloc,
        }
    }

    /// Allocate a contiguous block containing at least `count` pages.
    pub fn alloc_at_least_in(count: usize, alloc: A) -> Self {
        Self {
            raw_page: alloc.alloc_at_least(count).expect("Out of memory"),
            alloc,
        }
    }

    /// Acquire ownership of the page pointed to by `pfn`, leaving the
    /// refcount untouched.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to a valid page managed by
    /// `alloc` and that the page has not been freed or deallocated yet.
    ///
    /// No checks are done. Any violation of this assumption may lead to undefined behavior.
    pub unsafe fn from_raw_unchecked_in(pfn: PFN, alloc: A) -> Self {
        Self {
            raw_page: A::RawPage::from(pfn),
            alloc,
        }
    }

    /// Acquire ownership of the page pointed to by `pfn`, leaving the
    /// refcount untouched.
    ///
    /// This is a wrapper around `from_raw_unchecked_in()` that performs a
    /// sanity check to ensure that the page is valid and managed by the
    /// allocator.
    ///
    /// # Panics
    /// This function panics if the page is not valid or is not managed by
    /// the allocator.
    ///
    /// # Safety
    /// The caller must ensure that `pfn` points to an existing page (i.e.,
    /// one inside the global page array) and that the page will not be freed
    /// or deallocated during the call.
    pub unsafe fn from_raw_in(pfn: PFN, alloc: A) -> Self {
        unsafe {
            // SAFETY: The caller guarantees that the page is inside the global page array.
            assert!(alloc.has_management_over(A::RawPage::from(pfn)));
            // SAFETY: We've checked the validity of the page, and the caller
            // guarantees that it will not be freed or deallocated during the call.
            Self::from_raw_unchecked_in(pfn, alloc)
        }
    }

    /// Do some work with the page without touching the reference count, with
    /// the same restrictions as `from_raw_in()`.
    ///
    /// # Safety
    /// See `from_raw_in()` for the safety requirements.
    pub unsafe fn with_raw_in<F, O>(pfn: PFN, func: F, alloc: A) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        // Wrap in `ManuallyDrop` so the borrowed page's refcount is not
        // decremented when `me` goes out of scope.
        let me = ManuallyDrop::new(unsafe { Self::from_raw_in(pfn, alloc) });
        func(&me)
    }

    /// Do some work with the page without touching the reference count, with
    /// the same restrictions as `from_raw_unchecked_in()`.
    ///
    /// # Safety
    /// See `from_raw_unchecked_in()` for the safety requirements.
    pub unsafe fn with_raw_unchecked_in<F, O>(pfn: PFN, func: F, alloc: A) -> O
    where
        F: FnOnce(&Self) -> O,
    {
        let me = ManuallyDrop::new(unsafe { Self::from_raw_unchecked_in(pfn, alloc) });
        func(&me)
    }

    /// Whether we are the only owner of the page.
    pub fn is_exclusive(&self) -> bool {
        self.raw_page.refcount().load(Ordering::Acquire) == 1
    }

    /// Returns the *order* of the page, which is the log2 of the number of
    /// base pages contained in the page object.
    pub fn order(&self) -> u32 {
        self.raw_page.order()
    }

    /// Returns the total size of the page in bytes.
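    ///
    /// For example, with 4 KiB base pages an order-3 page spans
    /// `1 << (3 + 12)` = 32768 bytes.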
    pub fn len(&self) -> usize {
        1 << (self.order() + PAGE_SIZE_BITS)
    }

    /// Consumes the `Page` and returns the physical frame number without
    /// releasing the reference that the page holds.
    pub fn into_raw(self) -> PFN {
        let me = ManuallyDrop::new(self);
        me.pfn()
    }

    /// Returns the physical frame number of the page, which is guaranteed to
    /// be aligned to the page size and valid.
    pub fn pfn(&self) -> PFN {
        self.raw_page.into()
    }

    /// Returns the start physical address of the page, which is guaranteed to
    /// be aligned to the page size and valid.
    pub fn start(&self) -> PAddr {
        PAddr::from(self.pfn())
    }

    /// Returns the physical address range of the page, which is guaranteed to
    /// be aligned to the page size and valid.
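    ///
    /// For example, an order-0 page starting at physical address `0x8000`
    /// covers the range `[0x8000, 0x9000)`.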
    pub fn range(&self) -> AddrRange<PAddr> {
        AddrRange::from(self.start()).grow(self.len())
    }

    /// Get the allocator that manages this page.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }
}

impl<A> Clone for Page<A>
where
    A: PageAlloc,
{
    fn clone(&self) -> Self {
        // The memory order can be `Relaxed` for the same reason as in the
        // copy constructor of `std::shared_ptr`: the new reference is derived
        // from an existing one, so the count is already at least 1.
        self.raw_page.refcount().fetch_add(1, Ordering::Relaxed);
        Self {
            raw_page: self.raw_page,
            alloc: self.alloc.clone(),
        }
    }
}

impl<A> Drop for Page<A>
where
    A: PageAlloc,
{
    fn drop(&mut self) {
        // `fetch_sub` returns the previous refcount value.
        match self.raw_page.refcount().fetch_sub(1, Ordering::AcqRel) {
            0 => panic!("Refcount for an in-use page is 0"),
            1 => unsafe {
                // SAFETY: `self.raw_page` points to a valid page inside the global page array.
                assert!(self.alloc.has_management_over(self.raw_page));
                // SAFETY: `self.raw_page` is managed by the allocator and we're
                // dropping the last reference, so the page can be freed.
                self.alloc.dealloc(self.raw_page)
            },
            _ => {}
        }
    }
}

impl<A: PageAlloc> fmt::Debug for Page<A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Page({:?}, order={})", self.pfn(), self.order())
    }
}