//! raw_page.rs — per-frame page metadata (`RawPage`) shared by the buddy
//! and slab allocators, addressed through the global `PAGE_ARRAY`.
  1. use buddy_allocator::BuddyRawPage;
  2. use core::{
  3. ptr::NonNull,
  4. sync::atomic::{AtomicU32, AtomicUsize, Ordering},
  5. };
  6. use eonix_mm::{
  7. address::{PAddr, VAddr},
  8. paging::{RawPage as RawPageTrait, PAGE_SIZE, PFN},
  9. };
  10. use intrusive_list::{container_of, Link};
  11. use slab_allocator::SlabRawPage;
  12. use crate::kernel::mem::access::RawPageAccess;
  13. use crate::kernel::mem::PhysAccess;
/// Base of the statically-mapped `RawPage` metadata array: entry `i`
/// describes the physical frame with PFN `i` (see the `From` impls below).
///
/// SAFETY: the address is a non-zero constant, so `new_unchecked` is sound.
/// NOTE(review): assumes the kernel's paging setup maps the page array at
/// this fixed virtual address — confirm against the arch init code.
const PAGE_ARRAY: NonNull<RawPage> =
    unsafe { NonNull::new_unchecked(0xffffff8040000000 as *mut _) };
/// Atomic per-page flag word; the bit values are defined in `impl PageFlags`.
pub struct PageFlags(AtomicU32);
/// Per-page bookkeeping used while the page is owned by the slab allocator.
struct SlabPageInner {
    // Number of objects currently handed out from this slab page.
    allocated_count: u32,
    // Head of the intrusive free-object list inside the page, if any.
    free_next: Option<NonNull<usize>>,
}
  21. impl SlabPageInner {
  22. fn new(free_next: Option<NonNull<usize>>) -> Self {
  23. Self {
  24. allocated_count: 0,
  25. free_next,
  26. }
  27. }
  28. }
/// Per-page data for pages owned by the buddy allocator (currently empty;
/// buddy state lives in `RawPage::order` and the flag bits).
pub struct BuddyPageInner {}
/// Which allocator currently owns this page, together with that
/// allocator's per-page state. Switched to `Slab` by `slab_init`.
enum PageType {
    Buddy(BuddyPageInner),
    Slab(SlabPageInner),
}
  34. impl PageType {
  35. fn slab_data(&mut self) -> &mut SlabPageInner {
  36. if let PageType::Slab(slab_data) = self {
  37. return slab_data;
  38. } else {
  39. unreachable!()
  40. }
  41. }
  42. }
/// Metadata describing one physical page frame. One instance per frame
/// lives in the global `PAGE_ARRAY`, indexed by PFN.
pub struct RawPage {
    /// This can be used for LRU page swap in the future.
    ///
    /// Now only used for free page links in the buddy system.
    link: Link,
    /// # Safety
    /// This field is only used in buddy system and is protected by the global lock.
    order: u32,
    // Atomic flag bits (PRESENT/BUDDY/FREE/LOCAL); see `PageFlags`.
    flags: PageFlags,
    // Reference count for this frame.
    refcount: AtomicUsize,
    // Allocator-specific state; the active variant depends on the owner.
    shared_data: PageType,
}
/// A copyable, non-null pointer to a `RawPage` entry in the global page array.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct RawPagePtr(NonNull<RawPage>);
  57. impl PageFlags {
  58. pub const PRESENT: u32 = 1 << 0;
  59. // pub const LOCKED: u32 = 1 << 1;
  60. pub const BUDDY: u32 = 1 << 2;
  61. // pub const SLAB: u32 = 1 << 3;
  62. // pub const DIRTY: u32 = 1 << 4;
  63. pub const FREE: u32 = 1 << 5;
  64. pub const LOCAL: u32 = 1 << 6;
  65. pub fn has(&self, flag: u32) -> bool {
  66. (self.0.load(Ordering::Relaxed) & flag) == flag
  67. }
  68. pub fn set(&self, flag: u32) {
  69. self.0.fetch_or(flag, Ordering::Relaxed);
  70. }
  71. pub fn clear(&self, flag: u32) {
  72. self.0.fetch_and(!flag, Ordering::Relaxed);
  73. }
  74. }
  75. impl RawPagePtr {
  76. pub const fn new(ptr: NonNull<RawPage>) -> Self {
  77. Self(ptr)
  78. }
  79. /// Get a raw pointer to the underlying `RawPage` struct.
  80. ///
  81. /// # Safety
  82. /// Doing arithmetic on the pointer returned will cause immediate undefined behavior.
  83. pub const unsafe fn as_ptr(self) -> *mut RawPage {
  84. self.0.as_ptr()
  85. }
  86. pub const fn as_ref<'a>(self) -> &'a RawPage {
  87. unsafe { &*self.as_ptr() }
  88. }
  89. pub const fn as_mut<'a>(self) -> &'a mut RawPage {
  90. unsafe { &mut *self.as_ptr() }
  91. }
  92. pub const fn order(&self) -> u32 {
  93. self.as_ref().order
  94. }
  95. pub const fn flags(&self) -> &PageFlags {
  96. &self.as_ref().flags
  97. }
  98. pub const fn refcount(&self) -> &AtomicUsize {
  99. &self.as_ref().refcount
  100. }
  101. // return the ptr point to the actually raw page
  102. pub fn real_ptr<T>(&self) -> NonNull<T> {
  103. let pfn = unsafe { PFN::from(RawPagePtr(NonNull::new_unchecked(self.as_ptr()))) };
  104. unsafe { PAddr::from(pfn).as_ptr::<T>() }
  105. }
  106. }
impl From<RawPagePtr> for PFN {
    /// The PFN is simply the entry's index within `PAGE_ARRAY`.
    fn from(value: RawPagePtr) -> Self {
        // SAFETY: `value` points into `PAGE_ARRAY`, so both pointers belong
        // to the same allocated object as `offset_from` requires; the
        // result is non-negative because `value` is at or after the base.
        let idx = unsafe { value.as_ptr().offset_from(PAGE_ARRAY.as_ptr()) as usize };
        Self::from(idx)
    }
}
impl From<PFN> for RawPagePtr {
    /// Index the global page metadata array by PFN.
    fn from(pfn: PFN) -> Self {
        // SAFETY: assumes `pfn` lies within the range covered by
        // `PAGE_ARRAY` — TODO confirm callers never pass an out-of-range PFN.
        let raw_page_ptr = unsafe { PAGE_ARRAY.add(usize::from(pfn)) };
        Self::new(raw_page_ptr)
    }
}
  119. impl RawPageTrait for RawPagePtr {
  120. fn order(&self) -> u32 {
  121. self.order()
  122. }
  123. fn refcount(&self) -> &AtomicUsize {
  124. self.refcount()
  125. }
  126. fn is_present(&self) -> bool {
  127. self.flags().has(PageFlags::PRESENT)
  128. }
  129. }
  130. impl BuddyRawPage for RawPagePtr {
  131. unsafe fn from_link(link: &mut Link) -> Self {
  132. let raw_page_ptr = container_of!(link, RawPage, link);
  133. Self(raw_page_ptr)
  134. }
  135. fn set_order(&self, order: u32) {
  136. self.as_mut().order = order;
  137. }
  138. unsafe fn get_link(&self) -> &mut Link {
  139. &mut self.as_mut().link
  140. }
  141. fn is_buddy(&self) -> bool {
  142. self.flags().has(PageFlags::BUDDY)
  143. }
  144. fn is_free(&self) -> bool {
  145. self.flags().has(PageFlags::FREE)
  146. }
  147. fn set_buddy(&self) {
  148. self.flags().set(PageFlags::BUDDY);
  149. }
  150. fn set_free(&self) {
  151. self.flags().set(PageFlags::FREE);
  152. }
  153. fn clear_buddy(&self) {
  154. self.flags().clear(PageFlags::BUDDY);
  155. }
  156. fn clear_free(&self) {
  157. self.flags().clear(PageFlags::FREE);
  158. }
  159. }
  160. impl SlabRawPage for RawPagePtr {
  161. unsafe fn from_link(link: &mut Link) -> Self {
  162. let raw_page_ptr = container_of!(link, RawPage, link);
  163. Self(raw_page_ptr)
  164. }
  165. unsafe fn get_link(&self) -> &mut Link {
  166. &mut self.as_mut().link
  167. }
  168. fn in_which(ptr: *mut u8) -> RawPagePtr {
  169. let vaddr = VAddr::from(ptr as usize & !(PAGE_SIZE - 1));
  170. unsafe { vaddr.as_raw_page() }
  171. }
  172. fn allocated_count(&self) -> &mut u32 {
  173. &mut self.as_mut().shared_data.slab_data().allocated_count
  174. }
  175. fn next_free(&self) -> &mut Option<NonNull<usize>> {
  176. &mut self.as_mut().shared_data.slab_data().free_next
  177. }
  178. fn real_page_ptr(&self) -> *mut u8 {
  179. self.real_ptr().as_ptr()
  180. }
  181. fn slab_init(&self, first_free: Option<NonNull<usize>>) {
  182. self.as_mut().shared_data = PageType::Slab(SlabPageInner::new(first_free));
  183. }
  184. }