// raw_page.rs

use crate::kernel::mem::{page_cache::PageCacheRawPage, MemoryBlock};
use crate::kernel::mem::{AsMemoryBlock, PhysAccess};
use buddy_allocator::BuddyRawPage;
use core::{
    ptr::NonNull,
    sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};
use eonix_hal::mm::ArchPhysAccess;
use eonix_mm::paging::PAGE_SIZE;
use eonix_mm::{
    address::{PAddr, PhysAccess as _},
    paging::{RawPage as RawPageTrait, PFN},
};
use intrusive_list::{container_of, Link};
use slab_allocator::SlabRawPage;
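
/// Base of the global array of `RawPage` entries, one per physical page
/// frame. The `PFN` <-> `RawPagePtr` conversions below index into this
/// array, so it must cover every frame the allocators can hand out.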
const PAGE_ARRAY: NonNull<RawPage> =
    unsafe { NonNull::new_unchecked(0xffffff8040000000 as *mut _) };
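
/// Atomic per-page flag word; the individual bits are the associated
/// constants on the `impl` below.
///
/// A minimal usage sketch (illustrative only; assumes `page` is a valid
/// `RawPagePtr`):
///
/// ```ignore
/// page.flags().set(PageFlags::DIRTY);
/// assert!(page.flags().has(PageFlags::DIRTY));
/// page.flags().clear(PageFlags::DIRTY);
/// ```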
pub struct PageFlags(AtomicU32);
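
/// Bookkeeping for a page owned by the slab allocator.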
struct SlabPageInner {
    allocated_count: u32,
    free_next: Option<NonNull<usize>>,
}

impl SlabPageInner {
    fn new(free_next: Option<NonNull<usize>>) -> Self {
        Self {
            allocated_count: 0,
            free_next,
        }
    }
}
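
/// Bookkeeping for a page owned by the page cache.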
struct PageCacheInner {
    valid_size: usize,
}

pub struct BuddyPageInner {}
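
/// Owner-specific page data. Exactly one variant is live at a time,
/// depending on which subsystem (buddy, slab, or page cache) currently
/// owns the page.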
enum PageType {
    Buddy(BuddyPageInner),
    Slab(SlabPageInner),
    PageCache(PageCacheInner),
}

impl PageType {
    fn slab_data(&mut self) -> &mut SlabPageInner {
        if let PageType::Slab(slab_data) = self {
            slab_data
        } else {
            unreachable!()
        }
    }

    fn page_cache_data(&mut self) -> &mut PageCacheInner {
        if let PageType::PageCache(cache_data) = self {
            cache_data
        } else {
            unreachable!()
        }
    }
}
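
/// Per-frame page metadata, stored in the global `PAGE_ARRAY`.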
pub struct RawPage {
    /// Intrusive list link.
    ///
    /// This can be used for LRU page swap in the future. For now it is only
    /// used to link free pages in the buddy system.
    link: Link,
    /// # Safety
    /// This field is only used by the buddy system and is protected by the
    /// global lock.
    order: u32,
    flags: PageFlags,
    refcount: AtomicUsize,
    shared_data: PageType,
}
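
/// A copyable raw pointer to a `RawPage` entry in `PAGE_ARRAY`.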
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct RawPagePtr(NonNull<RawPage>);

impl PageFlags {
    pub const PRESENT: u32 = 1 << 0;
    // pub const LOCKED: u32 = 1 << 1;
    pub const BUDDY: u32 = 1 << 2;
    pub const SLAB: u32 = 1 << 3;
    pub const DIRTY: u32 = 1 << 4;
    pub const FREE: u32 = 1 << 5;
    pub const LOCAL: u32 = 1 << 6;

    pub fn has(&self, flag: u32) -> bool {
        (self.0.load(Ordering::Relaxed) & flag) == flag
    }

    pub fn set(&self, flag: u32) {
        self.0.fetch_or(flag, Ordering::Relaxed);
    }

    pub fn clear(&self, flag: u32) {
        self.0.fetch_and(!flag, Ordering::Relaxed);
    }
}

impl RawPagePtr {
    pub const fn new(ptr: NonNull<RawPage>) -> Self {
        Self(ptr)
    }

    /// Get a raw pointer to the underlying `RawPage` struct.
    ///
    /// # Safety
    /// Doing arithmetic on the returned pointer causes immediate undefined
    /// behavior.
    pub const unsafe fn as_ptr(self) -> *mut RawPage {
        self.0.as_ptr()
    }

    pub const fn as_ref<'a>(self) -> &'a RawPage {
        unsafe { &*self.as_ptr() }
    }

    pub const fn as_mut<'a>(self) -> &'a mut RawPage {
        unsafe { &mut *self.as_ptr() }
    }

    pub const fn order(&self) -> u32 {
        self.as_ref().order
    }

    pub const fn flags(&self) -> &PageFlags {
        &self.as_ref().flags
    }

    pub const fn refcount(&self) -> &AtomicUsize {
        &self.as_ref().refcount
    }

    /// Return a pointer to the actual page this entry describes.
    pub fn real_ptr<T>(&self) -> NonNull<T> {
        let pfn = PFN::from(*self);
        unsafe { PAddr::from(pfn).as_ptr::<T>() }
    }
}
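
// The `PFN` <-> `RawPagePtr` conversions below treat `PAGE_ARRAY` as a
// dense array indexed by page frame number, i.e. (illustrative pseudocode
// only):
//
//     RawPagePtr = PAGE_ARRAY + usize::from(pfn)
//     PFN        = raw_page_ptr - PAGE_ARRAY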
impl From<RawPagePtr> for PFN {
    fn from(value: RawPagePtr) -> Self {
        let idx = unsafe { value.as_ptr().offset_from(PAGE_ARRAY.as_ptr()) as usize };
        Self::from(idx)
    }
}

impl From<PFN> for RawPagePtr {
    fn from(pfn: PFN) -> Self {
        let raw_page_ptr = unsafe { PAGE_ARRAY.add(usize::from(pfn)) };
        Self::new(raw_page_ptr)
    }
}

impl RawPageTrait for RawPagePtr {
    fn order(&self) -> u32 {
        self.order()
    }

    fn refcount(&self) -> &AtomicUsize {
        self.refcount()
    }

    fn is_present(&self) -> bool {
        self.flags().has(PageFlags::PRESENT)
    }
}

impl BuddyRawPage for RawPagePtr {
    unsafe fn from_link(link: &mut Link) -> Self {
        let raw_page_ptr = container_of!(link, RawPage, link);
        Self(raw_page_ptr)
    }

    fn set_order(&self, order: u32) {
        self.as_mut().order = order;
    }

    unsafe fn get_link(&self) -> &mut Link {
        &mut self.as_mut().link
    }

    fn is_buddy(&self) -> bool {
        self.flags().has(PageFlags::BUDDY)
    }

    fn is_free(&self) -> bool {
        self.flags().has(PageFlags::FREE)
    }

    fn set_buddy(&self) {
        self.flags().set(PageFlags::BUDDY);
    }

    fn set_free(&self) {
        self.flags().set(PageFlags::FREE);
    }

    fn clear_buddy(&self) {
        self.flags().clear(PageFlags::BUDDY);
    }

    fn clear_free(&self) {
        self.flags().clear(PageFlags::FREE);
    }
}

impl SlabRawPage for RawPagePtr {
    unsafe fn from_link(link: &mut Link) -> Self {
        let raw_page_ptr = container_of!(link, RawPage, link);
        Self(raw_page_ptr)
    }

    unsafe fn get_link(&self) -> &mut Link {
        &mut self.as_mut().link
    }
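
    /// Find the `RawPagePtr` of the page frame that contains `ptr`.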
    fn in_which(ptr: *mut u8) -> RawPagePtr {
        unsafe {
            // SAFETY: The pointer is allocated from the slab allocator,
            // which can't be null.
            let ptr = NonNull::new_unchecked(ptr);
            // SAFETY: The pointer is valid.
            let paddr = ArchPhysAccess::from_ptr(ptr);
            let pfn = PFN::from(paddr);
            RawPagePtr::from(pfn)
        }
    }

    fn allocated_count(&self) -> &mut u32 {
        &mut self.as_mut().shared_data.slab_data().allocated_count
    }

    fn next_free(&self) -> &mut Option<NonNull<usize>> {
        &mut self.as_mut().shared_data.slab_data().free_next
    }

    fn real_page_ptr(&self) -> *mut u8 {
        self.real_ptr().as_ptr()
    }

    fn slab_init(&self, first_free: Option<NonNull<usize>>) {
        self.as_mut().shared_data = PageType::Slab(SlabPageInner::new(first_free));
    }
}

impl PageCacheRawPage for RawPagePtr {
    fn valid_size(&self) -> &mut usize {
        &mut self.as_mut().shared_data.page_cache_data().valid_size
    }

    fn is_dirty(&self) -> bool {
        self.flags().has(PageFlags::DIRTY)
    }

    fn clear_dirty(&self) {
        self.flags().clear(PageFlags::DIRTY);
    }

    fn set_dirty(&self) {
        self.flags().set(PageFlags::DIRTY);
    }

    fn cache_init(&self) {
        self.as_mut().shared_data = PageType::PageCache(PageCacheInner { valid_size: 0 });
    }
}

// SAFETY: `RawPagePtr` is a pointer to a valid `RawPage` struct.
impl AsMemoryBlock for RawPagePtr {
    fn as_memblk(&self) -> MemoryBlock {
        unsafe { MemoryBlock::new(self.real_ptr::<()>().addr(), PAGE_SIZE) }
    }
}