raw_page.rs

use core::ptr::NonNull;
use core::sync::atomic::{AtomicU32, AtomicUsize, Ordering};

use buddy_allocator::BuddyFolio;
use eonix_hal::mm::ArchPhysAccess;
use eonix_mm::address::{PAddr, PhysAccess as _};
use eonix_mm::paging::{FolioList, FolioListSized, Zone, PFN};
use intrusive_list::{container_of, Link, List};
use slab_allocator::{SlabPage, SlabPageAlloc, SlabSlot};

use super::zones::ZONE;
use super::{GlobalPageAlloc, PerCpuPage};
use crate::kernel::mem::PhysAccess;
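
/// Atomic per-page flag word. The individual flag bits are defined as
/// associated constants on the `impl PageFlags` block below; all accesses
/// use relaxed atomics.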
pub struct PageFlags(AtomicU32);
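
/// Bookkeeping kept in `RawPage::shared_data` while the page is owned by the
/// slab allocator: the number of slots currently handed out and the head of
/// the page-local free-slot list.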
#[derive(Clone, Copy)]
struct SlabPageData {
    allocated_count: usize,
    free_next: Option<NonNull<SlabSlot>>,
}

impl SlabPageData {
    const fn new() -> Self {
        Self {
            allocated_count: 0,
            free_next: None,
        }
    }
}
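
/// Allocator-specific data embedded in every `RawPage`. Which variant is
/// live is indicated by the page's flags (currently only `SLAB`).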
#[repr(C)]
union PageData {
    slab: SlabPageData,
}
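
/// Per-frame metadata maintained by the global `ZONE`; the record for a
/// physical page is looked up from its `PFN` via `ZONE.get_page`.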
pub struct RawPage {
    /// This can be used for LRU page swap in the future.
    ///
    /// For now it is only used for free-page links in the buddy system.
    pub link: Link,
    /// # Safety
    /// This field is only used in the buddy system and is protected by the global lock.
    pub order: u32,
    pub flags: PageFlags,
    pub refcount: AtomicUsize,
    shared_data: PageData,
}

// XXX: introduce Folio and remove this.
unsafe impl Send for RawPage {}
unsafe impl Sync for RawPage {}

impl PageFlags {
    pub const LOCKED: u32 = 1 << 1;
    pub const BUDDY: u32 = 1 << 2;
    pub const SLAB: u32 = 1 << 3;
    pub const DIRTY: u32 = 1 << 4;
    pub const LOCAL: u32 = 1 << 6;

    /// Return whether all of the given flag bits are set.
    pub fn has(&self, flag: u32) -> bool {
        (self.0.load(Ordering::Relaxed) & flag) == flag
    }

    pub fn set(&self, flag: u32) {
        self.0.fetch_or(flag, Ordering::Relaxed);
    }

    pub fn clear(&self, flag: u32) {
        self.0.fetch_and(!flag, Ordering::Relaxed);
    }

    /// Set the flag and return whether it was already set.
    ///
    /// If multiple flags are given, returns `true` if any of them was already set.
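    ///
    /// # Example
    ///
    /// An illustrative sketch (not from the original source) of using this
    /// as a try-lock on a page:
    ///
    /// ```ignore
    /// if !page.flags.test_and_set(PageFlags::LOCKED) {
    ///     // The bit was clear before this call, so we now hold the lock.
    /// }
    /// ```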
    pub fn test_and_set(&self, flag: u32) -> bool {
        (self.0.fetch_or(flag, Ordering::Relaxed) & flag) != 0
    }
}
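
// Hooks used by the buddy allocator; the page's `order` field and `BUDDY`
// flag back the allocator's per-folio state.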
impl BuddyFolio for RawPage {
    fn pfn(&self) -> PFN {
        ZONE.get_pfn(self)
    }

    fn get_order(&self) -> u32 {
        self.order
    }

    fn is_buddy(&self) -> bool {
        self.flags.has(PageFlags::BUDDY)
    }

    fn set_order(&mut self, order: u32) {
        self.order = order;
    }

    fn set_buddy(&mut self, val: bool) {
        if val {
            self.flags.set(PageFlags::BUDDY);
        } else {
            self.flags.clear(PageFlags::BUDDY);
        }
    }
}
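
// Slab-allocator hooks: while `SLAB` is set, `shared_data.slab` holds the
// page's slot bookkeeping (see `alloc_slab_page` below).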
impl SlabPage for RawPage {
    fn get_data_ptr(&self) -> NonNull<[u8]> {
        let paddr_start = PAddr::from(ZONE.get_pfn(self));
        let page_data_ptr = unsafe { paddr_start.as_ptr() };
        // An order-n page covers `1 << (n + 12)` bytes (4 KiB base pages).
        NonNull::slice_from_raw_parts(page_data_ptr, 1 << (self.order + 12))
    }

    fn get_free_slot(&self) -> Option<NonNull<SlabSlot>> {
        unsafe {
            // SAFETY: `shared_data.slab` is initialized in `alloc_slab_page`
            // before the page is handed to the slab allocator, so `slab` is
            // the live union variant here.
            self.shared_data.slab.free_next
        }
    }

    fn set_free_slot(&mut self, next: Option<NonNull<SlabSlot>>) {
        self.shared_data.slab.free_next = next;
    }

    fn get_alloc_count(&self) -> usize {
        unsafe {
            // SAFETY: Same as in `get_free_slot`.
            self.shared_data.slab.allocated_count
        }
    }

    fn inc_alloc_count(&mut self) -> usize {
        unsafe {
            // SAFETY: Same as in `get_free_slot`.
            self.shared_data.slab.allocated_count += 1;
            self.shared_data.slab.allocated_count
        }
    }

    fn dec_alloc_count(&mut self) -> usize {
        unsafe {
            // SAFETY: Same as in `get_free_slot`.
            self.shared_data.slab.allocated_count -= 1;
            self.shared_data.slab.allocated_count
        }
    }

    unsafe fn from_allocated(ptr: NonNull<u8>) -> &'static mut Self {
        unsafe {
            // SAFETY: The caller ensures that `ptr` is valid.
            let paddr = ArchPhysAccess::from_ptr(ptr);
            let pfn = PFN::from(paddr);
            ZONE.get_page(pfn)
                .expect("Page outside of the global zone")
                .as_mut()
        }
    }
}
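
// Per-CPU page support: the `LOCAL` flag records whether the page belongs to
// a CPU-local allocation cache.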
impl PerCpuPage for RawPage {
    fn set_local(&mut self, val: bool) {
        if val {
            self.flags.set(PageFlags::LOCAL);
        } else {
            self.flags.clear(PageFlags::LOCAL);
        }
    }
}
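
/// An intrusive list of `RawPage`s, linked through their `link` fields.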
pub struct RawPageList(List);

unsafe impl Send for RawPageList {}

impl FolioList for RawPageList {
    type Folio = RawPage;

    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    fn peek_head(&mut self) -> Option<&mut Self::Folio> {
        unsafe {
            let link = self.0.head()?;
            let mut raw_page_ptr = container_of!(link, RawPage, link);
            Some(raw_page_ptr.as_mut())
        }
    }

    fn pop_head(&mut self) -> Option<&'static mut Self::Folio> {
        unsafe {
            let link = self.0.pop()?;
            let mut raw_page_ptr = container_of!(link, RawPage, link);
            Some(raw_page_ptr.as_mut())
        }
    }

    fn push_tail(&mut self, page: &'static mut Self::Folio) {
        self.0.insert(&mut page.link);
    }

    fn remove(&mut self, page: &mut Self::Folio) {
        self.0.remove(&mut page.link)
    }
}

impl FolioListSized for RawPageList {
    const NEW: Self = RawPageList(List::new());
}
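
// Backs the slab allocator with order-0 pages from the global page
// allocator, tagging each page `SLAB` before handing it out.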
unsafe impl SlabPageAlloc for GlobalPageAlloc {
    type Page = RawPage;
    type PageList = RawPageList;

    fn alloc_slab_page(&self) -> &'static mut RawPage {
        let raw_page = self.alloc_raw_order(0).expect("Out of memory");
        raw_page.flags.set(PageFlags::SLAB);
        // Overwriting the whole `Copy` union field is a safe write; it makes
        // `slab` the live variant for the `SlabPage` accessors above.
        raw_page.shared_data.slab = SlabPageData::new();
        raw_page
    }
}