page_alloc.rs

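//! Physical page allocation for the kernel.
//!
//! Allocations of order `<= COSTLY_ORDER` are served from per-CPU free lists
//! (`PERCPU_PAGE_ALLOC`), which are refilled in batches from the global buddy
//! allocator (`BUDDY_ALLOC`); larger allocations go to the buddy allocator
//! directly. The `#[no_mangle]` `extern "C"` functions at the bottom expose
//! the allocator to the kernel's C code.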
mod raw_page;

use super::{paging::AllocZeroed as _, Page};
use buddy_allocator::{BuddyAllocator, BuddyRawPage as _};
use core::{ptr::NonNull, sync::atomic::Ordering};
use eonix_mm::{
    address::{AddrOps as _, PAddr},
    paging::{GlobalPageAlloc as GlobalPageAllocTrait, PageAlloc, PFN},
};
use eonix_sync::Spin;
use intrusive_list::List;
use raw_page::{PageFlags, RawPage, RawPagePtr};
/// Highest order served from the per-CPU free lists; anything larger goes
/// straight to the global buddy allocator.
const COSTLY_ORDER: u32 = 3;
/// Upper bound on how many blocks one refill pulls from the buddy allocator
/// (scaled down by the requested order).
const BATCH_SIZE: u32 = 64;

static BUDDY_ALLOC: Spin<BuddyAllocator<RawPagePtr>> = Spin::new(BuddyAllocator::new());

#[arch::define_percpu]
static PERCPU_PAGE_ALLOC: PerCpuPageAlloc = PerCpuPageAlloc::new();
/// Allocator that refuses to allocate or deallocate; every call panics.
#[derive(Clone)]
pub struct NoAlloc;

/// Default allocator: per-CPU free lists backed by the global buddy allocator.
#[derive(Clone)]
pub struct GlobalPageAlloc;

/// Allocator that bypasses the per-CPU lists and always uses the buddy allocator.
#[derive(Clone)]
pub struct BuddyPageAlloc;
/// Per-CPU cache of free pages for orders up to [`COSTLY_ORDER`].
struct PerCpuPageAlloc {
    batch: u32,
    // TODO: might be used in the future.
    // high: u32,
    free_areas: [List; COSTLY_ORDER as usize + 1],
}
impl PerCpuPageAlloc {
    const fn new() -> Self {
        Self {
            batch: BATCH_SIZE,
            // high: 0,
            free_areas: [const { List::new() }; COSTLY_ORDER as usize + 1],
        }
    }

    fn insert_free_pages(&mut self, pages_ptr: RawPagePtr, order: u32) {
        let free_area = &mut self.free_areas[order as usize];
        free_area.insert(unsafe { pages_ptr.get_link() });
    }

    fn get_free_pages(&mut self, order: u32) -> Option<RawPagePtr> {
        let free_area = &mut self.free_areas[order as usize];
        free_area.pop().map(|node| unsafe {
            // SAFETY: `node` is a valid pointer to a `Link` that is not used by anyone.
            RawPagePtr::from_link(node)
        })
    }

    fn alloc_order(&mut self, order: u32) -> Option<RawPagePtr> {
        assert!(order <= COSTLY_ORDER);

        if let Some(pages) = self.get_free_pages(order) {
            return Some(pages);
        }

        // The local list is empty: refill it in a batch from the buddy
        // allocator, marking each block as CPU-local, then retry.
        let batch = self.batch >> order;
        for _ in 0..batch {
            if let Some(pages_ptr) = BUDDY_ALLOC.lock().alloc_order(order) {
                pages_ptr.flags().set(PageFlags::LOCAL);
                self.insert_free_pages(pages_ptr, order);
            } else {
                break;
            }
        }

        self.get_free_pages(order)
    }

    fn free_pages(&mut self, pages_ptr: RawPagePtr, order: u32) {
        assert_eq!(pages_ptr.order(), order);
        assert_eq!(pages_ptr.refcount().load(Ordering::Relaxed), 0);

        pages_ptr.refcount().store(1, Ordering::Relaxed);
        self.insert_free_pages(pages_ptr, order);
    }
}
impl GlobalPageAlloc {
    pub const fn buddy_alloc() -> BuddyPageAlloc {
        BuddyPageAlloc
    }
}
impl PageAlloc for GlobalPageAlloc {
    type RawPage = RawPagePtr;

    fn alloc_order(&self, order: u32) -> Option<RawPagePtr> {
        if order > COSTLY_ORDER {
            BUDDY_ALLOC.lock().alloc_order(order)
        } else {
            unsafe {
                // Preemption must be disabled while touching the per-CPU lists.
                eonix_preempt::disable();
                let page_ptr = PERCPU_PAGE_ALLOC.as_mut().alloc_order(order);
                eonix_preempt::enable();
                page_ptr
            }
        }
    }

    unsafe fn dealloc(&self, page_ptr: RawPagePtr) {
        if page_ptr.order() > COSTLY_ORDER {
            BUDDY_ALLOC.lock().dealloc(page_ptr);
        } else {
            let order = page_ptr.order();
            unsafe {
                eonix_preempt::disable();
                PERCPU_PAGE_ALLOC.as_mut().free_pages(page_ptr, order);
                eonix_preempt::enable();
            }
        }
    }

    fn has_management_over(&self, page_ptr: RawPagePtr) -> bool {
        BuddyAllocator::has_management_over(page_ptr)
            && (page_ptr.order() > COSTLY_ORDER || page_ptr.flags().has(PageFlags::LOCAL))
    }
}
impl PageAlloc for NoAlloc {
    type RawPage = RawPagePtr;

    fn alloc_order(&self, _order: u32) -> Option<RawPagePtr> {
        panic!("NoAlloc cannot allocate pages");
    }

    unsafe fn dealloc(&self, _: RawPagePtr) {
        panic!("NoAlloc cannot deallocate pages");
    }

    fn has_management_over(&self, _: RawPagePtr) -> bool {
        true
    }
}
impl GlobalPageAllocTrait for GlobalPageAlloc {
    fn global() -> Self {
        GlobalPageAlloc
    }
}

impl GlobalPageAllocTrait for NoAlloc {
    fn global() -> Self {
        NoAlloc
    }
}
impl PageAlloc for BuddyPageAlloc {
    type RawPage = RawPagePtr;

    fn alloc_order(&self, order: u32) -> Option<RawPagePtr> {
        BUDDY_ALLOC.lock().alloc_order(order)
    }

    unsafe fn dealloc(&self, page_ptr: RawPagePtr) {
        BUDDY_ALLOC.lock().dealloc(page_ptr);
    }

    fn has_management_over(&self, page_ptr: RawPagePtr) -> bool {
        BuddyAllocator::has_management_over(page_ptr)
    }
}
// C-callable entry points.

#[no_mangle]
pub extern "C" fn mark_present(start: usize, end: usize) {
    let mut start_pfn = PFN::from(PAddr::from(start).ceil());
    let end_pfn = PFN::from(PAddr::from(end).floor());

    while start_pfn < end_pfn {
        RawPagePtr::from(start_pfn).flags().set(PageFlags::PRESENT);
        start_pfn = start_pfn + 1;
    }
}

#[no_mangle]
pub extern "C" fn create_pages(start: PAddr, end: PAddr) {
    BUDDY_ALLOC.lock().create_pages(start, end);
}

#[no_mangle]
pub extern "C" fn page_to_pfn(page: *const ()) -> PFN {
    let page_ptr = RawPagePtr::new(NonNull::new(page as *mut _).unwrap());
    PFN::from(page_ptr)
}

#[no_mangle]
pub extern "C" fn c_alloc_page() -> *const RawPage {
    GlobalPageAlloc.alloc().expect("Out of memory").as_ref()
}

#[no_mangle]
pub extern "C" fn c_alloc_pages(order: u32) -> *const RawPage {
    GlobalPageAlloc
        .alloc_order(order)
        .expect("Out of memory")
        .as_ref()
}

#[no_mangle]
pub extern "C" fn c_alloc_page_table() -> PAddr {
    PAddr::from(Page::zeroed().into_raw())
}