mod raw_page;
mod zones;

use buddy_allocator::BuddyAllocator;
use core::sync::atomic::Ordering;
use eonix_mm::address::{AddrOps as _, PRange};
use eonix_mm::paging::{
    GlobalPageAlloc as GlobalPageAllocTrait, PageAlloc, PageList, PageListSized as _, PFN,
};
use eonix_preempt::PreemptGuard;
use eonix_sync::{NoContext, Spin};
use raw_page::{PageFlags, RawPageList};

pub use raw_page::{RawPage, RawPagePtr};
pub use zones::GlobalZone;

/// The highest order served from the per-CPU free lists; allocations larger
/// than this always go straight to the buddy allocator.
const COSTLY_ORDER: u32 = 3;
/// One free list per order in `0..=COSTLY_ORDER`.
const AREAS: usize = COSTLY_ORDER as usize + 1;
/// Order-0 pages' worth of memory pulled from the buddy allocator when a
/// per-CPU free list runs empty.
const BATCH_SIZE: u32 = 64;

static BUDDY_ALLOC: Spin<BuddyAllocator<GlobalZone, RawPageList>> =
    Spin::new(BuddyAllocator::new(&GlobalZone()));

#[eonix_percpu::define_percpu]
static PERCPU_PAGE_ALLOC: PerCpuPageAlloc = PerCpuPageAlloc::new();
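
// Allocation is two-level: orders up to `COSTLY_ORDER` are served from the
// per-CPU free lists above (guarded by disabling preemption rather than by a
// lock), falling back to the spinlock-protected buddy allocator in batches;
// larger orders always go straight to the buddy allocator.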

#[derive(Clone)]
pub struct GlobalPageAlloc;

#[derive(Clone)]
pub struct BuddyPageAlloc();

struct PerCpuPageAlloc {
    batch: u32,
    free_areas: [RawPageList; AREAS],
}
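
/// Marks a page as living on (or leaving) a per-CPU free list.
///
/// `PerCpuPageAlloc::alloc_order` sets this on every block it pulls out of
/// the buddy allocator; `GlobalPageAlloc::has_management_over` then checks
/// the corresponding `PageFlags::LOCAL` flag. Presumably implemented by
/// `RawPage`, whose `set_local` is called in `alloc_order` below.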
pub trait PerCpuPage {
    fn set_local(&mut self, val: bool);
}

impl PerCpuPageAlloc {
    const fn new() -> Self {
        Self {
            batch: BATCH_SIZE,
            free_areas: [RawPageList::NEW; AREAS],
        }
    }
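
    /// Pop a block from the local order-`order` free list, refilling it from
    /// the global buddy allocator on a miss.
    ///
    /// Refills shrink with order: with `BATCH_SIZE = 64`, an empty order-0
    /// list is refilled with 64 single pages, while an empty order-3 list is
    /// refilled with `64 >> 3 = 8` eight-page blocks, so a single refill
    /// never moves more than 64 base pages out of the buddy allocator.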
    fn alloc_order(&mut self, order: u32) -> Option<&'static mut RawPage> {
        assert!(order <= COSTLY_ORDER);

        if let Some(pages) = self.free_areas[order as usize].pop_head() {
            return Some(pages);
        }

        // Miss: pull a batch from the buddy allocator, tagging each block as
        // per-CPU local on the way in.
        let batch = self.batch >> order;
        for _ in 0..batch {
            let Some(page) = BUDDY_ALLOC.lock().alloc_order(order) else {
                break;
            };
            page.set_local(true);
            self.free_areas[order as usize].push_tail(page);
        }

        self.free_areas[order as usize].pop_head()
    }

    fn free_pages(&mut self, page: &'static mut RawPage, order: u32) {
        // Freed blocks stay on the local list; nothing is drained back to
        // the buddy allocator here.
        self.free_areas[order as usize].push_tail(page);
    }
}

impl GlobalPageAlloc {
    #[allow(dead_code)]
    pub const fn buddy_alloc() -> BuddyPageAlloc {
        BuddyPageAlloc()
    }

    /// Add the pages in the `PAddr` range `range` to the global allocator.
    ///
    /// This function is only to be called during system initialization, while
    /// `eonix_preempt` is not yet functional because the percpu areas have
    /// not been set up.
    ///
    /// # Safety
    /// This function is unsafe because calling it in a preemptible context
    /// may deadlock.
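    ///
    /// # Example
    ///
    /// A hypothetical boot-time call; the `PAddr` values and the `PRange`
    /// constructor here are assumptions for illustration, not a confirmed
    /// API:
    ///
    /// ```ignore
    /// // Hand the physical range [16 MiB, 128 MiB) to the allocator.
    /// let range = PRange::new(PAddr::from(0x100_0000), PAddr::from(0x800_0000));
    /// unsafe { GlobalPageAlloc::add_pages(range) };
    /// ```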
    pub unsafe fn add_pages(range: PRange) {
        BUDDY_ALLOC
            .lock_with_context(NoContext)
            .create_pages(range.start(), range.end())
    }
}

impl PageAlloc for GlobalPageAlloc {
    type RawPage = RawPagePtr;

    fn alloc_order(&self, order: u32) -> Option<RawPagePtr> {
        let raw_page = if order > COSTLY_ORDER {
            // Costly orders bypass the per-CPU caches entirely.
            BUDDY_ALLOC.lock().alloc_order(order)
        } else {
            unsafe {
                eonix_preempt::disable();
                let page = PERCPU_PAGE_ALLOC.as_mut().alloc_order(order);
                eonix_preempt::enable();
                page
            }
        };

        raw_page.map(|raw_page| {
            // SAFETY: The memory order here can be `Relaxed` for the same
            // reason as in the copy constructor of `std::shared_ptr`: a new
            // reference is always created from an existing one, so the
            // increment needs no synchronization.
            raw_page.refcount.fetch_add(1, Ordering::Relaxed);
            RawPagePtr::from_ref(raw_page)
        })
    }

    unsafe fn dealloc(&self, page_ptr: RawPagePtr) {
        assert_eq!(
            page_ptr.refcount().load(Ordering::Relaxed),
            0,
            "Trying to free a page with refcount > 0"
        );

        let order = page_ptr.order();
        if order > COSTLY_ORDER {
            BUDDY_ALLOC.lock().dealloc(page_ptr.as_mut());
        } else {
            unsafe {
                // Return the block to the current CPU's free list; the guard
                // keeps us on this CPU for the duration.
                PreemptGuard::new(PERCPU_PAGE_ALLOC.as_mut()).free_pages(page_ptr.as_mut(), order);
            }
        }
    }

    fn has_management_over(&self, page_ptr: RawPagePtr) -> bool {
        // Costly orders always come straight from the buddy allocator;
        // smaller blocks belong here only if a per-CPU list tagged them.
        page_ptr.order() > COSTLY_ORDER || page_ptr.flags().has(PageFlags::LOCAL)
    }
}
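
// A sketch of the trait surface (illustrative only; in the kernel these calls
// are presumably driven by an RAII page wrapper rather than made directly).
// Assuming a free page's refcount is zero, as `dealloc`'s assertion above
// suggests, `alloc_order` hands out pages with a refcount of 1:
//
//     let alloc = GlobalPageAlloc::global();
//     let page = alloc.alloc_order(0).expect("out of memory");
//     // ... use the page, then drop the last reference ...
//     page.refcount().fetch_sub(1, Ordering::Relaxed);
//     unsafe { alloc.dealloc(page) };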

impl GlobalPageAllocTrait for GlobalPageAlloc {
    fn global() -> Self {
        GlobalPageAlloc
    }
}
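
// `BuddyPageAlloc` bypasses the per-CPU caches and talks to the buddy
// allocator directly. Since every page ultimately originates there, its
// `has_management_over` returns `true` unconditionally. Note that, unlike
// `GlobalPageAlloc::alloc_order`, its `alloc_order` leaves the refcount
// untouched; callers are presumably expected to manage it themselves.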
impl PageAlloc for BuddyPageAlloc {
    type RawPage = RawPagePtr;

    fn alloc_order(&self, order: u32) -> Option<RawPagePtr> {
        BUDDY_ALLOC
            .lock()
            .alloc_order(order)
            .map(|raw_page| RawPagePtr::from_ref(raw_page))
    }

    unsafe fn dealloc(&self, page_ptr: RawPagePtr) {
        BUDDY_ALLOC.lock().dealloc(page_ptr.as_mut());
    }

    fn has_management_over(&self, _: RawPagePtr) -> bool {
        true
    }
}