mm.rs

use core::{
    alloc::{AllocError, Allocator, Layout},
    cell::RefCell,
    ptr::NonNull,
};

use eonix_mm::{
    address::{AddrOps as _, PRange},
    paging::{PageAlloc, UnmanagedRawPage, PAGE_SIZE, PFN},
};

pub use crate::arch::mm::{
    flush_tlb, flush_tlb_all, get_root_page_table_pfn, set_root_page_table_pfn, ArchMemory,
    ArchPagingMode, ArchPhysAccess, GLOBAL_PAGE_TABLE,
};

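/// A minimal physical page allocator backed by a fixed-capacity ring buffer of
/// free physical ranges. Pages are carved off the end of the stored ranges;
/// freeing pages is not supported.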
pub struct BasicPageAlloc {
    ranges: [Option<PRange>; Self::MAX],
    head: usize,
    tail: usize,
}

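/// Book-keeping for [`ScopedAllocator`]: the remaining window of the backing
/// buffer and the number of live allocations.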
struct ScopedAllocInner<'a> {
    _memory: &'a mut [u8],
    current: NonNull<[u8]>,
    allocated_count: usize,
}

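/// A bump allocator over a caller-provided byte buffer.
///
/// `allocate` advances a cursor through the buffer; `deallocate` only
/// decrements the live-allocation counter. Dropping the allocator while
/// allocations are still outstanding panics.
///
/// # Example
///
/// A usage sketch (not from the original sources; the buffer size is
/// arbitrary):
///
/// ```ignore
/// let mut buffer = [0u8; 128];
/// let scoped = ScopedAllocator::new(&mut buffer);
/// scoped.with_alloc(|alloc| {
///     let layout = Layout::new::<u64>();
///     let ptr = alloc.allocate(layout).expect("buffer is large enough");
///     // ... use `ptr` ...
///     unsafe { alloc.deallocate(ptr.cast(), layout) };
/// });
/// ```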
pub struct ScopedAllocator<'a> {
    inner: RefCell<ScopedAllocInner<'a>>,
}

impl BasicPageAlloc {
    const MAX: usize = 32;

    pub const fn new() -> Self {
        Self {
            ranges: [None; Self::MAX],
            head: 0,
            tail: 0,
        }
    }

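    /// Splits a single page off the end of the range at the head of the ring
    /// and returns its PFN. Panics if no free ranges are left.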
    fn alloc_one(&mut self) -> PFN {
        assert_ne!(self.head, self.tail, "No free pages available");
        let mut range = self.ranges[self.head].take().unwrap();
        range = range.shrink(PAGE_SIZE);
        let pfn = PFN::from(range.end());

        if range.len() != 0 {
            self.ranges[self.head] = Some(range);
        } else {
            self.head += 1;
            self.head %= Self::MAX;
        }

        pfn
    }

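    /// Allocates `1 << order` contiguous pages by rebuilding the ring and
    /// carving the block off the first range that is large enough.
    /// Panics if no such range exists.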
    fn alloc_order(&mut self, order: u32) -> PFN {
        assert!(order <= 4);
        let me = core::mem::replace(self, Self::new());

        let mut found = None;
        for mut range in me.into_iter() {
            if found.is_some() || range.len() < (PAGE_SIZE << order) {
                self.add_range(range);
                continue;
            }

            range = range.shrink(PAGE_SIZE << order);
            found = Some(PFN::from(range.end()));

            if range.len() != 0 {
                self.add_range(range);
            }
        }

        found.expect("No free pages available for the requested order")
    }

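    /// Appends a free physical range to the ring after rounding its start up
    /// and its end down (`ceil`/`floor`). Panics if the ring is full.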
    pub fn add_range(&mut self, range: PRange) {
        let tail = self.tail;
        self.tail += 1;
        self.tail %= Self::MAX;

        if self.tail == self.head {
            panic!("Page allocator is full");
        }

        self.ranges[tail] = Some(PRange::new(range.start().ceil(), range.end().floor()));
    }

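    /// Allocates `1 << order` contiguous pages. Orders above 4 are not
    /// supported and cause a panic.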
    pub fn alloc(&mut self, order: u32) -> PFN {
        match order {
            0 => self.alloc_one(),
            ..=4 => self.alloc_order(order),
            _ => panic!("Order {} is too large for BasicPageAlloc", order),
        }
    }

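    /// Consumes the allocator and yields the remaining free ranges in ring
    /// order, starting from the head.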
    pub fn into_iter(self) -> impl Iterator<Item = PRange> {
        self.ranges
            .into_iter()
            .cycle()
            .skip(self.head)
            .map_while(|x| x)
    }
}

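/// A cloneable handle to a shared [`BasicPageAlloc`] that implements the
/// [`PageAlloc`] trait. Deallocation is not supported and panics.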
#[derive(Clone)]
pub struct BasicPageAllocRef<'a>(&'a RefCell<BasicPageAlloc>);

impl<'a> BasicPageAllocRef<'a> {
    pub const fn new(alloc: &'a RefCell<BasicPageAlloc>) -> Self {
        Self(alloc)
    }
}

impl PageAlloc for BasicPageAllocRef<'_> {
    type RawPage = UnmanagedRawPage;

    fn alloc_order(&self, order: u32) -> Option<Self::RawPage> {
        Some(Self::RawPage::new(self.0.borrow_mut().alloc(order), order))
    }

    unsafe fn dealloc(&self, _: Self::RawPage) {
        panic!("Dealloc is not supported in BasicPageAlloc");
    }

    fn has_management_over(&self, _: Self::RawPage) -> bool {
        true
    }
}

impl<'a> ScopedAllocator<'a> {
    pub fn new(memory: &'a mut [u8]) -> Self {
        ScopedAllocator {
            inner: RefCell::new(ScopedAllocInner {
                current: NonNull::new(memory).unwrap(),
                _memory: memory,
                allocated_count: 0,
            }),
        }
    }

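    /// Runs `func` with a reference to this allocator whose lifetime is
    /// bounded by both the allocator and the borrow.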
    pub fn with_alloc<'b, 'r, O>(&'r self, func: impl FnOnce(&'b ScopedAllocator<'a>) -> O) -> O
    where
        'a: 'b,
        'r: 'b,
    {
        func(self)
    }
}

unsafe impl Allocator for &ScopedAllocator<'_> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let mut inner = self.inner.borrow_mut();
        let memory = &mut inner.current;

        let addr: NonNull<u8> = memory.cast();
        let offset = addr.align_offset(layout.align());

        if offset + layout.size() > memory.len() {
            return Err(AllocError);
        }

        let allocated = unsafe {
            // SAFETY: `addr + offset` won't overflow.
            NonNull::slice_from_raw_parts(addr.add(offset), layout.size())
        };

        unsafe {
            // SAFETY: `allocated + layout.size()` won't overflow.
            *memory = NonNull::slice_from_raw_parts(
                allocated.cast::<u8>().add(layout.size()),
                memory.len() - offset - layout.size(),
            );
        }

        inner.allocated_count += 1;
        Ok(allocated)
    }

    unsafe fn deallocate(&self, _: NonNull<u8>, _: Layout) {
        self.inner.borrow_mut().allocated_count -= 1;
    }
}

impl Drop for ScopedAllocator<'_> {
    fn drop(&mut self) {
        let inner = self.inner.borrow();
        if inner.allocated_count > 0 {
            panic!(
                "Memory leak detected: {} allocations not deallocated",
                inner.allocated_count
            );
        }
    }
}