mm.rs

use core::{
    alloc::{AllocError, Allocator, Layout},
    cell::RefCell,
    ptr::NonNull,
};

use eonix_mm::{
    address::{AddrOps as _, PRange},
    paging::{PageAlloc, UnmanagedRawPage, PAGE_SIZE, PFN},
};

pub use crate::arch::mm::{ArchMemory, ArchPagingMode, ArchPhysAccess, GLOBAL_PAGE_TABLE};
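
/// A minimal early-boot physical page allocator.
///
/// Free memory is tracked as a fixed-size ring buffer of physical ranges
/// (`PRange`). Single pages are carved off the end of the range at the head
/// of the ring; larger blocks scan for the first range that can hold them.
/// Freeing is not supported, so this only suits allocations that are never
/// returned.
///
/// Hypothetical usage sketch (not part of the original file; the range value
/// is made up for illustration):
///
/// ```ignore
/// let mut alloc = BasicPageAlloc::new();
/// alloc.add_range(some_prange_reported_by_firmware);
/// let page = alloc.alloc(0);  // one page
/// let block = alloc.alloc(2); // an order-2 (4-page) block
/// ```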
pub struct BasicPageAlloc {
    /// Ring buffer of free physical ranges; occupied slots live in
    /// `head..tail` (modulo `MAX`).
    ranges: [Option<PRange>; Self::MAX],
    head: usize,
    tail: usize,
}
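
/// Interior state of [`ScopedAllocator`]: the backing buffer, a cursor over
/// the part that is still free, and a count of outstanding allocations.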
struct ScopedAllocInner<'a> {
    _memory: &'a mut [u8],
    /// The not-yet-allocated tail of `_memory`.
    current: NonNull<[u8]>,
    allocated_count: usize,
}
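
/// A bump allocator over a caller-provided byte buffer.
///
/// `allocate` hands out successive, suitably aligned slices of the buffer;
/// `deallocate` only decrements a counter. Dropping the allocator while
/// allocations are still outstanding panics, which catches allocator-backed
/// values that outlive the scope.
///
/// Hypothetical usage sketch (not part of the original file; assumes a
/// `Vec::new_in`-style allocator-aware collection is available):
///
/// ```ignore
/// let mut buf = [0u8; 512];
/// let scoped = ScopedAllocator::new(&mut buf);
/// scoped.with_alloc(|alloc| {
///     let mut v = Vec::new_in(alloc);
///     v.push(42u8);
///     // `v` is dropped before `scoped`, so the leak check in `Drop` passes.
/// });
/// ```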
pub struct ScopedAllocator<'a> {
    inner: RefCell<ScopedAllocInner<'a>>,
}

impl BasicPageAlloc {
    const MAX: usize = 32;

    pub const fn new() -> Self {
        Self {
            ranges: [None; Self::MAX],
            head: 0,
            tail: 0,
        }
    }

    /// Allocate a single page by carving it off the end of the range at the
    /// head of the ring.
    fn alloc_one(&mut self) -> PFN {
        assert_ne!(self.head, self.tail, "No free pages available");

        let mut range = self.ranges[self.head].take().unwrap();
        range = range.shrink(PAGE_SIZE);
        let pfn = PFN::from(range.end());

        if range.len() != 0 {
            self.ranges[self.head] = Some(range);
        } else {
            // The range is exhausted; advance the head of the ring.
            self.head += 1;
            self.head %= Self::MAX;
        }

        pfn
    }

    /// Allocate a contiguous block of `1 << order` pages: take all ranges out
    /// of the allocator, carve the block from the end of the first range that
    /// is large enough, and put every remaining range back.
    fn alloc_order(&mut self, order: u32) -> PFN {
        assert!(order <= 4);

        let me = core::mem::replace(self, Self::new());
        let mut found = None;

        for mut range in me.into_iter() {
            if found.is_some() || range.len() < (PAGE_SIZE << order) {
                self.add_range(range);
                continue;
            }

            range = range.shrink(PAGE_SIZE << order);
            found = Some(PFN::from(range.end()));

            if range.len() != 0 {
                self.add_range(range);
            }
        }

        found.expect("No free pages available for the requested order")
    }

    /// Add a range of free physical memory, with its endpoints rounded inward
    /// via `ceil()`/`floor()`.
    pub fn add_range(&mut self, range: PRange) {
        let tail = self.tail;
        self.tail += 1;
        self.tail %= Self::MAX;

        if self.tail == self.head {
            panic!("Page allocator is full");
        }

        self.ranges[tail] = Some(PRange::new(range.start().ceil(), range.end().floor()));
    }

    pub fn alloc(&mut self, order: u32) -> PFN {
        match order {
            0 => self.alloc_one(),
            ..=4 => self.alloc_order(order),
            _ => panic!("Order {} is too large for BasicPageAlloc", order),
        }
    }

    /// Iterate over the remaining free ranges, starting at the head of the
    /// ring and stopping at the first empty slot.
    pub fn into_iter(self) -> impl Iterator<Item = PRange> {
        self.ranges
            .into_iter()
            .cycle()
            .skip(self.head)
            .map_while(|x| x)
    }
}
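
/// A shared, clonable handle to a [`BasicPageAlloc`] that implements the
/// [`PageAlloc`] trait.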
#[derive(Clone)]
pub struct BasicPageAllocRef<'a>(&'a RefCell<BasicPageAlloc>);

impl<'a> BasicPageAllocRef<'a> {
    pub const fn new(alloc: &'a RefCell<BasicPageAlloc>) -> Self {
        Self(alloc)
    }
}

impl PageAlloc for BasicPageAllocRef<'_> {
    type RawPage = UnmanagedRawPage;

    fn alloc_order(&self, order: u32) -> Option<Self::RawPage> {
        Some(Self::RawPage::new(self.0.borrow_mut().alloc(order), order))
    }

    unsafe fn dealloc(&self, _: Self::RawPage) {
        // The boot allocator never reclaims pages.
        panic!("Dealloc is not supported in BasicPageAlloc");
    }

    fn has_management_over(&self, _: Self::RawPage) -> bool {
        true
    }
}

impl<'a> ScopedAllocator<'a> {
    pub fn new(memory: &'a mut [u8]) -> Self {
        ScopedAllocator {
            inner: RefCell::new(ScopedAllocInner {
                current: NonNull::new(memory).unwrap(),
                _memory: memory,
                allocated_count: 0,
            }),
        }
    }

    /// Run `func` with a borrow of this allocator whose lifetime `'b` is
    /// bounded by both the borrow `'r` and the buffer lifetime `'a`.
    pub fn with_alloc<'b, 'r, O>(&'r self, func: impl FnOnce(&'b ScopedAllocator<'a>) -> O) -> O
    where
        'a: 'b,
        'r: 'b,
    {
        func(self)
    }
}
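
// Bump allocation out of the scoped buffer: `allocate` aligns the cursor,
// returns the next `layout.size()` bytes, and advances the cursor past them;
// `deallocate` only adjusts the outstanding-allocation count.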
unsafe impl Allocator for &ScopedAllocator<'_> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let mut inner = self.inner.borrow_mut();
        let memory = &mut inner.current;

        let addr: NonNull<u8> = memory.cast();
        let offset = addr.align_offset(layout.align());

        if offset + layout.size() > memory.len() {
            return Err(AllocError);
        }

        let allocated = unsafe {
            // SAFETY: `addr + offset` won't overflow.
            NonNull::slice_from_raw_parts(addr.add(offset), layout.size())
        };

        unsafe {
            // SAFETY: `allocated + layout.size()` won't overflow.
            *memory = NonNull::slice_from_raw_parts(
                allocated.cast::<u8>().add(layout.size()),
                memory.len() - offset - layout.size(),
            );
        }

        inner.allocated_count += 1;
        Ok(allocated)
    }

    unsafe fn deallocate(&self, _: NonNull<u8>, _: Layout) {
        self.inner.borrow_mut().allocated_count -= 1;
    }
}

impl Drop for ScopedAllocator<'_> {
    fn drop(&mut self) {
        let inner = self.inner.borrow();
        if inner.allocated_count > 0 {
            panic!(
                "Memory leak detected: {} allocations not deallocated",
                inner.allocated_count
            );
        }
    }
}