//! Physical page allocator: a buddy-system allocator over the global
//! `PAGE_ARRAY` of per-frame `Page` metadata.
  1. use super::address::{PAddr, PFN};
  2. use crate::intrusive_list::Link;
  3. use crate::{container_of, prelude::*};
  4. use bitflags::bitflags;
  5. use core::sync::atomic::Ordering;
  6. use core::{ptr::NonNull, sync::atomic::AtomicU32};
  7. use lazy_static::lazy_static;
/// Largest block order the buddy system manages (blocks of up to 2^10 pages).
const MAX_PAGE_ORDER: u32 = 10;
/// Base address of the global `Page` metadata array; entry `i` describes
/// physical frame number `i`.
/// NOTE(review): assumes this region is mapped by early boot code — confirm
/// against the paging setup.
const PAGE_ARRAY: *mut Page = 0xffffff8040000000 as *mut Page;
/// Pointer to a page's metadata entry inside `PAGE_ARRAY`.
pub(super) type PagePtr = Ptr<Page>;
/// A copyable, possibly-null raw-pointer wrapper (`Option<NonNull<T>>`).
/// `repr(transparent)` plus the `NonNull` niche keep it the same size and
/// layout as a plain `*mut T`.
#[repr(transparent)]
pub struct Ptr<T>(Option<NonNull<T>>);
  13. impl<T> Clone for Ptr<T> {
  14. fn clone(&self) -> Self {
  15. Self(self.0)
  16. }
  17. }
  18. impl<T> Copy for Ptr<T> {}
impl<T> Ptr<T> {
    /// Wraps an optional `NonNull` pointer.
    pub const fn new(ptr: Option<NonNull<T>>) -> Self {
        Self(ptr)
    }
    /// Builds a `Ptr` from a raw pointer; a null raw pointer becomes `None`.
    pub fn from_raw(ptr: *mut T) -> Self {
        Self::new(NonNull::new(ptr))
    }
    /// The null (empty) pointer.
    pub fn null() -> Self {
        Self::new(None)
    }
    /// Returns `true` if the pointer is null.
    pub fn is_none(&self) -> bool {
        self.0.is_none()
    }
    /// Returns `true` if the pointer is non-null.
    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }
    /// Returns the raw pointer.
    ///
    /// # Panics
    /// Panics if the pointer is null (`unwrap` on `None`).
    pub fn as_ptr(&self) -> *mut T {
        self.0.unwrap().as_ptr()
    }
    /// Dereferences to a shared reference with a caller-chosen lifetime.
    ///
    /// NOTE(review): `'a` is unconstrained, so the reference can outlive the
    /// pointee — callers must guarantee the target stays valid and unaliased
    /// for `'a`. Panics if the pointer is null.
    pub fn as_ref<'a>(&self) -> &'a T {
        unsafe { &*self.as_ptr() }
    }
    /// Dereferences to an exclusive reference with a caller-chosen lifetime.
    ///
    /// NOTE(review): like `as_ref`, aliasing is not checked here — callers
    /// must ensure exclusive access. Panics if the pointer is null.
    pub fn as_mut<'a>(&self) -> &'a mut T {
        unsafe { &mut *self.as_ptr() }
    }
}
impl PagePtr {
    /// Bumps the page's reference count, returning the previous value.
    ///
    /// # Safety
    /// `self` must be non-null and point at a live `Page` in `PAGE_ARRAY`.
    pub unsafe fn increase_refcount(&self) -> u32 {
        self.as_mut().increase_refcount()
    }
    /// Drops the page's reference count, returning the previous value.
    ///
    /// # Safety
    /// `self` must be non-null and point at a live `Page` in `PAGE_ARRAY`.
    pub unsafe fn decrease_refcount(&self) -> u32 {
        self.as_mut().decrease_refcount()
    }
    /// Reads the current reference count (acquire ordering).
    ///
    /// # Safety
    /// `self` must be non-null and point at a live `Page`.
    pub unsafe fn load_refcount(&self) -> u32 {
        self.as_ref().refcount.load(Ordering::Acquire)
    }
    /// Reads the buddy-system order recorded in the page metadata.
    fn get_order(&self) -> u32 {
        self.as_ref().order
    }
    /// A pointer is valid for `order` iff it is non-null and its recorded
    /// order matches.
    pub fn is_valid(&self, order: u32) -> bool {
        self.is_some() && self.get_order() == order
    }
    /// Offsets the pointer by `count` `Page` entries; null stays null.
    fn offset(&self, count: usize) -> Self {
        match self.0 {
            Some(non_null_ptr) => {
                // SAFETY: NOTE(review): assumes the result stays inside the
                // `PAGE_ARRAY` mapping — callers must pass in-range offsets.
                let new_raw_ptr = unsafe { non_null_ptr.as_ptr().add(count) };
                Self::from_raw(new_raw_ptr)
            }
            None => Self::null(),
        }
    }
}
  71. impl Into<PFN> for PagePtr {
  72. fn into(self) -> PFN {
  73. unsafe { PFN::from(self.as_ptr().offset_from(PAGE_ARRAY) as usize) }
  74. }
  75. }
impl From<PFN> for PagePtr {
    /// Maps a physical frame number to its metadata entry in `PAGE_ARRAY`.
    fn from(pfn: PFN) -> Self {
        // SAFETY: NOTE(review): assumes `pfn` lies within the mapped
        // `PAGE_ARRAY` region — confirm callers only pass valid frame numbers.
        unsafe { Self::from_raw(PAGE_ARRAY.add(pfn.0)) }
    }
}
bitflags! {
    // TODO: Use atomic
    /// Per-page state bits; mutated non-atomically under the zone lock.
    struct PageFlags: usize {
        const PRESENT = 1 << 0; // Frame is backed by usable physical memory.
        const LOCKED = 1 << 1; // NOTE(review): never set in this file — confirm external users.
        const BUDDY = 1 << 2; // Head page of a buddy-system block.
        const SLAB = 1 << 3; // NOTE(review): presumably set by the slab allocator — not set here.
        const DIRTY = 1 << 4; // NOTE(review): never set in this file — confirm external users.
        const FREE = 1 << 5; // Currently linked into a `FreeArea` free list.
    }
}
/// Per-frame metadata; one entry per physical page frame, stored in the
/// global `PAGE_ARRAY` and indexed by PFN.
pub(super) struct Page {
    // Now only used for free page links in the buddy system.
    // Can be used for LRU page swap in the future.
    link: Link,
    flags: PageFlags, // TODO: This should be atomic.
    /// # Safety
    /// This field is only used in buddy system, which is protected by the global lock.
    order: u32,
    // Number of outstanding users of this page; asserted to be 0 when the
    // page is freed or sitting in a free list.
    refcount: AtomicU32,
}
/// One bucket of the buddy allocator: an intrusive list of free blocks that
/// all share the same order, plus a count of how many blocks are linked.
struct FreeArea {
    free_list: Link,
    count: usize,
}
/// Safety: `Zone` is `Send` because the `PAGE_ARRAY` is shared between cores.
unsafe impl Send for Zone {}
// /// Safety: TODO
// unsafe impl Sync for Zone {}
/// Buddy-allocator state: one `FreeArea` per order in `0..=MAX_PAGE_ORDER`.
struct Zone {
    free_areas: [FreeArea; MAX_PAGE_ORDER as usize + 1],
}
impl Page {
    /// Sets the given flag bits (non-atomic; relies on the zone lock).
    fn set_flags(&mut self, flags: PageFlags) {
        self.flags.insert(flags);
    }
    /// Clears the given flag bits.
    fn remove_flags(&mut self, flags: PageFlags) {
        self.flags.remove(flags);
    }
    /// Records the buddy order of the block this page heads.
    fn set_order(&mut self, order: u32) {
        self.order = order;
    }
    /// Increments the refcount, returning the previous value.
    // Relaxed increment / AcqRel decrement mirrors the `Arc` refcount
    // pattern: taking a new reference needs no synchronization edge.
    unsafe fn increase_refcount(&mut self) -> u32 {
        self.refcount.fetch_add(1, Ordering::Relaxed)
    }
    /// Decrements the refcount, returning the previous value.
    unsafe fn decrease_refcount(&mut self) -> u32 {
        self.refcount.fetch_sub(1, Ordering::AcqRel)
    }
    /// Whether this page is the head of a buddy-system block.
    pub fn is_buddy(&self) -> bool {
        self.flags.contains(PageFlags::BUDDY)
    }
    #[allow(dead_code)]
    /// Whether this page carries the SLAB flag.
    pub fn is_slab(&self) -> bool {
        self.flags.contains(PageFlags::SLAB)
    }
    /// Whether this frame is backed by usable physical memory.
    pub fn is_present(&self) -> bool {
        self.flags.contains(PageFlags::PRESENT)
    }
    /// Whether this page is currently linked into a free list.
    pub fn is_free(&self) -> bool {
        self.flags.contains(PageFlags::FREE)
    }
}
impl FreeArea {
    /// An empty free area.
    const fn new() -> Self {
        Self {
            free_list: Link::new(),
            count: 0,
        }
    }
    /// Pops one block off the free list, clearing its FREE flag.
    /// Returns a null pointer when the list is empty.
    fn alloc_pages(&mut self) -> PagePtr {
        if let Some(pages_link) = self.free_list.next_mut() {
            assert_ne!(self.count, 0);
            // Recover the owning `Page` from its embedded intrusive link.
            let pages_ptr = unsafe { container_of!(pages_link, Page, link) };
            let pages_ptr = Ptr::from_raw(pages_ptr);
            self.count -= 1;
            pages_ptr.as_mut().remove_flags(PageFlags::FREE);
            pages_link.remove();
            pages_ptr
        } else {
            PagePtr::null()
        }
    }
    /// Pushes a block onto the free list and marks it FREE.
    /// Callers set the page's order before calling.
    fn add_pages(&mut self, pages_ptr: PagePtr) {
        self.count += 1;
        pages_ptr.as_mut().set_flags(PageFlags::FREE);
        self.free_list.insert(&mut pages_ptr.as_mut().link)
    }
    /// Unlinks a specific free block (used when coalescing with a buddy).
    fn del_pages(&mut self, pages_ptr: PagePtr) {
        assert!(self.count >= 1 && pages_ptr.as_ref().is_free());
        self.count -= 1;
        pages_ptr.as_mut().remove_flags(PageFlags::FREE);
        pages_ptr.as_mut().link.remove();
    }
}
  175. impl Zone {
  176. const fn new() -> Self {
  177. Self {
  178. free_areas: [const { FreeArea::new() }; MAX_PAGE_ORDER as usize + 1],
  179. }
  180. }
  181. fn alloc_pages(&mut self, order: u32) -> PagePtr {
  182. for current_order in order..=MAX_PAGE_ORDER {
  183. let pages_ptr = self.free_areas[current_order as usize].alloc_pages();
  184. if pages_ptr.is_none() {
  185. continue;
  186. }
  187. unsafe {
  188. pages_ptr.as_mut().increase_refcount();
  189. }
  190. pages_ptr.as_mut().set_order(order);
  191. if current_order > order {
  192. self.expand(pages_ptr, current_order, order);
  193. }
  194. assert!(pages_ptr.as_ref().is_present() && !pages_ptr.as_ref().is_free());
  195. return pages_ptr;
  196. }
  197. PagePtr::new(None)
  198. }
  199. fn expand(&mut self, pages_ptr: PagePtr, order: u32, target_order: u32) {
  200. assert!(pages_ptr.is_some());
  201. let mut offset = 1 << order;
  202. for order in (target_order..order).rev() {
  203. offset >>= 1;
  204. let split_pages_ptr = pages_ptr.offset(offset);
  205. split_pages_ptr.as_mut().set_order(order);
  206. split_pages_ptr.as_mut().set_flags(PageFlags::BUDDY);
  207. self.free_areas[order as usize].add_pages(split_pages_ptr);
  208. }
  209. }
  210. fn free_pages(&mut self, mut pages_ptr: PagePtr, order: u32) {
  211. assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
  212. assert_eq!(pages_ptr.get_order(), order);
  213. let mut pfn: PFN = pages_ptr.into();
  214. let mut current_order = order;
  215. while current_order < MAX_PAGE_ORDER {
  216. let buddy_pfn = pfn.buddy_pfn(current_order);
  217. let buddy_pages_ptr = PagePtr::from(buddy_pfn);
  218. if !self.buddy_check(buddy_pages_ptr, current_order) {
  219. break;
  220. }
  221. pages_ptr.as_mut().remove_flags(PageFlags::BUDDY);
  222. buddy_pages_ptr.as_mut().remove_flags(PageFlags::BUDDY);
  223. self.free_areas[current_order as usize].del_pages(buddy_pages_ptr);
  224. pages_ptr = PagePtr::from(pfn.combined_pfn(buddy_pfn));
  225. pages_ptr.as_mut().set_flags(PageFlags::BUDDY);
  226. pfn = pfn.combined_pfn(buddy_pfn);
  227. current_order += 1;
  228. }
  229. pages_ptr.as_mut().set_order(current_order);
  230. self.free_areas[current_order as usize].add_pages(pages_ptr);
  231. }
  232. /// This function checks whether a page is free && is the buddy
  233. /// we can coalesce a page and its buddy if
  234. /// - the buddy is valid(present) &&
  235. /// - the buddy is right now in free_areas &&
  236. /// - a page and its buddy have the same order &&
  237. /// - a page and its buddy are in the same zone. // check when smp
  238. fn buddy_check(&self, pages_ptr: PagePtr, order: u32) -> bool {
  239. if !pages_ptr.as_ref().is_present() {
  240. return false;
  241. }
  242. if !(pages_ptr.as_ref().is_free()) {
  243. return false;
  244. }
  245. if pages_ptr.as_ref().order != order {
  246. return false;
  247. }
  248. assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
  249. true
  250. }
  251. /// Only used on buddy initialization
  252. fn create_pages(&mut self, start: usize, end: usize) {
  253. let mut start_pfn = PAddr::from(start).ceil_pfn();
  254. let end_pfn = PAddr::from(end).floor_pfn();
  255. while start_pfn < end_pfn {
  256. let mut order = usize::from(start_pfn).trailing_zeros().min(MAX_PAGE_ORDER);
  257. while start_pfn + order as usize > end_pfn {
  258. order -= 1;
  259. }
  260. let page_ptr: PagePtr = start_pfn.into();
  261. page_ptr.as_mut().set_flags(PageFlags::BUDDY);
  262. self.free_areas[order as usize].add_pages(page_ptr);
  263. start_pfn = start_pfn + (1 << order) as usize;
  264. }
  265. }
  266. }
lazy_static! {
    // The single global buddy-allocator instance, guarded by a spin lock.
    static ref ZONE: Spin<Zone> = Spin::new(Zone::new());
}
/// Allocates a single (order-0) page from the global zone.
/// Returns a null `PagePtr` on out-of-memory.
pub(super) fn alloc_page() -> PagePtr {
    ZONE.lock().alloc_pages(0)
}
/// Allocates `2^order` contiguous pages from the global zone.
/// Returns a null `PagePtr` on out-of-memory.
pub(super) fn alloc_pages(order: u32) -> PagePtr {
    ZONE.lock().alloc_pages(order)
}
/// Returns a `2^order`-page block to the global zone.
/// The block's refcount must already be 0 (asserted inside).
pub(super) fn free_pages(page_ptr: PagePtr, order: u32) {
    ZONE.lock().free_pages(page_ptr, order)
}
/// C entry point: marks every whole frame inside the physical address range
/// `[start, end)` as PRESENT (partial frames at the edges are excluded by
/// the ceil/floor PFN rounding).
#[no_mangle]
pub extern "C" fn mark_present(start: usize, end: usize) {
    let mut start_pfn = PAddr::from(start).ceil_pfn();
    let end_pfn = PAddr::from(end).floor_pfn();
    while start_pfn < end_pfn {
        PagePtr::from(start_pfn)
            .as_mut()
            .set_flags(PageFlags::PRESENT);
        start_pfn = start_pfn + 1;
    }
}
/// C entry point: hands the physical address range `[start, end)` to the
/// buddy allocator as free pages.
#[no_mangle]
pub extern "C" fn create_pages(start: usize, end: usize) {
    ZONE.lock().create_pages(start, end);
}
/// C entry point: converts a `Page` metadata pointer back to its frame
/// number (its index in `PAGE_ARRAY`).
///
/// NOTE(review): `page` must point into `PAGE_ARRAY`; `offset_from` on an
/// unrelated pointer is undefined behavior.
#[no_mangle]
pub extern "C" fn page_to_pfn(page: *const Page) -> usize {
    unsafe { page.offset_from(PAGE_ARRAY) as usize }
}
  298. #[no_mangle]
  299. pub extern "C" fn c_alloc_page() -> *const Page {
  300. ZONE.lock().alloc_pages(0).as_ptr() as *const Page
  301. }
  302. #[no_mangle]
  303. pub extern "C" fn c_alloc_pages(order: u32) -> *const Page {
  304. ZONE.lock().alloc_pages(order).as_ptr() as *const Page
  305. }
/// C entry point: allocates one page for use as a page table, zero-fills it,
/// and returns the page's *physical* address.
///
/// NOTE(review): panics on allocation failure (`as_ptr` unwraps a null
/// pointer inside the `Into<PFN>` conversion) — consider returning 0 so C
/// callers can detect out-of-memory.
#[no_mangle]
pub extern "C" fn c_alloc_page_table() -> usize {
    let pfn: PFN = ZONE.lock().alloc_pages(0).into();
    // Frame number -> physical address; assumes 4 KiB pages (12-bit shift).
    let paddr: usize = usize::from(pfn) << 12;
    // SAFETY: NOTE(review): writes through the physical address directly —
    // assumes this range is identity-mapped; confirm against paging setup.
    unsafe {
        core::ptr::write_bytes(paddr as *mut u8, 0, 4096);
    }
    paddr
}