//! page_alloc.rs: physical page allocator.
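//!
//! Two levels, as implemented below: a global buddy-system [`Zone`] behind a
//! spin lock hands out naturally-aligned power-of-two blocks of pages, while a
//! per-CPU cache ([`PerCpuPages`]) serves small allocations (order <=
//! `PAGE_ALLOC_COSTLY_ORDER`) without touching the global lock on the fast
//! path, refilling from the zone in batches on a miss.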
use super::address::{PAddr, PFN};
use crate::intrusive_list::Link;
use crate::{container_of, prelude::*};
use bitflags::bitflags;
use core::sync::atomic::Ordering;
use core::{ptr::NonNull, sync::atomic::AtomicU32};

/// Largest block the buddy system manages: 2^10 = 1024 contiguous pages.
const MAX_PAGE_ORDER: u32 = 10;
/// Allocations of this order or below are served from the per-CPU caches.
const PAGE_ALLOC_COSTLY_ORDER: u32 = 3;
/// Base per-CPU refill batch in order-0 pages; scaled as `batch >> order`.
const BATCH_SIZE: u32 = 64;
/// Virtual base of the global array holding one `Page` entry per PFN.
const PAGE_ARRAY: *mut Page = 0xffffff8040000000 as *mut Page;
pub(super) type PagePtr = Ptr<Page>;

#[repr(transparent)]
pub struct Ptr<T>(Option<NonNull<T>>);

impl<T> Clone for Ptr<T> {
    fn clone(&self) -> Self {
        Self(self.0)
    }
}
impl<T> Copy for Ptr<T> {}

impl<T> Ptr<T> {
    pub const fn new(ptr: Option<NonNull<T>>) -> Self {
        Self(ptr)
    }

    pub fn from_raw(ptr: *mut T) -> Self {
        Self::new(NonNull::new(ptr))
    }

    pub fn null() -> Self {
        Self::new(None)
    }

    pub fn is_none(&self) -> bool {
        self.0.is_none()
    }

    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }

    /// Panics if the pointer is null.
    pub fn as_ptr(&self) -> *mut T {
        self.0.unwrap().as_ptr()
    }

    /// Returns a reference with an unbounded lifetime. The caller must
    /// ensure the pointee outlives every use of the returned reference.
    pub fn as_ref<'a>(&self) -> &'a T {
        unsafe { &*self.as_ptr() }
    }

    /// Returns a mutable reference with an unbounded lifetime; the caller
    /// must additionally guarantee exclusive access to the pointee.
    pub fn as_mut<'a>(&self) -> &'a mut T {
        unsafe { &mut *self.as_ptr() }
    }
}
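// Illustrative behaviour of the wrapper above (a sketch, not compiled here):
//
//     let p: Ptr<Page> = Ptr::from_raw(core::ptr::null_mut());
//     assert!(p.is_none()); // NonNull::new maps a null pointer to None
//     // p.as_ptr() would panic here: it unwraps the inner Option.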
impl PagePtr {
    pub unsafe fn increase_refcount(&self) -> u32 {
        self.as_mut().increase_refcount()
    }

    pub unsafe fn decrease_refcount(&self) -> u32 {
        self.as_mut().decrease_refcount()
    }

    pub unsafe fn load_refcount(&self) -> u32 {
        self.as_ref().refcount.load(Ordering::Acquire)
    }

    fn get_order(&self) -> u32 {
        self.as_ref().order
    }

    pub fn is_valid(&self, order: u32) -> bool {
        self.is_some() && self.get_order() == order
    }

    /// Offsets the pointer by `count` `Page` entries; a null pointer stays null.
    fn offset(&self, count: usize) -> Self {
        match self.0 {
            Some(non_null_ptr) => {
                let new_raw_ptr = unsafe { non_null_ptr.as_ptr().add(count) };
                Self::from_raw(new_raw_ptr)
            }
            None => Self::null(),
        }
    }
}
// `From` rather than `Into` is the idiomatic direction; `.into()` call sites
// keep working through the blanket impl.
impl From<PagePtr> for PFN {
    fn from(ptr: PagePtr) -> Self {
        // A page's PFN is its index within the global `PAGE_ARRAY`.
        unsafe { PFN::from(ptr.as_ptr().offset_from(PAGE_ARRAY) as usize) }
    }
}

impl From<PFN> for PagePtr {
    fn from(pfn: PFN) -> Self {
        unsafe { Self::from_raw(PAGE_ARRAY.add(pfn.0)) }
    }
}
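// Worked example of the two conversions above (illustrative; this file uses
// 4 KiB pages, cf. the `<< 12` in `c_alloc_page_table` below): the page
// backing physical address 0x203000 has PFN 0x203, and its metadata is the
// entry at `PAGE_ARRAY.add(0x203)`; `offset_from` inverts that mapping.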
bitflags! {
    // TODO: Use atomic
    struct PageFlags: usize {
        const PRESENT = 1 << 0;
        const LOCKED = 1 << 1;
        /// The page is (part of) a block owned by the buddy system.
        const BUDDY = 1 << 2;
        const SLAB = 1 << 3;
        const DIRTY = 1 << 4;
        /// The page is currently linked into some `FreeArea` list.
        const FREE = 1 << 5;
        /// The page lives in a per-CPU cache and must not be merged by the zone.
        const LOCAL = 1 << 6;
    }
}
pub(super) struct Page {
    // Now only used for free page links in the buddy system.
    // Can be used for LRU page swap in the future.
    link: Link,
    flags: PageFlags, // TODO: This should be atomic.
    /// # Safety
    /// This field is only used in the buddy system, which is protected by the global lock.
    order: u32,
    refcount: AtomicU32,
}
struct FreeArea {
    free_list: Link,
    /// Number of free blocks currently linked into `free_list`.
    count: usize,
}
/// Safety: `Zone` is `Send` because the `PAGE_ARRAY` is shared between cores.
unsafe impl Send for Zone {}
// /// Safety: TODO
// unsafe impl Sync for Zone {}

struct Zone {
    /// One free list per order, indexed by order `0..=MAX_PAGE_ORDER`.
    free_areas: [FreeArea; MAX_PAGE_ORDER as usize + 1],
}
struct PerCpuPages {
    /// Base refill batch in order-0 pages; scaled down as `batch >> order`.
    batch: u32,
    _high: u32, // TODO: use in future
    free_areas: [FreeArea; PAGE_ALLOC_COSTLY_ORDER as usize + 1],
}

impl PerCpuPages {
    const fn new() -> Self {
        Self {
            batch: BATCH_SIZE,
            _high: 0,
            free_areas: [const { FreeArea::new() }; PAGE_ALLOC_COSTLY_ORDER as usize + 1],
        }
    }
    fn get_free_pages(&mut self, order: u32) -> PagePtr {
        assert!(order <= PAGE_ALLOC_COSTLY_ORDER);
        loop {
            let pages_ptr = self.free_areas[order as usize].get_free_pages();
            if pages_ptr.is_some() {
                return pages_ptr;
            }
            // Miss: refill the local list from the global zone. Larger orders
            // pull fewer blocks, so one refill moves roughly the same number
            // of pages regardless of order.
            let batch = self.batch >> order;
            let filled = ZONE
                .lock()
                .get_bulk_free_pages(&mut self.free_areas[order as usize], order, batch);
            if filled == 0 {
                // The zone is exhausted; return null rather than spinning
                // forever on an empty free list.
                return PagePtr::null();
            }
        }
    }

    fn free_pages(&mut self, pages_ptr: PagePtr, order: u32) {
        assert!(order <= PAGE_ALLOC_COSTLY_ORDER);
        assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
        assert_eq!(pages_ptr.get_order(), order);
        self.free_areas[order as usize].add_pages(pages_ptr);
    }
}
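// Refill arithmetic for the cache above (illustrative): with the default
// `batch` of 64, a miss on the order-2 list pulls up to 64 >> 2 = 16
// order-2 blocks (64 pages in total) under one zone lock acquisition, so
// higher orders refill fewer blocks but a similar number of pages.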
impl Page {
    fn set_flags(&mut self, flags: PageFlags) {
        self.flags.insert(flags);
    }

    fn remove_flags(&mut self, flags: PageFlags) {
        self.flags.remove(flags);
    }

    fn set_order(&mut self, order: u32) {
        self.order = order;
    }

    unsafe fn increase_refcount(&mut self) -> u32 {
        self.refcount.fetch_add(1, Ordering::Relaxed)
    }

    unsafe fn decrease_refcount(&mut self) -> u32 {
        self.refcount.fetch_sub(1, Ordering::AcqRel)
    }

    pub fn is_buddy(&self) -> bool {
        self.flags.contains(PageFlags::BUDDY)
    }

    #[allow(dead_code)]
    pub fn is_slab(&self) -> bool {
        self.flags.contains(PageFlags::SLAB)
    }

    pub fn is_present(&self) -> bool {
        self.flags.contains(PageFlags::PRESENT)
    }

    pub fn is_free(&self) -> bool {
        self.flags.contains(PageFlags::FREE)
    }

    pub fn is_local(&self) -> bool {
        self.flags.contains(PageFlags::LOCAL)
    }
}
impl FreeArea {
    const fn new() -> Self {
        Self {
            free_list: Link::new(),
            count: 0,
        }
    }

    /// Pops one block off the free list, or returns null if the list is
    /// empty. The `FREE` flag is deliberately left set; the caller clears
    /// it once the allocation is handed out.
    fn get_free_pages(&mut self) -> PagePtr {
        if let Some(pages_link) = self.free_list.next_mut() {
            assert_ne!(self.count, 0);
            // Recover the `Page` from its embedded `link` field.
            let pages_ptr = unsafe { container_of!(pages_link, Page, link) };
            let pages_ptr = Ptr::from_raw(pages_ptr);
            self.count -= 1;
            pages_link.remove();
            pages_ptr
        } else {
            PagePtr::null()
        }
    }

    fn add_pages(&mut self, pages_ptr: PagePtr) {
        self.count += 1;
        pages_ptr.as_mut().set_flags(PageFlags::FREE);
        self.free_list.insert(&mut pages_ptr.as_mut().link)
    }

    fn del_pages(&mut self, pages_ptr: PagePtr) {
        assert!(self.count >= 1 && pages_ptr.as_ref().is_free());
        self.count -= 1;
        pages_ptr.as_mut().remove_flags(PageFlags::FREE);
        pages_ptr.as_mut().link.remove();
    }
}
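// `FreeArea` owns no storage of its own: each free block's `Page` is threaded
// onto `free_list` through its embedded `link`, and `container_of!` recovers
// the `Page` from the `Link` address. A minimal sketch of that recovery,
// assuming the usual container_of definition:
//
//     let page = (link_ptr as usize - core::mem::offset_of!(Page, link)) as *mut Page;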
impl Zone {
    const fn new() -> Self {
        Self {
            free_areas: [const { FreeArea::new() }; MAX_PAGE_ORDER as usize + 1],
        }
    }

    /// Only used for per-cpu pages. Moves up to `count` order-`order`
    /// blocks into `free_area`, returning how many were actually moved.
    fn get_bulk_free_pages(&mut self, free_area: &mut FreeArea, order: u32, count: u32) -> u32 {
        for i in 0..count {
            let pages_ptr = self.get_free_pages(order);
            if pages_ptr.is_none() {
                return i;
            }
            // Mark the block per-CPU so `buddy_check` never merges it back
            // into the zone behind the owning core's back.
            pages_ptr.as_mut().set_flags(PageFlags::LOCAL);
            free_area.add_pages(pages_ptr);
        }
        count
    }
    fn get_free_pages(&mut self, order: u32) -> PagePtr {
        // Search upward from the requested order for the smallest free block.
        for current_order in order..=MAX_PAGE_ORDER {
            let pages_ptr = self.free_areas[current_order as usize].get_free_pages();
            if pages_ptr.is_none() {
                continue;
            }
            pages_ptr.as_mut().set_order(order);
            if current_order > order {
                // The block is too big: split it and free the unused halves.
                self.expand(pages_ptr, current_order, order);
            }
            assert!(pages_ptr.as_ref().is_present() && pages_ptr.as_ref().is_free());
            return pages_ptr;
        }
        PagePtr::null()
    }
    /// Splits a free order-`order` block down to `target_order`, returning
    /// each peeled-off upper half to the matching free list.
    fn expand(&mut self, pages_ptr: PagePtr, order: u32, target_order: u32) {
        assert!(pages_ptr.is_some());
        let mut offset = 1 << order;
        for order in (target_order..order).rev() {
            // Halve the block: the upper half becomes a free block of `order`.
            offset >>= 1;
            let split_pages_ptr = pages_ptr.offset(offset);
            split_pages_ptr.as_mut().set_order(order);
            split_pages_ptr.as_mut().set_flags(PageFlags::BUDDY);
            self.free_areas[order as usize].add_pages(split_pages_ptr);
        }
    }
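    // Worked example of `expand` (illustrative): taking an order-0 page out
    // of a free order-3 block at PFN 8 peels off the upper halves one order
    // at a time:
    //
    //     offset 4 -> PFN 12 returned to free_areas[2]
    //     offset 2 -> PFN 10 returned to free_areas[1]
    //     offset 1 -> PFN  9 returned to free_areas[0]
    //
    // leaving PFN 8 itself as the order-0 block handed to the caller.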
    fn free_pages(&mut self, mut pages_ptr: PagePtr, order: u32) {
        assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
        assert_eq!(pages_ptr.get_order(), order);
        let mut pfn: PFN = pages_ptr.into();
        let mut current_order = order;
        // Coalesce with the buddy block at each order for as long as the
        // buddy is itself a free block of the same order.
        while current_order < MAX_PAGE_ORDER {
            let buddy_pfn = pfn.buddy_pfn(current_order);
            let buddy_pages_ptr = PagePtr::from(buddy_pfn);
            if !self.buddy_check(buddy_pages_ptr, current_order) {
                break;
            }
            pages_ptr.as_mut().remove_flags(PageFlags::BUDDY);
            buddy_pages_ptr.as_mut().remove_flags(PageFlags::BUDDY);
            self.free_areas[current_order as usize].del_pages(buddy_pages_ptr);
            // The merged block starts at the lower of the two buddies.
            pages_ptr = PagePtr::from(pfn.combined_pfn(buddy_pfn));
            pages_ptr.as_mut().set_flags(PageFlags::BUDDY);
            pfn = pfn.combined_pfn(buddy_pfn);
            current_order += 1;
        }
        pages_ptr.as_mut().set_order(current_order);
        self.free_areas[current_order as usize].add_pages(pages_ptr);
    }
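    // Worked example of the merge loop (illustrative, and assuming
    // `buddy_pfn` flips bit `order` of the PFN and `combined_pfn` clears it,
    // as in a conventional buddy system): freeing PFN 9 at order 0 while
    // PFN 8 is a free order-0 block merges them into an order-1 block at
    // PFN 8; if PFN 10..=11 is also a free order-1 block, a second pass
    // merges everything into an order-2 block at PFN 8.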
    /// Checks whether `pages_ptr` is a block we can coalesce with. A page
    /// and its buddy can be merged only if:
    /// - the buddy is valid (present),
    /// - the buddy is currently in a `free_areas` list,
    /// - the buddy is not held by a per-CPU cache, and
    /// - the page and its buddy have the same order.
    ///   (Once SMP zones exist, they must also be in the same zone.)
    fn buddy_check(&self, pages_ptr: PagePtr, order: u32) -> bool {
        if !pages_ptr.as_ref().is_present() {
            return false;
        }
        if !pages_ptr.as_ref().is_free() {
            return false;
        }
        if pages_ptr.as_ref().is_local() {
            return false;
        }
        if pages_ptr.as_ref().order != order {
            return false;
        }
        assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
        true
    }
    /// Only used on buddy initialization. Carves the page range covering
    /// `[start, end)` into naturally-aligned power-of-two blocks.
    fn create_pages(&mut self, start: usize, end: usize) {
        let mut start_pfn = PAddr::from(start).ceil_pfn();
        let end_pfn = PAddr::from(end).floor_pfn();
        while start_pfn < end_pfn {
            // The order is bounded by the alignment of `start_pfn`...
            let mut order = usize::from(start_pfn).trailing_zeros().min(MAX_PAGE_ORDER);
            // ...and shrunk until the whole block fits before `end_pfn`.
            // (The original check compared against `order` itself rather
            // than the block size `1 << order`.)
            while start_pfn + (1 << order) as usize > end_pfn {
                order -= 1;
            }
            let page_ptr: PagePtr = start_pfn.into();
            page_ptr.as_mut().set_flags(PageFlags::BUDDY);
            self.free_areas[order as usize].add_pages(page_ptr);
            start_pfn = start_pfn + (1 << order) as usize;
        }
    }
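    // Worked example of `create_pages` (illustrative): for the PFN range
    // 3..10 the loop emits an order-0 block at PFN 3 (alignment-limited),
    // an order-2 block at PFN 4, and an order-1 block at PFN 8
    // (size-limited by `end_pfn`), covering all seven pages.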
}
#[arch::define_percpu]
static PER_CPU_PAGES: PerCpuPages = PerCpuPages::new();

static ZONE: Spin<Zone> = Spin::new(Zone::new());
fn __alloc_pages(order: u32) -> PagePtr {
    let pages_ptr = if order <= PAGE_ALLOC_COSTLY_ORDER {
        // Fast path: small orders come from this CPU's cache.
        unsafe { PER_CPU_PAGES.as_mut().get_free_pages(order) }
    } else {
        ZONE.lock().get_free_pages(order)
    };
    // `as_mut()` unwraps the inner pointer, so running out of memory
    // currently panics here.
    unsafe {
        pages_ptr.as_mut().increase_refcount();
    }
    pages_ptr.as_mut().remove_flags(PageFlags::FREE);
    pages_ptr
}
fn __free_pages(pages_ptr: PagePtr, order: u32) {
    if order <= PAGE_ALLOC_COSTLY_ORDER {
        unsafe {
            PER_CPU_PAGES.as_mut().free_pages(pages_ptr, order);
        }
    } else {
        ZONE.lock().free_pages(pages_ptr, order);
    }
}

pub(super) fn alloc_page() -> PagePtr {
    __alloc_pages(0)
}

pub(super) fn alloc_pages(order: u32) -> PagePtr {
    __alloc_pages(order)
}

/// Like `alloc_pages`, but always goes straight to the zone, bypassing the
/// per-CPU caches.
pub(super) fn early_alloc_pages(order: u32) -> PagePtr {
    let pages_ptr = ZONE.lock().get_free_pages(order);
    unsafe {
        pages_ptr.as_mut().increase_refcount();
    }
    pages_ptr.as_mut().remove_flags(PageFlags::FREE);
    pages_ptr
}

pub(super) fn free_pages(page_ptr: PagePtr, order: u32) {
    __free_pages(page_ptr, order)
}
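// A sketch of the expected pairing for the interface above (illustrative,
// not compiled here). `free_pages` asserts a zero refcount and a matching
// order, so a caller is expected to do something like:
//
//     let pages = alloc_pages(3);           // 2^3 = 8 contiguous pages, refcount 1
//     unsafe { pages.decrease_refcount(); } // back to 0 before freeing
//     free_pages(pages, 3);                 // order must match the allocation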
#[no_mangle]
pub extern "C" fn mark_present(start: usize, end: usize) {
    let mut start_pfn = PAddr::from(start).ceil_pfn();
    let end_pfn = PAddr::from(end).floor_pfn();
    while start_pfn < end_pfn {
        PagePtr::from(start_pfn)
            .as_mut()
            .set_flags(PageFlags::PRESENT);
        start_pfn = start_pfn + 1;
    }
}

#[no_mangle]
pub extern "C" fn create_pages(start: usize, end: usize) {
    ZONE.lock().create_pages(start, end);
}

#[no_mangle]
pub extern "C" fn page_to_pfn(page: *const Page) -> usize {
    unsafe { page.offset_from(PAGE_ARRAY) as usize }
}

#[no_mangle]
pub extern "C" fn c_alloc_page() -> *const Page {
    alloc_page().as_ptr() as *const Page
}

#[no_mangle]
pub extern "C" fn c_alloc_pages(order: u32) -> *const Page {
    alloc_pages(order).as_ptr() as *const Page
}

#[no_mangle]
pub extern "C" fn c_alloc_page_table() -> usize {
    let pfn: PFN = alloc_page().into();
    // PFN -> physical address with 4 KiB pages (PAGE_SHIFT = 12).
    let paddr: usize = usize::from(pfn) << 12;
    unsafe {
        // Zero the new page table; this relies on physical memory being
        // addressable at `paddr` in the current mapping.
        core::ptr::write_bytes(paddr as *mut u8, 0, 4096);
    }
    paddr
}