//! entry.rs — RISC-V boot entry: early buddy-allocator bring-up, boot page
//! tables, and the naked `_start` trampoline into `kernel_init`.
  1. use core::{
  2. arch::naked_asm,
  3. ptr::NonNull,
  4. sync::atomic::AtomicUsize,
  5. };
  6. use intrusive_list::{container_of, Link};
  7. use super::mm::*;
  8. use buddy_allocator::{BuddyAllocator, BuddyRawPage};
  9. use riscv::register::satp;
  10. use eonix_mm::{
  11. address::{Addr as _, PAddr, VAddr, VRange},
  12. page_table::{PageAttribute, RawAttribute, PTE as _},
  13. paging::{Page, PageAccess, PageAlloc, PageBlock, RawPage as RawPageTrait, PFN},
  14. };
  15. use spin::Mutex;
// Physical frame number of the statically placed root page table
// (4 KiB frames, hence the shift by 12). ROOT_PAGE_TABLE_PHYS_ADDR
// comes from `super::mm`.
const ROOT_PAGE_TABLE_PFN: usize = ROOT_PAGE_TABLE_PHYS_ADDR >> 12;

// 64 KiB boot stack; placed in its own section so the linker script can
// position it. `_start` points `sp` into this buffer.
#[link_section = ".bss.stack"]
static mut BOOT_STACK: [u8; 4096 * 16] = [0; 4096 * 16];

// Per-frame metadata for the 1024 frames managed during early boot,
// indexed by PFN relative to ROOT_PAGE_TABLE_PFN (see `From<PFN>` below).
static mut PAGES: [RawPage; 1024] = [const { RawPage::new() }; 1024];
  20. fn page(index: usize) -> &'static mut RawPage {
  21. let page = unsafe { PAGES.as_mut_ptr().add(index) };
  22. unsafe { &mut *page }
  23. }
/// Handle to one entry of the static `PAGES` array: a PFN expressed
/// relative to `ROOT_PAGE_TABLE_PFN`.
#[derive(Clone, Copy)]
struct RawPageHandle(usize);
  26. impl From<PFN> for RawPageHandle {
  27. fn from(pfn: PFN) -> Self {
  28. assert!(usize::from(pfn) - ROOT_PAGE_TABLE_PFN < 1024, "PFN out of range");
  29. Self(usize::from(pfn) - ROOT_PAGE_TABLE_PFN)
  30. }
  31. }
  32. impl From<RawPageHandle> for PFN {
  33. fn from(raw_page: RawPageHandle) -> Self {
  34. PFN::from(raw_page.0 + ROOT_PAGE_TABLE_PFN)
  35. }
  36. }
  37. impl RawPageTrait for RawPageHandle {
  38. fn order(&self) -> u32 {
  39. page(self.0).order
  40. }
  41. fn refcount(&self) -> &AtomicUsize {
  42. &page(self.0).refcount
  43. }
  44. fn is_present(&self) -> bool {
  45. self.0 < 1024
  46. }
  47. }
impl BuddyRawPage for RawPageHandle {
    /// Recovers the handle of the `RawPage` that embeds `link`.
    ///
    /// # Safety
    /// `link` must be the `link` field of an entry inside the static
    /// `PAGES` array; otherwise the computed offset is meaningless.
    unsafe fn from_link(link: &mut Link) -> Self {
        // Step back from the field address to the enclosing RawPage.
        let page = container_of!(link, RawPage, link);
        let page_index = page.as_ptr().offset_from_unsigned(PAGES.as_ptr());
        assert!(page_index < 1024, "Page index out of range");
        Self(page_index)
    }
    /// Mutable access to this frame's intrusive free-list link.
    ///
    /// # Safety
    /// Hands out `&mut` from `&self` via the static array; the caller
    /// must guarantee exclusive access (single-threaded early boot).
    unsafe fn get_link(&self) -> &mut Link {
        &mut page(self.0).link
    }
    // Order and flag accessors below simply mirror the fields of the
    // corresponding `PAGES` entry on behalf of the buddy allocator.
    fn set_order(&self, order: u32) {
        page(self.0).order = order;
    }
    fn is_buddy(&self) -> bool {
        page(self.0).buddy
    }
    fn is_free(&self) -> bool {
        page(self.0).free
    }
    fn set_buddy(&self) {
        page(self.0).buddy = true;
    }
    fn set_free(&self) {
        page(self.0).free = true;
    }
    fn clear_buddy(&self) {
        page(self.0).buddy = false;
    }
    fn clear_free(&self) {
        page(self.0).free = false;
    }
}
/// Per-physical-frame metadata consumed by the buddy allocator.
struct RawPage {
    // Intrusive link used by the allocator's free lists.
    link: Link,
    // Flag toggled via set_free/clear_free -- NOTE(review): exact
    // free-list semantics live in the buddy_allocator crate; confirm there.
    free: bool,
    // Flag toggled via set_buddy/clear_buddy; meaning defined by the
    // buddy allocator.
    buddy: bool,
    // Block order maintained through set_order/order.
    order: u32,
    // Reference count exposed through RawPageTrait::refcount.
    refcount: AtomicUsize,
}
  87. impl RawPage {
  88. const fn new() -> Self {
  89. Self {
  90. link: Link::new(),
  91. free: false,
  92. buddy: false,
  93. order: 0,
  94. refcount: AtomicUsize::new(0),
  95. }
  96. }
  97. }
/// Page accessor that treats a frame's physical address as directly
/// dereferenceable — valid only while physical memory is identity-mapped.
struct DirectPageAccess;
impl PageAccess for DirectPageAccess {
    /// # Safety
    /// The caller must ensure the frame at `pfn` is mapped at its
    /// physical address in the current address space.
    unsafe fn get_ptr_for_pfn(pfn: PFN) -> NonNull<PageBlock> {
        // PAddr is nonzero for any managed frame, so new_unchecked holds.
        unsafe { NonNull::new_unchecked(PAddr::from(pfn).addr() as *mut _) }
    }
}
/// Global buddy allocator, spinlock-guarded; seeded with frames in
/// `setup_page_tables`.
static BUDDY: Mutex<BuddyAllocator<RawPageHandle>> = Mutex::new(BuddyAllocator::new());

/// Zero-sized allocator handle that forwards every operation to `BUDDY`.
#[derive(Clone)]
struct BuddyPageAlloc;
  107. impl PageAlloc for BuddyPageAlloc {
  108. type RawPage = RawPageHandle;
  109. fn alloc_order(&self, order: u32) -> Option<Self::RawPage> {
  110. let retval = BUDDY.lock().alloc_order(order);
  111. retval
  112. }
  113. unsafe fn dealloc(&self, raw_page: Self::RawPage) {
  114. BUDDY.lock().dealloc(raw_page);
  115. }
  116. fn has_management_over(&self, page_ptr: Self::RawPage) -> bool {
  117. BuddyAllocator::has_management_over(page_ptr)
  118. }
  119. }
  120. type PageTable<'a> = eonix_mm::page_table::PageTable<'a, PagingModeSv48, BuddyPageAlloc, DirectPageAccess>;
/// Builds the boot page tables and enables Sv48 translation.
///
/// Seeds the buddy allocator with 0x8040_0000..0x8080_0000 (the frames
/// described by `PAGES`), allocates a root table, installs three kernel
/// mappings, then writes `satp`.
fn setup_page_tables() {
    // R/W/X global mapping attributes shared by all three regions.
    let attr = PageAttribute::WRITE
        | PageAttribute::READ
        | PageAttribute::EXECUTE
        | PageAttribute::GLOBAL
        | PageAttribute::PRESENT;
    BUDDY.lock().create_pages(PAddr::from(0x80400000), PAddr::from(0x80800000));
    let root_table_page = Page::alloc_in(BuddyPageAlloc);
    let page_table = PageTable::new_in(&root_table_page, BuddyPageAlloc);
    // Map 0x80200000-0x81200000 16MB identically, use 2MB page
    // (PFN stride 0x200 == 2 MiB; base PFN 0x80200 == PA 0x8020_0000).
    // NOTE(review): the meaning of iter_kernel's level argument (2 here,
    // 1 and 3 below) is inferred from the PFN strides — confirm against
    // the eonix_mm page_table API.
    for (idx, pte) in page_table
        .iter_kernel(VRange::from(VAddr::from(0x80200000)).grow(0x1000000), 2)
        .enumerate()
    {
        pte.set(PFN::from(idx * 0x200 + 0x80200), PageAttribute64::from_page_attr(attr));
    }
    // Map 0x0000_0000_0000_0000-0x0000_007F_FFFF_FFFF 256GB
    // to 0xFFFF_FF00_0000_0000 to 0xFFFF_FF7F_FFFF_FFFF, use 1 GB page
    // (PFN stride 0x40000 == 1 GiB).
    for (idx, pte) in page_table
        .iter_kernel(VRange::from(VAddr::from(0xFFFF_FF00_0000_0000)).grow(0x80_0000_0000), 1)
        .enumerate()
    {
        pte.set(PFN::from(idx * 0x40000), PageAttribute64::from_page_attr(attr));
    }
    // Map kernel image
    // (2 MiB at 0xFFFF_FFFF_FFC0_0000 -> PA 0x8020_0000, 4 KiB steps).
    for (idx, pte) in page_table
        .iter_kernel(VRange::from(VAddr::from(0xFFFF_FFFF_FFC0_0000)).grow(0x20_0000), 3)
        .enumerate()
    {
        pte.set(PFN::from(idx + 0x80200), PageAttribute64::from_page_attr(attr));
    }
    unsafe {
        // SAFETY: the identity mapping above covers the currently
        // executing code, so the switch to Sv48 does not fault.
        satp::set(satp::Mode::Sv48, 0, PFN::from(page_table.addr()).into());
    }
}
extern "C" {
    // Kernel main entry defined outside this file; `_start` jumps here
    // after setup_page_tables has enabled paging.
    fn kernel_init();
}
  159. /// bootstrap in rust
  160. #[naked]
  161. #[no_mangle]
  162. #[link_section = ".text.entry"]
  163. unsafe extern "C" fn _start() -> ! {
  164. naked_asm!(
  165. "la sp, {stack_top}",
  166. // TODO: set up page table, somewhere may be wrong
  167. "call {setup_page_tables_fn}",
  168. "jr {kernel_init_fn}",
  169. stack_top = sym BOOT_STACK,
  170. setup_page_tables_fn = sym setup_page_tables,
  171. kernel_init_fn = sym kernel_init,
  172. )
  173. }