kernel_init.rs

use crate::{
    kernel::{
        self,
        cpu::init_localcpu,
        mem::{AsMemoryBlock, GlobalPageAlloc, KernelPageAccess, RawPage},
    },
    kernel_init,
};
use arch::DefaultPagingMode;
use eonix_mm::{
    address::{Addr as _, AddrOps as _, PAddr, PRange, VAddr, VRange},
    page_table::{PageAttribute, PagingMode as _, PTE},
    paging::{NoAlloc, Page as GenericPage, PAGE_SIZE, PFN},
};
use eonix_runtime::context::ExecutionContext;
use eonix_sync::LazyLock;
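
/// The global kernel page table, lazily wrapped around the pre-existing root
/// table page identified by `DefaultPagingMode::KERNEL_ROOT_TABLE_PFN`.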
static GLOBAL_PAGE_TABLE: LazyLock<
    eonix_mm::page_table::PageTable<DefaultPagingMode, NoAlloc, KernelPageAccess>,
> = LazyLock::new(|| unsafe {
    GenericPage::with_raw(
        DefaultPagingMode::KERNEL_ROOT_TABLE_PFN,
        |root_table_page| eonix_mm::page_table::PageTable::with_root_table(root_table_page.clone()),
    )
});
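
// Early-boot layout constants: physical load addresses of the kernel BSS and
// kernel image, and the virtual addresses where the BSS and the kernel page
// array are mapped.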
const HUGE_PAGE_LEN: usize = 1 << 21;
const P_KERNEL_BSS_START: PAddr = PAddr::from_val(0x200000);
const P_KIMAGE_START: PAddr = PAddr::from_val(0x400000);
const V_KERNEL_PAGE_ARRAY_START: VAddr = VAddr::from(0xffffff8040000000);
const V_KERNEL_BSS_START: VAddr = VAddr::from(0xffffffffc0200000);
const KERNEL_BSS_LEN: usize = HUGE_PAGE_LEN;
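
/// One entry of the BIOS E820 memory map provided by the bootloader.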
#[repr(C)]
#[derive(Copy, Clone)]
struct E820MemMapEntry {
    base: u64,
    len: u64,
    entry_type: u32,
    acpi_attrs: u32,
}
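
/// Data handed over by the bootloader: the E820 memory map entries plus the
/// 1 KiB / 64 KiB block counts (presumably from the legacy BIOS memory query).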
#[repr(C)]
#[derive(Copy, Clone)]
struct BootLoaderData {
    entry_count: u32,
    entry_length: u32,
    block_count_1k: u32,
    block_count_64k: u32,
    all_entries: [E820MemMapEntry; 42],
}

impl E820MemMapEntry {
    const ENTRY_FREE: u32 = 1;
    // const ENTRY_USED: u32 = 2;

    fn is_free(&self) -> bool {
        self.entry_type == Self::ENTRY_FREE
    }

    // fn is_used(&self) -> bool {
    //     self.entry_type == Self::ENTRY_USED
    // }

    fn range(&self) -> PRange {
        PRange::from(PAddr::from(self.base as usize)).grow(self.len as usize)
    }
}

impl BootLoaderData {
    // fn memory_size(&self) -> usize {
    //     // The initial 1M is not counted in the E820 map. We add them to the total as well.
    //     ((self.block_count_1k + 64 * self.block_count_64k) * 1024 + 1 * 1024 * 1024) as usize
    // }

    fn entries(&self) -> &[E820MemMapEntry] {
        &self.all_entries[..self.entry_count as usize]
    }

    fn free_entries(&self) -> impl Iterator<Item = &E820MemMapEntry> {
        self.entries().iter().filter(|entry| entry.is_free())
    }
}
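
/// Early kernel entry point, presumably reached from the bootstrap code on a
/// temporary stack: maps and zeroes the kernel BSS, maps the kernel page array,
/// hands all remaining free physical memory to `GlobalPageAlloc`, and finally
/// switches execution onto a freshly allocated kernel stack.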
#[no_mangle]
pub(self) extern "C" fn _kernel_init(bootloader_data: &mut BootLoaderData) -> ! {
    // Map kernel BSS
    for pte in GLOBAL_PAGE_TABLE.iter_kernel_levels(
        VRange::from(V_KERNEL_BSS_START).grow(KERNEL_BSS_LEN),
        &DefaultPagingMode::LEVELS[..3],
    ) {
        let attr = PageAttribute::PRESENT
            | PageAttribute::WRITE
            | PageAttribute::READ
            | PageAttribute::HUGE
            | PageAttribute::GLOBAL;
        pte.set(PFN::from(P_KERNEL_BSS_START), attr.into());
    }
    unsafe {
        // SAFETY: We've just mapped the area with sufficient length.
        core::ptr::write_bytes(V_KERNEL_BSS_START.addr() as *mut u8, 0, KERNEL_BSS_LEN);
    }
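
    // Size the kernel page array: one `RawPage` descriptor per physical page frame
    // up to the highest free address reported by the bootloader, rounded up to
    // whole 2 MiB huge pages.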
    let addr_max = bootloader_data
        .free_entries()
        .map(|entry| entry.range().end())
        .max()
        .expect("No free memory");
    let pfn_max = PFN::from(addr_max.ceil());
    let len_bytes_page_array = usize::from(pfn_max) * size_of::<RawPage>();
    let count_huge_pages = len_bytes_page_array.div_ceil(HUGE_PAGE_LEN);

    extern "C" {
        // Definition inside linker script.
        fn KIMAGE_PAGES();
    }
    let kimage_pages = unsafe { core::mem::transmute::<_, usize>(KIMAGE_PAGES as *const ()) };
    let paddr_after_kimage = P_KIMAGE_START + kimage_pages * PAGE_SIZE;
    let paddr_after_kimage_aligned = paddr_after_kimage.ceil_to(HUGE_PAGE_LEN);
    let mut paddr_free = paddr_after_kimage_aligned;

    // Map kernel page array.
    for pte in GLOBAL_PAGE_TABLE.iter_kernel_levels(
        VRange::from(V_KERNEL_PAGE_ARRAY_START).grow(HUGE_PAGE_LEN * count_huge_pages),
        &DefaultPagingMode::LEVELS[..3],
    ) {
        let attr = PageAttribute::PRESENT
            | PageAttribute::WRITE
            | PageAttribute::READ
            | PageAttribute::HUGE
            | PageAttribute::GLOBAL;
        pte.set(PFN::from(paddr_free), attr.into());
        paddr_free = paddr_free + HUGE_PAGE_LEN;
    }
    unsafe {
        // SAFETY: We've just mapped the area with sufficient length.
        core::ptr::write_bytes(
            V_KERNEL_PAGE_ARRAY_START.addr() as *mut u8,
            0,
            count_huge_pages * HUGE_PAGE_LEN,
        );
    }
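
    // Hand every free E820 range above the already-used area (kernel image and the
    // huge pages just mapped for the page array) to the global page allocator.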
    let paddr_unused_start = paddr_free;
    for entry in bootloader_data.free_entries() {
        let mut range = entry.range();
        GlobalPageAlloc::mark_present(range);

        if range.end() <= paddr_unused_start {
            continue;
        }
        if range.start() < paddr_unused_start {
            let (_, right) = range.split_at(paddr_unused_start);
            range = right;
        }

        unsafe {
            // SAFETY: We are in the system initialization procedure, where preemption is disabled.
            GlobalPageAlloc::add_pages(range);
        }
    }
    unsafe {
        // SAFETY: We are in the system initialization procedure, where preemption is disabled.
        GlobalPageAlloc::add_pages(PRange::new(PAddr::from(0x100000), PAddr::from(0x200000)));
        GlobalPageAlloc::add_pages(PRange::new(paddr_after_kimage, paddr_after_kimage_aligned));
    }
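
    // Allocate a 512-page (2 MiB) kernel stack from the early allocator and jump
    // onto it, passing the stack's PFN along (presumably so that later
    // initialization can keep track of these early stack pages).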
    let (stack_bottom_addr, stack_pfn) = {
        let kernel_stack_page = GenericPage::alloc_order_in(9, GlobalPageAlloc::early_alloc());
        let stack_area = kernel_stack_page.as_memblk();
        let stack_bottom_addr = stack_area
            .addr()
            .checked_add(stack_area.len())
            .expect("The stack bottom should not be null");
        let stack_pfn = kernel_stack_page.into_raw();

        (stack_bottom_addr, stack_pfn)
    };

    let mut to_ctx = ExecutionContext::new();
    to_ctx.set_interrupt(false);
    to_ctx.set_sp(stack_bottom_addr.get());
    to_ctx.call1(_init_on_new_stack, usize::from(stack_pfn));
    to_ctx.switch_noreturn();
}
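
/// Continuation of `_kernel_init` running on the newly allocated stack: reclaims
/// the bootstrap stack pages, initializes the local CPU, calls the externally
/// defined `init_allocator`, sets up interrupts, then enters `kernel_init`.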
extern "C" fn _init_on_new_stack(early_kernel_stack_pfn: PFN) -> ! {
    // Add the pages previously used by `_kernel_init` as its stack.
    unsafe {
        // SAFETY: We are in the system initialization procedure, where preemption is disabled.
        GlobalPageAlloc::add_pages(PRange::new(PAddr::from(0x8000), PAddr::from(0x80000)));
    }

    init_localcpu();

    extern "C" {
        fn init_allocator();
    }
    unsafe { init_allocator() };

    kernel::interrupt::init().unwrap();

    kernel_init(early_kernel_stack_pfn)
}