entry.rs 7.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223
  1. use core::{arch::{asm, naked_asm}, ptr};
  2. use super::{mm::*, PAGE_SIZE};
/// Physical address of the Sv39 root (level-2) page table.
/// NOTE(review): must equal the link/load address of `ROOT_PAGE_TABLES` —
/// confirm against the linker script.
pub const ROOT_PAGE_TABLE_PHYS_ADDR: usize = 0x8030_0000;
/// Physical base address where the kernel image is loaded.
pub const KERNEL_PHYS_BASE: usize = 0x8000_0000;
/// Virtual base address the kernel image is linked at (top of the Sv39
/// sign-extended address space).
pub const KIMAGE_VIRT_BASE: usize = 0xFFFF_FFFF_FFC0_0000;
// Boot stack, 64 KiB. The symbol is the LOWEST address of the region; the
// stack grows downward. NOTE(review): `_start` loads sp with this symbol
// directly, i.e. the bottom of the stack — verify sp is offset to the end.
#[link_section = ".bss.stack"]
static mut BOOT_STACK: [u8; 4096 * 16] = [0; 4096 * 16];
// Sv39 root (level-2) page table: 512 x 8-byte entries = one 4 KiB page.
// NOTE(review): its placement must match ROOT_PAGE_TABLE_PHYS_ADDR — confirm
// in the linker script.
#[link_section = ".data.root_page_tables"]
static mut ROOT_PAGE_TABLES: [PTE64; 512] = [PTE64(0); 512];
// Level-1 table backing the temporary 1 GiB identity mapping (2 MiB leaves).
#[link_section = ".data.boot_page_tables_lvl"]
#[used]
static mut LEVEL1_IDENTITY_TABLE: [PTE64; 512] = [PTE64(0); 512];
// Level-1 table for the kernel image mapped at KIMAGE_VIRT_BASE.
#[link_section = ".data.boot_page_tables_lvl"]
#[used]
static mut LEVEL1_KERNEL_TABLE: [PTE64; 512] = [PTE64(0); 512];
// Level-0 (4 KiB leaf) table for the kernel's .text section.
// setup_page_tables() zeroes the whole run from ROOT_PAGE_TABLES through
// LEVEL0_KERNEL_DATA_TABLE, which assumes the linker keeps these statics
// contiguous and in declaration order.
#[link_section = ".data.boot_page_tables_lvl"]
#[used]
static mut LEVEL0_KERNEL_TEXT_TABLE: [PTE64; 512] = [PTE64(0); 512];
// Level-0 table for .rodata.
#[link_section = ".data.boot_page_tables_lvl"]
#[used]
static mut LEVEL0_KERNEL_RODATA_TABLE: [PTE64; 512] = [PTE64(0); 512];
// Level-0 table for .data.
#[link_section = ".data.boot_page_tables_lvl"]
#[used]
static mut LEVEL0_KERNEL_DATA_TABLE: [PTE64; 512] = [PTE64(0); 512];
  25. #[inline]
  26. fn phys_to_ppn(phys_addr: usize) -> u64 {
  27. (phys_addr >> 12) as u64
  28. }
  29. #[inline]
  30. fn virt_to_vpn_idx(virt_addr: usize, level: u8) -> usize {
  31. match level {
  32. 2 => (virt_addr >> 30) & 0x1FF, // VPN[2] bits 38-30 (9 bits)
  33. 1 => (virt_addr >> 21) & 0x1FF, // VPN[1] bits 29-21 (9 bits)
  34. 0 => (virt_addr >> 12) & 0x1FF, // VPN[0] bits 20-12 (9 bits)
  35. _ => 0,
  36. }
  37. }
  38. fn fill_pte(page_table_entry: &mut PTE64, phys_addr: usize, flags: u64) {
  39. let ppn = phys_to_ppn(phys_addr);
  40. *page_table_entry = PTE64(ppn << 10 | flags); // PPN 10-53
  41. }
  42. fn setup_page_tables() {
  43. extern "C" {
  44. static TEXT_START: usize;
  45. static TEXT_END: usize;
  46. static RODATA_START: usize;
  47. static RODATA_END: usize;
  48. static DATA_START: usize;
  49. static DATA_END: usize;
  50. }
  51. unsafe {
  52. // 1. clear page table
  53. let root_page_tables_phys = ROOT_PAGE_TABLES.as_mut_ptr() as usize;
  54. let total_page_tables_size = (LEVEL0_KERNEL_DATA_TABLE.as_mut_ptr() as usize + PAGE_SIZE) - root_page_tables_phys;
  55. ptr::write_bytes(root_page_tables_phys as *mut u8, 0, total_page_tables_size);
  56. // 2. Identity Mapping
  57. // Level 2 (BOOT_PAGE_TABLES) -> Level 1 (LEVEL1_IDENTITY_TABLE) -> 2MB Huge Pages
  58. fill_pte(
  59. &mut ROOT_PAGE_TABLES[0],
  60. LEVEL1_IDENTITY_TABLE.as_ptr() as usize,
  61. PA_V
  62. );
  63. // LEVEL1_IDENTITY_TABLE (Level 1)
  64. // KERNEL_PHYS_BASE 1GB
  65. let identity_map_start_phys = KERNEL_PHYS_BASE;
  66. let identity_map_size = LEVEL2_PAGE_SIZE;
  67. let mut current_phys_addr = identity_map_start_phys;
  68. let end_phys_addr = identity_map_start_phys + identity_map_size;
  69. while current_phys_addr < end_phys_addr {
  70. let pte_idx_lvl1 = virt_to_vpn_idx(current_phys_addr, 1);
  71. fill_pte(
  72. &mut LEVEL1_IDENTITY_TABLE[pte_idx_lvl1],
  73. current_phys_addr, // 2MB
  74. PA_KERNEL_RWX
  75. );
  76. current_phys_addr += LEVEL1_PAGE_SIZE;
  77. }
  78. // 3. Kernel Space Mapping
  79. let kimage_vpn2_idx = virt_to_vpn_idx(KIMAGE_VIRT_BASE, 2);
  80. // ROOT_PAGE_TABLES (Level 2) -> LEVEL1_KERNEL_TABLE
  81. fill_pte(
  82. &mut ROOT_PAGE_TABLES[kimage_vpn2_idx],
  83. LEVEL1_KERNEL_TABLE.as_ptr() as usize,
  84. PA_V
  85. );
  86. let get_phys_addr = |virt_addr: usize, virt_base: usize, phys_base: usize| {
  87. phys_base + (virt_addr - virt_base)
  88. };
  89. // .text
  90. let text_virt_start = TEXT_START;
  91. let text_phys_start = get_phys_addr(TEXT_START, KIMAGE_VIRT_BASE, KERNEL_PHYS_BASE);
  92. let text_size = TEXT_END - TEXT_START;
  93. let text_vpn1_idx = virt_to_vpn_idx(text_virt_start, 1);
  94. fill_pte(
  95. &mut LEVEL1_KERNEL_TABLE[text_vpn1_idx],
  96. LEVEL0_KERNEL_TEXT_TABLE.as_ptr() as usize,
  97. PA_V
  98. );
  99. let mut current_virt = text_virt_start;
  100. let mut current_phys = text_phys_start;
  101. while current_virt < text_virt_start + text_size {
  102. let pte_idx_lvl0 = virt_to_vpn_idx(current_virt, 0);
  103. fill_pte(
  104. &mut LEVEL0_KERNEL_TEXT_TABLE[pte_idx_lvl0],
  105. current_phys,
  106. PA_KERNEL_RWX
  107. );
  108. current_virt += LEVEL0_PAGE_SIZE;
  109. current_phys += LEVEL0_PAGE_SIZE;
  110. }
  111. // .rodata
  112. let rodata_virt_start = RODATA_START;
  113. let rodata_phys_start = get_phys_addr(RODATA_START, KIMAGE_VIRT_BASE, KERNEL_PHYS_BASE);
  114. let rodata_size = RODATA_END - RODATA_START;
  115. let rodata_vpn1_idx = virt_to_vpn_idx(rodata_virt_start, 1);
  116. if rodata_vpn1_idx != text_vpn1_idx {
  117. fill_pte(
  118. &mut LEVEL1_KERNEL_TABLE[rodata_vpn1_idx],
  119. LEVEL0_KERNEL_RODATA_TABLE.as_ptr() as usize,
  120. PA_V
  121. );
  122. }
  123. current_virt = rodata_virt_start;
  124. current_phys = rodata_phys_start;
  125. while current_virt < rodata_virt_start + rodata_size {
  126. let pte_idx_lvl0 = virt_to_vpn_idx(current_virt, 0);
  127. fill_pte(
  128. &mut LEVEL0_KERNEL_RODATA_TABLE[pte_idx_lvl0],
  129. current_phys,
  130. PA_KERNEL_RO
  131. );
  132. current_virt += LEVEL0_PAGE_SIZE;
  133. current_phys += LEVEL0_PAGE_SIZE;
  134. }
  135. // .data
  136. let data_virt_start = DATA_START;
  137. let data_phys_start = get_phys_addr(DATA_START, KIMAGE_VIRT_BASE, KERNEL_PHYS_BASE);
  138. let data_size = DATA_END - DATA_START;
  139. let data_vpn1_idx = virt_to_vpn_idx(data_virt_start, 1);
  140. if data_vpn1_idx != text_vpn1_idx && data_vpn1_idx != rodata_vpn1_idx {
  141. fill_pte(
  142. &mut LEVEL1_KERNEL_TABLE[data_vpn1_idx],
  143. LEVEL0_KERNEL_DATA_TABLE.as_ptr() as usize,
  144. PA_V
  145. );
  146. }
  147. current_virt = data_virt_start;
  148. current_phys = data_phys_start;
  149. while current_virt < data_virt_start + data_size {
  150. let pte_idx_lvl0 = virt_to_vpn_idx(current_virt, 0);
  151. fill_pte(
  152. &mut LEVEL0_KERNEL_DATA_TABLE[pte_idx_lvl0],
  153. current_phys,
  154. PA_KERNEL_RW
  155. );
  156. current_virt += LEVEL0_PAGE_SIZE;
  157. current_phys += LEVEL0_PAGE_SIZE;
  158. }
  159. }
  160. }
  161. fn enable_mmu() {
  162. unsafe {
  163. let satp_val = ROOT_PAGE_TABLE_PHYS_ADDR | (8 << 60); // Sv39 mode (8)
  164. asm!(
  165. "csrw satp, {satp_val}",
  166. "sfence.vma",
  167. satp_val = in(reg) satp_val,
  168. );
  169. }
  170. }
extern "C" {
    // Rust-side kernel entry point, defined elsewhere; `_start` jumps here
    // after paging is enabled.
    fn kernel_init();
}
  174. /// bootstrap in rust
  175. #[naked]
  176. #[no_mangle]
  177. #[link_section = ".text.entry"]
  178. unsafe extern "C" fn _start() -> ! {
  179. naked_asm!(
  180. "la sp, {stack_top}",
  181. // TODO: set up page table, somewhere may be wrong
  182. "call {setup_page_tables_fn}",
  183. "call {enable_mmu_fn}",
  184. "jr {kernel_init_fn}",
  185. stack_top = sym BOOT_STACK,
  186. setup_page_tables_fn = sym setup_page_tables,
  187. enable_mmu_fn = sym enable_mmu,
  188. kernel_init_fn = sym kernel_init,
  189. )
  190. }