//! RISC-V 64 bootstrap: BSP/AP entry points, early Sv48 paging, and SMP bring-up.

  1. use core::alloc::Allocator;
  2. use core::arch::{asm, global_asm, naked_asm};
  3. use core::cell::RefCell;
  4. use core::hint::spin_loop;
  5. use core::ptr::NonNull;
  6. use core::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
  7. use eonix_hal_traits::mm::Memory;
  8. use eonix_mm::address::{Addr as _, PAddr, PRange, PhysAccess, VAddr, VRange};
  9. use eonix_mm::page_table::{
  10. PageAttribute, PageTable, PagingMode, TableAttribute, PTE as _,
  11. };
  12. use eonix_mm::paging::{
  13. Folio, FrameAlloc, PageAccess, PageBlock, PAGE_SIZE, PFN,
  14. };
  15. use eonix_percpu::PercpuArea;
  16. use fdt::Fdt;
  17. use riscv::asm::sfence_vma_all;
  18. use riscv::register::satp;
  19. use sbi::hsm::hart_start;
  20. use sbi::legacy::console_putchar;
  21. use sbi::PhysicalAddress;
  22. use super::config::mm::*;
  23. use super::config::{self};
  24. use super::console::write_str;
  25. use super::cpu::{CPUID, CPU_COUNT};
  26. use super::time::set_next_timer;
  27. use crate::arch::cpu::CPU;
  28. use crate::arch::fdt::{init_dtb_and_fdt, FdtExt, FDT};
  29. use crate::arch::mm::{
  30. ArchPagingMode, ArchPhysAccess, FreeRam, PageAccessImpl, PageAttribute64,
  31. RawPageTableSv48, GLOBAL_PAGE_TABLE,
  32. };
  33. use crate::bootstrap::BootStrapData;
  34. use crate::mm::{
  35. ArchMemory, BasicPageAlloc, BasicPageAllocRef, ScopedAllocator,
  36. };
// 16-page (64 KiB) boot stack for the BSP, placed in the low, physically
// addressed `.bootstrap.stack` section so it is usable before paging is on.
#[unsafe(link_section = ".bootstrap.stack")]
static BOOT_STACK: [u8; 4096 * 16] = [0; 4096 * 16];
// Indirection cell holding &BOOT_STACK; `riscv64_start` reads it with a
// volatile load to recover the stack's link-time address at runtime.
static BOOT_STACK_START: &'static [u8; 4096 * 16] = &BOOT_STACK;
// Tiny (256-byte) scratch stack used by an AP between `hart_start` and the
// hand-off of its real stack in `get_ap_stack`.
#[unsafe(link_section = ".bootstrap.stack")]
static TEMP_AP_STACK: [u8; 256] = [0; 256];
// Indirection cell for TEMP_AP_STACK, mirroring BOOT_STACK_START.
static TEMP_AP_STACK_START: &'static [u8; 256] = &TEMP_AP_STACK;
// One page-table page: 512 raw 64-bit PTEs, page-aligned as the hardware
// requires for an Sv48 table.
#[repr(C, align(4096))]
struct BootPageTable([u64; PTES_PER_PAGE]);
/// map 0x8000 0000 to itself and 0xffff ffff 8000 0000
///
/// Root (level-0) table of the temporary boot mapping. PTE flag bits used:
/// 0x2f = V|R|W|X|G (leaf), 0x21 = V|G (pointer to next level).
#[unsafe(link_section = ".bootstrap.page_table.1")]
static BOOT_PAGE_TABLE: BootPageTable = {
    let mut arr: [u64; PTES_PER_PAGE] = [0; PTES_PER_PAGE];
    // Entry 0: 512 GiB leaf at PA 0 — identity-maps low memory so the boot
    // code keeps executing at its physical address right after satp is set.
    arr[0] = 0 | 0x2f;
    // Entry 510: same 512 GiB leaf at PA 0, visible at VA
    // 0xffff_ff00_0000_0000 — presumably the direct physical-map window;
    // TODO confirm against the kernel's VA layout.
    arr[510] = 0 | 0x2f;
    // Entry 511: non-leaf pointer (V|G) to PT1 at PPN 0x80202, i.e.
    // PA 0x8020_2000 — this hard-codes PT1's placement via the
    // `.bootstrap.page_table.2` section; verify against the linker script.
    arr[511] = (0x80202 << 10) | 0x21;
    BootPageTable(arr)
};
// Second-level table referenced by BOOT_PAGE_TABLE[511]. Entry 510 is a
// 1 GiB leaf (V|R|W|X|G) at PPN 0x80000 (PA 0x8000_0000), which maps the
// kernel image's high half at VA 0xffff_ffff_8000_0000.
// `#[used]` keeps it alive: it is only referenced by the hard-coded PPN
// in BOOT_PAGE_TABLE, never by name.
#[unsafe(link_section = ".bootstrap.page_table.2")]
#[used]
static PT1: BootPageTable = {
    let mut arr: [u64; PTES_PER_PAGE] = [0; PTES_PER_PAGE];
    arr[510] = (0x80000 << 10) | 0x2f;
    BootPageTable(arr)
};
// Mailbox through which the BSP temporarily lends its page allocator to the
// AP currently booting (null = not available). See bootstrap_smp / ap_entry.
static BSP_PAGE_ALLOC: AtomicPtr<RefCell<BasicPageAlloc>> =
    AtomicPtr::new(core::ptr::null_mut());
// Number of APs that have completed ap_entry's setup; the BSP waits on this
// before starting the next hart.
static AP_COUNT: AtomicUsize = AtomicUsize::new(0);
// Mailbox holding the next AP's stack-top address (0 = empty).
static AP_STACK: AtomicUsize = AtomicUsize::new(0);
// Hand-rolled spin mutex serializing APs inside get_ap_stack.
static AP_SEM: AtomicBool = AtomicBool::new(false);
/// bootstrap in rust
///
/// Firmware entry point for the BSP, entered with the MMU off at the
/// physical load address. Register contract (SBI): a0 = hart id,
/// a1 = DTB physical address; both are left untouched (only t0/t1 are
/// clobbered) so they arrive intact as `riscv64_start`'s arguments.
///
/// Assembly steps:
/// 1. sp = BOOT_STACK + 0x10000 (top of the 16-page boot stack).
/// 2. satp = mode 9 (Sv48) | (BOOT_PAGE_TABLE address >> 12), then
///    `sfence.vma` to flush stale translations.
/// 3. Indirect jump to `riscv64_start`.
///
/// The `.8byte` literals in `.bootstrap.data` hold link-time absolute
/// addresses; they are fetched with PC-relative `ld`, which works both
/// before and after paging is enabled.
#[unsafe(naked)]
#[unsafe(no_mangle)]
#[unsafe(link_section = ".bootstrap.entry")]
unsafe extern "C" fn _start(hart_id: usize, dtb_addr: usize) {
    naked_asm!(
        "
        ld sp, 2f
        li t0, 0x10000
        add sp, sp, t0
        ld t0, 3f
        srli t0, t0, 12
        li t1, 9 << 60
        or t0, t0, t1
        csrw satp, t0
        sfence.vma
        ld t0, 4f
        jalr t0 // call riscv64_start
        .pushsection .bootstrap.data, \"aw\", @progbits
        2:
        .8byte {boot_stack}
        3:
        .8byte {page_table}
        4:
        .8byte {riscv64_start}
        .popsection
        ",
        boot_stack = sym BOOT_STACK,
        page_table = sym BOOT_PAGE_TABLE,
        riscv64_start = sym riscv64_start,
    )
}
/// BSP Rust-level entry, called from `_start` with paging already enabled
/// via the temporary boot page table.
///
/// Sequence: parse the DTB, seed the early page allocator from free RAM,
/// install the real kernel page table, set up per-CPU state for this hart,
/// bring up the secondary harts, then hand control (and the allocator) to
/// `_eonix_hal_main`, which never returns.
pub unsafe extern "C" fn riscv64_start(hart_id: usize, dtb_addr: PAddr) -> ! {
    let fdt = Fdt::from_ptr(ArchPhysAccess::as_ptr(dtb_addr).as_ptr())
        .expect("Failed to parse DTB from static memory.");
    // Early physical page allocator, seeded with every free RAM range the
    // device tree reports.
    let real_allocator = RefCell::new(BasicPageAlloc::new());
    let alloc = BasicPageAllocRef::new(&real_allocator);
    for range in fdt.present_ram().free_ram() {
        real_allocator.borrow_mut().add_range(range);
    }
    setup_kernel_page_table(alloc.clone());
    unsafe {
        init_dtb_and_fdt(dtb_addr);
    }
    setup_cpu(&alloc, hart_id);
    // Bring up the APs; the 1 KiB scratch buffer backs a temporary
    // bump allocator for the duration of SMP bring-up.
    ScopedAllocator::new(&mut [0; 1024])
        .with_alloc(|mem_alloc| bootstrap_smp(mem_alloc, &real_allocator));
    unsafe extern "Rust" {
        fn _eonix_hal_main(_: BootStrapData) -> !;
    }
    // Volatile read of the indirection cell to obtain BOOT_STACK's stored
    // link-time address without the compiler folding it to a constant —
    // NOTE(review): presumably to survive relocation; confirm.
    let start = unsafe {
        ((&BOOT_STACK_START) as *const &'static [u8; 4096 * 16]).read_volatile()
            as *const _ as usize
    };
    // Report the early stack's physical range so it can be reclaimed later.
    let bootstrap_data = BootStrapData {
        early_stack: PRange::new(
            PAddr::from(start),
            PAddr::from(start + 4096 * 16),
        ),
        allocator: Some(real_allocator),
    };
    // set current hart's mtimecmp register
    set_next_timer();
    unsafe {
        _eonix_hal_main(bootstrap_data);
    }
}
// Linker-script symbol whose *address* encodes the kernel BSS length.
// Declared as a function only so its address can be taken; it is never
// called — see the `BSS_LENGTH as usize` casts below.
unsafe extern "C" {
    fn BSS_LENGTH();
}
/// Build the permanent kernel page table, map and zero the BSS, and switch
/// satp over to it.
///
/// TODO: add finer-grained control over the kernel image mapping
/// (or leave it as is).
fn setup_kernel_page_table(alloc: BasicPageAllocRef) {
    let global_page_table = PageTable::<ArchPagingMode, _, _>::new(
        GLOBAL_PAGE_TABLE.clone(),
        alloc.clone(),
        PageAccessImpl,
    );
    let attr = PageAttribute::WRITE
        | PageAttribute::READ
        | PageAttribute::EXECUTE
        | PageAttribute::GLOBAL
        | PageAttribute::PRESENT;
    const KERNEL_BSS_START: VAddr = VAddr::from(0xffffffff40000000);
    // Map kernel BSS
    // `BSS_LENGTH as usize` is the symbol's address, which the linker script
    // sets to the BSS size in bytes.
    let bss_range = VRange::from(KERNEL_BSS_START).grow(BSS_LENGTH as usize);
    for pte in global_page_table.iter_kernel(bss_range) {
        let page = alloc.alloc().unwrap();
        // Data pages: strip EXECUTE, keep R/W/G/P.
        let attr = attr.difference(PageAttribute::EXECUTE);
        pte.set(page.into_raw(), attr.into());
    }
    sfence_vma_all();
    // SAFETY-relevant: the freshly mapped BSS pages contain garbage; zero
    // them before any Rust code relies on zero-initialized statics.
    unsafe {
        core::ptr::write_bytes(
            KERNEL_BSS_START.addr() as *mut (),
            0,
            BSS_LENGTH as usize,
        );
    }
    // Switch from the temporary boot table to the real kernel table
    // (Sv48, ASID 0), then flush the TLB.
    unsafe {
        satp::set(
            satp::Mode::Sv48,
            0,
            usize::from(PFN::from(global_page_table.addr())),
        );
    }
    sfence_vma_all();
    // Deliberate leak: the global page table must live for the kernel's
    // lifetime; dropping the wrapper would tear down live mappings.
    core::mem::forget(global_page_table);
}
/// set up tp register to percpu
///
/// Allocates this hart's per-CPU area, points `tp` (x4) at it, records the
/// hart id as the CPU id, and registers the area. Runs on both the BSP and
/// each AP during bring-up.
fn setup_cpu(alloc: impl FrameAlloc, hart_id: usize) {
    CPU_COUNT.fetch_add(1, Ordering::Relaxed);
    let mut percpu_area = PercpuArea::new(|layout| {
        // Round the per-CPU layout up to whole pages and back the area with
        // freshly allocated frames.
        let page_count = layout.size().div_ceil(PAGE_SIZE);
        let page = alloc.alloc_at_least(page_count).unwrap();
        let ptr = unsafe {
            // TODO: safety
            ArchPhysAccess::as_ptr(page.start())
        };
        // Deliberate leak: the per-CPU area lives for the hart's lifetime.
        page.into_raw();
        ptr
    });
    // set tp(x4) register
    percpu_area.setup(|pointer| {
        let percpu_base_addr = pointer.addr().get();
        unsafe {
            asm!(
                "mv tp, {0}",
                in(reg) percpu_base_addr,
                options(nostack, preserves_flags)
            );
        }
    });
    CPUID.set(hart_id);
    let mut cpu = CPU::local();
    unsafe {
        cpu.as_mut().init();
    }
    percpu_area.register(cpu.cpuid());
}
/// Return the link-time (physical) address of `_ap_start`, used as the SBI
/// `hart_start` entry point.
///
/// The function pointer is stashed in a static and read back with a
/// volatile load so the compiler cannot constant-fold the address —
/// NOTE(review): presumably to get the stored absolute address rather than
/// a PC-relative one; confirm.
fn get_ap_start_addr() -> usize {
    // Local re-declaration of the `_ap_start` symbol defined elsewhere in
    // this file; only its address is used.
    unsafe extern "C" {
        fn _ap_start();
    }
    static AP_START_VALUE: &'static unsafe extern "C" fn() =
        &(_ap_start as unsafe extern "C" fn());
    unsafe { (AP_START_VALUE as *const _ as *const usize).read_volatile() }
}
/// Start every secondary hart listed in the FDT, one at a time.
///
/// Per-AP handshake (pairs with `get_ap_stack` / `ap_entry`):
/// 1. Allocate a 2^4-page stack and leak the frames (the AP owns them).
/// 2. Publish the BSP's page allocator through BSP_PAGE_ALLOC (Release);
///    it must have been null, i.e. reclaimed from the previous AP.
/// 3. Publish the stack top through AP_STACK (CAS from 0, Release).
/// 4. `hart_start` the AP at `_ap_start`'s physical address.
/// 5. Wait for AP_COUNT to tick (Acquire), then take the allocator back
///    and verify it is the same pointer we lent out.
///
/// NOTE(review): the `alloc: impl Allocator` parameter is currently unused
/// in the body — confirm whether it is only kept to tie the caller's
/// ScopedAllocator lifetime to this call.
fn bootstrap_smp(alloc: impl Allocator, page_alloc: &RefCell<BasicPageAlloc>) {
    let local_hart_id = CPU::local().cpuid();
    let mut ap_count = 0;
    for hart_id in FDT.harts().filter(|&id| id != local_hart_id) {
        let stack_range = {
            let page_alloc = BasicPageAllocRef::new(&page_alloc);
            let ap_stack = page_alloc.alloc_order(4).unwrap();
            let stack_range = ap_stack.range();
            // Leak the frames: the AP's stack is never freed here.
            ap_stack.into_raw();
            stack_range
        };
        // Lend the allocator to the AP; must not already be lent out.
        let old = BSP_PAGE_ALLOC
            .swap((&raw const *page_alloc) as *mut _, Ordering::Release);
        assert!(old.is_null());
        // Publish the stack top; spins only if a previous value has not
        // been consumed yet (weak CAS may also fail spuriously).
        while AP_STACK
            .compare_exchange_weak(
                0,
                stack_range.end().addr(),
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_err()
        {
            spin_loop();
        }
        unsafe {
            hart_start(hart_id, PhysicalAddress::new(get_ap_start_addr()), 0);
        }
        // Wait until the AP has finished its per-CPU setup.
        while AP_COUNT.load(Ordering::Acquire) == ap_count {
            spin_loop();
        }
        // Reclaim the allocator and check the AP gave back what we lent.
        let old = BSP_PAGE_ALLOC.swap(core::ptr::null_mut(), Ordering::Acquire);
        assert_eq!(old as *const _, &raw const *page_alloc);
        ap_count += 1;
    }
}
  250. #[unsafe(naked)]
  251. #[unsafe(no_mangle)]
  252. #[unsafe(link_section = ".bootstrap.apentry")]
  253. unsafe extern "C" fn _ap_start(hart_id: usize) {
  254. naked_asm!(
  255. "
  256. la sp, 1f // set temp stack
  257. mv s0, a0 // save hart id
  258. ld t0, 2f
  259. srli t0, t0, 12
  260. li t1, 9 << 60
  261. or t0, t0, t1
  262. csrw satp, t0
  263. sfence.vma
  264. ld t0, 3f
  265. jalr t0
  266. mv sp, a0
  267. mv a0, s0
  268. ld t0, 4f
  269. jalr t0
  270. .pushsection .bootstrap.data, \"aw\", @progbits
  271. 1: .8byte {temp_stack}
  272. 2: .8byte {page_table}
  273. 3: .8byte {get_ap_stack}
  274. 4: .8byte {ap_entry}
  275. .popsection
  276. ",
  277. temp_stack = sym TEMP_AP_STACK_START,
  278. page_table = sym BOOT_PAGE_TABLE,
  279. get_ap_stack = sym get_ap_stack,
  280. ap_entry = sym ap_entry,
  281. )
  282. }
/// Consume the stack-top address the BSP published in AP_STACK for this AP.
///
/// Called from `_ap_start` on the shared 256-byte scratch stack, so APs are
/// serialized by the AP_SEM spin mutex while they each take their value.
/// Returns the stack-top address (a physical address, reachable through the
/// boot page table's identity mapping).
fn get_ap_stack() -> usize {
    // Acquire the spin mutex (weak CAS may fail spuriously; just retry).
    while AP_SEM
        .compare_exchange_weak(
            false,
            true,
            Ordering::Acquire,
            Ordering::Relaxed,
        )
        .is_err()
    {
        core::hint::spin_loop();
    }
    // Take the published value, spinning until the BSP has stored a
    // non-zero stack top; swapping 0 back marks the mailbox empty.
    let stack_addr = loop {
        let addr = AP_STACK.swap(0, Ordering::AcqRel);
        if addr != 0 {
            break addr;
        }
        core::hint::spin_loop();
    };
    // Release the spin mutex.
    AP_SEM.store(false, Ordering::Release);
    stack_addr
}
  305. fn ap_entry(hart_id: usize, stack_bottom: PAddr) -> ! {
  306. let stack_range =
  307. PRange::new(stack_bottom - (1 << 3) * PAGE_SIZE, stack_bottom);
  308. {
  309. // SAFETY: Acquire all the work done by the BSP and other APs.
  310. let alloc = loop {
  311. let alloc =
  312. BSP_PAGE_ALLOC.swap(core::ptr::null_mut(), Ordering::AcqRel);
  313. if !alloc.is_null() {
  314. break alloc;
  315. }
  316. };
  317. let ref_alloc = unsafe { &*alloc };
  318. setup_cpu(BasicPageAllocRef::new(&ref_alloc), hart_id);
  319. // SAFETY: Release our allocation work.
  320. BSP_PAGE_ALLOC.store(alloc, Ordering::Release);
  321. }
  322. // SAFETY: Make sure the allocator is set before we increment the AP count.
  323. AP_COUNT.fetch_add(1, Ordering::Release);
  324. unsafe extern "Rust" {
  325. fn _eonix_hal_ap_main(stack_range: PRange) -> !;
  326. }
  327. // set current hart's mtimecmp register
  328. set_next_timer();
  329. unsafe {
  330. _eonix_hal_ap_main(stack_range);
  331. }
  332. }
/// Write a string to the early boot console (delegates to the arch
/// console's `write_str`).
pub fn early_console_write(s: &str) {
    write_str(s);
}
/// Write a single byte to the early boot console via the legacy SBI
/// console-putchar call.
pub fn early_console_putchar(ch: u8) {
    console_putchar(ch);
}
/// Power off the machine via the legacy SBI shutdown call; never returns.
pub fn shutdown() -> ! {
    sbi::legacy::shutdown();
}