init.rs

use crate::{
    arch::{
        bootstrap::{EARLY_GDT_DESCRIPTOR, KERNEL_PML4},
        cpu::{wrmsr, CPU},
        io::Port8,
        mm::{ArchPhysAccess, GLOBAL_PAGE_TABLE, V_KERNEL_BSS_START},
    },
    bootstrap::BootStrapData,
    mm::{ArchMemory, ArchPagingMode, BasicPageAlloc, BasicPageAllocRef, ScopedAllocator},
};
use acpi::{platform::ProcessorState, AcpiHandler, AcpiTables, PhysicalMapping, PlatformInfo};
use core::{
    alloc::Allocator,
    arch::{asm, global_asm},
    cell::RefCell,
    hint::spin_loop,
    sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering},
};
use eonix_hal_traits::mm::Memory;
use eonix_mm::{
    address::{Addr as _, PAddr, PRange, PhysAccess, VRange},
    page_table::{PageAttribute, PagingMode, PTE as _},
    paging::{Page, PageAccess, PageAlloc, PAGE_SIZE},
};
use eonix_percpu::PercpuArea;

/// Slot through which the BSP publishes its early page allocator to the AP
/// currently being brought up; null while no allocator is published.
static BSP_PAGE_ALLOC: AtomicPtr<RefCell<BasicPageAlloc>> = AtomicPtr::new(core::ptr::null_mut());
/// Number of APs that have finished their early initialization.
static AP_COUNT: AtomicUsize = AtomicUsize::new(0);
/// Physical address loaded into `%rsp` by the AP being started; zero when unset.
static AP_STACK: AtomicUsize = AtomicUsize::new(0);
/// Semaphore taken by each AP in the trampoline so that only one AP at a time
/// grabs the published stack.
static AP_SEM: AtomicBool = AtomicBool::new(false);
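
// Real-mode trampoline for the application processors, placed in `.stage1.smp`.
// Each AP loads the early GDT, enables long mode (EFER.LME/NXE/SCE, CR4.PAE/PGE,
// the kernel PML4 and CR0.PE/WP/PG), takes `AP_SEM`, waits for the BSP to publish
// a stack in `AP_STACK`, clears the slot, releases the semaphore, and jumps to
// `ap_entry` with the initial stack pointer in `%rdi`.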
global_asm!(
    r#"
    .pushsection .stage1.smp, "ax", @progbits
    .code16
    ljmp $0x0, $2f
2:
    lgdt {early_gdt_descriptor}
    mov $0xc0000080, %ecx
    rdmsr
    or $0x901, %eax # set LME, NXE, SCE
    wrmsr
    mov %cr4, %eax
    or $0xa0, %eax # set PAE, PGE
    mov %eax, %cr4
    mov ${kernel_pml4}, %eax
    mov %eax, %cr3
    mov %cr0, %eax
    or $0x80010001, %eax # set PE, WP, PG
    mov %eax, %cr0
    ljmp $0x08, $2f
    .code64
2:
    mov $0x10, %ax
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %ss
    xor %rax, %rax
    inc %rax
    mov ${ap_semaphore}, %rcx
2:
    xchg %rax, (%rcx) # AcqRel
    cmp $0, %rax
    je 2f
    pause
    jmp 2b
2:
    mov ${ap_stack}, %rcx
2:
    mov (%rcx), %rsp # Acquire
    cmp $0, %rsp
    jne 2f
    pause
    jmp 2b
2:
    xor %rbp, %rbp
    mov %rbp, (%rcx) # Relaxed
    mov ${ap_semaphore}, %rcx
    xchg %rax, (%rcx) # Release
    mov %rsp, %rdi
    push %rbp # NULL return address
    mov ${ap_entry}, %rax
    jmp *%rax
    .popsection
    "#,
    early_gdt_descriptor = sym EARLY_GDT_DESCRIPTOR,
    kernel_pml4 = const KERNEL_PML4,
    ap_semaphore = sym AP_SEM,
    ap_stack = sym AP_STACK,
    ap_entry = sym ap_entry,
    options(att_syntax),
);
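
/// Enable the FPU and SSE: clear CR0.EM/TS, set CR0.MP/NE and
/// CR4.OSFXSR/OSXMMEXCPT, then reinitialize the x87 state with `fninit`.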
fn enable_sse() {
    unsafe {
        asm!(
            "mov %cr0, %rax",
            "and $(~0xc), %rax", // clear EM, TS
            "or $0x22, %rax",    // set MP, NE
            "mov %rax, %cr0",
            "mov %cr4, %rax",
            "or $0x600, %rax",   // set OSFXSR, OSXMMEXCPT
            "mov %rax, %cr4",
            "fninit",
            out("rax") _,
            options(att_syntax, nomem, nostack)
        )
    }
}
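
/// Allocate and install this CPU's percpu area (via the GS base MSR and `%gs:0`),
/// run the architecture-specific per-CPU initialization, and register the area.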
fn setup_cpu(alloc: impl PageAlloc) {
    let mut percpu_area = PercpuArea::new(|layout| {
        // TODO: Use page size defined in `arch`.
        let page_count = layout.size().div_ceil(PAGE_SIZE);
        let page = Page::alloc_at_least_in(page_count, alloc);
        let ptr = ArchPhysAccess::get_ptr_for_page(&page).cast();
        page.into_raw();
        ptr
    });

    percpu_area.setup(|pointer| {
        wrmsr(0xC0000101, pointer.addr().get() as u64);

        unsafe {
            // SAFETY: %gs:0 points to the start of the percpu area.
            asm!(
                "movq {}, %gs:0",
                in(reg) pointer.addr().get(),
                options(nostack, preserves_flags, att_syntax)
            );
        }
    });

    let mut cpu = CPU::local();

    unsafe {
        // SAFETY: Preemption is disabled and interrupts MUST be disabled since
        // we are doing this in the kernel initialization phase.
        cpu.as_mut().init();
    }

    percpu_area.register(cpu.cpuid());
}
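
/// Remap the legacy 8259 PICs so their vectors (0x20..0x30) do not collide with
/// CPU exceptions, then unmask all IRQ lines.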
fn setup_pic() {
    // TODO: Remove this when we have completely switched to APIC.
    const PIC1_COMMAND: Port8 = Port8::new(0x20);
    const PIC1_DATA: Port8 = Port8::new(0x21);
    const PIC2_COMMAND: Port8 = Port8::new(0xA0);
    const PIC2_DATA: Port8 = Port8::new(0xA1);

    // Initialize PIC
    PIC1_COMMAND.write(0x11); // edge trigger mode
    PIC1_DATA.write(0x20); // IRQ 0-7 offset
    PIC1_DATA.write(0x04); // cascade with slave PIC
    PIC1_DATA.write(0x01); // no buffer mode

    PIC2_COMMAND.write(0x11); // edge trigger mode
    PIC2_DATA.write(0x28); // IRQ 8-15 offset
    PIC2_DATA.write(0x02); // cascade with master PIC
    PIC2_DATA.write(0x01); // no buffer mode

    // Allow all IRQs
    PIC1_DATA.write(0x0);
    PIC2_DATA.write(0x0);
}
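
/// Discover application processors from the ACPI tables and bring them up one
/// at a time: for each AP, publish the BSP page allocator and a freshly
/// allocated stack, wait until the AP reports in through `AP_COUNT`, then take
/// the allocator back.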
fn bootstrap_smp(alloc: impl Allocator, page_alloc: &RefCell<BasicPageAlloc>) {
    #[derive(Clone)]
    struct Handler;

    impl AcpiHandler for Handler {
        unsafe fn map_physical_region<T>(
            &self,
            physical_address: usize,
            size: usize,
        ) -> PhysicalMapping<Self, T> {
            unsafe {
                PhysicalMapping::new(
                    physical_address,
                    ArchPhysAccess::as_ptr(PAddr::from(physical_address)),
                    size,
                    size,
                    self.clone(),
                )
            }
        }

        fn unmap_physical_region<T>(_: &PhysicalMapping<Self, T>) {}
    }

    let acpi_tables = unsafe {
        // SAFETY: Probing for RSDP in BIOS memory should be fine.
        AcpiTables::search_for_rsdp_bios(Handler).unwrap()
    };

    let platform_info = PlatformInfo::new_in(&acpi_tables, &alloc).unwrap();
    let processor_info = platform_info.processor_info.unwrap();

    let ap_count = processor_info
        .application_processors
        .iter()
        .filter(|ap| !matches!(ap.state, ProcessorState::Disabled))
        .count();

    unsafe {
        CPU::local().bootstrap_cpus();
    }

    for current_count in 0..ap_count {
        let stack_range = {
            let page_alloc = BasicPageAllocRef::new(&page_alloc);
            let ap_stack = Page::alloc_order_in(4, page_alloc);
            let stack_range = ap_stack.range();
            ap_stack.into_raw();
            stack_range
        };

        // SAFETY: All the APs can see the allocator work done before this point.
        let old = BSP_PAGE_ALLOC.swap((&raw const *page_alloc) as *mut _, Ordering::Release);
        assert!(
            old.is_null(),
            "BSP_PAGE_ALLOC should be null before we release it"
        );

        // SAFETY: The AP reading the stack will see the allocation work.
        while let Err(_) = AP_STACK.compare_exchange_weak(
            0,
            stack_range.end().addr(),
            Ordering::Release,
            Ordering::Relaxed,
        ) {
            // Spin until we can set the stack pointer for the AP.
            spin_loop();
        }

        spin_loop();

        // SAFETY: Make sure if we read the AP count, the allocator MUST have been released.
        while AP_COUNT.load(Ordering::Acquire) == current_count {
            // Wait for the AP to finish its initialization.
            spin_loop();
        }

        // SAFETY: We acquire the work done by the AP.
        let old = BSP_PAGE_ALLOC.swap(core::ptr::null_mut(), Ordering::Acquire);
        assert_eq!(
            old as *const _, &raw const *page_alloc,
            "We should read the previously saved allocator"
        );
    }
}
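
/// Entry point of the BSP after the early bootstrap stage: enables SSE, builds
/// the early page allocator from the free RAM ranges, maps and zeroes the
/// kernel BSS, initializes the local CPU and the legacy PIC, starts the APs,
/// and finally hands control to `_eonix_hal_main`.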
pub extern "C" fn kernel_init() -> ! {
    let global_page_table = &GLOBAL_PAGE_TABLE;
    let paging_levels = ArchPagingMode::LEVELS;

    enable_sse();

    let real_allocator = RefCell::new(BasicPageAlloc::new());
    let alloc = BasicPageAllocRef::new(&real_allocator);

    unsafe extern "C" {
        fn BSS_LENGTH();
    }

    for range in ArchMemory::free_ram() {
        real_allocator.borrow_mut().add_range(range);
    }

    // Map kernel BSS
    for pte in global_page_table.iter_kernel_in(
        VRange::from(V_KERNEL_BSS_START).grow(BSS_LENGTH as usize),
        paging_levels,
        &alloc,
    ) {
        let attr = PageAttribute::PRESENT
            | PageAttribute::WRITE
            | PageAttribute::READ
            | PageAttribute::HUGE
            | PageAttribute::GLOBAL;

        let page = Page::alloc_in(&alloc);
        pte.set(page.into_raw(), attr.into());
    }

    unsafe {
        // SAFETY: We've just mapped the area with sufficient length.
        core::ptr::write_bytes(V_KERNEL_BSS_START.addr() as *mut u8, 0, BSS_LENGTH as usize);
    }

    setup_cpu(&alloc);
    setup_pic();

    ScopedAllocator::new(&mut [0; 1024])
        .with_alloc(|mem_alloc| bootstrap_smp(mem_alloc, &real_allocator));

    unsafe extern "Rust" {
        fn _eonix_hal_main(_: BootStrapData) -> !;
    }

    let bootstrap_data = BootStrapData {
        early_stack: PRange::new(PAddr::from(0x6000), PAddr::from(0x80000)),
        allocator: Some(real_allocator),
    };

    unsafe {
        _eonix_hal_main(bootstrap_data);
    }
}
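
/// Rust entry point for each application processor, reached from the SMP
/// trampoline with the AP's initial stack pointer in `%rdi`. It borrows the
/// allocator published by the BSP, sets up the local CPU, reports completion
/// through `AP_COUNT`, and then jumps to `_eonix_hal_ap_main`.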
pub extern "C" fn ap_entry(stack_bottom: PAddr) -> ! {
    let stack_range = PRange::new(stack_bottom - (1 << 3) * PAGE_SIZE, stack_bottom);

    {
        // SAFETY: Acquire all the work done by the BSP and other APs.
        let alloc = loop {
            let alloc = BSP_PAGE_ALLOC.swap(core::ptr::null_mut(), Ordering::AcqRel);
            if !alloc.is_null() {
                break alloc;
            }
        };

        let ref_alloc = unsafe { &*alloc };
        setup_cpu(BasicPageAllocRef::new(&ref_alloc));

        // SAFETY: Release our allocation work.
        BSP_PAGE_ALLOC.store(alloc, Ordering::Release);
    }

    // SAFETY: Make sure the allocator is set before we increment the AP count.
    AP_COUNT.fetch_add(1, Ordering::Release);

    unsafe extern "Rust" {
        fn _eonix_hal_ap_main(stack_range: PRange) -> !;
    }

    unsafe {
        _eonix_hal_ap_main(stack_range);
    }
}