// init.rs — per-CPU initialization: TSS/GDT layout, CPU state, and the SMP bootstrap macro.
  1. use super::{enable_sse, GDTEntry, InterruptControl, GDT};
  2. use core::{pin::Pin, ptr::addr_of};
  3. #[repr(C)]
  4. #[derive(Debug, Clone, Copy)]
  5. #[allow(non_camel_case_types)]
  6. struct TSS_SP {
  7. low: u32,
  8. high: u32,
  9. }
  10. #[repr(C)]
  11. pub(crate) struct TSS {
  12. _reserved1: u32,
  13. rsp: [TSS_SP; 3],
  14. _reserved2: u32,
  15. _reserved3: u32,
  16. ist: [TSS_SP; 7],
  17. _reserved4: u32,
  18. _reserved5: u32,
  19. _reserved6: u16,
  20. iomap_base: u16,
  21. }
  22. impl TSS {
  23. pub fn new() -> Self {
  24. Self {
  25. _reserved1: 0,
  26. rsp: [TSS_SP { low: 0, high: 0 }; 3],
  27. _reserved2: 0,
  28. _reserved3: 0,
  29. ist: [TSS_SP { low: 0, high: 0 }; 7],
  30. _reserved4: 0,
  31. _reserved5: 0,
  32. _reserved6: 0,
  33. iomap_base: 0,
  34. }
  35. }
  36. pub fn set_rsp0(&mut self, rsp: u64) {
  37. self.rsp[0].low = rsp as u32;
  38. self.rsp[0].high = (rsp >> 32) as u32;
  39. }
  40. }
/// Architecture-specific cpu status data.
pub struct CPU {
    // Id reported by `InterruptControl::new` for this CPU.
    cpuid: usize,
    // Per-CPU Global Descriptor Table; receives the TSS descriptor in
    // `CPU::init` and a TLS entry via `set_tls32`.
    gdt: GDT,
    // Per-CPU Task State Segment; RSP0 is updated via `set_rsp0`.
    tss: TSS,
    // Interrupt controller handle (IDT, timer, and SIPI delivery).
    pub interrupt: InterruptControl,
}
impl CPU {
    /// Create a new per-CPU state block with a fresh GDT and zeroed TSS.
    ///
    /// `base` is forwarded to [`InterruptControl::new`], which also reports
    /// this CPU's id.
    pub fn new(base: usize) -> Self {
        let (interrupt_control, cpuid) = InterruptControl::new(base);
        Self {
            cpuid,
            gdt: GDT::new(),
            tss: TSS::new(),
            interrupt: interrupt_control,
        }
    }

    /// Load GDT and TSS in place.
    ///
    /// # Safety
    /// Make sure preemption and interrupt are disabled before calling this function.
    pub unsafe fn init(self: Pin<&mut Self>) {
        enable_sse();
        // SAFETY: We don't move the object.
        let self_mut = self.get_unchecked_mut();
        // The GDT's TSS descriptor stores the address of `self.tss`; `self`
        // is pinned, so that address remains stable after `init` returns.
        let tss_addr = addr_of!(self_mut.tss);
        self_mut.gdt.set_tss(tss_addr as u64);
        self_mut.gdt.load();
        // SAFETY: `self` is pinned, so are its fields.
        Pin::new_unchecked(&mut self_mut.interrupt).setup_idt();
        self_mut.interrupt.setup_timer();
    }

    /// Bootstrap all CPUs.
    /// This should only be called on the BSP.
    ///
    /// # Safety
    /// BSP-only; the APs started by the SIPI must have a valid entry path
    /// (see `define_smp_bootstrap!`).
    pub unsafe fn bootstrap_cpus(&self) {
        self.interrupt.send_sipi();
    }

    /// Set the ring-0 stack pointer (RSP0) in this CPU's TSS.
    ///
    /// # Safety
    /// `rsp` must be the top of a valid, mapped kernel stack; it will be
    /// used for subsequent privilege-level transitions on this CPU.
    pub unsafe fn set_rsp0(&mut self, rsp: u64) {
        self.tss.set_rsp0(rsp);
    }

    /// Install a 32-bit TLS descriptor into this CPU's GDT.
    ///
    /// # Safety
    /// `desc` must be a well-formed GDT entry; loading a malformed
    /// descriptor faults.
    pub unsafe fn set_tls32(&mut self, desc: GDTEntry) {
        self.gdt.set_tls32(desc);
    }

    /// This CPU's id, as reported by [`InterruptControl::new`].
    pub fn cpuid(&self) -> usize {
        self.cpuid
    }
}
/// Define the statics, real-mode bootstrap stub, and wait loop used to bring
/// Application Processors (APs) online.
///
/// Expands to:
/// - `BOOT_SEMAPHORE` — spinlock guarding the stack hand-off below,
/// - `BOOT_STACK` — stack top published by the BSP for the next AP
///   (0 means "not ready"),
/// - `CPU_COUNT` — CPUs online so far (starts at 1 for the BSP),
/// - `ap_bootstrap` (in `.stage1.smp`) — 16-bit entry that switches an AP
///   to long mode, claims a stack, and jumps to `$ap_entry`,
/// - `wait_cpus_online()` — BSP-side loop that allocates a stack per AP via
///   `$alloc_kstack` and returns once `$cpu_count` CPUs have checked in.
#[macro_export]
macro_rules! define_smp_bootstrap {
    ($cpu_count:literal, $ap_entry:ident, $alloc_kstack:tt) => {
        static BOOT_SEMAPHORE: core::sync::atomic::AtomicU64 =
            core::sync::atomic::AtomicU64::new(0);
        static BOOT_STACK: core::sync::atomic::AtomicU64 =
            core::sync::atomic::AtomicU64::new(0);
        static CPU_COUNT: core::sync::atomic::AtomicU64 =
            core::sync::atomic::AtomicU64::new(1);
        // AP bring-up path: 16-bit real mode -> enable LME/NXE/SCE and
        // PAE/PGE -> load kernel page table -> enable paging -> far jump
        // into 64-bit code -> spin for a stack -> jump to the AP entry.
        core::arch::global_asm!(
            r#"
            .pushsection .stage1.smp
            .code16
            .globl ap_bootstrap
            .type ap_bootstrap, @function
            ap_bootstrap:
                ljmp $0x0, $2f
            2:
                # we use the shared gdt for cpu bootstrapping
                lgdt EARLY_GDT_DESCRIPTOR
                # set msr
                mov $0xc0000080, %ecx
                rdmsr
                or $0x901, %eax # set LME, NXE, SCE
                wrmsr
                # set cr4
                mov %cr4, %eax
                or $0xa0, %eax # set PAE, PGE
                mov %eax, %cr4
                # load new page table
                mov ${KERNEL_PML4}, %eax
                mov %eax, %cr3
                mov %cr0, %eax
                // SET PE, WP, PG
                or $0x80010001, %eax
                mov %eax, %cr0
                ljmp $0x08, $2f
            .code64
            2:
                mov $0x10, %ax
                mov %ax, %ds
                mov %ax, %es
                mov %ax, %ss
                xor %rsp, %rsp
                xor %rax, %rax
                inc %rax
            2:
                xchg %rax, {BOOT_SEMAPHORE}
                cmp $0, %rax
                je 2f
                pause
                jmp 2b
            2:
                mov {BOOT_STACK}, %rsp # Acquire
                cmp $0, %rsp
                jne 2f
                pause
                jmp 2b
            2:
                xor %rax, %rax
                mov %rax, {BOOT_STACK} # Release
                xchg %rax, {BOOT_SEMAPHORE}
                lock incq {CPU_COUNT}
                xor %rbp, %rbp
                push %rbp # NULL return address
                jmp {AP_ENTRY}
            .popsection
            "#,
            KERNEL_PML4 = const 0x1000,
            BOOT_SEMAPHORE = sym BOOT_SEMAPHORE,
            BOOT_STACK = sym BOOT_STACK,
            CPU_COUNT = sym CPU_COUNT,
            AP_ENTRY = sym $ap_entry,
            options(att_syntax),
        );

        /// Hand a fresh kernel stack to each waiting AP and spin until all
        /// `$cpu_count` CPUs have checked in.
        // NOTE(review): intended to run on the BSP after the SIPIs are sent
        // (see `CPU::bootstrap_cpus`) — confirm against callers.
        pub unsafe fn wait_cpus_online() {
            use core::sync::atomic::Ordering;
            while CPU_COUNT.load(Ordering::Acquire) != $cpu_count {
                // Publish a new stack only when the previous one has been
                // consumed (the AP stub zeroes BOOT_STACK after claiming it).
                if BOOT_STACK.load(Ordering::Acquire) == 0 {
                    let stack_bottom = $alloc_kstack as u64;
                    BOOT_STACK.store(stack_bottom, Ordering::Release);
                }
                $crate::pause();
            }
        }
    };
}