cpu.rs

use super::gdt::{GDTEntry, GDT};
use super::interrupt::InterruptControl;
use super::trap::TrapContext;
use core::arch::asm;
use core::marker::PhantomPinned;
use core::mem::size_of;
use core::pin::Pin;
use eonix_preempt::PreemptGuard;
use eonix_sync_base::LazyLock;

#[eonix_percpu::define_percpu]
static LOCAL_CPU: LazyLock<CPU> = LazyLock::new(CPU::new);
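// Each core sees its own `LOCAL_CPU` through the `define_percpu` attribute,
// and `LazyLock` defers construction to the first access, so every CPU
// builds its own `CPU` value the first time it touches the static.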

#[repr(C)]
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
struct TSS_SP {
    low: u32,
    high: u32,
}

#[repr(C)]
pub(crate) struct TSS {
    _reserved1: u32,
    rsp: [TSS_SP; 3],
    _reserved2: u32,
    _reserved3: u32,
    ist: [TSS_SP; 7],
    _reserved4: u32,
    _reserved5: u32,
    _reserved6: u16,
    iomap_base: u16,
    _pinned: PhantomPinned,
}
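
// The layout above mirrors the 64-bit TSS from the Intel SDM (Vol. 3A,
// "Task Management"): three privilege-level stack pointers (RSP0..RSP2),
// seven interrupt stack table slots, and the I/O map base at the end. The
// 64-bit pointers sit at 4-byte-aligned offsets, so each one is split into
// `low`/`high` u32 halves to keep the `repr(C)` layout exact without
// resorting to `repr(packed)`.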

#[derive(Debug, Clone)]
pub enum UserTLS {
    /// TODO: This is not used yet.
    #[allow(dead_code)]
    TLS64(u64),
    TLS32 {
        base: u64,
        desc: GDTEntry,
    },
}
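
// `TLS64` would carry a raw segment base for native 64-bit tasks (unused so
// far, per the TODO above); `TLS32` models the 32-bit scheme, where the
// thread's TLS lives behind a GDT descriptor that user code reaches through
// a segment register.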

/// Architecture-specific per-CPU state.
pub struct CPU {
    cpuid: usize,
    gdt: GDT,
    tss: TSS,
    interrupt: InterruptControl,
}

impl UserTLS {
    /// # Returns
    /// The TLS descriptor and the index of the TLS segment in the GDT.
    pub fn new32(base: u32, limit: u32, is_limit_in_pages: bool) -> (Self, u32) {
        // Flags: G (granularity) set when the limit is counted in 4 KiB
        // pages, plus D/B for a 32-bit segment.
        let flags = if is_limit_in_pages { 0xc } else { 0x4 };

        (
            Self::TLS32 {
                base: base as u64,
                // Access byte 0xf2: present, DPL 3, writable data segment.
                desc: GDTEntry::new(base, limit, 0xf2, flags),
            },
            7,
        )
    }
}
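
// A sketch of how `new32` might be wired into a `set_thread_area`-style
// syscall (hypothetical call site; the selector encoding, (index << 3) with
// RPL 3, is the standard one for a ring-3 GDT selector):
//
//     let (tls, index) = UserTLS::new32(base, limit, limit_in_pages);
//     let selector = (index << 3) | 3;
//     CPU::local().as_mut().set_tls32(&tls);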

impl CPU {
    pub fn new() -> Self {
        let (interrupt_control, cpuid) = InterruptControl::new();

        Self {
            cpuid,
            gdt: GDT::new(),
            tss: TSS::new(),
            interrupt: interrupt_control,
        }
    }

    /// Load the GDT and TSS in place.
    ///
    /// # Safety
    /// Make sure preemption and interrupts are disabled before calling this function.
    pub(crate) unsafe fn init(mut self: Pin<&mut Self>) {
        let tss = &self.as_ref().get_ref().tss;
        let tss_addr = tss as *const _ as u64;

        let mut gdt = unsafe {
            // SAFETY: We don't move the field out.
            self.as_mut().map_unchecked_mut(|me| &mut me.gdt)
        };
        unsafe {
            // SAFETY: We don't move `gdt` out.
            gdt.as_mut().get_unchecked_mut().set_tss(tss_addr);
        }
        gdt.load();

        let mut interrupt = unsafe {
            // SAFETY: We don't move the field out.
            self.as_mut().map_unchecked_mut(|me| &mut me.interrupt)
        };
        // SAFETY: `self` is pinned, so are its fields.
        interrupt.as_mut().setup_idt();
        interrupt.as_mut().setup_timer();
    }
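
    // Note: `gdt.load()` presumably issues `lgdt` and reloads the task
    // register so the freshly installed TSS descriptor takes effect; the
    // IDT and the local timer are then set up through `InterruptControl`.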

    /// Bootstrap all CPUs.
    /// This should only be called on the BSP.
    pub unsafe fn bootstrap_cpus(&self) {
        self.interrupt.send_sipi();
    }

    pub unsafe fn load_interrupt_stack(self: Pin<&mut Self>, rsp: u64) {
        unsafe {
            self.map_unchecked_mut(|me| &mut me.tss)
                .set_rsp0(rsp + size_of::<TrapContext>() as u64);
        }
    }
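
    // RSP0 is the stack pointer the CPU loads on a ring-3 to ring-0
    // transition. Setting it to `rsp + size_of::<TrapContext>()` reserves
    // exactly one `TrapContext` worth of room at the top of the kernel
    // stack, so the hardware frame and saved registers land inside it.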

    pub fn set_tls32(self: Pin<&mut Self>, user_tls: &UserTLS) {
        let UserTLS::TLS32 { desc, base } = user_tls else {
            unimplemented!("TLS64 is not supported yet")
        };

        unsafe {
            // SAFETY: We don't move the GDT object.
            self.get_unchecked_mut().gdt.set_tls32(*desc);
        }

        const IA32_KERNEL_GS_BASE: u32 = 0xc0000102;
        wrmsr(IA32_KERNEL_GS_BASE, *base);
    }
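
    // Writing `IA32_KERNEL_GS_BASE` stages the TLS base in the "inactive"
    // GS slot; presumably the trap-return path executes `swapgs`, which
    // swaps it into GS.base as the task re-enters user mode.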

    pub fn cpuid(&self) -> usize {
        self.cpuid
    }

    pub fn end_of_interrupt(self: Pin<&mut Self>) {
        unsafe {
            // SAFETY: We don't move the `interrupt` field out.
            self.map_unchecked_mut(|me| &mut me.interrupt)
                .end_of_interrupt();
        }
    }

    pub fn local() -> PreemptGuard<Pin<&'static mut Self>> {
        unsafe {
            // SAFETY: We pass the reference into a `PreemptGuard`, which ensures
            // that preemption is disabled.
            PreemptGuard::new(Pin::new_unchecked(LOCAL_CPU.as_mut().get_mut()))
        }
    }
}
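
// A sketch of an interrupt-handler tail (with a hypothetical
// `dispatch_irq`): acknowledge the local interrupt controller only after
// the handler body, while the `PreemptGuard` still pins us to this core:
//
//     let mut cpu = CPU::local();
//     dispatch_irq(vector);
//     cpu.as_mut().end_of_interrupt();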

impl TSS {
    pub fn new() -> Self {
        Self {
            _reserved1: 0,
            rsp: [TSS_SP { low: 0, high: 0 }; 3],
            _reserved2: 0,
            _reserved3: 0,
            ist: [TSS_SP { low: 0, high: 0 }; 7],
            _reserved4: 0,
            _reserved5: 0,
            _reserved6: 0,
            iomap_base: 0,
            _pinned: PhantomPinned,
        }
    }
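
    // `iomap_base` is left at 0 here; whether an I/O permission bitmap is
    // actually consulted depends on the TSS limit programmed into the GDT
    // descriptor (a base at or beyond the limit disables the bitmap).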

    pub fn set_rsp0(self: Pin<&mut Self>, rsp: u64) {
        unsafe {
            // SAFETY: We don't move the TSS object.
            let me = self.get_unchecked_mut();
            me.rsp[0].low = rsp as u32;
            me.rsp[0].high = (rsp >> 32) as u32;
        }
    }
}

#[inline(always)]
pub fn halt() {
    unsafe {
        asm!("hlt", options(att_syntax, nostack));
    }
}
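
// Typical idle-loop use: enable interrupts and halt back-to-back (the
// classic `sti; hlt` pairing) so a wakeup interrupt cannot slip into the
// gap between the check for pending work and the halt.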

#[inline(always)]
pub fn rdmsr(msr: u32) -> u64 {
    let edx: u32;
    let eax: u32;

    unsafe {
        asm!(
            "rdmsr",
            in("ecx") msr,
            out("eax") eax,
            out("edx") edx,
            options(att_syntax),
        );
    }

    // `rdmsr` returns the high half in EDX and the low half in EAX.
    ((edx as u64) << 32) | eax as u64
}
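
// Example (a sketch): reading the architectural `IA32_APIC_BASE` MSR (0x1b)
// and masking the low 12 bits to recover the local APIC's MMIO base:
//
//     const IA32_APIC_BASE: u32 = 0x1b;
//     let apic_mmio = rdmsr(IA32_APIC_BASE) & !0xfff;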

#[inline(always)]
pub fn wrmsr(msr: u32, value: u64) {
    // `wrmsr` takes the low half in EAX and the high half in EDX.
    let eax = value as u32;
    let edx = (value >> 32) as u32;

    unsafe {
        asm!(
            "wrmsr",
            in("ecx") msr,
            in("eax") eax,
            in("edx") edx,
            options(att_syntax),
        );
    }
}
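
// Both `rdmsr` and `wrmsr` are privileged instructions; executing them at
// CPL > 0, or with a reserved MSR number, raises #GP. Callers are expected
// to run in ring 0 with a valid `msr` argument.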