// mm.rs

use crate::traits::mm::Memory;
use core::{
    arch::asm,
    marker::PhantomData,
    ptr::NonNull,
    sync::atomic::{compiler_fence, Ordering},
};
use eonix_mm::{
    address::{Addr as _, AddrOps, PAddr, PRange, PhysAccess, VAddr},
    page_table::{
        PageAttribute, PageTable, PageTableLevel, PagingMode, RawAttribute, RawPageTable,
        TableAttribute, PTE,
    },
    paging::{NoAlloc, Page, PageBlock, PAGE_SIZE, PFN},
};
use eonix_sync_base::LazyLock;
use loongArch64::register::pgdl;

pub const KIMAGE_OFFSET: usize = 0xffff_ffff_0000_0000;
pub const ROOT_PAGE_TABLE_PFN: usize = 0x8000_1000 >> 12;
pub const PAGE_TABLE_BASE: PFN = PFN::from_val(ROOT_PAGE_TABLE_PFN);

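// The global kernel page table. The root table is assumed to be set up by the
// early boot code at the fixed physical frame `PAGE_TABLE_BASE` (0x8000_1000),
// so we wrap the existing page rather than allocating a new one (`NoAlloc`).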
pub static GLOBAL_PAGE_TABLE: LazyLock<PageTable<ArchPagingMode, NoAlloc, ArchPhysAccess>> =
    LazyLock::new(|| unsafe {
        Page::with_raw(PAGE_TABLE_BASE, |root_table_page| {
            PageTable::with_root_table(root_table_page.clone())
        })
    });

// Hardware-defined PTE bits.
pub const PA_VP: u64 = (1 << 0) | (1 << 7); // Valid + physical page present.
pub const PA_D: u64 = 1 << 1; // Dirty.
pub const PA_U: u64 = 3 << 2; // PLV = 3, user accessible.
pub const PA_CACHED: u64 = 1 << 4; // MAT = coherent cached.
pub const PA_G: u64 = 1 << 6; // Global.
pub const PA_W: u64 = 1 << 8; // Writable.
pub const PA_NR: u64 = 1 << 61; // Not readable.
pub const PA_NX: u64 = 1 << 62; // Not executable.

// Software-defined bits, kept in the RSW (reserved for software) fields.
pub const PA_COW: u64 = 1 << 9;
pub const PA_MMAP: u64 = 1 << 10;
pub const PA_PT_USER: u64 = 1 << 59;
pub const PA_PT: u64 = 1 << 60;

// The non-PFN bits of an entry: bits 63..=59 and 11..=0.
pub const PA_FLAGS_MASK: u64 = 0xF800_0000_0000_0FFF;

/// A 64-bit LoongArch page table entry.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct PTE64(u64);

/// The raw attribute bits of a [`PTE64`].
#[derive(Clone, Copy)]
pub struct PageAttribute64(u64);

/// A borrowed view of one level of a page table: 512 consecutive [`PTE64`]s.
pub struct RawPageTable48<'a>(NonNull<PTE64>, PhantomData<&'a ()>);

/// The 48-bit, 4-level paging mode used on LoongArch64.
pub struct PagingMode48;

pub struct ArchPhysAccess;
pub struct ArchMemory;

impl PTE for PTE64 {
    type Attr = PageAttribute64;

    fn set(&mut self, pfn: PFN, attr: Self::Attr) {
        let pfn = ((usize::from(pfn) as u64) << 12) & !PA_FLAGS_MASK;
        self.0 = pfn | attr.0;
    }

    fn get(&self) -> (PFN, Self::Attr) {
        let pfn = PFN::from((self.0 & !PA_FLAGS_MASK) as usize >> 12);
        let attr = PageAttribute64(self.0 & PA_FLAGS_MASK);
        (pfn, attr)
    }
}

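// A minimal sketch of the `set`/`get` round trip (assuming `PFN: From<usize>`
// and `usize: From<PFN>`, both used elsewhere in this file):
//
//     let mut pte = PTE64(0);
//     pte.set(PFN::from(0x1234), PageAttribute64(PA_VP | PA_W | PA_CACHED));
//     let (pfn, attr) = pte.get();
//     // pfn is 0x1234 again; attr.0 == PA_VP | PA_W | PA_CACHED.
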
impl PagingMode for PagingMode48 {
    type Entry = PTE64;
    type RawTable<'a> = RawPageTable48<'a>;

    const LEVELS: &'static [PageTableLevel] = &[
        PageTableLevel::new(39, 9),
        PageTableLevel::new(30, 9),
        PageTableLevel::new(21, 9),
        PageTableLevel::new(12, 9),
    ];
}

pub type ArchPagingMode = PagingMode48;

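// With these levels, a 48-bit virtual address is split into four 9-bit table
// indices and a 12-bit page offset. A sketch of the index computation:
//
//     let vaddr = 0xffff_ff00_8000_1000_usize;
//     let l0 = (vaddr >> 39) & 0x1ff; // index into the root table
//     let l1 = (vaddr >> 30) & 0x1ff;
//     let l2 = (vaddr >> 21) & 0x1ff;
//     let l3 = (vaddr >> 12) & 0x1ff; // index into the leaf table
//     let offset = vaddr & 0xfff;
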
impl<'a> RawPageTable<'a> for RawPageTable48<'a> {
    type Entry = PTE64;

    fn index(&self, index: u16) -> &'a Self::Entry {
        unsafe { self.0.add(index as usize).as_ref() }
    }

    fn index_mut(&mut self, index: u16) -> &'a mut Self::Entry {
        unsafe { self.0.add(index as usize).as_mut() }
    }

    unsafe fn from_ptr(ptr: NonNull<PageBlock>) -> Self {
        Self(ptr.cast(), PhantomData)
    }
}

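// The software PA_PT bit discriminates entry kinds: entries with PA_PT set
// refer to a next-level table, while leaf pages leave it clear. This is why
// `as_page_attr` below returns `None` for table entries.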
impl RawAttribute for PageAttribute64 {
    fn null() -> Self {
        Self(0)
    }

    fn as_table_attr(self) -> Option<TableAttribute> {
        let mut table_attr = TableAttribute::empty();
        if self.0 & PA_PT == PA_PT {
            table_attr |= TableAttribute::PRESENT;
        }
        if self.0 & PA_PT_USER == PA_PT_USER {
            table_attr |= TableAttribute::USER;
        }
        Some(table_attr)
    }

    fn as_page_attr(self) -> Option<PageAttribute> {
        let mut page_attr = PageAttribute::empty();
        if self.0 & PA_PT == PA_PT {
            return None;
        }
        if self.0 & PA_VP == PA_VP {
            page_attr |= PageAttribute::PRESENT;
        }
        if self.0 & PA_NR == 0 {
            page_attr |= PageAttribute::READ;
        }
        if self.0 & PA_W != 0 {
            page_attr |= PageAttribute::WRITE;
        }
        if self.0 & PA_NX == 0 {
            page_attr |= PageAttribute::EXECUTE;
        }
        if self.0 & PA_U == PA_U {
            page_attr |= PageAttribute::USER;
        }
        if self.0 & PA_D != 0 {
            page_attr |= PageAttribute::DIRTY;
        }
        if self.0 & PA_G != 0 {
            page_attr |= PageAttribute::GLOBAL;
        }
        if self.0 & PA_COW != 0 {
            page_attr |= PageAttribute::COPY_ON_WRITE;
        }
        if self.0 & PA_MMAP != 0 {
            page_attr |= PageAttribute::MAPPED;
        }
        Some(page_attr)
    }
}

impl From<PageAttribute> for PageAttribute64 {
    fn from(page_attr: PageAttribute) -> Self {
        let mut raw_attr = PA_NR | PA_NX | PA_CACHED;
        for attr in page_attr.iter() {
            match attr {
                PageAttribute::PRESENT => raw_attr |= PA_VP,
                PageAttribute::READ => raw_attr &= !PA_NR,
                PageAttribute::WRITE => raw_attr |= PA_W,
                PageAttribute::EXECUTE => raw_attr &= !PA_NX,
                PageAttribute::USER => raw_attr |= PA_U,
                PageAttribute::DIRTY => raw_attr |= PA_D,
                PageAttribute::GLOBAL => raw_attr |= PA_G,
                PageAttribute::COPY_ON_WRITE => raw_attr |= PA_COW,
                PageAttribute::MAPPED => raw_attr |= PA_MMAP,
                PageAttribute::ACCESSED | PageAttribute::ANONYMOUS => {}
                _ => unreachable!("Invalid page attribute"),
            }
        }
        Self(raw_attr)
    }
}

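// Note the inverted-polarity bits: the conversion starts from "not readable,
// not executable, cached" and *clears* PA_NR/PA_NX when READ/EXECUTE are
// requested. For example, `PageAttribute::PRESENT | PageAttribute::READ`
// yields `PA_VP | PA_NX | PA_CACHED`: readable, but still not executable.
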
impl From<TableAttribute> for PageAttribute64 {
    fn from(table_attr: TableAttribute) -> Self {
        let mut raw_attr = 0;
        for attr in table_attr.iter() {
            match attr {
                TableAttribute::PRESENT => raw_attr |= PA_PT,
                TableAttribute::USER => raw_attr |= PA_PT_USER,
                TableAttribute::GLOBAL | TableAttribute::ACCESSED => {}
                _ => unreachable!("Invalid table attribute"),
            }
        }
        Self(raw_attr)
    }
}

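// All physical memory is accessed through a linear mapping at PHYS_OFFSET,
// presumably established by the boot code (e.g. via a DMW window on
// LoongArch), so physical-to-virtual conversion is plain offset arithmetic.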
impl ArchPhysAccess {
    const PHYS_OFFSET: usize = 0xffff_ff00_0000_0000;
}

impl PhysAccess for ArchPhysAccess {
    unsafe fn as_ptr<T>(paddr: PAddr) -> NonNull<T> {
        let alignment: usize = align_of::<T>();
        assert!(paddr.addr() % alignment == 0, "Alignment error");
        unsafe {
            // SAFETY: We can assume that `paddr.addr()` never equals
            // `-PHYS_OFFSET`, so the sum below is never null. Otherwise,
            // the kernel would already be broken.
            NonNull::new_unchecked((Self::PHYS_OFFSET + paddr.addr()) as *mut T)
        }
    }

    unsafe fn from_ptr<T>(ptr: NonNull<T>) -> PAddr {
        let addr = ptr.addr().get();
        assert!(addr % align_of::<T>() == 0, "Alignment error");
        assert!(
            addr >= Self::PHYS_OFFSET,
            "Address is not a valid physical address"
        );
        PAddr::from_val(addr - Self::PHYS_OFFSET)
    }
}

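// For example, with PHYS_OFFSET = 0xffff_ff00_0000_0000, the root page table
// at physical address 0x8000_1000 is reached through the virtual pointer
// 0xffff_ff00_8000_1000.
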
impl Memory for ArchMemory {
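    // The physical memory map is hard-coded: 256 MiB starting at physical
    // address 0 and 768 MiB starting at 0x8000_0000. The higher range is
    // yielded first.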
    fn present_ram() -> impl Iterator<Item = PRange> {
        let range1 = core::iter::once(PRange::from(PAddr::from_val(0)).grow(0x1000_0000));
        let range2 =
            core::iter::once(PRange::from(PAddr::from_val(0x8000_0000)).grow(0x3000_0000));
        range2.chain(range1)
    }

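    // Present RAM minus the kernel image: ranges lying entirely below the
    // kernel are kept as-is, and the range containing the image is clipped to
    // begin at the first page boundary past `__kernel_end`.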
    fn free_ram() -> impl Iterator<Item = PRange> {
        unsafe extern "C" {
            fn __kernel_start();
            fn __kernel_end();
        }

        let kernel_start = PAddr::from(__kernel_start as usize);
        let kernel_end = PAddr::from(__kernel_end as usize);
        let paddr_after_kimage_aligned = kernel_end.ceil_to(PAGE_SIZE);

        Self::present_ram()
            .filter(move |range| {
                range.end() <= kernel_start || range.end() > paddr_after_kimage_aligned
            })
            .map(move |range| {
                if range.end() > paddr_after_kimage_aligned
                    && range.start() < paddr_after_kimage_aligned
                {
                    let (_, right) = range.split_at(paddr_after_kimage_aligned);
                    right
                } else {
                    range
                }
            })
    }
}

pub type DefaultPagingMode = PagingMode48;

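// Invalidate the TLB entry for a single virtual address. `invtlb 0x5` clears
// non-global entries matching the given VA (with the ASID taken from `$zero`);
// the preceding `dbar` orders earlier page table writes before the
// invalidation.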
#[inline(always)]
pub fn flush_tlb(vaddr: usize) {
    unsafe {
        asm!(
            "dbar 0x0",
            "invtlb 0x5, $zero, {vaddr}",
            vaddr = in(reg) vaddr,
        );
    }
}

#[inline(always)]
pub fn flush_tlb_all() {
    unsafe {
        asm!("dbar 0x0", "invtlb 0x0, $zero, $zero");
    }
}

#[inline(always)]
pub fn get_root_page_table_pfn() -> PFN {
    PFN::from(PAddr::from(pgdl::read().base()))
}

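// Install a new user-space root page table. The compiler fences keep memory
// accesses from being reordered across the PGDL update, and the trailing
// `invtlb` drops any stale translations.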
#[inline(always)]
pub fn set_root_page_table_pfn(pfn: PFN) {
    compiler_fence(Ordering::SeqCst);
    unsafe {
        pgdl::set_base(PAddr::from(pfn).addr());
    }
    compiler_fence(Ordering::SeqCst);

    // Invalidate all user space TLB entries.
    unsafe {
        asm!("dbar 0x0", "invtlb 0x0, $zero, $zero");
    }
}

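// A sketch of the intended use when switching address spaces (`new_root_pfn`
// is hypothetical and would come from the target process's page table):
//
//     set_root_page_table_pfn(new_root_pfn);
//     // No separate flush is needed; the TLB is invalidated internally.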