
feat(hal): add riscv64's Sv39 page mode

Heinz 8 months ago
parent
commit
601c879103

+ 69 - 0
crates/eonix_hal/src/arch/riscv64/config.rs

@@ -0,0 +1,69 @@
+/// Memory management (MM) constants.
+pub mod mm {
+    pub const ROOT_PAGE_TABLE_PHYS_ADDR: usize = 0x8040_0000;
+    pub const PHYS_MAP_VIRT: usize = 0xffff_ffc0_0000_0000;
+    pub const KIMAGE_PHYS_BASE: usize = 0x8020_0000;
+    pub const KIMAGE_OFFSET: usize = 0xffff_ffff_0000_0000;
+    pub const MMIO_VIRT_BASE: usize = KIMAGE_OFFSET;
+    pub const KIMAGE_VIRT_BASE: usize = KIMAGE_OFFSET + KIMAGE_PHYS_BASE;
+    pub const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS;
+    pub const PAGE_SIZE_BITS: usize = 12;
+    // 127 GiB
+    pub const MEMORY_SIZE: usize = 0x1F_C000_0000;
+
+    pub const PTE_SIZE: usize = 8;
+    pub const PTES_PER_PAGE: usize = PAGE_SIZE / PTE_SIZE;
+    pub const ROOT_PAGE_TABLE_PFN: usize = ROOT_PAGE_TABLE_PHYS_ADDR >> PAGE_SIZE_BITS;
+    pub const PAGE_TABLE_PHYS_END: usize = 0x8080_0000;
+
+    #[derive(Clone, Copy)]
+    pub enum PageSize {
+        _4KbPage = 4096,
+        _2MbPage = 2 * 1024 * 1024,
+        _1GbPage = 1024 * 1024 * 1024,
+    }
+}
+
+/// SMP (multi-hart) configuration.
+pub mod smp {
+    use spin::Once;
+
+    pub static NUM_HARTS: Once<usize> = Once::new();
+
+    pub fn set_num_harts(num: usize) {
+        NUM_HARTS.call_once(|| num);
+    }
+
+    pub fn get_num_harts() -> usize {
+        *NUM_HARTS.get().expect("NUM_HARTS should be initialized by now")
+    }
+}
+
+pub mod platform {
+    pub mod virt {
+        pub const PLIC_BASE: usize = 0x0C00_0000;
+
+        pub const PLIC_ENABLE_PER_HART_OFFSET: usize = 0x80; // each hart's enable block is 0x80 bytes (128 bytes)
+
+        pub const PLIC_THRESHOLD_CLAIM_COMPLETE_PER_HART_OFFSET: usize = 0x1000; // each hart/context block is 0x1000 bytes (4 KiB)
+
+        pub const PLIC_PRIORITY_OFFSET: usize = 0x0000_0000;
+        pub const PLIC_PENDING_OFFSET: usize = 0x0000_1000;
+        pub const PLIC_ENABLE_OFFSET: usize = 0x0000_2000; // Varies by context and mode (M/S/U)
+        pub const PLIC_THRESHOLD_OFFSET: usize = 0x0020_0000; // Varies by context and mode (M/S/U)
+        pub const PLIC_CLAIM_COMPLETE_OFFSET: usize = 0x0020_0004; // Varies by context and mode (M/S/U)
+
+        // PLIC Context IDs for S-mode (assuming hart 0's S-mode context is 1, hart 1's is 3, etc.)
+        // A common pattern is: context_id = hart_id * 2 + 1 (for S-mode)
+        pub const PLIC_S_MODE_CONTEXT_STRIDE: usize = 2;
+
+        // CLINT (Core Local Interruptor) memory-mapped registers
+        // Base address for CLINT on QEMU virt platform
+        pub const CLINT_BASE: usize = 0x200_0000;
+        pub const CLINT_MSIP_OFFSET: usize = 0x0000;      // Machine-mode Software Interrupt Pending (MSIP)
+        pub const CLINT_MTIMECMP_OFFSET: usize = 0x4000;  // Machine-mode Timer Compare (MTIMECMP)
+        pub const CLINT_MTIME_OFFSET: usize = 0xBFF8;
+        pub const CPU_FREQ_HZ: u64 = 10_000_000;
+    }
+}
+
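The PLIC constants above compose in the standard way: each context owns a 0x80-byte enable block and a 0x1000-byte threshold/claim block, and on QEMU virt hart N's S-mode context id is N * 2 + 1. A minimal sketch of the resulting address arithmetic (editor's illustration, not part of this commit; the helper names are hypothetical):

// Editor's sketch, not part of this commit. Assumes the QEMU `virt` context
// numbering noted above: hart N's S-mode context id is N * 2 + 1.
use crate::arch::riscv64::config::platform::virt::{
    PLIC_BASE, PLIC_ENABLE_OFFSET, PLIC_ENABLE_PER_HART_OFFSET, PLIC_S_MODE_CONTEXT_STRIDE,
    PLIC_THRESHOLD_CLAIM_COMPLETE_PER_HART_OFFSET, PLIC_THRESHOLD_OFFSET,
};

/// Hypothetical helper: PLIC context id of `hart_id`'s S-mode context.
fn s_mode_context(hart_id: usize) -> usize {
    hart_id * PLIC_S_MODE_CONTEXT_STRIDE + 1
}

/// Hypothetical helper: base of the interrupt-enable bitmap for that context.
fn s_mode_enable_base(hart_id: usize) -> usize {
    PLIC_BASE + PLIC_ENABLE_OFFSET + s_mode_context(hart_id) * PLIC_ENABLE_PER_HART_OFFSET
}

/// Hypothetical helper: priority-threshold register for that context
/// (the claim/complete register sits 4 bytes above it).
fn s_mode_threshold(hart_id: usize) -> usize {
    PLIC_BASE
        + PLIC_THRESHOLD_OFFSET
        + s_mode_context(hart_id) * PLIC_THRESHOLD_CLAIM_COMPLETE_PER_HART_OFFSET
}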

+ 314 - 0
crates/eonix_hal/src/arch/riscv64/mm.rs

@@ -0,0 +1,314 @@
+extern crate alloc;
+
+use crate::{arch::{fdt::get_global_fdt, riscv64::config::mm::KIMAGE_OFFSET}, traits::mm::Memory};
+use core::{marker::PhantomData, ptr::NonNull};
+use eonix_mm::{
+    address::{Addr as _, AddrOps, PAddr, PRange, PhysAccess, VAddr},
+    page_table::{
+        PageAttribute, PageTable, PageTableLevel, PagingMode, RawAttribute, RawPageTable,
+        TableAttribute, PTE,
+    },
+    paging::{NoAlloc, Page, PageBlock, PFN},
+};
+use eonix_sync_base::LazyLock;
+use fdt::Fdt;
+use riscv::{asm::sfence_vma_all, register::satp};
+
+use super::config::mm::{PHYS_MAP_VIRT, ROOT_PAGE_TABLE_PFN};
+
+pub const P_KIMAGE_START: PAddr = PAddr::from_val(0x8020_0000);
+pub const V_KERNEL_BSS_START: VAddr = VAddr::from(0xffff_ffff_0000_0000);
+
+pub const PAGE_TABLE_BASE: PFN = PFN::from_val(ROOT_PAGE_TABLE_PFN);
+pub static GLOBAL_PAGE_TABLE: LazyLock<PageTable<ArchPagingMode, NoAlloc, ArchPhysAccess>> =
+    LazyLock::new(|| unsafe {
+        Page::with_raw(PAGE_TABLE_BASE, |root_table_page| {
+            PageTable::with_root_table(root_table_page.clone())
+        })
+    });
+
+pub const PA_V: u64 = 0b1 << 0; // valid
+pub const PA_R: u64 = 0b1 << 1; // readable
+pub const PA_W: u64 = 0b1 << 2; // writable
+pub const PA_X: u64 = 0b1 << 3; // executable
+pub const PA_U: u64 = 0b1 << 4; // user-accessible
+pub const PA_G: u64 = 0b1 << 5; // global
+pub const PA_A: u64 = 0b1 << 6; // accessed
+pub const PA_D: u64 = 0b1 << 7; // dirty
+
+// Software-defined bits in the PTE's RSW field (bits 8-9).
+pub const PA_COW: u64 = 0b1 << 8;
+pub const PA_MMAP: u64 = 0b1 << 9;
+
+#[allow(dead_code)]
+pub const PA_SHIFT: u64 = 10;
+// Bits 0-9 (V, R, W, X, U, G, A, D, RSW)
+#[allow(dead_code)]
+pub const PA_FLAGS_MASK: u64 = 0x3FF; // 0b11_1111_1111
+
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct PTE64(pub u64);
+
+#[derive(Clone, Copy)]
+pub struct PageAttribute64(u64);
+
+pub struct RawPageTableSv39<'a>(NonNull<PTE64>, PhantomData<&'a ()>);
+
+pub struct PagingModeSv39;
+
+pub struct ArchPhysAccess;
+
+pub struct ArchMemory;
+
+impl PTE for PTE64 {
+    type Attr = PageAttribute64;
+
+    fn set(&mut self, pfn: PFN, attr: Self::Attr) {
+        self.0 = (usize::from(pfn) << PA_SHIFT) as u64 | attr.0;
+    }
+
+    fn get(&self) -> (PFN, Self::Attr) {
+        let pfn = PFN::from(self.0 as usize >> PA_SHIFT);
+        let attr = PageAttribute64(self.0 & PA_FLAGS_MASK);
+        (pfn, attr)
+    }
+}
+
+impl PagingMode for PagingModeSv39 {
+    type Entry = PTE64;
+    type RawTable<'a> = RawPageTableSv39<'a>;
+    const LEVELS: &'static [PageTableLevel] = &[
+        PageTableLevel::new(30, 9),
+        PageTableLevel::new(21, 9),
+        PageTableLevel::new(12, 9),
+    ];
+}
+
+pub type ArchPagingMode = PagingModeSv39;
+
+impl<'a> RawPageTable<'a> for RawPageTableSv39<'a> {
+    type Entry = PTE64;
+
+    fn index(&self, index: u16) -> &'a Self::Entry {
+        unsafe { self.0.add(index as usize).as_ref() }
+    }
+
+    fn index_mut(&mut self, index: u16) -> &'a mut Self::Entry {
+        unsafe { self.0.add(index as usize).as_mut() }
+    }
+
+    unsafe fn from_ptr(ptr: NonNull<PageBlock>) -> Self {
+        Self(ptr.cast(), PhantomData)
+    }
+}
+
+impl RawAttribute for PageAttribute64 {
+    fn null() -> Self {
+        Self(0)
+    }
+
+    fn as_table_attr(self) -> Option<TableAttribute> {
+        let mut table_attr = TableAttribute::empty();
+
+        if self.0 & (PA_R | PA_W | PA_X) != 0 {
+            panic!("Encountered a huge page while parsing table attributes");
+        }
+
+        if self.0 & PA_V != 0 {
+            table_attr |= TableAttribute::PRESENT;
+        }
+        if self.0 & PA_G != 0 {
+            table_attr |= TableAttribute::GLOBAL;
+        }
+        if self.0 & PA_U != 0 {
+            table_attr |= TableAttribute::USER;
+        }
+        if self.0 & PA_A != 0 {
+            table_attr |= TableAttribute::ACCESSED;
+        }
+
+        Some(table_attr)
+    }
+
+    fn as_page_attr(self) -> Option<PageAttribute> {
+        let mut page_attr = PageAttribute::empty();
+
+        if self.0 & (PA_R | PA_W | PA_X) == 0 {
+            panic!("Encountered a non-leaf entry while parsing page attributes");
+        }
+
+        if self.0 & PA_V != 0 {
+            page_attr |= PageAttribute::PRESENT;
+        }
+
+        if self.0 & PA_R != 0 {
+            page_attr |= PageAttribute::READ;
+        }
+
+        if self.0 & PA_W != 0 {
+            page_attr |= PageAttribute::WRITE;
+        }
+
+        if self.0 & PA_X != 0 {
+            page_attr |= PageAttribute::EXECUTE;
+        }
+
+        if self.0 & PA_U != 0 {
+            page_attr |= PageAttribute::USER;
+        }
+
+        if self.0 & PA_A != 0 {
+            page_attr |= PageAttribute::ACCESSED;
+        }
+
+        if self.0 & PA_D != 0 {
+            page_attr |= PageAttribute::DIRTY;
+        }
+
+        if self.0 & PA_G != 0 {
+            page_attr |= PageAttribute::GLOBAL;
+        }
+
+        if self.0 & PA_COW != 0 {
+            page_attr |= PageAttribute::COPY_ON_WRITE;
+        }
+
+        if self.0 & PA_MMAP != 0 {
+            page_attr |= PageAttribute::MAPPED;
+        }
+
+        // `PageAttribute::ANONYMOUS` has no dedicated PTE bit in this scheme,
+        // so it is never reported here.
+
+        Some(page_attr)
+    }
+}
+
+impl From<PageAttribute> for PageAttribute64 {
+    fn from(page_attr: PageAttribute) -> Self {
+        let mut raw_attr = 0;
+
+        for attr in page_attr.iter() {
+            match attr {
+                PageAttribute::PRESENT => raw_attr |= PA_V,
+                PageAttribute::READ => raw_attr |= PA_R,
+                PageAttribute::WRITE => raw_attr |= PA_W,
+                PageAttribute::EXECUTE => raw_attr |= PA_X,
+                PageAttribute::USER => raw_attr |= PA_U,
+                PageAttribute::ACCESSED => raw_attr |= PA_A,
+                PageAttribute::DIRTY => raw_attr |= PA_D,
+                PageAttribute::GLOBAL => raw_attr |= PA_G,
+                PageAttribute::COPY_ON_WRITE => raw_attr |= PA_COW,
+                PageAttribute::MAPPED => raw_attr |= PA_MMAP,
+                PageAttribute::ANONYMOUS => {} // no dedicated PTE bit
+                _ => unreachable!("Invalid page attribute"),
+            }
+        }
+
+        Self(raw_attr)
+    }
+}
+
+impl From<TableAttribute> for PageAttribute64 {
+    fn from(table_attr: TableAttribute) -> Self {
+        let mut raw_attr = 0;
+
+        for attr in table_attr.iter() {
+            match attr {
+                TableAttribute::PRESENT => raw_attr |= PA_V,
+                TableAttribute::GLOBAL => raw_attr |= PA_G,
+                TableAttribute::USER => raw_attr |= PA_U,
+                TableAttribute::ACCESSED => raw_attr |= PA_A,
+                _ => unreachable!("Invalid table attribute"),
+            }
+        }
+
+        Self(raw_attr)
+    }
+}
+
+impl ArchPhysAccess {
+    const PHYS_OFFSET: usize = PHYS_MAP_VIRT;
+}
+
+impl PhysAccess for ArchPhysAccess {
+    unsafe fn as_ptr<T>(paddr: PAddr) -> NonNull<T> {
+        let alignment: usize = align_of::<T>();
+        assert!(paddr.addr() % alignment == 0, "Alignment error");
+
+        unsafe {
+            // SAFETY: We can assume that we'll never have `self.addr()` equals
+            //         to `-PHYS_OFFSET`. Otherwise, the kernel might be broken.
+            NonNull::new_unchecked((Self::PHYS_OFFSET + paddr.addr()) as *mut T)
+        }
+    }
+
+    unsafe fn from_ptr<T>(ptr: NonNull<T>) -> PAddr {
+        let addr = ptr.addr().get();
+
+        assert!(addr % align_of::<T>() == 0, "Alignment error");
+        assert!(
+            addr >= Self::PHYS_OFFSET,
+            "Address is not a valid physical address"
+        );
+
+        PAddr::from_val(addr - Self::PHYS_OFFSET)
+    }
+}
+
+impl Memory for ArchMemory {
+    /// TODO: avoid collecting into a `Vec`; return a borrowed iterator over
+    /// the FDT memory regions instead.
+    fn present_ram() -> impl Iterator<Item = PRange> {
+        let fdt: &'static Fdt<'static> = get_global_fdt();
+
+        let memory_node = fdt.memory().clone();
+
+        let owned_ranges: alloc::vec::Vec<PRange> = memory_node
+            .regions()
+            .map(|mem_region| {
+                let base = mem_region.starting_address as usize;
+                let size = mem_region.size.expect("memory region has no size") as usize;
+                PRange::from(PAddr::from(base)).grow(size)
+            })
+            .collect();
+
+        owned_ranges.into_iter()
+    }
+
+    fn free_ram() -> impl Iterator<Item = PRange> {
+        unsafe extern "C" {
+            fn __kernel_start();
+            fn __kernel_end();
+        }
+
+        let kernel_end = PAddr::from(__kernel_end as usize - KIMAGE_OFFSET);
+        // Round up to the next 2 MiB boundary (Sv39 huge-page alignment).
+        let paddr_after_kimage_aligned = kernel_end.ceil_to(0x20_0000);
+
+        core::iter::once(PRange::new(kernel_end, paddr_after_kimage_aligned))
+        .chain(
+            Self::present_ram()
+                .filter(move |range| range.end() > paddr_after_kimage_aligned)
+                .map(move |range| {
+                    if range.start() < paddr_after_kimage_aligned {
+                        let (_, right) = range.split_at(paddr_after_kimage_aligned);
+                        right
+                    } else {
+                        range
+                    }
+                }),
+        )
+    }
+}
+
+pub type DefaultPagingMode = PagingModeSv39;
+
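+/// Point `satp` at the prebuilt root page table in Sv39 mode and flush the
+/// TLB on the current hart.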
+pub fn setup_kernel_satp() {
+    unsafe {
+        satp::set(satp::Mode::Sv39, 0, PFN::from(ROOT_PAGE_TABLE_PFN).into());
+    }
+    sfence_vma_all();
+}
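
As a cross-check on the `LEVELS` table and the `PTE64` encoding above: Sv39 slices a virtual address into three 9-bit VPN indices at bit offsets 30, 21, and 12, and a leaf PTE packs the PFN as `ppn << 10 | flags`. A small standalone sketch (editor's illustration, not part of this commit; `vpn_indices` is a hypothetical helper):

// Editor's sketch, not part of this commit: how the Sv39 levels above slice
// a virtual address, and how the PFN round-trips through `ppn << 10 | flags`.
const LEVEL_SHIFTS: [usize; 3] = [30, 21, 12]; // from PageTableLevel::new(shift, 9)

/// Hypothetical helper: the three 9-bit VPN indices for `vaddr`, root level first.
fn vpn_indices(vaddr: usize) -> [usize; 3] {
    LEVEL_SHIFTS.map(|shift| (vaddr >> shift) & 0x1FF)
}

fn main() {
    // PHYS_MAP_VIRT = 0xffff_ffc0_0000_0000 sits at the bottom of Sv39's
    // high half: bits 63..=38 are all ones (sign extension), so the root
    // index is 256 and the lower-level indices are zero.
    let [vpn2, vpn1, vpn0] = vpn_indices(0xffff_ffc0_0000_0000);
    assert_eq!((vpn2, vpn1, vpn0), (0x100, 0, 0));

    // PTE64 packing, matching PA_SHIFT = 10 and PA_FLAGS_MASK = 0x3FF:
    let pfn: u64 = 0x80400; // ROOT_PAGE_TABLE_PHYS_ADDR >> 12
    let pte = pfn << 10 | 0b1; // PA_V
    assert_eq!(pte >> 10, pfn);
    assert_eq!(pte & 0x3FF, 0b1);
}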