
hal, x86: support the new folio abstraction

Check f9b5b3a3dfe6 ("mem: introduce new `Folio` abstraction") for more
details.

Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf, 1 week ago
commit 4ccaa6beeb
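
For readers skimming the diff below, the change is mechanical in shape: the old API allocated through associated constructors on `Page` (`Page::alloc_in`, `Page::alloc_order_in`, `Page::alloc_at_least_in`) with the allocator passed as an argument, while the new API calls methods on the allocator itself (`FrameAlloc::alloc`, `alloc_order`, `alloc_at_least`), which hand back a `Folio`. The following is a minimal, self-contained sketch of that call pattern; every type in it is a stand-in invented for the example, and only the method names are taken from the call sites in this diff (the real definitions are in eonix_mm, commit f9b5b3a3dfe6, and may differ).

    // Sketch only: stand-in types; method names match the diff's call sites.
    use core::cell::Cell;

    const PAGE_SIZE: usize = 4096;

    /// Stand-in for a physically contiguous run of page frames.
    struct Folio {
        start_pfn: usize,
        page_count: usize,
    }

    impl Folio {
        /// Physical address of the first byte of the folio.
        fn start(&self) -> usize {
            self.start_pfn * PAGE_SIZE
        }
        /// Leak the folio so the frames stay allocated, as the diff does for
        /// the per-CPU area, the AP stacks and the page-table frames.
        fn into_raw(self) -> usize {
            self.start_pfn
        }
    }

    /// Stand-in for the new allocator trait: the allocator hands out folios
    /// directly, replacing the old `Page::alloc_in`, `Page::alloc_order_in`
    /// and `Page::alloc_at_least_in` constructors. The `Option` return is an
    /// assumption; the diff only shows `.unwrap()` on the result.
    trait FrameAlloc {
        fn alloc_at_least(&self, page_count: usize) -> Option<Folio>;
        fn alloc(&self) -> Option<Folio> {
            self.alloc_at_least(1)
        }
        fn alloc_order(&self, order: u32) -> Option<Folio> {
            self.alloc_at_least(1usize << order)
        }
    }

    /// Toy bump allocator, just enough to exercise the trait.
    struct BumpAlloc(Cell<usize>);

    impl FrameAlloc for BumpAlloc {
        fn alloc_at_least(&self, page_count: usize) -> Option<Folio> {
            let start_pfn = self.0.get();
            self.0.set(start_pfn + page_count);
            Some(Folio { start_pfn, page_count })
        }
    }

    fn main() {
        let alloc = BumpAlloc(Cell::new(0x100));

        // Old: Page::alloc_at_least_in(page_count, alloc)   (per-CPU area)
        let percpu = alloc.alloc_at_least(3).unwrap();
        // Old: Page::alloc_order_in(4, page_alloc)          (AP stack)
        let ap_stack = alloc.alloc_order(4).unwrap();
        // Old: Page::alloc_in(&alloc)                       (BSS mapping)
        let bss_page = alloc.alloc().unwrap();

        println!(
            "percpu {:#x} ({} pages), stack {:#x}, bss page {:#x}",
            percpu.start(),
            percpu.page_count,
            ap_stack.start(),
            bss_page.start(),
        );

        // Ownership is released with into_raw(), exactly as in the diff.
        percpu.into_raw();
        ap_stack.into_raw();
        bss_page.into_raw();
    }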

crates/eonix_hal/src/arch/x86_64/bootstrap/init.rs (+31, -31)

@@ -9,17 +9,20 @@ use acpi::{AcpiHandler, AcpiTables, PhysicalMapping, PlatformInfo};
 use eonix_hal_traits::mm::Memory;
 use eonix_mm::address::{Addr as _, PAddr, PRange, PhysAccess, VRange};
 use eonix_mm::page_table::{PageAttribute, PagingMode, PTE as _};
-use eonix_mm::paging::{Page, PageAccess, PageAlloc, PAGE_SIZE};
+use eonix_mm::paging::{Folio, FrameAlloc, PageAccess, PAGE_SIZE};
 use eonix_percpu::PercpuArea;
 
 use crate::arch::bootstrap::{EARLY_GDT_DESCRIPTOR, KERNEL_PML4};
 use crate::arch::cpu::{wrmsr, CPU};
 use crate::arch::io::Port8;
-use crate::arch::mm::{ArchPhysAccess, GLOBAL_PAGE_TABLE, V_KERNEL_BSS_START};
+use crate::arch::mm::{
+    with_global_page_table, ArchPhysAccess, PageAccessImpl, GLOBAL_PAGE_TABLE,
+    V_KERNEL_BSS_START,
+};
 use crate::bootstrap::BootStrapData;
+use crate::extern_symbol_value;
 use crate::mm::{
-    ArchMemory, ArchPagingMode, BasicPageAlloc, BasicPageAllocRef,
-    ScopedAllocator,
+    ArchMemory, BasicPageAlloc, BasicPageAllocRef, ScopedAllocator,
 };
 
 static BSP_PAGE_ALLOC: AtomicPtr<RefCell<BasicPageAlloc>> =
@@ -122,14 +125,17 @@ fn enable_sse() {
     }
 }
 
-fn setup_cpu(alloc: impl PageAlloc) {
+fn setup_cpu(alloc: impl FrameAlloc) {
     let mut percpu_area = PercpuArea::new(|layout| {
         // TODO: Use page size defined in `arch`.
         let page_count = layout.size().div_ceil(PAGE_SIZE);
-        let page = Page::alloc_at_least_in(page_count, alloc);
+        let folio = alloc.alloc_at_least(page_count).unwrap();
 
-        let ptr = ArchPhysAccess::get_ptr_for_page(&page).cast();
-        page.into_raw();
+        let ptr = unsafe {
+            // TODO: safety
+            ArchPhysAccess::as_ptr(folio.start())
+        };
+        folio.into_raw();
 
         ptr
     });
@@ -225,7 +231,7 @@ fn bootstrap_smp(alloc: impl Allocator, page_alloc: &RefCell<BasicPageAlloc>) {
         let stack_range = {
             let page_alloc = BasicPageAllocRef::new(&page_alloc);
 
-            let ap_stack = Page::alloc_order_in(4, page_alloc);
+            let ap_stack = page_alloc.alloc_order(4).unwrap();
             let stack_range = ap_stack.range();
             ap_stack.into_raw();
 
@@ -269,44 +275,38 @@ fn bootstrap_smp(alloc: impl Allocator, page_alloc: &RefCell<BasicPageAlloc>) {
 }
 
 pub extern "C" fn kernel_init() -> ! {
-    let global_page_table = &GLOBAL_PAGE_TABLE;
-    let paging_levels = ArchPagingMode::LEVELS;
-
     enable_sse();
 
     let real_allocator = RefCell::new(BasicPageAlloc::new());
     let alloc = BasicPageAllocRef::new(&real_allocator);
 
-    unsafe extern "C" {
-        fn BSS_LENGTH();
-    }
+    let bss_length = extern_symbol_value!(BSS_LENGTH);
+    let bss_range = VRange::from(V_KERNEL_BSS_START).grow(bss_length);
 
     for range in ArchMemory::free_ram() {
         real_allocator.borrow_mut().add_range(range);
     }
 
     // Map kernel BSS
-    for pte in global_page_table.iter_kernel_in(
-        VRange::from(V_KERNEL_BSS_START).grow(BSS_LENGTH as usize),
-        paging_levels,
-        &alloc,
-    ) {
-        let attr = PageAttribute::PRESENT
-            | PageAttribute::WRITE
-            | PageAttribute::READ
-            | PageAttribute::HUGE
-            | PageAttribute::GLOBAL;
-
-        let page = Page::alloc_in(&alloc);
-        pte.set(page.into_raw(), attr.into());
-    }
+    with_global_page_table(alloc.clone(), PageAccessImpl, |table| {
+        for pte in table.iter_kernel(bss_range) {
+            let attr = PageAttribute::PRESENT
+                | PageAttribute::WRITE
+                | PageAttribute::READ
+                | PageAttribute::HUGE
+                | PageAttribute::GLOBAL;
+
+            let page = alloc.alloc().unwrap();
+            pte.set(page.into_raw(), attr.into());
+        }
+    });
 
     unsafe {
         // SAFETY: We've just mapped the area with sufficient length.
         core::ptr::write_bytes(
-            V_KERNEL_BSS_START.addr() as *mut (),
+            bss_range.start().addr() as *mut u8,
             0,
-            BSS_LENGTH as usize,
+            bss_length,
         );
     }
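
One detail above: the open-coded `unsafe extern "C" { fn BSS_LENGTH(); }` declaration is replaced by an `extern_symbol_value!` macro imported from the crate root. The macro's definition is not part of this diff; a plausible shape, assuming it merely packages the pattern the removed lines spelled out by hand (edition 2024 `unsafe extern` syntax, matching the crate), would be:

    // Hypothetical sketch only -- the real extern_symbol_value! is defined
    // elsewhere in eonix_hal and may differ.
    macro_rules! extern_symbol_value {
        ($sym:ident) => {{
            // The linker script defines the symbol so that its *address*
            // is the value of interest (here, the BSS length in bytes).
            unsafe extern "C" {
                fn $sym();
            }
            $sym as usize
        }};
    }

    // Used as in the diff:
    //     let bss_length = extern_symbol_value!(BSS_LENGTH);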
 

crates/eonix_hal/src/arch/x86_64/mm.rs (+31, -11)

@@ -6,10 +6,10 @@ use eonix_mm::address::{
     Addr as _, AddrOps as _, PAddr, PRange, PhysAccess, VAddr,
 };
 use eonix_mm::page_table::{
-    PageAttribute, PageTable, PageTableLevel, PagingMode, RawAttribute,
-    RawPageTable, TableAttribute, PTE,
+    PageAttribute, PageTable, PageTableAlloc, PageTableLevel, PagingMode,
+    RawAttribute, RawPageTable, TableAttribute, PTE,
 };
-use eonix_mm::paging::{NoAlloc, Page, PageBlock, PAGE_SIZE, PFN};
+use eonix_mm::paging::{BasicFolio, PageAccess, PageBlock, PAGE_SIZE, PFN};
 use eonix_sync_base::LazyLock;
 
 use crate::traits::mm::Memory;
@@ -35,14 +35,7 @@ pub const P_KIMAGE_START: PAddr = PAddr::from_val(0x200000);
 pub const V_KERNEL_BSS_START: VAddr = VAddr::from(0xffffffffc0200000);
 
 const KERNEL_PML4_PFN: PFN = PFN::from_val(0x1000 >> 12);
-
-pub static GLOBAL_PAGE_TABLE: LazyLock<
-    PageTable<ArchPagingMode, NoAlloc, ArchPhysAccess>,
-> = LazyLock::new(|| unsafe {
-    Page::with_raw(KERNEL_PML4_PFN, |root_table_page| {
-        PageTable::with_root_table(root_table_page.clone())
-    })
-});
+pub const GLOBAL_PAGE_TABLE: BasicFolio = BasicFolio::new(KERNEL_PML4_PFN, 0);
 
 #[repr(transparent)]
 pub struct PTE64(u64);
@@ -56,6 +49,9 @@ pub struct PagingMode4Levels;
 
 pub struct ArchPhysAccess;
 
+#[derive(Clone)]
+pub struct PageAccessImpl;
+
 pub struct ArchMemory;
 
 #[repr(C)]
@@ -124,6 +120,9 @@ impl<'a> RawPageTable<'a> for RawPageTable4Levels<'a> {
     }
 }
 
+unsafe impl Send for RawPageTable4Levels<'_> {}
+unsafe impl Sync for RawPageTable4Levels<'_> {}
+
 impl RawAttribute for PageAttribute64 {
     fn null() -> Self {
         Self(0)
@@ -278,6 +277,12 @@ impl PhysAccess for ArchPhysAccess {
     }
 }
 
+impl PageAccess for PageAccessImpl {
+    unsafe fn get_ptr_for_pfn(&self, pfn: PFN) -> NonNull<PageBlock> {
+        unsafe { ArchPhysAccess::as_ptr(PAddr::from(pfn)) }
+    }
+}
+
 impl E820MemMapEntry {
     const ENTRY_FREE: u32 = 1;
     // const ENTRY_USED: u32 = 2;
@@ -425,3 +430,18 @@ pub fn set_root_page_table_pfn(pfn: PFN) {
         );
     }
 }
+
+pub fn with_global_page_table<A, X>(
+    alloc: A, access: X,
+    func: impl FnOnce(&mut PageTable<ArchPagingMode, A, X>),
+) where
+    A: PageTableAlloc<Folio = BasicFolio>,
+    X: PageAccess,
+{
+    let mut global_page_table =
+        PageTable::new(GLOBAL_PAGE_TABLE.clone(), alloc, access);
+
+    func(&mut global_page_table);
+
+    core::mem::forget(global_page_table);
+}
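
A note on the `core::mem::forget` at the end of `with_global_page_table`: the helper wraps the global root folio (the kernel PML4 frame, which must never be handed back to an allocator) in a temporary `PageTable`, lets the closure edit mappings through it, and then forgets the wrapper, presumably so whatever cleanup `PageTable`'s drop would perform is skipped for this shared, statically-owned root. A stand-alone model of that pattern, with all names invented for the demo:

    use core::mem;

    /// Stand-in for a frame-owning handle whose Drop would hand the frame
    /// back to the allocator.
    struct OwnedRootFrame(&'static str);

    impl Drop for OwnedRootFrame {
        fn drop(&mut self) {
            println!("releasing {} back to the frame allocator", self.0);
        }
    }

    /// Stand-in for PageTable: owns its root frame and would drop it.
    struct TableWrapper {
        root: OwnedRootFrame,
    }

    fn with_borrowed_root(func: impl FnOnce(&mut TableWrapper)) {
        // The "global" root frame is not really ours to free.
        let mut table = TableWrapper {
            root: OwnedRootFrame("kernel PML4"),
        };

        func(&mut table);

        // Skip Drop so the shared root frame is never handed back,
        // mirroring the core::mem::forget in with_global_page_table.
        mem::forget(table);
    }

    fn main() {
        with_borrowed_root(|table| {
            println!("editing mappings rooted at {}", table.root.0);
        });
        // No "releasing kernel PML4 ..." line is printed: the wrapper was
        // forgotten rather than dropped.
    }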