
riscv64: rewrite FDT and present/free memory parsing

Strip the memory used by the kernel image and by the FDT data out of the
free memory blocks returned by the FDT.

Closes: #54 ("Random kernel freezing on process creation / exiting")
Fixes: 4351cf55739f3 ("partial work: fix riscv64 bootstrap")
Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf · 1 week ago · commit 5814c006a8
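
For orientation, the effect of the change can be illustrated with a small standalone sketch. The `Range` type and helpers below are simplified stand-ins for `PRange`/`PAddr` from `eonix_mm`, and the addresses are made up; only the splitting idea comes from the patch.

// Simplified stand-in for eonix_mm's PRange: a half-open [start, end) range.
#[derive(Clone, Copy)]
struct Range {
    start: usize,
    end: usize,
}

impl Range {
    fn overlaps(&self, other: &Range) -> bool {
        self.start < other.end && other.start < self.end
    }
}

// Roughly mirrors FdtExt::free_ram: for every present-RAM range, drop the
// part below the end of the kernel image, and drop the FDT blob together
// with anything after it in the same range.
fn free_ram(present: &[Range], kernel_end: usize, fdt: Range) -> Vec<Range> {
    present
        .iter()
        .copied()
        .map(|mut r| {
            // Strip anything below the end of the kernel image.
            r.start = r.start.max(kernel_end);
            // Strip the FDT blob and whatever follows it in this range.
            if r.overlaps(&fdt) {
                r.end = fdt.start;
            }
            r
        })
        .filter(|r| r.start < r.end) // discard ranges that became empty
        .collect()
}

fn main() {
    // Made-up layout: one RAM block at 0x8000_0000..0x8800_0000, kernel image
    // ending at 0x8040_0000, FDT blob at 0x8210_0000..0x8210_2000.
    let present = [Range { start: 0x8000_0000, end: 0x8800_0000 }];
    let fdt = Range { start: 0x8210_0000, end: 0x8210_2000 };
    for r in free_ram(&present, 0x8040_0000, fdt) {
        println!("free: {:#x}..{:#x}", r.start, r.end);
    }
    // Prints: free: 0x80400000..0x82100000
}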

+ 3 - 4
crates/eonix_hal/src/arch/riscv64/bootstrap.rs

@@ -29,7 +29,7 @@ use super::time::set_next_timer;
 use crate::arch::cpu::CPU;
 use crate::arch::fdt::{init_dtb_and_fdt, FdtExt, FDT};
 use crate::arch::mm::{
-    ArchPagingMode, ArchPhysAccess, FreeRam, PageAccessImpl, PageAttribute64,
+    ArchPagingMode, ArchPhysAccess, PageAccessImpl, PageAttribute64,
     RawPageTableSv48, GLOBAL_PAGE_TABLE,
 };
 use crate::bootstrap::BootStrapData;
@@ -113,13 +113,12 @@ unsafe extern "C" fn _start(hart_id: usize, dtb_addr: usize) {
 }
 
 pub unsafe extern "C" fn riscv64_start(hart_id: usize, dtb_addr: PAddr) -> ! {
-    let fdt = Fdt::from_ptr(ArchPhysAccess::as_ptr(dtb_addr).as_ptr())
-        .expect("Failed to parse DTB from static memory.");
+    let fdt = unsafe { FdtExt::new(dtb_addr) };
 
     let real_allocator = RefCell::new(BasicPageAlloc::new());
     let alloc = BasicPageAllocRef::new(&real_allocator);
 
-    for range in fdt.present_ram().free_ram() {
+    for range in fdt.free_ram() {
         real_allocator.borrow_mut().add_range(range);
     }
 

+ 74 - 41
crates/eonix_hal/src/arch/riscv64/fdt.rs

@@ -1,62 +1,95 @@
-use super::mm::{ArchPhysAccess, PresentRam};
-use crate::arch::riscv64::config::mm::KIMAGE_OFFSET;
-use core::sync::atomic::{AtomicPtr, Ordering};
-use eonix_mm::address::{PAddr, PRange, PhysAccess};
+use core::ops::Deref;
+use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+
+use eonix_mm::address::{Addr, AddrOps, PAddr, PRange, PhysAccess};
 use eonix_sync_base::LazyLock;
 use fdt::Fdt;
 
-static DTB_VIRT_PTR: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
-pub static FDT: LazyLock<Fdt<'static>> = LazyLock::new(|| unsafe {
-    Fdt::from_ptr(DTB_VIRT_PTR.load(Ordering::Acquire))
-        .expect("Failed to parse DTB from static memory.")
+use super::mm::ArchPhysAccess;
+use crate::arch::riscv64::config::mm::KIMAGE_OFFSET;
+use crate::extern_symbol_addr;
+
+static DTB_PADDR: AtomicUsize = AtomicUsize::new(0);
+pub static FDT: LazyLock<FdtExt> = LazyLock::new(|| unsafe {
+    FdtExt::new(PAddr::from_val(DTB_PADDR.load(Ordering::Relaxed)))
 });
 
-pub trait FdtExt {
-    fn harts(&self) -> impl Iterator<Item = usize>;
+pub struct FdtExt {
+    fdt: Fdt<'static>,
+    range: PRange,
+}
 
-    fn hart_count(&self) -> usize {
-        self.harts().count()
-    }
+impl FdtExt {
+    /// # Safety
+    /// The caller MUST ensure that `addr` points to a valid FDT.
+    pub unsafe fn new(addr: PAddr) -> Self {
+        let fdt = unsafe {
+            Fdt::from_ptr(ArchPhysAccess::as_ptr(addr).as_ptr())
+                .expect("Failed to parse DTB from static memory.")
+        };
 
-    fn present_ram(&self) -> impl Iterator<Item = PRange>;
-}
+        Self {
+            range: PRange::from(addr).grow(fdt.total_size()),
+            fdt,
+        }
+    }
 
-impl FdtExt for Fdt<'_> {
-    fn harts(&self) -> impl Iterator<Item = usize> {
+    pub fn harts(&self) -> impl Iterator<Item = usize> {
         self.cpus().map(|cpu| cpu.ids().all()).flatten()
     }
 
-    fn present_ram(&self) -> impl Iterator<Item = PRange> + PresentRam {
-        struct Present<I>(I);
-        impl<I> PresentRam for Present<I> where I: Iterator<Item = PRange> {}
-        impl<I> Iterator for Present<I>
-        where
-            I: Iterator<Item = PRange>,
-        {
-            type Item = PRange;
-
-            fn next(&mut self) -> Option<Self::Item> {
-                self.0.next()
-            }
-        }
+    pub fn hart_count(&self) -> usize {
+        self.harts().count()
+    }
 
+    pub fn present_ram(&self) -> impl Iterator<Item = PRange> {
         let mut index = 0;
-        Present(core::iter::from_fn(move || {
-            self.memory()
+
+        core::iter::from_fn(move || {
+            let item = self
+                .memory()
                 .regions()
                 .filter_map(|region| {
-                    region.size.map(|len| {
-                        PRange::from(PAddr::from(region.starting_address as usize)).grow(len)
-                    })
+                    let start = PAddr::from(region.starting_address as usize);
+                    Some(start).zip(region.size)
                 })
-                .skip(index)
-                .next()
-                .inspect(|_| index += 1)
-        }))
+                .map(|(start, len)| PRange::from(start).grow(len))
+                .nth(index);
+
+            index += 1;
+            item
+        })
+    }
+
+    pub fn free_ram(&self) -> impl Iterator<Item = PRange> {
+        let kernel_end = extern_symbol_addr!(__kernel_end) - KIMAGE_OFFSET;
+        let kernel_end = PAddr::from(kernel_end).ceil();
+
+        // TODO: move this to some platform-specific crate
+        self.present_ram().map(move |mut range| {
+            // Strip out parts before __kernel_end
+            if range.overlap_with(&PRange::from(kernel_end)) {
+                (_, range) = range.split_at(kernel_end);
+            }
+
+            // Strip out the FDT and anything after it in this range
+            if range.overlap_with(&self.range) {
+                (range, _) = range.split_at(self.range.start());
+            }
+
+            range
+        })
+    }
+}
+
+impl Deref for FdtExt {
+    type Target = Fdt<'static>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.fdt
     }
 }
 
 pub unsafe fn init_dtb_and_fdt(dtb_paddr: PAddr) {
-    let dtb_virt_ptr = ArchPhysAccess::as_ptr(dtb_paddr);
-    DTB_VIRT_PTR.store(dtb_virt_ptr.as_ptr(), Ordering::Release);
+    DTB_PADDR.store(dtb_paddr.addr(), Ordering::Relaxed);
 }
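
A side note on the `present_ram` rewrite above: the returned iterator is a `core::iter::from_fn` closure that re-creates the FDT region iterator on every call and takes the `index`-th element, presumably so the closure never has to hold a borrowed inner iterator between calls. A minimal, self-contained sketch of that pattern over a plain slice (names here are illustrative, not from the kernel):

// The same from_fn + nth pattern as in present_ram, demonstrated on a slice.
fn regions_one_by_one(regions: &[usize]) -> impl Iterator<Item = usize> + '_ {
    let mut index = 0;
    core::iter::from_fn(move || {
        // Re-walk the collection on each call and pick the index-th element,
        // instead of keeping an inner iterator alive between calls.
        let item = regions.iter().copied().nth(index);
        index += 1;
        item
    })
}

fn main() {
    let regions = [0x8000_0000usize, 0x9000_0000, 0xa000_0000];
    let collected: Vec<usize> = regions_one_by_one(&regions).collect();
    assert_eq!(collected, vec![0x8000_0000, 0x9000_0000, 0xa000_0000]);
}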

+ 1 - 35
crates/eonix_hal/src/arch/riscv64/mm.rs

@@ -275,46 +275,12 @@ impl Memory for ArchMemory {
     }
 
     fn free_ram() -> impl Iterator<Item = PRange> {
-        FDT.present_ram().free_ram()
+        FDT.free_ram()
     }
 }
 
 pub type DefaultPagingMode = PagingModeSv48;
 
-pub trait PresentRam: Iterator<Item = PRange> {}
-
-pub trait FreeRam: PresentRam {
-    fn free_ram(self) -> impl Iterator<Item = PRange>;
-}
-
-impl<T> FreeRam for T
-where
-    T: PresentRam,
-{
-    fn free_ram(self) -> impl Iterator<Item = PRange> {
-        let kernel_end = extern_symbol_addr!(__kernel_end) - KIMAGE_OFFSET;
-        let kernel_end = PAddr::from(kernel_end).ceil();
-
-        let paddr_after_kimage_aligned = kernel_end.ceil_to(0x200000);
-
-        core::iter::once(PRange::new(kernel_end, paddr_after_kimage_aligned))
-            .chain(
-                self.filter(move |range| {
-                    range.end() > paddr_after_kimage_aligned
-                })
-                .map(move |range| {
-                    if range.start() < paddr_after_kimage_aligned {
-                        let (_, right) =
-                            range.split_at(paddr_after_kimage_aligned);
-                        right
-                    } else {
-                        range
-                    }
-                }),
-            )
-    }
-}
-
 #[inline(always)]
 pub fn flush_tlb(vaddr: usize) {
     sfence_vma(0, vaddr);
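
One more design note: with the `PresentRam`/`FreeRam` trait plumbing removed here, the new `FdtExt` in fdt.rs is a plain wrapper that records the blob's physical range and `Deref`s to the parsed `Fdt`, so callers keep direct access to the inner type's methods. A tiny sketch of that wrapper pattern with made-up types (not the kernel's):

use core::ops::Deref;

// Stand-in for the parsed fdt::Fdt.
struct Parsed {
    version: u32,
}

impl Parsed {
    fn version(&self) -> u32 {
        self.version
    }
}

// Wrapper that carries extra metadata but still exposes the inner type's
// API through Deref, the way FdtExt does for Fdt<'static>.
struct ParsedExt {
    inner: Parsed,
    blob_len: usize,
}

impl Deref for ParsedExt {
    type Target = Parsed;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

fn main() {
    let ext = ParsedExt {
        inner: Parsed { version: 17 },
        blob_len: 4096,
    };
    // Inner methods are reachable directly on the wrapper via deref coercion.
    assert_eq!(ext.version(), 17);
    assert_eq!(ext.blob_len, 4096);
}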