Bladeren bron

style: reformat the files related to next patches

Reformat the files with the new format style to make the real changes
clearer.

Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf 1 week geleden
bovenliggende
commit
2392ac19d2
3 gewijzigde bestanden met toevoegingen van 70 en 35 verwijderingen
  1. 36 14
      crates/eonix_hal/src/arch/riscv64/bootstrap.rs
  2. 32 20
      crates/eonix_hal/src/arch/riscv64/mm.rs
  3. 2 1
      src/kernel_init.rs

+ 36 - 14
crates/eonix_hal/src/arch/riscv64/bootstrap.rs

@@ -7,8 +7,12 @@ use core::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
 
 use eonix_hal_traits::mm::Memory;
 use eonix_mm::address::{Addr as _, PAddr, PRange, PhysAccess, VAddr, VRange};
-use eonix_mm::page_table::{PageAttribute, PageTable, PagingMode, TableAttribute, PTE as _};
-use eonix_mm::paging::{Folio, FrameAlloc, PageAccess, PageBlock, PAGE_SIZE, PFN};
+use eonix_mm::page_table::{
+    PageAttribute, PageTable, PagingMode, TableAttribute, PTE as _,
+};
+use eonix_mm::paging::{
+    Folio, FrameAlloc, PageAccess, PageBlock, PAGE_SIZE, PFN,
+};
 use eonix_percpu::PercpuArea;
 use fdt::Fdt;
 use riscv::asm::sfence_vma_all;
@@ -25,11 +29,13 @@ use super::time::set_next_timer;
 use crate::arch::cpu::CPU;
 use crate::arch::fdt::{init_dtb_and_fdt, FdtExt, FDT};
 use crate::arch::mm::{
-    ArchPagingMode, ArchPhysAccess, FreeRam, PageAccessImpl, PageAttribute64, RawPageTableSv48,
-    GLOBAL_PAGE_TABLE,
+    ArchPagingMode, ArchPhysAccess, FreeRam, PageAccessImpl, PageAttribute64,
+    RawPageTableSv48, GLOBAL_PAGE_TABLE,
 };
 use crate::bootstrap::BootStrapData;
-use crate::mm::{ArchMemory, BasicPageAlloc, BasicPageAllocRef, ScopedAllocator};
+use crate::mm::{
+    ArchMemory, BasicPageAlloc, BasicPageAllocRef, ScopedAllocator,
+};
 
 #[unsafe(link_section = ".bootstrap.stack")]
 static BOOT_STACK: [u8; 4096 * 16] = [0; 4096 * 16];
@@ -64,7 +70,8 @@ static PT1: BootPageTable = {
     BootPageTable(arr)
 };
 
-static BSP_PAGE_ALLOC: AtomicPtr<RefCell<BasicPageAlloc>> = AtomicPtr::new(core::ptr::null_mut());
+static BSP_PAGE_ALLOC: AtomicPtr<RefCell<BasicPageAlloc>> =
+    AtomicPtr::new(core::ptr::null_mut());
 
 static AP_COUNT: AtomicUsize = AtomicUsize::new(0);
 static AP_STACK: AtomicUsize = AtomicUsize::new(0);
@@ -130,11 +137,14 @@ pub unsafe extern "C" fn riscv64_start(hart_id: usize, dtb_addr: PAddr) -> ! {
     }
 
     let start = unsafe {
-        ((&BOOT_STACK_START) as *const &'static [u8; 4096 * 16]).read_volatile() as *const _
-            as usize
+        ((&BOOT_STACK_START) as *const &'static [u8; 4096 * 16]).read_volatile()
+            as *const _ as usize
     };
     let bootstrap_data = BootStrapData {
-        early_stack: PRange::new(PAddr::from(start), PAddr::from(start + 4096 * 16)),
+        early_stack: PRange::new(
+            PAddr::from(start),
+            PAddr::from(start + 4096 * 16),
+        ),
         allocator: Some(real_allocator),
     };
 
@@ -179,7 +189,11 @@ fn setup_kernel_page_table(alloc: BasicPageAllocRef) {
     sfence_vma_all();
 
     unsafe {
-        core::ptr::write_bytes(KERNEL_BSS_START.addr() as *mut (), 0, BSS_LENGTH as usize);
+        core::ptr::write_bytes(
+            KERNEL_BSS_START.addr() as *mut (),
+            0,
+            BSS_LENGTH as usize,
+        );
     }
 
     unsafe {
@@ -255,7 +269,8 @@ fn bootstrap_smp(alloc: impl Allocator, page_alloc: &RefCell<BasicPageAlloc>) {
             stack_range
         };
 
-        let old = BSP_PAGE_ALLOC.swap((&raw const *page_alloc) as *mut _, Ordering::Release);
+        let old = BSP_PAGE_ALLOC
+            .swap((&raw const *page_alloc) as *mut _, Ordering::Release);
         assert!(old.is_null());
 
         while AP_STACK
@@ -324,7 +339,12 @@ unsafe extern "C" fn _ap_start(hart_id: usize) {
 
 fn get_ap_stack() -> usize {
     while AP_SEM
-        .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
+        .compare_exchange_weak(
+            false,
+            true,
+            Ordering::Acquire,
+            Ordering::Relaxed,
+        )
         .is_err()
     {
         core::hint::spin_loop();
@@ -344,12 +364,14 @@ fn get_ap_stack() -> usize {
 }
 
 fn ap_entry(hart_id: usize, stack_bottom: PAddr) -> ! {
-    let stack_range = PRange::new(stack_bottom - (1 << 3) * PAGE_SIZE, stack_bottom);
+    let stack_range =
+        PRange::new(stack_bottom - (1 << 3) * PAGE_SIZE, stack_bottom);
 
     {
         // SAFETY: Acquire all the work done by the BSP and other APs.
         let alloc = loop {
-            let alloc = BSP_PAGE_ALLOC.swap(core::ptr::null_mut(), Ordering::AcqRel);
+            let alloc =
+                BSP_PAGE_ALLOC.swap(core::ptr::null_mut(), Ordering::AcqRel);
 
             if !alloc.is_null() {
                 break alloc;

+ 32 - 20
crates/eonix_hal/src/arch/riscv64/mm.rs

@@ -4,8 +4,8 @@ use core::ptr::NonNull;
 use eonix_hal_traits::mm::Memory;
 use eonix_mm::address::{Addr as _, AddrOps, PAddr, PRange, PhysAccess, VAddr};
 use eonix_mm::page_table::{
-    PageAttribute, PageTable, PageTableLevel, PagingMode, RawAttribute, RawPageTable,
-    TableAttribute, PTE,
+    PageAttribute, PageTable, PageTableLevel, PagingMode, RawAttribute,
+    RawPageTable, TableAttribute, PTE,
 };
 use eonix_mm::paging::{BasicFolio, Folio, PageAccess, PageBlock, PFN};
 use eonix_sync_base::LazyLock;
@@ -115,7 +115,9 @@ impl RawAttribute for PageAttribute64 {
             table_attr |= TableAttribute::PRESENT;
         }
 
-        if table_attr.contains(TableAttribute::PRESENT) && self.0 & (PA_R | PA_W | PA_X) != 0 {
+        if table_attr.contains(TableAttribute::PRESENT)
+            && self.0 & (PA_R | PA_W | PA_X) != 0
+        {
             return None;
         }
 
@@ -139,7 +141,9 @@ impl RawAttribute for PageAttribute64 {
             page_attr |= PageAttribute::PRESENT;
         }
 
-        if page_attr.contains(PageAttribute::PRESENT) && (self.0 & (PA_R | PA_W | PA_X) == 0) {
+        if page_attr.contains(PageAttribute::PRESENT)
+            && (self.0 & (PA_R | PA_W | PA_X) == 0)
+        {
             return None;
         }
 
@@ -278,18 +282,22 @@ impl Memory for ArchMemory {
         let kernel_end = PAddr::from(__kernel_end as usize - KIMAGE_OFFSET);
         let paddr_after_kimage_aligned = kernel_end.ceil_to(0x200000);
 
-        core::iter::once(PRange::new(kernel_end, paddr_after_kimage_aligned)).chain(
-            Self::present_ram()
-                .filter(move |range| range.end() > paddr_after_kimage_aligned)
-                .map(move |range| {
-                    if range.start() < paddr_after_kimage_aligned {
-                        let (_, right) = range.split_at(paddr_after_kimage_aligned);
-                        right
-                    } else {
-                        range
-                    }
-                }),
-        )
+        core::iter::once(PRange::new(kernel_end, paddr_after_kimage_aligned))
+            .chain(
+                Self::present_ram()
+                    .filter(move |range| {
+                        range.end() > paddr_after_kimage_aligned
+                    })
+                    .map(move |range| {
+                        if range.start() < paddr_after_kimage_aligned {
+                            let (_, right) =
+                                range.split_at(paddr_after_kimage_aligned);
+                            right
+                        } else {
+                            range
+                        }
+                    }),
+            )
     }
 }
 
@@ -314,17 +322,21 @@ where
         let kernel_end = PAddr::from(__kernel_end as usize - KIMAGE_OFFSET);
         let paddr_after_kimage_aligned = kernel_end.ceil_to(0x200000);
 
-        core::iter::once(PRange::new(kernel_end, paddr_after_kimage_aligned)).chain(
-            self.filter(move |range| range.end() > paddr_after_kimage_aligned)
+        core::iter::once(PRange::new(kernel_end, paddr_after_kimage_aligned))
+            .chain(
+                self.filter(move |range| {
+                    range.end() > paddr_after_kimage_aligned
+                })
                 .map(move |range| {
                     if range.start() < paddr_after_kimage_aligned {
-                        let (_, right) = range.split_at(paddr_after_kimage_aligned);
+                        let (_, right) =
+                            range.split_at(paddr_after_kimage_aligned);
                         right
                     } else {
                         range
                     }
                 }),
-        )
+            )
     }
 }
 

+ 2 - 1
src/kernel_init.rs

@@ -19,7 +19,8 @@ fn setup_kernel_page_array(alloc: BasicPageAllocRef, count_pages: usize) {
     // Map kernel page array.
     const V_KERNEL_PAGE_ARRAY_START: VAddr = VAddr::from(0xffffff8040000000);
 
-    let range = VRange::from(V_KERNEL_PAGE_ARRAY_START).grow(PAGE_SIZE * count_pages);
+    let range =
+        VRange::from(V_KERNEL_PAGE_ARRAY_START).grow(PAGE_SIZE * count_pages);
     for pte in global_page_table.iter_kernel(range) {
         let attr = PageAttribute::PRESENT
             | PageAttribute::WRITE