
mm, arch: refine page cache impl and count cpu on la64

Original commit message:

Merge remote-tracking branch 'Shao-ZW/temp' into dockerfile

(cherry picked from commit 0cf7998d17b8cf4622a9c84fe0724985dfd3631e)
Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf, 6 months ago
commit b321265202

+ 5 - 5
Cargo.toml

@@ -51,19 +51,19 @@ debug = true
 panic = "abort"
 
 [profile.dev.package.eonix_preempt]
-opt-level = 2
+opt-level = "s"
 
 [profile.dev.package.eonix_runtime]
-opt-level = 2
+opt-level = "s"
 
 [profile.dev.package.eonix_sync]
-opt-level = 2
+opt-level = "s"
 
 [profile.dev.package.intrusive_list]
-opt-level = 2
+opt-level = "s"
 
 [profile.dev.package.eonix_hal]
-opt-level = 2
+opt-level = "s"
 
 [profile.dev.package."*"]
 opt-level = "s"

+ 4 - 1
crates/eonix_hal/src/arch/loongarch64/bootstrap.rs

@@ -1,4 +1,5 @@
 use super::cpu::CPUID;
+use super::cpu::CPU_COUNT;
 use crate::{
     arch::{
         cpu::CPU,
@@ -16,7 +17,7 @@ use core::{
     alloc::Allocator,
     arch::asm,
     cell::RefCell,
-    sync::atomic::{AtomicBool, AtomicUsize},
+    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
 };
 use eonix_hal_traits::mm::Memory;
 use eonix_mm::{
@@ -228,6 +229,8 @@ fn setup_cpu(alloc: impl PageAlloc, hart_id: usize) {
     euen::set_fpe(true);
     euen::set_sxe(true);
 
+    CPU_COUNT.fetch_add(1, Ordering::Relaxed);
+
     let mut percpu_area = PercpuArea::new(|layout| {
         let page_count = layout.size().div_ceil(PAGE_SIZE);
         let page = Page::alloc_at_least_in(page_count, alloc);

+ 3 - 0
crates/eonix_hal/src/arch/loongarch64/cpu.rs

@@ -1,8 +1,11 @@
 use super::trap::setup_trap;
+use core::sync::atomic::AtomicUsize;
 use core::{arch::asm, pin::Pin, ptr::NonNull};
 use eonix_preempt::PreemptGuard;
 use eonix_sync_base::LazyLock;
 
+pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(0);
+
 #[eonix_percpu::define_percpu]
 pub static CPUID: usize = 0;
 

+ 3 - 1
crates/eonix_hal/src/arch/riscv64/bootstrap.rs

@@ -1,7 +1,7 @@
 use super::{
     config::{self, mm::*},
     console::write_str,
-    cpu::CPUID,
+    cpu::{CPUID, CPU_COUNT},
     time::set_next_timer,
     trap::TRAP_SCRATCH,
 };
@@ -202,6 +202,8 @@ fn setup_kernel_page_table(alloc: impl PageAlloc) {
 
 /// set up tp register to percpu
 fn setup_cpu(alloc: impl PageAlloc, hart_id: usize) {
+    CPU_COUNT.fetch_add(1, Ordering::Relaxed);
+
     let mut percpu_area = PercpuArea::new(|layout| {
         let page_count = layout.size().div_ceil(PAGE_SIZE);
         let page = Page::alloc_at_least_in(page_count, alloc);

+ 3 - 1
crates/eonix_hal/src/arch/riscv64/cpu.rs

@@ -3,7 +3,7 @@ use super::{
     trap::{setup_trap, TRAP_SCRATCH},
 };
 use crate::arch::fdt::{FdtExt, FDT};
-use core::{arch::asm, pin::Pin, ptr::NonNull};
+use core::{arch::asm, pin::Pin, ptr::NonNull, sync::atomic::AtomicUsize};
 use eonix_preempt::PreemptGuard;
 use eonix_sync_base::LazyLock;
 use riscv::register::{
@@ -12,6 +12,8 @@ use riscv::register::{
 };
 use sbi::PhysicalAddress;
 
+pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(0);
+
 #[eonix_percpu::define_percpu]
 pub static CPUID: usize = 0;
 

+ 1 - 1
crates/eonix_hal/src/lib.rs

@@ -19,7 +19,7 @@ pub mod fpu {
 }
 
 pub mod processor {
-    pub use crate::arch::cpu::{halt, UserTLS, CPU};
+    pub use crate::arch::cpu::{halt, UserTLS, CPU, CPU_COUNT};
 }
 
 /// Re-export the arch module for use in other crates
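A hypothetical consumer sketch of the newly re-exported counter (online_cpus is not part of this commit). Relaxed mirrors the store side in setup_cpu, which suffices for a monotonic count but does not synchronize with the percpu setup performed after each increment:

use core::sync::atomic::Ordering;
use eonix_hal::processor::CPU_COUNT;

fn online_cpus() -> usize {
    // Number of harts that have entered setup_cpu so far.
    CPU_COUNT.load(Ordering::Relaxed)
}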

+ 50 - 48
src/kernel/mem/mm_area.rs

@@ -2,10 +2,10 @@ use super::mm_list::EMPTY_PAGE;
 use super::paging::AllocZeroed as _;
 use super::{AsMemoryBlock, Mapping, Page, Permission};
 use crate::kernel::constants::EINVAL;
-use crate::kernel::mem::page_cache::PageCacheRawPage;
-use crate::KResult;
-use core::sync::atomic;
-use core::{borrow::Borrow, cell::UnsafeCell, cmp};
+use crate::prelude::KResult;
+use core::borrow::Borrow;
+use core::cell::UnsafeCell;
+use core::cmp;
 use eonix_mm::address::{AddrOps as _, VAddr, VRange};
 use eonix_mm::page_table::{PageAttribute, RawAttribute, PTE};
 use eonix_mm::paging::{PAGE_SIZE, PFN};
@@ -145,53 +145,55 @@ impl MMArea {
 
         let file_offset = file_mapping.offset + offset;
         let cnt_to_read = (file_mapping.length - offset).min(0x1000);
-        let raw_page = page_cache.get_page(file_offset).await?.ok_or(EINVAL)?;
-
-        // Non-write faults: we find page in pagecache and do mapping
-        // Write fault: we need to care about shared or private mapping.
-        if !write {
-            // Bss is embarrassing in pagecache!
-            // We have to assume cnt_to_read < PAGE_SIZE all bss
-            if cnt_to_read < PAGE_SIZE {
-                let new_page = Page::zeroed();
-                unsafe {
-                    let page_data = new_page.as_memblk().as_bytes_mut();
-                    page_data[..cnt_to_read]
-                        .copy_from_slice(&raw_page.as_memblk().as_bytes()[..cnt_to_read]);
-                }
-                *pfn = new_page.into_raw();
-            } else {
-                raw_page.refcount().fetch_add(1, atomic::Ordering::Relaxed);
-                *pfn = Into::<PFN>::into(raw_page);
-            }
 
-            if self.permission.write {
-                if self.is_shared {
-                    // The page may will not be written,
-                    // But we simply assume page will be dirty
-                    raw_page.set_dirty();
-                    attr.insert(PageAttribute::WRITE);
+        page_cache
+            .with_page(file_offset, |page, cache_page| {
+                // Non-write faults: find the page in the page cache and map it.
+                // Write faults: we must distinguish shared from private mappings.
+                if !write {
+                    // BSS is awkward in the page cache:
+                    // we assume any partial read (cnt_to_read < PAGE_SIZE) is BSS.
+                    if cnt_to_read < PAGE_SIZE {
+                        let new_page = Page::zeroed();
+                        unsafe {
+                            let page_data = new_page.as_memblk().as_bytes_mut();
+                            page_data[..cnt_to_read]
+                                .copy_from_slice(&page.as_memblk().as_bytes()[..cnt_to_read]);
+                        }
+                        *pfn = new_page.into_raw();
+                    } else {
+                        *pfn = page.clone().into_raw();
+                    }
+
+                    if self.permission.write {
+                        if self.is_shared {
+                            // The page may never actually be written,
+                            // but we conservatively mark it dirty anyway.
+                            cache_page.set_dirty();
+                            attr.insert(PageAttribute::WRITE);
+                        } else {
+                            attr.insert(PageAttribute::COPY_ON_WRITE);
+                        }
+                    }
                 } else {
-                    attr.insert(PageAttribute::COPY_ON_WRITE);
-                }
-            }
-        } else {
-            if self.is_shared {
-                raw_page.refcount().fetch_add(1, atomic::Ordering::Relaxed);
-                raw_page.set_dirty();
-                *pfn = Into::<PFN>::into(raw_page);
-            } else {
-                let new_page = Page::zeroed();
-                unsafe {
-                    let page_data = new_page.as_memblk().as_bytes_mut();
-                    page_data[..cnt_to_read]
-                        .copy_from_slice(&raw_page.as_memblk().as_bytes()[..cnt_to_read]);
-                }
-                *pfn = new_page.into_raw();
-            }
+                    if self.is_shared {
+                        cache_page.set_dirty();
+                        *pfn = page.clone().into_raw();
+                    } else {
+                        let new_page = Page::zeroed();
+                        unsafe {
+                            let page_data = new_page.as_memblk().as_bytes_mut();
+                            page_data[..cnt_to_read]
+                                .copy_from_slice(&page.as_memblk().as_bytes()[..cnt_to_read]);
+                        }
+                        *pfn = new_page.into_raw();
+                    }
 
-            attr.insert(PageAttribute::WRITE);
-        }
+                    attr.insert(PageAttribute::WRITE);
+                }
+            })
+            .await?
+            .ok_or(EINVAL)?;
 
         attr.insert(PageAttribute::PRESENT);
         attr.remove(PageAttribute::MAPPED);
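The decision structure of the hunk above, distilled into a standalone sketch for readers skimming the diff (decide, Source, MapAttr, and partial are illustrative names, not from the tree; partial stands for cnt_to_read < PAGE_SIZE):

enum Source { CopyIntoZeroedPage, MapCachePage }
enum MapAttr { Write, CopyOnWrite, ReadOnly }

fn decide(write: bool, shared: bool, can_write: bool, partial: bool) -> (Source, MapAttr) {
    if !write {
        // Read fault: partial (bss-style) pages are always copied.
        let src = if partial { Source::CopyIntoZeroedPage } else { Source::MapCachePage };
        // The permission bits are chosen independently of the copy.
        let attr = match (can_write, shared) {
            (true, true) => MapAttr::Write,        // cache page is also marked dirty
            (true, false) => MapAttr::CopyOnWrite, // defer copying to the write fault
            (false, _) => MapAttr::ReadOnly,
        };
        (src, attr)
    } else if shared {
        // Write fault on a shared mapping: write through the cache page, mark it dirty.
        (Source::MapCachePage, MapAttr::Write)
    } else {
        // Write fault on a private mapping: copy first, then map the copy writable.
        (Source::CopyIntoZeroedPage, MapAttr::Write)
    }
}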

+ 2 - 9
src/kernel/mem/page_alloc/raw_page.rs

@@ -1,5 +1,5 @@
-use crate::kernel::mem::{page_cache::PageCacheRawPage, MemoryBlock};
-use crate::kernel::mem::{AsMemoryBlock, PhysAccess};
+use crate::kernel::mem::page_cache::PageCacheRawPage;
+use crate::kernel::mem::PhysAccess;
 use buddy_allocator::BuddyRawPage;
 use core::{
     ptr::NonNull,
@@ -271,10 +271,3 @@ impl PageCacheRawPage for RawPagePtr {
         self.as_mut().shared_data = PageType::PageCache(PageCacheInner { valid_size: 0 });
     }
 }
-
-/// SAFETY: `RawPagePtr` is a pointer to a valid `RawPage` struct.
-impl AsMemoryBlock for RawPagePtr {
-    fn as_memblk(&self) -> MemoryBlock {
-        unsafe { MemoryBlock::new(self.real_ptr::<()>().addr(), PAGE_SIZE) }
-    }
-}

+ 43 - 22
src/kernel/mem/page_cache.rs

@@ -1,4 +1,4 @@
-use super::access::AsMemoryBlock;
+use super::{paging::AllocZeroed, Page};
 use crate::{
     io::{Buffer, FillResult, Stream},
     kernel::mem::page_alloc::RawPagePtr,
@@ -7,7 +7,12 @@ use crate::{
 };
 use align_ext::AlignExt;
 use alloc::{collections::btree_map::BTreeMap, sync::Weak};
-use eonix_mm::paging::{PageAlloc, RawPage, PAGE_SIZE, PAGE_SIZE_BITS};
+use core::mem::ManuallyDrop;
+use eonix_hal::mm::ArchPhysAccess;
+use eonix_mm::{
+    address::{PAddr, PhysAccess},
+    paging::{PageAlloc, RawPage, PAGE_SIZE, PAGE_SIZE_BITS, PFN},
+};
 use eonix_sync::Mutex;
 
 pub struct PageCache {
@@ -58,13 +63,11 @@ impl CachePage {
     }
 
     pub fn new_zeroed() -> Self {
-        let page = GlobalPageAlloc.alloc().unwrap();
-        // SAFETY: We own the page exclusively, so we can safely zero it.
-        unsafe {
-            page.as_memblk().as_bytes_mut().fill(0);
-        }
-        page.cache_init();
-        Self(page)
+        let page = Page::zeroed();
+        let raw_page_ptr = RawPagePtr::from(page.into_raw());
+
+        raw_page_ptr.cache_init();
+        Self(raw_page_ptr)
     }
 
     pub fn valid_size(&self) -> usize {
@@ -77,13 +80,21 @@ impl CachePage {
 
     pub fn all(&self) -> &[u8] {
         unsafe {
-            self.0.as_memblk().as_bytes()
+            core::slice::from_raw_parts(
+                // SAFETY: The page is exclusively owned by us, so we can safely access its data.
+                ArchPhysAccess::as_ptr(PAddr::from(PFN::from(self.0))).as_ptr(),
+                PAGE_SIZE,
+            )
         }
     }
 
     pub fn all_mut(&mut self) -> &mut [u8] {
         unsafe {
-            self.0.as_memblk().as_bytes_mut()
+            core::slice::from_raw_parts_mut(
+                // SAFETY: The page is exclusively owned by us, so we can safely access its data.
+                ArchPhysAccess::as_ptr(PAddr::from(PFN::from(self.0))).as_ptr(),
+                PAGE_SIZE,
+            )
         }
     }
 
@@ -247,7 +258,10 @@ impl PageCache {
         Ok(())
     }
 
-    pub async fn get_page(&self, offset: usize) -> KResult<Option<RawPagePtr>> {
+    pub async fn with_page<F, O>(&self, offset: usize, func: F) -> KResult<Option<O>>
+    where
+        F: FnOnce(&Page, &CachePage) -> O,
+    {
         let offset_aligin = offset.align_down(PAGE_SIZE);
         let page_id = offset_aligin >> PAGE_SIZE_BITS;
         let size = self.backend.upgrade().unwrap().size();
@@ -258,16 +272,23 @@ impl PageCache {
 
         let mut pages = self.pages.lock().await;
 
-        if let Some(page) = pages.get(&page_id) {
-            Ok(Some(page.0))
-        } else {
-            let mut new_page = CachePage::new();
-            self.backend
-                .upgrade()
-                .unwrap()
-                .read_page(&mut new_page, offset_aligin)?;
-            pages.insert(page_id, new_page);
-            Ok(Some(new_page.0))
+        let raw_page_ptr = match pages.get(&page_id) {
+            Some(CachePage(raw_page_ptr)) => *raw_page_ptr,
+            None => {
+                let mut new_page = CachePage::new();
+                self.backend
+                    .upgrade()
+                    .unwrap()
+                    .read_page(&mut new_page, offset_aligin)?;
+                pages.insert(page_id, new_page);
+                new_page.0
+            }
+        };
+
+        unsafe {
+            let page = ManuallyDrop::new(Page::from_raw_unchecked(PFN::from(raw_page_ptr)));
+
+            Ok(Some(func(&page, &CachePage(raw_page_ptr))))
         }
     }
 }
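A minimal caller sketch for the new with_page API (peek_valid_size is hypothetical; valid_size is the accessor defined above). The closure runs while the internal pages mutex is held, so results must be copied out rather than borrowed:

async fn peek_valid_size(cache: &PageCache, offset: usize) -> KResult<Option<usize>> {
    cache
        .with_page(offset, |_page, cache_page| {
            // Copy the value out; references must not escape the closure.
            cache_page.valid_size()
        })
        .await
}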