Prechádzať zdrojové kódy

fix: intermediate page tables should not set A, D and U bits

greatbridf pred 7 mesiacmi
rodič
commit
9cf926f974

+ 1 - 1
arch/src/riscv64/mod.rs

@@ -13,7 +13,7 @@ pub use fpu::*;
 
 #[inline(always)]
 pub fn flush_tlb(vaddr: usize) {
-    sfence_vma(vaddr, 0);
+    sfence_vma(0, vaddr);
 }
 
 #[inline(always)]

+ 0 - 1
crates/eonix_hal/eonix_hal_traits/src/fault.rs

@@ -3,7 +3,6 @@ use bitflags::bitflags;
 bitflags! {
     #[derive(Debug)]
     pub struct PageFaultErrorCode: u32 {
-        const NonPresent = 1;
         const Read = 2;
         const Write = 4;
         const InstructionFetch = 8;

+ 1 - 2
crates/eonix_hal/src/arch/riscv64/mm.rs

@@ -217,8 +217,7 @@ impl From<TableAttribute> for PageAttribute64 {
             match attr {
                 TableAttribute::PRESENT => raw_attr |= PA_V,
                 TableAttribute::GLOBAL => raw_attr |= PA_G,
-                TableAttribute::USER => raw_attr |= PA_U,
-                TableAttribute::ACCESSED => raw_attr |= PA_A,
+                TableAttribute::USER | TableAttribute::ACCESSED => {}
                 _ => unreachable!("Invalid table attribute"),
             }
         }

+ 10 - 9
crates/eonix_hal/src/arch/riscv64/trap/trap_context.rs

@@ -138,11 +138,10 @@ impl RawTrapContext for TrapContext {
                         no: self.syscall_no(),
                         args: self.syscall_args(),
                     },
-                    Exception::InstructionPageFault
+                    exception @ (Exception::InstructionPageFault
                     | Exception::LoadPageFault
-                    | Exception::StorePageFault => {
-                        let e = Exception::from_number(e).unwrap();
-                        TrapType::Fault(Fault::PageFault(self.get_page_fault_error_code(e)))
+                    | Exception::StorePageFault) => {
+                        TrapType::Fault(Fault::PageFault(self.get_page_fault_error_code(exception)))
                     }
                     // breakpoint and supervisor env call
                     _ => TrapType::Fault(Fault::Unknown(e)),
@@ -193,14 +192,12 @@ impl RawTrapContext for TrapContext {
 
 impl TrapContext {
     /// TODO: get PageFaultErrorCode also need check pagetable
-    fn get_page_fault_error_code(&self, exception_type: Exception) -> PageFaultErrorCode {
-        let scause_val = self.scause;
+    fn get_page_fault_error_code(&self, exception: Exception) -> PageFaultErrorCode {
         let mut error_code = PageFaultErrorCode::empty();
 
-        match exception_type {
+        match exception {
             Exception::InstructionPageFault => {
                 error_code |= PageFaultErrorCode::InstructionFetch;
-                error_code |= PageFaultErrorCode::Read;
             }
             Exception::LoadPageFault => {
                 error_code |= PageFaultErrorCode::Read;
@@ -212,7 +209,11 @@ impl TrapContext {
                 unreachable!();
             }
         }
-        // TODO: here need check pagetable to confirm NonPresent and UserAccess
+
+        if self.sstatus.spp() == SPP::User {
+            error_code |= PageFaultErrorCode::UserAccess;
+        }
+
         error_code
     }
 }

+ 40 - 51
src/kernel/mem/mm_area.rs

@@ -79,27 +79,23 @@ impl MMArea {
         }
     }
 
-    /// # Return
-    /// Whether the whole handling process is done.
-    pub fn handle_cow(&self, pte: &mut impl PTE) -> bool {
-        let mut page_attr = pte.get_attr().as_page_attr().expect("Not a page attribute");
-        let pfn = pte.get_pfn();
+    pub fn handle_cow(&self, pfn: &mut PFN, attr: &mut PageAttribute) {
+        assert!(attr.contains(PageAttribute::COPY_ON_WRITE));
 
-        page_attr.remove(PageAttribute::COPY_ON_WRITE);
-        page_attr.set(PageAttribute::WRITE, self.permission.write);
+        attr.remove(PageAttribute::COPY_ON_WRITE);
+        attr.set(PageAttribute::WRITE, self.permission.write);
 
-        let page = unsafe { Page::from_raw(pfn) };
+        let page = unsafe { Page::from_raw(*pfn) };
         if page.is_exclusive() {
             // SAFETY: This is actually safe. If we read `1` here and we have `MMList` lock
             // held, there couldn't be neither other processes sharing the page, nor other
             // threads making the page COW at the same time.
-            pte.set_attr(page_attr.into());
             core::mem::forget(page);
-            return true;
+            return;
         }
 
         let new_page;
-        if is_empty_page(pfn) {
+        if *pfn == EMPTY_PAGE.pfn() {
             new_page = Page::zeroed();
         } else {
             new_page = Page::alloc();
@@ -115,33 +111,30 @@ impl MMArea {
             };
         }
 
-        page_attr.remove(PageAttribute::ACCESSED);
-
-        pte.set(new_page.into_raw(), page_attr.into());
-
-        false
+        attr.remove(PageAttribute::ACCESSED);
+        *pfn = new_page.into_raw();
     }
 
     /// # Arguments
     /// * `offset`: The offset from the start of the mapping, aligned to 4KB boundary.
-    pub fn handle_mmap(&self, pte: &mut impl PTE, offset: usize) -> KResult<()> {
+    pub fn handle_mmap(
+        &self,
+        pfn: &mut PFN,
+        attr: &mut PageAttribute,
+        offset: usize,
+    ) -> KResult<()> {
         // TODO: Implement shared mapping
-        let mut page_attr = pte.get_attr().as_page_attr().expect("Not a page attribute");
-        let pfn = pte.get_pfn();
-
-        match &self.mapping {
-            Mapping::File(mapping) if offset < mapping.length => {
-                let page = unsafe {
-                    // SAFETY: Since we are here, the `pfn` must refer to a valid buddy page.
-                    Page::with_raw(pfn, |page| page.clone())
-                };
-
-                let page_data = unsafe {
-                    // SAFETY: `page` is marked as mapped, so others trying to read or write to
-                    //         it will be blocked and enter the page fault handler, where they will
-                    //         be blocked by the mutex held by us.
-                    page.as_memblk().as_bytes_mut()
-                };
+        let Mapping::File(mapping) = &self.mapping else {
+            panic!("Anonymous mapping should not be PA_MMAP");
+        };
+
+        assert!(offset < mapping.length, "Offset out of range");
+        unsafe {
+            Page::with_raw(*pfn, |page| {
+                // SAFETY: `page` is marked as mapped, so others trying to read or write to
+                //         it will be blocked and enter the page fault handler, where they will
+                //         be blocked by the mutex held by us.
+                let page_data = page.as_memblk().as_bytes_mut();
 
                 let cnt_to_read = (mapping.length - offset).min(0x1000);
                 let cnt_read = mapping.file.read(
@@ -150,39 +143,35 @@ impl MMArea {
                 )?;
 
                 page_data[cnt_read..].fill(0);
-            }
-            Mapping::File(_) => panic!("Offset out of range"),
-            _ => panic!("Anonymous mapping should not be PA_MMAP"),
-        }
 
-        page_attr.insert(PageAttribute::PRESENT);
-        page_attr.remove(PageAttribute::MAPPED);
+                KResult::Ok(())
+            })?;
+        }
 
-        pte.set_attr(page_attr.into());
+        attr.insert(PageAttribute::PRESENT);
+        attr.remove(PageAttribute::MAPPED);
         Ok(())
     }
 
     pub fn handle(&self, pte: &mut impl PTE, offset: usize) -> KResult<()> {
-        let page_attr = pte.get_attr().as_page_attr().expect("Not a page attribute");
+        let mut attr = pte.get_attr().as_page_attr().expect("Not a page attribute");
+        let mut pfn = pte.get_pfn();
 
-        if page_attr.contains(PageAttribute::COPY_ON_WRITE) {
-            self.handle_cow(pte);
+        if attr.contains(PageAttribute::COPY_ON_WRITE) {
+            self.handle_cow(&mut pfn, &mut attr);
         }
 
-        if page_attr.contains(PageAttribute::MAPPED) {
-            self.handle_mmap(pte, offset)?;
+        if attr.contains(PageAttribute::MAPPED) {
+            self.handle_mmap(&mut pfn, &mut attr, offset)?;
         }
 
+        attr.set(PageAttribute::ACCESSED, true);
+        pte.set(pfn, attr.into());
+
         Ok(())
     }
 }
 
-/// check pfn with EMPTY_PAGE's pfn
-fn is_empty_page(pfn: PFN) -> bool {
-    let empty_pfn = EMPTY_PAGE.pfn();
-    pfn == empty_pfn
-}
-
 impl Eq for MMArea {}
 impl PartialEq for MMArea {
     fn eq(&self, other: &Self) -> bool {

+ 13 - 2
src/kernel/mem/mm_list/page_fault.rs

@@ -1,7 +1,8 @@
 use super::{MMList, VAddr};
 use crate::kernel::task::{Signal, Thread};
+use arch::flush_tlb;
 use eonix_hal::traits::fault::PageFaultErrorCode;
-use eonix_mm::address::{AddrOps as _, VRange};
+use eonix_mm::address::{Addr as _, AddrOps as _, VRange};
 use eonix_mm::paging::PAGE_SIZE;
 use eonix_runtime::task::Task;
 
@@ -80,7 +81,17 @@ impl MMList {
             .expect("If we can find the mapped area, we should be able to find the PTE");
 
         area.handle(pte, addr.floor() - area.range().start())
-            .map_err(|_| Signal::SIGBUS)
+            .map_err(|_| Signal::SIGBUS)?;
+
+        #[cfg(not(target_arch = "x86_64"))]
+        {
+            // Flush the TLB for the affected address range.
+            // x86 CPUs will try to retrieve the PTE again for non-present entries.
+            // So we don't need to flush TLB.
+            flush_tlb(addr.floor().addr());
+        }
+
+        Ok(())
     }
 }