
feat(arch): working impl of loongarch64

greatbridf, 7 months ago
commit af00747a1a

+ 12 - 5
crates/eonix_hal/src/arch/loongarch64/bootstrap.rs

@@ -25,7 +25,7 @@ use eonix_mm::{
     paging::{Page, PageAccess, PageAlloc, PAGE_SIZE, PFN},
 };
 use eonix_percpu::PercpuArea;
-use loongArch64::register::euen;
+use loongArch64::register::{euen, pgdl};
 
 #[unsafe(link_section = ".bootstrap.stack")]
 static BOOT_STACK: [u8; 4096 * 16] = [0; 4096 * 16];
@@ -34,11 +34,12 @@ static BOOT_STACK_START: &'static [u8; 4096 * 16] = &BOOT_STACK;
 #[repr(C, align(4096))]
 struct PageTable([u64; 512]);
 
-/// map 0x8000_0000 to 0xffff_0000_8000_0000 and 0xffff_ffff_8000_0000
+/// map 0x8000_0000 to 0x8000_0000, 0xffff_ff00_8000_0000 and 0xffff_ffff_8000_0000
 #[unsafe(link_section = ".bootstrap.page_table.1")]
 static BOOT_PAGE_TABLE: PageTable = {
     let mut arr = [0; 512];
     arr[0] = 0 | 0x11d3; // G | W | P | H | Cached | D | V
+    arr[510] = 0 | 0x11d3; // G | W | P | H | Cached | D | V
     arr[511] = 0x8000_2000 | (1 << 60); // PT1, PT
 
     PageTable(arr)
@@ -57,7 +58,7 @@ static PT1: PageTable = {
 #[unsafe(naked)]
 #[unsafe(no_mangle)]
 #[unsafe(link_section = ".bootstrap.entry")]
-unsafe extern "C" fn _start(hart_id: usize, dtb_addr: usize) -> ! {
+unsafe extern "C" fn _start() -> ! {
     naked_asm!(
         "
             li.d      $t0, 0xc
@@ -87,6 +88,8 @@ unsafe extern "C" fn _start(hart_id: usize, dtb_addr: usize) -> ! {
             csrwr     $t0, {CSR_CRMD}
 
             la.global $sp, {boot_stack}
+            li.d      $t0, 0xffffff0000000000
+            or        $sp, $sp, $t0
             li.d      $t0, {BOOT_STACK_SIZE}
             add.d     $sp, $sp, $t0
 
@@ -135,7 +138,10 @@ unsafe extern "C" fn tlb_refill_entry() {
 
 /// TODO:
 /// Start all CPUs
-pub unsafe extern "C" fn riscv64_start(hart_id: usize, dtb_addr: PAddr) -> ! {
+pub unsafe extern "C" fn riscv64_start(hart_id: usize) -> ! {
+    pgdl::set_base(0xffff_ffff_ffff_0000);
+    flush_tlb_all();
+
     let real_allocator = RefCell::new(BasicPageAlloc::new());
     let alloc = BasicPageAllocRef::new(&real_allocator);
 
@@ -217,6 +223,7 @@ fn setup_kernel_page_table(alloc: impl PageAlloc) {
 fn setup_cpu(alloc: impl PageAlloc, hart_id: usize) {
     // enable FPU
     euen::set_fpe(true);
+    euen::set_sxe(true);
 
     let mut percpu_area = PercpuArea::new(|layout| {
         let page_count = layout.size().div_ceil(PAGE_SIZE);
@@ -252,7 +259,7 @@ fn setup_cpu(alloc: impl PageAlloc, hart_id: usize) {
     unsafe {
         asm!(
             "csrwr {tp}, {CSR_KERNEL_TP}",
-            tp = in(reg) PercpuArea::get_for(cpu.cpuid()).unwrap().as_ptr(),
+            tp = inout(reg) PercpuArea::get_for(cpu.cpuid()).unwrap().as_ptr() => _,
             CSR_KERNEL_TP = const CSR_KERNEL_TP,
         )
     }
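This change gives the bootstrap table three views of the kernel's physical load address: entry 0 keeps the identity mapping at 0x8000_0000, the new entry 510 covers the direct-map window at 0xffff_ff00_0000_0000 (matching the PHYS_OFFSET change in mm.rs and the 0xffffff0000000000 that is OR-ed into the boot stack pointer above), and entry 511 still points at PT1 for the image mapping at 0xffff_ffff_8000_0000. The 0x11d3 value is the flag set for those huge-page entries; below is a minimal sketch of how it decomposes, assuming the Linux-style LoongArch64 PTE bit positions (P at bit 7 and W at bit 8 are software-defined bits, the rest are architectural):

    // Sketch only: bit positions assumed from the usual LoongArch64 PTE layout.
    const PTE_V: u64 = 1 << 0;        // valid
    const PTE_D: u64 = 1 << 1;        // dirty (also acts as the write enable in the TLB)
    const PTE_MAT_CC: u64 = 1 << 4;   // memory access type: coherent cached
    const PTE_HUGE: u64 = 1 << 6;     // huge-page marker in a directory-level entry
    const PTE_P: u64 = 1 << 7;        // present (software bit)
    const PTE_W: u64 = 1 << 8;        // writable (software bit)
    const PTE_HGLOBAL: u64 = 1 << 12; // global bit, huge-page position

    // G | W | P | H | Cached | D | V, matching the comment in the table above.
    const BOOT_FLAGS: u64 =
        PTE_HGLOBAL | PTE_W | PTE_P | PTE_HUGE | PTE_MAT_CC | PTE_D | PTE_V;
    const _: () = assert!(BOOT_FLAGS == 0x11d3);

setup_cpu also turns on euen.SXE next to euen.FPE, so 128-bit SIMD (LSX) instructions no longer trap once user code starts using them.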

+ 2 - 1
crates/eonix_hal/src/arch/loongarch64/fdt.rs

@@ -7,7 +7,8 @@ use fdt::Fdt;
 const FDT_PADDR: PAddr = PAddr::from_val(0x100000);
 
 pub static FDT: LazyLock<Fdt<'static>> = LazyLock::new(|| unsafe {
-    Fdt::from_ptr(FDT_PADDR.addr() as *const u8).expect("Failed to parse DTB from static memory.")
+    Fdt::from_ptr(ArchPhysAccess::as_ptr(FDT_PADDR).as_ptr())
+        .expect("Failed to parse DTB from static memory.")
 });
 
 pub trait FdtExt {

+ 5 - 3
crates/eonix_hal/src/arch/loongarch64/link.x

@@ -13,6 +13,8 @@ SECTIONS {
         . = ALIGN(16);
         KEEP(*(.bootstrap.stack));
     } > RAM
+
+    __kernel_start = ORIGIN(RAM);
 }
 INSERT BEFORE .text;
 
@@ -43,9 +45,8 @@ SECTIONS {
 
     PERCPU_LENGTH = ABSOLUTE(__epercpu - __spercpu);
 
-    KIMAGE_PAGES = (__edata - _stext + 0x1000 - 1) / 0x1000;
+    KIMAGE_PAGES = (__kernel_end - _stext + 0x1000 - 1) / 0x1000;
     KIMAGE_32K_COUNT = (KIMAGE_PAGES + 8 - 1) / 8;
-    __kernel_end = .;
 
     BSS_LENGTH = ABSOLUTE(__ebss - __sbss);
 }
@@ -90,5 +91,6 @@ SECTIONS {
     } > VDSO AT> RAM
 
     VDSO_PADDR = LOADADDR(.vdso);
+    __kernel_end = ABSOLUTE(LOADADDR(.vdso) + SIZEOF(.vdso));
 }
-INSERT AFTER .data;
+INSERT BEFORE .bss;

+ 5 - 5
crates/eonix_hal/src/arch/loongarch64/mm.rs

@@ -211,7 +211,7 @@ impl From<TableAttribute> for PageAttribute64 {
 }
 
 impl ArchPhysAccess {
-    const PHYS_OFFSET: usize = 0xffff_0000_0000_0000;
+    const PHYS_OFFSET: usize = 0xffff_ff00_0000_0000;
 }
 
 impl PhysAccess for ArchPhysAccess {
@@ -253,8 +253,8 @@ impl Memory for ArchMemory {
             fn __kernel_end();
         }
 
-        let kernel_start = PAddr::from(__kernel_start as usize - KIMAGE_OFFSET);
-        let kernel_end = PAddr::from(__kernel_end as usize - KIMAGE_OFFSET);
+        let kernel_start = PAddr::from(__kernel_start as usize);
+        let kernel_end = PAddr::from(__kernel_end as usize);
         let paddr_after_kimage_aligned = kernel_end.ceil_to(PAGE_SIZE);
 
         Self::present_ram()
@@ -296,7 +296,7 @@ pub fn flush_tlb_all() {
 
 #[inline(always)]
 pub fn get_root_page_table_pfn() -> PFN {
-    PFN::from(pgdl::read().base())
+    PFN::from(PAddr::from(pgdl::read().base()))
 }
 
 #[inline(always)]
@@ -311,6 +311,6 @@ pub fn set_root_page_table_pfn(pfn: PFN) {
 
     // Invalidate all user space TLB entries.
     unsafe {
-        asm!("dbar 0x0", "invtlb 0x3, $zero, $zero");
+        asm!("dbar 0x0", "invtlb 0x0, $zero, $zero");
     }
 }
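mm.rs is where that window is anchored: PHYS_OFFSET moves from 0xffff_0000_0000_0000 to 0xffff_ff00_0000_0000 so it matches boot page table entry 510, __kernel_start and __kernel_end now come straight from load addresses in the linker script (so the KIMAGE_OFFSET subtraction goes away), and the root page table PFN is derived through PAddr instead of being taken raw. The invtlb change is behavioral as well: operation 0x3 only drops entries with G = 0, while 0x0 drops every TLB entry, global ones included. A minimal sketch of the phys-to-virt conversion PHYS_OFFSET implies, assuming ArchPhysAccess::as_ptr is a plain offset into the direct map, which is also what fdt.rs above and the virtio HAL below now rely on:

    use core::ptr::NonNull;

    const PHYS_OFFSET: usize = 0xffff_ff00_0000_0000;

    /// Sketch only: turn a physical address into a pointer inside the direct map.
    unsafe fn phys_to_virt(paddr: usize) -> NonNull<u8> {
        // SAFETY: the caller must ensure `paddr` lies in RAM covered by the window.
        unsafe { NonNull::new_unchecked((PHYS_OFFSET + paddr) as *mut u8) }
    }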

+ 2 - 2
crates/eonix_hal/src/arch/loongarch64/trap/mod.rs

@@ -281,8 +281,8 @@ impl TrapReturn for TrapContext {
             asm!(
                 "csrwr {captured_trap_context}, {CSR_CAPTURED_TRAP_CONTEXT_ADDR}",
                 "csrwr {capturer_task_context}, {CSR_CAPTURER_TASK_CONTEXT_ADDR}",
-                captured_trap_context = in(reg) &raw mut *self,
-                capturer_task_context = in(reg) &raw mut capturer_ctx,
+                captured_trap_context = inout(reg) &raw mut *self => _,
+                capturer_task_context = inout(reg) &raw mut capturer_ctx => _,
                 CSR_CAPTURED_TRAP_CONTEXT_ADDR = const CSR_CAPTURED_TRAP_CONTEXT_ADDR,
                 CSR_CAPTURER_TASK_CONTEXT_ADDR = const CSR_CAPTURER_TASK_CONTEXT_ADDR,
                 options(nomem, nostack, preserves_flags),
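The register-operand fixes here and in bootstrap.rs follow from csrwr's semantics: the instruction exchanges the GPR with the CSR, so after it executes the register holds the CSR's previous value rather than the operand. Declaring it as in(reg) lets the compiler assume the register is preserved; inout(reg) value => _ correctly marks it as clobbered. A minimal sketch of the pattern, with a made-up CSR number purely for illustration:

    use core::arch::asm;

    const CSR_EXAMPLE: usize = 0x30; // hypothetical CSR index, illustration only

    /// Sketch only: write `value` into a CSR, discarding the old value that
    /// `csrwr` swaps back into the source register.
    unsafe fn csr_write(value: usize) {
        unsafe {
            asm!(
                "csrwr {v}, {csr}",
                v = inout(reg) value => _,
                csr = const CSR_EXAMPLE,
                options(nomem, nostack, preserves_flags),
            );
        }
    }

The eonix_percpu change further down goes the other way for the same reason: {start} is only read by sub.d, so a plain in(reg) is sufficient there.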

+ 1 - 1
crates/eonix_hal/src/link.x.in

@@ -3,7 +3,7 @@ PROVIDE(_stext = ORIGIN(REGION_TEXT));
 SECTIONS {
     .text _stext :
     {
-        __kernel_start = .;
+        PROVIDE(__kernel_start = .);
         __stext = .;
 
         *(.text.entry);

+ 1 - 1
crates/eonix_percpu/eonix_percpu_macros/src/loongarch64.rs

@@ -16,7 +16,7 @@ pub fn get_percpu_pointer(percpu: &Ident, ty: &Type) -> TokenStream {
                 "sub.d     {base}, {base}, {start}",
                 "add.d     {base}, {base}, $tp",
                 base = inout(reg) &raw const #percpu => base,
-                start = inout(reg) PERCPU_DATA_START as usize => _,
+                start = in(reg) PERCPU_DATA_START as usize,
                 options(nostack, preserves_flags)
             );
 

+ 6 - 10
src/driver/virtio/loongarch64.rs

@@ -1,15 +1,13 @@
-use crate::{
-    kernel::{
-        block::{make_device, BlockDevice},
-        constants::EIO,
-        pcie::{self, PCIDevice, PCIDriver, PciError, SegmentGroup},
-    },
-    println_debug,
+use super::virtio_blk::HAL;
+use crate::kernel::{
+    block::{make_device, BlockDevice},
+    constants::EIO,
+    pcie::{self, PCIDevice, PCIDriver, PciError, SegmentGroup},
 };
 use alloc::sync::Arc;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use eonix_hal::{fence::memory_barrier, mm::ArchPhysAccess};
-use eonix_log::{println_trace, println_warn};
+use eonix_log::println_warn;
 use eonix_mm::address::PhysAccess;
 use eonix_runtime::task::Task;
 use eonix_sync::Spin;
@@ -24,8 +22,6 @@ use virtio_drivers::{
     },
 };
 
-use super::virtio_blk::HAL;
-
 impl ConfigurationAccess for &SegmentGroup {
     fn read_word(&self, device_function: DeviceFunction, register_offset: u8) -> u32 {
         let conf_space = self

+ 3 - 4
src/driver/virtio/virtio_blk.rs

@@ -3,11 +3,10 @@ use crate::{
     kernel::{
         block::{BlockDeviceRequest, BlockRequestQueue},
         constants::EIO,
-        mem::{AsMemoryBlock, MemoryBlock, Page},
+        mem::{AsMemoryBlock, Page},
     },
     prelude::KResult,
 };
-use core::num::NonZero;
 use eonix_hal::mm::ArchPhysAccess;
 use eonix_mm::{
     address::{Addr, PAddr, PhysAccess},
@@ -50,9 +49,9 @@ unsafe impl Hal for HAL {
 
     unsafe fn mmio_phys_to_virt(
         paddr: virtio_drivers::PhysAddr,
-        size: usize,
+        _size: usize,
     ) -> core::ptr::NonNull<u8> {
-        MemoryBlock::new(NonZero::new(paddr).expect("paddr must be non-zero"), size).as_byte_ptr()
+        unsafe { ArchPhysAccess::as_ptr(PAddr::from(paddr)) }
     }
 
     unsafe fn share(

+ 7 - 2
src/kernel/mem/mm_area.rs

@@ -155,7 +155,7 @@ impl MMArea {
         Ok(())
     }
 
-    pub fn handle(&self, pte: &mut impl PTE, offset: usize) -> KResult<()> {
+    pub fn handle(&self, pte: &mut impl PTE, offset: usize, write: bool) -> KResult<()> {
         let mut attr = pte.get_attr().as_page_attr().expect("Not a page attribute");
         let mut pfn = pte.get_pfn();
 
@@ -167,7 +167,12 @@ impl MMArea {
             self.handle_mmap(&mut pfn, &mut attr, offset)?;
         }
 
-        attr.set(PageAttribute::ACCESSED, true);
+        attr.insert(PageAttribute::ACCESSED);
+
+        if write {
+            attr.insert(PageAttribute::DIRTY);
+        }
+
         pte.set(pfn, attr.into());
 
         Ok(())
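handle() now needs to know whether the fault was a write because the dirty bit is maintained in software on this port: the hardware does not set D on its own, and on loongarch64 the D bit in the TLB effectively is the write permission, so a store through a D = 0 entry raises a page-modify exception that lands back in this path with write == true. The same reasoning explains the mm_list.rs hunk below, which clears DIRTY together with WRITE when a mapping becomes copy-on-write, so the next store is guaranteed to trap. A minimal sketch of the flag logic, assuming the crate's PageAttribute bitflags:

    /// Sketch only: the attribute update a resolved fault applies.
    fn apply_fault_flags(attr: &mut PageAttribute, write: bool) {
        // Every resolved fault marks the page as accessed.
        attr.insert(PageAttribute::ACCESSED);
        // Only a write fault marks it dirty; a read fault leaves D clear so the
        // next store traps again and dirtiness stays tracked by the kernel.
        if write {
            attr.insert(PageAttribute::DIRTY);
        }
    }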

+ 2 - 2
src/kernel/mem/mm_list.rs

@@ -644,7 +644,7 @@ impl MMList {
                 let page_start = current.floor() + idx * 0x1000;
                 let page_end = page_start + 0x1000;
 
-                area.handle(pte, page_start - area_start)?;
+                area.handle(pte, page_start - area_start, true)?;
 
                 let start_offset;
                 if page_start < current {
@@ -761,7 +761,7 @@ where
             return;
         }
 
-        from_attr.remove(PageAttribute::WRITE);
+        from_attr.remove(PageAttribute::WRITE | PageAttribute::DIRTY);
         from_attr.insert(PageAttribute::COPY_ON_WRITE);
 
         let pfn = unsafe {

+ 13 - 3
src/kernel/mem/mm_list/page_fault.rs

@@ -90,8 +90,12 @@ impl MMList {
             .next()
             .expect("If we can find the mapped area, we should be able to find the PTE");
 
-        area.handle(pte, addr.floor() - area.range().start())
-            .map_err(|_| Signal::SIGBUS)?;
+        area.handle(
+            pte,
+            addr.floor() - area.range().start(),
+            error.contains(PageFaultErrorCode::Write),
+        )
+        .map_err(|_| Signal::SIGBUS)?;
 
         flush_tlb(addr.floor().addr());
 
@@ -160,9 +164,15 @@ pub fn handle_kernel_page_fault(
         .next()
         .expect("If we can find the mapped area, we should be able to find the PTE");
 
-    if let Err(_) = area.handle(pte, addr.floor() - area.range().start()) {
+    if let Err(_) = area.handle(
+        pte,
+        addr.floor() - area.range().start(),
+        error.contains(PageFaultErrorCode::Write),
+    ) {
         return Some(try_page_fault_fix(fault_pc, addr));
     }
 
+    flush_tlb(addr.floor().addr());
+
     None
 }
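The kernel-fault path was missing the TLB invalidation the user-fault path already had: once the PTE has been fixed in memory, a stale TLB entry for the same page (still invalid or still clean) can keep raising the same exception, so the flush has to follow every successful handle(). A minimal sketch of the ordering, reusing names from the hunks above (the exact signatures are assumptions):

    /// Sketch only: fix the PTE first, then drop any stale TLB entry for the page.
    fn resolve_fault(area: &MMArea, pte: &mut impl PTE, addr: VAddr, write: bool) -> KResult<()> {
        area.handle(pte, addr.floor() - area.range().start(), write)?;
        flush_tlb(addr.floor().addr());
        Ok(())
    }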

+ 1 - 1
src/kernel/syscall/mm.rs

@@ -101,7 +101,7 @@ fn do_mmap2(
     addr.map(|addr| addr.addr())
 }
 
-#[cfg(target_arch = "riscv64")]
+#[cfg(any(target_arch = "riscv64", target_arch = "loongarch64"))]
 #[eonix_macros::define_syscall(SYS_MMAP)]
 fn mmap(
     addr: usize,

+ 17 - 0
src/kernel/syscall/procops.rs

@@ -702,6 +702,9 @@ fn fork() -> KResult<u32> {
     do_clone(thread, clone_args)
 }
 
+// Some older platforms, including x86_32, riscv and arm, have the last two arguments
+// (`child_tidptr` and `tls`) swapped, so we need to define two versions of the `clone` syscall.
+#[cfg(not(target_arch = "loongarch64"))]
 #[eonix_macros::define_syscall(SYS_CLONE)]
 fn clone(
     clone_flags: usize,
@@ -715,6 +718,20 @@ fn clone(
     do_clone(thread, clone_args)
 }
 
+#[cfg(target_arch = "loongarch64")]
+#[eonix_macros::define_syscall(SYS_CLONE)]
+fn clone(
+    clone_flags: usize,
+    new_sp: usize,
+    parent_tidptr: usize,
+    child_tidptr: usize,
+    tls: usize,
+) -> KResult<u32> {
+    let clone_args = CloneArgs::for_clone(clone_flags, new_sp, child_tidptr, parent_tidptr, tls)?;
+
+    do_clone(thread, clone_args)
+}
+
 #[eonix_macros::define_syscall(SYS_FUTEX)]
 fn futex(
     uaddr: usize,
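The two clone wrappers exist because of the argument-order split noted in the comment above: the "backwards" platforms (x86_32, riscv, arm) pass tls before child_tidptr in the raw syscall, while loongarch64 follows the generic order with child_tidptr before tls. A minimal sketch of the difference (hypothetical helper, not part of the diff):

    /// Sketch only: map the five raw clone arguments to named values.
    fn clone_arg_order(
        backwards: bool, // true for x86_32 / riscv / arm
        args: [usize; 5],
    ) -> (usize, usize, usize, usize, usize) {
        let [flags, new_sp, parent_tidptr, a3, a4] = args;
        if backwards {
            // clone(flags, new_sp, parent_tidptr, tls, child_tidptr)
            (flags, new_sp, parent_tidptr, /* child_tidptr */ a4, /* tls */ a3)
        } else {
            // loongarch64, generic order:
            // clone(flags, new_sp, parent_tidptr, child_tidptr, tls)
            (flags, new_sp, parent_tidptr, /* child_tidptr */ a3, /* tls */ a4)
        }
    }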

+ 3 - 0
src/kernel/syscall/sysinfo.rs

@@ -53,6 +53,9 @@ fn newuname(buffer: *mut NewUTSName) -> KResult<()> {
     #[cfg(target_arch = "riscv64")]
     copy_cstr_to_array(b"riscv64", &mut uname.machine);
 
+    #[cfg(target_arch = "loongarch64")]
+    copy_cstr_to_array(b"loongarch64", &mut uname.machine);
+
     copy_cstr_to_array(b"(none)", &mut uname.domainname);
 
     buffer.write(uname)

+ 1 - 1
src/kernel/task/signal/signal_action.rs

@@ -50,7 +50,7 @@ unsafe extern "C" fn vdso_rt_sigreturn() {
     #[cfg(target_arch = "loongarch64")]
     naked_asm!(
         "li.d $a7, {sys_rt_sigreturn}",
-        "syscall {sys_rt_sigreturn}",
+        "syscall 0",
         sys_rt_sigreturn = const posix_types::syscall_no::SYS_RT_SIGRETURN,
     );
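The fix here is about what the immediate on syscall means: it is only an exception code visible to the handler (Linux expects 0), not the syscall number, which travels in $a7; the previous version encoded SYS_RT_SIGRETURN into the immediate even though $a7 was already being loaded with li.d. A minimal sketch of a raw syscall under that convention, assuming Rust accepts the $a*/$t* register aliases for loongarch64 inline asm and that, as in the Linux user ABI, the temporaries are not preserved across the syscall:

    use core::arch::asm;

    /// Sketch only: a one-argument raw syscall on loongarch64.
    unsafe fn raw_syscall1(nr: usize, arg0: usize) -> usize {
        let ret: usize;
        unsafe {
            asm!(
                "syscall 0",
                in("$a7") nr,                 // syscall number
                inout("$a0") arg0 => ret,     // first argument in, return value out
                // Temporaries are assumed clobbered by the kernel.
                out("$t0") _, out("$t1") _, out("$t2") _, out("$t3") _,
                out("$t4") _, out("$t5") _, out("$t6") _, out("$t7") _,
                out("$t8") _,
                options(nostack),
            );
        }
        ret
    }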
 

+ 2 - 2
src/kernel/task/thread.rs

@@ -185,7 +185,7 @@ impl ThreadBuilder {
         let mut trap_ctx = thread.trap_ctx.borrow().clone();
         trap_ctx.set_user_return_value(0);
 
-        #[cfg(target_arch = "riscv64")]
+        #[cfg(any(target_arch = "riscv64", target_arch = "loongarch64"))]
         {
             let pc = trap_ctx.get_program_counter();
             trap_ctx.set_program_counter(pc + 4);
@@ -410,7 +410,7 @@ impl Thread {
                         let mut trap_ctx = self.trap_ctx.borrow();
                         trap_ctx.set_user_return_value(retval);
 
-                        #[cfg(target_arch = "riscv64")]
+                        #[cfg(any(target_arch = "riscv64", target_arch = "loongarch64"))]
                         {
                             let pc = trap_ctx.get_program_counter();
                             trap_ctx.set_program_counter(pc + 4);