
Merge remote-tracking branch 'github/mm' into shiai-master

greatbridf · 3 months ago
commit a3f7ad83a1

7 changed files with 134 additions and 29 deletions:
  1. include/types/allocator.hpp    +0   -5
  2. src/kernel/allocator.cc        +8   -8
  3. src/kernel/cpu.rs              +1   -1
  4. src/kernel/mem/page_alloc.rs   +114 -13
  5. src/kernel/mem/paging.rs       +8   -1
  6. src/kinit.cpp                  +0   -1
  7. src/lib.rs                     +3   -0

+ 0 - 5
include/types/allocator.hpp

@@ -35,8 +35,3 @@ class brk_memory_allocator {
 };
 
 } // namespace types::memory
-
-namespace kernel::kinit {
-void init_allocator();
-
-} // namespace kernel::kinit

+ 8 - 8
src/kernel/allocator.cc

@@ -218,14 +218,14 @@ static constexpr int __cache_index(std::size_t size) {
     return -1;
 }
 
-void kernel::kinit::init_allocator() {
-    mem::init_slab_cache(caches + 0, 32);
-    mem::init_slab_cache(caches + 1, 64);
-    mem::init_slab_cache(caches + 2, 96);
-    mem::init_slab_cache(caches + 3, 128);
-    mem::init_slab_cache(caches + 4, 192);
-    mem::init_slab_cache(caches + 5, 256);
-    mem::init_slab_cache(caches + 6, 512);
+extern "C" void init_allocator() {
+    kernel::mem::init_slab_cache(caches + 0, 32);
+    kernel::mem::init_slab_cache(caches + 1, 64);
+    kernel::mem::init_slab_cache(caches + 2, 96);
+    kernel::mem::init_slab_cache(caches + 3, 128);
+    kernel::mem::init_slab_cache(caches + 4, 192);
+    kernel::mem::init_slab_cache(caches + 5, 256);
+    kernel::mem::init_slab_cache(caches + 6, 512);
 
     types::memory::k_alloc = new types::memory::brk_memory_allocator(
         (std::byte*)KERNEL_HEAP_START, KERNEL_HEAP_SIZE);

+ 1 - 1
src/kernel/cpu.rs

@@ -18,7 +18,7 @@ pub unsafe fn init_thiscpu() {
     let status = arch::CPUStatus::new_thiscpu(|layout| {
         // TODO: Use page size defined in `arch`.
         let page_count = (layout.size() + 0x1000 - 1) / 0x1000;
-        let page = Page::alloc_ceil(page_count);
+        let page = Page::early_alloc_ceil(page_count);
         let pointer = page.as_cached().as_ptr();
         core::mem::forget(page);
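
The switch to Page::early_alloc_ceil presumably reflects that init_thiscpu runs before this CPU's per-cpu storage is usable, so the allocation has to bypass the new per-cpu page cache added in page_alloc.rs below and go straight to the zone. For reference, a standalone sketch of the rounding arithmetic involved; pages_needed and buddy_order are hypothetical helper names used only for illustration, but the expressions are the ones in the hunk above and in Page::early_alloc_ceil further down:

    fn pages_needed(layout_size: usize) -> usize {
        // Same round-up as in init_thiscpu: ceil(layout_size / 0x1000).
        (layout_size + 0x1000 - 1) / 0x1000
    }

    fn buddy_order(count: usize) -> u32 {
        // Same rounding as Page::early_alloc_ceil: smallest order with 2^order >= count.
        assert_ne!(count, 0);
        count.next_power_of_two().trailing_zeros()
    }

    fn main() {
        assert_eq!(pages_needed(0x1800), 2); // 6 KiB still needs two 4 KiB pages
        assert_eq!(buddy_order(2), 1);       // two pages fit an order-1 block exactly
        assert_eq!(buddy_order(3), 2);       // three pages round up to an order-2 (4-page) block
    }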
 

+ 114 - 13
src/kernel/mem/page_alloc.rs

@@ -7,6 +7,8 @@ use core::{ptr::NonNull, sync::atomic::AtomicU32};
 use lazy_static::lazy_static;
 
 const MAX_PAGE_ORDER: u32 = 10;
+const PAGE_ALLOC_COSTLY_ORDER: u32 = 3;
+const BATCH_SIZE: u32 = 64;
 const PAGE_ARRAY: *mut Page = 0xffffff8040000000 as *mut Page;
 
 pub(super) type PagePtr = Ptr<Page>;
@@ -109,6 +111,7 @@ bitflags! {
         const SLAB    = 1 << 3;
         const DIRTY   = 1 << 4;
         const FREE    = 1 << 5;
+        const LOCAL   = 1 << 6;
     }
 }
 
@@ -137,6 +140,47 @@ struct Zone {
     free_areas: [FreeArea; MAX_PAGE_ORDER as usize + 1],
 }
 
+struct PerCpuPages {
+    batch: u32,
+    high: u32,
+    free_areas: [FreeArea; PAGE_ALLOC_COSTLY_ORDER as usize + 1],
+}
+
+impl PerCpuPages {
+    const fn new() -> Self {
+        Self {
+            batch: BATCH_SIZE,
+            high: 0,
+            free_areas: [const { FreeArea::new() }; PAGE_ALLOC_COSTLY_ORDER as usize + 1],
+        }
+    }
+
+    fn get_free_pages(&mut self, order: u32) -> PagePtr {
+        assert!(order <= PAGE_ALLOC_COSTLY_ORDER);
+
+        loop {
+            let pages_ptr = self.free_areas[order as usize].get_free_pages();
+            if pages_ptr.is_some() {
+                return pages_ptr;
+            }
+
+            let batch = self.batch >> order;
+            ZONE.lock()
+                .get_bulk_free_pages(&mut self.free_areas[order as usize], order, batch);
+        }
+    }
+
+    fn free_pages(&mut self, mut pages_ptr: PagePtr, order: u32) {
+        assert!(order <= PAGE_ALLOC_COSTLY_ORDER);
+        assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
+        assert_eq!(pages_ptr.get_order(), order);
+
+        unimplemented!()
+    }
+}
+
+
+
 impl Page {
     fn set_flags(&mut self, flags: PageFlags) {
         self.flags.insert(flags);
@@ -174,6 +218,10 @@ impl Page {
     pub fn is_free(&self) -> bool {
         self.flags.contains(PageFlags::FREE)
     }
+
+    pub fn is_local(&self) -> bool {
+        self.flags.contains(PageFlags::LOCAL)
+    }
 }
 
 impl FreeArea {
@@ -184,7 +232,7 @@ impl FreeArea {
         }
     }
 
-    fn alloc_pages(&mut self) -> PagePtr {
+    fn get_free_pages(&mut self) -> PagePtr {
         if let Some(pages_link) = self.free_list.next_mut() {
             assert_ne!(self.count, 0);
 
@@ -192,7 +240,6 @@ impl FreeArea {
             let pages_ptr = Ptr::from_raw(pages_ptr);
 
             self.count -= 1;
-            pages_ptr.as_mut().remove_flags(PageFlags::FREE);
             pages_link.remove();
 
             pages_ptr
@@ -222,22 +269,33 @@ impl Zone {
         }
     }
 
-    fn alloc_pages(&mut self, order: u32) -> PagePtr {
+    /// Only used for per-cpu pages
+    fn get_bulk_free_pages(&mut self, free_area: &mut FreeArea, order: u32, count: u32) -> u32 {
+        for i in 0..count {
+            let pages_ptr = self.get_free_pages(order);
+            if pages_ptr.is_none() {
+                return i;
+            }
+
+            pages_ptr.as_mut().set_flags(PageFlags::LOCAL);
+            free_area.add_pages(pages_ptr);
+        }
+        count
+    }
+
+    fn get_free_pages(&mut self, order: u32) -> PagePtr {
         for current_order in order..=MAX_PAGE_ORDER {
-            let pages_ptr = self.free_areas[current_order as usize].alloc_pages();
+            let pages_ptr = self.free_areas[current_order as usize].get_free_pages();
             if pages_ptr.is_none() {
                 continue;
             }
 
-            unsafe {
-                pages_ptr.as_mut().increase_refcount();
-            }
             pages_ptr.as_mut().set_order(order);
 
             if current_order > order {
                 self.expand(pages_ptr, current_order, order);
             }
-            assert!(pages_ptr.as_ref().is_present() && !pages_ptr.as_ref().is_free());
+            assert!(pages_ptr.as_ref().is_present() && pages_ptr.as_ref().is_free());
             return pages_ptr;
         }
         PagePtr::new(None)
@@ -297,6 +355,9 @@ impl Zone {
         if !(pages_ptr.as_ref().is_free()) {
             return false;
         }
+        if pages_ptr.as_ref().is_local() {
+            return false;
+        }
         if pages_ptr.as_ref().order != order {
             return false;
         }
@@ -324,16 +385,56 @@ impl Zone {
     }
 }
 
+#[arch::define_percpu]
+static PER_CPU_PAGES: PerCpuPages = PerCpuPages::new();
+
 lazy_static! {
     static ref ZONE: Spin<Zone> = Spin::new(Zone::new());
 }
 
+fn __alloc_pages(order: u32) -> PagePtr {
+    let pages_ptr;
+
+    if order <= PAGE_ALLOC_COSTLY_ORDER {
+        unsafe {
+            pages_ptr = PER_CPU_PAGES.as_mut().get_free_pages(order);
+        }
+    } else {
+        pages_ptr = ZONE.lock().get_free_pages(order);
+    }
+
+    unsafe {
+        pages_ptr.as_mut().increase_refcount();
+    }
+    pages_ptr.as_mut().remove_flags(PageFlags::FREE);
+    pages_ptr
+}
+
+fn __free_pages(pages_ptr: PagePtr, order: u32) {
+    if order <= PAGE_ALLOC_COSTLY_ORDER {
+        unsafe {
+            PER_CPU_PAGES.as_mut().free_pages(pages_ptr, order);
+        }
+    } else {
+        ZONE.lock().free_pages(pages_ptr, order);
+    }
+}
+
 pub(super) fn alloc_page() -> PagePtr {
-    ZONE.lock().alloc_pages(0)
+    __alloc_pages(0)
 }
 
 pub(super) fn alloc_pages(order: u32) -> PagePtr {
-    ZONE.lock().alloc_pages(order)
+    __alloc_pages(order)
+}
+
+pub(super) fn early_alloc_pages(order: u32) -> PagePtr {
+    let pages_ptr = ZONE.lock().get_free_pages(order);
+    unsafe {
+        pages_ptr.as_mut().increase_refcount();
+    }
+    pages_ptr.as_mut().remove_flags(PageFlags::FREE);
+    pages_ptr
 }
 
 pub(super) fn free_pages(page_ptr: PagePtr, order: u32) {
@@ -364,17 +465,17 @@ pub extern "C" fn page_to_pfn(page: *const Page) -> usize {
 
 #[no_mangle]
 pub extern "C" fn c_alloc_page() -> *const Page {
-    ZONE.lock().alloc_pages(0).as_ptr() as *const Page
+    alloc_page().as_ptr() as *const Page
 }
 
 #[no_mangle]
 pub extern "C" fn c_alloc_pages(order: u32) -> *const Page {
-    ZONE.lock().alloc_pages(order).as_ptr() as *const Page
+    alloc_pages(order).as_ptr() as *const Page
 }
 
 #[no_mangle]
 pub extern "C" fn c_alloc_page_table() -> usize {
-    let pfn: PFN = ZONE.lock().alloc_pages(0).into();
+    let pfn: PFN = alloc_page().into();
     let paddr: usize = usize::from(pfn) << 12;
     unsafe {
         core::ptr::write_bytes(paddr as *mut u8, 0, 4096);
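
PerCpuPages::free_pages is left as unimplemented!() in this commit, while the batch and high fields already exist. A rough sketch of how that body might be filled in, assuming the zone expects returned blocks with FREE and LOCAL cleared, that FreeArea::count can be compared against the watermark, and that high is configured to a nonzero value elsewhere; this is an illustration, not part of the commit:

    // Inside impl PerCpuPages:
    fn free_pages(&mut self, pages_ptr: PagePtr, order: u32) {
        assert!(order <= PAGE_ALLOC_COSTLY_ORDER);
        assert_eq!(unsafe { pages_ptr.load_refcount() }, 0);
        assert_eq!(pages_ptr.get_order(), order);

        // Park the block in this CPU's cache. LOCAL keeps the zone's buddy
        // merging from touching it while it sits here.
        pages_ptr.as_mut().set_flags(PageFlags::FREE | PageFlags::LOCAL);
        self.free_areas[order as usize].add_pages(pages_ptr);

        // Once the cache grows past the `high` watermark, hand blocks back
        // to the global zone so one CPU cannot hoard free pages.
        while self.high != 0 && (self.free_areas[order as usize].count as u32) > self.high {
            let cached = self.free_areas[order as usize].get_free_pages();
            if cached.is_none() {
                break;
            }
            cached.as_mut().remove_flags(PageFlags::FREE | PageFlags::LOCAL);
            ZONE.lock().free_pages(cached, order);
        }
    }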

+ 8 - 1
src/kernel/mem/paging.rs

@@ -1,5 +1,5 @@
 use super::address::PFN;
-use super::page_alloc::{alloc_page, alloc_pages, free_pages, PagePtr};
+use super::page_alloc::{alloc_page, alloc_pages, early_alloc_pages, free_pages, PagePtr};
 use super::phys::PhysPtr;
 use crate::io::{Buffer, FillResult};
 use crate::kernel::mem::phys;
@@ -29,6 +29,13 @@ impl Page {
         Self::alloc_many(order)
     }
 
+    pub fn early_alloc_ceil(count: usize) -> Self {
+        assert_ne!(count, 0);
+        let order = count.next_power_of_two().trailing_zeros();
+        let page_ptr = early_alloc_pages(order);
+        Self { page_ptr, order }
+    }
+
     /// Get `Page` from `pfn`, acquiring the ownership of the page. `refcount` is not increased.
     ///
     /// # Safety

+ 0 - 1
src/kinit.cpp

@@ -130,7 +130,6 @@ extern "C" void NORETURN kernel_init(bootloader_data* data) {
     }
 
     setup_buddy(addr_max);
-    init_allocator();
 
     using namespace mem::paging;
     auto kernel_stack_pfn = page_to_pfn(c_alloc_pages(9)) << 12;

+ 3 - 0
src/lib.rs

@@ -106,6 +106,7 @@ global_asm!(
 
 extern "C" {
     fn to_init_process();
+    fn init_allocator();
 }
 
 #[no_mangle]
@@ -114,6 +115,8 @@ pub extern "C" fn rust_kinit(early_kstack_pfn: usize) -> ! {
     // Rust doesn't need that, and we're not going to use global variables in C++.
     unsafe { init_thiscpu() };
 
+    unsafe { init_allocator() };
+
     kernel::interrupt::init().unwrap();
 
     // TODO: Move this to rust.
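
Taken together with the kinit.cpp hunk above, allocator bring-up now happens on the Rust side: kinit.cpp still calls setup_buddy, but init_allocator is invoked from rust_kinit after init_thiscpu. A minimal annotated view of the FFI surface this relies on; the safety notes are assumptions spelled out for clarity, not guarantees stated by the commit:

    extern "C" {
        // Defined as `extern "C" void init_allocator()` in src/kernel/allocator.cc.
        // Assumed contract: call exactly once per boot, after setup_buddy() has
        // prepared the page allocator and init_thiscpu() has set up per-cpu
        // storage, and before anything that relies on the C++ slab caches or
        // the brk heap runs.
        fn init_allocator();
    }

    // Resulting boot order in rust_kinit():
    //     unsafe { init_thiscpu() };
    //     unsafe { init_allocator() };
    //     kernel::interrupt::init().unwrap();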