
Merge pull request #5 from Shao-ZW:master

Refactor slab allocator implementation
greatbridf 8 months ago
parent
commit
f4e68bd903

+ 10 - 0
Cargo.lock

@@ -158,6 +158,7 @@ dependencies = [
  "itertools",
  "pointers",
  "posix_types",
+ "slab_allocator",
 ]
 
 [[package]]
@@ -327,6 +328,15 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
 
+[[package]]
+name = "slab_allocator"
+version = "0.1.0"
+dependencies = [
+ "eonix_mm",
+ "eonix_sync",
+ "intrusive_list",
+]
+
 [[package]]
 name = "syn"
 version = "2.0.89"

+ 1 - 0
Cargo.toml

@@ -12,6 +12,7 @@ atomic_unique_refcell = { path = "./crates/atomic_unique_refcell", features = [
     "no_std",
 ] }
 buddy_allocator = { path = "./crates/buddy_allocator" }
+slab_allocator = { path = "./crates/slab_allocator" }
 eonix_mm = { path = "./crates/eonix_mm" }
 eonix_preempt = { path = "./crates/eonix_preempt" }
 eonix_runtime = { path = "./crates/eonix_runtime" }

+ 8 - 0
crates/intrusive_list/src/lib.rs

@@ -41,6 +41,14 @@ impl List {
             node
         })
     }
+
+    pub fn is_empty(&self) -> bool {
+        self.count == 0
+    }
+
+    pub fn head(&mut self) -> Option<&mut Link> {
+        self.head.next_mut()
+    }
 }
 
 impl Link {

+ 10 - 0
crates/slab_allocator/Cargo.toml

@@ -0,0 +1,10 @@
+[package]
+name = "slab_allocator"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+eonix_mm = { path = "../eonix_mm" }
+eonix_sync = { path = "../eonix_sync" }
+intrusive_list = { path = "../intrusive_list" }
+

+ 69 - 0
crates/slab_allocator/src/lib.rs

@@ -0,0 +1,69 @@
+#![no_std]
+
+mod slab_cache;
+
+use core::{cmp::max, ptr::NonNull};
+
+use eonix_mm::paging::{PageAlloc, RawPage};
+use eonix_sync::Spin;
+use intrusive_list::Link;
+use slab_cache::SlabCache;
+
+pub trait SlabRawPage: RawPage {
+    /// Get the containing raw page struct from the list link.
+    ///
+    /// # Safety
+    /// The caller MUST ensure that the link points to a `RawPage`.
+    unsafe fn from_link(link: &mut Link) -> Self;
+
+    /// Get the list link of the raw page.
+    ///
+    /// # Safety
+    /// The caller MUST ensure that at any time, only one mutable reference
+    /// to the link exists.
+    unsafe fn get_link(&self) -> &mut Link;
+
+    fn slab_init(&self, first_free: Option<NonNull<usize>>);
+
+    /// Get the slab page that `ptr` belongs to.
+    fn in_which(ptr: *mut u8) -> Self;
+
+    fn real_page_ptr(&self) -> *mut u8;
+
+    fn allocated_count(&self) -> &mut u32;
+
+    fn next_free(&self) -> &mut Option<NonNull<usize>>;
+}
+
+pub struct SlabAllocator<T, A, const SLAB_CACHE_COUNT: usize> {
+    slabs: [Spin<SlabCache<T, A>>; SLAB_CACHE_COUNT],
+    alloc: A,
+}
+
+unsafe impl<T, A, const SLAB_CACHE_COUNT: usize> Send for SlabAllocator<T, A, SLAB_CACHE_COUNT> {}
+unsafe impl<T, A, const SLAB_CACHE_COUNT: usize> Sync for SlabAllocator<T, A, SLAB_CACHE_COUNT> {}
+
+impl<Raw, Allocator, const SLAB_CACHE_COUNT: usize> SlabAllocator<Raw, Allocator, SLAB_CACHE_COUNT>
+where
+    Raw: SlabRawPage,
+    Allocator: PageAlloc<RawPage = Raw>,
+{
+    pub fn new_in(alloc: Allocator) -> Self {
+        Self {
+            slabs: core::array::from_fn(|i| Spin::new(SlabCache::new_in(1 << (i + 3)))),
+            alloc,
+        }
+    }
+
+    pub fn alloc(&self, mut size: usize) -> *mut u8 {
+        size = max(8, size);
+        let idx = size.next_power_of_two().trailing_zeros() - 3;
+        self.slabs[idx as usize].lock().alloc(&self.alloc)
+    }
+
+    pub fn dealloc(&self, ptr: *mut u8, mut size: usize) {
+        size = max(8, size);
+        let idx = size.next_power_of_two().trailing_zeros() - 3;
+        self.slabs[idx as usize].lock().dealloc(ptr, &self.alloc);
+    }
+}
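
A minimal, standalone sketch of the size-class arithmetic used by `SlabAllocator::alloc` and `dealloc` above: requests are clamped to at least 8 bytes, rounded up to the next power of two, and the exponent minus 3 selects the cache, so cache `i` serves objects of `8 << i` bytes. The helper name and the assertions here are illustrative only, not part of the patch.

    fn size_class_index(size: usize) -> usize {
        // Clamp to the smallest object size, round up to a power of two,
        // then map 8 -> 0, 16 -> 1, ..., 2048 -> 8.
        let size = size.max(8);
        (size.next_power_of_two().trailing_zeros() - 3) as usize
    }

    fn main() {
        // With SLAB_CACHE_COUNT = 9, the classes cover 8, 16, ..., 2048 bytes.
        assert_eq!(size_class_index(1), 0);    // tiny requests share the 8-byte class
        assert_eq!(size_class_index(24), 2);   // 24 bytes -> 32-byte class
        assert_eq!(size_class_index(2048), 8); // largest slab-backed class
    }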

+ 164 - 0
crates/slab_allocator/src/slab_cache.rs

@@ -0,0 +1,164 @@
+use super::SlabRawPage;
+use core::{marker::PhantomData, ptr::NonNull};
+use eonix_mm::paging::{PageAlloc, PAGE_SIZE};
+use intrusive_list::List;
+
+pub(crate) struct SlabCache<T, A> {
+    empty_list: List,
+    partial_list: List,
+    full_list: List,
+    object_size: u32,
+    _phantom: PhantomData<(T, A)>,
+}
+
+trait SlabRawPageExt {
+    fn alloc_slot(&self) -> Option<NonNull<usize>>;
+    fn dealloc_slot(&self, slot_ptr: *mut u8);
+    fn is_full(&self) -> bool;
+    fn is_empty(&self) -> bool;
+    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>>;
+}
+
+impl<T> SlabRawPageExt for T
+where
+    T: SlabRawPage,
+{
+    fn alloc_slot(&self) -> Option<NonNull<usize>> {
+        let ptr = self.next_free().clone();
+
+        let next_free = match ptr {
+            Some(ptr) => unsafe { ptr.read() as *mut usize },
+            None => unreachable!(),
+        };
+        *self.allocated_count() += 1;
+        *self.next_free() = NonNull::new(next_free);
+        return ptr;
+    }
+
+    fn dealloc_slot(&self, slot_ptr: *mut u8) {
+        let slot_ptr = slot_ptr as *mut usize;
+
+        if let Some(last_free) = self.next_free().clone() {
+            unsafe { *slot_ptr = last_free.as_ptr() as usize }
+        } else {
+            unsafe { *slot_ptr = 0 }
+        }
+
+        *self.allocated_count() -= 1;
+        *self.next_free() = NonNull::new(slot_ptr);
+    }
+
+    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>> {
+        assert!(object_size >= core::mem::size_of::<usize>() as u32);
+
+        let first_free = self.real_page_ptr() as *mut usize;
+
+        let mut slot_ptr = first_free;
+        let mut slot_count = PAGE_SIZE / object_size as usize;
+
+        // SAFETY: `slot_ptr` always stays within the page, and each write targets a free, properly aligned slot.
+        unsafe {
+            loop {
+                if slot_count == 1 {
+                    *slot_ptr = 0;
+                    break;
+                }
+
+                let next_ptr = slot_ptr.byte_add(object_size as usize);
+                *slot_ptr = next_ptr as usize;
+                slot_ptr = next_ptr;
+                slot_count -= 1;
+            }
+        }
+
+        NonNull::new(first_free)
+    }
+
+    fn is_empty(&self) -> bool {
+        self.allocated_count().clone() == 0
+    }
+
+    fn is_full(&self) -> bool {
+        self.next_free().is_none()
+    }
+}
+
+impl<Raw, Allocator> SlabCache<Raw, Allocator>
+where
+    Raw: SlabRawPage,
+    Allocator: PageAlloc<RawPage = Raw>,
+{
+    pub(crate) const fn new_in(object_size: u32) -> Self {
+        // Objects are at most half a page, which avoids an unnecessary branch in alloc and dealloc.
+        assert!(object_size <= PAGE_SIZE as u32 / 2);
+
+        Self {
+            empty_list: List::new(),
+            partial_list: List::new(),
+            full_list: List::new(),
+            object_size,
+            _phantom: PhantomData,
+        }
+    }
+
+    pub(crate) fn alloc(&mut self, alloc: &Allocator) -> *mut u8 {
+        if !self.partial_list.is_empty() {
+            let page_ptr = unsafe {
+                Raw::from_link(
+                    self.partial_list
+                        .head()
+                        .expect("partial pages should not be empty"),
+                )
+            };
+
+            let ptr = page_ptr.alloc_slot().expect("should get slot");
+
+            if page_ptr.is_full() {
+                self.partial_list.remove(unsafe { page_ptr.get_link() });
+                self.full_list.insert(unsafe { page_ptr.get_link() });
+            }
+            return ptr.as_ptr() as *mut u8;
+        }
+
+        if !self.empty_list.is_empty() {
+            let page_ptr = unsafe {
+                Raw::from_link(
+                    self.empty_list
+                        .head()
+                        .expect("empty pages should not be empty"),
+                )
+            };
+
+            let ptr = page_ptr.alloc_slot().expect("should get slot");
+            self.empty_list.remove(unsafe { page_ptr.get_link() });
+            self.partial_list.insert(unsafe { page_ptr.get_link() });
+            return ptr.as_ptr() as *mut u8;
+        }
+
+        let new_page_ptr = alloc.alloc().expect("slab_cache failed to allocate a page");
+        let first_free = new_page_ptr.slab_page_init(self.object_size);
+        new_page_ptr.slab_init(first_free);
+        let ptr = new_page_ptr.alloc_slot().expect("should get slot");
+        self.partial_list.insert(unsafe { new_page_ptr.get_link() });
+        ptr.as_ptr() as *mut u8
+    }
+
+    pub(crate) fn dealloc(&mut self, ptr: *mut u8, _alloc: &Allocator) {
+        let page_ptr = Raw::in_which(ptr);
+
+        if page_ptr.is_full() {
+            self.full_list.remove(unsafe { page_ptr.get_link() });
+            self.partial_list.insert(unsafe { page_ptr.get_link() });
+        }
+
+        page_ptr.dealloc_slot(ptr);
+
+        if page_ptr.is_empty() {
+            self.partial_list.remove(unsafe { page_ptr.get_link() });
+            self.empty_list.insert(unsafe { page_ptr.get_link() });
+        }
+
+        // TODO: Check whether we should place some pages back with `alloc` if the global
+        //       free page count is below the watermark.
+    }
+}
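
A minimal, standalone sketch of the in-page free list that `slab_page_init` builds: every free slot stores the address of the next free slot, and the last slot stores 0 as the terminator, so `alloc_slot` and `dealloc_slot` can push and pop slots with a single read or write. This runs in user space against a plain buffer; the buffer, constants, and names are stand-ins for the kernel's page memory.

    fn main() {
        const PAGE_SIZE: usize = 4096;
        const OBJECT_SIZE: usize = 64;

        // A usize-aligned buffer standing in for one physical page.
        let mut page = [0usize; PAGE_SIZE / core::mem::size_of::<usize>()];
        let first = page.as_mut_ptr();

        // Thread the free list the same way `slab_page_init` does.
        unsafe {
            let mut slot = first;
            let mut remaining = PAGE_SIZE / OBJECT_SIZE;
            while remaining > 1 {
                let next = slot.byte_add(OBJECT_SIZE);
                *slot = next as usize; // each free slot holds the next slot's address
                slot = next;
                remaining -= 1;
            }
            *slot = 0; // the last slot terminates the list
        }

        // Walking the list visits every slot exactly once before hitting the terminator.
        let mut count = 0;
        let mut cur = first;
        while !cur.is_null() {
            count += 1;
            cur = unsafe { *cur } as *mut usize;
        }
        assert_eq!(count, PAGE_SIZE / OBJECT_SIZE);
    }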

+ 1 - 0
src/kernel/mem.rs

@@ -2,6 +2,7 @@ pub mod paging;
 
 mod access;
 mod address;
+mod allocator;
 mod mm_area;
 mod mm_list;
 mod page_alloc;

+ 22 - 1
src/kernel/mem/access.rs

@@ -1,7 +1,9 @@
 use core::{num::NonZero, ptr::NonNull};
-use eonix_mm::address::{Addr as _, PAddr};
+use eonix_mm::address::{Addr as _, PAddr, VAddr};
 use eonix_mm::paging::{PageAccess, PageBlock, PFN};
 
+use super::page_alloc::RawPagePtr;
+
 const PHYS_OFFSET: usize = 0xffff_ff00_0000_0000;
 
 /// A block of memory starting at a non-zero address and having a specific length.
@@ -156,3 +158,22 @@ impl PageAccess for KernelPageAccess {
         }
     }
 }
+
+pub trait RawPageAccess {
+    /// Translate the address to the `RawPagePtr` of the page it belongs to.
+    /// Use it with care.
+    ///
+    /// # Panics
+    /// Panics if the address is not properly aligned.
+    ///
+    /// # Safety
+    /// The address must be a kernel-accessible pointer.
+    unsafe fn as_raw_page(&self) -> RawPagePtr;
+}
+
+impl RawPageAccess for VAddr {
+    unsafe fn as_raw_page(&self) -> RawPagePtr {
+        let pfn: PFN = PAddr::from(self.addr() - PHYS_OFFSET).into();
+        RawPagePtr::from(pfn)
+    }
+}
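
The translation in `as_raw_page` relies on the kernel's linear mapping: a virtual address in the direct map is the physical address plus `PHYS_OFFSET`, so subtracting the offset and converting to a PFN locates the page's `RawPage` entry. A small arithmetic-only sketch, assuming the usual 4 KiB pages (`PAGE_SIZE_BITS = 12`); the addresses are made up for illustration.

    fn main() {
        const PHYS_OFFSET: usize = 0xffff_ff00_0000_0000;
        const PAGE_SIZE_BITS: usize = 12;

        // Some linearly-mapped kernel virtual address.
        let vaddr: usize = PHYS_OFFSET + 0x0020_3000;

        // Undo the direct-map offset, then shift to get the page frame number.
        let paddr = vaddr - PHYS_OFFSET;
        let pfn = paddr >> PAGE_SIZE_BITS;

        assert_eq!(paddr, 0x0020_3000);
        assert_eq!(pfn, 0x203);
    }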

+ 54 - 0
src/kernel/mem/allocator.rs

@@ -0,0 +1,54 @@
+use core::alloc::{GlobalAlloc, Layout};
+use eonix_mm::address::VAddr;
+use eonix_mm::paging::{PAGE_SIZE_BITS, PFN};
+use eonix_sync::LazyLock;
+use slab_allocator::SlabAllocator;
+
+use super::access::RawPageAccess;
+use super::page_alloc::RawPagePtr;
+use super::{AsMemoryBlock, GlobalPageAlloc, Page};
+
+static SLAB_ALLOCATOR: LazyLock<SlabAllocator<RawPagePtr, GlobalPageAlloc, 9>> =
+    LazyLock::new(|| SlabAllocator::new_in(GlobalPageAlloc));
+
+struct Allocator;
+
+unsafe impl GlobalAlloc for Allocator {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let size = layout.size().next_power_of_two();
+
+        let result = if size <= 2048 {
+            SLAB_ALLOCATOR.alloc(size)
+        } else {
+            let page_count = size >> PAGE_SIZE_BITS;
+            let page = Page::alloc_at_least(page_count);
+
+            let ptr = page.as_memblk().as_ptr();
+            page.into_raw();
+
+            ptr.as_ptr()
+        };
+
+        if result.is_null() {
+            core::ptr::null_mut()
+        } else {
+            result as *mut u8
+        }
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let size = layout.size().next_power_of_two();
+
+        if size <= 2048 {
+            SLAB_ALLOCATOR.dealloc(ptr, size)
+        } else {
+            let vaddr = VAddr::from(ptr as usize);
+            let page_ptr = vaddr.as_raw_page();
+            let pfn = PFN::from(page_ptr);
+            Page::from_raw(pfn);
+        };
+    }
+}
+
+#[global_allocator]
+static ALLOCATOR: Allocator = Allocator;
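
The new `GlobalAlloc` above routes by rounded-up size: anything whose power-of-two size fits a slab class (up to 2048 bytes) goes to `SLAB_ALLOCATOR`, and everything larger falls back to whole pages via `Page::alloc_at_least`. A small sketch of just that routing decision; the helper name and the layouts are illustrative only.

    use core::alloc::Layout;

    fn routed_to_slab(layout: Layout) -> bool {
        // Mirror of the cutoff used in `Allocator::alloc`.
        layout.size().next_power_of_two() <= 2048
    }

    fn main() {
        assert!(routed_to_slab(Layout::new::<u64>()));                // 8 bytes    -> slab
        assert!(routed_to_slab(Layout::array::<u8>(1500).unwrap()));  // 2048 class -> slab
        assert!(!routed_to_slab(Layout::array::<u8>(3000).unwrap())); // 4096 bytes -> page allocator
    }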

+ 3 - 1
src/kernel/mem/page_alloc.rs

@@ -9,7 +9,9 @@ use eonix_mm::{
 };
 use eonix_sync::Spin;
 use intrusive_list::List;
-use raw_page::{PageFlags, RawPage, RawPagePtr};
+use raw_page::{PageFlags, RawPage};
+
+pub use raw_page::RawPagePtr;
 
 const COSTLY_ORDER: u32 = 3;
 const BATCH_SIZE: u32 = 64;

+ 83 - 5
src/kernel/mem/page_alloc/raw_page.rs

@@ -3,24 +3,64 @@ use core::{
     ptr::NonNull,
     sync::atomic::{AtomicU32, AtomicUsize, Ordering},
 };
-use eonix_mm::paging::{RawPage as RawPageTrait, PFN};
+use eonix_mm::{
+    address::{PAddr, VAddr},
+    paging::{RawPage as RawPageTrait, PAGE_SIZE, PFN},
+};
 use intrusive_list::{container_of, Link};
+use slab_allocator::SlabRawPage;
+
+use crate::kernel::mem::access::RawPageAccess;
+use crate::kernel::mem::PhysAccess;
 
 const PAGE_ARRAY: NonNull<RawPage> =
     unsafe { NonNull::new_unchecked(0xffffff8040000000 as *mut _) };
 
 pub struct PageFlags(AtomicU32);
 
+struct SlabPageInner {
+    allocated_count: u32,
+    free_next: Option<NonNull<usize>>,
+}
+
+impl SlabPageInner {
+    fn new(free_next: Option<NonNull<usize>>) -> Self {
+        Self {
+            allocated_count: 0,
+            free_next,
+        }
+    }
+}
+
+pub struct BuddyPageInner {}
+
+enum PageType {
+    Buddy(BuddyPageInner),
+    Slab(SlabPageInner),
+}
+
+impl PageType {
+    fn slab_data(&mut self) -> &mut SlabPageInner {
+        if let PageType::Slab(slab_data) = self {
+            return slab_data;
+        } else {
+            unreachable!()
+        }
+    }
+}
+
 pub struct RawPage {
     /// This can be used for LRU page swap in the future.
     ///
     /// Now only used for free page links in the buddy system.
-    pub link: Link,
+    link: Link,
     /// # Safety
     /// This field is only used in buddy system and is protected by the global lock.
-    pub order: u32,
-    pub flags: PageFlags,
-    pub refcount: AtomicUsize,
+    order: u32,
+    flags: PageFlags,
+    refcount: AtomicUsize,
+
+    shared_data: PageType,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -80,6 +120,12 @@ impl RawPagePtr {
     pub const fn refcount(&self) -> &AtomicUsize {
         &self.as_ref().refcount
     }
+
+    /// Return a pointer to the actual page memory described by this `RawPagePtr`.
+    pub fn real_ptr<T>(&self) -> NonNull<T> {
+        let pfn = unsafe { PFN::from(RawPagePtr(NonNull::new_unchecked(self.as_ptr()))) };
+        unsafe { PAddr::from(pfn).as_ptr::<T>() }
+    }
 }
 
 impl From<RawPagePtr> for PFN {
@@ -148,3 +194,35 @@ impl BuddyRawPage for RawPagePtr {
         self.flags().clear(PageFlags::FREE);
     }
 }
+
+impl SlabRawPage for RawPagePtr {
+    unsafe fn from_link(link: &mut Link) -> Self {
+        let raw_page_ptr = container_of!(link, RawPage, link);
+        Self(raw_page_ptr)
+    }
+
+    unsafe fn get_link(&self) -> &mut Link {
+        &mut self.as_mut().link
+    }
+
+    fn in_which(ptr: *mut u8) -> RawPagePtr {
+        let vaddr = VAddr::from(ptr as usize & !(PAGE_SIZE - 1));
+        unsafe { vaddr.as_raw_page() }
+    }
+
+    fn allocated_count(&self) -> &mut u32 {
+        &mut self.as_mut().shared_data.slab_data().allocated_count
+    }
+
+    fn next_free(&self) -> &mut Option<NonNull<usize>> {
+        &mut self.as_mut().shared_data.slab_data().free_next
+    }
+
+    fn real_page_ptr(&self) -> *mut u8 {
+        self.real_ptr().as_ptr()
+    }
+
+    fn slab_init(&self, first_free: Option<NonNull<usize>>) {
+        self.as_mut().shared_data = PageType::Slab(SlabPageInner::new(first_free));
+    }
+}
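
`SlabRawPage::from_link` recovers the `RawPage` that embeds a given `Link` via `container_of!`. A minimal, self-contained sketch of that pattern, using stand-in types rather than the actual `intrusive_list` API:

    use core::mem::offset_of;
    use core::ptr::NonNull;

    struct Link {
        next: Option<NonNull<Link>>,
    }

    struct RawPage {
        order: u32,
        link: Link,
    }

    /// Recover a pointer to the containing `RawPage` from a pointer to its `link`
    /// field by subtracting the field's offset within the struct.
    unsafe fn page_from_link(link: NonNull<Link>) -> NonNull<RawPage> {
        let addr = link.as_ptr() as usize - offset_of!(RawPage, link);
        NonNull::new_unchecked(addr as *mut RawPage)
    }

    fn main() {
        let mut page = RawPage {
            order: 3,
            link: Link { next: None },
        };
        let page_addr = &mut page as *mut RawPage;
        let link_ptr = unsafe { NonNull::from(&mut (*page_addr).link) };
        let recovered = unsafe { page_from_link(link_ptr) };
        assert_eq!(recovered.as_ptr(), page_addr);
    }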

+ 1 - 30
src/lib.rs

@@ -25,7 +25,6 @@ mod rcu;
 mod sync;
 
 use alloc::{ffi::CString, sync::Arc};
-use core::alloc::{GlobalAlloc, Layout};
 use elf::ParsedElf32;
 use eonix_mm::{address::PAddr, paging::PFN};
 use eonix_runtime::{run::FutureRun, scheduler::Scheduler, task::Task};
@@ -61,37 +60,9 @@ fn panic(info: &core::panic::PanicInfo) -> ! {
     arch::freeze()
 }
 
-extern "C" {
-    fn _do_allocate(size: usize) -> *mut core::ffi::c_void;
-    fn _do_deallocate(ptr: *mut core::ffi::c_void, size: core::ffi::c_size_t) -> i32;
-    fn init_pci();
-}
-
-struct Allocator;
-unsafe impl GlobalAlloc for Allocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        let result = _do_allocate(layout.size());
-
-        if result.is_null() {
-            core::ptr::null_mut()
-        } else {
-            result as *mut u8
-        }
-    }
-
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        match _do_deallocate(ptr as *mut core::ffi::c_void, layout.size()) {
-            0 => (),
-            _ => panic!("Failed to deallocate memory"),
-        }
-    }
-}
-
-#[global_allocator]
-static ALLOCATOR: Allocator = Allocator;
-
 extern "C" {
     fn init_allocator();
+    fn init_pci();
 }
 
 #[no_mangle]