
Merge branch 'master' into remove-cpp

greatbridf 8 months ago
parent commit 24008a54a2

+ 1 - 5
CMakeLists.txt

@@ -37,11 +37,7 @@ set(BOOTLOADER_SOURCES src/boot.s
                        src/mbr.S
                        )
 
-set(KERNEL_MAIN_SOURCES src/kernel/async/lock.cc
-                        src/kernel/allocator.cc
-                        src/kernel/mem/slab.cc
-                        src/types/libstdcpp.cpp
-                        include/kernel/async/lock.hpp
+set(KERNEL_MAIN_SOURCES include/kernel/async/lock.hpp
                         include/kernel/mem/paging.hpp
                         include/kernel/mem/slab.hpp
                         include/types/list.hpp

+ 10 - 0
Cargo.lock

@@ -242,6 +242,7 @@ dependencies = [
  "itertools",
  "pointers",
  "posix_types",
+ "slab_allocator",
 ]
 
 [[package]]
@@ -402,6 +403,15 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
 
+[[package]]
+name = "slab_allocator"
+version = "0.1.0"
+dependencies = [
+ "eonix_mm",
+ "eonix_sync",
+ "intrusive_list",
+]
+
 [[package]]
 name = "syn"
 version = "2.0.89"

+ 1 - 0
Cargo.toml

@@ -23,6 +23,7 @@ eonix_log = { path = "./crates/eonix_log" }
 intrusive_list = { path = "./crates/intrusive_list" }
 pointers = { path = "./crates/pointers" }
 posix_types = { path = "./crates/posix_types" }
+slab_allocator = { path = "./crates/slab_allocator" }
 
 bitflags = "2.6.0"
 intrusive-collections = "0.9.7"

+ 8 - 0
crates/intrusive_list/src/lib.rs

@@ -41,6 +41,14 @@ impl List {
             node
         })
     }
+
+    pub fn is_empty(&self) -> bool {
+        self.count == 0
+    }
+
+    pub fn head(&mut self) -> Option<&mut Link> {
+        self.head.next_mut()
+    }
 }
 
 impl Link {

+ 10 - 0
crates/slab_allocator/Cargo.toml

@@ -0,0 +1,10 @@
+[package]
+name = "slab_allocator"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+eonix_mm = { path = "../eonix_mm" }
+eonix_sync = { path = "../eonix_sync" }
+intrusive_list = { path = "../intrusive_list" }
+

+ 69 - 0
crates/slab_allocator/src/lib.rs

@@ -0,0 +1,69 @@
+#![no_std]
+
+mod slab_cache;
+
+use core::{cmp::max, ptr::NonNull};
+
+use eonix_mm::paging::{PageAlloc, RawPage};
+use eonix_sync::Spin;
+use intrusive_list::Link;
+use slab_cache::SlabCache;
+
+pub trait SlabRawPage: RawPage {
+    /// Get the containing raw page struct from its list link.
+    ///
+    /// # Safety
+    /// The caller MUST ensure that the link is embedded in a `RawPage`.
+    unsafe fn from_link(link: &mut Link) -> Self;
+
+    /// Get the list link of the raw page.
+    ///
+    /// # Safety
+    /// The caller MUST ensure that at any time, only one mutable reference
+    /// to the link exists.
+    unsafe fn get_link(&self) -> &mut Link;
+
+    fn slab_init(&self, first_free: Option<NonNull<usize>>);
+
+    /// Return the slab page that `ptr` belongs to.
+    fn in_which(ptr: *mut u8) -> Self;
+
+    fn real_page_ptr(&self) -> *mut u8;
+
+    fn allocated_count(&self) -> &mut u32;
+
+    fn next_free(&self) -> &mut Option<NonNull<usize>>;
+}
+
+pub struct SlabAllocator<T, A, const SLAB_CACHE_COUNT: usize> {
+    slabs: [Spin<SlabCache<T, A>>; SLAB_CACHE_COUNT],
+    alloc: A,
+}
+
+unsafe impl<T, A, const SLAB_CACHE_COUNT: usize> Send for SlabAllocator<T, A, SLAB_CACHE_COUNT> {}
+unsafe impl<T, A, const SLAB_CACHE_COUNT: usize> Sync for SlabAllocator<T, A, SLAB_CACHE_COUNT> {}
+
+impl<Raw, Allocator, const SLAB_CACHE_COUNT: usize> SlabAllocator<Raw, Allocator, SLAB_CACHE_COUNT>
+where
+    Raw: SlabRawPage,
+    Allocator: PageAlloc<RawPage = Raw>,
+{
+    pub fn new_in(alloc: Allocator) -> Self {
+        Self {
+            slabs: core::array::from_fn(|i| Spin::new(SlabCache::new_in(1 << (i + 3)))),
+            alloc,
+        }
+    }
+
+    pub fn alloc(&self, mut size: usize) -> *mut u8 {
+        size = max(8, size);
+        let idx = size.next_power_of_two().trailing_zeros() - 3;
+        self.slabs[idx as usize].lock().alloc(&self.alloc)
+    }
+
+    pub fn dealloc(&self, ptr: *mut u8, mut size: usize) {
+        size = max(8, size);
+        let idx = size.next_power_of_two().trailing_zeros() - 3;
+        self.slabs[idx as usize].lock().dealloc(ptr, &self.alloc);
+    }
+}
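`SlabAllocator::alloc` and `dealloc` map a request size to a cache index with `max(8, size).next_power_of_two().trailing_zeros() - 3`, so cache `i` serves objects of `1 << (i + 3)` bytes; with `SLAB_CACHE_COUNT = 9` (the value used in `src/kernel/mem/allocator.rs` later in this diff) that covers 8 through 2048 bytes. A minimal standalone sketch of just that mapping, not part of the commit's code:

```rust
// Sketch of the size-to-cache mapping used by SlabAllocator above.
// Cache i serves objects of 1 << (i + 3) bytes: 8, 16, ..., 2048 when
// SLAB_CACHE_COUNT = 9.
fn cache_index(size: usize) -> usize {
    let size = size.max(8);
    (size.next_power_of_two().trailing_zeros() - 3) as usize
}

fn main() {
    assert_eq!(cache_index(1), 0);    // rounds up to the 8-byte cache
    assert_eq!(cache_index(24), 2);   // next power of two is 32 -> cache 2
    assert_eq!(cache_index(2048), 8); // largest slab-served size
}
```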

+ 164 - 0
crates/slab_allocator/src/slab_cache.rs

@@ -0,0 +1,164 @@
+use super::SlabRawPage;
+use core::{marker::PhantomData, ptr::NonNull};
+use eonix_mm::paging::{PageAlloc, PAGE_SIZE};
+use intrusive_list::List;
+
+pub(crate) struct SlabCache<T, A> {
+    empty_list: List,
+    partial_list: List,
+    full_list: List,
+    object_size: u32,
+    _phantom: PhantomData<(T, A)>,
+}
+
+trait SlabRawPageExt {
+    fn alloc_slot(&self) -> Option<NonNull<usize>>;
+    fn dealloc_slot(&self, slot_ptr: *mut u8);
+    fn is_full(&self) -> bool;
+    fn is_empty(&self) -> bool;
+    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>>;
+}
+
+impl<T> SlabRawPageExt for T
+where
+    T: SlabRawPage,
+{
+    fn alloc_slot(&self) -> Option<NonNull<usize>> {
+        let ptr = self.next_free().clone();
+
+        let next_free = match ptr {
+            Some(ptr) => unsafe { ptr.read() as *mut usize },
+            None => unreachable!(),
+        };
+        *self.allocated_count() += 1;
+        *self.next_free() = NonNull::new(next_free);
+        return ptr;
+    }
+
+    fn dealloc_slot(&self, slot_ptr: *mut u8) {
+        let slot_ptr = slot_ptr as *mut usize;
+
+        if let Some(last_free) = self.next_free().clone() {
+            unsafe { *slot_ptr = last_free.as_ptr() as usize }
+        } else {
+            unsafe { *slot_ptr = 0 }
+        }
+
+        *self.allocated_count() -= 1;
+        *self.next_free() = NonNull::new(slot_ptr);
+    }
+
+    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>> {
+        assert!(object_size >= core::mem::size_of::<usize>() as u32);
+
+        let first_free = self.real_page_ptr() as *mut usize;
+
+        let mut slot_ptr = first_free;
+        let mut slot_count = PAGE_SIZE / object_size as usize;
+
+        // SAFETY: slots lie within this page and, for the power-of-two object sizes used here, are `usize`-aligned, so the writes below are valid.
+        unsafe {
+            loop {
+                if slot_count == 1 {
+                    *slot_ptr = 0;
+                    break;
+                }
+
+                let next_ptr = slot_ptr.byte_add(object_size as usize);
+                *slot_ptr = next_ptr as usize;
+                slot_ptr = next_ptr;
+                slot_count -= 1;
+            }
+        }
+
+        NonNull::new(first_free)
+    }
+
+    fn is_empty(&self) -> bool {
+        self.allocated_count().clone() == 0
+    }
+
+    fn is_full(&self) -> bool {
+        self.next_free().is_none()
+    }
+}
+
+impl<Raw, Allocator> SlabCache<Raw, Allocator>
+where
+    Raw: SlabRawPage,
+    Allocator: PageAlloc<RawPage = Raw>,
+{
+    pub(crate) const fn new_in(object_size: u32) -> Self {
+        // Avoid unnecessary branching in alloc and dealloc.
+        assert!(object_size <= PAGE_SIZE as u32 / 2);
+
+        Self {
+            empty_list: List::new(),
+            partial_list: List::new(),
+            full_list: List::new(),
+            object_size: object_size,
+            _phantom: PhantomData,
+        }
+    }
+
+    pub(crate) fn alloc(&mut self, alloc: &Allocator) -> *mut u8 {
+        if !self.partial_list.is_empty() {
+            let page_ptr = unsafe {
+                Raw::from_link(
+                    self.partial_list
+                        .head()
+                        .expect("partial pages should not be empty"),
+                )
+            };
+
+            let ptr = page_ptr.alloc_slot().expect("should get slot");
+
+            if page_ptr.is_full() {
+                self.partial_list.remove(unsafe { page_ptr.get_link() });
+                self.full_list.insert(unsafe { page_ptr.get_link() });
+            }
+            return ptr.as_ptr() as *mut u8;
+        }
+
+        if !self.empty_list.is_empty() {
+            let page_ptr = unsafe {
+                Raw::from_link(
+                    self.empty_list
+                        .head()
+                        .expect("empty pages should not be empty"),
+                )
+            };
+
+            let ptr = page_ptr.alloc_slot().expect("should get slot");
+            self.empty_list.remove(unsafe { page_ptr.get_link() });
+            self.partial_list.insert(unsafe { page_ptr.get_link() });
+            return ptr.as_ptr() as *mut u8;
+        }
+
+        let new_page_ptr = alloc.alloc().expect("slab_cache: failed to allocate a page");
+        let first_free = new_page_ptr.slab_page_init(self.object_size);
+        new_page_ptr.slab_init(first_free);
+        let ptr = new_page_ptr.alloc_slot().expect("should get slot");
+        self.partial_list.insert(unsafe { new_page_ptr.get_link() });
+        ptr.as_ptr() as *mut u8
+    }
+
+    pub(crate) fn dealloc(&mut self, ptr: *mut u8, _alloc: &Allocator) {
+        let page_ptr = Raw::in_which(ptr);
+
+        if page_ptr.is_full() {
+            self.full_list.remove(unsafe { page_ptr.get_link() });
+            self.partial_list.insert(unsafe { page_ptr.get_link() });
+        }
+
+        page_ptr.dealloc_slot(ptr);
+
+        if page_ptr.is_empty() {
+            self.partial_list.remove(unsafe { page_ptr.get_link() });
+            self.empty_list.insert(unsafe { page_ptr.get_link() });
+        }
+
+        // TODO: Check whether we should place some pages back with `alloc` if the global
+        //       free page count is below the watermark.
+    }
+}
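`slab_page_init` threads a free list through the page itself: each free slot stores the address of the next free slot, and the last slot stores 0. A minimal hosted sketch of the same layout, using a `usize`-aligned buffer in place of a real 4 KiB page and an arbitrary `object_size` of 16 (both assumptions, not taken from the kernel):

```rust
// Builds the same intra-page free list as slab_page_init above: every free
// slot holds the address of the next free slot; the last slot holds 0.
use core::ptr::NonNull;

unsafe fn build_free_list(page: *mut u8, page_size: usize, object_size: usize) -> Option<NonNull<usize>> {
    let mut slot = page as *mut usize;
    let mut remaining = page_size / object_size;
    unsafe {
        while remaining > 1 {
            let next = slot.byte_add(object_size);
            *slot = next as usize; // free slot -> address of the next free slot
            slot = next;
            remaining -= 1;
        }
        *slot = 0; // terminate the list
    }
    NonNull::new(page as *mut usize)
}

fn main() {
    let mut buf = vec![0usize; 512]; // 4096 bytes, usize-aligned stand-in for a page
    let head = unsafe { build_free_list(buf.as_mut_ptr() as *mut u8, 4096, 16) }.unwrap();

    // Walking the list visits 4096 / 16 = 256 slots before the 0 terminator.
    let mut count = 0;
    let mut cur = head.as_ptr() as usize;
    while cur != 0 {
        count += 1;
        cur = unsafe { *(cur as *const usize) };
    }
    assert_eq!(count, 256);
}
```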

+ 0 - 302
src/kernel/allocator.cc

@@ -1,302 +0,0 @@
-#include <bit>
-#include <cstddef>
-
-#include <assert.h>
-#include <stdint.h>
-
-#include <types/allocator.hpp>
-
-#include <kernel/async/lock.hpp>
-#include <kernel/mem/paging.hpp>
-#include <kernel/mem/slab.hpp>
-
-constexpr uintptr_t KERNEL_HEAP_START = 0xffff'ff81'8000'0000;
-constexpr uintptr_t KERNEL_HEAP_END = 0xffff'ffbf'ffff'ffff;
-constexpr uintptr_t KERNEL_HEAP_SIZE = KERNEL_HEAP_END - KERNEL_HEAP_START;
-
-namespace types::memory {
-
-struct mem_blk_flags {
-    unsigned long is_free : 8;
-    unsigned long has_next : 8;
-};
-
-struct mem_blk {
-    std::size_t size;
-    mem_blk_flags flags;
-    // the first byte of the memory space
-    // the minimal allocated space is 8 bytes
-    std::byte data[];
-};
-
-constexpr std::byte* aspbyte(void* pblk) {
-    return std::bit_cast<std::byte*>(pblk);
-}
-
-constexpr mem_blk* aspblk(void* pbyte) {
-    return std::bit_cast<mem_blk*>(pbyte);
-}
-
-constexpr mem_blk* next(mem_blk* blk, std::size_t blk_size) {
-    auto* p = aspbyte(blk);
-    p += sizeof(mem_blk);
-    p += blk_size;
-    return aspblk(p);
-}
-
-// blk MUST be free
-constexpr void unite_afterwards(mem_blk* blk) {
-    while (blk->flags.has_next) {
-        auto* blk_next = next(blk, blk->size);
-        if (!blk_next->flags.is_free)
-            break;
-        blk->size += sizeof(mem_blk) + blk_next->size;
-        blk->flags.has_next = blk_next->flags.has_next;
-    }
-}
-
-// @param start_pos position where to start finding
-// @param size the size of the block we're looking for
-// @return found block if suitable block exists, if not, the last block
-constexpr mem_blk* find_blk(std::byte** p_start, std::size_t size) {
-    mem_blk* start_pos = aspblk(*p_start);
-    bool no_free_so_far = true;
-
-    while (true) {
-        if (start_pos->flags.is_free) {
-            unite_afterwards(start_pos);
-
-            no_free_so_far = false;
-
-            if (start_pos->size >= size)
-                break;
-        }
-
-        if (no_free_so_far)
-            *p_start = aspbyte(start_pos);
-
-        if (!start_pos->flags.has_next)
-            break;
-        start_pos = next(start_pos, start_pos->size);
-    }
-    return start_pos;
-}
-
-constexpr void split_block(mem_blk* blk, std::size_t this_size) {
-    // block is too small to get split
-    // that is, the block to be split should have enough room
-    // for "this_size" bytes and also could contain a new block
-    if (blk->size < this_size + sizeof(mem_blk) + 1024)
-        return;
-
-    mem_blk* blk_next = next(blk, this_size);
-
-    blk_next->size = blk->size - this_size - sizeof(mem_blk);
-
-    blk_next->flags.has_next = blk->flags.has_next;
-    blk_next->flags.is_free = 1;
-
-    blk->flags.has_next = 1;
-    blk->size = this_size;
-}
-
-std::byte* brk_memory_allocator::brk(byte* addr) {
-    if (addr >= p_limit)
-        return nullptr;
-
-    uintptr_t current_allocated = reinterpret_cast<uintptr_t>(p_allocated);
-    uintptr_t new_brk = reinterpret_cast<uintptr_t>(addr);
-
-    current_allocated &= ~(0x200000 - 1);
-    new_brk &= ~(0x200000 - 1);
-
-    using namespace kernel::mem::paging;
-    while (current_allocated <= new_brk) {
-        auto idx = idx_all(current_allocated);
-        auto pdpt = KERNEL_PAGE_TABLE[std::get<1>(idx)].parse();
-
-        auto pdpte = pdpt[std::get<2>(idx)];
-        if (!pdpte.pfn())
-            pdpte.set(PA_KERNEL_PAGE_TABLE, c_alloc_page_table());
-
-        auto pde = pdpte.parse()[std::get<3>(idx)];
-        assert(!(pde.attributes() & PA_P));
-        pde.set(PA_KERNEL_DATA_HUGE, page_to_pfn(c_alloc_pages(9)) << 12);
-
-        current_allocated += 0x200000;
-    }
-    p_allocated = (std::byte*)current_allocated;
-
-    return p_break = addr;
-}
-
-std::byte* brk_memory_allocator::sbrk(size_type increment) {
-    return brk(p_break + increment);
-}
-
-brk_memory_allocator::brk_memory_allocator(byte* start, size_type size)
-    : p_start(start)
-    , p_limit(start + size)
-    , p_break(start)
-    , p_allocated(start) {
-    auto* p_blk = aspblk(brk(p_start));
-    sbrk(sizeof(mem_blk) + 1024); // 1024 bytes (minimum size for a block)
-
-    p_blk->size = 1024;
-    p_blk->flags.has_next = 0;
-    p_blk->flags.is_free = 1;
-}
-
-void* brk_memory_allocator::allocate(size_type size) {
-    kernel::async::lock_guard_irq lck(mtx);
-    // align to 1024 bytes boundary
-    size = (size + 1024 - 1) & ~(1024 - 1);
-
-    auto* block_allocated = find_blk(&p_start, size);
-    if (!block_allocated->flags.has_next &&
-        (!block_allocated->flags.is_free || block_allocated->size < size)) {
-        // 'block_allocated' in the argument list is the pointer
-        // pointing to the last block
-
-        if (!sbrk(sizeof(mem_blk) + size))
-            return nullptr;
-
-        block_allocated->flags.has_next = 1;
-
-        block_allocated = next(block_allocated, block_allocated->size);
-
-        block_allocated->flags.has_next = 0;
-        block_allocated->flags.is_free = 1;
-        block_allocated->size = size;
-    } else {
-        split_block(block_allocated, size);
-    }
-
-    block_allocated->flags.is_free = 0;
-
-    return block_allocated->data;
-}
-
-void brk_memory_allocator::deallocate(void* ptr) {
-    kernel::async::lock_guard_irq lck(mtx);
-    auto* blk = aspblk(aspbyte(ptr) - sizeof(mem_blk));
-
-    blk->flags.is_free = 1;
-
-    if (aspbyte(blk) < p_start)
-        p_start = aspbyte(blk);
-
-    // unite free blocks nearby
-    unite_afterwards(blk);
-}
-
-bool brk_memory_allocator::allocated(void* ptr) const noexcept {
-    return (void*)KERNEL_HEAP_START <= aspbyte(ptr) && aspbyte(ptr) < sbrk();
-}
-
-static brk_memory_allocator* k_alloc;
-
-} // namespace types::memory
-
-static kernel::mem::slab_cache caches[7];
-
-static constexpr int __cache_index(std::size_t size) {
-    if (size <= 32)
-        return 0;
-    if (size <= 64)
-        return 1;
-    if (size <= 96)
-        return 2;
-    if (size <= 128)
-        return 3;
-    if (size <= 192)
-        return 4;
-    if (size <= 256)
-        return 5;
-    if (size <= 512)
-        return 6;
-    return -1;
-}
-
-extern "C" void init_allocator() {
-    kernel::mem::init_slab_cache(caches + 0, 32);
-    kernel::mem::init_slab_cache(caches + 1, 64);
-    kernel::mem::init_slab_cache(caches + 2, 96);
-    kernel::mem::init_slab_cache(caches + 3, 128);
-    kernel::mem::init_slab_cache(caches + 4, 192);
-    kernel::mem::init_slab_cache(caches + 5, 256);
-    kernel::mem::init_slab_cache(caches + 6, 512);
-
-    types::memory::k_alloc = new types::memory::brk_memory_allocator(
-        (std::byte*)KERNEL_HEAP_START, KERNEL_HEAP_SIZE);
-}
-
-extern "C" void* _do_allocate(uintptr_t size) {
-    int idx = __cache_index(size);
-    void* ptr = nullptr;
-    if (idx < 0)
-        ptr = types::memory::k_alloc->allocate(size);
-    else
-        ptr = kernel::mem::slab_alloc(&caches[idx]);
-
-    return ptr;
-}
-
-// return 0 if deallocate successfully
-// return -1 if ptr is nullptr
-// return -2 if size is not correct for slab allocated memory
-extern "C" int32_t _do_deallocate(void* ptr, uintptr_t size) {
-    if (!ptr)
-        return -1;
-
-    if (types::memory::k_alloc->allocated(ptr)) {
-        types::memory::k_alloc->deallocate(ptr);
-        return 0;
-    }
-
-    int idx = __cache_index(size);
-    if (idx < 0)
-        return -2;
-
-    kernel::mem::slab_free(ptr);
-
-    return 0;
-}
-
-void* operator new(size_t size) {
-    auto* ret = _do_allocate(size);
-    assert(ret);
-
-    return ret;
-}
-
-void operator delete(void* ptr) {
-    if (!ptr)
-        return;
-
-    if (types::memory::k_alloc->allocated(ptr))
-        types::memory::k_alloc->deallocate(ptr);
-    else
-        kernel::mem::slab_free(ptr);
-}
-
-void operator delete(void* ptr, std::size_t size) {
-    if (!ptr)
-        return;
-
-    int ret = _do_deallocate(ptr, size);
-
-    assert(ret == 0);
-}
-
-void* operator new[](size_t sz) {
-    return ::operator new(sz);
-}
-
-void operator delete[](void* ptr) {
-    ::operator delete(ptr);
-}
-
-void operator delete[](void* ptr, std::size_t size) {
-    ::operator delete(ptr, size);
-}

+ 1 - 0
src/kernel/mem.rs

@@ -2,6 +2,7 @@ pub mod paging;
 
 mod access;
 mod address;
+mod allocator;
 mod mm_area;
 mod mm_list;
 mod page_alloc;

+ 22 - 1
src/kernel/mem/access.rs

@@ -1,7 +1,9 @@
 use core::{num::NonZero, ptr::NonNull};
-use eonix_mm::address::{Addr as _, PAddr};
+use eonix_mm::address::{Addr as _, PAddr, VAddr};
 use eonix_mm::paging::{PageAccess, PageBlock, PFN};
 
+use super::page_alloc::RawPagePtr;
+
 const PHYS_OFFSET: usize = 0xffff_ff00_0000_0000;
 
 /// A block of memory starting at a non-zero address and having a specific length.
@@ -156,3 +158,22 @@ impl PageAccess for KernelPageAccess {
         }
     }
 }
+
+pub trait RawPageAccess {
+    /// Translate a kernel virtual address into the `RawPagePtr` of the page
+    /// it belongs to. Use it with care.
+    ///
+    /// # Panics
+    /// Panics if the address is not properly aligned.
+    ///
+    /// # Safety
+    /// The address must be a kernel-accessible pointer.
+    unsafe fn as_raw_page(&self) -> RawPagePtr;
+}
+
+impl RawPageAccess for VAddr {
+    unsafe fn as_raw_page(&self) -> RawPagePtr {
+        let pfn: PFN = PAddr::from(self.addr() - PHYS_OFFSET).into();
+        RawPagePtr::from(pfn)
+    }
+}

+ 54 - 0
src/kernel/mem/allocator.rs

@@ -0,0 +1,54 @@
+use core::alloc::{GlobalAlloc, Layout};
+use eonix_mm::address::VAddr;
+use eonix_mm::paging::{PAGE_SIZE_BITS, PFN};
+use eonix_sync::LazyLock;
+use slab_allocator::SlabAllocator;
+
+use super::access::RawPageAccess;
+use super::page_alloc::RawPagePtr;
+use super::{AsMemoryBlock, GlobalPageAlloc, Page};
+
+static SLAB_ALLOCATOR: LazyLock<SlabAllocator<RawPagePtr, GlobalPageAlloc, 9>> =
+    LazyLock::new(|| SlabAllocator::new_in(GlobalPageAlloc));
+
+struct Allocator;
+
+unsafe impl GlobalAlloc for Allocator {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let size = layout.size().next_power_of_two();
+
+        let result = if size <= 2048 {
+            SLAB_ALLOCATOR.alloc(size)
+        } else {
+            let page_count = size >> PAGE_SIZE_BITS;
+            let page = Page::alloc_at_least(page_count);
+
+            let ptr = page.as_memblk().as_ptr();
+            page.into_raw();
+
+            ptr.as_ptr()
+        };
+
+        if result.is_null() {
+            core::ptr::null_mut()
+        } else {
+            result as *mut u8
+        }
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let size = layout.size().next_power_of_two();
+
+        if size <= 2048 {
+            SLAB_ALLOCATOR.dealloc(ptr, size)
+        } else {
+            let vaddr = VAddr::from(ptr as usize);
+            let page_ptr = vaddr.as_raw_page();
+            let pfn = PFN::from(page_ptr);
+            Page::from_raw(pfn);
+        };
+    }
+}
+
+#[global_allocator]
+static ALLOCATOR: Allocator = Allocator;
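The `GlobalAlloc` impl above rounds each request up to a power of two and routes it: sizes up to 2048 bytes go to the slab caches, larger ones to whole pages. A minimal sketch of that routing decision, assuming 4 KiB pages (`PAGE_SIZE_BITS = 12`); the `Route` enum and `route` function are illustrative names, not kernel APIs:

```rust
// Sketch of the slab-vs-pages routing made by the global allocator above.
const PAGE_SIZE_BITS: usize = 12;

enum Route {
    Slab { size: usize },
    Pages { page_count: usize },
}

fn route(request: usize) -> Route {
    let size = request.next_power_of_two();
    if size <= 2048 {
        Route::Slab { size }
    } else {
        Route::Pages {
            page_count: size >> PAGE_SIZE_BITS,
        }
    }
}

fn main() {
    assert!(matches!(route(100), Route::Slab { size: 128 }));
    assert!(matches!(route(2048), Route::Slab { size: 2048 }));
    // 3000 rounds up to 4096, i.e. exactly one 4 KiB page.
    assert!(matches!(route(3000), Route::Pages { page_count: 1 }));
}
```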

+ 4 - 29
src/kernel/mem/page_alloc.rs

@@ -1,17 +1,16 @@
 mod raw_page;
 
-use super::{paging::AllocZeroed as _, Page};
 use buddy_allocator::{BuddyAllocator, BuddyRawPage as _};
-use core::{ptr::NonNull, sync::atomic::Ordering};
+use core::sync::atomic::Ordering;
 use eonix_mm::{
-    address::{AddrOps as _, PAddr, PRange},
+    address::{AddrOps as _, PRange},
     paging::{GlobalPageAlloc as GlobalPageAllocTrait, PageAlloc, PFN},
 };
 use eonix_sync::{NoContext, Spin};
 use intrusive_list::List;
-use raw_page::{PageFlags, RawPagePtr};
+use raw_page::PageFlags;
 
-pub use raw_page::RawPage;
+pub use raw_page::{RawPage, RawPagePtr};
 
 const COSTLY_ORDER: u32 = 3;
 const BATCH_SIZE: u32 = 64;
@@ -196,27 +195,3 @@ impl PageAlloc for EarlyPageAlloc {
         BuddyAllocator::has_management_over(page_ptr)
     }
 }
-
-#[no_mangle]
-pub extern "C" fn page_to_pfn(page: *const ()) -> PFN {
-    let page_ptr = RawPagePtr::new(NonNull::new(page as *mut _).unwrap());
-    PFN::from(page_ptr)
-}
-
-#[no_mangle]
-pub extern "C" fn c_alloc_page() -> *const RawPage {
-    GlobalPageAlloc.alloc().expect("Out of memory").as_ref()
-}
-
-#[no_mangle]
-pub extern "C" fn c_alloc_pages(order: u32) -> *const RawPage {
-    GlobalPageAlloc
-        .alloc_order(order)
-        .expect("Out of memory")
-        .as_ref()
-}
-
-#[no_mangle]
-pub extern "C" fn c_alloc_page_table() -> PAddr {
-    PAddr::from(Page::zeroed().into_raw())
-}

+ 83 - 5
src/kernel/mem/page_alloc/raw_page.rs

@@ -3,24 +3,64 @@ use core::{
     ptr::NonNull,
     sync::atomic::{AtomicU32, AtomicUsize, Ordering},
 };
-use eonix_mm::paging::{RawPage as RawPageTrait, PFN};
+use eonix_mm::{
+    address::{PAddr, VAddr},
+    paging::{RawPage as RawPageTrait, PAGE_SIZE, PFN},
+};
 use intrusive_list::{container_of, Link};
+use slab_allocator::SlabRawPage;
+
+use crate::kernel::mem::access::RawPageAccess;
+use crate::kernel::mem::PhysAccess;
 
 const PAGE_ARRAY: NonNull<RawPage> =
     unsafe { NonNull::new_unchecked(0xffffff8040000000 as *mut _) };
 
 pub struct PageFlags(AtomicU32);
 
+struct SlabPageInner {
+    allocated_count: u32,
+    free_next: Option<NonNull<usize>>,
+}
+
+impl SlabPageInner {
+    fn new(free_next: Option<NonNull<usize>>) -> Self {
+        Self {
+            allocated_count: 0,
+            free_next,
+        }
+    }
+}
+
+pub struct BuddyPageInner {}
+
+enum PageType {
+    Buddy(BuddyPageInner),
+    Slab(SlabPageInner),
+}
+
+impl PageType {
+    fn slab_data(&mut self) -> &mut SlabPageInner {
+        if let PageType::Slab(slab_data) = self {
+            return slab_data;
+        } else {
+            unreachable!()
+        }
+    }
+}
+
 pub struct RawPage {
     /// This can be used for LRU page swap in the future.
     ///
     /// Now only used for free page links in the buddy system.
-    pub link: Link,
+    link: Link,
     /// # Safety
     /// This field is only used in buddy system and is protected by the global lock.
-    pub order: u32,
-    pub flags: PageFlags,
-    pub refcount: AtomicUsize,
+    order: u32,
+    flags: PageFlags,
+    refcount: AtomicUsize,
+
+    shared_data: PageType,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -80,6 +120,12 @@ impl RawPagePtr {
     pub const fn refcount(&self) -> &AtomicUsize {
         &self.as_ref().refcount
     }
+
+    /// Return a pointer to the actual page memory that this `RawPage` entry describes.
+    pub fn real_ptr<T>(&self) -> NonNull<T> {
+        let pfn = unsafe { PFN::from(RawPagePtr(NonNull::new_unchecked(self.as_ptr()))) };
+        unsafe { PAddr::from(pfn).as_ptr::<T>() }
+    }
 }
 
 impl From<RawPagePtr> for PFN {
@@ -148,3 +194,35 @@ impl BuddyRawPage for RawPagePtr {
         self.flags().clear(PageFlags::FREE);
     }
 }
+
+impl SlabRawPage for RawPagePtr {
+    unsafe fn from_link(link: &mut Link) -> Self {
+        let raw_page_ptr = container_of!(link, RawPage, link);
+        Self(raw_page_ptr)
+    }
+
+    unsafe fn get_link(&self) -> &mut Link {
+        &mut self.as_mut().link
+    }
+
+    fn in_which(ptr: *mut u8) -> RawPagePtr {
+        let vaddr = VAddr::from(ptr as usize & !(PAGE_SIZE - 1));
+        unsafe { vaddr.as_raw_page() }
+    }
+
+    fn allocated_count(&self) -> &mut u32 {
+        &mut self.as_mut().shared_data.slab_data().allocated_count
+    }
+
+    fn next_free(&self) -> &mut Option<NonNull<usize>> {
+        &mut self.as_mut().shared_data.slab_data().free_next
+    }
+
+    fn real_page_ptr(&self) -> *mut u8 {
+        self.real_ptr().as_ptr()
+    }
+
+    fn slab_init(&self, first_free: Option<NonNull<usize>>) {
+        self.as_mut().shared_data = PageType::Slab(SlabPageInner::new(first_free));
+    }
+}
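`SlabRawPage::from_link` recovers the `RawPage` metadata entry from its embedded list `Link` via `container_of!`. A minimal sketch of that pattern using `core::mem::offset_of!`; the `Link` and `RawPage` structs here are simplified stand-ins, not the kernel's types:

```rust
// container_of pattern: given a pointer to a field embedded in a struct,
// subtract the field's offset to recover a pointer to the containing struct.
use core::mem::offset_of;
use core::ptr;

struct Link {
    next: *mut Link,
}

struct RawPage {
    order: u32,
    link: Link,
}

unsafe fn raw_page_from_link(link: *mut Link) -> *mut RawPage {
    unsafe { (link as *mut u8).sub(offset_of!(RawPage, link)) as *mut RawPage }
}

fn main() {
    let mut page = RawPage {
        order: 3,
        link: Link { next: ptr::null_mut() },
    };
    let link_ptr = &mut page.link as *mut Link;

    let recovered = unsafe { raw_page_from_link(link_ptr) };
    assert_eq!(unsafe { (*recovered).order }, 3);
    assert!(unsafe { (*recovered).link.next.is_null() });
}
```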

+ 0 - 129
src/kernel/mem/slab.cc

@@ -1,129 +0,0 @@
-#include <cstddef>
-
-#include <assert.h>
-
-#include <types/list.hpp>
-
-#include <kernel/async/lock.hpp>
-#include <kernel/mem/paging.hpp>
-#include <kernel/mem/slab.hpp>
-
-using namespace kernel::mem;
-using namespace types::list;
-
-constexpr std::size_t SLAB_PAGE_SIZE = 0x1000; // 4K
-
-kernel::async::mutex slab_lock;
-
-std::ptrdiff_t _slab_data_start_offset(std::size_t size) {
-    return (sizeof(slab_head) + size - 1) & ~(size - 1);
-}
-
-std::size_t _slab_max_count(std::size_t size) {
-    return (SLAB_PAGE_SIZE - _slab_data_start_offset(size)) / size;
-}
-
-void* _slab_head_alloc(slab_head* slab) {
-    if (slab->free_count == 0)
-        return nullptr;
-
-    void* ptr = slab->free;
-    slab->free = *(void**)ptr;
-    slab->free_count--;
-
-    return ptr;
-}
-
-slab_head* _make_slab(uintptr_t start, std::size_t size) {
-    slab_head* slab = physaddr<slab_head>{start};
-
-    slab->obj_size = size;
-    slab->free_count = _slab_max_count(size);
-    slab->next = nullptr;
-    slab->prev = nullptr;
-
-    slab->free = physaddr<void>{start + _slab_data_start_offset(size)};
-
-    std::byte* ptr = (std::byte*)slab->free;
-    for (unsigned i = 0; i < slab->free_count; ++i) {
-        void* nextptr = ptr + size;
-        if (i == slab->free_count - 1)
-            *(void**)ptr = nullptr;
-        else
-            *(void**)ptr = nextptr;
-        ptr = (std::byte*)nextptr;
-    }
-
-    return slab;
-}
-
-void _slab_add_page(slab_cache* cache) {
-    auto new_page_pfn = page_to_pfn(c_alloc_page()) << 12;
-
-    // TODO!!!
-    // new_page->flags |= paging::PAGE_SLAB;
-
-    auto* slab = _make_slab(new_page_pfn, cache->obj_size);
-    slab->cache = cache;
-
-    list_insert(&cache->slabs_empty, slab);
-}
-
-void* kernel::mem::slab_alloc(slab_cache* cache) {
-    async::lock_guard_irq lock(slab_lock);
-
-    slab_head* slab = cache->slabs_partial;
-    if (!slab) {                 // no partial slabs, try to get an empty slab
-        if (!cache->slabs_empty) // no empty slabs, create a new one
-            _slab_add_page(cache);
-
-        slab = list_get(&cache->slabs_empty);
-
-        list_insert(&cache->slabs_partial, slab);
-    }
-
-    void* ptr = _slab_head_alloc(slab);
-
-    if (slab->free_count == 0) { // slab is full
-        list_remove(&cache->slabs_partial, slab);
-        list_insert(&cache->slabs_full, slab);
-    }
-
-    return ptr;
-}
-
-void kernel::mem::slab_free(void* ptr) {
-    async::lock_guard_irq lock(slab_lock);
-
-    slab_head* slab = (slab_head*)((uintptr_t)ptr & ~(SLAB_PAGE_SIZE - 1));
-
-    *(void**)ptr = slab->free;
-    slab->free = ptr;
-    slab->free_count++;
-
-    auto max_count = _slab_max_count(slab->obj_size);
-
-    if (max_count == 1) {
-        list_remove(&slab->cache->slabs_full, slab);
-        list_insert(&slab->cache->slabs_empty, slab);
-    }
-
-    if (slab->free_count == 1) {
-        list_remove(&slab->cache->slabs_full, slab);
-        list_insert(&slab->cache->slabs_partial, slab);
-    }
-
-    if (slab->free_count == max_count) {
-        list_remove(&slab->cache->slabs_partial, slab);
-        list_insert(&slab->cache->slabs_empty, slab);
-    }
-}
-
-void kernel::mem::init_slab_cache(slab_cache* cache, std::size_t obj_size) {
-    cache->obj_size = obj_size;
-    cache->slabs_empty = nullptr;
-    cache->slabs_partial = nullptr;
-    cache->slabs_full = nullptr;
-
-    _slab_add_page(cache);
-}

+ 0 - 6
src/kernel_init.rs

@@ -214,12 +214,6 @@ extern "C" fn _init_on_new_stack(early_kernel_stack_pfn: PFN) -> ! {
 
     init_localcpu();
 
-    extern "C" {
-        fn init_allocator();
-    }
-
-    unsafe { init_allocator() };
-
     eonix_hal::trap::init();
 
     kernel::interrupt::init().unwrap();

+ 0 - 29
src/lib.rs

@@ -26,7 +26,6 @@ mod rcu;
 mod sync;
 
 use alloc::{ffi::CString, sync::Arc};
-use core::alloc::{GlobalAlloc, Layout};
 use elf::ParsedElf32;
 use eonix_mm::paging::PFN;
 use eonix_runtime::{run::FutureRun, scheduler::Scheduler, task::Task};
@@ -62,34 +61,6 @@ fn panic(info: &core::panic::PanicInfo) -> ! {
     arch::freeze()
 }
 
-extern "C" {
-    fn _do_allocate(size: usize) -> *mut core::ffi::c_void;
-    fn _do_deallocate(ptr: *mut core::ffi::c_void, size: core::ffi::c_size_t) -> i32;
-}
-
-struct Allocator;
-unsafe impl GlobalAlloc for Allocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        let result = _do_allocate(layout.size());
-
-        if result.is_null() {
-            core::ptr::null_mut()
-        } else {
-            result as *mut u8
-        }
-    }
-
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        match _do_deallocate(ptr as *mut core::ffi::c_void, layout.size()) {
-            0 => (),
-            _ => panic!("Failed to deallocate memory"),
-        }
-    }
-}
-
-#[global_allocator]
-static ALLOCATOR: Allocator = Allocator;
-
 #[no_mangle]
 pub extern "C" fn kernel_init(early_kstack_pfn: PFN) -> ! {
     init_pcie().expect("Unable to initialize PCIe bus");