mem, slab: rework the slab system

Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf, 4 weeks ago
parent
commit
841bb379b0
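
Note: at a glance, this commit replaces the old `SlabAllocator`/`SlabRawPage`/`SlabCache` design with three traits: `SlabPage` exposes a page's slab metadata, `SlabPageList` is an intrusive list of such pages, and `SlabPageAlloc` hands out fresh pages. The kernel wires it up roughly as in the sketch below, which mirrors the changes to src/kernel/mem/allocator.rs further down; `small_alloc`/`small_dealloc` are made-up helper names, not code from this commit.

// Sketch only: a 9-bucket slab allocator backed by the global page allocator,
// covering object sizes 8..=2048 bytes.
static SLAB_ALLOCATOR: LazyLock<SlabAlloc<GlobalPageAlloc, 9>> =
    LazyLock::new(|| SlabAlloc::new_in(GlobalPageAlloc));

fn small_alloc(size: usize) -> NonNull<u8> {
    // Sizes are rounded up to the next power of two internally; anything above
    // 2048 bytes goes straight to the page allocator instead.
    SLAB_ALLOCATOR.alloc(size)
}

unsafe fn small_dealloc(ptr: NonNull<u8>, size: usize) {
    // SAFETY (assumed contract): `ptr` came from `small_alloc` with the same `size`.
    unsafe { SLAB_ALLOCATOR.dealloc(ptr, size) }
}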

+ 1 - 0
.vscode/settings.json

@@ -1,3 +1,4 @@
 {
     "makefile.configureOnOpen": false,
+    "editor.formatOnSave": true,
 }

+ 0 - 1
Cargo.lock

@@ -518,7 +518,6 @@ version = "0.1.0"
 dependencies = [
  "eonix_mm",
  "eonix_sync",
- "intrusive_list",
 ]
 
 [[package]]

+ 2 - 0
crates/intrusive_list/src/lib.rs

@@ -25,6 +25,8 @@ impl List {
     }
 
     pub fn insert(&mut self, node: &mut Link) {
+        // TODO: `node` above should have a `'static` lifetime.
+
         self.head.insert(node);
         self.count += 1;
     }

+ 0 - 2
crates/slab_allocator/Cargo.toml

@@ -6,5 +6,3 @@ edition = "2024"
 [dependencies]
 eonix_mm = { path = "../eonix_mm" }
 eonix_sync = { path = "../eonix_sync" }
-intrusive_list = { path = "../intrusive_list" }
-

+ 255 - 36
crates/slab_allocator/src/lib.rs

@@ -1,69 +1,288 @@
 #![no_std]
 
-mod slab_cache;
+use core::ptr::NonNull;
 
-use core::{cmp::max, ptr::NonNull};
-
-use eonix_mm::paging::{PageAlloc, RawPage};
 use eonix_sync::Spin;
-use intrusive_list::Link;
-use slab_cache::SlabCache;
 
-pub trait SlabRawPage: RawPage {
-    /// Get the container raw page struct of the list link.
+#[repr(C)]
+pub union SlabSlot {
+    slab_slot: Option<NonNull<SlabSlot>>,
+    data: u8,
+}
+
+pub trait SlabPageList: Sized {
+    type Page: SlabPage;
+
+    fn new() -> Self;
+    fn is_empty(&self) -> bool;
+
+    fn peek_head(&mut self) -> Option<&mut Self::Page>;
+
+    fn pop_head(&mut self) -> Option<&'static mut Self::Page>;
+    fn push_tail(&mut self, page: &'static mut Self::Page);
+    fn remove(&mut self, page: &mut Self::Page);
+}
+
+pub trait SlabPage: Sized + 'static {
+    fn get_data_ptr(&self) -> NonNull<[u8]>;
+
+    fn get_free_slot(&self) -> Option<NonNull<SlabSlot>>;
+    fn set_free_slot(&mut self, next: Option<NonNull<SlabSlot>>);
+
+    fn get_alloc_count(&self) -> usize;
+
+    /// Increase the allocation count by 1 and return the increased value.
+    fn inc_alloc_count(&mut self) -> usize;
+
+    /// Decrease the allocation count by 1 and return the decreased value.
+    fn dec_alloc_count(&mut self) -> usize;
+
+    /// Get the [`SlabPage`] that `ptr` is allocated from.
     ///
     /// # Safety
-    /// The caller MUST ensure that the link points to a `RawPage`.
-    unsafe fn from_link(link: &mut Link) -> Self;
+    /// The caller MUST ensure that no one else can be calling this function and
+    /// obtaining the same [`SlabPage`] at the same time.
+    unsafe fn from_allocated(ptr: NonNull<u8>) -> &'static mut Self;
+}
+
+pub(crate) trait SlabPageExt {
+    fn alloc_slot(&mut self) -> Option<NonNull<u8>>;
 
-    /// Get the list link of the raw page.
-    ///
     /// # Safety
-    /// The caller MUST ensure that at any time, only one mutable reference
-    /// to the link exists.
-    unsafe fn get_link(&self) -> &mut Link;
+    /// The caller MUST ensure that `slot_data_ptr` points to a slot previously
+    /// allocated by [`SlabPageExt::alloc_slot`].
+    unsafe fn free_slot(&mut self, slot_data_ptr: NonNull<u8>);
+
+    fn is_empty(&self) -> bool;
+    fn is_full(&self) -> bool;
+}
+
+impl<T> SlabPageExt for T
+where
+    T: SlabPage,
+{
+    fn alloc_slot(&mut self) -> Option<NonNull<u8>> {
+        let mut free_slot = self.get_free_slot()?;
+
+        unsafe {
+            let free_slot = free_slot.as_mut();
+
+            let next_slot = free_slot.slab_slot;
+            // ===== `free_slot` is now safe to be overwritten
 
-    fn slab_init(&self, first_free: Option<NonNull<usize>>);
+            self.set_free_slot(next_slot);
+            self.inc_alloc_count();
 
-    // which slab page the ptr belong
-    fn in_which(ptr: *mut u8) -> Self;
+            Some(NonNull::new_unchecked(&mut free_slot.data))
+        }
+    }
+
+    unsafe fn free_slot(&mut self, slot_data_ptr: NonNull<u8>) {
+        unsafe {
+            let mut free_slot: NonNull<SlabSlot> = slot_data_ptr.cast();
+            free_slot.as_mut().slab_slot = self.get_free_slot();
 
-    fn real_page_ptr(&self) -> *mut u8;
+            self.set_free_slot(Some(free_slot));
+            self.dec_alloc_count();
+        }
+    }
 
-    fn allocated_count(&self) -> &mut u32;
+    fn is_empty(&self) -> bool {
+        self.get_alloc_count() == 0
+    }
 
-    fn next_free(&self) -> &mut Option<NonNull<usize>>;
+    fn is_full(&self) -> bool {
+        self.get_free_slot().is_none()
+    }
 }
 
-pub struct SlabAllocator<T, A, const SLAB_CACHE_COUNT: usize> {
-    slabs: [Spin<SlabCache<T, A>>; SLAB_CACHE_COUNT],
-    alloc: A,
+pub trait SlabPageAlloc {
+    type Page: SlabPage;
+    type PageList: SlabPageList<Page = Self::Page>;
+
+    /// Allocate a page suitable for use by the slab system. The page MUST be
+    /// returned with its allocation count set to 0 and its free slot set to `None`.
+    ///
+    /// # Safety
+    /// The returned page MUST be properly initialized before use.
+    unsafe fn alloc_uninit(&self) -> &'static mut Self::Page;
 }
 
-unsafe impl<T, A, const SLAB_CACHE_COUNT: usize> Send for SlabAllocator<T, A, SLAB_CACHE_COUNT> {}
-unsafe impl<T, A, const SLAB_CACHE_COUNT: usize> Sync for SlabAllocator<T, A, SLAB_CACHE_COUNT> {}
+pub(crate) struct SlabList<T>
+where
+    T: SlabPageList,
+{
+    empty_list: T,
+    partial_list: T,
+    full_list: T,
+    object_size: usize,
+}
+
+pub struct SlabAlloc<P, const COUNT: usize>
+where
+    P: SlabPageAlloc,
+{
+    slabs: [Spin<SlabList<P::PageList>>; COUNT],
+    alloc: P,
+}
+
+unsafe impl<P, const COUNT: usize> Send for SlabAlloc<P, COUNT> where P: SlabPageAlloc {}
+unsafe impl<P, const COUNT: usize> Sync for SlabAlloc<P, COUNT> where P: SlabPageAlloc {}
 
-impl<Raw, Allocator, const SLAB_CACHE_COUNT: usize> SlabAllocator<Raw, Allocator, SLAB_CACHE_COUNT>
+impl<L, const COUNT: usize> SlabAlloc<L, COUNT>
 where
-    Raw: SlabRawPage,
-    Allocator: PageAlloc<RawPage = Raw>,
+    L: SlabPageAlloc,
 {
-    pub fn new_in(alloc: Allocator) -> Self {
+    pub fn new_in(alloc: L) -> Self {
         Self {
-            slabs: core::array::from_fn(|i| Spin::new(SlabCache::new_in(1 << (i + 3)))),
+            slabs: core::array::from_fn(|i| Spin::new(SlabList::new(1 << (i + 3)))),
             alloc,
         }
     }
 
-    pub fn alloc(&self, mut size: usize) -> *mut u8 {
-        size = max(8, size);
+    pub fn alloc(&self, mut size: usize) -> NonNull<u8> {
+        size = size.max(8);
         let idx = size.next_power_of_two().trailing_zeros() - 3;
         self.slabs[idx as usize].lock().alloc(&self.alloc)
     }
 
-    pub fn dealloc(&self, ptr: *mut u8, mut size: usize) {
-        size = max(8, size);
+    pub unsafe fn dealloc(&self, ptr: NonNull<u8>, mut size: usize) {
+        size = size.max(8);
         let idx = size.next_power_of_two().trailing_zeros() - 3;
-        self.slabs[idx as usize].lock().dealloc(ptr, &self.alloc);
+
+        unsafe {
+            // SAFETY: The caller guarantees that `ptr` was allocated from this
+            //         allocator with the same `size`.
+            self.slabs[idx as usize].lock().dealloc(ptr, &self.alloc);
+        }
     }
 }
+
+impl<T> SlabList<T>
+where
+    T: SlabPageList,
+{
+    fn new(object_size: usize) -> Self {
+        Self {
+            empty_list: T::new(),
+            partial_list: T::new(),
+            full_list: T::new(),
+            object_size,
+        }
+    }
+
+    fn alloc_from_partial(&mut self) -> NonNull<u8> {
+        let head = self.partial_list.peek_head().unwrap();
+        let slot = head.alloc_slot().unwrap();
+
+        if head.is_full() {
+            let head = self.partial_list.pop_head().unwrap();
+            self.full_list.push_tail(head);
+        }
+
+        slot
+    }
+
+    fn alloc_from_empty(&mut self) -> NonNull<u8> {
+        let head = self.empty_list.pop_head().unwrap();
+        let slot = head.alloc_slot().unwrap();
+
+        if head.is_full() {
+            self.full_list.push_tail(head);
+        } else {
+            self.partial_list.push_tail(head);
+        }
+
+        slot
+    }
+
+    fn charge(&mut self, alloc: &impl SlabPageAlloc<Page = T::Page>) {
+        unsafe {
+            let slab = alloc.alloc_uninit();
+            let free_slot = make_slab_page(slab.get_data_ptr(), self.object_size);
+
+            slab.set_free_slot(Some(free_slot));
+
+            self.empty_list.push_tail(slab);
+        }
+    }
+
+    fn alloc(&mut self, alloc: &impl SlabPageAlloc<Page = T::Page>) -> NonNull<u8> {
+        if !self.partial_list.is_empty() {
+            return self.alloc_from_partial();
+        }
+
+        if self.empty_list.is_empty() {
+            self.charge(alloc);
+        }
+
+        self.alloc_from_empty()
+    }
+
+    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, _alloc: &impl SlabPageAlloc) {
+        let slab_page = unsafe {
+            // SAFETY: We have exclusive access to this slab list, so no one
+            //         else can be getting this `SlabPage` at the same time.
+            <T::Page>::from_allocated(ptr)
+        };
+
+        let (was_full, is_empty);
+
+        was_full = slab_page.is_full();
+
+        unsafe {
+            // SAFETY: `ptr` was previously handed out by `alloc_slot` on this page.
+            slab_page.free_slot(ptr);
+        }
+
+        is_empty = slab_page.is_empty();
+
+        match (was_full, is_empty) {
+            (false, false) => {}
+            (false, true) => {
+                self.partial_list.remove(slab_page);
+                self.empty_list.push_tail(slab_page);
+            }
+            (true, false) => {
+                self.full_list.remove(slab_page);
+                self.partial_list.push_tail(slab_page);
+            }
+            (true, true) => {
+                self.full_list.remove(slab_page);
+                self.empty_list.push_tail(slab_page);
+            }
+        }
+
+        // TODO: Check whether we should place some pages back with `alloc` if
+        //       the global free page count is below the watermark.
+    }
+}
+
+pub fn make_slab_page(page_ptr: NonNull<[u8]>, slot_size: usize) -> NonNull<SlabSlot> {
+    assert!(
+        slot_size >= core::mem::size_of::<usize>(),
+        "The minimum slot size is of a pointer's width"
+    );
+
+    let page_size = page_ptr.len();
+    let slot_count = page_size / slot_size;
+    let page_start: NonNull<u8> = page_ptr.cast();
+
+    // Quick checks
+    assert!(
+        page_size % slot_size == 0,
+        "The page's size should be a multiple of the slot size"
+    );
+
+    let mut prev_free_slot = None;
+    for i in (0..slot_count).rev() {
+        let offset = i * slot_size;
+
+        unsafe {
+            let mut slot_ptr: NonNull<SlabSlot> = page_start.add(offset).cast();
+
+            slot_ptr.as_mut().slab_slot = prev_free_slot;
+            prev_free_slot = Some(slot_ptr);
+        }
+    }
+
+    prev_free_slot.expect("There should be at least one slot.")
+}
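
Note: the in-band free list built by `make_slab_page` is the core of the new layout. A free slot is a `SlabSlot` union whose first word stores the pointer to the next free slot, and once a slot is handed out those same bytes become the object's payload; `alloc_slot` pops from the chain and `free_slot` pushes back onto it. A host-side sketch of what the function produces (the pointer-aligned buffer and the 16-byte slot size are assumptions made up for illustration):

use core::ptr::NonNull;
use slab_allocator::make_slab_page;

fn free_list_sketch() {
    let mut storage = [0usize; 512]; // 4096 bytes, pointer-aligned
    let page = NonNull::slice_from_raw_parts(
        NonNull::new(storage.as_mut_ptr().cast::<u8>()).unwrap(),
        core::mem::size_of_val(&storage),
    );

    // Slots are threaded back to front, so the returned head is the slot at
    // offset 0, each slot's first word points to the slot right after it, and
    // the last slot's next pointer is None.
    let head = make_slab_page(page, 16);
    assert_eq!(head.cast::<u8>(), page.cast::<u8>());
}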

+ 0 - 164
crates/slab_allocator/src/slab_cache.rs

@@ -1,164 +0,0 @@
-use super::SlabRawPage;
-use core::{marker::PhantomData, ptr::NonNull};
-use eonix_mm::paging::{PageAlloc, PAGE_SIZE};
-use intrusive_list::List;
-
-pub(crate) struct SlabCache<T, A> {
-    empty_list: List,
-    partial_list: List,
-    full_list: List,
-    object_size: u32,
-    _phantom: PhantomData<(T, A)>,
-}
-
-trait SlabRawPageExt {
-    fn alloc_slot(&self) -> Option<NonNull<usize>>;
-    fn dealloc_slot(&self, slot_ptr: *mut u8);
-    fn is_full(&self) -> bool;
-    fn is_empty(&self) -> bool;
-    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>>;
-}
-
-impl<T> SlabRawPageExt for T
-where
-    T: SlabRawPage,
-{
-    fn alloc_slot(&self) -> Option<NonNull<usize>> {
-        let ptr = self.next_free().clone();
-
-        let next_free = match ptr {
-            Some(ptr) => unsafe { ptr.read() as *mut usize },
-            None => unreachable!(),
-        };
-        *self.allocated_count() += 1;
-        *self.next_free() = NonNull::new(next_free);
-        return ptr;
-    }
-
-    fn dealloc_slot(&self, slot_ptr: *mut u8) {
-        let slot_ptr = slot_ptr as *mut usize;
-
-        if let Some(last_free) = self.next_free().clone() {
-            unsafe { *slot_ptr = last_free.as_ptr() as usize }
-        } else {
-            unsafe { *slot_ptr = 0 }
-        }
-
-        *self.allocated_count() -= 1;
-        *self.next_free() = NonNull::new(slot_ptr);
-    }
-
-    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>> {
-        assert!(object_size >= core::mem::size_of::<usize>() as u32);
-
-        let first_free = self.real_page_ptr() as *mut usize;
-
-        let mut slot_ptr = first_free;
-        let mut slot_count = PAGE_SIZE / object_size as usize;
-
-        // SAFETY: carefully ptr operate
-        unsafe {
-            loop {
-                if slot_count == 1 {
-                    *slot_ptr = 0;
-                    break;
-                }
-
-                let next_ptr = slot_ptr.byte_add(object_size as usize);
-                *slot_ptr = next_ptr as usize;
-                slot_ptr = next_ptr;
-                slot_count -= 1;
-            }
-        }
-
-        NonNull::new(first_free)
-    }
-
-    fn is_empty(&self) -> bool {
-        self.allocated_count().clone() == 0
-    }
-
-    fn is_full(&self) -> bool {
-        self.next_free().is_none()
-    }
-}
-
-impl<Raw, Allocator> SlabCache<Raw, Allocator>
-where
-    Raw: SlabRawPage,
-    Allocator: PageAlloc<RawPage = Raw>,
-{
-    pub(crate) const fn new_in(object_size: u32) -> Self {
-        // avoid unnecessary branch in alloc and dealloc
-        assert!(object_size <= PAGE_SIZE as u32 / 2);
-
-        Self {
-            empty_list: List::new(),
-            partial_list: List::new(),
-            full_list: List::new(),
-            object_size: object_size,
-            _phantom: PhantomData,
-        }
-    }
-
-    pub(crate) fn alloc(&mut self, alloc: &Allocator) -> *mut u8 {
-        if !self.partial_list.is_empty() {
-            let page_ptr = unsafe {
-                Raw::from_link(
-                    self.partial_list
-                        .head()
-                        .expect("partial pages should not be empty"),
-                )
-            };
-
-            let ptr = page_ptr.alloc_slot().expect("should get slot");
-
-            if page_ptr.is_full() {
-                self.partial_list.remove(unsafe { page_ptr.get_link() });
-                self.full_list.insert(unsafe { page_ptr.get_link() });
-            }
-            return ptr.as_ptr() as *mut u8;
-        }
-
-        if !self.empty_list.is_empty() {
-            let page_ptr = unsafe {
-                Raw::from_link(
-                    self.empty_list
-                        .head()
-                        .expect("empty pages should not be empty"),
-                )
-            };
-
-            let ptr = page_ptr.alloc_slot().expect("should get slot");
-            self.empty_list.remove(unsafe { page_ptr.get_link() });
-            self.partial_list.insert(unsafe { page_ptr.get_link() });
-            return ptr.as_ptr() as *mut u8;
-        }
-
-        let new_page_ptr = alloc.alloc().expect("slab_cache get page fail!");
-        let first_free = new_page_ptr.slab_page_init(self.object_size);
-        new_page_ptr.slab_init(first_free);
-        let ptr = new_page_ptr.alloc_slot().expect("should get slot");
-        self.partial_list.insert(unsafe { new_page_ptr.get_link() });
-        ptr.as_ptr() as *mut u8
-    }
-
-    pub(crate) fn dealloc(&mut self, ptr: *mut u8, _alloc: &Allocator) {
-        let page_ptr = Raw::in_which(ptr);
-
-        if page_ptr.is_full() {
-            self.full_list.remove(unsafe { page_ptr.get_link() });
-            self.partial_list.insert(unsafe { page_ptr.get_link() });
-        }
-
-        page_ptr.dealloc_slot(ptr);
-
-        if page_ptr.is_empty() {
-            self.partial_list.remove(unsafe { page_ptr.get_link() });
-            self.empty_list.insert(unsafe { page_ptr.get_link() });
-        }
-
-        // TODO: Check whether we should place some pages back with `alloc` if the global
-        //       free page count is below the watermark.
-    }
-}

+ 10 - 11
src/kernel/mem/allocator.rs

@@ -5,13 +5,12 @@ use eonix_hal::mm::ArchPhysAccess;
 use eonix_mm::address::PhysAccess;
 use eonix_mm::paging::{PAGE_SIZE_BITS, PFN};
 use eonix_sync::LazyLock;
-use slab_allocator::SlabAllocator;
+use slab_allocator::SlabAlloc;
 
-use super::page_alloc::RawPagePtr;
 use super::{GlobalPageAlloc, Page, PageExt};
 
-static SLAB_ALLOCATOR: LazyLock<SlabAllocator<RawPagePtr, GlobalPageAlloc, 9>> =
-    LazyLock::new(|| SlabAllocator::new_in(GlobalPageAlloc));
+static SLAB_ALLOCATOR: LazyLock<SlabAlloc<GlobalPageAlloc, 9>> =
+    LazyLock::new(|| SlabAlloc::new_in(GlobalPageAlloc));
 
 struct Allocator;
 
@@ -28,23 +27,23 @@ unsafe impl GlobalAlloc for Allocator {
             let ptr = page.get_ptr();
             page.into_raw();
 
-            ptr.as_ptr()
+            ptr
         };
 
-        if result.is_null() {
-            core::ptr::null_mut()
-        } else {
-            result as *mut u8
-        }
+        result.as_ptr()
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         let size = layout.size().next_power_of_two();
+        let ptr = unsafe {
+            // SAFETY: The memory we've allocated MUST be non-null.
+            NonNull::new_unchecked(ptr)
+        };
 
         if size <= 2048 {
             SLAB_ALLOCATOR.dealloc(ptr, size)
         } else {
-            let paddr = ArchPhysAccess::from_ptr(NonNull::new_unchecked(ptr));
+            let paddr = ArchPhysAccess::from_ptr(ptr);
             let pfn = PFN::from(paddr);
             Page::from_raw(pfn);
         };
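
Note: a worked illustration of the slab/page split this file now relies on; `bucket_index` is a made-up helper that just restates the bucket math from `SlabAlloc::alloc`.

// requested size     bucket index    slot size
//    1 ..=    8  ->  0           ->     8 bytes
//    9 ..=   16  ->  1           ->    16 bytes
//   17 ..=   32  ->  2           ->    32 bytes
//   ...
// 1025 ..= 2048  ->  8           ->  2048 bytes
// 2049 and above ->  bypasses the slab entirely: whole pages are handed out,
//                    and dealloc rebuilds the Page from the pointer's PFN.
fn bucket_index(size: usize) -> usize {
    (size.max(8).next_power_of_two().trailing_zeros() - 3) as usize
}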

+ 4 - 6
src/kernel/mem/page_alloc.rs

@@ -1,15 +1,13 @@
 mod raw_page;
 
-use buddy_allocator::{BuddyAllocator, BuddyRawPage as _};
 use core::sync::atomic::Ordering;
-use eonix_mm::{
-    address::{AddrOps as _, PRange},
-    paging::{GlobalPageAlloc as GlobalPageAllocTrait, PageAlloc, PFN},
-};
+
+use buddy_allocator::{BuddyAllocator, BuddyRawPage as _};
+use eonix_mm::address::{AddrOps as _, PRange};
+use eonix_mm::paging::{GlobalPageAlloc as GlobalPageAllocTrait, PageAlloc, PFN};
 use eonix_sync::{NoContext, Spin};
 use intrusive_list::List;
 use raw_page::PageFlags;
-
 pub use raw_page::{RawPage, RawPagePtr};
 
 const COSTLY_ORDER: u32 = 3;

+ 129 - 70
src/kernel/mem/page_alloc/raw_page.rs

@@ -1,65 +1,46 @@
-use crate::kernel::mem::page_cache::PageCacheRawPage;
-use crate::kernel::mem::PhysAccess;
+use core::ptr::NonNull;
+use core::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
+
 use buddy_allocator::BuddyRawPage;
-use core::{
-    ptr::NonNull,
-    sync::atomic::{AtomicU32, AtomicUsize, Ordering},
-};
 use eonix_hal::mm::ArchPhysAccess;
-use eonix_mm::{
-    address::{PAddr, PhysAccess as _},
-    paging::{RawPage as RawPageTrait, PFN},
-};
-use intrusive_list::{container_of, Link};
-use slab_allocator::SlabRawPage;
+use eonix_mm::address::{PAddr, PhysAccess as _};
+use eonix_mm::paging::{PageAlloc, RawPage as RawPageTrait, PFN};
+use intrusive_list::{container_of, Link, List};
+use slab_allocator::{SlabPage, SlabPageAlloc, SlabPageList, SlabSlot};
+
+use super::GlobalPageAlloc;
+use crate::kernel::mem::page_cache::PageCacheRawPage;
+use crate::kernel::mem::PhysAccess;
 
 const PAGE_ARRAY: NonNull<RawPage> =
     unsafe { NonNull::new_unchecked(0xffffff8040000000 as *mut _) };
 
 pub struct PageFlags(AtomicU32);
 
-struct SlabPageInner {
-    allocated_count: u32,
-    free_next: Option<NonNull<usize>>,
+#[derive(Clone, Copy)]
+struct SlabPageData {
+    allocated_count: usize,
+    free_next: Option<NonNull<SlabSlot>>,
 }
 
-impl SlabPageInner {
-    fn new(free_next: Option<NonNull<usize>>) -> Self {
+impl SlabPageData {
+    const fn new() -> Self {
         Self {
             allocated_count: 0,
-            free_next,
+            free_next: None,
         }
     }
 }
 
-struct PageCacheInner {
+#[derive(Clone, Copy)]
+struct PageCacheData {
     valid_size: usize,
 }
 
-pub struct BuddyPageInner {}
-
-enum PageType {
-    Buddy(BuddyPageInner),
-    Slab(SlabPageInner),
-    PageCache(PageCacheInner),
-}
-
-impl PageType {
-    fn slab_data(&mut self) -> &mut SlabPageInner {
-        if let PageType::Slab(slab_data) = self {
-            return slab_data;
-        } else {
-            unreachable!()
-        }
-    }
-
-    fn page_cache_data(&mut self) -> &mut PageCacheInner {
-        if let PageType::PageCache(cache_data) = self {
-            return cache_data;
-        } else {
-            unreachable!()
-        }
-    }
+#[repr(C)]
+union PageData {
+    slab: SlabPageData,
+    page_cache: PageCacheData,
 }
 
 pub struct RawPage {
@@ -73,7 +54,7 @@ pub struct RawPage {
     flags: PageFlags,
     refcount: AtomicUsize,
 
-    shared_data: PageType,
+    shared_data: PageData,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -109,6 +90,13 @@ impl PageFlags {
 }
 
 impl RawPagePtr {
+    pub const fn from_ref(raw_page_ref: &RawPage) -> Self {
+        Self::new(unsafe {
+            // SAFETY: Rust references always point to non-null addresses.
+            NonNull::new_unchecked(&raw const *raw_page_ref as *mut _)
+        })
+    }
+
     pub const fn new(ptr: NonNull<RawPage>) -> Self {
         Self(ptr)
     }
@@ -215,50 +203,68 @@ impl BuddyRawPage for RawPagePtr {
     }
 }
 
-impl SlabRawPage for RawPagePtr {
-    unsafe fn from_link(link: &mut Link) -> Self {
-        let raw_page_ptr = container_of!(link, RawPage, link);
-        Self(raw_page_ptr)
-    }
+impl SlabPage for RawPage {
+    fn get_data_ptr(&self) -> NonNull<[u8]> {
+        let raw_page_ptr = RawPagePtr::from_ref(self);
+        let paddr_start = PAddr::from(PFN::from(raw_page_ptr));
+        let page_data_ptr = unsafe { paddr_start.as_ptr() };
 
-    unsafe fn get_link(&self) -> &mut Link {
-        &mut self.as_mut().link
+        NonNull::slice_from_raw_parts(page_data_ptr, 1 << (self.order + 12))
     }
 
-    fn in_which(ptr: *mut u8) -> RawPagePtr {
+    fn get_free_slot(&self) -> Option<NonNull<SlabSlot>> {
         unsafe {
-            // SAFETY: The pointer is allocated from the slab allocator,
-            //         which can't be null.
-            let ptr = NonNull::new_unchecked(ptr);
+            // SAFETY: TODO
+            self.shared_data.slab.free_next
+        }
+    }
 
-            // SAFETY: The pointer is valid.
-            let paddr = ArchPhysAccess::from_ptr(ptr);
-            let pfn = PFN::from(paddr);
+    fn set_free_slot(&mut self, next: Option<NonNull<SlabSlot>>) {
+        self.shared_data.slab.free_next = next;
+    }
 
-            RawPagePtr::from(pfn)
+    fn get_alloc_count(&self) -> usize {
+        unsafe {
+            // SAFETY: TODO
+            self.shared_data.slab.allocated_count
         }
     }
 
-    fn allocated_count(&self) -> &mut u32 {
-        &mut self.as_mut().shared_data.slab_data().allocated_count
-    }
+    fn inc_alloc_count(&mut self) -> usize {
+        unsafe {
+            // SAFETY: TODO
+            self.shared_data.slab.allocated_count += 1;
 
-    fn next_free(&self) -> &mut Option<NonNull<usize>> {
-        &mut self.as_mut().shared_data.slab_data().free_next
+            self.shared_data.slab.allocated_count
+        }
     }
 
-    fn real_page_ptr(&self) -> *mut u8 {
-        self.real_ptr().as_ptr()
+    fn dec_alloc_count(&mut self) -> usize {
+        unsafe {
+            // SAFETY: TODO
+            self.shared_data.slab.allocated_count -= 1;
+
+            self.shared_data.slab.allocated_count
+        }
     }
 
-    fn slab_init(&self, first_free: Option<NonNull<usize>>) {
-        self.as_mut().shared_data = PageType::Slab(SlabPageInner::new(first_free));
+    unsafe fn from_allocated(ptr: NonNull<u8>) -> &'static mut Self {
+        unsafe {
+            // SAFETY: The caller ensures that `ptr` is valid.
+            let paddr = ArchPhysAccess::from_ptr(ptr);
+            let pfn = PFN::from(paddr);
+
+            RawPagePtr::from(pfn).as_mut()
+        }
     }
 }
 
 impl PageCacheRawPage for RawPagePtr {
     fn valid_size(&self) -> &mut usize {
-        &mut self.as_mut().shared_data.page_cache_data().valid_size
+        unsafe {
+            // SAFETY: The caller ensures that the page is in some page cache.
+            &mut self.as_mut().shared_data.page_cache.valid_size
+        }
     }
 
     fn is_dirty(&self) -> bool {
@@ -274,6 +280,59 @@ impl PageCacheRawPage for RawPagePtr {
     }
 
     fn cache_init(&self) {
-        self.as_mut().shared_data = PageType::PageCache(PageCacheInner { valid_size: 0 });
+        self.as_mut().shared_data.page_cache = PageCacheData { valid_size: 0 };
+    }
+}
+
+pub struct RawSlabPageList(List);
+
+impl SlabPageList for RawSlabPageList {
+    type Page = RawPage;
+
+    fn new() -> Self {
+        Self(List::new())
+    }
+
+    fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    fn peek_head(&mut self) -> Option<&mut Self::Page> {
+        unsafe {
+            let link = self.0.head()?;
+            let mut raw_page_ptr = container_of!(link, RawPage, link);
+
+            Some(raw_page_ptr.as_mut())
+        }
+    }
+
+    fn pop_head(&mut self) -> Option<&'static mut Self::Page> {
+        unsafe {
+            let link = self.0.pop()?;
+            let mut raw_page_ptr = container_of!(link, RawPage, link);
+
+            Some(raw_page_ptr.as_mut())
+        }
+    }
+
+    fn push_tail(&mut self, page: &'static mut Self::Page) {
+        self.0.insert(&mut page.link);
+    }
+
+    fn remove(&mut self, page: &mut Self::Page) {
+        self.0.remove(&mut page.link)
+    }
+}
+
+impl SlabPageAlloc for GlobalPageAlloc {
+    type Page = RawPage;
+    type PageList = RawSlabPageList;
+
+    unsafe fn alloc_uninit(&self) -> &'static mut RawPage {
+        let raw_page = self.alloc().expect("Out of memory").as_mut();
+        raw_page.flags.set(PageFlags::SLAB);
+        raw_page.shared_data.slab = SlabPageData::new();
+
+        raw_page
     }
 }
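
Note: `shared_data` is now a plain union rather than a tagged enum, so the slab accessors above read `shared_data.slab` with no tag check; they rely on the page having been set up by `alloc_uninit` (which sets `PageFlags::SLAB` and rewrites the field) and not yet returned to the buddy allocator. A hypothetical accessor, not part of this commit, just to spell that invariant out in one place:

impl RawPage {
    fn slab_data_mut(&mut self) -> &mut SlabPageData {
        unsafe {
            // SAFETY: callers only use this on pages obtained from
            // `alloc_uninit`, which makes `slab` the active variant of the
            // union and keeps it active for the page's slab lifetime.
            &mut self.shared_data.slab
        }
    }
}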