refactor: better slab trait interface

zhuowei shao, 8 months ago
Parent commit: ef8434b70a
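In outline: the commit shrinks the `SlabRawPage` trait to raw storage accessors and moves the freelist bookkeeping (`alloc_slot`, `dealloc_slot`, `is_full`, `is_empty`) behind a private extension trait in `slab_cache.rs`. The resulting trait surface, reassembled from the hunks below (a sketch with paraphrased doc comments; the real file may carry more items than these hunks show):

    pub trait SlabRawPage: RawPage {
        /// SAFETY: the caller must ensure no other reference to the link exists.
        unsafe fn get_link(&self) -> &mut Link;

        /// Stamp the page as a slab page with the given freelist head.
        fn slab_init(&self, first_free: Option<NonNull<usize>>);

        /// Returns the slab page that `ptr` belongs to.
        fn in_which(ptr: *mut u8) -> Self;

        /// Pointer to the start of the backing page memory.
        fn real_page_ptr(&self) -> *mut u8;

        /// Mutable access to the page's allocation counter.
        fn allocated_count(&self) -> &mut u32;

        /// Mutable access to the page's freelist head.
        fn next_free(&self) -> &mut Option<NonNull<usize>>;
    }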

+ 6 - 8
crates/slab_allocator/src/lib.rs

@@ -2,7 +2,7 @@
 
 mod slab_cache;
 
-use core::cmp::max;
+use core::{cmp::max, ptr::NonNull};
 
 use eonix_mm::paging::{PageAlloc, RawPage};
 use eonix_sync::Spin;
@@ -23,18 +23,16 @@ pub trait SlabRawPage: RawPage {
     /// to the link exists.
     unsafe fn get_link(&self) -> &mut Link;
 
-    fn slab_init(&self, object_size: u32);
-
-    fn is_emtpy(&self) -> bool;
-
-    fn is_full(&self) -> bool;
+    fn slab_init(&self, first_free: Option<NonNull<usize>>);
 
     // Returns the slab page that `ptr` belongs to.
     fn in_which(ptr: *mut u8) -> Self;
 
-    fn alloc_slot(&self) -> *mut u8;
+    fn real_page_ptr(&self) -> *mut u8;
+
+    fn allocated_count(&self) -> &mut u32;
 
-    fn dealloc_slot(&self, ptr: *mut u8);
+    fn next_free(&self) -> &mut Option<NonNull<usize>>;
 }
 
 pub struct SlabAllocator<T, A, const SLAB_CACHE_COUNT: usize> {

+ 85 - 11
crates/slab_allocator/src/slab_cache.rs

@@ -1,5 +1,5 @@
 use super::SlabRawPage;
-use core::marker::PhantomData;
+use core::{marker::PhantomData, ptr::NonNull};
 use eonix_mm::paging::{PageAlloc, PAGE_SIZE};
 use intrusive_list::List;
 
@@ -12,6 +12,78 @@ pub(crate) struct SlabCache<T, A> {
     _phantom: PhantomData<T>,
 }
 
+trait SlabRawPageExt {
+    fn alloc_slot(&self) -> Option<NonNull<usize>>;
+    fn dealloc_slot(&self, slot_ptr: *mut u8);
+    fn is_full(&self) -> bool;
+    fn is_empty(&self) -> bool;
+    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>>;
+}
+
+impl<T> SlabRawPageExt for T
+where
+    T: SlabRawPage,
+{
+    fn alloc_slot(&self) -> Option<NonNull<usize>> {
+        let ptr = self.next_free().clone();
+
+        let next_free = match ptr {
+            Some(ptr) => unsafe { ptr.read() as *mut usize },
+            None => unreachable!(),
+        };
+        *self.allocated_count() += 1;
+        *self.next_free() = NonNull::new(next_free);
+        return ptr;
+    }
+
+    fn dealloc_slot(&self, slot_ptr: *mut u8) {
+        let slot_ptr = slot_ptr as *mut usize;
+
+        if let Some(last_free) = self.next_free().clone() {
+            unsafe { *slot_ptr = last_free.as_ptr() as usize }
+        } else {
+            unsafe { *slot_ptr = 0 }
+        }
+
+        *self.allocated_count() -= 1;
+        *self.next_free() = NonNull::new(slot_ptr);
+    }
+
+    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>> {
+        assert!(object_size >= core::mem::size_of::<usize>() as u32);
+
+        let first_free = self.real_page_ptr() as *mut usize;
+
+        let mut slot_ptr = first_free;
+        let mut slot_count = PAGE_SIZE / object_size as usize;
+
+        // SAFETY: `slot_ptr` walks the page in `object_size` steps and stays in bounds.
+        unsafe {
+            loop {
+                if slot_count == 1 {
+                    *slot_ptr = 0;
+                    break;
+                }
+
+                let next_ptr = slot_ptr.byte_add(object_size as usize);
+                *slot_ptr = next_ptr as usize;
+                slot_ptr = next_ptr;
+                slot_count -= 1;
+            }
+        }
+
+        NonNull::new(first_free)
+    }
+
+    fn is_empty(&self) -> bool {
+        self.allocated_count().clone() == 0
+    }
+
+    fn is_full(&self) -> bool {
+        self.next_free().is_none()
+    }
+}
+
 impl<Raw, Allocator> SlabCache<Raw, Allocator>
 where
     Raw: SlabRawPage,
@@ -41,12 +113,13 @@ where
                 )
             };
 
-            let ptr = page_ptr.alloc_slot();
+            let ptr = page_ptr.alloc_slot().expect("should get slot");
+
             if page_ptr.is_full() {
                 self.partial_list.remove(unsafe { page_ptr.get_link() });
                 self.full_list.insert(unsafe { page_ptr.get_link() });
             }
-            return ptr;
+            return ptr.as_ptr() as *mut u8;
         }
 
         if !self.empty_list.is_empty() {
@@ -58,17 +131,18 @@ where
                 )
             };
 
-            let ptr = page_ptr.alloc_slot();
+            let ptr = page_ptr.alloc_slot().expect("should get slot");
             self.empty_list.remove(unsafe { page_ptr.get_link() });
             self.partial_list.insert(unsafe { page_ptr.get_link() });
-            return ptr;
+            return ptr.as_ptr() as *mut u8;
         }
 
-        let new_page = self.alloc.alloc().expect("slab_cache get page fail!");
-        new_page.slab_init(self.object_size);
-        let ptr = new_page.alloc_slot();
-        self.partial_list.insert(unsafe { new_page.get_link() });
-        ptr
+        let new_page_ptr = self.alloc.alloc().expect("slab_cache failed to get a page");
+        let first_free = new_page_ptr.slab_page_init(self.object_size);
+        new_page_ptr.slab_init(first_free);
+        let ptr = new_page_ptr.alloc_slot().expect("should get slot");
+        self.partial_list.insert(unsafe { new_page_ptr.get_link() });
+        ptr.as_ptr() as *mut u8
     }
 
     pub(crate) fn dealloc(&mut self, ptr: *mut u8) {
@@ -81,7 +155,7 @@ where
 
         page_ptr.dealloc_slot(ptr);
 
-        if page_ptr.is_emtpy() {
+        if page_ptr.is_empty() {
             self.partial_list.remove(unsafe { page_ptr.get_link() });
             self.empty_list.insert(unsafe { page_ptr.get_link() });
         }
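
As context for the `SlabRawPageExt` logic above: the freelist is intrusive. `slab_page_init` threads it through the page itself, each free slot's first word holding the address of the next free slot with `0` marking the end; `alloc_slot` pops the head and `dealloc_slot` pushes a freed slot back. A self-contained sketch of the same encoding over an ordinary heap buffer (demo only; the `PAGE_SIZE` and `OBJECT_SIZE` values here are assumptions, not the crate's):

    use std::ptr::NonNull;

    const PAGE_SIZE: usize = 4096;
    const OBJECT_SIZE: usize = 64;

    fn main() {
        // A usize-aligned stand-in for one slab page.
        let mut page = vec![0usize; PAGE_SIZE / std::mem::size_of::<usize>()];
        let base = page.as_mut_ptr();

        // Thread the freelist the way slab_page_init does: every slot's
        // first word points at the next slot, and the last slot holds 0.
        let slot_count = PAGE_SIZE / OBJECT_SIZE;
        unsafe {
            let mut slot = base;
            for i in 0..slot_count {
                if i + 1 == slot_count {
                    *slot = 0;
                } else {
                    let next = slot.byte_add(OBJECT_SIZE);
                    *slot = next as usize;
                    slot = next;
                }
            }
        }

        // Pop two slots the way alloc_slot does.
        let mut next_free = NonNull::new(base);
        let mut taken = Vec::new();
        for _ in 0..2 {
            let slot = next_free.expect("page is full");
            next_free = NonNull::new(unsafe { slot.as_ptr().read() } as *mut usize);
            taken.push(slot);
        }

        // Push one slot back the way dealloc_slot does.
        let freed = taken.pop().unwrap();
        unsafe { *freed.as_ptr() = next_free.map_or(0, |p| p.as_ptr() as usize) };
        next_free = NonNull::new(freed.as_ptr());
        assert_eq!(next_free, Some(freed));
    }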

+ 28 - 120
src/kernel/mem/page_alloc/raw_page.rs

@@ -18,10 +18,18 @@ const PAGE_ARRAY: NonNull<RawPage> =
 
 pub struct PageFlags(AtomicU32);
 
-pub struct SlabPageInner {
-    pub object_size: u32,
-    pub allocated_count: u32,
-    pub free_next: Option<NonNull<usize>>,
+struct SlabPageInner {
+    allocated_count: u32,
+    free_next: Option<NonNull<usize>>,
+}
+
+impl SlabPageInner {
+    fn new(free_next: Option<NonNull<usize>>) -> Self {
+        Self {
+            allocated_count: 0,
+            free_next,
+        }
+    }
 }
 
 pub struct BuddyPageInner {}
@@ -32,66 +40,11 @@ pub enum PageType {
 }
 
 impl PageType {
-    // slab
-    pub fn new_slab(&mut self, object_size: u32) {
-        *self = PageType::Slab(SlabPageInner {
-            object_size,
-            allocated_count: 0,
-            free_next: None,
-        })
-    }
-
-    pub fn object_size(&self) -> u32 {
-        assert!(matches!(self, PageType::Slab(_)));
-
-        match self {
-            PageType::Slab(inner) => inner.object_size,
-            _ => unsafe { core::hint::unreachable_unchecked() },
-        }
-    }
-
-    pub fn allocated_count(&self) -> u32 {
-        assert!(matches!(self, PageType::Slab(_)));
-
-        match self {
-            PageType::Slab(inner) => inner.allocated_count,
-            _ => unsafe { core::hint::unreachable_unchecked() },
-        }
-    }
-
-    pub fn allocated_count_add(&mut self, val: u32) {
-        assert!(matches!(self, PageType::Slab(_)));
-
-        match self {
-            PageType::Slab(inner) => inner.allocated_count += val,
-            _ => unsafe { core::hint::unreachable_unchecked() },
-        }
-    }
-
-    pub fn allocated_count_sub(&mut self, val: u32) {
-        assert!(matches!(self, PageType::Slab(_)));
-
-        match self {
-            PageType::Slab(inner) => inner.allocated_count -= val,
-            _ => unsafe { core::hint::unreachable_unchecked() },
-        }
-    }
-
-    pub fn free_next(&self) -> Option<NonNull<usize>> {
-        assert!(matches!(self, PageType::Slab(_)));
-
-        match self {
-            PageType::Slab(inner) => inner.free_next,
-            _ => unsafe { core::hint::unreachable_unchecked() },
-        }
-    }
-
-    pub fn set_free_next(&mut self, free_next: Option<NonNull<usize>>) {
-        assert!(matches!(self, PageType::Slab(_)));
-
-        match self {
-            PageType::Slab(inner) => inner.free_next = free_next,
-            _ => unsafe { core::hint::unreachable_unchecked() },
+    pub fn slab_data(&mut self) -> &mut SlabPageInner {
+        if let PageType::Slab(slab_data) = self {
+            return slab_data;
+        } else {
+            unreachable!()
         }
     }
 }
@@ -107,7 +60,7 @@ pub struct RawPage {
     pub flags: PageFlags,
     pub refcount: AtomicUsize,
 
-    pub type_: PageType,
+    pub shared_data: PageType,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -252,69 +205,24 @@ impl SlabRawPage for RawPagePtr {
         &mut self.as_mut().link
     }
 
-    fn is_emtpy(&self) -> bool {
-        self.as_ref().type_.allocated_count() == 0
-    }
-
-    fn is_full(&self) -> bool {
-        self.as_ref().type_.free_next().is_none()
-    }
-
-    fn alloc_slot(&self) -> *mut u8 {
-        let ptr = self.as_ref().type_.free_next();
-
-        match ptr {
-            Some(ptr) => {
-                let next_free = unsafe { ptr.read() as *mut usize };
-                self.as_mut().type_.set_free_next(NonNull::new(next_free));
-                self.as_mut().type_.allocated_count_add(1);
-                return ptr.as_ptr() as *mut u8;
-            }
-            None => unreachable!(),
-        }
-    }
-
     fn in_which(ptr: *mut u8) -> RawPagePtr {
         let vaddr = VAddr::from(ptr as usize & !(PAGE_SIZE - 1));
-
         unsafe { vaddr.as_raw_page() }
     }
 
-    fn dealloc_slot(&self, ptr: *mut u8) {
-        let ptr = ptr as *mut usize;
-
-        if let Some(last_free) = self.as_ref().type_.free_next() {
-            unsafe { *ptr = last_free.as_ptr() as usize }
-        } else {
-            unsafe { *ptr = 0 }
-        }
-
-        self.as_mut().type_.allocated_count_sub(1);
-        self.as_mut().type_.set_free_next(NonNull::new(ptr));
+    fn allocated_count(&self) -> &mut u32 {
+        &mut self.as_mut().shared_data.slab_data().allocated_count
     }
 
-    fn slab_init(&self, object_size: u32) {
-        assert!(object_size >= core::mem::size_of::<usize>() as u32);
-
-        self.as_mut().type_.new_slab(object_size);
-
-        let mut slot_count = PAGE_SIZE / object_size as usize;
-        let mut ptr = self.real_ptr::<usize>().as_ptr();
-        self.as_mut().type_.set_free_next(NonNull::new(ptr));
+    fn next_free(&self) -> &mut Option<NonNull<usize>> {
+        &mut self.as_mut().shared_data.slab_data().free_next
+    }
 
-        // SAFETY: carefully ptr operate
-        unsafe {
-            loop {
-                if slot_count == 1 {
-                    *ptr = 0;
-                    break;
-                }
+    fn real_page_ptr(&self) -> *mut u8 {
+        self.real_ptr().as_ptr()
+    }
 
-                let next_ptr = ptr.byte_add(object_size as usize);
-                *ptr = next_ptr as usize;
-                ptr = next_ptr;
-                slot_count -= 1;
-            }
-        }
+    fn slab_init(&self, first_free: Option<NonNull<usize>>) {
+        self.as_mut().shared_data = PageType::Slab(SlabPageInner::new(first_free));
     }
 }
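
One subtlety worth flagging in the new interface: `allocated_count()` and `next_free()` hand out `&mut` through `&self`. `RawPagePtr` can do that because it is itself a pointer into the page array and `as_mut()` materializes the mutable reference; presumably the `Spin` lock around each `SlabCache` (see `lib.rs`) is what keeps two such references from coexisting. A stripped-down sketch of the pattern with hypothetical types (not the kernel's):

    use core::{cell::UnsafeCell, ptr::NonNull};

    // Hypothetical per-page slab metadata.
    struct PageMeta {
        allocated_count: u32,
        free_next: Option<NonNull<usize>>,
    }

    // Hypothetical stand-in for RawPagePtr: a copyable pointer to page metadata.
    #[derive(Clone, Copy)]
    struct PagePtr(NonNull<UnsafeCell<PageMeta>>);

    impl PagePtr {
        /// SAFETY: the caller must hold the cache lock, so that no two `&mut`
        /// projections of the same page's metadata are alive at once.
        unsafe fn meta(&self) -> &mut PageMeta {
            &mut *self.0.as_ref().get()
        }

        fn allocated_count(&self) -> &mut u32 {
            unsafe { &mut self.meta().allocated_count }
        }

        fn next_free(&self) -> &mut Option<NonNull<usize>> {
            unsafe { &mut self.meta().free_next }
        }
    }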