slab_cache.rs

use super::SlabRawPage;
use core::{marker::PhantomData, ptr::NonNull};
use eonix_mm::paging::{PageAlloc, PAGE_SIZE};
use intrusive_list::List;
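
/// A slab cache that serves fixed-size objects carved out of raw pages of
/// type `T`, with fresh pages obtained from the page allocator `A`.
///
/// Pages are kept on three intrusive lists according to how many of their
/// slots are currently allocated: `empty_list` (no slots in use),
/// `partial_list` (some slots in use) and `full_list` (every slot in use).
///
/// A minimal usage sketch; `RawPage` and `PageAllocImpl` are hypothetical
/// implementations of `SlabRawPage` and `PageAlloc` that are not defined
/// in this file:
///
/// ```ignore
/// let allocator = PageAllocImpl::new();
/// let mut cache: SlabCache<RawPage, PageAllocImpl> = SlabCache::new_in(64);
/// let obj = cache.alloc(&allocator);
/// // ... use the 64-byte slot behind `obj` ...
/// cache.dealloc(obj, &allocator);
/// ```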
pub(crate) struct SlabCache<T, A> {
    empty_list: List,
    partial_list: List,
    full_list: List,
    object_size: u32,
    _phantom: PhantomData<(T, A)>,
}
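
/// Free-list helpers layered on top of [`SlabRawPage`].
///
/// The free list is threaded through the page itself: every free slot
/// stores the address of the next free slot, and the last free slot
/// stores `0` to terminate the chain.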
trait SlabRawPageExt {
    fn alloc_slot(&self) -> Option<NonNull<usize>>;
    fn dealloc_slot(&self, slot_ptr: *mut u8);
    fn is_full(&self) -> bool;
    fn is_empty(&self) -> bool;
    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>>;
}

impl<T> SlabRawPageExt for T
where
    T: SlabRawPage,
{
    fn alloc_slot(&self) -> Option<NonNull<usize>> {
        let ptr = self.next_free().clone();
        // Callers only allocate from pages that are not full, so the free
        // list is never empty here.
        let next_free = match ptr {
            Some(ptr) => unsafe { ptr.read() as *mut usize },
            None => unreachable!("alloc_slot called on a full page"),
        };
        *self.allocated_count() += 1;
        *self.next_free() = NonNull::new(next_free);
        ptr
    }

    fn dealloc_slot(&self, slot_ptr: *mut u8) {
        let slot_ptr = slot_ptr as *mut usize;
        // Push the slot back onto the free list: store the previous list
        // head (or 0 for the end of the chain) in the freed slot, then make
        // the freed slot the new head.
        if let Some(last_free) = self.next_free().clone() {
            unsafe { *slot_ptr = last_free.as_ptr() as usize }
        } else {
            unsafe { *slot_ptr = 0 }
        }
        *self.allocated_count() -= 1;
        *self.next_free() = NonNull::new(slot_ptr);
    }

    fn slab_page_init(&self, object_size: u32) -> Option<NonNull<usize>> {
        assert!(object_size >= core::mem::size_of::<usize>() as u32);
        let first_free = self.real_page_ptr() as *mut usize;
        let mut slot_ptr = first_free;
        let mut slot_count = PAGE_SIZE / object_size as usize;
        // Thread the free list through the page: each free slot stores the
        // address of the next one, and the last slot stores 0 to terminate
        // the chain.
        //
        // SAFETY: `slot_ptr` always points at the start of a slot within
        // this page, so every write stays inside the page.
        unsafe {
            loop {
                if slot_count == 1 {
                    *slot_ptr = 0;
                    break;
                }
                let next_ptr = slot_ptr.byte_add(object_size as usize);
                *slot_ptr = next_ptr as usize;
                slot_ptr = next_ptr;
                slot_count -= 1;
            }
        }
        NonNull::new(first_free)
    }

    fn is_empty(&self) -> bool {
        self.allocated_count().clone() == 0
    }

    fn is_full(&self) -> bool {
        self.next_free().is_none()
    }
}

impl<Raw, Allocator> SlabCache<Raw, Allocator>
where
    Raw: SlabRawPage,
    Allocator: PageAlloc<RawPage = Raw>,
{
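    /// Creates an empty cache that serves objects of `object_size` bytes.
    ///
    /// This is a `const fn`, so it can be used to initialize a `static`.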
    pub(crate) const fn new_in(object_size: u32) -> Self {
        // `object_size <= PAGE_SIZE / 2` guarantees at least two slots per
        // page, which lets `alloc` and `dealloc` avoid an extra branch.
        assert!(object_size <= PAGE_SIZE as u32 / 2);
        Self {
            empty_list: List::new(),
            partial_list: List::new(),
            full_list: List::new(),
            object_size,
            _phantom: PhantomData,
        }
    }
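
    /// Allocates one object slot, preferring a partially used page, then an
    /// empty page, and only requesting a fresh page from `alloc` when both
    /// lists are exhausted. Pages are moved between the lists as their
    /// occupancy changes.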
    pub(crate) fn alloc(&mut self, alloc: &Allocator) -> *mut u8 {
        if !self.partial_list.is_empty() {
            let page_ptr = unsafe {
                Raw::from_link(
                    self.partial_list
                        .head()
                        .expect("partial_list should not be empty here"),
                )
            };
            let ptr = page_ptr.alloc_slot().expect("should get slot");
            if page_ptr.is_full() {
                self.partial_list.remove(unsafe { page_ptr.get_link() });
                self.full_list.insert(unsafe { page_ptr.get_link() });
            }
            return ptr.as_ptr() as *mut u8;
        }

        if !self.empty_list.is_empty() {
            let page_ptr = unsafe {
                Raw::from_link(
                    self.empty_list
                        .head()
                        .expect("empty_list should not be empty here"),
                )
            };
            let ptr = page_ptr.alloc_slot().expect("should get slot");
            self.empty_list.remove(unsafe { page_ptr.get_link() });
            self.partial_list.insert(unsafe { page_ptr.get_link() });
            return ptr.as_ptr() as *mut u8;
        }

        // No cached pages left: get a fresh page from the page allocator and
        // thread a free list through it.
        let new_page_ptr = alloc
            .alloc()
            .expect("slab cache failed to allocate a new page");
        let first_free = new_page_ptr.slab_page_init(self.object_size);
        new_page_ptr.slab_init(first_free);
        let ptr = new_page_ptr.alloc_slot().expect("should get slot");
        self.partial_list.insert(unsafe { new_page_ptr.get_link() });
        ptr.as_ptr() as *mut u8
    }
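
    /// Returns the slot at `ptr` to the slab page that contains it, moving
    /// the page from the full to the partial list, or from the partial to
    /// the empty list, as its occupancy changes.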
    pub(crate) fn dealloc(&mut self, ptr: *mut u8, _alloc: &Allocator) {
        let page_ptr = Raw::in_which(ptr);
        // A full page gains a free slot, so it becomes partial again.
        if page_ptr.is_full() {
            self.full_list.remove(unsafe { page_ptr.get_link() });
            self.partial_list.insert(unsafe { page_ptr.get_link() });
        }
        page_ptr.dealloc_slot(ptr);
        // If this was the last allocated slot, the page is now empty.
        if page_ptr.is_empty() {
            self.partial_list.remove(unsafe { page_ptr.get_link() });
            self.empty_list.insert(unsafe { page_ptr.get_link() });
        }
        // TODO: Check whether we should place some pages back with `alloc` if the global
        // free page count is below the watermark.
    }
}