slab.cc

#include <cassert>
#include <cstddef>
#include <cstdint>

#include <types/list.hpp>

#include <kernel/async/lock.hpp>
#include <kernel/mem/paging.hpp>
#include <kernel/mem/slab.hpp>

using namespace kernel::mem;
using namespace types::list;

constexpr std::size_t SLAB_PAGE_SIZE = 0x1000; // 4K

// Global lock serializing all slab operations.
kernel::async::mutex slab_lock;
// Byte offset of the first object in a slab page: sizeof(slab_head)
// rounded up to the next multiple of the object size. The mask only
// works for power-of-two object sizes.
std::ptrdiff_t _slab_data_start_offset(std::size_t size) {
    return (sizeof(slab_head) + size - 1) & ~(size - 1);
}

// Number of objects that fit in one slab page after the header.
std::size_t _slab_max_count(std::size_t size) {
    return (SLAB_PAGE_SIZE - _slab_data_start_offset(size)) / size;
}
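// Worked example, assuming (hypothetically) sizeof(slab_head) == 40:
// for a cache of 32-byte objects, _slab_data_start_offset(32) rounds
// 40 up to the next multiple of 32,
//     (40 + 31) & ~31 == 71 & ~31 == 64,
// and _slab_max_count(32) == (4096 - 64) / 32 == 126 objects per page.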
// Pop one object off a slab's intrusive free list. The first word of
// every free object stores the pointer to the next free object.
void* _slab_head_alloc(slab_head* slab) {
    if (slab->free_count == 0)
        return nullptr;

    void* ptr = slab->free;
    slab->free = *(void**)ptr;
    slab->free_count--;

    return ptr;
}
// Initialize the page at physical address `start` as a slab and thread
// all of its object slots onto the free list.
slab_head* _make_slab(uintptr_t start, std::size_t size) {
    slab_head* slab = physaddr<slab_head>{start};
    slab->obj_size = size;
    slab->free_count = _slab_max_count(size);
    slab->next = nullptr;
    slab->prev = nullptr;
    slab->free = physaddr<void>{start + _slab_data_start_offset(size)};

    // Link each slot to the one after it; the last slot ends the list.
    std::byte* ptr = (std::byte*)slab->free;
    for (unsigned i = 0; i < slab->free_count; ++i) {
        void* nextptr = ptr + size;
        if (i == slab->free_count - 1)
            *(void**)ptr = nullptr;
        else
            *(void**)ptr = nextptr;
        ptr = (std::byte*)nextptr;
    }

    return slab;
}
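// Resulting page layout (offsets depend on sizeof(slab_head) and the
// object size):
//
//     | slab_head | obj 0 | obj 1 | ... | obj N-1 |
//          free --> obj 0 --> obj 1 --> ... --> obj N-1 --> nullptr
//
// where each arrow is the next-pointer stored in the slot's first word.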
// Grab a fresh page, tag it as slab-backed, and put the new slab on
// the cache's empty list.
void _slab_add_page(slab_cache* cache) {
    auto* new_page = paging::alloc_page();
    auto new_page_pfn = paging::page_to_pfn(new_page);
    new_page->flags |= paging::PAGE_SLAB;

    auto* slab = _make_slab(new_page_pfn, cache->obj_size);
    slab->cache = cache;

    list_insert(&cache->slabs_empty, slab);
}
void* kernel::mem::slab_alloc(slab_cache* cache) {
    async::lock_guard_irq lock(slab_lock);

    slab_head* slab = cache->slabs_partial;
    if (!slab) { // no partial slabs, try to get an empty slab
        if (!cache->slabs_empty) // no empty slabs, create a new one
            _slab_add_page(cache);

        slab = list_get(&cache->slabs_empty);
        list_insert(&cache->slabs_partial, slab);
    }

    void* ptr = _slab_head_alloc(slab);
    if (slab->free_count == 0) { // slab is now full
        list_remove(&cache->slabs_partial, slab);
        list_insert(&cache->slabs_full, slab);
    }

    return ptr;
}
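// Per-page list state machine maintained by slab_alloc()/slab_free():
//
//     empty --alloc--> partial --last alloc--> full
//     full  --free---> partial --last free---> empty
//
// A slab whose page holds only one object skips the partial state.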
void kernel::mem::slab_free(void* ptr) {
    async::lock_guard_irq lock(slab_lock);

    // Objects never cross page boundaries, so masking the pointer down
    // to the page start recovers the slab_head.
    slab_head* slab = (slab_head*)((uintptr_t)ptr & ~(SLAB_PAGE_SIZE - 1));

    // Push the object back onto the free list.
    *(void**)ptr = slab->free;
    slab->free = ptr;
    slab->free_count++;

    auto max_count = _slab_max_count(slab->obj_size);

    if (max_count == 1) {
        // Single-object slab: it goes straight from full to empty.
        list_remove(&slab->cache->slabs_full, slab);
        list_insert(&slab->cache->slabs_empty, slab);
        return; // without this, the checks below would move it again
    }

    if (slab->free_count == 1) {
        // First object freed from a full slab: full -> partial.
        list_remove(&slab->cache->slabs_full, slab);
        list_insert(&slab->cache->slabs_partial, slab);
    }

    if (slab->free_count == max_count) {
        // Last object freed: partial -> empty.
        list_remove(&slab->cache->slabs_partial, slab);
        list_insert(&slab->cache->slabs_empty, slab);
    }
}
void kernel::mem::init_slab_cache(slab_cache* cache, std::size_t obj_size) {
    cache->obj_size = obj_size;
    cache->slabs_empty = nullptr;
    cache->slabs_partial = nullptr;
    cache->slabs_full = nullptr;

    // Start the cache off with one empty slab page.
    _slab_add_page(cache);
}
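// Usage sketch (hypothetical caller; a kmalloc-style front end would
// typically keep one slab_cache per object size class):
//
//     slab_cache cache;
//     init_slab_cache(&cache, 32);     // cache of 32-byte objects
//     void* obj = slab_alloc(&cache);  // may add a page on demand
//     slab_free(obj);                  // finds the slab by page mask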