// allocator.cc

#include <types/allocator.hpp>

#include <bit>
#include <cstddef>

#include <assert.h>
#include <stdint.h>

#include <kernel/async/lock.hpp>
#include <kernel/mem/paging.hpp>
#include <kernel/mem/slab.hpp>

constexpr uintptr_t KERNEL_HEAP_START = 0xffff'ff81'8000'0000;
constexpr uintptr_t KERNEL_HEAP_END = 0xffff'ffbf'ffff'ffff;
constexpr uintptr_t KERNEL_HEAP_SIZE = KERNEL_HEAP_END - KERNEL_HEAP_START;
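// note: KERNEL_HEAP_END looks inclusive, so the heap spans roughly
// 250 GiB of kernel virtual address space (KERNEL_HEAP_SIZE comes out
// one byte short of the full range)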

namespace types::memory {

struct mem_blk_flags {
    unsigned long is_free : 8;
    unsigned long has_next : 8;
};

struct mem_blk {
    std::size_t size;
    mem_blk_flags flags;

    // the first byte of the allocated memory space;
    // the minimum payload for a block is 1024 bytes (see allocate() below)
    std::byte data[];
};
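
// Blocks are laid out back to back in the heap:
//   [ size | flags | data... ][ size | flags | data... ] ...
// so the header of the following block sits sizeof(mem_blk) + size bytes
// past the current header (see next() below).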

constexpr std::byte* aspbyte(void* pblk)
{ return std::bit_cast<std::byte*>(pblk); }

constexpr mem_blk* aspblk(void* pbyte)
{ return std::bit_cast<mem_blk*>(pbyte); }
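
// return the header of the block that immediately follows blk,
// given that blk's payload occupies blk_size bytes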
constexpr mem_blk* next(mem_blk* blk, std::size_t blk_size)
{
    auto* p = aspbyte(blk);
    p += sizeof(mem_blk);
    p += blk_size;
    return aspblk(p);
}

// merge every immediately following free block into blk;
// blk itself MUST be free
constexpr void unite_afterwards(mem_blk* blk)
{
    while (blk->flags.has_next) {
        auto* blk_next = next(blk, blk->size);
        if (!blk_next->flags.is_free)
            break;

        blk->size += sizeof(mem_blk) + blk_next->size;
        blk->flags.has_next = blk_next->flags.has_next;
    }
}

// @param p_start position where the search starts; as a side effect,
//        it is advanced so that later searches skip the leading run of
//        allocated blocks
// @param size the payload size we are looking for
// @return a suitable free block if one exists; otherwise the last block
//         in the chain
constexpr mem_blk* find_blk(std::byte** p_start, std::size_t size)
{
    mem_blk* start_pos = aspblk(*p_start);
    bool no_free_so_far = true;

    while (true) {
        if (start_pos->flags.is_free) {
            unite_afterwards(start_pos);

            no_free_so_far = false;

            if (start_pos->size >= size)
                break;
        }

        if (no_free_so_far)
            *p_start = aspbyte(start_pos);

        if (!start_pos->flags.has_next)
            break;
        start_pos = next(start_pos, start_pos->size);
    }

    return start_pos;
}
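
// find_blk() example: with a chain A(used) -> B(free, 512) -> C(free, 1024),
// a search for 1024 bytes merges B and C into one free block with a payload
// of 512 + sizeof(mem_blk) + 1024 bytes and returns it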

constexpr void split_block(mem_blk* blk, std::size_t this_size)
{
    // only split if the block has room for "this_size" bytes AND a new
    // block header with the 1024-byte minimum payload behind it
    if (blk->size < this_size + sizeof(mem_blk) + 1024)
        return;

    mem_blk* blk_next = next(blk, this_size);

    blk_next->size = blk->size
        - this_size
        - sizeof(mem_blk);

    blk_next->flags.has_next = blk->flags.has_next;
    blk_next->flags.is_free = 1;

    blk->flags.has_next = 1;
    blk->size = this_size;
}
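
// split_block() example: splitting a 4096-byte block for a 1024-byte
// request leaves a free successor block with a payload of
// 4096 - 1024 - sizeof(mem_blk) bytes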

std::byte* brk_memory_allocator::brk(byte* addr)
{
    if (addr >= p_limit)
        return nullptr;

    uintptr_t current_allocated = reinterpret_cast<uintptr_t>(p_allocated);
    uintptr_t new_brk = reinterpret_cast<uintptr_t>(addr);
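
    // align both the current allocation mark and the requested break
    // down to a 2 MiB huge page boundary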
    current_allocated &= ~(0x200000 - 1);
    new_brk &= ~(0x200000 - 1);

    using namespace kernel::mem::paging;
    while (current_allocated <= new_brk) {
        auto idx = idx_all(current_allocated);

        auto pdpt = KERNEL_PAGE_TABLE[std::get<1>(idx)].parse();
        auto pdpte = pdpt[std::get<2>(idx)];
        if (!pdpte.pfn())
            pdpte.set(PA_KERNEL_PAGE_TABLE, alloc_page_table());

        auto pde = pdpte.parse()[std::get<3>(idx)];
        assert(!(pde.attributes() & PA_P));
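
        // back this 2 MiB range with 2^9 = 512 freshly allocated 4 KiB
        // pages, mapped as a single kernel huge page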
        pde.set(PA_KERNEL_DATA_HUGE, page_to_pfn(alloc_pages(9)));

        current_allocated += 0x200000;
    }
    p_allocated = (std::byte*)current_allocated;

    return p_break = addr;
}

std::byte* brk_memory_allocator::sbrk(size_type increment)
{
    return brk(p_break + increment);
}

brk_memory_allocator::brk_memory_allocator(byte* start, size_type size)
    : p_start(start)
    , p_limit(start + size)
    , p_break(start)
    , p_allocated(start)
{
    auto* p_blk = aspblk(brk(p_start));
    sbrk(sizeof(mem_blk) + 1024); // 1024 bytes (minimum size for a block)

    p_blk->size = 1024;
    p_blk->flags.has_next = 0;
    p_blk->flags.is_free = 1;
}

void* brk_memory_allocator::allocate(size_type size)
{
    kernel::async::lock_guard_irq lck(mtx);

    // align the requested size up to a 1024-byte boundary
    size = (size + 1024 - 1) & ~(1024 - 1);

    auto* block_allocated = find_blk(&p_start, size);

    if (!block_allocated->flags.has_next
        && (!block_allocated->flags.is_free || block_allocated->size < size)) {
        // find_blk() returned the last block in the chain and it does
        // not fit; extend the heap and append a fresh block
        if (!sbrk(sizeof(mem_blk) + size))
            return nullptr;

        block_allocated->flags.has_next = 1;

        block_allocated = next(block_allocated, block_allocated->size);

        block_allocated->flags.has_next = 0;
        block_allocated->flags.is_free = 1;
        block_allocated->size = size;
    } else {
        split_block(block_allocated, size);
    }

    block_allocated->flags.is_free = 0;

    return block_allocated->data;
}
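
// note: allocate() rounds every request up to a multiple of 1024 bytes,
// so e.g. allocate(100) reserves a full 1024-byte payload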

void brk_memory_allocator::deallocate(void* ptr)
{
    kernel::async::lock_guard_irq lck(mtx);

    auto* blk = aspblk(aspbyte(ptr) - sizeof(mem_blk));

    blk->flags.is_free = 1;
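
    // move the search start back so find_blk() will see this block again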
    if (aspbyte(blk) < p_start)
        p_start = aspbyte(blk);

    // merge with any following free blocks
    unite_afterwards(blk);
}
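
// note: the no-argument sbrk() used below is presumably a const overload
// declared in the header that reports the current break without moving it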
bool brk_memory_allocator::allocated(void* ptr) const noexcept
{
    return (void*)KERNEL_HEAP_START <= aspbyte(ptr) && aspbyte(ptr) < sbrk();
}

static brk_memory_allocator* k_alloc;

} // namespace types::memory

static kernel::mem::slab_cache caches[7];

static constexpr int __cache_index(std::size_t size)
{
    if (size <= 32)
        return 0;
    if (size <= 64)
        return 1;
    if (size <= 96)
        return 2;
    if (size <= 128)
        return 3;
    if (size <= 192)
        return 4;
    if (size <= 256)
        return 5;
    if (size <= 512)
        return 6;
    return -1;
}
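
// requests of up to 512 bytes map to one of the slab caches above;
// larger requests return -1 and fall back to the brk allocator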

SECTION(".text.kinit")
void kernel::kinit::init_allocator()
{
    mem::init_slab_cache(caches + 0, 32);
    mem::init_slab_cache(caches + 1, 64);
    mem::init_slab_cache(caches + 2, 96);
    mem::init_slab_cache(caches + 3, 128);
    mem::init_slab_cache(caches + 4, 192);
    mem::init_slab_cache(caches + 5, 256);
    mem::init_slab_cache(caches + 6, 512);

    types::memory::k_alloc = new types::memory::brk_memory_allocator(
        (std::byte*)KERNEL_HEAP_START, KERNEL_HEAP_SIZE);
}
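
// the global operator new/delete route small requests to the slab caches
// and larger ones to the brk heap allocator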
void* operator new(size_t size)
{
    int idx = __cache_index(size);
    void* ptr = nullptr;

    if (idx < 0)
        ptr = types::memory::k_alloc->allocate(size);
    else
        ptr = kernel::mem::slab_alloc(&caches[idx]);

    assert(ptr);
    return ptr;
}

void operator delete(void* ptr)
{
    if (!ptr)
        return;

    if (types::memory::k_alloc->allocated(ptr))
        types::memory::k_alloc->deallocate(ptr);
    else
        kernel::mem::slab_free(ptr);
}

void operator delete(void* ptr, std::size_t size)
{
    if (!ptr)
        return;

    if (types::memory::k_alloc->allocated(ptr)) {
        types::memory::k_alloc->deallocate(ptr);
        return;
    }
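
    // a slab pointer's size must map back to one of the caches; the
    // assert guards against a sized delete with a mismatched size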
    int idx = __cache_index(size);
    assert(idx >= 0);

    kernel::mem::slab_free(ptr);
}

void* operator new[](size_t sz)
{
    return ::operator new(sz);
}

void operator delete[](void* ptr)
{
    ::operator delete(ptr);
}

void operator delete[](void* ptr, std::size_t size)
{
    ::operator delete(ptr, size);
}