greatbridf 10 months ago
parent
commit
6f29d1acca

+ 22 - 0
include/kernel/mem/paging.hpp

@@ -45,6 +45,14 @@ constexpr psattr_t PA_FRE  = 0x0000000000000800ULL; // unused flag
 constexpr psattr_t PA_NXE  = 0x8000000000000000ULL;
 constexpr psattr_t PA_MASK = 0xfff0000000000fffULL;
 
+constexpr psattr_t PA_DATA = PA_P | PA_RW | PA_NXE;
+
+constexpr psattr_t PA_PAGE_TABLE = PA_DATA;
+constexpr psattr_t PA_KERNEL_PAGE_TABLE = PA_DATA;
+
+constexpr psattr_t PA_KERNEL_DATA = PA_DATA | PA_G;
+constexpr psattr_t PA_KERNEL_DATA_HUGE = PA_KERNEL_DATA | PA_PS;
+
 namespace __inner {
     using pse_t = uint64_t;
 
@@ -66,6 +74,16 @@ public:
         *m_ptrbase = (attributes & PA_MASK) | (pfn & ~PA_MASK);
     }
 
+    constexpr pfn_t pfn() const noexcept
+    {
+        return *m_ptrbase & ~PA_MASK;
+    }
+
+    constexpr psattr_t attributes() const noexcept
+    {
+        return *m_ptrbase & PA_MASK;
+    }
+
     constexpr PSE operator[](std::size_t nth) const noexcept
     {
         return PSE{m_ptrbase.phys() + 8 * nth};
@@ -81,6 +99,7 @@ constexpr PSE KERNEL_PAGE_TABLE{0x100000};
 
 constexpr unsigned long PAGE_PRESENT = 0x00000001;
 constexpr unsigned long PAGE_BUDDY   = 0x00000002;
+constexpr unsigned long PAGE_SLAB    = 0x00000004;
 
 struct page {
     refcount_t refcount;
@@ -95,12 +114,15 @@ struct page {
 inline page* PAGE_ARRAY;
 
 void create_zone(uintptr_t start, uintptr_t end);
+void mark_present(uintptr_t start, uintptr_t end);
 
 // order represents power of 2
 page* alloc_page();
 page* alloc_pages(int order);
 void free_page(page* page, int order);
 
+pfn_t alloc_page_table();
+
 pfn_t page_to_pfn(page* page);
 page* pfn_to_page(pfn_t pfn);
 

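Note: the new PA_* composites together with the PSE::pfn() and PSE::attributes() accessors let callers inspect an entry before overwriting it, which is the pattern brk() in allocator.cc relies on below. A minimal sketch of that walk, assuming the surrounding declarations in this header (idx_all(), parse(), KERNEL_PAGE_TABLE, alloc_page_table()); the helper name is hypothetical and not part of the commit:

    using namespace kernel::mem::paging;

    // Hypothetical helper, for illustration only: map one 2 MiB kernel page.
    void map_kernel_2m(uintptr_t vaddr, pfn_t pfn)
    {
        auto idx = idx_all(vaddr);

        // Walk to the PDPT, allocating an intermediate table if the slot is empty.
        auto pdpte = KERNEL_PAGE_TABLE[std::get<1>(idx)].parse()[std::get<2>(idx)];
        if (!pdpte.pfn())
            pdpte.set(PA_KERNEL_PAGE_TABLE, alloc_page_table());

        // Only install the huge-page mapping if the PDE is not already present.
        auto pde = pdpte.parse()[std::get<3>(idx)];
        if (!(pde.attributes() & PA_P))
            pde.set(PA_KERNEL_DATA_HUGE, pfn);
    }
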
+ 0 - 15
include/kernel/mem/slab.hpp

@@ -32,22 +32,7 @@ struct slab_cache {
     std::size_t obj_size;
 };
 
-template <typename T>
-class slab_allocator {
-    using value_type = T;
-    using propagate_on_container_move_assignment = std::true_type;
-
-    // throws std::bad_alloc
-    [[nodiscard]] constexpr T* allocate(std::size_t n)
-    { return static_cast<T*>(::operator new(n * sizeof(T))); }
-
-    // TODO: check allocated size
-    constexpr void deallocate(T* ptr, std::size_t)
-    { ::operator delete(ptr); }
-};
-
 void init_slab_cache(slab_cache* cache, std::size_t obj_size);
-void slab_add_page(slab_cache* cache, paging::pfn_t pfn);
 
 void* slab_alloc(slab_cache* cache);
 void slab_free(void* ptr);

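Note: with slab_add_page() gone and the unused slab_allocator template removed, the public slab surface is down to three calls. A minimal usage sketch, with the cache object and object size chosen for illustration:

    kernel::mem::slab_cache cache;
    kernel::mem::init_slab_cache(&cache, 64);     // now also adds the first backing page

    void* obj = kernel::mem::slab_alloc(&cache);  // grows the cache on demand internally
    kernel::mem::slab_free(obj);                  // owning slab is located from the pointer
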
+ 12 - 8
include/types/allocator.hpp

@@ -22,17 +22,14 @@ private:
     byte* p_start;
     byte* p_limit;
     byte* p_break;
+    byte* p_allocated;
     kernel::async::mutex mtx;
 
-    constexpr byte* brk(byte* addr)
-    {
-        if (addr >= p_limit) [[unlikely]]
-            return nullptr;
-        return p_break = addr;
-    }
+    byte* brk(byte* addr);
+    byte* sbrk(size_type increment);
 
-    constexpr byte* sbrk(size_type increment)
-    { return brk(p_break + increment); }
+    constexpr byte* sbrk() const noexcept
+    { return p_break; }
 
 public:
     explicit brk_memory_allocator(byte* start, size_type size);
@@ -40,6 +37,13 @@ public:
 
     void* allocate(size_type size);
     void deallocate(void* ptr);
+
+    bool allocated(void* ptr) const noexcept;
 };
 
 } // namespace types::memory
+
+namespace kernel::kinit {
+void init_allocator();
+
+} // namespace kernel::kinit

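Note: brk()/sbrk() lose constexpr because they now touch the kernel page tables, and allocated() exists so that ::operator delete can tell brk-heap pointers from slab pointers. A condensed view of that dispatch (the real definitions follow in src/kernel/allocator.cc; the free-function wrapper is hypothetical and k_alloc is internal to that file):

    // Hypothetical helper, for illustration only.
    void kfree(void* ptr)
    {
        if (types::memory::k_alloc->allocated(ptr))
            types::memory::k_alloc->deallocate(ptr);   // came from the brk heap
        else
            kernel::mem::slab_free(ptr);               // otherwise assumed to be a slab object
    }
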
+ 123 - 17
src/kernel/allocator.cc

@@ -7,6 +7,12 @@
 #include <stdint.h>
 
 #include <kernel/async/lock.hpp>
+#include <kernel/mem/paging.hpp>
+#include <kernel/mem/slab.hpp>
+
+constexpr uintptr_t KERNEL_HEAP_START = 0xffff'ff81'8000'0000;
+constexpr uintptr_t KERNEL_HEAP_END   = 0xffff'ffbf'ffff'ffff;
+constexpr uintptr_t KERNEL_HEAP_SIZE  = KERNEL_HEAP_END - KERNEL_HEAP_START;
 
 namespace types::memory {
 
@@ -100,13 +106,52 @@ constexpr void split_block(mem_blk* blk, std::size_t this_size)
     blk->size = this_size;
 }
 
+std::byte* brk_memory_allocator::brk(byte* addr)
+{
+    if (addr >= p_limit)
+        return nullptr;
+
+    uintptr_t current_allocated = reinterpret_cast<uintptr_t>(p_allocated);
+    uintptr_t new_brk = reinterpret_cast<uintptr_t>(addr);
+
+    current_allocated &= ~(0x200000-1);
+    new_brk &= ~(0x200000-1);
+
+    using namespace kernel::mem::paging;
+    while (current_allocated <= new_brk) {
+        auto idx = idx_all(current_allocated);
+        auto pdpt = KERNEL_PAGE_TABLE[std::get<1>(idx)].parse();
+
+        auto pdpte = pdpt[std::get<2>(idx)];
+        if (!pdpte.pfn())
+            pdpte.set(PA_KERNEL_PAGE_TABLE, alloc_page_table());
+
+        auto pde = pdpte.parse()[std::get<3>(idx)];
+        assert(!(pde.attributes() & PA_P));
+        pde.set(PA_KERNEL_DATA_HUGE, page_to_pfn(alloc_pages(9)));
+
+        current_allocated += 0x200000;
+    }
+    p_allocated = (std::byte*)current_allocated;
+
+    return p_break = addr;
+}
+
+std::byte* brk_memory_allocator::sbrk(size_type increment)
+{
+    return brk(p_break + increment);
+}
+
 brk_memory_allocator::brk_memory_allocator(byte* start, size_type size)
     : p_start(start)
     , p_limit(start + size)
+    , p_break(start)
+    , p_allocated(start)
 {
-    brk(p_start);
-    auto* p_blk = aspblk(sbrk(0));
-    p_blk->size = 8;
+    auto* p_blk = aspblk(brk(p_start));
+    sbrk(sizeof(mem_blk) + 1024); // 1024 bytes (minimum size for a block)
+
+    p_blk->size = 1024;
     p_blk->flags.has_next = 0;
     p_blk->flags.is_free = 1;
 }
@@ -114,8 +159,8 @@ brk_memory_allocator::brk_memory_allocator(byte* start, size_type size)
 void* brk_memory_allocator::allocate(size_type size)
 {
     kernel::async::lock_guard_irq lck(mtx);
-    // align to 8 bytes boundary
-    size = (size + 7) & ~7;
+    // align to 1024 bytes boundary
+    size = (size + 1024-1) & ~(1024-1);
 
     auto* block_allocated = find_blk(&p_start, size);
     if (!block_allocated->flags.has_next
@@ -156,40 +201,101 @@ void brk_memory_allocator::deallocate(void* ptr)
     unite_afterwards(blk);
 }
 
+bool brk_memory_allocator::allocated(void* ptr) const noexcept
+{
+    return (void*)KERNEL_HEAP_START <= aspbyte(ptr) && aspbyte(ptr) < sbrk();
+}
+
 static brk_memory_allocator* k_alloc;
 
 } // namespace types::memory
 
-void* operator new(size_t sz)
+static kernel::mem::slab_cache caches[7];
+
+static constexpr int __cache_index(std::size_t size)
 {
-    void* ptr = types::memory::k_alloc->allocate(sz);
-    assert(ptr);
-    return ptr;
+    if (size <= 32)
+        return 0;
+    if (size <= 64)
+        return 1;
+    if (size <= 96)
+        return 2;
+    if (size <= 128)
+        return 3;
+    if (size <= 192)
+        return 4;
+    if (size <= 256)
+        return 5;
+    if (size <= 512)
+        return 6;
+    return -1;
 }
 
-void* operator new[](size_t sz)
+SECTION(".text.kinit")
+void kernel::kinit::init_allocator()
+{
+    mem::init_slab_cache(caches+0, 32);
+    mem::init_slab_cache(caches+1, 64);
+    mem::init_slab_cache(caches+2, 96);
+    mem::init_slab_cache(caches+3, 128);
+    mem::init_slab_cache(caches+4, 192);
+    mem::init_slab_cache(caches+5, 256);
+    mem::init_slab_cache(caches+6, 512);
+
+    types::memory::k_alloc = new types::memory::brk_memory_allocator(
+        (std::byte*)KERNEL_HEAP_START, KERNEL_HEAP_SIZE);
+}
+
+void* operator new(size_t size)
 {
-    void* ptr = types::memory::k_alloc->allocate(sz);
+    int idx = __cache_index(size);
+    void* ptr = nullptr;
+    if (idx < 0)
+        ptr = types::memory::k_alloc->allocate(size);
+    else
+        ptr = kernel::mem::slab_alloc(&caches[idx]);
+
     assert(ptr);
     return ptr;
 }
 
 void operator delete(void* ptr)
 {
-    types::memory::k_alloc->deallocate(ptr);
+    if (!ptr)
+        return;
+
+    if (types::memory::k_alloc->allocated(ptr))
+        types::memory::k_alloc->deallocate(ptr);
+    else
+        kernel::mem::slab_free(ptr);
 }
 
-void operator delete(void* ptr, size_t)
+void operator delete(void* ptr, std::size_t size)
+{
+    if (!ptr)
+        return;
+
+    if (types::memory::k_alloc->allocated(ptr)) {
+        types::memory::k_alloc->deallocate(ptr);
+        return;
+    }
+    int idx = __cache_index(size);
+    assert(idx >= 0);
+
+    kernel::mem::slab_free(ptr);
+}
+
+void* operator new[](size_t sz)
 {
-    types::memory::k_alloc->deallocate(ptr);
+    return ::operator new(sz);
 }
 
 void operator delete[](void* ptr)
 {
-    types::memory::k_alloc->deallocate(ptr);
+    ::operator delete(ptr);
 }
 
-void operator delete[](void* ptr, size_t)
+void operator delete[](void* ptr, std::size_t size)
 {
-    types::memory::k_alloc->deallocate(ptr);
+    ::operator delete(ptr, size);
 }

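Note: brk() now backs the kernel heap lazily in 2 MiB steps; each loop iteration installs one PA_KERNEL_DATA_HUGE mapping backed by alloc_pages(9) (2^9 pages of 4 KiB = 2 MiB), matching the 0x200000 stride. operator new then routes by size between the slab caches and k_alloc; a few illustrative cases (sizes chosen for the example, not taken from the commit):

    ::operator new(24);   // __cache_index(24)  == 0  -> 32-byte slab cache
    ::operator new(200);  // __cache_index(200) == 5  -> 256-byte slab cache
    ::operator new(512);  // __cache_index(512) == 6  -> 512-byte slab cache
    ::operator new(513);  // __cache_index(513) == -1 -> k_alloc, rounded up to 1024 bytes
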
+ 28 - 0
src/kernel/mem/paging.cc

@@ -1,3 +1,6 @@
+#include <assert.h>
+#include <string.h>
+
 #include <kernel/mem/paging.hpp>
 #include <kernel/mem/slab.hpp>
 
@@ -30,6 +33,8 @@ constexpr pfn_t parent(pfn_t pfn, int order)
 page* _create_zone(pfn_t pfn, int order)
 {
     page* zone = pfn_to_page(pfn);
+
+    assert(zone->flags & PAGE_PRESENT);
     zone->flags |= PAGE_BUDDY;
 
     zone->next = zones[order].next;
@@ -57,6 +62,7 @@ page* _alloc_zone(int order)
 
         auto* zone = zones[i].next;
         zones[i].next = zone->next;
+        zones[i].count--;
 
         // TODO: set free bitmap
         zone->refcount++;
@@ -64,6 +70,7 @@ page* _alloc_zone(int order)
         if (i > order)
             _split_zone(zone, i, order);
 
+        assert(zone->flags & PAGE_PRESENT && zone->flags & PAGE_BUDDY);
         return zone;
     }
 
@@ -95,6 +102,17 @@ void kernel::mem::paging::create_zone(uintptr_t start, uintptr_t end)
     }
 }
 
+void kernel::mem::paging::mark_present(uintptr_t start, uintptr_t end)
+{
+    start >>= 12;
+
+    end += (4096 - 1);
+    end >>= 12;
+
+    while (start < end)
+        PAGE_ARRAY[start++].flags |= PAGE_PRESENT;
+}
+
 page* kernel::mem::paging::alloc_pages(int order)
 {
     auto* zone = _alloc_zone(order);
@@ -111,6 +129,16 @@ page* kernel::mem::paging::alloc_page()
     return alloc_pages(0);
 }
 
+pfn_t kernel::mem::paging::alloc_page_table()
+{
+    page* zone = alloc_page();
+    pfn_t pfn = page_to_pfn(zone);
+
+    memset(physaddr<void>{pfn}, 0x00, 0x1000);
+
+    return pfn;
+}
+
 pfn_t kernel::mem::paging::page_to_pfn(page* _page)
 {
     return (pfn_t)(_page - PAGE_ARRAY) * 0x1000;

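Note: mark_present() records which physical pages actually exist so that the new assert in _create_zone() catches attempts to build buddy zones over holes, and alloc_page_table() wraps alloc_page() plus zeroing of the fresh table. A worked example of the rounding, with illustrative addresses:

    using namespace kernel::mem::paging;

    mark_present(0x1000, 0x3001);
    // start >>= 12                  -> index 1
    // end = (0x3001 + 0xfff) >> 12  -> index 4 (exclusive)
    // PAGE_ARRAY[1..3] get PAGE_PRESENT, i.e. the 4 KiB pages 0x1000..0x3fff
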
+ 18 - 10
src/kernel/mem/slab.cc

@@ -81,21 +81,34 @@ slab_head* _make_slab(uintptr_t start, std::size_t size)
 
     std::byte* ptr = (std::byte*)slab->free;
     for (unsigned i = 0; i < slab->free_count; ++i) {
+        void* nextptr = ptr + size;
         if (i == slab->free_count-1)
             *(void**)ptr = nullptr;
         else
-            *(void**)ptr = ptr + size;
-        ++ptr;
+            *(void**)ptr = nextptr;
+        ptr = (std::byte*)nextptr;
     }
 
     return slab;
 }
 
+void _slab_add_page(slab_cache* cache) {
+    auto* new_page = paging::alloc_page();
+    auto new_page_pfn = paging::page_to_pfn(new_page);
+
+    new_page->flags |= paging::PAGE_SLAB;
+
+    auto* slab = _make_slab(new_page_pfn, cache->obj_size);
+    slab->cache = cache;
+
+    list_insert(&cache->slabs_empty, slab);
+}
+
 void* kernel::mem::slab_alloc(slab_cache* cache) {
     slab_head* slab = cache->slabs_partial;
     if (!slab) { // no partial slabs, try to get an empty slab
         if (!cache->slabs_empty) // no empty slabs, create a new one
-            slab_add_page(cache, paging::page_to_pfn(paging::alloc_page()));
+            _slab_add_page(cache);
 
         slab = list_get(&cache->slabs_empty);
 
@@ -135,17 +148,12 @@ void kernel::mem::slab_free(void* ptr) {
     }
 }
 
-void kernel::mem::slab_add_page(slab_cache* cache, paging::pfn_t pfn) {
-    auto slab = _make_slab(pfn, cache->obj_size);
-    slab->cache = cache;
-
-    list_insert(&cache->slabs_empty, slab);
-}
-
 void kernel::mem::init_slab_cache(slab_cache* cache, std::size_t obj_size)
 {
     cache->obj_size = obj_size;
     cache->slabs_empty = nullptr;
     cache->slabs_partial = nullptr;
     cache->slabs_full = nullptr;
+
+    _slab_add_page(cache);
 }

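Note: the old free-list loop advanced ptr by one byte per object instead of by obj_size, corrupting the embedded next-pointers; the rewrite steps by the object size. A small check of the rebuilt list, assuming slab_head's free/free_count members as used above (the walk itself is illustrative, not part of the commit):

    std::size_t count_free(kernel::mem::slab_head* slab)
    {
        std::size_t n = 0;
        for (void* p = slab->free; p; p = *(void**)p)
            ++n;        // follow the next-pointer stored in each free object
        return n;       // equals slab->free_count right after _make_slab()
    }
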
+ 11 - 13
src/kinit.cpp

@@ -5,6 +5,7 @@
 #include <stdio.h>
 #include <sys/utsname.h>
 
+#include <types/allocator.hpp>
 #include <types/types.h>
 
 #include <kernel/hw/keyboard.h>
@@ -143,7 +144,7 @@ static inline void setup_early_kernel_page_table()
     auto pd = pdpt[std::get<2>(idx)].parse();
 
     // kernel bss, size 2M
-    pd[std::get<3>(idx)].set(PA_P | PA_RW | PA_PS | PA_G | PA_NXE, 0x200000);
+    pd[std::get<3>(idx)].set(PA_KERNEL_DATA_HUGE, 0x200000);
 
     // clear kernel bss
     memset((void*)BSS_ADDR, 0x00, BSS_LENGTH);
@@ -155,21 +156,15 @@ static inline void make_early_kernel_stack()
     using namespace kernel::mem;
     using namespace kernel::mem::paging;
 
-    auto* kstack_pdpt_page = alloc_page();
-    auto* kstack_page = alloc_pages(9);
-
-    memset(physaddr<char>{page_to_pfn(kstack_pdpt_page)}, 0x00, 0x1000);
-
     constexpr auto idx = idx_all(0xffffffc040000000ULL);
 
     // early kernel stack
     auto pdpte = KERNEL_PAGE_TABLE[std::get<1>(idx)].parse()[std::get<2>(idx)];
-    pdpte.set(PA_P | PA_RW | PA_G | PA_NXE, page_to_pfn(kstack_pdpt_page));
+    pdpte.set(PA_KERNEL_PAGE_TABLE, alloc_page_table());
 
     auto pd = pdpte.parse();
-    pd[std::get<3>(idx)].set(
-            PA_P | PA_RW | PA_PS | PA_G | PA_NXE,
-            page_to_pfn(kstack_page));
+    pd[std::get<3>(idx)].set(PA_KERNEL_DATA_HUGE,
+        page_to_pfn(alloc_pages(9)));
 }
 
 SECTION(".text.kinit")
@@ -187,12 +182,11 @@ static inline void setup_buddy(uintptr_t addr_max)
     memset(physaddr<void>{0x105000}, 0x00, 4096);
 
     auto pdpte = KERNEL_PAGE_TABLE[std::get<1>(idx)].parse()[std::get<2>(idx)];
-    pdpte.set(PA_P | PA_RW | PA_G | PA_NXE, 0x105000);
+    pdpte.set(PA_KERNEL_PAGE_TABLE, 0x105000);
 
     auto pd = pdpte.parse();
     for (int i = 0; i < count; ++i, start_pfn += 0x200000) {
-        pd[std::get<3>(idx)+i].set(
-            PA_P | PA_RW | PA_PS | PA_G | PA_NXE, start_pfn);
+        pd[std::get<3>(idx)+i].set(PA_KERNEL_DATA_HUGE, start_pfn);
     }
 
     PAGE_ARRAY = (page*)0xffffff8040000000ULL;
@@ -200,8 +194,10 @@ static inline void setup_buddy(uintptr_t addr_max)
 
     for (int i = 0; i < (int)info::e820_entry_count; ++i) {
         auto& ent = info::e820_entries[i];
+
         if (ent.type != 1) // type == 1: free area
             continue;
+        mark_present(ent.base, ent.base + ent.len);
 
         auto start = ent.base;
         auto end = start + ent.len;
@@ -253,6 +249,8 @@ void NORETURN kernel_init(bootloader_data* data)
     }
 
     setup_buddy(addr_max);
+    init_allocator();
+
     make_early_kernel_stack();
 
     asm volatile(