Browse Source

Merge branch 'mem'

greatbridf 2 years ago
parent
commit
bfd97f6c89

+ 3 - 3
CMakeLists.txt

@@ -41,11 +41,11 @@ include_directories(${PROJECT_SOURCE_DIR}/include)
 
 set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         src/kernel/errno.c
-                        src/kernel/interrupt.c
+                        src/kernel/interrupt.cpp
                         src/kernel/process.c
                         src/kernel/tty.c
                         src/kernel/stdio.c
-                        src/kernel/mem.c
+                        src/kernel/mem.cpp
                         src/kernel/vfs.cpp
                         src/kernel/vga.c
                         src/kernel/hw/keyboard.cpp
@@ -64,6 +64,7 @@ set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         include/kernel/process.h
                         include/kernel/stdio.h
                         include/kernel/mem.h
+                        include/kernel/mm.hpp
                         include/kernel/vfs.h
                         include/kernel/vga.h
                         include/kernel/hw/keyboard.h
@@ -77,7 +78,6 @@ set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         include/types/size.h
                         include/types/status.h
                         include/types/stdint.h
-                        include/types/list.h
                         include/types/allocator.hpp
                         include/types/cplusplus.hpp
                         include/types/list.hpp

+ 2 - 2
include/kernel/errno.h

@@ -14,10 +14,10 @@ extern uint32_t* _get_errno(void);
 }
 #endif
 
-#define ENOMEM 0
-#define ENOTFOUND 1
+#define ENOMEM (1 << 0)
 #define EEXIST (1 << 1)
 #define ENOENT (1 << 2)
 #define EINVAL (1 << 3)
 #define EISDIR (1 << 4)
 #define ENOTDIR (1 << 5)
+#define ENOTFOUND (1 << 6)

+ 41 - 1
include/kernel/interrupt.h

@@ -2,6 +2,10 @@
 
 #include <types/types.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #define INTERRUPT_GATE_TYPE (0x8e)
 
 #define PIC_EOI (0x20)
@@ -38,7 +42,6 @@ struct page_fault_error_code {
 // external interrupt handler function
 // stub in assembly MUST be called irqN
 #define SET_UP_IRQ(N, SELECTOR)        \
-    extern void irq##N();              \
     ptr_t addr_irq##N = (ptr_t)irq##N; \
     SET_IDT_ENTRY(0x20 + (N), (addr_irq##N), (SELECTOR));
 
@@ -105,3 +108,40 @@ void irq12_handler(void);
 void irq13_handler(void);
 void irq14_handler(void);
 void irq15_handler(void);
+
+void int0(void);
+void int1(void);
+void int2(void);
+void int3(void);
+void int4(void);
+void int5(void);
+void int6(void);
+void int7(void);
+void int8(void);
+void int9(void);
+void int10(void);
+void int11(void);
+void int12(void);
+void int13(void);
+void int14(void);
+
+void irq0(void);
+void irq1(void);
+void irq2(void);
+void irq3(void);
+void irq4(void);
+void irq5(void);
+void irq6(void);
+void irq7(void);
+void irq8(void);
+void irq9(void);
+void irq10(void);
+void irq11(void);
+void irq12(void);
+void irq13(void);
+void irq14(void);
+void irq15(void);
+
+#ifdef __cplusplus
+}
+#endif

+ 2 - 144
include/kernel/mem.h

@@ -10,9 +10,6 @@
 extern "C" {
 #endif
 
-// in mem.c
-extern struct mm* kernel_mm_head;
-
 // don't forget to add the initial 1m to the total
 struct mem_size_info {
     uint16_t n_1k_blks; // memory between 1m and 16m in 1k blocks
@@ -94,35 +91,6 @@ typedef union page_table_entry {
     struct page_table_entry_in in;
 } page_table_entry;
 
-struct page_attr {
-    uint32_t read : 1;
-    uint32_t write : 1;
-    uint32_t system : 1;
-    uint32_t cow : 1;
-};
-
-struct page {
-    page_t phys_page_id;
-    size_t* ref_count;
-    struct page_attr attr;
-    struct page* next;
-};
-
-struct mm_attr {
-    uint32_t read : 1;
-    uint32_t write : 1;
-    uint32_t system : 1;
-};
-
-struct mm {
-    linr_ptr_t start;
-    size_t len;
-    struct mm_attr attr;
-    struct page* pgs;
-    struct mm* next;
-    page_directory_entry* pd;
-};
-
 // in kernel_main.c
 extern uint8_t e820_mem_map[1024];
 extern uint32_t e820_mem_map_count;
@@ -133,123 +101,13 @@ extern struct mem_size_info mem_size_info;
 #define KERNEL_HEAP_START ((void*)0x30000000)
 #define KERNEL_HEAP_LIMIT ((void*)0x40000000)
 
-struct mem_blk_flags {
-    uint8_t is_free;
-    uint8_t has_next;
-    uint8_t _unused2;
-    uint8_t _unused3;
-};
-
-struct mem_blk {
-    size_t size;
-    struct mem_blk_flags flags;
-    // the first byte of the memory space
-    // the minimal allocated space is 4 bytes
-    uint8_t data[4];
-};
-
-int init_heap(void);
-
 void* k_malloc(size_t size);
 
 void k_free(void* ptr);
 
-// translate physical address to virtual(mapped) address
-void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);
-
-// translate linear address to physical address
-phys_ptr_t l_ptr_to_p_ptr(struct mm* mm_area, linr_ptr_t v_ptr);
-
-// translate virtual(mapped) address to physical address
-phys_ptr_t v_ptr_to_p_ptr(void* v_ptr);
-
-// check if the l_ptr is contained in the area
-// @return GB_OK if l_ptr is in the area
-//         GB_FAILED if not
-int is_l_ptr_valid(struct mm* mm_area, linr_ptr_t l_ptr);
-
-// find the corresponding page the l_ptr pointing to
-// @return the pointer to the struct if found, NULL if not found
-struct page* find_page_by_l_ptr(struct mm* mm_area, linr_ptr_t l_ptr);
-
-static inline page_t phys_addr_to_page(phys_ptr_t ptr)
-{
-    return ptr >> 12;
-}
-
-static inline pd_i_t page_to_pd_i(page_t p)
-{
-    return p >> 10;
-}
-
-static inline pt_i_t page_to_pt_i(page_t p)
-{
-    return p & (1024 - 1);
-}
-
-static inline phys_ptr_t page_to_phys_addr(page_t p)
-{
-    return p << 12;
-}
-
-static inline pd_i_t linr_addr_to_pd_i(linr_ptr_t ptr)
-{
-    return page_to_pd_i(phys_addr_to_page(ptr));
-}
-
-static inline pd_i_t linr_addr_to_pt_i(linr_ptr_t ptr)
-{
-    return page_to_pt_i(phys_addr_to_page(ptr));
-}
-
-static inline page_directory_entry* lptr_to_pde(struct mm* mm, linr_ptr_t l_ptr)
-{
-    return mm->pd + linr_addr_to_pd_i((phys_ptr_t)l_ptr);
-}
-
-static inline page_table_entry* lptr_to_pte(struct mm* mm, linr_ptr_t l_ptr)
-{
-    page_directory_entry* pde = lptr_to_pde(mm, l_ptr);
-    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
-    return pte + linr_addr_to_pt_i((phys_ptr_t)l_ptr);
-}
-
-static inline page_directory_entry* lp_to_pde(struct mm* mm, linr_ptr_t l_ptr)
-{
-    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mm, l_ptr);
-    page_directory_entry* pde = mm->pd + linr_addr_to_pd_i(p_ptr);
-    return pde;
-}
-
-// get the corresponding pte for the linear address
-// for example: l_ptr = 0x30001000 will return the pte including the page it is mapped to
-static inline page_table_entry* lp_to_pte(struct mm* mm, linr_ptr_t l_ptr)
-{
-    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mm, l_ptr);
-
-    page_directory_entry* pde = lp_to_pde(mm, l_ptr);
-    phys_ptr_t p_pt = page_to_phys_addr(pde->in.pt_page);
-
-    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(p_pt);
-    pte += linr_addr_to_pt_i(p_ptr);
-
-    return pte;
-}
-
-// map the page to the end of the mm_area in pd
-int k_map(
-    struct mm* mm_area,
-    struct page* page,
-    int read,
-    int write,
-    int priv,
-    int cow);
-
-// allocate a raw page
-page_t alloc_raw_page(void);
+void* ki_malloc(size_t size);
 
-// allocate a struct page together with the raw page
-struct page* allocate_page(void);
+void ki_free(void* ptr);
 
 #define KERNEL_PAGE_DIRECTORY_ADDR ((page_directory_entry*)0x00000000)
 

+ 139 - 0
include/kernel/mm.hpp

@@ -0,0 +1,139 @@
+#pragma once
+
+#include <kernel/mem.h>
+#include <types/allocator.hpp>
+#include <types/list.hpp>
+#include <types/types.h>
+#include <types/vector.hpp>
+
+struct page_attr {
+    uint32_t cow : 1;
+};
+
+struct page {
+    page_t phys_page_id;
+    size_t* ref_count;
+    struct page_attr attr;
+};
+
+using page_arr = types::vector<page, types::kernel_ident_allocator>;
+
+struct mm_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;
+};
+
+struct mm {
+    linr_ptr_t start;
+    struct mm_attr attr;
+    page_arr* pgs;
+    page_directory_entry* pd;
+};
+
+using mm_list = types::list<mm, types::kernel_ident_allocator>;
+
+// in mem.cpp
+extern mm_list* kernel_mms;
+
+// translate physical address to virtual(mapped) address
+void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);
+
+// translate linear address to physical address
+phys_ptr_t l_ptr_to_p_ptr(const mm_list* mms, linr_ptr_t v_ptr);
+
+// translate virtual(mapped) address to physical address
+phys_ptr_t v_ptr_to_p_ptr(void* v_ptr);
+
+// check if the l_ptr is contained in the area
+// @return GB_OK if l_ptr is in the area
+//         GB_FAILED if not
+int is_l_ptr_valid(const mm_list* mms, linr_ptr_t l_ptr);
+
+// find the corresponding page the l_ptr pointing to
+// @return the pointer to the struct if found, NULL if not found
+struct page* find_page_by_l_ptr(const mm_list* mms, linr_ptr_t l_ptr);
+
+static inline page_t phys_addr_to_page(phys_ptr_t ptr)
+{
+    return ptr >> 12;
+}
+
+static inline pd_i_t page_to_pd_i(page_t p)
+{
+    return p >> 10;
+}
+
+static inline pt_i_t page_to_pt_i(page_t p)
+{
+    return p & (1024 - 1);
+}
+
+static inline phys_ptr_t page_to_phys_addr(page_t p)
+{
+    return p << 12;
+}
+
+static inline pd_i_t linr_addr_to_pd_i(linr_ptr_t ptr)
+{
+    return page_to_pd_i(phys_addr_to_page(ptr));
+}
+
+static inline pd_i_t linr_addr_to_pt_i(linr_ptr_t ptr)
+{
+    return page_to_pt_i(phys_addr_to_page(ptr));
+}
+
+static inline page_directory_entry* mms_get_pd(const mm_list* mms)
+{
+    return mms->begin()->pd;
+}
+
+static inline page_directory_entry* lptr_to_pde(const mm_list* mms, linr_ptr_t l_ptr)
+{
+    return mms_get_pd(mms) + linr_addr_to_pd_i((phys_ptr_t)l_ptr);
+}
+
+static inline page_table_entry* lptr_to_pte(const mm_list* mms, linr_ptr_t l_ptr)
+{
+    page_directory_entry* pde = lptr_to_pde(mms, l_ptr);
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    return pte + linr_addr_to_pt_i((phys_ptr_t)l_ptr);
+}
+
+static inline page_directory_entry* lp_to_pde(const mm_list* mms, linr_ptr_t l_ptr)
+{
+    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mms, l_ptr);
+    page_directory_entry* pde = mms_get_pd(mms) + linr_addr_to_pd_i(p_ptr);
+    return pde;
+}
+
+// get the corresponding pte for the linear address
+// for example: l_ptr = 0x30001000 will return the pte including the page it is mapped to
+static inline page_table_entry* lp_to_pte(const mm_list* mms, linr_ptr_t l_ptr)
+{
+    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mms, l_ptr);
+
+    page_directory_entry* pde = lp_to_pde(mms, l_ptr);
+    phys_ptr_t p_pt = page_to_phys_addr(pde->in.pt_page);
+
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(p_pt);
+    pte += linr_addr_to_pt_i(p_ptr);
+
+    return pte;
+}
+
+// map the page to the end of the mm_area in pd
+int k_map(
+    mm* mm_area,
+    const struct page* page,
+    int read,
+    int write,
+    int priv,
+    int cow);
+
+// allocate a raw page
+page_t alloc_raw_page(void);
+
+// allocate a struct page together with the raw page
+struct page allocate_page(void);

+ 34 - 1
include/types/allocator.hpp

@@ -1,4 +1,5 @@
 #pragma once
+#include <kernel/mem.h>
 #include <types/types.h>
 
 inline void* operator new(size_t, void* ptr)
@@ -7,6 +8,10 @@ inline void* operator new(size_t, void* ptr)
 }
 
 namespace types {
+
+template <typename Allocator>
+class allocator_traits;
+
 template <typename T>
 class kernel_allocator {
 public:
@@ -14,7 +19,7 @@ public:
 
     static value_type* allocate_memory(size_t count)
     {
-        return static_cast<value_type*>(::k_malloc(sizeof(value_type) * count));
+        return static_cast<value_type*>(::k_malloc(count));
     }
 
     static void deallocate_memory(value_type* ptr)
@@ -23,6 +28,34 @@ public:
     }
 };
 
+template <typename T>
+class kernel_ident_allocator {
+public:
+    using value_type = T;
+
+    static value_type* allocate_memory(size_t count)
+    {
+        return static_cast<value_type*>(::ki_malloc(count));
+    }
+
+    static void deallocate_memory(value_type* ptr)
+    {
+        ::ki_free(ptr);
+    }
+};
+
+template <typename T, typename... Args>
+T* kernel_allocator_new(Args... args)
+{
+    return allocator_traits<kernel_allocator<T>>::allocate_and_construct(args...);
+}
+
+template <typename T, typename... Args>
+T* kernel_ident_allocator_new(Args... args)
+{
+    return allocator_traits<kernel_ident_allocator<T>>::allocate_and_construct(args...);
+}
+
 template <typename Allocator>
 class allocator_traits {
 public:

+ 8 - 0
include/types/bitmap.h

@@ -2,6 +2,14 @@
 
 #include <types/stdint.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int bm_test(char* bm, size_t n);
 void bm_set(char* bm, size_t n);
 void bm_clear(char* bm, size_t n);
+
+#ifdef __cplusplus
+}
+#endif

+ 0 - 9
include/types/list.h

@@ -1,9 +0,0 @@
-#pragma once
-
-#define LIST_LIKE_AT(type, list_like, pos, result_name) \
-    type* result_name = list_like; \
-    {                   \
-        size_t _tmp_pos = (pos); \
-        while (_tmp_pos--) \
-            result_name = result_name->next; \
-    }

+ 32 - 5
include/types/list.hpp

@@ -1,7 +1,7 @@
 #pragma once
 
-#include <kernel/mem.h>
 #include <types/allocator.hpp>
+#include <types/cplusplus.hpp>
 #include <types/types.h>
 
 namespace types {
@@ -14,12 +14,14 @@ private:
     class node;
 
 public:
+    template <typename Pointer>
     class iterator;
 
     using value_type = T;
     using pointer_type = value_type*;
     using reference_type = value_type&;
-    using iterator_type = iterator;
+    using iterator_type = iterator<value_type*>;
+    using const_iterator_type = iterator<const value_type*>;
     using size_type = size_t;
     using difference_type = ssize_t;
     using node_base_type = node_base;
@@ -58,7 +60,12 @@ private:
     };
 
 public:
+    template <typename Pointer>
     class iterator {
+    public:
+        using Value = typename types::traits::remove_pointer<Pointer>::type;
+        using Reference = typename types::traits::add_reference<Value>::type;
+
     public:
         iterator(const iterator& iter) noexcept
             : n(iter.n)
@@ -111,17 +118,17 @@ public:
             return iter;
         }
 
-        reference_type operator*() const noexcept
+        Reference operator*() const noexcept
         {
             return (static_cast<node_type*>(n))->value;
         }
 
-        pointer_type operator->() const noexcept
+        Pointer operator->() const noexcept
         {
             return &(static_cast<node_type*>(n))->value;
         }
 
-        pointer_type ptr(void) const noexcept
+        Pointer ptr(void) const noexcept
         {
             return &(static_cast<node_type*>(n))->value;
         }
@@ -239,6 +246,26 @@ public:
         return iterator_type(tail);
     }
 
+    const_iterator_type begin() const noexcept
+    {
+        return const_iterator_type(head->next);
+    }
+
+    const_iterator_type end() const noexcept
+    {
+        return const_iterator_type(tail);
+    }
+
+    const_iterator_type cbegin() const noexcept
+    {
+        return const_iterator_type(head->next);
+    }
+
+    const_iterator_type cend() const noexcept
+    {
+        return const_iterator_type(tail);
+    }
+
     bool empty(void) const noexcept
     {
         return size() == 0;

+ 4 - 0
include/types/stdint.h

@@ -1,6 +1,10 @@
 #pragma once
 
+#ifdef __cplusplus
+#define NULL (nullptr)
+#else
 #define NULL ((void*)0)
+#endif
 
 typedef __INT8_TYPE__ int8_t;
 typedef __INT16_TYPE__ int16_t;

+ 0 - 1
include/types/vector.hpp

@@ -1,6 +1,5 @@
 #pragma once
 
-#include <kernel/mem.h>
 #include <types/allocator.hpp>
 #include <types/cplusplus.hpp>
 #include <types/types.h>

+ 7 - 3
src/kernel/event/event.cpp

@@ -1,16 +1,20 @@
-#include <kernel/tty.h>
 #include <asm/port_io.h>
 #include <kernel/event/event.h>
 #include <kernel/input/input_event.h>
 #include <kernel/stdio.h>
+#include <kernel/tty.h>
+#include <types/allocator.hpp>
 #include <types/list.hpp>
 
-static ::types::list<::input_event> _input_event_queue {};
+static ::types::list<::input_event>* _input_event_queue;
 
 namespace event {
 ::types::list<::input_event>& input_event_queue(void)
 {
-    return _input_event_queue;
+    if (!_input_event_queue) {
+        _input_event_queue = types::kernel_allocator_new<types::list<input_event>>();
+    }
+    return *_input_event_queue;
 }
 } // namespace event
 

+ 8 - 6
src/kernel/interrupt.c → src/kernel/interrupt.cpp

@@ -5,6 +5,7 @@
 #include <kernel/hw/timer.h>
 #include <kernel/interrupt.h>
 #include <kernel/mem.h>
+#include <kernel/mm.hpp>
 #include <kernel/stdio.h>
 #include <kernel/tty.h>
 #include <kernel/vga.h>
@@ -73,7 +74,7 @@ void init_pic(void)
     asm_sti();
 }
 
-void int6_handler(
+extern "C" void int6_handler(
     struct regs_32 s_regs,
     uint32_t error_code,
     ptr_t eip,
@@ -141,7 +142,6 @@ void int14_handler(
     uint16_t cs,
     uint32_t eflags)
 {
-    MAKE_BREAK_POINT();
     char buf[512];
 
     ++page_fault_times;
@@ -155,15 +155,15 @@ void int14_handler(
 
     // kernel code
     if (cs == KERNEL_CODE_SEGMENT) {
-        if (is_l_ptr_valid(kernel_mm_head, l_addr) != GB_OK) {
+        if (is_l_ptr_valid(kernel_mms, l_addr) != GB_OK) {
             goto kill;
         }
-        struct page* page = find_page_by_l_ptr(kernel_mm_head, l_addr);
+        struct page* page = find_page_by_l_ptr(kernel_mms, l_addr);
 
         // copy on write
         if (error_code.write == 1 && page->attr.cow == 1) {
-            page_directory_entry* pde = kernel_mm_head->pd + linr_addr_to_pd_i(l_addr);
-            page_table_entry* pte = p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+            page_directory_entry* pde = mms_get_pd(kernel_mms) + linr_addr_to_pd_i(l_addr);
+            page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
             pte += linr_addr_to_pt_i(l_addr);
 
             // if it is a dying page
@@ -197,6 +197,8 @@ kill:
         buf, 512,
         "killed: segmentation fault (eip: %x, cr2: %x, error_code: %x)", v_eip, l_addr, error_code);
     tty_print(console, buf);
+
+    MAKE_BREAK_POINT();
     asm_cli();
     asm_hlt();
 }

+ 0 - 546
src/kernel/mem.c

@@ -1,546 +0,0 @@
-#include <asm/boot.h>
-#include <asm/port_io.h>
-#include <asm/sys.h>
-#include <kernel/errno.h>
-#include <kernel/mem.h>
-#include <kernel/stdio.h>
-#include <kernel/task.h>
-#include <kernel/vga.h>
-#include <kernel_main.h>
-#include <types/bitmap.h>
-#include <types/list.h>
-
-// static variables
-
-struct mm kernel_mm;
-struct mm* kernel_mm_head;
-
-// ---------------------
-
-// constant values
-
-#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
-#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
-
-// ---------------------
-
-static void* p_start;
-static void* p_break;
-
-static size_t mem_size;
-static char mem_bitmap[1024 * 1024 / 8];
-
-static int32_t set_heap_start(void* start_addr)
-{
-    p_start = start_addr;
-    return 0;
-}
-
-static int32_t brk(void* addr)
-{
-    if (addr >= KERNEL_HEAP_LIMIT) {
-        return GB_FAILED;
-    }
-    p_break = addr;
-    return 0;
-}
-
-// sets errno when failed to increase heap pointer
-static void* sbrk(size_t increment)
-{
-    if (brk(p_break + increment) != 0) {
-        errno = ENOMEM;
-        return 0;
-    } else {
-        errno = 0;
-        return p_break;
-    }
-}
-
-int init_heap(void)
-{
-    set_heap_start(KERNEL_HEAP_START);
-
-    if (brk(KERNEL_HEAP_START) != 0) {
-        return GB_FAILED;
-    }
-    struct mem_blk* p_blk = sbrk(0);
-    p_blk->size = 4;
-    p_blk->flags.has_next = 0;
-    p_blk->flags.is_free = 1;
-    return GB_OK;
-}
-
-// @param start_pos position where to start finding
-// @param size the size of the block we're looking for
-// @return found block if suitable block exists, if not, the last block
-static struct mem_blk*
-find_blk(
-    struct mem_blk* start_pos,
-    size_t size)
-{
-    while (1) {
-        if (start_pos->flags.is_free && start_pos->size >= size) {
-            errno = 0;
-            return start_pos;
-        } else {
-            if (!start_pos->flags.has_next) {
-                errno = ENOTFOUND;
-                return start_pos;
-            }
-            start_pos = ((void*)start_pos)
-                + sizeof(struct mem_blk)
-                + start_pos->size
-                - 4 * sizeof(uint8_t);
-        }
-    }
-}
-
-static struct mem_blk*
-allocate_new_block(
-    struct mem_blk* blk_before,
-    size_t size)
-{
-    sbrk(sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t));
-    if (errno) {
-        return 0;
-    }
-
-    struct mem_blk* blk = ((void*)blk_before)
-        + sizeof(struct mem_blk)
-        + blk_before->size
-        - 4 * sizeof(uint8_t);
-
-    blk_before->flags.has_next = 1;
-
-    blk->flags.has_next = 0;
-    blk->flags.is_free = 1;
-    blk->size = size;
-
-    errno = 0;
-    return blk;
-}
-
-static void split_block(
-    struct mem_blk* blk,
-    size_t this_size)
-{
-    // block is too small to get split
-    if (blk->size < sizeof(struct mem_blk) + this_size) {
-        return;
-    }
-
-    struct mem_blk* blk_next = ((void*)blk)
-        + sizeof(struct mem_blk)
-        + this_size
-        - 4 * sizeof(uint8_t);
-
-    blk_next->size = blk->size
-        - this_size
-        - sizeof(struct mem_blk)
-        + 4 * sizeof(uint8_t);
-
-    blk_next->flags.has_next = blk->flags.has_next;
-    blk_next->flags.is_free = 1;
-
-    blk->flags.has_next = 1;
-    blk->size = this_size;
-}
-
-void* k_malloc(size_t size)
-{
-    struct mem_blk* block_allocated;
-
-    block_allocated = find_blk(p_start, size);
-    if (errno == ENOTFOUND) {
-        // 'block_allocated' in the argument list is the pointer
-        // pointing to the last block
-        block_allocated = allocate_new_block(block_allocated, size);
-        // no need to check errno and return value
-        // preserve these for the caller
-    } else {
-        split_block(block_allocated, size);
-    }
-
-    block_allocated->flags.is_free = 0;
-    return block_allocated->data;
-}
-
-void k_free(void* ptr)
-{
-    ptr -= (sizeof(struct mem_blk_flags) + sizeof(size_t));
-    struct mem_blk* blk = (struct mem_blk*)ptr;
-    blk->flags.is_free = 1;
-    // TODO: fusion free blocks nearby
-}
-
-void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
-{
-    if (p_ptr <= 0x30000000) {
-        // memory below 768MiB is identically mapped
-        return (void*)p_ptr;
-    } else {
-        // TODO: address translation
-        MAKE_BREAK_POINT();
-        return (void*)0xffffffff;
-    }
-}
-
-phys_ptr_t l_ptr_to_p_ptr(struct mm* mm, linr_ptr_t v_ptr)
-{
-    while (mm != NULL) {
-        if (v_ptr < mm->start || v_ptr >= mm->start + mm->len * 4096) {
-            goto next;
-        }
-        size_t offset = (size_t)(v_ptr - mm->start);
-        LIST_LIKE_AT(struct page, mm->pgs, offset / PAGE_SIZE, result);
-        return page_to_phys_addr(result->phys_page_id) + (offset % 4096);
-    next:
-        mm = mm->next;
-    }
-
-    // TODO: handle error
-    return 0xffffffff;
-}
-
-phys_ptr_t v_ptr_to_p_ptr(void* v_ptr)
-{
-    if (v_ptr < KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
-        return (phys_ptr_t)v_ptr;
-    }
-    return l_ptr_to_p_ptr(kernel_mm_head, (linr_ptr_t)v_ptr);
-}
-
-static inline void mark_page(page_t n)
-{
-    bm_set(mem_bitmap, n);
-}
-
-static inline void free_page(page_t n)
-{
-    bm_clear(mem_bitmap, n);
-}
-
-static void mark_addr_len(phys_ptr_t start, size_t n)
-{
-    if (n == 0)
-        return;
-    page_t start_page = phys_addr_to_page(start);
-    page_t end_page = phys_addr_to_page(start + n + 4095);
-    for (page_t i = start_page; i < end_page; ++i)
-        mark_page(i);
-}
-
-static void free_addr_len(phys_ptr_t start, size_t n)
-{
-    if (n == 0)
-        return;
-    page_t start_page = phys_addr_to_page(start);
-    page_t end_page = phys_addr_to_page(start + n + 4095);
-    for (page_t i = start_page; i < end_page; ++i)
-        free_page(i);
-}
-
-static inline void mark_addr_range(phys_ptr_t start, phys_ptr_t end)
-{
-    mark_addr_len(start, end - start);
-}
-
-static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
-{
-    free_addr_len(start, end - start);
-}
-
-page_t alloc_raw_page(void)
-{
-    for (page_t i = 0; i < 1024 * 1024; ++i) {
-        if (bm_test(mem_bitmap, i) == 0) {
-            mark_page(i);
-            return i;
-        }
-    }
-    MAKE_BREAK_POINT();
-    return 0xffffffff;
-}
-
-struct page* allocate_page(void)
-{
-    // TODO: allocate memory on identically mapped area
-    struct page* p = (struct page*)k_malloc(sizeof(struct page));
-    memset(p, 0x00, sizeof(struct page));
-    p->phys_page_id = alloc_raw_page();
-    p->ref_count = (size_t*)k_malloc(sizeof(size_t));
-    return p;
-}
-
-static inline void make_page_table(page_table_entry* pt)
-{
-    memset(pt, 0x00, sizeof(page_table_entry) * 1024);
-}
-
-static inline void init_mem_layout(void)
-{
-    mem_size = 1024 * mem_size_info.n_1k_blks;
-    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;
-
-    // mark kernel page directory
-    mark_addr_range(0x00000000, 0x00005000);
-    // mark empty page
-    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
-    // mark EBDA and upper memory as allocated
-    mark_addr_range(0x80000, 0xfffff);
-    // mark kernel
-    mark_addr_len(0x00100000, kernel_size);
-
-    if (e820_mem_map_entry_size == 20) {
-        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
-        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
-            if (entry->type != 1) {
-                mark_addr_len(entry->base, entry->len);
-            }
-        }
-    } else {
-        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
-        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
-            if (entry->in.type != 1) {
-                mark_addr_len(entry->in.base, entry->in.len);
-            }
-        }
-    }
-}
-
-int is_l_ptr_valid(struct mm* mm_area, linr_ptr_t l_ptr)
-{
-    while (mm_area != NULL) {
-        if (l_ptr >= mm_area->start && l_ptr < mm_area->start + mm_area->len * PAGE_SIZE) {
-            return GB_OK;
-        }
-        mm_area = mm_area->next;
-    }
-    return GB_FAILED;
-}
-
-struct page* find_page_by_l_ptr(struct mm* mm, linr_ptr_t l_ptr)
-{
-    if (mm == kernel_mm_head && l_ptr < (linr_ptr_t)KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
-        // TODO: make mm for identically mapped area
-        MAKE_BREAK_POINT();
-        return (struct page*)0xffffffff;
-    }
-    while (mm != NULL) {
-        if (l_ptr >= mm->start && l_ptr < mm->start + mm->len * 4096) {
-            size_t offset = (size_t)(l_ptr - mm->start);
-            LIST_LIKE_AT(struct page, mm->pgs, offset / PAGE_SIZE, result);
-            return result;
-        }
-        mm = mm->next;
-    }
-
-    // TODO: error handling
-    return NULL;
-}
-
-static inline void map_raw_page_to_pte(
-    page_table_entry* pte,
-    page_t page,
-    int rw,
-    int priv)
-{
-    // set P bit
-    pte->v = 0x00000001;
-    pte->in.rw = (rw == 1);
-    pte->in.us = (priv == 1);
-    pte->in.page = page;
-}
-
-static void _map_raw_page_to_addr(
-    struct mm* mm_area,
-    page_t page,
-    int rw,
-    int priv)
-{
-    linr_ptr_t addr = (linr_ptr_t)mm_area->start + mm_area->len * 4096;
-    page_directory_entry* pde = mm_area->pd + linr_addr_to_pd_i(addr);
-    // page table not exist
-    if (!pde->in.p) {
-        // allocate a page for the page table
-        pde->in.p = 1;
-        pde->in.rw = 1;
-        pde->in.us = 0;
-        pde->in.pt_page = alloc_raw_page();
-
-        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
-    }
-
-    // map the page in the page table
-    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
-    pte += linr_addr_to_pt_i(addr);
-    map_raw_page_to_pte(pte, page, rw, priv);
-}
-
-// map page to the end of mm_area in pd
-int k_map(
-    struct mm* mm_area,
-    struct page* page,
-    int read,
-    int write,
-    int priv,
-    int cow)
-{
-    struct page* p_page_end = mm_area->pgs;
-    while (p_page_end != NULL && p_page_end->next != NULL)
-        p_page_end = p_page_end->next;
-
-    if (cow) {
-        // find its ancestor
-        while (page->attr.cow)
-            page = page->next;
-
-        // create a new page node
-        struct page* new_page = k_malloc(sizeof(struct page));
-
-        new_page->attr.read = (read == 1);
-        new_page->attr.write = (write == 1);
-        new_page->attr.system = (priv == 1);
-        new_page->attr.cow = 1;
-        // TODO: move *next out of struct page
-        new_page->next = NULL;
-
-        new_page->phys_page_id = page->phys_page_id;
-        new_page->ref_count = page->ref_count;
-
-        if (p_page_end != NULL)
-            p_page_end->next = new_page;
-        else
-            mm_area->pgs = new_page;
-    } else {
-        page->attr.read = (read == 1);
-        page->attr.write = (write == 1);
-        page->attr.system = (priv == 1);
-        page->attr.cow = 0;
-        // TODO: move *next out of struct page
-        page->next = NULL;
-
-        if (p_page_end != NULL)
-            p_page_end->next = page;
-        else
-            mm_area->pgs = page;
-    }
-    _map_raw_page_to_addr(
-        mm_area,
-        page->phys_page_id,
-        (write && !cow),
-        priv);
-
-    ++mm_area->len;
-    ++*page->ref_count;
-    return GB_OK;
-}
-
-// map a page identically
-// this function is only meant to be used in the initialization process
-// it checks the pde's P bit so you need to make sure it's already set
-// to avoid dead loops
-static inline void _init_map_page_identically(page_t page)
-{
-    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);
-    // page table not exist
-    if (!pde->in.p) {
-        // allocate a page for the page table
-        // set the P bit of the pde in advance
-        pde->in.p = 1;
-        pde->in.rw = 1;
-        pde->in.us = 0;
-        pde->in.pt_page = alloc_raw_page();
-        _init_map_page_identically(pde->in.pt_page);
-
-        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
-    }
-
-    // map the page in the page table
-    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
-    pt += page_to_pt_i(page);
-    pt->v = 0x00000003;
-    pt->in.page = page;
-}
-
-static inline void init_paging_map_low_mem_identically(void)
-{
-    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
-        // check if the address is valid and not mapped
-        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
-            continue;
-        _init_map_page_identically(phys_addr_to_page(addr));
-    }
-}
-
-static struct page empty_page;
-static struct page heap_first_page;
-static size_t heap_first_page_ref_count;
-
-void init_mem(void)
-{
-    init_mem_layout();
-
-    // map the 16MiB-768MiB identically
-    init_paging_map_low_mem_identically();
-
-    kernel_mm_head = &kernel_mm;
-
-    kernel_mm.attr.read = 1;
-    kernel_mm.attr.write = 1;
-    kernel_mm.attr.system = 1;
-    kernel_mm.len = 0;
-    kernel_mm.next = NULL;
-    kernel_mm.pd = KERNEL_PAGE_DIRECTORY_ADDR;
-    kernel_mm.pgs = NULL;
-    kernel_mm.start = (linr_ptr_t)KERNEL_HEAP_START;
-
-    heap_first_page.attr.cow = 0;
-    heap_first_page.attr.read = 1;
-    heap_first_page.attr.write = 1;
-    heap_first_page.attr.system = 1;
-    heap_first_page.next = NULL;
-    heap_first_page.phys_page_id = alloc_raw_page();
-    heap_first_page.ref_count = &heap_first_page_ref_count;
-
-    *heap_first_page.ref_count = 0;
-
-    k_map(kernel_mm_head, &heap_first_page, 1, 1, 1, 0);
-
-    init_heap();
-
-    // create empty_page struct
-    empty_page.attr.cow = 0;
-    empty_page.attr.read = 1;
-    empty_page.attr.write = 0;
-    empty_page.attr.system = 0;
-    empty_page.next = NULL;
-    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
-    empty_page.ref_count = (size_t*)k_malloc(sizeof(size_t));
-    *empty_page.ref_count = 1;
-
-    // TODO: improve the algorithm SO FREAKING SLOW
-    // while (kernel_mm_head->len < 256 * 1024 * 1024 / PAGE_SIZE) {
-    while (kernel_mm_head->len < 16 * 1024 * 1024 / PAGE_SIZE) {
-        k_map(
-            kernel_mm_head, &empty_page,
-            1, 1, 1, 1);
-    }
-}
-
-void create_segment_descriptor(
-    segment_descriptor* sd,
-    uint32_t base,
-    uint32_t limit,
-    uint32_t flags,
-    uint32_t access)
-{
-    sd->base_low = base & 0x0000ffff;
-    sd->base_mid = ((base & 0x00ff0000) >> 16);
-    sd->base_high = ((base & 0xff000000) >> 24);
-    sd->limit_low = limit & 0x0000ffff;
-    sd->limit_high = ((limit & 0x000f0000) >> 16);
-    sd->access = access;
-    sd->flags = flags;
-}

+ 523 - 0
src/kernel/mem.cpp

@@ -0,0 +1,523 @@
+#include <asm/boot.h>
+#include <asm/port_io.h>
+#include <asm/sys.h>
+#include <kernel/errno.h>
+#include <kernel/mem.h>
+#include <kernel/mm.hpp>
+#include <kernel/stdio.h>
+#include <kernel/task.h>
+#include <kernel/vga.h>
+#include <kernel_main.h>
+#include <types/bitmap.h>
+
+// global objects
+
+mm_list* kernel_mms;
+
+// ---------------------
+
+// constant values
+
+#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
+#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
+
+#define IDENTICALLY_MAPPED_HEAP_SIZE ((size_t)0x400000)
+
+// ---------------------
+
+static size_t mem_size;
+static char mem_bitmap[1024 * 1024 / 8];
+
+class brk_memory_allocator {
+public:
+    using byte = uint8_t;
+    using size_type = size_t;
+
+    struct mem_blk_flags {
+        uint8_t is_free;
+        uint8_t has_next;
+        uint8_t _unused2;
+        uint8_t _unused3;
+    };
+
+    struct mem_blk {
+        size_t size;
+        struct mem_blk_flags flags;
+        // marks the first byte of the usable memory space;
+        // the minimum allocation granted to a caller is 4 bytes
+        uint8_t data[4];
+    };
+
+private:
+    byte* p_start;
+    byte* p_break;
+    byte* p_limit;
+
+    brk_memory_allocator(void) = delete;
+    brk_memory_allocator(const brk_memory_allocator&) = delete;
+    brk_memory_allocator(brk_memory_allocator&&) = delete;
+
+    inline int brk(byte* addr)
+    {
+        if (addr >= p_limit)
+            return GB_FAILED;
+        p_break = addr;
+        return GB_OK;
+    }
+
+    // sets errno
+    inline byte* sbrk(size_type increment)
+    {
+        if (brk(p_break + increment) != GB_OK) {
+            errno = ENOMEM;
+            return nullptr;
+        } else {
+            errno = 0;
+            return p_break;
+        }
+    }
+
+    inline mem_blk* _find_next_mem_blk(mem_blk* blk, size_type blk_size)
+    {
+        byte* p = (byte*)blk;
+        p += sizeof(mem_blk);
+        p += blk_size;
+        p -= (4 * sizeof(byte));
+        return (mem_blk*)p;
+    }
+
+    // sets errno
+    // @param start_pos position where to start finding
+    // @param size the size of the block we're looking for
+    // @return the free block found if one of sufficient size exists; otherwise the last block in the chain
+    mem_blk* find_blk(mem_blk* start_pos, size_type size)
+    {
+        while (1) {
+            if (start_pos->flags.is_free && start_pos->size >= size) {
+                errno = 0;
+                return start_pos;
+            } else {
+                if (!start_pos->flags.has_next) {
+                    errno = ENOTFOUND;
+                    return start_pos;
+                }
+                start_pos = _find_next_mem_blk(start_pos, start_pos->size);
+            }
+        }
+    }
+
+    // sets errno
+    mem_blk* allocate_new_block(mem_blk* blk_before, size_type size)
+    {
+        sbrk(sizeof(mem_blk) + size - 4 * sizeof(byte));
+        // preserves errno
+        if (errno) {
+            return nullptr;
+        }
+
+        mem_blk* blk = _find_next_mem_blk(blk_before, blk_before->size);
+
+        blk_before->flags.has_next = 1;
+
+        blk->flags.has_next = 0;
+        blk->flags.is_free = 1;
+        blk->size = size;
+
+        errno = 0;
+        return blk;
+    }
+
+    void split_block(mem_blk* blk, size_type this_size)
+    {
+        // block is too small to get split
+        if (blk->size < sizeof(mem_blk) + this_size) {
+            return;
+        }
+
+        mem_blk* blk_next = _find_next_mem_blk(blk, this_size);
+
+        blk_next->size = blk->size
+            - this_size
+            - sizeof(mem_blk)
+            + 4 * sizeof(byte);
+
+        blk_next->flags.has_next = blk->flags.has_next;
+        blk_next->flags.is_free = 1;
+
+        blk->flags.has_next = 1;
+        blk->size = this_size;
+    }
+
+public:
+    brk_memory_allocator(void* start, size_type limit)
+        : p_start((byte*)start)
+        , p_limit(p_start + limit)
+    {
+        brk(p_start);
+        mem_blk* p_blk = (mem_blk*)sbrk(0);
+        p_blk->size = 4;
+        p_blk->flags.has_next = 0;
+        p_blk->flags.is_free = 1;
+    }
+
+    // sets errno
+    void* alloc(size_type size)
+    {
+        struct mem_blk* block_allocated;
+
+        block_allocated = find_blk((mem_blk*)p_start, size);
+        if (errno == ENOTFOUND) {
+            // find_blk found no suitable block: 'block_allocated' now points
+            // to the last block in the chain, so append a new block after it
+            block_allocated = allocate_new_block(block_allocated, size);
+            if (errno) {
+                // preserves errno
+                return nullptr;
+            }
+        } else {
+            split_block(block_allocated, size);
+        }
+
+        errno = 0;
+        block_allocated->flags.is_free = 0;
+        return block_allocated->data;
+    }
+
+    void free(void* ptr)
+    {
+        mem_blk* blk = (mem_blk*)((byte*)ptr - (sizeof(mem_blk_flags) + sizeof(size_t)));
+        blk->flags.is_free = 1;
+        // TODO: fusion free blocks nearby
+    }
+};
+
+static brk_memory_allocator* kernel_heap_allocator;
+static brk_memory_allocator
+    kernel_ident_mapped_allocator((void*)bss_section_end_addr,
+        IDENTICALLY_MAPPED_HEAP_SIZE);
+
+void* k_malloc(size_t size)
+{
+    return kernel_heap_allocator->alloc(size);
+}
+
+void k_free(void* ptr)
+{
+    kernel_heap_allocator->free(ptr);
+}
+
+void* ki_malloc(size_t size)
+{
+    void* ptr = kernel_ident_mapped_allocator.alloc(size);
+    if (!ptr) {
+        MAKE_BREAK_POINT();
+    }
+    return ptr;
+}
+
+void ki_free(void* ptr)
+{
+    kernel_ident_mapped_allocator.free(ptr);
+}
+
+void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
+{
+    if (p_ptr <= 0x30000000) {
+        // memory below 768MiB is identically mapped
+        return (void*)p_ptr;
+    } else {
+        // TODO: address translation
+        MAKE_BREAK_POINT();
+        return (void*)0xffffffff;
+    }
+}
+
+phys_ptr_t l_ptr_to_p_ptr(const mm_list* mms, linr_ptr_t v_ptr)
+{
+    for (const mm& item : *mms) {
+        if (v_ptr < item.start || v_ptr >= item.start + item.pgs->size() * PAGE_SIZE)
+            continue;
+        size_t offset = (size_t)(v_ptr - item.start);
+        const page& p = item.pgs->at(offset / PAGE_SIZE);
+        return page_to_phys_addr(p.phys_page_id) + (offset % PAGE_SIZE);
+    }
+
+    // TODO: handle error
+    return 0xffffffff;
+}
+
+phys_ptr_t v_ptr_to_p_ptr(const void* v_ptr)
+{
+    if (v_ptr < KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
+        return (phys_ptr_t)v_ptr;
+    }
+    return l_ptr_to_p_ptr(kernel_mms, (linr_ptr_t)v_ptr);
+}
+
+static inline void mark_page(page_t n)
+{
+    bm_set(mem_bitmap, n);
+}
+
+static inline void free_page(page_t n)
+{
+    bm_clear(mem_bitmap, n);
+}
+
+static void mark_addr_len(phys_ptr_t start, size_t n)
+{
+    if (n == 0)
+        return;
+    page_t start_page = phys_addr_to_page(start);
+    page_t end_page = phys_addr_to_page(start + n + 4095);
+    for (page_t i = start_page; i < end_page; ++i)
+        mark_page(i);
+}
+
+static void free_addr_len(phys_ptr_t start, size_t n)
+{
+    if (n == 0)
+        return;
+    page_t start_page = phys_addr_to_page(start);
+    page_t end_page = phys_addr_to_page(start + n + 4095);
+    for (page_t i = start_page; i < end_page; ++i)
+        free_page(i);
+}
+
+static inline void mark_addr_range(phys_ptr_t start, phys_ptr_t end)
+{
+    mark_addr_len(start, end - start);
+}
+
+static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
+{
+    free_addr_len(start, end - start);
+}
+
+page_t alloc_raw_page(void)
+{
+    for (page_t i = 0; i < 1024 * 1024; ++i) {
+        if (bm_test(mem_bitmap, i) == 0) {
+            mark_page(i);
+            return i;
+        }
+    }
+    MAKE_BREAK_POINT();
+    return 0xffffffff;
+}
+
+struct page allocate_page(void)
+{
+    struct page p { };
+    p.phys_page_id = alloc_raw_page();
+    p.ref_count = types::kernel_ident_allocator_new<size_t>(0);
+    return p;
+}
+
+static inline void make_page_table(page_table_entry* pt)
+{
+    memset(pt, 0x00, sizeof(page_table_entry) * 1024);
+}
+
+static inline void init_mem_layout(void)
+{
+    mem_size = 1024 * mem_size_info.n_1k_blks;
+    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;
+
+    // mark kernel page directory
+    mark_addr_range(0x00000000, 0x00005000);
+    // mark empty page
+    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
+    // mark EBDA and upper memory as allocated
+    mark_addr_range(0x80000, 0xfffff);
+    // mark kernel
+    mark_addr_len(0x00100000, kernel_size);
+    // mark identically mapped heap
+    mark_addr_len(bss_section_end_addr, IDENTICALLY_MAPPED_HEAP_SIZE);
+
+    if (e820_mem_map_entry_size == 20) {
+        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
+        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
+            if (entry->type != 1) {
+                mark_addr_len(entry->base, entry->len);
+            }
+        }
+    } else {
+        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
+        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
+            if (entry->in.type != 1) {
+                mark_addr_len(entry->in.base, entry->in.len);
+            }
+        }
+    }
+}
+
+int is_l_ptr_valid(const mm_list* mms, linr_ptr_t l_ptr)
+{
+    for (const auto& item : *mms)
+        if (l_ptr >= item.start && l_ptr < item.start + item.pgs->size() * PAGE_SIZE)
+            return GB_OK;
+    return GB_FAILED;
+}
+
+struct page* find_page_by_l_ptr(const mm_list* mms, linr_ptr_t l_ptr)
+{
+    for (const mm& item : *mms) {
+        if (l_ptr >= item.start && l_ptr < item.start + item.pgs->size() * PAGE_SIZE) {
+            size_t offset = (size_t)(l_ptr - item.start);
+            return &item.pgs->at(offset / PAGE_SIZE);
+        }
+    }
+
+    // TODO: error handling
+    return nullptr;
+}
+
+static inline void map_raw_page_to_pte(
+    page_table_entry* pte,
+    page_t page,
+    int rw,
+    int priv)
+{
+    // set P bit
+    pte->v = 0x00000001;
+    pte->in.rw = (rw == 1);
+    pte->in.us = (priv == 1);
+    pte->in.page = page;
+}
+
+// map page to the end of mm_area in pd
+int k_map(
+    struct mm* mm_area,
+    const struct page* page,
+    int read,
+    int write,
+    int priv,
+    int cow)
+{
+    linr_ptr_t addr = (linr_ptr_t)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
+    page_directory_entry* pde = mm_area->pd + linr_addr_to_pd_i(addr);
+    // page table not exist
+    if (!pde->in.p) {
+        // allocate a page for the page table
+        pde->in.p = 1;
+        pde->in.rw = 1;
+        pde->in.us = 0;
+        pde->in.pt_page = alloc_raw_page();
+
+        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
+    }
+
+    // map the page in the page table
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    pte += linr_addr_to_pt_i(addr);
+    map_raw_page_to_pte(pte, page->phys_page_id, (write && !cow), priv);
+
+    mm_area->pgs->push_back(*page);
+    mm_area->pgs->back()->attr.cow = cow;
+    ++*page->ref_count;
+    return GB_OK;
+}
+
+// map a page identically
+// this function is only meant to be used in the initialization process.
+// the recursion terminates only when it sees a pde whose P bit is set,
+// so the P bit must be set before recursing to avoid an endless loop
+static inline void _init_map_page_identically(page_t page)
+{
+    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);
+    // page table not exist
+    if (!pde->in.p) {
+        // allocate a page for the page table
+        // set the P bit of the pde in advance
+        pde->in.p = 1;
+        pde->in.rw = 1;
+        pde->in.us = 0;
+        pde->in.pt_page = alloc_raw_page();
+        _init_map_page_identically(pde->in.pt_page);
+
+        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
+    }
+
+    // map the page in the page table
+    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    pt += page_to_pt_i(page);
+    pt->v = 0x00000003;
+    pt->in.page = page;
+}
+
+static inline void init_paging_map_low_mem_identically(void)
+{
+    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
+        // check if the address is valid and not mapped
+        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
+            continue;
+        _init_map_page_identically(phys_addr_to_page(addr));
+    }
+}
+
+static page empty_page;
+
+void init_mem(void)
+{
+    init_mem_layout();
+
+    // map the 16MiB-768MiB identically
+    init_paging_map_low_mem_identically();
+
+    kernel_mms = types::kernel_ident_allocator_new<mm_list>();
+    kernel_mms->push_back(mm {
+        .start = (linr_ptr_t)KERNEL_HEAP_START,
+        .attr = {
+            .read = 1,
+            .write = 1,
+            .system = 1,
+        },
+        .pgs = types::kernel_ident_allocator_new<page_arr>(),
+        .pd = KERNEL_PAGE_DIRECTORY_ADDR,
+    });
+
+    page heap_first_page {
+        .phys_page_id = alloc_raw_page(),
+        .ref_count = types::kernel_ident_allocator_new<size_t>(0),
+        .attr = {
+            .cow = 0,
+        },
+    };
+
+    mm* heap_mm = kernel_mms->begin().ptr();
+
+    k_map(heap_mm, &heap_first_page, 1, 1, 1, 0);
+    memset(KERNEL_HEAP_START, 0x00, PAGE_SIZE);
+    kernel_heap_allocator = types::kernel_ident_allocator_new<brk_memory_allocator>(KERNEL_HEAP_START,
+        (uint32_t)KERNEL_HEAP_LIMIT - (uint32_t)KERNEL_HEAP_START);
+
+    // create empty_page struct
+    empty_page.attr.cow = 0;
+    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
+    empty_page.ref_count = types::kernel_ident_allocator_new<size_t>(1);
+
+    // TODO: optimize; mapping the heap one page per k_map call is extremely slow
+    // while (kernel_mm_head->len < 256 * 1024 * 1024 / PAGE_SIZE) {
+    while (heap_mm->pgs->size() < 256 * 1024 * 1024 / PAGE_SIZE) {
+        k_map(
+            heap_mm, &empty_page,
+            1, 1, 1, 1);
+    }
+}
+
+void create_segment_descriptor(
+    segment_descriptor* sd,
+    uint32_t base,
+    uint32_t limit,
+    uint32_t flags,
+    uint32_t access)
+{
+    sd->base_low = base & 0x0000ffff;
+    sd->base_mid = ((base & 0x00ff0000) >> 16);
+    sd->base_high = ((base & 0xff000000) >> 24);
+    sd->limit_low = limit & 0x0000ffff;
+    sd->limit_high = ((limit & 0x000f0000) >> 16);
+    sd->access = access;
+    sd->flags = flags;
+}

+ 2 - 2
src/kernel/tty.c

@@ -1,9 +1,9 @@
 #include <asm/port_io.h>
+#include <kernel/hw/serial.h>
+#include <kernel/mem.h>
 #include <kernel/stdio.h>
 #include <kernel/tty.h>
-#include <kernel/mem.h>
 #include <kernel/vga.h>
-#include <kernel/hw/serial.h>
 
 static void serial_tty_put_char(struct tty* p_tty, char c)
 {

+ 17 - 14
src/kernel_main.c

@@ -15,16 +15,6 @@
 #include <kernel/vga.h>
 #include <types/bitmap.h>
 
-typedef void (*constructor)(void);
-extern constructor start_ctors;
-extern constructor end_ctors;
-void call_constructors_for_cpp(void)
-{
-    for (constructor* ctor = &start_ctors; ctor != &end_ctors; ++ctor) {
-        (*ctor)();
-    }
-}
-
 #define KERNEL_MAIN_BUF_SIZE (128)
 
 struct tty* console = NULL;
@@ -57,6 +47,16 @@ static inline void halt_on_init_error(void)
         asm_hlt();
 }
 
+typedef void (*constructor)(void);
+extern constructor start_ctors;
+extern constructor end_ctors;
+void call_constructors_for_cpp(void)
+{
+    for (constructor* ctor = &start_ctors; ctor != &end_ctors; ++ctor) {
+        (*ctor)();
+    }
+}
+
 uint8_t e820_mem_map[1024];
 uint32_t e820_mem_map_count;
 uint32_t e820_mem_map_entry_size;
@@ -162,14 +162,17 @@ void kernel_main(void)
     init_idt();
     INIT_OK();
 
-    INIT_START("memory allocation");
-    init_mem();
-    INIT_OK();
-
+    // NOTE:
+    // initializers of C++ global objects MUST NOT perform
+    // any kind of memory allocation
     INIT_START("C++ global objects");
     call_constructors_for_cpp();
     INIT_OK();
 
+    INIT_START("memory allocation");
+    init_mem();
+    INIT_OK();
+
     INIT_START("programmable interrupt controller and timer");
     init_pic();
     init_pit();