feat: memory initialization

greatbridf 2 years ago
parent commit 1cc56a7e13
8 changed files with 295 additions and 340 deletions
  1. include/kernel/interrupt.h (+3 -2)
  2. include/kernel/mem.h (+70 -65)
  3. include/types/size.h (+0 -1)
  4. src/asm/interrupt.s (+10 -0)
  5. src/boot.s (+3 -2)
  6. src/kernel/interrupt.c (+23 -11)
  7. src/kernel/mem.c (+178 -244)
  8. src/kernel_main.c (+8 -15)

+ 3 - 2
include/kernel/interrupt.h

@@ -48,12 +48,13 @@ struct IDT_entry {
 extern struct IDT_entry IDT[256];
 #endif
 
-void init_idt();
+void init_idt(void);
+void init_pic(void);
 
 // idt_descriptor: uint16_t[3]
 // [0] bit 0 :15 => limit
 // [1] bit 16:47 => address
-extern void asm_load_idt(uint16_t idt_descriptor[3]);
+extern void asm_load_idt(uint16_t idt_descriptor[3], int sti);
 
 void int13_handler(
     struct regs_32 s_regs,

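Note on the descriptor layout documented above: init_idt() (see the src/kernel/interrupt.c hunk further down) packs the 16-bit limit into element [0] and the 32-bit base address across elements [1] and [2], then passes 0 as the new sti flag so interrupts stay disabled until init_pic() has remapped the PIC. A minimal sketch mirroring that call:

    uint16_t idt_descriptor[3];
    idt_descriptor[0] = sizeof(struct IDT_entry) * 256;   // limit: size of the IDT
    *(uint32_t*)(idt_descriptor + 1) = (ptr_t)IDT;        // base: address of IDT[]
    asm_load_idt(idt_descriptor, 0);                      // 0: do not execute sti yet
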
+ 70 - 65
include/kernel/mem.h

@@ -24,66 +24,6 @@ struct e820_mem_map_entry_24 {
     uint32_t acpi_extension_attr;
 };
 
-// in kernel_main.c
-extern uint8_t e820_mem_map[1024];
-extern uint32_t e820_mem_map_count;
-extern uint32_t e820_mem_map_entry_size;
-extern uint32_t kernel_size;
-extern struct mem_size_info mem_size_info;
-
-#define KERNEL_HEAP_START ((void*)0x30000000)
-#define KERNEL_HEAP_LIMIT ((void*)0x40000000)
-
-struct mem_blk_flags {
-    uint8_t is_free;
-    uint8_t has_next;
-    uint8_t _unused2;
-    uint8_t _unused3;
-};
-
-struct mem_blk {
-    size_t size;
-    struct mem_blk_flags flags;
-    // the first byte of the memory space
-    // the minimal allocated space is 4 bytes
-    uint8_t data[4];
-};
-
-struct page_attr {
-    uint32_t read : 1;
-    uint32_t write : 1;
-    uint32_t system : 1;
-    uint32_t cow : 1;
-};
-
-struct page {
-    page_t phys_page_id;
-    size_t* ref_count;
-    struct page_attr attr;
-    struct page* next;
-};
-
-struct mm_attr {
-    uint32_t read : 1;
-    uint32_t write : 1;
-    uint32_t system : 1;
-};
-
-struct mm {
-    virt_ptr_t start;
-    size_t len;
-    struct mm_attr attr;
-    struct page* pgs;
-    struct mm* next;
-    page_directory_entry* pd;
-};
-
-int init_heap(void);
-
-void* k_malloc(size_t size);
-
-void k_free(void* ptr);
-
 /*
  * page directory entry
  *
@@ -107,7 +47,7 @@ struct page_directory_entry_in {
     uint32_t d : 1;
     uint32_t ps : 1;
     uint32_t ignored : 4;
-    page_t addr : 20;
+    page_t pt_page : 20;
 };
 
 typedef union page_directory_entry {
@@ -140,7 +80,7 @@ struct page_table_entry_in {
     uint32_t pat : 1;
     uint32_t g : 1;
     uint32_t ignored : 3;
-    page_t addr : 20;
+    page_t page : 20;
 };
 
 typedef union page_table_entry {
@@ -148,9 +88,75 @@ typedef union page_table_entry {
     struct page_table_entry_in in;
 } page_table_entry;
 
-#define KERNEL_EARLY_PAGE_DIRECTORY_ADDR ((page_directory_entry*)0x00000000)
+struct page_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;
+    uint32_t cow : 1;
+};
+
+struct page {
+    page_t phys_page_id;
+    size_t* ref_count;
+    struct page_attr attr;
+    struct page* next;
+};
+
+struct mm_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;
+};
+
+struct mm {
+    void* start;
+    size_t len;
+    struct mm_attr attr;
+    struct page* pgs;
+    struct mm* next;
+    page_directory_entry* pd;
+};
+
+// in kernel_main.c
+extern uint8_t e820_mem_map[1024];
+extern uint32_t e820_mem_map_count;
+extern uint32_t e820_mem_map_entry_size;
+extern uint32_t kernel_size;
+extern struct mem_size_info mem_size_info;
+
+#define KERNEL_HEAP_START ((void*)0x30000000)
+#define KERNEL_HEAP_LIMIT ((void*)0x40000000)
+
+struct mem_blk_flags {
+    uint8_t is_free;
+    uint8_t has_next;
+    uint8_t _unused2;
+    uint8_t _unused3;
+};
+
+struct mem_blk {
+    size_t size;
+    struct mem_blk_flags flags;
+    // the first byte of the memory space
+    // the minimal allocated space is 4 bytes
+    uint8_t data[4];
+};
+
+int init_heap(void);
+
+void* k_malloc(size_t size);
+
+void k_free(void* ptr);
+
+// translate physical address to linear address
+void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);
+
+// translate linear address to physical address
+phys_ptr_t v_ptr_to_p_ptr(struct mm* mm_area, void* v_ptr);
+
+#define KERNEL_PAGE_DIRECTORY_ADDR ((page_directory_entry*)0x00000000)
 
-void init_paging(void);
+void init_mem(void);
 
 #define SD_TYPE_CODE_SYSTEM (0x9a)
 #define SD_TYPE_DATA_SYSTEM (0x92)
@@ -170,7 +176,6 @@ typedef struct segment_descriptor_struct {
     uint64_t base_high : 8;
 } segment_descriptor;
 
-void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss);
 void create_segment_descriptor(
     segment_descriptor* sd,
     uint32_t base,

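For the renamed pt_page/page fields above: they hold 20-bit physical page numbers, so a 32-bit linear address splits into a 10-bit directory index, a 10-bit table index and a 12-bit offset. A worked sketch with hypothetical helper names (the diff itself only defines page_to_pd_i/page_to_pt_i on page numbers; ptr_t is assumed to be the 32-bit integer pointer type from types/size.h):

    // example: KERNEL_HEAP_START == 0x30000000
    //   directory index = 0x30000000 >> 22          = 192
    //   table index     = (0x30000000 >> 12) & 1023 = 0
    //   page offset     = 0x30000000 & 0xfff        = 0
    static inline pd_i_t linear_to_pd_i(void* v)   { return (ptr_t)v >> 22; }
    static inline pt_i_t linear_to_pt_i(void* v)   { return ((ptr_t)v >> 12) & 0x3ff; }
    static inline size_t linear_to_offset(void* v) { return (ptr_t)v & 0xfff; }
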
+ 0 - 1
include/types/size.h

@@ -13,7 +13,6 @@ typedef int64_t diff_t;
 #endif
 
 typedef ptr_t phys_ptr_t;
-typedef ptr_t virt_ptr_t;
 typedef size_t page_t;
 typedef size_t pd_i_t;
 typedef size_t pt_i_t;

+ 10 - 0
src/asm/interrupt.s

@@ -12,6 +12,12 @@ int6:
 
     iret
 
+.globl int8
+.type  int8 @function
+int8:
+    nop
+    iret
+
 .globl int13
 .type  int13 @function
 int13:
@@ -158,5 +164,9 @@ irq15:
 asm_load_idt:
     movl 4(%esp), %edx
     lidt (%edx)
+    movl 8(%esp), %edx
+    cmpl $0, %edx
+    je asm_load_idt_skip
     sti
+asm_load_idt_skip:
     ret

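About the new second argument read above: with the cdecl convention used here the return address sits at (%esp), the idt_descriptor pointer at 4(%esp) and the sti flag at 8(%esp), so sti only executes when the caller passes a non-zero flag. Equivalent logic sketched in C (asm_lidt is a hypothetical stand-in for the lidt instruction; asm_sti exists elsewhere in this codebase):

    void asm_load_idt(uint16_t idt_descriptor[3], int sti)
    {
        asm_lidt(idt_descriptor);   // lidt (%edx)
        if (sti != 0)               // cmpl $0, %edx / je asm_load_idt_skip
            asm_sti();              // sti
    }
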
+ 3 - 2
src/boot.s

@@ -165,9 +165,10 @@ setup_early_kernel_page_table:
 # set up early kernel page table
 
 # the early kernel page directory is located at physical
-# address 0x00000000, size 4k, so we fill the first 5KiB
+# address 0x00000000, size 4k, and the empty page is at
+# 0x5000-0x5fff, so we fill the first 0x6000 bytes (6 pages)
     movl $0x00000000, %eax
-    movl $0x5000, %ecx
+    movl $0x6000, %ecx
     call _fill_zero
 
 # map the first 16MiB identically

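The zeroed region above lines up with the constants changed in src/kernel/mem.c below: the early page directory occupies the first page, the identity mapping of the first 16 MiB needs four page tables (an inference from the hunks shown, not spelled out in the diff), and the empty page now sits at 0x5000. A sketch of the resulting low-memory layout:

    /* 0x00000000 - 0x00000fff  early kernel page directory                 */
    /* 0x00001000 - 0x00004fff  page tables for the 16 MiB identity mapping */
    /* 0x00005000 - 0x00005fff  the empty page                              */
    #define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)   /* as defined in mem.c */
    #define EMPTY_PAGE_END  ((phys_ptr_t)0x6000)
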
+ 23 - 11
src/kernel/interrupt.c

@@ -15,6 +15,28 @@ void init_idt()
 {
     asm_cli();
 
+    memset(IDT, 0x00, sizeof(IDT));
+
+    // invalid opcode
+    SET_IDT_ENTRY_FN(6, int6, 0x08);
+    // double fault
+    SET_IDT_ENTRY_FN(8, int8, 0x08);
+    // general protection
+    SET_IDT_ENTRY_FN(13, int13, 0x08);
+    // page fault
+    SET_IDT_ENTRY_FN(14, int14, 0x08);
+
+    uint16_t idt_descriptor[3];
+    idt_descriptor[0] = sizeof(struct IDT_entry) * 256;
+    *((uint32_t*)(idt_descriptor + 1)) = (ptr_t)IDT;
+
+    asm_load_idt(idt_descriptor, 0);
+}
+
+void init_pic(void)
+{
+    asm_cli();
+
     asm_outb(PORT_PIC1_COMMAND, 0x11); // edge trigger mode
     asm_outb(PORT_PIC1_DATA, 0x20); // start from int 0x20
     asm_outb(PORT_PIC1_DATA, 0x04); // PIC1 is connected to IRQ2 (1 << 2)
@@ -29,12 +51,6 @@ void init_idt()
     asm_outb(PORT_PIC1_DATA, 0x00);
     asm_outb(PORT_PIC2_DATA, 0x00);
 
-    // handle general protection fault (handle segmentation fault)
-    SET_IDT_ENTRY_FN(6, int6, 0x08);
-    SET_IDT_ENTRY_FN(13, int13, 0x08);
-    SET_IDT_ENTRY_FN(14, int14, 0x08);
-    // SET_IDT_ENTRY(0x0c, /* addr */ 0, 0x08);
-
     // 0x08 stands for kernel code segment
     SET_UP_IRQ(0, 0x08);
     SET_UP_IRQ(1, 0x08);
@@ -53,11 +69,7 @@ void init_idt()
     SET_UP_IRQ(14, 0x08);
     SET_UP_IRQ(15, 0x08);
 
-    uint16_t idt_descriptor[3];
-    idt_descriptor[0] = sizeof(struct IDT_entry) * 256;
-    *((uint32_t*)(idt_descriptor + 1)) = (ptr_t)IDT;
-
-    asm_load_idt(idt_descriptor);
+    asm_sti();
 }
 
 void int6_handler(

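The outb sequence that moved into init_pic() is the standard 8259A initialization. For reference, a hedged sketch of the master-PIC half with each step named (only ICW1-ICW3 and the final unmask are visible in the hunks above; the ICW4 write is the conventional value and is assumed here):

    asm_outb(PORT_PIC1_COMMAND, 0x11); // ICW1: start init, edge triggered, ICW4 needed
    asm_outb(PORT_PIC1_DATA,    0x20); // ICW2: master vectors start at interrupt 0x20
    asm_outb(PORT_PIC1_DATA,    0x04); // ICW3: a slave PIC is cascaded on IRQ2
    asm_outb(PORT_PIC1_DATA,    0x01); // ICW4: 8086/88 mode (assumed, not shown above)
    asm_outb(PORT_PIC1_DATA,    0x00); // OCW1: unmask all IRQ lines
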
+ 178 - 244
src/kernel/mem.c

@@ -11,38 +11,20 @@
 
 // static variables
 
-static page_directory_entry* kernel_pd;
+static struct mm kernel_mm;
 
 // ---------------------
 
 // constant values
 
-#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x6000)
-#define EMPTY_PAGE_END ((phys_ptr_t)0x7000)
+#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
+#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
 
 // ---------------------
 
 // forward declarations
 static page_t alloc_page(void);
 
-// map n pages from p_ptr to v_ptr
-// p_ptr and v_ptr needs to be 4kb-aligned
-static int p_map(
-    struct mm* mm_area,
-    phys_ptr_t p_ptr,
-    virt_ptr_t v_ptr,
-    size_t n,
-    int rw,
-    int priv);
-
-// map n bytes identically
-static inline int _ident_map(
-    struct mm* mm_area,
-    phys_ptr_t p_ptr,
-    size_t n,
-    int rw,
-    int priv);
-
 // map page to the end of mm_area in pd
 int k_map(
     struct mm* mm_area,
@@ -58,9 +40,6 @@ static void* p_start;
 static void* p_break;
 static segment_descriptor* gdt;
 
-// temporary
-static struct tss32_t _tss;
-
 static size_t mem_size;
 static char mem_bitmap[1024 * 1024 / 8];
 
@@ -233,6 +212,41 @@ static inline pd_i_t phys_addr_to_pd_i(phys_ptr_t ptr)
     return page_to_pd_i(phys_addr_to_page(ptr));
 }
 
+static inline pd_i_t phys_addr_to_pt_i(phys_ptr_t ptr)
+{
+    return page_to_pt_i(phys_addr_to_page(ptr));
+}
+
+void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
+{
+    if (p_ptr <= 0x30000000) {
+        // memory below 768MiB is identically mapped
+        return (void*)p_ptr;
+    } else {
+        // TODO: address translation
+        return (void*)0xffffffff;
+    }
+}
+
+phys_ptr_t v_ptr_to_p_ptr(struct mm* mm, void* v_ptr)
+{
+    if (mm == &kernel_mm && v_ptr < 0x30000000) {
+        return (phys_ptr_t)v_ptr;
+    }
+    while (mm != NULL) {
+        if (v_ptr < mm->start || v_ptr >= mm->start + mm->len * 4096) {
+            goto next;
+        }
+        size_t offset = (size_t)(v_ptr - mm->start);
+        return page_to_phys_addr(mm->pgs[offset / 4096].phys_page_id) + (offset % 4096);
+    next:
+        mm = mm->next;
+    }
+
+    // TODO: handle error
+    return 0xffffffff;
+}
+
 static inline void mark_page(page_t n)
 {
     bm_set(mem_bitmap, n);
@@ -284,153 +298,20 @@ static page_t alloc_page(void)
     return GB_FAILED;
 }
 
-static inline void create_pd(page_directory_entry* pde)
-{
-    for (int i = 0; i < 1024; ++i) {
-        pde->v = 0;
-        ++pde;
-    }
-}
-
-static inline void make_page_table(page_directory_entry* pd, page_t p)
+static inline void make_page_table(page_table_entry* pt)
 {
-    phys_ptr_t pp_pt = page_to_phys_addr(p);
-
-    page_table_entry* pt = (page_table_entry*)pp_pt;
-
     memset(pt, 0x00, sizeof(page_table_entry) * 1024);
 }
 
-static inline void do_map()
-{
-}
-
-// map n pages from p_ptr to v_ptr
-// p_ptr and v_ptr needs to be 4kb-aligned
-static int p_map(
-    struct mm* mm_area,
-    phys_ptr_t p_ptr,
-    virt_ptr_t v_ptr,
-    size_t n,
-    int rw,
-    int priv)
-{
-    page_directory_entry* pd = mm_area->pd;
-    // pages to be mapped
-    page_t v_page_start = phys_addr_to_page(v_ptr);
-    page_t v_page_end = v_page_start + n;
-
-    for (pd_i_t pde_index = page_to_pd_i(v_page_start); pde_index <= page_to_pd_i(v_page_end); ++pde_index) {
-        // page table not present
-        if (pd[pde_index].in.p != 1) {
-            page_t p_page = alloc_page();
-
-            pd[pde_index].in.p = 1;
-            pd[pde_index].in.a = 0;
-            pd[pde_index].in.rw = 1;
-            pd[pde_index].in.addr = p_page;
-            // set the page table address first in the pde
-            // before making page table since the function
-            // calls p_map recursively and the p_map call
-            // requires the pde to find it's destination
-
-            make_page_table(pd, p_page);
-
-            _ident_map(
-                pd,
-                page_to_phys_addr(p_page),
-                sizeof(page_table_entry) * 1024, 1, 1);
-        }
-    }
-
-    for (size_t i = 0; i < n; ++i) {
-        page_t v_page = v_page_start + i;
-        pd_i_t pd_i = page_to_pd_i(v_page);
-        page_table_entry* pt = (page_table_entry*)page_to_phys_addr(pd[pd_i].in.addr);
-        pt += page_to_pt_i(v_page);
-
-        if (pt->in.p == 1) {
-            errno = EEXIST;
-            return GB_FAILED;
-        }
-        pt->in.p = 1;
-        pt->in.rw = (rw == 1);
-        pt->in.us = !(priv == 1);
-        pt->in.a = 0;
-        pt->in.d = 0;
-
-        pt->in.addr = phys_addr_to_page(p_ptr) + i;
-    }
-
-    return GB_OK;
-}
-
-// map n bytes identically
-static inline int nk_map(
-    struct mm* mm_area,
-    phys_ptr_t p_ptr,
-    size_t n,
-    int rw,
-    int priv)
-{
-    struct mm* p_area = mm_area;
-    while (p_area != NULL) {
-        if (p_ptr >= p_area->start && p_ptr < p_area->start + p_area->len * 4096) {
-            break;
-        }
-    }
-
-    // area does not exist
-    if (p_area == NULL) {
-    }
-
-    return p_n_map(
-        mm_area,
-        p_ptr,
-        p_ptr,
-        n,
-        rw,
-        priv);
-}
-
-static inline int _create_kernel_pd(void)
-{
-    create_pd(kernel_pd);
-
-    int result = 0;
-
-    result |= _p_ident_n_map(kernel_pd,
-        kernel_pd,
-        4096, 1, 1);
-
-    result |= _p_ident_n_map(kernel_pd,
-        EMPTY_PAGE_ADDR,
-        4096, 0, 0);
-
-    result |= _p_ident_n_map(kernel_pd,
-        (0x00080000),
-        (0xfffff - 0x80000 + 1), 1, 1);
-
-    result |= _p_ident_n_map(kernel_pd,
-        KERNEL_START_ADDR,
-        kernel_size, 1, 1);
-
-    result |= _p_ident_n_map(kernel_pd,
-        KERNEL_EARLY_STACK_ADDR - KERNEL_EARLY_STACK_SIZE,
-        KERNEL_EARLY_STACK_SIZE, 1, 1);
-
-    return result;
-}
-
-static void init_mem_layout(void)
+static inline void init_mem_layout(void)
 {
     mem_size = 1024 * mem_size_info.n_1k_blks;
     mem_size += 64 * 1024 * mem_size_info.n_64k_blks;
 
     // mark kernel page directory
-    mark_addr_range(0x00000000, 0x00006000);
+    mark_addr_range(0x00000000, 0x00005000);
     // mark empty page
-    mark_addr_len(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
+    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
     // mark EBDA and upper memory as allocated
     mark_addr_range(0x80000, 0xfffff);
     // mark kernel
@@ -453,8 +334,35 @@ static void init_mem_layout(void)
     }
 }
 
-static struct mm* k_mm_head;
-static struct mm first_mm;
+static void _map_raw_page_to_addr(
+    struct mm* mm_area,
+    page_t page,
+    int rw,
+    int priv)
+{
+    // although it's NOT a physical address, we treat it as one
+    phys_ptr_t addr = (phys_ptr_t)mm_area->start + mm_area->len * 4096;
+    page_directory_entry* pde = mm_area->pd + phys_addr_to_pd_i(addr);
+    // page table does not exist yet
+    if (!pde->in.p) {
+        // allocate a page for the page table
+        pde->in.p = 1;
+        pde->in.rw = 1;
+        pde->in.us = 0;
+        pde->in.pt_page = alloc_page();
+
+        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
+    }
+
+    // map the page in the page table
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    pte += phys_addr_to_pt_i(addr);
+    // set P bit
+    pte->v = 0x00000001;
+    pte->in.rw = (rw == 1);
+    pte->in.us = !(priv == 1);
+    pte->in.page = page;
+}
 
 // map page to the end of mm_area in pd
 int k_map(
@@ -466,111 +374,137 @@ int k_map(
     int cow)
 {
     struct page* p_page_end = mm_area->pgs;
-    while (p_page_end != NULL)
+    while (p_page_end != NULL && p_page_end->next != NULL)
         p_page_end = p_page_end->next;
 
-    struct page* new_page = k_malloc(sizeof(struct page));
-    new_page->attr.read = (read == 1);
-    new_page->attr.write = (write == 1);
-    new_page->attr.system = (priv == 1);
-    new_page->phys_page_id = page->phys_page_id;
+    if (cow) {
+        // find its ancestor
+        while (page->attr.cow)
+            page = page->next;
 
-    new_page->next = NULL;
-    p_page_end->next = new_page;
+        // create a new page node
+        struct page* new_page = k_malloc(sizeof(struct page));
 
-    if (cow) {
+        new_page->attr.read = (read == 1);
+        new_page->attr.write = (write == 1);
+        new_page->attr.system = (priv == 1);
         new_page->attr.cow = 1;
+        // TODO: move *next out of struct page
+        new_page->next = NULL;
+
+        new_page->phys_page_id = page->phys_page_id;
         new_page->ref_count = page->ref_count;
-        ++page->ref_count;
+
+        if (p_page_end != NULL)
+            p_page_end->next = new_page;
+        else
+            mm_area->pgs = new_page;
     } else {
-        new_page->attr.cow = 0;
-        *new_page->ref_count = 1;
-        p_map(
-            mm_area, page_to_phys_addr(new_page->phys_page_id),
-            mm_area->start + 4096 * mm_area->len,
-            1,
-            (write && !cow),
-            priv);
+        page->attr.read = (read == 1);
+        page->attr.write = (write == 1);
+        page->attr.system = (priv == 1);
+        page->attr.cow = 0;
+        // TODO: move *next out of struct page
+        page->next = NULL;
+
+        if (p_page_end != NULL)
+            p_page_end->next = page;
+        else
+            mm_area->pgs = page;
     }
+    _map_raw_page_to_addr(
+        mm_area,
+        page->phys_page_id,
+        (write && !cow),
+        priv);
 
     ++mm_area->len;
+    ++*page->ref_count;
+    return GB_OK;
 }
 
-void init_paging(void)
-{
-    init_mem_layout();
-
-    // create initial struct mms
-    memset(&first_mm, 0x00, sizeof(struct mm));
-
-    first_mm.attr.read = 1;
-    first_mm.attr.write = 1;
-    first_mm.attr.system = 1;
-    first_mm.start = 0x30000000;
-    first_mm.len = 1;
-    first_mm.next = NULL;
-
-    page_t init_mm_page = alloc_page();
-    p_map(KERNEL_EARLY_PAGE_DIRECTORY_ADDR, page_to_phys_addr(init_mm_page), KERNEL_HEAP_START, 1, 1, 1);
-    init_heap();
-
-    first_mm.pgs = (struct page*)k_malloc(sizeof(struct page));
-
-    first_mm.pgs->attr.read = 1;
-    first_mm.pgs->attr.write = 1;
-    first_mm.pgs->attr.system = 1;
-    first_mm.pgs->attr.cow = 0;
-    first_mm.pgs->phys_page_id = init_mm_page;
-    first_mm.pgs->ref_count = (size_t*)k_malloc(sizeof(size_t));
-    first_mm.pgs->next = NULL;
-
-    k_mm_head = &first_mm;
-
-    if (_create_kernel_pd() != GB_OK) {
-        asm_cli();
-        asm_hlt();
+// map a page identically
+// this function is only meant to be used in the initialization process
+// it checks the pde's P bit, which is set in advance of the recursive call
+// below, so the recursion cannot loop forever
+static inline void _init_map_page_identically(page_t page)
+{
+    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);
+    // page table does not exist yet
+    if (!pde->in.p) {
+        // allocate a page for the page table
+        // set the P bit of the pde in advance
+        pde->in.p = 1;
+        pde->in.rw = 1;
+        pde->in.us = 0;
+        pde->in.pt_page = alloc_page();
+        _init_map_page_identically(pde->in.pt_page);
+
+        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
     }
 
-    asm_enable_paging(kernel_pd);
+    // map the page in the page table
+    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    pt += page_to_pt_i(page);
+    pt->v = 0x00000003;
+    pt->in.page = page;
 }
 
-static inline void
-set_segment_descriptor(
-    segment_descriptor* sd,
-    uint32_t base,
-    uint32_t limit,
-    uint8_t access,
-    uint8_t flags)
+static inline void init_paging_map_low_mem_identically(void)
 {
-    sd->access = access;
-    sd->flags = flags;
-    sd->base_low = base;
-    sd->base_mid = base >> 16;
-    sd->base_high = base >> 24;
-    sd->limit_low = limit;
-    sd->limit_high = limit >> 16;
+    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
+        // skip pages already marked as used in mem_bitmap
+        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
+            continue;
+        _init_map_page_identically(phys_addr_to_page(addr));
+    }
 }
 
-void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss)
+struct mm* mm_head;
+static struct page empty_page;
+static struct page heap_first_page;
+static size_t heap_first_page_ref_count;
+
+void init_mem(void)
 {
-    gdt = k_malloc(sizeof(segment_descriptor) * 6);
-    // since the size in the struct is an OFFSET
-    // it needs to be added one to get its real size
-    uint16_t asm_gdt_size = (asm_gdt_descriptor.size + 1) / 8;
-    segment_descriptor* asm_gdt = (segment_descriptor*)asm_gdt_descriptor.address;
+    init_mem_layout();
 
-    for (int i = 0; i < asm_gdt_size; ++i) {
-        gdt[i] = asm_gdt[i];
-    }
+    // map the 16MiB-768MiB identically
+    init_paging_map_low_mem_identically();
+
+    mm_head = &kernel_mm;
+
+    kernel_mm.attr.read = 1;
+    kernel_mm.attr.write = 1;
+    kernel_mm.attr.system = 1;
+    kernel_mm.len = 0;
+    kernel_mm.next = NULL;
+    kernel_mm.pd = KERNEL_PAGE_DIRECTORY_ADDR;
+    kernel_mm.pgs = NULL;
+    kernel_mm.start = KERNEL_HEAP_START;
 
-    set_segment_descriptor(gdt + 5, (uint32_t)&_tss, sizeof(struct tss32_t), SD_TYPE_TSS, 0b0000);
+    heap_first_page.attr.cow = 0;
+    heap_first_page.attr.read = 1;
+    heap_first_page.attr.write = 1;
+    heap_first_page.attr.system = 1;
+    heap_first_page.next = NULL;
+    heap_first_page.phys_page_id = alloc_page();
+    heap_first_page.ref_count = &heap_first_page_ref_count;
 
-    _tss.esp0 = (uint32_t)kernel_esp;
-    _tss.ss0 = kernel_ss;
+    *heap_first_page.ref_count = 0;
+
+    k_map(mm_head, &heap_first_page, 1, 1, 1, 0);
+
+    init_heap();
 
-    // +1 for enabling interrupt
-    asm_load_gdt(((6 * sizeof(segment_descriptor) - 1) << 16) + 1, (uint32_t)gdt);
-    asm_load_tr((6 - 1) * 8);
+    // create empty_page struct
+    empty_page.attr.cow = 0;
+    empty_page.attr.read = 1;
+    empty_page.attr.write = 0;
+    empty_page.attr.system = 0;
+    empty_page.next = NULL;
+    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
+    empty_page.ref_count = (size_t*)k_malloc(sizeof(size_t));
 }
 
 void create_segment_descriptor(

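Two notes on the new mem.c code above. init_paging_map_low_mem_identically() walks from 0x01000000 to 0x30000000 in 4 KiB steps, i.e. it considers (0x30000000 - 0x01000000) / 0x1000 = 0x2f000 = 192512 candidate pages, skipping those already marked in mem_bitmap; together with the 16 MiB mapped in boot.s this yields the identity-mapped low 768 MiB that p_ptr_to_v_ptr() relies on. A small usage sketch of the two translation helpers (addresses are illustrative; the physical address returned for a heap pointer depends on which page alloc_page() handed out):

    // physical -> linear: below 768 MiB the mapping is identical
    void* v = p_ptr_to_v_ptr((phys_ptr_t)0x00100000);      // yields (void*)0x00100000
    // linear -> physical: heap addresses are resolved through the mm chain
    phys_ptr_t p = v_ptr_to_p_ptr(mm_head, KERNEL_HEAP_START);
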
+ 8 - 15
src/kernel_main.c

@@ -150,36 +150,29 @@ void kernel_main(void)
     asm_enable_sse();
     INIT_OK();
 
-    EVE_START("rebuilding page table");
-    init_paging();
-    INIT_OK();
-
-    INIT_START("IDT");
+    INIT_START("exception handlers");
     init_idt();
-    init_pit();
     INIT_OK();
 
-    INIT_START("heap space");
-    if (init_heap() != GB_OK) {
-        INIT_FAILED();
-        halt_on_init_error();
-    }
+    INIT_START("memory allocation");
+    init_mem();
     INIT_OK();
 
     INIT_START("C++ global objects");
     call_constructors_for_cpp();
     INIT_OK();
 
+    INIT_START("programmable interrupt controller and timer");
+    init_pic();
+    init_pit();
+    INIT_OK();
+
     printkf("Testing k_malloc...\n");
     char* k_malloc_buf = (char*)k_malloc(sizeof(char) * 128);
     snprintf(k_malloc_buf, 128, "This text is printed on the heap!\n");
     tty_print(console, k_malloc_buf);
     k_free(k_malloc_buf);
 
-    void* kernel_stack = k_malloc(KERNEL_STACK_SIZE);
-    init_gdt_with_tss(kernel_stack + KERNEL_STACK_SIZE - 1, KERNEL_STACK_SEGMENT);
-    printkf("new GDT and TSS loaded\n");
-
     printkf("No work to do, halting...\n");
 
     while (1) {