Browse Source

memory allocation

greatbridf 2 years ago
parent
commit
2eaf205b8a
12 changed files with 409 additions and 175 deletions
  1. 3 0
      CMakeLists.txt
  2. 42 0
      doc/malloc.md
  3. 2 2
      include/asm/boot.h
  4. 2 0
      include/asm/sys.h
  5. 38 9
      include/kernel/mem.h
  6. 15 0
      include/kernel/process.h
  7. 3 4
      ldscript.ld
  8. 6 0
      src/asm/sys.s
  9. 67 6
      src/boot.s
  10. 221 143
      src/kernel/mem.c
  11. 0 0
      src/kernel/process.c
  12. 10 11
      src/kernel_main.c

+ 3 - 0
CMakeLists.txt

@@ -42,6 +42,7 @@ include_directories(${PROJECT_SOURCE_DIR}/include)
 set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         src/kernel/errno.c
                         src/kernel/interrupt.c
+                        src/kernel/process.c
                         src/kernel/tty.c
                         src/kernel/stdio.c
                         src/kernel/mem.c
@@ -59,6 +60,7 @@ set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         include/kernel/errno.h
                         include/kernel/tty.h
                         include/kernel/interrupt.h
+                        include/kernel/process.h
                         include/kernel/stdio.h
                         include/kernel/mem.h
                         include/kernel/vga.h
@@ -86,6 +88,7 @@ add_custom_command(OUTPUT extracted_kernel_main
 add_custom_target(kernel.out
     DEPENDS extracted_bootloader
     DEPENDS extracted_kernel_main
+    DEPENDS ${CMAKE_SOURCE_DIR}/ldscript.ld
     COMMAND ${CMAKE_LINKER} -T ${CMAKE_SOURCE_DIR}/ldscript.ld ${EXTRACT_DIR}/*.o
     -melf_i386 -o ${CMAKE_BINARY_DIR}/kernel.out
 )

+ 42 - 0
doc/malloc.md

@@ -1,3 +1,45 @@
 [参考链接1](https://blog.codinglabs.org/articles/a-malloc-tutorial.html#212-%E9%A1%B5%E4%B8%8E%E5%9C%B0%E5%9D%80%E6%9E%84%E6%88%90)
 [malloc的glibc实现](https://repo.or.cz/glibc.git/blob/HEAD:/malloc/malloc.c)
 [How the kernel manage your memory](https://manybutfinite.com/post/how-the-kernel-manages-your-memory/)
+
+## 地址空间分区
+从0x00000000到0x3fffffff为内核空间
+
+从0x40000000到0xffffffff为用户空间
+
+### 内核空间:
+0x00000000到0x2fffffff为动态映射区
+0x30000000到0x3fffffff为永久映射区,这个区域的内存在与物理页进行映射后不会被交换出地址空间
+
+## 物理内存分配
+0x00000000-0x00000fff:内核页目录
+0x00001000-0x00001fff:空白页
+
+## 大致思路:
+每个进程(包括内核)拥有一个struct mm,用于记录自身的虚拟地址映射状况
+
+struct mm拥有struct page的链表,对应虚拟地址实际映射的物理页
+
+struct mm的项目还可包括copy on write的页
+
+发生缺页中断时,内核通过该触发中断的进程的struct mm检查页权限、是否需要复制页等
+
+若因权限不足而触发中断,则内核中断用户进程执行,或内核panic
+
+若因页已被交换出内存,则内核将页换回内存,继续进程执行
+
+若因页为copy on write页,写入时触发中断,则将页复制一份,继续进程执行
+
+### 内核提供几个接口
+1. alloc_page从页位图中找出未被使用的物理页返回
+2. p_map用于将物理页映射到指定的虚拟地址
+3. kmap用于给出物理页,将其映射到一个未被该进程使用的虚拟地址
+
+### 分配内存
+通过kmap将空白页映射到某虚拟地址上,并开启copy on write,
+随后则可以直接对该页进行读写,写入时内核中断自动进行页的分配
+
+因此,换页中断的处理非常重要
+
+## 注
+分页相关的内存数据结构应始终被映射到永久映射区,并且该映射应在页表创建时被完成

+ 2 - 2
include/asm/boot.h

@@ -2,8 +2,8 @@
 
 #include <types/stdint.h>
 
-#define KERNEL_EARLY_STACK_ADDR ((phys_ptr_t)0x03fffff0)
-#define KERNEL_EARLY_STACK_SIZE ((size_t) 0xfffff0)
+#define KERNEL_EARLY_STACK_ADDR ((phys_ptr_t)0x01000000)
+#define KERNEL_EARLY_STACK_SIZE ((size_t)0x100000)
 
 struct __attribute__((__packed__)) gdt_descriptor {
     uint16_t size;

+ 2 - 0
include/asm/sys.h

@@ -9,6 +9,8 @@ extern "C" {
 
 void asm_enable_paging(page_directory_entry* pd_addr);
 
+phys_ptr_t current_pd(void);
+
 // the limit should be set on the higher 16bit
 // e.g. (n * sizeof(segment_descriptor) - 1) << 16
 // the lower bit off the limit is either 0 or 1

+ 38 - 9
include/kernel/mem.h

@@ -1,5 +1,6 @@
 #pragma once
 
+#include <types/size.h>
 #include <types/stdint.h>
 
 #ifdef __cplusplus
@@ -30,9 +31,8 @@ extern uint32_t e820_mem_map_entry_size;
 extern uint32_t kernel_size;
 extern struct mem_size_info mem_size_info;
 
-// TODO: decide heap start address according
-//   to user's memory size
-#define HEAP_START ((void*)0x01000000)
+#define KERNEL_HEAP_START ((void*)0x30000000)
+#define KERNEL_HEAP_LIMIT ((void*)0x40000000)
 
 struct mem_blk_flags {
     uint8_t is_free;
@@ -49,6 +49,35 @@ struct mem_blk {
     uint8_t data[4];
 };
 
+struct page_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;   // kernel-only (supervisor) mapping
+    uint32_t cow : 1;      // copy-on-write: shared until first write
+};
+
+// one physical page mapped into an address space; ref_count is
+// shared between all struct page instances referencing the same
+// physical page (used by the cow path in k_map)
+struct page {
+    page_t phys_page_id;   // physical page index (see page_to_phys_addr)
+    size_t* ref_count;     // shared reference count for the physical page
+    struct page_attr attr;
+    struct page* next;     // next page in the owning mm's list
+};
+
+struct mm_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;
+};
+
+// a contiguous virtual memory area: starts at `start`, spans
+// `len` pages, backed by the physical pages listed in `pgs`
+struct mm {
+    virt_ptr_t start;
+    size_t len;                // length in pages, not bytes
+    struct mm_attr attr;
+    struct page* pgs;          // list of mapped physical pages
+    struct mm* next;           // next area in the address space
+    page_directory_entry* pd;  // page directory this area belongs to
+};
+
 int init_heap(void);
 
 void* k_malloc(size_t size);
@@ -119,7 +148,7 @@ typedef union page_table_entry {
     struct page_table_entry_in in;
 } page_table_entry;
 
-#define KERNEL_PAGE_DIRECTORY_ADDR ((page_directory_entry*)0x00000000)
+#define KERNEL_EARLY_PAGE_DIRECTORY_ADDR ((page_directory_entry*)0x00000000)
 
 void init_paging(void);
 
@@ -143,11 +172,11 @@ typedef struct segment_descriptor_struct {
 
 void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss);
 void create_segment_descriptor(
-        segment_descriptor* sd,
-        uint32_t base,
-        uint32_t limit,
-        uint32_t flags,
-        uint32_t access);
+    segment_descriptor* sd,
+    uint32_t base,
+    uint32_t limit,
+    uint32_t flags,
+    uint32_t access);
 
 #ifdef __cplusplus
 }

+ 15 - 0
include/kernel/process.h

@@ -0,0 +1,15 @@
+#pragma once
+
+#include <kernel/mem.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct process {
+    struct mm* mm;
+};
+
+#ifdef __cplusplus
+}
+#endif

+ 3 - 4
ldscript.ld

@@ -34,7 +34,7 @@ SECTIONS
         *(.rodata*)
     } > WHOLE
 
-    .data :
+    .data : AT(LOADADDR(.text) + ADDR(.data) - ADDR(.text))
     {
         asm_kernel_size = .;
         LONG(__real_kernel_end - ADDR(.text));
@@ -98,10 +98,9 @@ SECTIONS
         BYTE(0x00);
     } > WHOLE
 
-/*
     /DISCARD/ :
     {
-        *(.comment)
+        *(.fini_array*)
+        *(.eh_frame*)
     }
-*/
 }

+ 6 - 0
src/asm/sys.s

@@ -16,6 +16,12 @@ asm_enable_paging:
 
     ret
 
+.global current_pd
+.type   current_pd @function
+# phys_ptr_t current_pd(void);
+# returns the raw value of %cr3 (the physical address of the
+# current page directory) in %eax, per the cdecl convention
+current_pd:
+    movl %cr3, %eax
+    ret
+
 .global asm_load_gdt
 .type   asm_load_gdt @function
 asm_load_gdt:

+ 67 - 6
src/boot.s

@@ -157,13 +157,74 @@ start_32bit:
     movw %ax, %gs
     movw %ax, %ss
 
-# set up stack
-# in order to align 16 byte
-# set stack base address at
-# 0x003ffff0
-    movl $0x03fffff0, %ebp
-    movl $0x03fffff0, %esp
+# set up early stack at 0x01000000
+    movl $0x01000000, %ebp
+    movl $0x01000000, %esp
+
+setup_early_kernel_page_table:
+# set up early kernel page table
+
+# the early kernel page directory is located at physical
+# address 0x00000000, size 4KiB; together with the four page
+# tables following it we zero-fill the first 20KiB (0x5000)
+    movl $0x00000000, %eax
+    movl $0x5000, %ecx
+    call _fill_zero
+
+# map the first 16MiB identically
+# 0x0000-0x0fff: early kernel pd
+# 0x1000-0x4fff: page tables for pde 0 - 3
+    movl $0x00000000, %eax
+    movl $0x00001003, %ebx
+_fill_pde_loop:
+    movl %ebx, (%eax)
+    addl $4, %eax
+    addl $0x1000, %ebx
+    cmpl $0x5003, %ebx
+    jne _fill_pde_loop
+
+# then, create page tables
+    movl $0x00000003, %eax
+    movl $0x00001000, %ecx
+
+_create_page_table_loop1:
+    movl %eax, (%ecx)
+    addl $4, %ecx
+    addl $0x1000, %eax
+    cmpl $0x4ffc, %ecx
+    jle _create_page_table_loop1
+
+load_early_kernel_page_table:
+    movl $0x00000000, %eax
+    movl %eax, %cr3
+
+    movl %cr0, %eax
+    orl $0x80000001, %eax
+    movl %eax, %cr0
+
+    jmp start_move_kernel
+
+# quick call
+# %eax: address to fill
+# %ecx: byte count to fill (rounded up to a multiple of 4)
+# both registers are preserved across the call
+_fill_zero:
+    # interrupts are disabled during early boot, so stashing
+    # the caller's registers below %esp is safe here
+    movl %ecx, -4(%esp)
+    movl %eax, -8(%esp)
+
+_fill_zero_loop:
+    cmpl $0, %ecx
+    # jle, not jz: a count that is not a multiple of 4 would
+    # step over zero and make a jz-terminated loop run forever
+    jle _fill_zero_end
+    subl $4, %ecx
+    movl $0, (%eax)
+    addl $4, %eax
+    jmp _fill_zero_loop
+
+_fill_zero_end:
+    movl -8(%esp), %eax
+    movl -4(%esp), %ecx
+    ret
 
+start_move_kernel:
+# move the kernel to 0x100000
     movl $__loader_end, %eax
     movl $__real_kernel_start, %ebx
 

+ 221 - 143
src/kernel/mem.c

@@ -9,6 +9,51 @@
 #include <kernel_main.h>
 #include <types/bitmap.h>
 
+// static variables
+
+static page_directory_entry* kernel_pd;
+
+// ---------------------
+
+// constant values
+
+#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x6000)
+#define EMPTY_PAGE_END ((phys_ptr_t)0x7000)
+
+// ---------------------
+
+// forward declarations
+static page_t alloc_page(void);
+
+// map n pages from p_ptr to v_ptr
+// p_ptr and v_ptr needs to be 4kb-aligned
+static int p_map(
+    struct mm* mm_area,
+    phys_ptr_t p_ptr,
+    virt_ptr_t v_ptr,
+    size_t n,
+    int rw,
+    int priv);
+
+// map n bytes identically
+static inline int _ident_map(
+    struct mm* mm_area,
+    phys_ptr_t p_ptr,
+    size_t n,
+    int rw,
+    int priv);
+
+// map page to the end of mm_area in pd
+int k_map(
+    struct mm* mm_area,
+    struct page* page,
+    int read,
+    int write,
+    int priv,
+    int cow);
+
+// ---------------------
+
 static void* p_start;
 static void* p_break;
 static segment_descriptor* gdt;
@@ -27,6 +72,9 @@ static int32_t set_heap_start(void* start_addr)
 
 static int32_t brk(void* addr)
 {
+    if (addr >= KERNEL_HEAP_LIMIT) {
+        return GB_FAILED;
+    }
     p_break = addr;
     return 0;
 }
@@ -45,12 +93,9 @@ static void* sbrk(size_t increment)
 
 int init_heap(void)
 {
-    // start of the available address space
-    // TODO: adjust heap start address
-    //   according to user's memory size
-    set_heap_start(HEAP_START);
+    set_heap_start(KERNEL_HEAP_START);
 
-    if (brk(HEAP_START) != 0) {
+    if (brk(KERNEL_HEAP_START) != 0) {
         return GB_FAILED;
     }
     struct mem_blk* p_blk = sbrk(0);
@@ -175,7 +220,7 @@ static inline pd_i_t page_to_pd_i(page_t p)
 
 static inline pt_i_t page_to_pt_i(page_t p)
 {
-    return p & (1024-1);
+    return p & (1024 - 1);
 }
 
 static inline phys_ptr_t page_to_phys_addr(page_t p)
@@ -200,18 +245,20 @@ static inline void free_page(page_t n)
 
 static void mark_addr_len(phys_ptr_t start, size_t n)
 {
-    if (n == 0) return;
+    if (n == 0)
+        return;
     page_t start_page = phys_addr_to_page(start);
-    page_t end_page   = phys_addr_to_page(start + n + 4095);
+    page_t end_page = phys_addr_to_page(start + n + 4095);
     for (page_t i = start_page; i < end_page; ++i)
         mark_page(i);
 }
 
 static void free_addr_len(phys_ptr_t start, size_t n)
 {
-    if (n == 0) return;
+    if (n == 0)
+        return;
     page_t start_page = phys_addr_to_page(start);
-    page_t end_page   = phys_addr_to_page(start + n + 4095);
+    page_t end_page = phys_addr_to_page(start + n + 4095);
     for (page_t i = start_page; i < end_page; ++i)
         free_page(i);
 }
@@ -226,7 +273,7 @@ static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
     free_addr_len(start, end - start);
 }
 
-static int alloc_page(void)
+static page_t alloc_page(void)
 {
     for (page_t i = 0; i < 1024 * 1024; ++i) {
         if (bm_test(mem_bitmap, i) == 0) {
@@ -237,55 +284,14 @@ static int alloc_page(void)
     return GB_FAILED;
 }
 
-// allocate ONE whole page
-static phys_ptr_t _k_p_malloc(void)
-{
-    return page_to_phys_addr(alloc_page());
-}
-
-static void _k_p_free(phys_ptr_t ptr)
-{
-    free_page(phys_addr_to_page(ptr));
-}
-
 static inline void create_pd(page_directory_entry* pde)
 {
-    for (int i = 0; i < 1024; ++i)
-    {
+    for (int i = 0; i < 1024; ++i) {
         pde->v = 0;
         ++pde;
     }
 }
 
-static page_directory_entry* _kernel_pd = KERNEL_PAGE_DIRECTORY_ADDR;
-
-// map n pages from p_ptr to v_ptr
-// p_ptr and v_ptr needs to be 4kb-aligned
-static int p_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv);
-
-// map n pages
-static inline int p_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv);
-
-// map n bytes identically
-static inline int _p_ident_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        size_t n,
-        int rw,
-        int priv);
-
 static inline void make_page_table(page_directory_entry* pd, page_t p)
 {
     phys_ptr_t pp_pt = page_to_phys_addr(p);
@@ -293,51 +299,57 @@ static inline void make_page_table(page_directory_entry* pd, page_t p)
     page_table_entry* pt = (page_table_entry*)pp_pt;
 
     memset(pt, 0x00, sizeof(page_table_entry) * 1024);
+}
 
-    _p_ident_n_map(
-            pd,
-            pp_pt,
-            sizeof(page_table_entry) * 1024, 1, 1
-            );
+static inline void do_map()
+{
 }
 
 // map n pages from p_ptr to v_ptr
 // p_ptr and v_ptr needs to be 4kb-aligned
 static int p_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv)
-{
+    struct mm* mm_area,
+    phys_ptr_t p_ptr,
+    virt_ptr_t v_ptr,
+    size_t n,
+    int rw,
+    int priv)
+{
+    page_directory_entry* pd = mm_area->pd;
     // pages to be mapped
     page_t v_page_start = phys_addr_to_page(v_ptr);
-    page_t v_page_end   = v_page_start + n;
+    page_t v_page_end = v_page_start + n;
 
-    for (pd_i_t pde_index = page_to_pd_i(v_page_start); pde_index <= page_to_pd_i(v_page_end); ++pde_index)
-    {
+    for (pd_i_t pde_index = page_to_pd_i(v_page_start); pde_index <= page_to_pd_i(v_page_end); ++pde_index) {
         // page table not present
-        if (pd[pde_index].in.p != 1)
-        {
+        if (pd[pde_index].in.p != 1) {
+            page_t p_page = alloc_page();
+
             pd[pde_index].in.p = 1;
             pd[pde_index].in.a = 0;
             pd[pde_index].in.rw = 1;
-            page_t p_page = alloc_page();
             pd[pde_index].in.addr = p_page;
+            // set the page table address first in the pde
+            // before making page table since the function
+            // calls p_map recursively and the p_map call
+            // requires the pde to find its destination
+
             make_page_table(pd, p_page);
+
+            _ident_map(
+                pd,
+                page_to_phys_addr(p_page),
+                sizeof(page_table_entry) * 1024, 1, 1);
         }
     }
 
-    for (size_t i = 0; i < n; ++i)
-    {
+    for (size_t i = 0; i < n; ++i) {
         page_t v_page = v_page_start + i;
         pd_i_t pd_i = page_to_pd_i(v_page);
-        page_table_entry* pt = (page_table_entry*) page_to_phys_addr(pd[pd_i].in.addr);
+        page_table_entry* pt = (page_table_entry*)page_to_phys_addr(pd[pd_i].in.addr);
         pt += page_to_pt_i(v_page);
 
-        if (pt->in.p == 1)
-        {
+        if (pt->in.p == 1) {
             errno = EEXIST;
             return GB_FAILED;
         }
@@ -353,62 +365,60 @@ static int p_map(
     return GB_OK;
 }
 
-// map n pages
-static inline int p_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv)
-{
-    return p_map(
-            pd,
-            p_ptr,
-            v_ptr,
-            (n + 4096 - 1) >> 12,
-            rw,
-            priv
-            );
-}
-
 // map n bytes identically
-static inline int _p_ident_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        size_t n,
-        int rw,
-        int priv)
-{
+// map n bytes identically (physical address == virtual address)
+// into the mm area that covers p_ptr
+// returns GB_OK on success, GB_FAILED if no area covers p_ptr
+static inline int nk_map(
+    struct mm* mm_area,
+    phys_ptr_t p_ptr,
+    size_t n,
+    int rw,
+    int priv)
+{
+    // walk the area list for the one containing p_ptr; the
+    // previous version never advanced p_area and spun forever
+    struct mm* p_area = mm_area;
+    while (p_area != NULL) {
+        if (p_ptr >= p_area->start && p_ptr < p_area->start + p_area->len * 4096)
+            break;
+        p_area = p_area->next;
+    }
+
+    // area does not exist: fail instead of falling through
+    if (p_area == NULL) {
+        return GB_FAILED;
+    }
+
+    // p_map takes a page count, so round the byte count up
+    return p_map(
+        p_area,
+        p_ptr,
+        p_ptr,
+        (n + 4096 - 1) >> 12,
+        rw,
+        priv);
+}
 
 static inline int _create_kernel_pd(void)
 {
-    create_pd(_kernel_pd);
+    create_pd(kernel_pd);
 
     int result = 0;
 
-    result |= _p_ident_n_map(_kernel_pd,
-            (phys_ptr_t)KERNEL_PAGE_DIRECTORY_ADDR,
-            sizeof(page_directory_entry) * 1024, 1, 1);
-    result |= _p_ident_n_map(_kernel_pd,
-            (0x00080000),
-            (0xfffff - 0x80000 + 1), 1, 1);
-    result |= _p_ident_n_map(_kernel_pd,
-            KERNEL_START_ADDR,
-            kernel_size, 1, 1);
-    result |= _p_ident_n_map(_kernel_pd,
-            KERNEL_EARLY_STACK_ADDR - KERNEL_EARLY_STACK_SIZE,
-            KERNEL_EARLY_STACK_SIZE, 1, 1);
-    
+    result |= _p_ident_n_map(kernel_pd,
+        kernel_pd,
+        4096, 1, 1);
+
+    result |= _p_ident_n_map(kernel_pd,
+        EMPTY_PAGE_ADDR,
+        4096, 0, 0);
+
+    result |= _p_ident_n_map(kernel_pd,
+        (0x00080000),
+        (0xfffff - 0x80000 + 1), 1, 1);
+
+    result |= _p_ident_n_map(kernel_pd,
+        KERNEL_START_ADDR,
+        kernel_size, 1, 1);
+
+    result |= _p_ident_n_map(kernel_pd,
+        KERNEL_EARLY_STACK_ADDR - KERNEL_EARLY_STACK_SIZE,
+        KERNEL_EARLY_STACK_SIZE, 1, 1);
+
     return result;
 }
 
@@ -418,7 +428,9 @@ static void init_mem_layout(void)
     mem_size += 64 * 1024 * mem_size_info.n_64k_blks;
 
     // mark kernel page directory
-    mark_addr_range(0x00000000, 0x00001000);
+    mark_addr_range(0x00000000, 0x00006000);
+    // mark empty page
+    mark_addr_len(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
     // mark EBDA and upper memory as allocated
     mark_addr_range(0x80000, 0xfffff);
     // mark kernel
@@ -427,31 +439,99 @@ static void init_mem_layout(void)
     if (e820_mem_map_entry_size == 20) {
         struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
         for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
-            if (entry->type != 1)
-            {
+            if (entry->type != 1) {
                 mark_addr_len(entry->base, entry->len);
             }
         }
     } else {
         struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
         for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
-            if (entry->in.type != 1)
-            {
+            if (entry->in.type != 1) {
                 mark_addr_len(entry->in.base, entry->in.len);
             }
         }
     }
 }
 
+static struct mm* k_mm_head;
+static struct mm first_mm;
+
+// map a physical page to the end of mm_area in its pd
+// returns GB_OK on success, GB_FAILED on allocation/map failure
+int k_map(
+    struct mm* mm_area,
+    struct page* page,
+    int read,
+    int write,
+    int priv,
+    int cow)
+{
+    // find the tail of the page list; stop at the last node,
+    // not at NULL, so the append below has a valid target
+    struct page* p_page_end = mm_area->pgs;
+    while (p_page_end != NULL && p_page_end->next != NULL)
+        p_page_end = p_page_end->next;
+
+    struct page* new_page = k_malloc(sizeof(struct page));
+    if (new_page == NULL)
+        return GB_FAILED;
+
+    new_page->attr.read = (read == 1);
+    new_page->attr.write = (write == 1);
+    new_page->attr.system = (priv == 1);
+    new_page->phys_page_id = page->phys_page_id;
+    new_page->next = NULL;
+
+    if (cow) {
+        // share the physical page and its reference count;
+        // increment the count itself, not the pointer
+        new_page->attr.cow = 1;
+        new_page->ref_count = page->ref_count;
+        ++*new_page->ref_count;
+    } else {
+        // exclusive mapping: allocate a fresh reference count
+        // (the old code wrote through an unallocated pointer)
+        new_page->attr.cow = 0;
+        new_page->ref_count = (size_t*)k_malloc(sizeof(size_t));
+        if (new_page->ref_count == NULL)
+            return GB_FAILED;
+        *new_page->ref_count = 1;
+    }
+
+    // the page must be mapped in both cases; a cow page is
+    // mapped read-only so the first write faults and copies
+    if (p_map(
+            mm_area, page_to_phys_addr(new_page->phys_page_id),
+            mm_area->start + 4096 * mm_area->len,
+            1,
+            (write && !cow),
+            priv)
+        != GB_OK)
+        return GB_FAILED;
+
+    // append to the list only after the mapping succeeded
+    if (p_page_end == NULL)
+        mm_area->pgs = new_page;
+    else
+        p_page_end->next = new_page;
+
+    ++mm_area->len;
+    return GB_OK;
+}
+
 void init_paging(void)
 {
     init_mem_layout();
 
+    // create initial struct mms
+    memset(&first_mm, 0x00, sizeof(struct mm));
+
+    first_mm.attr.read = 1;
+    first_mm.attr.write = 1;
+    first_mm.attr.system = 1;
+    first_mm.start = 0x30000000;
+    first_mm.len = 1;
+    first_mm.next = NULL;
+
+    page_t init_mm_page = alloc_page();
+    p_map(KERNEL_EARLY_PAGE_DIRECTORY_ADDR, page_to_phys_addr(init_mm_page), KERNEL_HEAP_START, 1, 1, 1);
+    init_heap();
+
+    first_mm.pgs = (struct page*)k_malloc(sizeof(struct page));
+
+    first_mm.pgs->attr.read = 1;
+    first_mm.pgs->attr.write = 1;
+    first_mm.pgs->attr.system = 1;
+    first_mm.pgs->attr.cow = 0;
+    first_mm.pgs->phys_page_id = init_mm_page;
+    first_mm.pgs->ref_count = (size_t*)k_malloc(sizeof(size_t));
+    first_mm.pgs->next = NULL;
+
+    k_mm_head = &first_mm;
+
     if (_create_kernel_pd() != GB_OK) {
         asm_cli();
         asm_hlt();
     }
-    asm_enable_paging(_kernel_pd);
+
+    asm_enable_paging(kernel_pd);
 }
 
 static inline void
@@ -473,8 +553,6 @@ set_segment_descriptor(
 
 void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss)
 {
-    // TODO: fix this
-    return;
     gdt = k_malloc(sizeof(segment_descriptor) * 6);
     // since the size in the struct is an OFFSET
     // it needs to be added one to get its real size
@@ -496,17 +574,17 @@ void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss)
 }
 
 void create_segment_descriptor(
-        segment_descriptor* sd,
-        uint32_t base,
-        uint32_t limit,
-        uint32_t flags,
-        uint32_t access)
-{
-    sd->base_low   = base  & 0x0000ffff;
-    sd->base_mid   = ((base  & 0x00ff0000) >> 16);
-    sd->base_high  = ((base  & 0xff000000) >> 24);
-    sd->limit_low  = limit & 0x0000ffff;
+    segment_descriptor* sd,
+    uint32_t base,
+    uint32_t limit,
+    uint32_t flags,
+    uint32_t access)
+{
+    sd->base_low = base & 0x0000ffff;
+    sd->base_mid = ((base & 0x00ff0000) >> 16);
+    sd->base_high = ((base & 0xff000000) >> 24);
+    sd->limit_low = limit & 0x0000ffff;
     sd->limit_high = ((limit & 0x000f0000) >> 16);
-    sd->access     = access;
-    sd->flags      = flags;
+    sd->access = access;
+    sd->flags = flags;
 }

+ 0 - 0
src/kernel/process.c


+ 10 - 11
src/kernel_main.c

@@ -8,9 +8,9 @@
 #include <kernel/hw/serial.h>
 #include <kernel/hw/timer.h>
 #include <kernel/interrupt.h>
-#include <kernel/tty.h>
 #include <kernel/mem.h>
 #include <kernel/stdio.h>
+#include <kernel/tty.h>
 #include <kernel/vga.h>
 #include <types/bitmap.h>
 
@@ -26,7 +26,6 @@ void call_constructors_for_cpp(void)
 
 #define KERNEL_MAIN_BUF_SIZE (128)
 
-
 struct tty* console = NULL;
 #define printkf(x...)                       \
     snprintf(buf, KERNEL_MAIN_BUF_SIZE, x); \
@@ -119,11 +118,11 @@ static segment_descriptor new_gdt[5];
 
 void load_new_gdt(void)
 {
-    create_segment_descriptor(new_gdt+0, 0,  0, 0, 0);
-    create_segment_descriptor(new_gdt+1, 0, ~0, 0b1100, SD_TYPE_CODE_SYSTEM);
-    create_segment_descriptor(new_gdt+2, 0, ~0, 0b1100, SD_TYPE_DATA_SYSTEM);
-    create_segment_descriptor(new_gdt+3, 0, ~0, 0b1100, SD_TYPE_CODE_USER);
-    create_segment_descriptor(new_gdt+4, 0, ~0, 0b1100, SD_TYPE_DATA_USER);
+    create_segment_descriptor(new_gdt + 0, 0, 0, 0, 0);
+    create_segment_descriptor(new_gdt + 1, 0, ~0, 0b1100, SD_TYPE_CODE_SYSTEM);
+    create_segment_descriptor(new_gdt + 2, 0, ~0, 0b1100, SD_TYPE_DATA_SYSTEM);
+    create_segment_descriptor(new_gdt + 3, 0, ~0, 0b1100, SD_TYPE_CODE_USER);
+    create_segment_descriptor(new_gdt + 4, 0, ~0, 0b1100, SD_TYPE_DATA_USER);
     asm_load_gdt((5 * 8 - 1) << 16, (phys_ptr_t)new_gdt);
     asm_cli();
 }
@@ -147,14 +146,14 @@ void kernel_main(void)
 
     show_mem_info(buf);
 
-    INIT_START("paging");
-    init_paging();
-    INIT_OK();
-
     INIT_START("SSE");
     asm_enable_sse();
     INIT_OK();
 
+    EVE_START("rebuilding page table");
+    init_paging();
+    INIT_OK();
+
     INIT_START("IDT");
     init_idt();
     init_pit();