Ver código fonte

Merge branch 'dev' into paging

greatbridf 2 anos atrás
pai
commit
d9ed36797d

+ 4 - 0
CMakeLists.txt

@@ -42,6 +42,7 @@ include_directories(${PROJECT_SOURCE_DIR}/include)
 set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         src/kernel/errno.c
                         src/kernel/interrupt.c
+                        src/kernel/process.c
                         src/kernel/tty.c
                         src/kernel/stdio.c
                         src/kernel/mem.c
@@ -59,6 +60,7 @@ set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         include/kernel/errno.h
                         include/kernel/tty.h
                         include/kernel/interrupt.h
+                        include/kernel/process.h
                         include/kernel/stdio.h
                         include/kernel/mem.h
                         include/kernel/vga.h
@@ -73,6 +75,7 @@ set(KERNEL_MAIN_SOURCES src/kernel_main.c
                         include/types/size.h
                         include/types/status.h
                         include/types/stdint.h
+                        include/types/list.h
                         include/types/list.hpp
                         include/kernel_main.h
                         )
@@ -86,6 +89,7 @@ add_custom_command(OUTPUT extracted_kernel_main
 add_custom_target(kernel.out
     DEPENDS extracted_bootloader
     DEPENDS extracted_kernel_main
+    DEPENDS ${CMAKE_SOURCE_DIR}/ldscript.ld
     COMMAND ${CMAKE_LINKER} -T ${CMAKE_SOURCE_DIR}/ldscript.ld ${EXTRACT_DIR}/*.o
     -melf_i386 -o ${CMAKE_BINARY_DIR}/kernel.out
 )

+ 42 - 0
doc/malloc.md

@@ -1,3 +1,45 @@
 [参考链接1](https://blog.codinglabs.org/articles/a-malloc-tutorial.html#212-%E9%A1%B5%E4%B8%8E%E5%9C%B0%E5%9D%80%E6%9E%84%E6%88%90)
 [malloc的glibc实现](https://repo.or.cz/glibc.git/blob/HEAD:/malloc/malloc.c)
 [How the kernel manage your memory](https://manybutfinite.com/post/how-the-kernel-manages-your-memory/)
+
+## 地址空间分区
+从0x00000000到0x3fffffff为内核空间
+
+从0x40000000到0xffffffff为用户空间
+
+### 内核空间:
+0x00000000到0x2fffffff为动态映射区
+0x30000000到0x3fffffff为永久映射区,这个区域的内存在与物理页进行映射后不会被交换出地址空间
+
+## 物理内存分配
+0x00000000-0x00000fff:内核页目录
+0x00001000-0x00004fff:内核页表(前16MiB恒等映射)
+0x00005000-0x00005fff:空白页
+
+## 大致思路:
+每个进程(包括内核)拥有一个struct mm,用于记录自身的虚拟地址映射状况
+
+struct mm拥有struct page的链表,对应虚拟地址实际映射的物理页
+
+struct mm的项目还可包括copy on write的页
+
+发生缺页中断时,内核通过该触发中断的进程的struct mm检查页权限、是否需要复制页等
+
+若因权限不足而触发中断,则内核中断用户进程执行,或内核panic
+
+若因页已被交换出内存,则内核将页换回内存,继续进程执行
+
+若因页为copy on write页,写入时触发中断,则将页复制一份,继续进程执行
+
+### 内核提供几个接口
+1. alloc_page从页位图中找出未被使用的物理页返回
+2. p_map用于将物理页映射到指定的虚拟地址
+3. kmap用于给出物理页,将其映射到一个未被该进程使用的虚拟地址
+
+### 分配内存
+通过kmap将空白页映射到某虚拟地址上,并开启copy on write,
+随后则可以直接对该页进行读写,写入时内核中断自动进行页的分配
+
+因此,换页中断的处理非常重要
+
+## 注
+分页相关的内存数据结构应始终被映射到永久映射区,并且该映射应在页表创建时被完成

+ 2 - 2
include/asm/boot.h

@@ -2,8 +2,8 @@
 
 #include <types/stdint.h>
 
-#define KERNEL_EARLY_STACK_ADDR ((phys_ptr_t)0x03fffff0)
-#define KERNEL_EARLY_STACK_SIZE ((size_t) 0xfffff0)
+#define KERNEL_EARLY_STACK_ADDR ((phys_ptr_t)0x01000000)
+#define KERNEL_EARLY_STACK_SIZE ((size_t)0x100000)
 
 struct __attribute__((__packed__)) gdt_descriptor {
     uint16_t size;

+ 2 - 0
include/asm/sys.h

@@ -9,6 +9,8 @@ extern "C" {
 
 void asm_enable_paging(page_directory_entry* pd_addr);
 
+phys_ptr_t current_pd(void);
+
 // the limit should be set on the higher 16bit
 // e.g. (n * sizeof(segment_descriptor) - 1) << 16
 // the lower bit off the limit is either 0 or 1

+ 24 - 5
include/kernel/interrupt.h

@@ -17,6 +17,24 @@ struct regs_32 {
     uint32_t eax;
 };
 
+// present: When set, the page fault was caused by a page-protection violation.
+//          When not set, it was caused by a non-present page.
+// write:   When set, the page fault was caused by a write access.
+//          When not set, it was caused by a read access.
+// user:    When set, the page fault was caused while CPL = 3.
+//          This does not necessarily mean that the page fault was a privilege violation.
+// from https://wiki.osdev.org/Exceptions#Page_Fault
+struct page_fault_error_code {
+    uint32_t present : 1;
+    uint32_t write : 1;
+    uint32_t user : 1;
+    uint32_t reserved_write : 1;
+    uint32_t instruction_fetch : 1;
+    uint32_t protection_key : 1;
+    uint32_t shadow_stack : 1;
+    uint32_t software_guard_extensions : 1;
+};
+
 // external interrupt handler function
 // stub in assembly MUST be called irqN
 #define SET_UP_IRQ(N, SELECTOR)        \
@@ -48,12 +66,13 @@ struct IDT_entry {
 extern struct IDT_entry IDT[256];
 #endif
 
-void init_idt();
+void init_idt(void);
+void init_pic(void);
 
 // idt_descriptor: uint16_t[3]
 // [0] bit 0 :15 => limit
 // [1] bit 16:47 => address
-extern void asm_load_idt(uint16_t idt_descriptor[3]);
+extern void asm_load_idt(uint16_t idt_descriptor[3], int sti);
 
 void int13_handler(
     struct regs_32 s_regs,
@@ -63,10 +82,10 @@ void int13_handler(
     uint32_t eflags);
 
 void int14_handler(
-    ptr_t addr,
+    linr_ptr_t l_addr,
     struct regs_32 s_regs,
-    uint32_t error_code,
-    ptr_t eip,
+    struct page_fault_error_code error_code,
+    void* v_eip,
     uint16_t cs,
     uint32_t eflags);
 

+ 177 - 41
include/kernel/mem.h

@@ -1,11 +1,18 @@
 #pragma once
 
+#include <types/size.h>
 #include <types/stdint.h>
 
+#define PAGE_SIZE (4096)
+#define KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT ((void*)0x30000000)
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+// in mem.c
+extern struct mm* kernel_mm_head;
+
 // don't forget to add the initial 1m to the total
 struct mem_size_info {
     uint16_t n_1k_blks; // memory between 1m and 16m in 1k blocks
@@ -23,38 +30,6 @@ struct e820_mem_map_entry_24 {
     uint32_t acpi_extension_attr;
 };
 
-// in kernel_main.c
-extern uint8_t e820_mem_map[1024];
-extern uint32_t e820_mem_map_count;
-extern uint32_t e820_mem_map_entry_size;
-extern uint32_t kernel_size;
-extern struct mem_size_info mem_size_info;
-
-// TODO: decide heap start address according
-//   to user's memory size
-#define HEAP_START ((void*)0x01000000)
-
-struct mem_blk_flags {
-    uint8_t is_free;
-    uint8_t has_next;
-    uint8_t _unused2;
-    uint8_t _unused3;
-};
-
-struct mem_blk {
-    size_t size;
-    struct mem_blk_flags flags;
-    // the first byte of the memory space
-    // the minimal allocated space is 4 bytes
-    uint8_t data[4];
-};
-
-int init_heap(void);
-
-void* k_malloc(size_t size);
-
-void k_free(void* ptr);
-
 /*
  * page directory entry
  *
@@ -78,7 +53,7 @@ struct page_directory_entry_in {
     uint32_t d : 1;
     uint32_t ps : 1;
     uint32_t ignored : 4;
-    page_t addr : 20;
+    page_t pt_page : 20;
 };
 
 typedef union page_directory_entry {
@@ -111,7 +86,7 @@ struct page_table_entry_in {
     uint32_t pat : 1;
     uint32_t g : 1;
     uint32_t ignored : 3;
-    page_t addr : 20;
+    page_t page : 20;
 };
 
 typedef union page_table_entry {
@@ -119,9 +94,171 @@ typedef union page_table_entry {
     struct page_table_entry_in in;
 } page_table_entry;
 
+struct page_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;
+    uint32_t cow : 1;
+};
+
+struct page {
+    page_t phys_page_id;
+    size_t* ref_count;
+    struct page_attr attr;
+    struct page* next;
+};
+
+struct mm_attr {
+    uint32_t read : 1;
+    uint32_t write : 1;
+    uint32_t system : 1;
+};
+
+struct mm {
+    linr_ptr_t start;
+    size_t len;
+    struct mm_attr attr;
+    struct page* pgs;
+    struct mm* next;
+    page_directory_entry* pd;
+};
+
+// in kernel_main.c
+extern uint8_t e820_mem_map[1024];
+extern uint32_t e820_mem_map_count;
+extern uint32_t e820_mem_map_entry_size;
+extern uint32_t kernel_size;
+extern struct mem_size_info mem_size_info;
+
+#define KERNEL_HEAP_START ((void*)0x30000000)
+#define KERNEL_HEAP_LIMIT ((void*)0x40000000)
+
+struct mem_blk_flags {
+    uint8_t is_free;
+    uint8_t has_next;
+    uint8_t _unused2;
+    uint8_t _unused3;
+};
+
+struct mem_blk {
+    size_t size;
+    struct mem_blk_flags flags;
+    // the first byte of the memory space
+    // the minimal allocated space is 4 bytes
+    uint8_t data[4];
+};
+
+int init_heap(void);
+
+void* k_malloc(size_t size);
+
+void k_free(void* ptr);
+
+// translate physical address to virtual(mapped) address
+void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);
+
+// translate linear address to physical address
+phys_ptr_t l_ptr_to_p_ptr(struct mm* mm_area, linr_ptr_t v_ptr);
+
+// translate virtual(mapped) address to physical address
+phys_ptr_t v_ptr_to_p_ptr(void* v_ptr);
+
+// check if the l_ptr is contained in the area
+// @return GB_OK if l_ptr is in the area
+//         GB_FAILED if not
+int is_l_ptr_valid(struct mm* mm_area, linr_ptr_t l_ptr);
+
+// find the corresponding page the l_ptr pointing to
+// @return the pointer to the struct if found, NULL if not found
+struct page* find_page_by_l_ptr(struct mm* mm_area, linr_ptr_t l_ptr);
+
+static inline page_t phys_addr_to_page(phys_ptr_t ptr)
+{
+    return ptr >> 12;
+}
+
+static inline pd_i_t page_to_pd_i(page_t p)
+{
+    return p >> 10;
+}
+
+static inline pt_i_t page_to_pt_i(page_t p)
+{
+    return p & (1024 - 1);
+}
+
+static inline phys_ptr_t page_to_phys_addr(page_t p)
+{
+    return p << 12;
+}
+
+static inline pd_i_t linr_addr_to_pd_i(linr_ptr_t ptr)
+{
+    return page_to_pd_i(phys_addr_to_page(ptr));
+}
+
+static inline pd_i_t linr_addr_to_pt_i(linr_ptr_t ptr)
+{
+    return page_to_pt_i(phys_addr_to_page(ptr));
+}
+
+static inline page_directory_entry* lptr_to_pde(struct mm* mm, linr_ptr_t l_ptr)
+{
+    return mm->pd + linr_addr_to_pd_i((phys_ptr_t)l_ptr);
+}
+
+static inline page_table_entry* lptr_to_pte(struct mm* mm, linr_ptr_t l_ptr)
+{
+    page_directory_entry* pde = lptr_to_pde(mm, l_ptr);
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    return pte + linr_addr_to_pt_i((phys_ptr_t)l_ptr);
+}
+
+static inline page_directory_entry* lp_to_pde(struct mm* mm, linr_ptr_t l_ptr)
+{
+    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mm, l_ptr);
+    page_directory_entry* pde = mm->pd + linr_addr_to_pd_i(p_ptr);
+    return pde;
+}
+
+// get the corresponding pte for the linear address
+// for example: l_ptr = 0x30001000 will return the pte including the page it is mapped to
+static inline page_table_entry* lp_to_pte(struct mm* mm, linr_ptr_t l_ptr)
+{
+    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mm, l_ptr);
+
+    page_directory_entry* pde = lp_to_pde(mm, l_ptr);
+    phys_ptr_t p_pt = page_to_phys_addr(pde->in.pt_page);
+
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(p_pt);
+    pte += linr_addr_to_pt_i(p_ptr);
+
+    return pte;
+}
+
+// map the page to the end of the mm_area in pd
+int k_map(
+    struct mm* mm_area,
+    struct page* page,
+    int read,
+    int write,
+    int priv,
+    int cow);
+
+// allocate a raw page
+page_t alloc_raw_page(void);
+
+// allocate a struct page together with the raw page
+struct page* allocate_page(void);
+
 #define KERNEL_PAGE_DIRECTORY_ADDR ((page_directory_entry*)0x00000000)
 
-void init_paging(void);
+void init_mem(void);
+
+#define KERNEL_CODE_SEGMENT (0x08)
+#define KERNEL_DATA_SEGMENT (0x10)
+#define USER_CODE_SEGMENT (0x18)
+#define USER_DATA_SEGMENT (0x20)
 
 #define SD_TYPE_CODE_SYSTEM (0x9a)
 #define SD_TYPE_DATA_SYSTEM (0x92)
@@ -141,13 +278,12 @@ typedef struct segment_descriptor_struct {
     uint64_t base_high : 8;
 } segment_descriptor;
 
-void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss);
 void create_segment_descriptor(
-        segment_descriptor* sd,
-        uint32_t base,
-        uint32_t limit,
-        uint32_t flags,
-        uint32_t access);
+    segment_descriptor* sd,
+    uint32_t base,
+    uint32_t limit,
+    uint32_t flags,
+    uint32_t access);
 
 #ifdef __cplusplus
 }

+ 15 - 0
include/kernel/process.h

@@ -0,0 +1,15 @@
+#pragma once
+
+#include <kernel/mem.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct process {
+    struct mm* mm;
+};
+
+#ifdef __cplusplus
+}
+#endif

+ 6 - 1
include/kernel_main.h

@@ -1,6 +1,11 @@
 #pragma once
 
-#define MAKE_BREAK_POINT() asm volatile("xchgw %bx, %bx")
+static inline void __break_point(void)
+{
+    asm volatile("xchgw %bx, %bx");
+}
+
+#define MAKE_BREAK_POINT() __break_point()
 
 #define KERNEL_STACK_SIZE (16 * 1024)
 #define KERNEL_STACK_SEGMENT (0x10)

+ 9 - 0
include/types/list.h

@@ -0,0 +1,9 @@
+#pragma once
+
+#define LIST_LIKE_AT(type, list_like, pos, result_name) \
+    type* result_name = list_like; \
+    {                   \
+        size_t _tmp_pos = (pos); \
+        while (_tmp_pos--) \
+            result_name = result_name->next; \
+    }

+ 1 - 1
include/types/size.h

@@ -13,7 +13,7 @@ typedef int64_t diff_t;
 #endif
 
 typedef ptr_t phys_ptr_t;
-typedef ptr_t virt_ptr_t;
+typedef ptr_t linr_ptr_t;
 typedef size_t page_t;
 typedef size_t pd_i_t;
 typedef size_t pt_i_t;

+ 3 - 4
ldscript.ld

@@ -34,7 +34,7 @@ SECTIONS
         *(.rodata*)
     } > WHOLE
 
-    .data :
+    .data : AT(LOADADDR(.text) + ADDR(.data) - ADDR(.text))
     {
         asm_kernel_size = .;
         LONG(__real_kernel_end - ADDR(.text));
@@ -98,10 +98,9 @@ SECTIONS
         BYTE(0x00);
     } > WHOLE
 
-/*
     /DISCARD/ :
     {
-        *(.comment)
+        *(.fini_array*)
+        *(.eh_frame*)
     }
-*/
 }

+ 11 - 0
src/asm/interrupt.s

@@ -12,6 +12,12 @@ int6:
 
     iret
 
+.globl int8
+.type  int8 @function
+int8:
+    nop
+    iret
+
 .globl int13
 .type  int13 @function
 int13:
@@ -32,6 +38,7 @@ int14:
     movl %cr2, %eax
     pushl %eax
     call int14_handler
+    popl %eax
     popal
 
 # remove the 32bit error code from stack
@@ -158,5 +165,9 @@ irq15:
 asm_load_idt:
     movl 4(%esp), %edx
     lidt (%edx)
+    movl 8(%esp), %edx
+    cmpl $0, %edx
+    je asm_load_idt_skip
     sti
+asm_load_idt_skip:
     ret

+ 8 - 1
src/asm/sys.s

@@ -11,11 +11,18 @@ asm_enable_paging:
     movl %eax, %cr3
 
     movl %cr0, %eax
-    orl $0x80000001, %eax
+    // SET PE, WP, PG
+    orl $0x80010001, %eax
     movl %eax, %cr0
 
     ret
 
+.global current_pd
+.type   current_pd @function
+current_pd:
+    movl %cr3, %eax
+    ret
+
 .global asm_load_gdt
 .type   asm_load_gdt @function
 asm_load_gdt:

+ 69 - 6
src/boot.s

@@ -157,13 +157,76 @@ start_32bit:
     movw %ax, %gs
     movw %ax, %ss
 
-# set up stack
-# in order to align 16 byte
-# set stack base address at
-# 0x003ffff0
-    movl $0x03fffff0, %ebp
-    movl $0x03fffff0, %esp
+# set up early stack at 0x01000000
+    movl $0x01000000, %ebp
+    movl $0x01000000, %esp
+
+setup_early_kernel_page_table:
+# set up early kernel page table
+
+# the early kernel page directory is located at physical
+# address 0x00000000, size 4k, and the empty page is at
+# 0x5000-0x5fff, so we zero the first 0x6000 bytes (24KiB)
+    movl $0x00000000, %eax
+    movl $0x6000, %ecx
+    call _fill_zero
+
+# map the first 16MiB identically
+# 0x0000-0x0fff: early kernel pd
+# 0x1000-0x4fff: page tables for pde 0 - 3
+    movl $0x00000000, %eax
+    movl $0x00001003, %ebx
+_fill_pde_loop:
+    movl %ebx, (%eax)
+    addl $4, %eax
+    addl $0x1000, %ebx
+    cmpl $0x5003, %ebx
+    jne _fill_pde_loop
+
+# then, create page tables
+    movl $0x00000003, %eax
+    movl $0x00001000, %ecx
+
+_create_page_table_loop1:
+    movl %eax, (%ecx)
+    addl $4, %ecx
+    addl $0x1000, %eax
+    cmpl $0x4ffc, %ecx
+    jle _create_page_table_loop1
+
+load_early_kernel_page_table:
+    movl $0x00000000, %eax
+    movl %eax, %cr3
+
+    movl %cr0, %eax
+    // SET PE, WP, PG
+    orl $0x80010001, %eax
+    movl %eax, %cr0
+
+    jmp start_move_kernel
+
+# quick call
+# %eax: address to fill
+# %ecx: byte count to fill
+_fill_zero:
+    movl %ecx, -4(%esp)
+    movl %eax, -8(%esp)
+
+_fill_zero_loop:
+    cmpl $0, %ecx
+    jz _fill_zero_end
+    subl $4, %ecx
+    movl $0, (%eax)
+    addl $4, %eax
+    jmp _fill_zero_loop
+
+_fill_zero_end:
+    movl -8(%esp), %eax
+    movl -4(%esp), %ecx
+    ret
 
+start_move_kernel:
+# move the kernel to 0x100000
     movl $__loader_end, %eax
     movl $__real_kernel_start, %ebx
 

+ 79 - 26
src/kernel/interrupt.c

@@ -4,6 +4,7 @@
 #include <kernel/hw/keyboard.h>
 #include <kernel/hw/timer.h>
 #include <kernel/interrupt.h>
+#include <kernel/mem.h>
 #include <kernel/stdio.h>
 #include <kernel/tty.h>
 #include <kernel/vga.h>
@@ -15,6 +16,28 @@ void init_idt()
 {
     asm_cli();
 
+    memset(IDT, 0x00, sizeof(IDT));
+
+    // invalid opcode
+    SET_IDT_ENTRY_FN(6, int6, 0x08);
+    // double fault
+    SET_IDT_ENTRY_FN(8, int8, 0x08);
+    // general protection
+    SET_IDT_ENTRY_FN(13, int13, 0x08);
+    // page fault
+    SET_IDT_ENTRY_FN(14, int14, 0x08);
+
+    uint16_t idt_descriptor[3];
+    idt_descriptor[0] = sizeof(struct IDT_entry) * 256;
+    *((uint32_t*)(idt_descriptor + 1)) = (ptr_t)IDT;
+
+    asm_load_idt(idt_descriptor, 0);
+}
+
+void init_pic(void)
+{
+    asm_cli();
+
     asm_outb(PORT_PIC1_COMMAND, 0x11); // edge trigger mode
     asm_outb(PORT_PIC1_DATA, 0x20); // start from int 0x20
     asm_outb(PORT_PIC1_DATA, 0x04); // PIC1 is connected to IRQ2 (1 << 2)
@@ -29,12 +52,6 @@ void init_idt()
     asm_outb(PORT_PIC1_DATA, 0x00);
     asm_outb(PORT_PIC2_DATA, 0x00);
 
-    // handle general protection fault (handle segmentation fault)
-    SET_IDT_ENTRY_FN(6, int6, 0x08);
-    SET_IDT_ENTRY_FN(13, int13, 0x08);
-    SET_IDT_ENTRY_FN(14, int14, 0x08);
-    // SET_IDT_ENTRY(0x0c, /* addr */ 0, 0x08);
-
     // 0x08 stands for kernel code segment
     SET_UP_IRQ(0, 0x08);
     SET_UP_IRQ(1, 0x08);
@@ -53,11 +70,7 @@ void init_idt()
     SET_UP_IRQ(14, 0x08);
     SET_UP_IRQ(15, 0x08);
 
-    uint16_t idt_descriptor[3];
-    idt_descriptor[0] = sizeof(struct IDT_entry) * 256;
-    *((uint32_t*)(idt_descriptor + 1)) = (ptr_t)IDT;
-
-    asm_load_idt(idt_descriptor);
+    asm_sti();
 }
 
 void int6_handler(
@@ -117,33 +130,73 @@ void int13_handler(
     asm_hlt();
 }
 
+static size_t page_fault_times;
+
 // page fault
 void int14_handler(
-    ptr_t addr,
+    linr_ptr_t l_addr,
     struct regs_32 s_regs,
-    uint32_t error_code,
-    ptr_t eip,
+    struct page_fault_error_code error_code,
+    void* v_eip,
     uint16_t cs,
     uint32_t eflags)
 {
+    MAKE_BREAK_POINT();
     char buf[512];
 
-    tty_print(console, "---- PAGE FAULT ----\n");
+    ++page_fault_times;
+
+    // not present page, possibly mapped but not loaded
+    // or invalid address or just invalid address
+    // TODO: mmapping and swapping
+    if (error_code.present == 0) {
+        goto kill;
+    }
+
+    // kernel code
+    if (cs == KERNEL_CODE_SEGMENT) {
+        if (is_l_ptr_valid(kernel_mm_head, l_addr) != GB_OK) {
+            goto kill;
+        }
+        struct page* page = find_page_by_l_ptr(kernel_mm_head, l_addr);
+
+        // copy on write
+        if (error_code.write == 1 && page->attr.cow == 1) {
+            page_directory_entry* pde = kernel_mm_head->pd + linr_addr_to_pd_i(l_addr);
+            page_table_entry* pte = p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+            pte += linr_addr_to_pt_i(l_addr);
+
+            // if it is a dying page
+            if (*page->ref_count == 1) {
+                page->attr.cow = 0;
+                pte->in.a = 0;
+                pte->in.rw = 1;
+                return;
+            }
+            // duplicate the page
+            page_t new_page = alloc_raw_page();
+            void* new_page_data = p_ptr_to_v_ptr(page_to_phys_addr(new_page));
+            memcpy(new_page_data, p_ptr_to_v_ptr(page_to_phys_addr(page->phys_page_id)), PAGE_SIZE);
+
+            pte->in.page = new_page;
+            pte->in.rw = 1;
+            pte->in.a = 0;
+
+            --*page->ref_count;
 
+            page->ref_count = (size_t*)k_malloc(sizeof(size_t));
+            *page->ref_count = 1;
+            page->attr.cow = 0;
+            page->phys_page_id = new_page;
+            return;
+        }
+    }
+
+kill:
     snprintf(
         buf, 512,
-        "eax: %x, ebx: %x, ecx: %x, edx: %x\n"
-        "esp: %x, ebp: %x, esi: %x, edi: %x\n"
-        "eip: %x, cs: %x, error_code: %x   \n"
-        "eflags: %x, addr: %x              \n",
-        s_regs.eax, s_regs.ebx, s_regs.ecx,
-        s_regs.edx, s_regs.esp, s_regs.ebp,
-        s_regs.esi, s_regs.edi, eip,
-        cs, error_code, eflags, addr);
+        "killed: segmentation fault (eip: %x, cr2: %x, error_code: %x)", v_eip, l_addr, error_code);
     tty_print(console, buf);
-
-    tty_print(console, "----   HALTING SYSTEM   ----");
-
     asm_cli();
     asm_hlt();
 }

+ 284 - 251
src/kernel/mem.c

@@ -8,13 +8,24 @@
 #include <kernel/vga.h>
 #include <kernel_main.h>
 #include <types/bitmap.h>
+#include <types/list.h>
+
+// static variables
+
+struct mm kernel_mm;
+struct mm* kernel_mm_head;
+
+// ---------------------
+
+// constant values
+
+#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
+#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
+
+// ---------------------
 
 static void* p_start;
 static void* p_break;
-static segment_descriptor* gdt;
-
-// temporary
-static struct tss32_t _tss;
 
 static size_t mem_size;
 static char mem_bitmap[1024 * 1024 / 8];
@@ -27,6 +38,9 @@ static int32_t set_heap_start(void* start_addr)
 
 static int32_t brk(void* addr)
 {
+    if (addr >= KERNEL_HEAP_LIMIT) {
+        return GB_FAILED;
+    }
     p_break = addr;
     return 0;
 }
@@ -45,12 +59,9 @@ static void* sbrk(size_t increment)
 
 int init_heap(void)
 {
-    // start of the available address space
-    // TODO: adjust heap start address
-    //   according to user's memory size
-    set_heap_start(HEAP_START);
+    set_heap_start(KERNEL_HEAP_START);
 
-    if (brk(HEAP_START) != 0) {
+    if (brk(KERNEL_HEAP_START) != 0) {
         return GB_FAILED;
     }
     struct mem_blk* p_blk = sbrk(0);
@@ -163,29 +174,41 @@ void k_free(void* ptr)
     // TODO: fusion free blocks nearby
 }
 
-static inline page_t phys_addr_to_page(phys_ptr_t ptr)
-{
-    return ptr >> 12;
-}
-
-static inline pd_i_t page_to_pd_i(page_t p)
+void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
 {
-    return p >> 10;
+    if (p_ptr <= 0x30000000) {
+        // memory below 768MiB is identically mapped
+        return (void*)p_ptr;
+    } else {
+        // TODO: address translation
+        MAKE_BREAK_POINT();
+        return (void*)0xffffffff;
+    }
 }
 
-static inline pt_i_t page_to_pt_i(page_t p)
+phys_ptr_t l_ptr_to_p_ptr(struct mm* mm, linr_ptr_t v_ptr)
 {
-    return p & (1024-1);
-}
+    if (mm == kernel_mm_head && v_ptr < (linr_ptr_t)KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
+        return (phys_ptr_t)v_ptr;
+    }
+    while (mm != NULL) {
+        if (v_ptr < mm->start || v_ptr >= mm->start + mm->len * 4096) {
+            goto next;
+        }
+        size_t offset = (size_t)(v_ptr - mm->start);
+        LIST_LIKE_AT(struct page, mm->pgs, offset / PAGE_SIZE, result);
+        return page_to_phys_addr(result->phys_page_id) + (offset % 4096);
+    next:
+        mm = mm->next;
+    }
 
-static inline phys_ptr_t page_to_phys_addr(page_t p)
-{
-    return p << 12;
+    // TODO: handle error
+    return 0xffffffff;
 }
 
-static inline pd_i_t phys_addr_to_pd_i(phys_ptr_t ptr)
+phys_ptr_t v_ptr_to_p_ptr(void* v_ptr)
 {
-    return page_to_pd_i(phys_addr_to_page(ptr));
+    return l_ptr_to_p_ptr(kernel_mm_head, (linr_ptr_t)v_ptr);
 }
 
 static inline void mark_page(page_t n)
@@ -200,18 +223,20 @@ static inline void free_page(page_t n)
 
 static void mark_addr_len(phys_ptr_t start, size_t n)
 {
-    if (n == 0) return;
+    if (n == 0)
+        return;
     page_t start_page = phys_addr_to_page(start);
-    page_t end_page   = phys_addr_to_page(start + n + 4095);
+    page_t end_page = phys_addr_to_page(start + n + 4095);
     for (page_t i = start_page; i < end_page; ++i)
         mark_page(i);
 }
 
 static void free_addr_len(phys_ptr_t start, size_t n)
 {
-    if (n == 0) return;
+    if (n == 0)
+        return;
     page_t start_page = phys_addr_to_page(start);
-    page_t end_page   = phys_addr_to_page(start + n + 4095);
+    page_t end_page = phys_addr_to_page(start + n + 4095);
     for (page_t i = start_page; i < end_page; ++i)
         free_page(i);
 }
@@ -226,7 +251,7 @@ static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
     free_addr_len(start, end - start);
 }
 
-static int alloc_page(void)
+page_t alloc_raw_page(void)
 {
     for (page_t i = 0; i < 1024 * 1024; ++i) {
         if (bm_test(mem_bitmap, i) == 0) {
@@ -237,188 +262,30 @@ static int alloc_page(void)
     return GB_FAILED;
 }
 
-// allocate ONE whole page
-static phys_ptr_t _k_p_malloc(void)
-{
-    return page_to_phys_addr(alloc_page());
-}
-
-static void _k_p_free(phys_ptr_t ptr)
-{
-    free_page(phys_addr_to_page(ptr));
-}
-
-static inline void create_pd(page_directory_entry* pde)
+struct page* allocate_page(void)
 {
-    for (int i = 0; i < 1024; ++i)
-    {
-        pde->v = 0;
-        ++pde;
-    }
+    // TODO: allocate memory on identically mapped area
+    struct page* p = (struct page*)k_malloc(sizeof(struct page));
+    memset(p, 0x00, sizeof(struct page));
+    p->phys_page_id = alloc_raw_page();
+    p->ref_count = (size_t*)k_malloc(sizeof(size_t));
+    return p;
 }
 
-static page_directory_entry* _kernel_pd = KERNEL_PAGE_DIRECTORY_ADDR;
-
-// map n pages from p_ptr to v_ptr
-// p_ptr and v_ptr needs to be 4kb-aligned
-static int p_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv);
-
-// map n pages
-static inline int p_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv);
-
-// map n bytes identically
-static inline int _p_ident_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        size_t n,
-        int rw,
-        int priv);
-
-static inline void make_page_table(page_directory_entry* pd, page_t p)
+static inline void make_page_table(page_table_entry* pt)
 {
-    phys_ptr_t pp_pt = page_to_phys_addr(p);
-
-    page_table_entry* pt = (page_table_entry*)pp_pt;
-
     memset(pt, 0x00, sizeof(page_table_entry) * 1024);
-
-    _p_ident_n_map(
-            pd,
-            pp_pt,
-            sizeof(page_table_entry) * 1024, 1, 1
-            );
-}
-
-// map n pages from p_ptr to v_ptr
-// p_ptr and v_ptr needs to be 4kb-aligned
-static int p_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv)
-{
-    // pages to be mapped
-    page_t v_page_start = phys_addr_to_page(v_ptr);
-    page_t v_page_end   = v_page_start + n;
-
-    for (pd_i_t pde_index = page_to_pd_i(v_page_start); pde_index <= page_to_pd_i(v_page_end); ++pde_index)
-    {
-        // page table not present
-        if (pd[pde_index].in.p != 1)
-        {
-            pd[pde_index].in.p = 1;
-            pd[pde_index].in.a = 0;
-            pd[pde_index].in.rw = 1;
-            page_t p_page = alloc_page();
-            pd[pde_index].in.addr = p_page;
-            make_page_table(pd, p_page);
-        }
-    }
-
-    for (size_t i = 0; i < n; ++i)
-    {
-        page_t v_page = v_page_start + i;
-        pd_i_t pd_i = page_to_pd_i(v_page);
-        page_table_entry* pt = (page_table_entry*) page_to_phys_addr(pd[pd_i].in.addr);
-        pt += page_to_pt_i(v_page);
-
-        if (pt->in.p == 1)
-        {
-            errno = EEXIST;
-            return GB_FAILED;
-        }
-        pt->in.p = 1;
-        pt->in.rw = (rw == 1);
-        pt->in.us = !(priv == 1);
-        pt->in.a = 0;
-        pt->in.d = 0;
-
-        pt->in.addr = phys_addr_to_page(p_ptr) + i;
-    }
-
-    return GB_OK;
 }
 
-// map n pages
-static inline int p_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        virt_ptr_t v_ptr,
-        size_t n,
-        int rw,
-        int priv)
-{
-    return p_map(
-            pd,
-            p_ptr,
-            v_ptr,
-            (n + 4096 - 1) >> 12,
-            rw,
-            priv
-            );
-}
-
-// map n bytes identically
-static inline int _p_ident_n_map(
-        page_directory_entry* pd,
-        phys_ptr_t p_ptr,
-        size_t n,
-        int rw,
-        int priv)
-{
-    return p_n_map(
-            pd,
-            p_ptr,
-            p_ptr,
-            n,
-            rw,
-            priv
-            );
-}
-
-static inline int _create_kernel_pd(void)
-{
-    create_pd(_kernel_pd);
-
-    int result = 0;
-
-    result |= _p_ident_n_map(_kernel_pd,
-            (phys_ptr_t)KERNEL_PAGE_DIRECTORY_ADDR,
-            sizeof(page_directory_entry) * 1024, 1, 1);
-    result |= _p_ident_n_map(_kernel_pd,
-            (0x00080000),
-            (0xfffff - 0x80000 + 1), 1, 1);
-    result |= _p_ident_n_map(_kernel_pd,
-            KERNEL_START_ADDR,
-            kernel_size, 1, 1);
-    result |= _p_ident_n_map(_kernel_pd,
-            KERNEL_EARLY_STACK_ADDR - KERNEL_EARLY_STACK_SIZE,
-            KERNEL_EARLY_STACK_SIZE, 1, 1);
-    
-    return result;
-}
-
-static void init_mem_layout(void)
+static inline void init_mem_layout(void)
 {
     mem_size = 1024 * mem_size_info.n_1k_blks;
     mem_size += 64 * 1024 * mem_size_info.n_64k_blks;
 
     // mark kernel page directory
-    mark_addr_range(0x00000000, 0x00001000);
+    mark_addr_range(0x00000000, 0x00005000);
+    // mark empty page
+    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
     // mark EBDA and upper memory as allocated
     mark_addr_range(0x80000, 0xfffff);
     // mark kernel
@@ -427,86 +294,252 @@ static void init_mem_layout(void)
     if (e820_mem_map_entry_size == 20) {
         struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
         for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
-            if (entry->type != 1)
-            {
+            if (entry->type != 1) {
                 mark_addr_len(entry->base, entry->len);
             }
         }
     } else {
         struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
         for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
-            if (entry->in.type != 1)
-            {
+            if (entry->in.type != 1) {
                 mark_addr_len(entry->in.base, entry->in.len);
             }
         }
     }
 }
 
-void init_paging(void)
+int is_l_ptr_valid(struct mm* mm_area, linr_ptr_t l_ptr)
 {
-    init_mem_layout();
+    while (mm_area != NULL) {
+        if (l_ptr >= mm_area->start && l_ptr < mm_area->start + mm_area->len * PAGE_SIZE) {
+            return GB_OK;
+        }
+        mm_area = mm_area->next;
+    }
+    return GB_FAILED;
+}
 
-    if (_create_kernel_pd() != GB_OK) {
-        asm_cli();
-        asm_hlt();
+struct page* find_page_by_l_ptr(struct mm* mm, linr_ptr_t l_ptr)
+{
+    if (mm == kernel_mm_head && l_ptr < (linr_ptr_t)KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
+        // TODO: make mm for identically mapped area
+        MAKE_BREAK_POINT();
+        return (struct page*)0xffffffff;
+    }
+    while (mm != NULL) {
+        if (l_ptr >= mm->start && l_ptr < mm->start + mm->len * 4096) {
+            size_t offset = (size_t)(l_ptr - mm->start);
+            LIST_LIKE_AT(struct page, mm->pgs, offset / PAGE_SIZE, result);
+            return result;
+        }
+        mm = mm->next;
     }
-    asm_enable_paging(_kernel_pd);
+
+    // TODO: error handling
+    return NULL;
 }
 
-static inline void
-set_segment_descriptor(
-    segment_descriptor* sd,
-    uint32_t base,
-    uint32_t limit,
-    uint8_t access,
-    uint8_t flags)
+void map_raw_page_to_pte(
+    page_table_entry* pte,
+    page_t page,
+    int rw,
+    int priv)
 {
-    sd->access = access;
-    sd->flags = flags;
-    sd->base_low = base;
-    sd->base_mid = base >> 16;
-    sd->base_high = base >> 24;
-    sd->limit_low = limit;
-    sd->limit_high = limit >> 16;
+    // set P bit
+    pte->v = 0x00000001;
+    pte->in.rw = (rw == 1);
+    pte->in.us = (priv == 1);
+    pte->in.page = page;
 }
 
-void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss)
+static void _map_raw_page_to_addr(
+    struct mm* mm_area,
+    page_t page,
+    int rw,
+    int priv)
 {
-    // TODO: fix this
-    return;
-    gdt = k_malloc(sizeof(segment_descriptor) * 6);
-    // since the size in the struct is an OFFSET
-    // it needs to be added one to get its real size
-    uint16_t asm_gdt_size = (asm_gdt_descriptor.size + 1) / 8;
-    segment_descriptor* asm_gdt = (segment_descriptor*)asm_gdt_descriptor.address;
-
-    for (int i = 0; i < asm_gdt_size; ++i) {
-        gdt[i] = asm_gdt[i];
+    linr_ptr_t addr = (linr_ptr_t)mm_area->start + mm_area->len * 4096;
+    page_directory_entry* pde = mm_area->pd + linr_addr_to_pd_i(addr);
+    // page table not exist
+    if (!pde->in.p) {
+        // allocate a page for the page table
+        pde->in.p = 1;
+        pde->in.rw = 1;
+        pde->in.us = 0;
+        pde->in.pt_page = alloc_raw_page();
+
+        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
     }
 
-    set_segment_descriptor(gdt + 5, (uint32_t)&_tss, sizeof(struct tss32_t), SD_TYPE_TSS, 0b0000);
+    // map the page in the page table
+    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    pte += linr_addr_to_pt_i(addr);
+    map_raw_page_to_pte(pte, page, rw, priv);
+}
+
+// map page to the end of mm_area in pd
+int k_map(
+    struct mm* mm_area,
+    struct page* page,
+    int read,
+    int write,
+    int priv,
+    int cow)
+{
+    struct page* p_page_end = mm_area->pgs;
+    while (p_page_end != NULL && p_page_end->next != NULL)
+        p_page_end = p_page_end->next;
+
+    if (cow) {
+        // find its ancestor
+        while (page->attr.cow)
+            page = page->next;
+
+        // create a new page node
+        struct page* new_page = k_malloc(sizeof(struct page));
+
+        new_page->attr.read = (read == 1);
+        new_page->attr.write = (write == 1);
+        new_page->attr.system = (priv == 1);
+        new_page->attr.cow = 1;
+        // TODO: move *next out of struct page
+        new_page->next = NULL;
+
+        new_page->phys_page_id = page->phys_page_id;
+        new_page->ref_count = page->ref_count;
+
+        if (p_page_end != NULL)
+            p_page_end->next = new_page;
+        else
+            mm_area->pgs = new_page;
+    } else {
+        page->attr.read = (read == 1);
+        page->attr.write = (write == 1);
+        page->attr.system = (priv == 1);
+        page->attr.cow = 0;
+        // TODO: move *next out of struct page
+        page->next = NULL;
+
+        if (p_page_end != NULL)
+            p_page_end->next = page;
+        else
+            mm_area->pgs = page;
+    }
+    _map_raw_page_to_addr(
+        mm_area,
+        page->phys_page_id,
+        (write && !cow),
+        priv);
+
+    ++mm_area->len;
+    ++*page->ref_count;
+    return GB_OK;
+}
 
-    _tss.esp0 = (uint32_t)kernel_esp;
-    _tss.ss0 = kernel_ss;
+// identity-map a single page (virtual address == physical address)
+// this function is only meant to be used during initialization.
+// it tests the PDE's P (present) bit before allocating a page table, so the
+// caller must ensure the PDE covering the page table page itself is already
+// present; otherwise the recursive call below would never terminate
+static inline void _init_map_page_identically(page_t page)
+{
+    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);
+    // page table not exist
+    if (!pde->in.p) {
+        // allocate a page for the page table
+        // set the P bit of the pde in advance
+        pde->in.p = 1;
+        pde->in.rw = 1;
+        pde->in.us = 0;
+        pde->in.pt_page = alloc_raw_page();
+        _init_map_page_identically(pde->in.pt_page);
+
+        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
+    }
 
-    // +1 for enabling interrupt
-    asm_load_gdt(((6 * sizeof(segment_descriptor) - 1) << 16) + 1, (uint32_t)gdt);
-    asm_load_tr((6 - 1) * 8);
+    // map the page in the page table
+    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
+    pt += page_to_pt_i(page);
+    pt->v = 0x00000003;
+    pt->in.page = page;
+}
+
+static inline void init_paging_map_low_mem_identically(void)
+{
+    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
+        // check if the address is valid and not mapped
+        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
+            continue;
+        _init_map_page_identically(phys_addr_to_page(addr));
+    }
+}
+
+static struct page empty_page;
+static struct page heap_first_page;
+static size_t heap_first_page_ref_count;
+
+void init_mem(void)
+{
+    init_mem_layout();
+
+    // identity-map the 16 MiB - 768 MiB physical range
+    init_paging_map_low_mem_identically();
+
+    kernel_mm_head = &kernel_mm;
+
+    kernel_mm.attr.read = 1;
+    kernel_mm.attr.write = 1;
+    kernel_mm.attr.system = 1;
+    kernel_mm.len = 0;
+    kernel_mm.next = NULL;
+    kernel_mm.pd = KERNEL_PAGE_DIRECTORY_ADDR;
+    kernel_mm.pgs = NULL;
+    kernel_mm.start = (linr_ptr_t)KERNEL_HEAP_START;
+
+    heap_first_page.attr.cow = 0;
+    heap_first_page.attr.read = 1;
+    heap_first_page.attr.write = 1;
+    heap_first_page.attr.system = 1;
+    heap_first_page.next = NULL;
+    heap_first_page.phys_page_id = alloc_raw_page();
+    heap_first_page.ref_count = &heap_first_page_ref_count;
+
+    *heap_first_page.ref_count = 0;
+
+    k_map(kernel_mm_head, &heap_first_page, 1, 1, 1, 0);
+
+    init_heap();
+
+    // create empty_page struct
+    empty_page.attr.cow = 0;
+    empty_page.attr.read = 1;
+    empty_page.attr.write = 0;
+    empty_page.attr.system = 0;
+    empty_page.next = NULL;
+    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
+    empty_page.ref_count = (size_t*)k_malloc(sizeof(size_t));
+    *empty_page.ref_count = 1;
+
+    // TODO: improve the algorithm: k_map walks the entire page list on every
+    // call, so this fill loop is O(n^2); keep a tail pointer in struct mm
+    // while (kernel_mm_head->len < 256 * 1024 * 1024 / PAGE_SIZE) {
+    while (kernel_mm_head->len < 16 * 1024 * 1024 / PAGE_SIZE) {
+        k_map(
+            kernel_mm_head, &empty_page,
+            1, 1, 1, 1);
+    }
 }
 
 void create_segment_descriptor(
-        segment_descriptor* sd,
-        uint32_t base,
-        uint32_t limit,
-        uint32_t flags,
-        uint32_t access)
+    segment_descriptor* sd,
+    uint32_t base,
+    uint32_t limit,
+    uint32_t flags,
+    uint32_t access)
 {
-    sd->base_low   = base  & 0x0000ffff;
-    sd->base_mid   = ((base  & 0x00ff0000) >> 16);
-    sd->base_high  = ((base  & 0xff000000) >> 24);
-    sd->limit_low  = limit & 0x0000ffff;
+    sd->base_low = base & 0x0000ffff;
+    sd->base_mid = ((base & 0x00ff0000) >> 16);
+    sd->base_high = ((base & 0xff000000) >> 24);
+    sd->limit_low = limit & 0x0000ffff;
     sd->limit_high = ((limit & 0x000f0000) >> 16);
-    sd->access     = access;
-    sd->flags      = flags;
+    sd->access = access;
+    sd->flags = flags;
 }

+ 0 - 0
src/kernel/process.c


+ 20 - 28
src/kernel_main.c

@@ -8,9 +8,9 @@
 #include <kernel/hw/serial.h>
 #include <kernel/hw/timer.h>
 #include <kernel/interrupt.h>
-#include <kernel/tty.h>
 #include <kernel/mem.h>
 #include <kernel/stdio.h>
+#include <kernel/tty.h>
 #include <kernel/vga.h>
 #include <types/bitmap.h>
 
@@ -26,7 +26,6 @@ void call_constructors_for_cpp(void)
 
 #define KERNEL_MAIN_BUF_SIZE (128)
 
-
 struct tty* console = NULL;
 #define printkf(x...)                       \
     snprintf(buf, KERNEL_MAIN_BUF_SIZE, x); \
@@ -119,18 +118,20 @@ static segment_descriptor new_gdt[5];
 
 void load_new_gdt(void)
 {
-    create_segment_descriptor(new_gdt+0, 0,  0, 0, 0);
-    create_segment_descriptor(new_gdt+1, 0, ~0, 0b1100, SD_TYPE_CODE_SYSTEM);
-    create_segment_descriptor(new_gdt+2, 0, ~0, 0b1100, SD_TYPE_DATA_SYSTEM);
-    create_segment_descriptor(new_gdt+3, 0, ~0, 0b1100, SD_TYPE_CODE_USER);
-    create_segment_descriptor(new_gdt+4, 0, ~0, 0b1100, SD_TYPE_DATA_USER);
+    create_segment_descriptor(new_gdt + 0, 0, 0, 0, 0);
+    create_segment_descriptor(new_gdt + 1, 0, ~0, 0b1100, SD_TYPE_CODE_SYSTEM);
+    create_segment_descriptor(new_gdt + 2, 0, ~0, 0b1100, SD_TYPE_DATA_SYSTEM);
+    create_segment_descriptor(new_gdt + 3, 0, ~0, 0b1100, SD_TYPE_CODE_USER);
+    create_segment_descriptor(new_gdt + 4, 0, ~0, 0b1100, SD_TYPE_DATA_USER);
     asm_load_gdt((5 * 8 - 1) << 16, (phys_ptr_t)new_gdt);
     asm_cli();
 }
 
 void kernel_main(void)
 {
-    MAKE_BREAK_POINT();
+    // MAKE_BREAK_POINT();
+    asm_enable_sse();
+
     save_loader_data();
 
     load_new_gdt();
@@ -147,39 +148,30 @@ void kernel_main(void)
 
     show_mem_info(buf);
 
-    INIT_START("paging");
-    init_paging();
-    INIT_OK();
-
-    INIT_START("SSE");
-    asm_enable_sse();
-    INIT_OK();
-
-    INIT_START("IDT");
+    INIT_START("exception handlers");
     init_idt();
-    init_pit();
     INIT_OK();
 
-    INIT_START("heap space");
-    if (init_heap() != GB_OK) {
-        INIT_FAILED();
-        halt_on_init_error();
-    }
+    INIT_START("memory allocation");
+    init_mem();
     INIT_OK();
 
     INIT_START("C++ global objects");
     call_constructors_for_cpp();
     INIT_OK();
 
+    INIT_START("programmable interrupt controller and timer");
+    init_pic();
+    init_pit();
+    INIT_OK();
+
     printkf("Testing k_malloc...\n");
-    char* k_malloc_buf = (char*)k_malloc(sizeof(char) * 128);
-    snprintf(k_malloc_buf, 128, "This text is printed on the heap!\n");
+    char* k_malloc_buf = (char*)k_malloc(sizeof(char) * 4097);
+    snprintf(k_malloc_buf, 4097, "This text is printed on the heap!\n");
     tty_print(console, k_malloc_buf);
     k_free(k_malloc_buf);
 
-    void* kernel_stack = k_malloc(KERNEL_STACK_SIZE);
-    init_gdt_with_tss(kernel_stack + KERNEL_STACK_SIZE - 1, KERNEL_STACK_SEGMENT);
-    printkf("new GDT and TSS loaded\n");
+    k_malloc_buf[4096] = '\x89';
 
     printkf("No work to do, halting...\n");