@@ -1,4 +1,3 @@
-#include <asm/boot.h>
 #include <asm/port_io.h>
 #include <asm/sys.h>
 #include <assert.h>
@@ -8,7 +7,6 @@
 #include <kernel/process.hpp>
 #include <kernel/task.h>
 #include <kernel/vga.hpp>
-#include <kernel_main.hpp>
 #include <stdint.h>
 #include <stdio.h>
 #include <types/allocator.hpp>
@@ -18,221 +16,59 @@
 
 // constant values
 
-#define EMPTY_PAGE_ADDR ((pptr_t)0x0000)
-#define EMPTY_PAGE_END ((pptr_t)0x1000)
-
-#define IDENTICALLY_MAPPED_HEAP_SIZE ((size_t)0x400000)
+#define EMPTY_PAGE ((page_t)0)
 
 // ---------------------
 
 static size_t mem_size;
-static char mem_bitmap[1024 * 1024 / 8];
-
-class brk_memory_allocator {
-public:
-    using byte = uint8_t;
-    using size_type = size_t;
-
-    struct mem_blk_flags {
-        uint8_t is_free;
-        uint8_t has_next;
-        uint8_t _unused2;
-        uint8_t _unused3;
-    };
-
-    struct mem_blk {
-        size_t size;
-        struct mem_blk_flags flags;
-        // the first byte of the memory space
-        // the minimal allocated space is 4 bytes
-        uint8_t data[4];
-    };
-
-private:
-    byte* p_start;
-    byte* p_break;
-    byte* p_limit;
-
-    brk_memory_allocator(void) = delete;
-    brk_memory_allocator(const brk_memory_allocator&) = delete;
-    brk_memory_allocator(brk_memory_allocator&&) = delete;
-
-    inline constexpr int brk(byte* addr)
-    {
-        if (unlikely(addr >= p_limit))
-            return GB_FAILED;
-        p_break = addr;
-        return GB_OK;
-    }
-
-    // sets errno
-    inline byte* sbrk(size_type increment)
-    {
-        if (unlikely(brk(p_break + increment) != GB_OK)) {
-            errno = ENOMEM;
-            return nullptr;
-        } else {
-            errno = 0;
-            return p_break;
-        }
-    }
-
-    inline mem_blk* _find_next_mem_blk(mem_blk* blk, size_type blk_size)
-    {
-        byte* p = (byte*)blk;
-        p += sizeof(mem_blk);
-        p += blk_size;
-        p -= (4 * sizeof(byte));
-        return (mem_blk*)p;
-    }
-
-    // sets errno
-    // @param start_pos position where to start finding
-    // @param size the size of the block we're looking for
-    // @return found block if suitable block exists, if not, the last block
-    mem_blk* find_blk(mem_blk* start_pos, size_type size)
-    {
-        while (1) {
-            if (start_pos->flags.is_free && start_pos->size >= size) {
-                errno = 0;
-                return start_pos;
-            } else {
-                if (unlikely(!start_pos->flags.has_next)) {
-                    errno = ENOTFOUND;
-                    return start_pos;
-                }
-                start_pos = _find_next_mem_blk(start_pos, start_pos->size);
-            }
-        }
-    }
-
-    // sets errno
-    mem_blk* allocate_new_block(mem_blk* blk_before, size_type size)
-    {
-        sbrk(sizeof(mem_blk) + size - 4 * sizeof(byte));
-        // preserves errno
-        if (unlikely(errno)) {
-            return nullptr;
-        }
-
-        mem_blk* blk = _find_next_mem_blk(blk_before, blk_before->size);
-
-        blk_before->flags.has_next = 1;
-
-        blk->flags.has_next = 0;
-        blk->flags.is_free = 1;
-        blk->size = size;
-
-        errno = 0;
-        return blk;
-    }
-
-    void split_block(mem_blk* blk, size_type this_size)
-    {
-        // block is too small to get split
-        if (blk->size < sizeof(mem_blk) + this_size) {
-            return;
-        }
+static uint8_t mem_bitmap[1024 * 1024 / 8];
 
-        mem_blk* blk_next = _find_next_mem_blk(blk, this_size);
+// global
+segment_descriptor gdt[6];
 
-        blk_next->size = blk->size
-            - this_size
-            - sizeof(mem_blk)
-            + 4 * sizeof(byte);
+uint8_t e820_mem_map[1024];
+uint32_t e820_mem_map_count;
+uint32_t e820_mem_map_entry_size;
+struct mem_size_info mem_size_info;
 
-        blk_next->flags.has_next = blk->flags.has_next;
-        blk_next->flags.is_free = 1;
-
-        blk->flags.has_next = 1;
-        blk->size = this_size;
-    }
-
-public:
-    brk_memory_allocator(void* start, size_type limit)
-        : p_start((byte*)start)
-        , p_limit(p_start + limit)
-    {
-        brk(p_start);
-        mem_blk* p_blk = (mem_blk*)sbrk(0);
-        p_blk->size = 4;
-        p_blk->flags.has_next = 0;
-        p_blk->flags.is_free = 1;
-    }
-
-    // sets errno
-    void* alloc(size_type size)
-    {
-        struct mem_blk* block_allocated;
-
-        block_allocated = find_blk((mem_blk*)p_start, size);
-        if (errno == ENOTFOUND) {
-            // 'block_allocated' in the argument list is the pointer
-            // pointing to the last block
-            block_allocated = allocate_new_block(block_allocated, size);
-            if (errno) {
-                // preserves errno
-                return nullptr;
-            }
-        } else {
-            split_block(block_allocated, size);
-        }
-
-        errno = 0;
-        block_allocated->flags.is_free = 0;
-        return block_allocated->data;
-    }
-
-    void free(void* ptr)
-    {
-        mem_blk* blk = (mem_blk*)((byte*)ptr - (sizeof(mem_blk_flags) + sizeof(size_t)));
-        blk->flags.is_free = 1;
-        // TODO: fusion free blocks nearby
-    }
-};
-
-static brk_memory_allocator* kernel_heap_allocator;
-static brk_memory_allocator
-    kernel_ident_mapped_allocator((void*)bss_section_end_addr,
-        IDENTICALLY_MAPPED_HEAP_SIZE);
+void* operator new(size_t sz)
+{
+    void* ptr = types::__allocator::m_palloc->alloc(sz);
+    assert(ptr);
+    return ptr;
+}
 
-void* k_malloc(size_t size)
+void* operator new[](size_t sz)
 {
-    void* ptr = kernel_heap_allocator->alloc(size);
-    assert(likely(ptr));
+    void* ptr = types::__allocator::m_palloc->alloc(sz);
+    assert(ptr);
     return ptr;
 }
 
-void k_free(void* ptr)
+void operator delete(void* ptr)
 {
-    kernel_heap_allocator->free(ptr);
+    types::__allocator::m_palloc->free(ptr);
 }
 
-void* ki_malloc(size_t size)
+void operator delete(void* ptr, size_t)
 {
-    void* ptr = kernel_ident_mapped_allocator.alloc(size);
-    assert(likely(ptr));
-    return ptr;
+    types::__allocator::m_palloc->free(ptr);
 }
 
-void ki_free(void* ptr)
+void operator delete[](void* ptr)
 {
-    kernel_ident_mapped_allocator.free(ptr);
+    types::__allocator::m_palloc->free(ptr);
 }
 
-void* ptovp(pptr_t p_ptr)
+void operator delete[](void* ptr, size_t)
 {
-    // memory below 768MiB is identically mapped
-    // TODO: address translation for high mem
-    assert(p_ptr <= 0x30000000);
-    return (void*)p_ptr;
+    types::__allocator::m_palloc->free(ptr);
 }
 
 inline void mark_page(page_t n)
 {
     bm_set(mem_bitmap, n);
 }
-
 inline void free_page(page_t n)
 {
     bm_clear(mem_bitmap, n);
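
With the brk-style allocator class and the k_malloc/k_free/ki_malloc/ki_free entry points removed, ordinary C++ allocation now reaches the kernel heap through the global operator new/new[]/delete overloads above, all of which route to types::__allocator::m_palloc. A minimal sketch of a call site after this change (the variable name below is illustrative, not from this patch):

    // any kernel-side new/delete now goes through m_palloc
    int* counters = new int[16]();   // operator new[] -> m_palloc->alloc(...)
    delete[] counters;               // operator delete[] -> m_palloc->free(ptr)
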
@@ -240,130 +76,113 @@ inline void free_page(page_t n)
 
 constexpr void mark_addr_len(pptr_t start, size_t n)
 {
-    if (unlikely(n == 0))
+    if (n == 0)
         return;
-    page_t start_page = to_page(start);
-    page_t end_page = to_page(start + n + 4095);
+    page_t start_page = align_down<12>(start) >> 12;
+    page_t end_page = align_up<12>(start + n) >> 12;
     for (page_t i = start_page; i < end_page; ++i)
         mark_page(i);
 }
 
 constexpr void free_addr_len(pptr_t start, size_t n)
 {
-    if (unlikely(n == 0))
+    if (n == 0)
         return;
-    page_t start_page = to_page(start);
-    page_t end_page = to_page(start + n + 4095);
+    page_t start_page = align_down<12>(start) >> 12;
+    page_t end_page = align_up<12>(start + n) >> 12;
     for (page_t i = start_page; i < end_page; ++i)
         free_page(i);
 }
 
-inline constexpr void mark_addr_range(pptr_t start, pptr_t end)
+constexpr void mark_addr_range(pptr_t start, pptr_t end)
 {
     mark_addr_len(start, end - start);
 }
 
-inline constexpr void free_addr_range(pptr_t start, pptr_t end)
+constexpr void free_addr_range(pptr_t start, pptr_t end)
 {
     free_addr_len(start, end - start);
 }
 
-// @return the max count (but less than n) of the pages continuously available
-static inline size_t _test_n_raw_pages(page_t start, size_t n)
-{
-    // *start is already allocated
-    if (bm_test(mem_bitmap, start))
-        return 0;
-
-    return 1 + ((n > 1) ? _test_n_raw_pages(start + 1, n - 1) : 0);
-}
-
-page_t alloc_n_raw_pages(size_t n)
+page_t __alloc_raw_page(void)
 {
-    page_t first = 0;
-    while (first <= 1024 * 1024 - n) {
-        size_t max = _test_n_raw_pages(first, n);
-        if (max != n) {
-            first += (max + 1);
-        } else {
-            for (page_t i = first; i < first + n; ++i)
-                mark_page(i);
-            return first;
+    for (size_t i = 0; i < 8 * sizeof(mem_bitmap); ++i) {
+        if (bm_test(mem_bitmap, i) == 0) {
+            bm_set(mem_bitmap, i);
+            return i;
         }
     }
-    assert(false);
-    return 0xffffffff;
+    return -1;
 }
 
-void free_n_raw_pages(page_t start_pg, size_t n)
+void __free_raw_page(page_t pg)
 {
-    while (n--)
-        free_page(start_pg++);
+    bm_clear(mem_bitmap, pg);
 }
 
-struct page allocate_page(void)
+page allocate_page(void)
 {
     return page {
-        .phys_page_id = alloc_raw_page(),
-        .pte = nullptr,
+        .phys_page_id = __alloc_raw_page(),
         .ref_count = types::_new<types::kernel_ident_allocator, size_t>(0),
-        .attr { 0 },
+        .pg_pteidx = 0,
+        .attr = 0,
     };
 }
 
-pd_t alloc_pd(void)
-{
-    // TODO: alloc page in low mem and gen struct page for it
-    page_t pd_page = alloc_raw_page();
-    pd_t pd = to_pd(pd_page);
-    memset(pd, 0x00, PAGE_SIZE);
-    return pd;
-}
-
-pt_t alloc_pt(void)
+void free_page(page* pg)
 {
-    // TODO: alloc page in low mem and gen struct page for it
-    page_t pt_page = alloc_raw_page();
-    pt_t pt = to_pt(pt_page);
-    memset(pt, 0x00, PAGE_SIZE);
-    return pt;
+    if (*pg->ref_count == 1) {
+        types::pdelete<types::kernel_ident_allocator>(pg->ref_count);
+        __free_raw_page(pg->phys_page_id);
+    } else {
+        --*pg->ref_count;
+    }
 }
 
-void dealloc_pd(pd_t pd)
+void dealloc_pd(page_t pd)
 {
-    for (pde_t* ent = (*pd) + 256; ent < (*pd) + 1024; ++ent) {
-        if (!ent->in.p)
-            continue;
-        dealloc_pt(to_pt(ent));
+    {
+        kernel::paccess pa(pd);
+        auto p_pd = (pd_t)pa.ptr();
+        assert(p_pd);
+        for (pde_t* ent = (*p_pd); ent < (*p_pd) + 768; ++ent) {
+            if (!ent->in.p)
+                continue;
+            __free_raw_page(ent->in.pt_page);
+        }
     }
-    memset(pd, 0x00, sizeof(*pd));
-
-    page_t pg = to_page((pptr_t)pd);
-    free_page(pg);
-}
-void dealloc_pt(pt_t pt)
-{
-    memset(pt, 0x00, sizeof(*pt));
-
-    page_t pg = to_page((pptr_t)pt);
-    free_page(pg);
+    __free_raw_page(pd);
 }
 
+SECTION(".text.kinit")
 static inline void init_mem_layout(void)
 {
     mem_size = 1024 * mem_size_info.n_1k_blks;
     mem_size += 64 * 1024 * mem_size_info.n_64k_blks;
 
-    // mark kernel page directory
-    mark_addr_range(0x00001000, 0x00006000);
     // mark empty page
-    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
+    mark_addr_range(0x00000000, 0x00001000);
+    // mark kernel page directory
+    mark_addr_range(0x00001000, 0x00002000);
+    // mark kernel page table
+    mark_addr_range(0x00002000, 0x00006000);
+    // mark kernel early stack
+    mark_addr_range(0x00006000, 0x00008000);
     // mark EBDA and upper memory as allocated
-    mark_addr_range(0x80000, 0xfffff);
-    // mark kernel
-    mark_addr_len(0x00100000, kernel_size);
-    // mark identically mapped heap
-    mark_addr_len(bss_section_end_addr, IDENTICALLY_MAPPED_HEAP_SIZE);
+    mark_addr_range(0x80000, 0x100000);
+    extern char __stage1_start[];
+    extern char __kinit_end[];
+    extern char __text_start[];
+    extern char __data_end[];
+
+    constexpr pptr_t PHYS_BSS_START = 0x100000;
+    // mark .stage1 and .kinit
+    mark_addr_range((pptr_t)__stage1_start, (pptr_t)__kinit_end);
+    // mark kernel .text to .data
+    mark_addr_len((pptr_t)__kinit_end, __data_end - __text_start);
+    // mark kernel .bss
+    mark_addr_len(PHYS_BSS_START, bss_len);
 
     if (e820_mem_map_entry_size == 20) {
         struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
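
The rewritten mark_addr_len/free_addr_len derive the page range with explicit alignment helpers: the start address is rounded down to a 4KiB boundary and the end rounded up, so every partially covered page falls inside the loop. A worked example, assuming align_down<12>/align_up<12> round to the enclosing 0x1000 boundaries (illustrative, not part of the patch):

    // marking 0x1800 bytes starting at 0x1c00 touches pages 1, 2 and 3
    static_assert((align_down<12>((pptr_t)0x1c00) >> 12) == 1);
    static_assert((align_up<12>((pptr_t)0x1c00 + 0x1800) >> 12) == 4);  // end page, exclusive
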
@@ -386,9 +205,12 @@ using kernel::mm_list;
 mm_list::mm_list(const mm_list& v)
     : m_areas(v.m_areas)
 {
-    pd_t pd = alloc_pd();
-    memcpy(pd, v.m_pd, PAGE_SIZE);
-    m_pd = pd;
+    m_pd = __alloc_raw_page();
+    kernel::paccess pdst(m_pd), psrc(v.m_pd);
+    auto* dst = pdst.ptr();
+    auto* src = psrc.ptr();
+    assert(dst && src);
+    memcpy(dst, src, PAGE_SIZE);
 }
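
kernel::paccess acts as a scoped accessor for a physical page: construct it with a page number, call ptr() for a usable virtual address, and let the destructor release the mapping. Its definition is not part of this diff; a plausible minimal shape, assuming it simply wraps the ref-counted kernel::pmap/kernel::pfree pair added at the bottom of this patch:

    // sketch only: RAII wrapper over the temporary physical-mapping window
    class paccess {
        page_t m_pg;
        uint8_t* m_ptr;
    public:
        explicit paccess(page_t pg) : m_pg(pg), m_ptr(kernel::pmap(pg)) {}
        ~paccess() { if (m_ptr) kernel::pfree(m_pg); }
        void* ptr(void) const { return m_ptr; }
    };
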
 
 inline void map_raw_page_to_pte(
@@ -406,40 +228,70 @@ inline void map_raw_page_to_pte(
     pte->in.page = page;
 }
 
-int mm::append_page(page* pg, bool present, bool write, bool priv, bool cow)
+int mm::append_page(page& pg, uint32_t attr, bool priv)
 {
     void* addr = this->end();
-    pde_t* pde = to_pde(this->owner->m_pd, addr);
+    kernel::paccess pa(this->owner->m_pd);
+    auto pd = (pd_t)pa.ptr();
+    assert(pd);
+    pde_t* pde = *pd + v_to_pdi(addr);
+
+    page_t pt_pg = 0;
+    pte_t* pte = nullptr;
     // page table not exist
     if (unlikely(!pde->in.p)) {
         // allocate a page for the page table
+        pt_pg = __alloc_raw_page();
         pde->in.p = 1;
         pde->in.rw = 1;
         pde->in.us = 1;
-        pde->in.pt_page = alloc_raw_page();
+        pde->in.pt_page = pt_pg;
+
+        auto pt = (pt_t)kernel::pmap(pt_pg);
+        assert(pt);
+        pte = *pt;
 
-        memset(to_pt(pde), 0x00, PAGE_SIZE);
+        memset(pt, 0x00, PAGE_SIZE);
+    } else {
+        pt_pg = pde->in.pt_page;
+        auto pt = (pt_t)kernel::pmap(pt_pg);
+        assert(pt);
+        pte = *pt;
     }
 
     // map the page in the page table
-    pte_t* pte = to_pte(pde, addr);
-    map_raw_page_to_pte(pte, pg->phys_page_id, present, (write && !cow), priv);
-
-    if (unlikely(cow && !pg->attr.in.cow)) {
-        pg->attr.in.cow = 1;
-        pg->pte->in.rw = 0;
-        pg->pte->in.a = 0;
+    int pti = v_to_pti(addr);
+    pte += pti;
+
+    map_raw_page_to_pte(
+        pte,
+        pg.phys_page_id,
+        !(attr & PAGE_MMAP),
+        false,
+        priv);
+
+    kernel::pfree(pt_pg);
+
+    if (unlikely((attr & PAGE_COW) && !(pg.attr & PAGE_COW))) {
+        kernel::paccess pa(pg.pg_pteidx >> 12);
+        auto* pg_pte = (pte_t*)pa.ptr();
+        assert(pg_pte);
+        pg_pte += (pg.pg_pteidx & 0xfff);
+        pg.attr |= PAGE_COW;
+        pg_pte->in.rw = 0;
+        pg_pte->in.a = 0;
         invalidate_tlb(addr);
     }
-    ++*pg->ref_count;
+    ++*pg.ref_count;
+
+    auto iter = this->pgs->emplace_back(pg);
+    iter->pg_pteidx = (pt_pg << 12) + pti;
+    iter->attr = attr;
 
-    auto iter = this->pgs->emplace_back(*pg);
-    iter->pte = pte;
     return GB_OK;
 }
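
The page struct no longer caches a raw pte_t* (such a pointer is only valid while the owning page table happens to be mapped). Instead pg_pteidx packs the page table's physical page number together with the entry index, which the copy-on-write branch above unpacks with >> 12 and & 0xfff. Hypothetical helpers spelling out the encoding (not in the patch):

    // page::pg_pteidx layout: high 20 bits = page table's physical page,
    // low 12 bits = index of the PTE inside that table
    constexpr uint32_t make_pteidx(page_t pt_pg, int pti) { return (pt_pg << 12) + pti; }
    constexpr page_t pteidx_pt_page(uint32_t idx) { return idx >> 12; }
    constexpr int pteidx_index(uint32_t idx) { return idx & 0xfff; }
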
 
-static inline int _mmap(
-    mm_list* mms,
+int mmap(
     void* hint,
     size_t len,
     fs::inode* file,
@@ -447,6 +299,8 @@ static inline int _mmap(
     int write,
     int priv)
 {
+    auto& mms = current_process->mms;
+
     if (unlikely(!file->flags.in.file && !file->flags.in.special_node)) {
         errno = EINVAL;
         return GB_FAILED;
@@ -459,93 +313,45 @@ static inline int _mmap(
 
     size_t n_pgs = align_up<12>(len) >> 12;
 
-    if (!mms->is_avail(hint, len)) {
+    if (!mms.is_avail(hint, len)) {
         errno = EEXIST;
         return GB_FAILED;
     }
 
-    auto mm = mms->addarea(hint, write, priv);
+    auto mm = mms.addarea(hint, write, priv);
     mm->mapped_file = file;
     mm->file_offset = offset;
 
     for (size_t i = 0; i < n_pgs; ++i)
-        mm->append_page(&empty_page, false, write, priv, true);
+        mm->append_page(empty_page, PAGE_MMAP | PAGE_COW, priv);
 
     return GB_OK;
 }
 
-int mmap(
-    void* hint,
-    size_t len,
-    fs::inode* file,
-    size_t offset,
-    int write,
-    int priv)
-{
-    return _mmap(&current_process->mms, hint, len, file, offset, write, priv);
-}
-
-// map a page identically
-// this function is only meant to be used in the initialization process
-// it checks the pde's P bit so you need to make sure it's already set
-// to avoid dead loops
-static inline void _init_map_page_identically(page_t page)
-{
-    pde_t* pde = *KERNEL_PAGE_DIRECTORY_ADDR + to_pdi(page);
-    // page table not exist
-    if (unlikely(!pde->in.p)) {
-        // allocate a page for the page table
-        // set the P bit of the pde in advance
-        pde->in.p = 1;
-        pde->in.rw = 1;
-        pde->in.us = 0;
-        pde->in.pt_page = alloc_raw_page();
-        _init_map_page_identically(pde->in.pt_page);
-        memset(to_pt(pde), 0x00, PAGE_SIZE);
-    }
-
-    // map the page in the page table
-    pte_t* pt = to_pte(pde, page);
-    pt->v = 0x00000003;
-    pt->in.page = page;
-}
-
-static inline void init_paging_map_low_mem_identically(void)
-{
-    for (pptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
-        // check if the address is valid and not mapped
-        if (bm_test(mem_bitmap, to_page(addr)))
-            continue;
-        _init_map_page_identically(to_page(addr));
-    }
-}
-
+SECTION(".text.kinit")
 void init_mem(void)
 {
     init_mem_layout();
 
-    // map the 16MiB-768MiB identically
-    init_paging_map_low_mem_identically();
-
-    kernel_mms = types::pnew<types::kernel_ident_allocator>(kernel_mms, KERNEL_PAGE_DIRECTORY_ADDR);
+    // TODO: replace early kernel pd
+    kernel_mms = types::pnew<types::kernel_ident_allocator>(kernel_mms, EARLY_KERNEL_PD_PAGE);
    auto heap_mm = kernel_mms->addarea(KERNEL_HEAP_START, true, true);
 
     // create empty_page struct
-    empty_page.attr.in.cow = 0;
-    empty_page.phys_page_id = to_page(EMPTY_PAGE_ADDR);
-    empty_page.ref_count = types::_new<types::kernel_ident_allocator, size_t>(1);
-    empty_page.pte = to_pte(*KERNEL_PAGE_DIRECTORY_ADDR, empty_page.phys_page_id);
-    empty_page.pte->in.rw = 0;
-    invalidate_tlb(0x00000000);
-
-    // 0x30000000 to 0x40000000 or 768MiB to 1GiB
-    while (heap_mm->pgs->size() < 256 * 1024 * 1024 / PAGE_SIZE)
-        heap_mm->append_page(&empty_page, true, true, true, true);
-
-    kernel_heap_allocator = types::pnew<types::kernel_ident_allocator>(kernel_heap_allocator,
-        KERNEL_HEAP_START, vptrdiff(KERNEL_HEAP_LIMIT, KERNEL_HEAP_START));
+    empty_page.attr = 0;
+    empty_page.phys_page_id = EMPTY_PAGE;
+    empty_page.ref_count = types::_new<types::kernel_ident_allocator, size_t>(2);
+    empty_page.pg_pteidx = 0x00002000;
+
+    // 0xd0000000 to 0xd4000000: 64MiB of kernel heap at 3.25GiB
+    while (heap_mm->pgs->size() < 64 * 1024 * 1024 / PAGE_SIZE)
+        heap_mm->append_page(empty_page, PAGE_COW, true);
+
+    types::__allocator::init_kernel_heap(KERNEL_HEAP_START,
+        vptrdiff(KERNEL_HEAP_LIMIT, KERNEL_HEAP_START));
 }
 
+SECTION(".text.kinit")
 void create_segment_descriptor(
     segment_descriptor* sd,
     uint32_t base,
@@ -561,3 +367,63 @@ void create_segment_descriptor(
     sd->access = access;
     sd->flags = flags;
 }
+
+namespace __physmapper {
+struct mapped_area {
+    size_t ref;
+    uint8_t* ptr;
+};
+
+static types::hash_map<page_t, mapped_area,
+    types::linux_hasher<page_t>, types::kernel_ident_allocator>
+    mapped;
+static uint8_t freebm[0x400 / 8];
+} // namespace __physmapper
+
+uint8_t* kernel::pmap(page_t pg)
+{
+    auto iter = __physmapper::mapped.find(pg);
+    if (iter) {
+        ++iter->value.ref;
+        return iter->value.ptr;
+    }
+
+    for (int i = 2; i < 0x400; ++i) {
+        if (bm_test(__physmapper::freebm, i) == 0) {
+            auto* pte = (pte_t*)(0xff001000) + i;
+            pte->v = 0x3;
+            pte->in.page = pg;
+
+            uint8_t* ptr = (uint8_t*)0xff000000 + 0x1000 * i;
+            invalidate_tlb(ptr);
+
+            bm_set(__physmapper::freebm, i);
+            __physmapper::mapped.emplace(pg,
+                __physmapper::mapped_area { 1, ptr });
+            return ptr;
+        }
+    }
+
+    return nullptr;
+}
+void kernel::pfree(page_t pg)
+{
+    auto iter = __physmapper::mapped.find(pg);
+    if (!iter)
+        return;
+
+    if (iter->value.ref > 1) {
+        --iter->value.ref;
+        return;
+    }
+
+    int i = (uint32_t)iter->value.ptr - 0xff000000;
+    i /= 0x1000;
+
+    auto* pte = (pte_t*)(0xff001000) + i;
+    pte->v = 0;
+    invalidate_tlb(iter->value.ptr);
+
+    bm_clear(__physmapper::freebm, i);
+    __physmapper::mapped.remove(iter);
+}
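
pmap hands out ref-counted 4KiB mapping windows in the 0xff000000-0xff400000 range; the scan starts at slot 2 because the low slots are reserved (0xff001000, slot 1, is where the code addresses the page table backing the window itself). Every successful pmap must be balanced by a pfree. A usage sketch mirroring what append_page does above (illustrative only):

    // zero a freshly allocated physical page through a temporary mapping
    page_t pg = __alloc_raw_page();
    auto* va = kernel::pmap(pg);
    assert(va);
    memset(va, 0x00, PAGE_SIZE);
    kernel::pfree(pg);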