greatbridf — 9 months ago
parent
commit
b6b04e657d

+ 2 - 2
include/kernel/mem/mm_list.hpp

@@ -77,8 +77,8 @@ public:
 
     uintptr_t find_avail(uintptr_t hint, size_t length) const;
 
-    int unmap(iterator area);
-    int unmap(uintptr_t start, std::size_t length);
+    int unmap(iterator area, bool should_invalidate_tlb);
+    int unmap(uintptr_t start, std::size_t length, bool should_invalidate_tlb);
 
     int mmap(const map_args& args);
 

+ 6 - 6
include/kernel/mem/paging.hpp

@@ -106,9 +106,9 @@ constexpr uintptr_t KERNEL_PAGE_TABLE_ADDR = 0x100000;
 constexpr physaddr<void> KERNEL_PAGE_TABLE_PHYS_ADDR{KERNEL_PAGE_TABLE_ADDR};
 constexpr PSE KERNEL_PAGE_TABLE{0x100000};
 
-constexpr unsigned long PAGE_PRESENT = 0x00000001;
-constexpr unsigned long PAGE_BUDDY   = 0x00000002;
-constexpr unsigned long PAGE_SLAB    = 0x00000004;
+constexpr unsigned long PAGE_PRESENT = 0x00010000;
+constexpr unsigned long PAGE_BUDDY   = 0x00020000;
+constexpr unsigned long PAGE_SLAB    = 0x00040000;
 
 struct page {
     // TODO: use atomic
@@ -126,14 +126,14 @@ void mark_present(uintptr_t start, uintptr_t end);
 
 [[nodiscard]] page* alloc_page();
 // order represents power of 2
-[[nodiscard]] page* alloc_pages(int order);
+[[nodiscard]] page* alloc_pages(unsigned order);
 
 // order represents power of 2
-void free_pages(page* page, int order);
+void free_pages(page* page, unsigned order);
 void free_page(page* page);
 
 // order represents power of 2
-void free_pages(pfn_t pfn, int order);
+void free_pages(pfn_t pfn, unsigned order);
 void free_page(pfn_t pfn);
 
 // clear the page all zero

+ 8 - 5
include/kernel/syscall.hpp

@@ -1,5 +1,8 @@
 #pragma once
 
+#include <string>
+#include <vector>
+
 #include <bits/alltypes.h>
 #include <poll.h>
 #include <sys/stat.h>
@@ -41,8 +44,8 @@ int do_open(const char __user* path, int flags, mode_t mode);
 int do_symlink(const char __user* target, const char __user* linkpath);
 int do_readlink(const char __user* pathname, char __user* buf, size_t buf_size);
 int do_ioctl(int fd, unsigned long request, uintptr_t arg3);
-ssize_t do_readv(int fd, const iovec __user* iov, int iovcnt);
-ssize_t do_writev(int fd, const iovec __user* iov, int iovcnt);
+ssize_t do_readv(int fd, const iovec* iov, int iovcnt);
+ssize_t do_writev(int fd, const iovec* iov, int iovcnt);
 off_t do_lseek(int fd, off_t offset, int whence);
 uintptr_t do_mmap_pgoff(uintptr_t addr, size_t len,
         int prot, int flags, int fd, off_t pgoffset);
@@ -93,9 +96,9 @@ struct execve_retval {
 };
 
 execve_retval do_execve(
-        const char __user* exec,
-        char __user* const __user* argv,
-        char __user* const __user* envp);
+        const std::string& exec,
+        const std::vector<std::string>& args,
+        const std::vector<std::string>& envs);
 
 // in mount.cc
 int do_mount(

+ 2 - 2
include/types/elf.hpp

@@ -145,8 +145,8 @@ struct PACKED elf32_section_header_entry {
 
 struct elf32_load_data {
     const fs::dentry* exec_dent;
-    std::vector<std::string> argv;
-    std::vector<std::string> envp;
+    const std::vector<std::string>& argv;
+    const std::vector<std::string>& envp;
     uintptr_t ip;
     uintptr_t sp;
 };

+ 16 - 0
include/types/user_types.hpp

@@ -0,0 +1,16 @@
+#pragma once
+
+#include <stdint.h>
+
+#include <types/types.h>
+
+namespace types {
+
+using ptr32_t = uint32_t;
+
+struct iovec32 {
+    ptr32_t iov_base;
+    ptr32_t iov_len;
+};
+
+} // namespace types

+ 1 - 1
init_script.sh

@@ -40,5 +40,5 @@ alias ll="ls -l "
 alias la="ls -la "
 EOF
 
-exec /mnt/init /bin/sh -l \
+exec /bin/sh -l \
     < /dev/console > /dev/console 2> /dev/console

+ 6 - 6
src/asm/interrupt.s

@@ -88,12 +88,12 @@ asm_ctx_switch:
 
 	call after_ctx_switch
 
-	mov %r15, 0x28(%rsp)
-	mov %r14, 0x20(%rsp)
-	mov %r13, 0x18(%rsp)
-	mov %r12, 0x10(%rsp)
-	mov %rbp, 0x08(%rsp)
-    mov %rbx, 0x00(%rsp)
+	mov 0x28(%rsp), %r15
+	mov 0x20(%rsp), %r14
+	mov 0x18(%rsp), %r13
+	mov 0x10(%rsp), %r12
+	mov 0x08(%rsp), %rbp
+    mov 0x00(%rsp), %rbx
 
 	add $0x30, %rsp
     popf

+ 3 - 5
src/kernel/allocator.cc

@@ -17,10 +17,8 @@ constexpr uintptr_t KERNEL_HEAP_SIZE  = KERNEL_HEAP_END - KERNEL_HEAP_START;
 namespace types::memory {
 
 struct mem_blk_flags {
-    uint8_t is_free;
-    uint8_t has_next;
-    uint8_t : 8; // unused1
-    uint8_t : 8; // unused2
+    unsigned long is_free  : 8;
+    unsigned long has_next : 8;
 };
 
 struct mem_blk {
@@ -90,7 +88,7 @@ constexpr void split_block(mem_blk* blk, std::size_t this_size)
     // block is too small to get split
     // that is, the block to be split should have enough room
     // for "this_size" bytes and also could contain a new block
-    if (blk->size < this_size + sizeof(mem_blk) + 8)
+    if (blk->size < this_size + sizeof(mem_blk) + 1024)
         return;
 
     mem_blk* blk_next = next(blk, this_size);

+ 30 - 10
src/kernel/mem/mm_list.cc

@@ -5,8 +5,18 @@
 #include <kernel/mem/paging.hpp>
 #include <kernel/mem/vm_area.hpp>
 
+
 using namespace kernel::mem;
 
+static inline void __invalidate_all_tlb()
+{
+    asm volatile(
+            "mov %%cr3, %%rax\n\t"
+            "mov %%rax, %%cr3\n\t"
+            : : : "rax", "memory"
+            );
+}
+
 static inline void __dealloc_page_table_all(
         paging::pfn_t pt, int depth, int from, int to)
 {
@@ -68,9 +78,17 @@ mm_list::mm_list(const mm_list& other): mm_list{}
 
             increase_refcount(pfn_to_page(pfn));
 
+            // TODO: create a function to set COW mappings
+            attributes = other_pte.attributes();
+            attributes &= ~PA_RW;
+            attributes |= PA_COW;
+            other_pte.set(attributes, pfn);
+
             ++this_iter, ++other_iter;
         }
     }
+
+    __invalidate_all_tlb();
 }
 
 mm_list::~mm_list()
@@ -163,7 +181,9 @@ uintptr_t mm_list::set_brk(uintptr_t addr)
 void mm_list::clear()
 {
     for (auto iter = m_areas.begin(); iter != m_areas.end(); ++iter)
-        unmap(iter);
+        unmap(iter, false);
+
+    __invalidate_all_tlb();
 
     m_areas.clear();
     m_brk = m_areas.end();
@@ -191,11 +211,11 @@ mm_list::iterator mm_list::split(iterator area, uintptr_t addr)
     return iter;
 }
 
-int mm_list::unmap(iterator area)
+int mm_list::unmap(iterator area, bool should_invalidate_tlb)
 {
     using namespace paging;
 
-    bool should_invlpg = area->end - area->start <= 0x4000;
+    bool should_use_invlpg = area->end - area->start <= 0x4000;
     auto range = vaddr_range{m_pt, area->start, area->end};
     uintptr_t cur_addr = area->start;
 
@@ -204,19 +224,19 @@ int mm_list::unmap(iterator area)
         free_page(pte.pfn());
         pte.clear();
 
-        if (should_invlpg) {
-            asm volatile("invlpg (%0)": :"r"(cur_addr) :"memory");
+        if (should_invalidate_tlb && should_use_invlpg) {
+            asm volatile("invlpg (%0)": : "r"(cur_addr): "memory");
             cur_addr += 0x1000;
         }
     }
 
-    if (!should_invlpg)
-        asm volatile("mov %%cr3, %%rax\n\t mov %%rax, %%cr3": : : "rax", "memory");
+    if (should_invalidate_tlb && !should_use_invlpg)
+        __invalidate_all_tlb();
 
     return 0;
 }
 
-int mm_list::unmap(uintptr_t start, std::size_t length)
+int mm_list::unmap(uintptr_t start, std::size_t length, bool should_invalidate_tlb)
 {
     // standard says that addr and len MUST be
     // page-aligned or the call is invalid
@@ -246,7 +266,7 @@ int mm_list::unmap(uintptr_t start, std::size_t length)
         // iter.end <= end
         // it is safe to unmap the area directly
         if (*iter < end) {
-            if (int ret = unmap(iter); ret != 0)
+            if (int ret = unmap(iter, should_invalidate_tlb); ret != 0)
                 return ret;
 
             iter = m_areas.erase(iter);
@@ -263,7 +283,7 @@ int mm_list::unmap(uintptr_t start, std::size_t length)
         }
 
         (void)split(iter, end);
-        if (int ret = unmap(iter); ret != 0)
+        if (int ret = unmap(iter, should_invalidate_tlb); ret != 0)
             return ret;
 
         iter = m_areas.erase(iter);

+ 41 - 18
src/kernel/mem/paging.cc

@@ -38,51 +38,62 @@ static struct zone_info {
 
 static mutex zone_lock;
 
-constexpr int _msb(std::size_t x)
+constexpr unsigned _msb(std::size_t x)
 {
-    int n = 0;
+    unsigned n = 0;
     while (x >>= 1)
         n++;
     return n;
 }
 
-constexpr pfn_t buddy(pfn_t pfn, int order)
+constexpr pfn_t buddy(pfn_t pfn, unsigned order)
 {
     return pfn ^ (1 << (order + 12));
 }
 
-constexpr pfn_t parent(pfn_t pfn, int order)
+constexpr pfn_t parent(pfn_t pfn, unsigned order)
 {
     return pfn & ~(1 << (order + 12));
 }
 
 // call with zone_lock held
-static inline void _zone_list_insert(int order, page* zone)
+static inline void _zone_list_insert(unsigned order, page* zone)
 {
+    assert(zone->flags & PAGE_PRESENT && zone->flags & PAGE_BUDDY);
+    assert((zone->flags & 0xff) == 0);
+    zone->flags |= order;
+
     zones[order].count++;
     list_insert(&zones[order].next, zone);
 }
 
 // call with zone_lock held
-static inline void _zone_list_remove(int order, page* zone)
+static inline void _zone_list_remove(unsigned order, page* zone)
 {
+    assert(zone->flags & PAGE_PRESENT && zone->flags & PAGE_BUDDY);
+    assert(zones[order].count > 0 && (zone->flags & 0xff) == order);
+    zone->flags &= ~0xff;
+
     zones[order].count--;
     list_remove(&zones[order].next, zone);
 }
 
 // call with zone_lock held
-static inline page* _zone_list_get(int order)
+static inline page* _zone_list_get(unsigned order)
 {
     if (zones[order].count == 0)
         return nullptr;
 
     zones[order].count--;
-    return list_get(&zones[order].next);
+    auto* pg = list_get(&zones[order].next);
+
+    assert((pg->flags & 0xff) == order);
+    return pg;
 }
 
 // where order represents power of 2
 // call with zone_lock held
-static inline page* _create_zone(pfn_t pfn, int order)
+static inline page* _create_zone(pfn_t pfn, unsigned order)
 {
     page* zone = pfn_to_page(pfn);
 
@@ -94,7 +105,7 @@ static inline page* _create_zone(pfn_t pfn, int order)
 }
 
 // call with zone_lock held
-static inline void _split_zone(page* zone, int order, int target_order)
+static inline void _split_zone(page* zone, unsigned order, unsigned target_order)
 {
     while (order > target_order) {
         pfn_t pfn = page_to_pfn(zone);
@@ -102,12 +113,15 @@ static inline void _split_zone(page* zone, int order, int target_order)
 
         order--;
     }
+
+    zone->flags &= ~0xff;
+    zone->flags |= target_order;
 }
 
 // call with zone_lock held
-static inline page* _alloc_zone(int order)
+static inline page* _alloc_zone(unsigned order)
 {
-    for (int i = order; i < 52; ++i) {
+    for (unsigned i = order; i < 52; ++i) {
         auto zone = _zone_list_get(i);
         if (!zone)
             continue;
@@ -136,7 +150,7 @@ void kernel::mem::paging::create_zone(uintptr_t start, uintptr_t end)
     lock_guard_irq lock{zone_lock};
 
     unsigned long low = start;
-    for (int i = 0; i < _msb(end); ++i, low >>= 1) {
+    for (unsigned i = 0; i < _msb(end); ++i, low >>= 1) {
         if (!(low & 1))
             continue;
         _create_zone(low << (12+i), i);
@@ -145,7 +159,7 @@ void kernel::mem::paging::create_zone(uintptr_t start, uintptr_t end)
 
     low = 1 << _msb(end);
     while (low < end) {
-        int order = _msb(end - low);
+        unsigned order = _msb(end - low);
         _create_zone(low << 12, order);
         low |= (1 << order);
     }
@@ -162,7 +176,7 @@ void kernel::mem::paging::mark_present(uintptr_t start, uintptr_t end)
         PAGE_ARRAY[start++].flags |= PAGE_PRESENT;
 }
 
-page* kernel::mem::paging::alloc_pages(int order)
+page* kernel::mem::paging::alloc_pages(unsigned order)
 {
     lock_guard_irq lock{zone_lock};
     auto* zone = _alloc_zone(order);
@@ -187,8 +201,10 @@ pfn_t kernel::mem::paging::alloc_page_table()
     return pfn;
 }
 
-void kernel::mem::paging::free_pages(page* pg, int order)
+void kernel::mem::paging::free_pages(page* pg, unsigned order)
 {
+    assert((pg->flags & 0xff) == order);
+
     // TODO: atomic
     if (!(pg->flags & PAGE_BUDDY) || --pg->refcount)
         return;
@@ -199,7 +215,13 @@ void kernel::mem::paging::free_pages(page* pg, int order)
         pfn_t buddy_pfn = buddy(pfn, order);
         page* buddy_page = pfn_to_page(buddy_pfn);
 
-        if (!(buddy_page->flags & PAGE_BUDDY) || buddy_page->refcount)
+        if (!(buddy_page->flags & PAGE_BUDDY))
+            break;
+
+        if ((buddy_page->flags & 0xff) != order)
+            break;
+
+        if (buddy_page->refcount)
             break;
 
         _zone_list_remove(order, buddy_page);
@@ -211,6 +233,7 @@ void kernel::mem::paging::free_pages(page* pg, int order)
         order++;
     }
 
+    pg->flags &= ~0xff;
     _zone_list_insert(order, pg);
 }
 
@@ -219,7 +242,7 @@ void kernel::mem::paging::free_page(page* page)
     return free_pages(page, 0);
 }
 
-void kernel::mem::paging::free_pages(pfn_t pfn, int order)
+void kernel::mem::paging::free_pages(pfn_t pfn, unsigned order)
 {
     return free_pages(pfn_to_page(pfn), order);
 }

+ 6 - 7
src/kernel/process.cpp

@@ -261,13 +261,13 @@ static inline void __spawn(kernel::task::thread& thd, uintptr_t entry)
     // return(start) address
     thd.kstack.pushq(entry);
     thd.kstack.pushq(0x200);       // flags
-    thd.kstack.pushq(0);           // 0 for alignment
-    thd.kstack.pushq(0);           // rbx
-    thd.kstack.pushq(0);           // rbp
-    thd.kstack.pushq(0);           // r12
-    thd.kstack.pushq(0);           // r13
-    thd.kstack.pushq(0);           // r14
     thd.kstack.pushq(0);           // r15
+    thd.kstack.pushq(0);           // r14
+    thd.kstack.pushq(0);           // r13
+    thd.kstack.pushq(0);           // r12
+    thd.kstack.pushq(0);           // rbp
+    thd.kstack.pushq(0);           // rbx
+    thd.kstack.pushq(0);           // 0 for alignment
     thd.kstack.pushq(prev_sp);     // previous sp
 }
 
@@ -526,7 +526,6 @@ bool schedule()
 {
     if (kernel::async::preempt_count() != 0)
         return true;
-    return true;
 
     auto* next_thd = kernel::task::dispatcher::next();
 

+ 74 - 15
src/kernel/syscall.cpp

@@ -11,6 +11,7 @@
 #include <sys/prctl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <sys/uio.h>
 #include <sys/utsname.h>
 #include <sys/wait.h>
 #include <termios.h>
@@ -21,6 +22,7 @@
 #include <types/elf.hpp>
 #include <types/path.hpp>
 #include <types/types.h>
+#include <types/user_types.hpp>
 
 #include <kernel/async/lock.hpp>
 #include <kernel/hw/timer.hpp>
@@ -120,8 +122,6 @@ DEFINE_SYSCALL32(chdir, const char __user*, path)
 DEFINE_SYSCALL32(symlink, const char __user*, target, const char __user*, linkpath)
 DEFINE_SYSCALL32(readlink, const char __user*, pathname, char __user*, buf, size_t, buf_size)
 DEFINE_SYSCALL32(ioctl, int, fd, unsigned long, request, uintptr_t, arg3)
-DEFINE_SYSCALL32(readv, int, fd, const iovec __user*, iov, int, iovcnt)
-DEFINE_SYSCALL32(writev, int, fd, const iovec __user*, iov, int, iovcnt)
 DEFINE_SYSCALL32(munmap, uintptr_t, addr, size_t, len)
 DEFINE_SYSCALL32(poll, pollfd __user*, fds, nfds_t, nfds, int, timeout)
 DEFINE_SYSCALL32(mknod, const char __user*, pathname, mode_t, mode, dev_t, dev)
@@ -201,9 +201,9 @@ static uint32_t _syscall32_fork(interrupt_stack_normal* data, mmx_registers* mmx
     newthd->kstack.pushq(data->flags);
     newthd->kstack.pushq(data->cs);
     newthd->kstack.pushq(data->v_rip);
-    newthd->kstack.pushq(0); // 0 for 16 bytes alignment
-
     auto cur_sp = newthd->kstack.sp;
+
+    newthd->kstack.pushq(0); // 0 for 16 bytes alignment
     newthd->kstack.pushq(cur_sp);
     newthd->kstack.pushq(data->head.s_regs.rbp);
     newthd->kstack.pushq(data->head.s_regs.r15);
@@ -219,7 +219,7 @@ static uint32_t _syscall32_fork(interrupt_stack_normal* data, mmx_registers* mmx
     newthd->kstack.pushq(data->head.s_regs.rdx);
     newthd->kstack.pushq(data->head.s_regs.rcx);
     newthd->kstack.pushq(data->head.s_regs.rbx);
-    newthd->kstack.pushq(data->head.s_regs.rax);
+    newthd->kstack.pushq(0); // rax: return value
 
     cur_sp = newthd->kstack.sp;
 
@@ -229,14 +229,14 @@ static uint32_t _syscall32_fork(interrupt_stack_normal* data, mmx_registers* mmx
     // asm_ctx_switch stack
     // return(start) address
     newthd->kstack.pushq((uintptr_t)ISR_stub_restore);
-    newthd->kstack.pushq(0x200);          // flags
-    newthd->kstack.pushq(0);              // 0 for alignment
-    newthd->kstack.pushq(cur_sp);         // rbx
-    newthd->kstack.pushq(0);              // rbp
-    newthd->kstack.pushq(0);              // r12
-    newthd->kstack.pushq(0);              // r13
-    newthd->kstack.pushq(0);              // r14
+    newthd->kstack.pushq(0);              // flags
     newthd->kstack.pushq(0);              // r15
+    newthd->kstack.pushq(0);              // r14
+    newthd->kstack.pushq(0);              // r13
+    newthd->kstack.pushq(0);              // r12
+    newthd->kstack.pushq(0);              // rbp
+    newthd->kstack.pushq(cur_sp);         // rbx
+    newthd->kstack.pushq(0);              // 0 for alignment
     newthd->kstack.pushq(newthd_prev_sp); // previous sp
 
     return newproc.pid;
@@ -265,6 +265,48 @@ static uint32_t _syscall32_llseek(interrupt_stack_normal* data, mmx_registers*)
     return 0;
 }
 
+static uint32_t _syscall32_readv(interrupt_stack_normal* data, mmx_registers*)
+{
+    SYSCALL32_ARG1(int, fd);
+    SYSCALL32_ARG2(const types::iovec32 __user*, _iov);
+    SYSCALL32_ARG3(int, iovcnt);
+
+    // TODO: use copy_from_user
+    if (!_iov)
+        return -EFAULT;
+
+    std::vector<iovec> iov(iovcnt);
+    for (int i = 0; i < iovcnt; ++i) {
+        // TODO: check access right
+        uintptr_t base = _iov[i].iov_base;
+        iov[i].iov_base = (void*)base;
+        iov[i].iov_len = _iov[i].iov_len;
+    }
+
+    return kernel::syscall::do_readv(fd, iov.data(), iovcnt);
+}
+
+static uint32_t _syscall32_writev(interrupt_stack_normal* data, mmx_registers*)
+{
+    SYSCALL32_ARG1(int, fd);
+    SYSCALL32_ARG2(const types::iovec32 __user*, _iov);
+    SYSCALL32_ARG3(int, iovcnt);
+
+    // TODO: use copy_from_user
+    if (!_iov)
+        return -EFAULT;
+
+    std::vector<iovec> iov(iovcnt);
+    for (int i = 0; i < iovcnt; ++i) {
+        // TODO: check access right
+        uintptr_t base = _iov[i].iov_base;
+        iov[i].iov_base = (void*)base;
+        iov[i].iov_len = _iov[i].iov_len;
+    }
+
+    return kernel::syscall::do_writev(fd, iov.data(), iovcnt);
+}
+
 [[noreturn]] static uint32_t _syscall32_exit_group(
         interrupt_stack_normal* data, mmx_registers* mmxregs)
 {
@@ -275,9 +317,26 @@ static uint32_t _syscall32_llseek(interrupt_stack_normal* data, mmx_registers*)
 static uint32_t _syscall32_execve(interrupt_stack_normal* data, mmx_registers*)
 {
     SYSCALL32_ARG1(const char __user*, exec);
-    SYSCALL32_ARG2(char __user* const __user*, argv);
-    SYSCALL32_ARG3(char __user* const __user*, envp);
-    auto retval = kernel::syscall::do_execve(exec, argv, envp);
+    SYSCALL32_ARG2(const uint32_t __user*, argv);
+    SYSCALL32_ARG3(const uint32_t __user*, envp);
+
+    if (!exec || !argv || !envp)
+        return -EFAULT;
+
+    std::vector<std::string> args, envs;
+
+    // TODO: use copy_from_user
+    while (*argv) {
+        uintptr_t addr = *(argv++);
+        args.push_back((char __user*)addr);
+    }
+
+    while (*envp) {
+        uintptr_t addr = *(envp++);
+        envs.push_back((char __user*)addr);
+    }
+
+    auto retval = kernel::syscall::do_execve(exec, args, envs);
 
     if (retval.status == 0) {
         // TODO: switch cs ans ss

+ 4 - 4
src/kernel/syscall/fileops.cc

@@ -195,7 +195,7 @@ int kernel::syscall::do_ioctl(int fd, unsigned long request, uintptr_t arg3)
     return 0;
 }
 
-ssize_t kernel::syscall::do_readv(int fd, const iovec __user* iov, int iovcnt)
+ssize_t kernel::syscall::do_readv(int fd, const iovec* iov, int iovcnt)
 {
     auto* file = current_process->files[fd];
 
@@ -224,7 +224,7 @@ ssize_t kernel::syscall::do_readv(int fd, const iovec __user* iov, int iovcnt)
 }
 
 // TODO: this operation SHOULD be atomic
-ssize_t kernel::syscall::do_writev(int fd, const iovec __user* iov, int iovcnt)
+ssize_t kernel::syscall::do_writev(int fd, const iovec* iov, int iovcnt)
 {
     auto* file = current_process->files[fd];
 
@@ -281,7 +281,7 @@ uintptr_t kernel::syscall::do_mmap_pgoff(uintptr_t addr, size_t len,
 
         // do unmapping, equal to munmap, MAP_FIXED set
         if (prot == PROT_NONE) {
-            if (int ret = mms.unmap(addr, len); ret != 0)
+            if (int ret = mms.unmap(addr, len, true); ret != 0)
                 return ret;
         }
         else {
@@ -320,7 +320,7 @@ int kernel::syscall::do_munmap(uintptr_t addr, size_t len)
     if (addr & 0xfff)
         return -EINVAL;
 
-    return current_process->mms.unmap(addr, len);
+    return current_process->mms.unmap(addr, len, true);
 }
 
 ssize_t kernel::syscall::do_sendfile(int out_fd, int in_fd,

+ 13 - 15
src/kernel/syscall/procops.cc

@@ -1,3 +1,6 @@
+#include <string>
+#include <vector>
+
 #include <sys/prctl.h>
 #include <sys/utsname.h>
 #include <sys/wait.h>
@@ -38,24 +41,19 @@ int kernel::syscall::do_chdir(const char __user* path)
 }
 
 execve_retval kernel::syscall::do_execve(
-        const char __user* exec,
-        char __user* const __user* argv,
-        char __user* const __user* envp)
+        const std::string& exec,
+        const std::vector<std::string>& args,
+        const std::vector<std::string>& envs)
 {
-    types::elf::elf32_load_data d;
-
-    if (!exec || !argv || !envp)
-        return { 0, 0, -EFAULT };
-
-    // TODO: use copy_from_user
-    while (*argv)
-        d.argv.push_back(*(argv++));
-
-    while (*envp)
-        d.envp.push_back(*(envp++));
+    types::elf::elf32_load_data d{
+        .exec_dent{},
+        .argv{args},
+        .envp{envs},
+        .ip{}, .sp{},
+    };
 
     d.exec_dent = fs::vfs_open(*current_process->root,
-            current_process->pwd + exec);
+            current_process->pwd + exec.c_str());
 
     if (!d.exec_dent)
         return { 0, 0, -ENOENT };

+ 2 - 2
src/kernel/task/thread.cc

@@ -83,15 +83,15 @@ thread::kernel_stack::~kernel_stack()
 
 uint64_t thread::kernel_stack::pushq(uint64_t val)
 {
-    *(uint64_t*)sp = val;
     sp -= 8;
+    *(uint64_t*)sp = val;
     return val;
 }
 
 uint32_t thread::kernel_stack::pushl(uint32_t val)
 {
-    *(uint32_t*)sp = val;
     sp -= 4;
+    *(uint32_t*)sp = val;
     return val;
 }
 

+ 3 - 0
src/kernel/user/thread_local.cc

@@ -10,6 +10,9 @@ using namespace kernel::user;
 
 void kernel::user::load_thread_area32(uint64_t desc)
 {
+    if (!desc)
+        return;
+
     kernel::mem::gdt[7] = desc;
 
     asm volatile(