@@ -7,289 +7,316 @@
#include <kernel/mm.hpp>
#include <kernel/process.hpp>
#include <kernel/stdio.h>
-#include <kernel/syscall.hpp>
#include <kernel/tty.h>
#include <kernel/vfs.hpp>
#include <kernel_main.h>
#include <types/allocator.hpp>
+#include <types/assert.h>
#include <types/elf.hpp>
-#include <types/lock.h>
+#include <types/hash_map.hpp>
+#include <types/list.hpp>
+#include <types/lock.hpp>
+#include <types/size.h>
#include <types/status.h>
+#include <types/stdint.h>
#include <types/types.h>

-extern "C" void NORETURN to_kernel(interrupt_stack* ret_stack);
-extern "C" void NORETURN to_user(interrupt_stack* ret_stack);
-
static bool is_scheduler_ready;
static types::list<process>* processes;
+static types::hash_map<pid_t, types::list<process>::iterator_type, types::linux_hasher<pid_t>>* idx_processes;
static types::list<thread*>* ready_thds;
-static pid_t max_pid = 1;
static void (*volatile kthreadd_new_thd_func)(void*);
static void* volatile kthreadd_new_thd_data;
-static uint32_t volatile kthreadd_lock = 0;
+static types::mutex kthreadd_mtx;
+
+namespace kernel {
+
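+// RAII guard: interrupts are disabled (asm_cli) for as long as an instance
+// of this type is alive, and re-enabled (asm_sti) when it goes out of scope.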
+struct no_irq_guard {
+    explicit no_irq_guard()
+    {
+        asm_cli();
+    }
+
+    no_irq_guard(const no_irq_guard&) = delete;
+    no_irq_guard& operator=(const no_irq_guard&) = delete;

-thread* current_thread;
-process* current_process;
+    ~no_irq_guard()
+    {
+        asm_sti();
+    }
+};
+
+} // namespace kernel

process::process(process&& val)
    : mms(types::move(val.mms))
    , thds(types::move(val.thds))
+    , wait_lst(types::move(val.wait_lst))
    , pid(val.pid)
+    , ppid(val.ppid)
{
    if (current_process == &val)
        current_process = this;

    attr.system = val.attr.system;
-    k_esp = val.k_esp;

    for (auto& item : thds)
        item.owner = this;

-    val.k_esp = nullptr;
    val.attr.system = 0;
}

process::process(const process& val, const thread& main_thd)
    : mms(*kernel_mms)
    , attr { .system = val.attr.system }
-    , pid { max_pid++ }
+    , pid { process::alloc_pid() }
+    , ppid { val.pid }
{
    auto iter_thd = thds.emplace_back(main_thd);
    iter_thd->owner = this;

-    // TODO: allocate low mem
-    k_esp = (void*)to_pp(alloc_n_raw_pages(2));
-    memcpy(k_esp, (char*)main_thd.owner->k_esp - THREAD_KERNEL_STACK_SIZE, THREAD_KERNEL_STACK_SIZE);
-    k_esp = (char*)k_esp + THREAD_KERNEL_STACK_SIZE;
-
-    if (val.attr.system) {
-        auto orig_k_esp = (uint32_t)main_thd.owner->k_esp;
-
-        iter_thd->regs.ebp -= orig_k_esp;
-        iter_thd->regs.ebp += (uint32_t)k_esp;
-
-        iter_thd->regs.esp -= orig_k_esp;
-        iter_thd->regs.esp += (uint32_t)k_esp;
-    } else {
-        pd_t pd = alloc_pd();
-        memcpy(pd, mms_get_pd(kernel_mms), PAGE_SIZE);
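+    // identity-mapped (kernel) areas are shared as-is; every other area of
+    // the parent is mirrored into the child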
+ for (auto& area : val.mms) {
+        if (area.is_ident())
+            continue;

-        mms.begin()->pd = pd;
-
-        // skip kernel heap since it's already copied above
-        for (auto iter_src = ++val.mms.cbegin(); iter_src != val.mms.cend(); ++iter_src)
-            mm::mirror_mm_area(&mms, iter_src.ptr(), pd);
+        mms.mirror_area(area);
    }
}

-process::process(void* start_eip)
+process::process(void)
    : mms(*kernel_mms)
-    , thds {}
    , attr { .system = 1 }
-    , pid { max_pid++ }
+    , pid { process::alloc_pid() }
+    , ppid { 1 }
{
-    // TODO: allocate low mem
-    k_esp = (void*)to_pp(alloc_n_raw_pages(2));
-    memset((char*)k_esp, 0x00, THREAD_KERNEL_STACK_SIZE);
-    k_esp = (char*)k_esp + THREAD_KERNEL_STACK_SIZE;
-
-    auto thd = thds.emplace_back(thread {
-        .eip = start_eip,
-        .owner = this,
-        .regs {
-            .edi {},
-            .esi {},
-            .ebp = reinterpret_cast<uint32_t>(k_esp),
-            .esp = reinterpret_cast<uint32_t>(k_esp),
-            .ebx {},
-            .edx {},
-            .ecx {},
-            .eax {},
-        },
-        .eflags {},
-        .attr {
-            .system = 1,
-            .ready = 1,
-            .wait = 0,
-        },
-    });
-    ready_thds->push_back(thd.ptr());
+    auto thd = thds.emplace_back(this, true);
+
+    add_to_ready_list(thd.ptr());
}

-void NORETURN _kernel_init(void)
+process::process(void (*func)(void), pid_t _ppid)
+    : mms(*kernel_mms)
+    , attr { .system = 1 }
+    , pid { process::alloc_pid() }
+    , ppid { _ppid }
{
-    // TODO: parse kernel parameters
-    auto* _new_fs = fs::register_fs(types::kernel_allocator_new<fs::fat::fat32>(fs::vfs_open("/dev/hda1")->ind));
-    int ret = fs::fs_root->ind->fs->mount(fs::vfs_open("/mnt"), _new_fs);
-    if (unlikely(ret != GB_OK))
-        syscall(0x03);
-
-    pd_t new_pd = alloc_pd();
-    memcpy(new_pd, mms_get_pd(kernel_mms), PAGE_SIZE);
-
-    asm_cli();
-
-    current_process->mms.begin()->pd = new_pd;
-
-    asm_sti();
+    auto thd = thds.emplace_back(this, true);
+
+    add_to_ready_list(thd.ptr());
+
+    auto* esp = &thd->esp;
+
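+    // hand-build the initial kernel stack frame: the first asm_ctx_switch()
+    // into this thread pops the values pushed below and returns into func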
+ // return(start) address
+    push_stack(esp, (uint32_t)func);
+    // ebx
+    push_stack(esp, 0);
+    // edi
+    push_stack(esp, 0);
+    // esi
+    push_stack(esp, 0);
+    // ebp
+    push_stack(esp, 0);
+    // eflags
+    push_stack(esp, 0x200);
+}

-    interrupt_stack intrpt_stack {};
-    intrpt_stack.eflags = 0x200; // STI
-    types::elf::elf32_load("/mnt/INIT.ELF", &intrpt_stack, 0);
-    // map stack area
-    ret = mmap((void*)types::elf::ELF_STACK_TOP, types::elf::ELF_STACK_SIZE, fs::vfs_open("/dev/null")->ind, 0, 1, 0);
-    if (unlikely(ret != GB_OK))
-        syscall(0x03);
+process::~process()
+{
+    for (auto iter = thds.begin(); iter != thds.end(); ++iter)
+        remove_from_ready_list(iter.ptr());
+}

-    asm_cli();
-    current_process->attr.system = 0;
-    current_thread->attr.system = 0;
-    to_user(&intrpt_stack);
+inline void NORETURN _noreturn_crash(void)
+{
+    for (;;)
+        assert(false);
}

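+// entry points implemented outside this file (likely in assembly): go_kernel
+// starts a kernel thread on the given stack, go_user drops to user mode at eip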
+extern "C" void NORETURN go_kernel(uint32_t* kstack, void (*k_main)(void));
+extern "C" void NORETURN go_user(void* eip, uint32_t* esp);
+
void kernel_threadd_main(void)
{
    tty_print(console, "kernel thread daemon started\n");
-    k_new_thread(hw::init_ata, (void*)_kernel_init);
+
    for (;;) {
        if (kthreadd_new_thd_func) {
-            spin_lock(&kthreadd_lock);
-            int return_value = 0;
+            void (*func)(void*) = nullptr;
+            void* data = nullptr;

-            void (*func)(void*) = kthreadd_new_thd_func;
-            void* data = kthreadd_new_thd_data;
-            kthreadd_new_thd_func = nullptr;
-            kthreadd_new_thd_data = nullptr;
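+            // copy the request out under the mutex, then clear it for the
+            // next k_new_thread() call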
+ {
+                types::lock_guard lck(kthreadd_mtx);
+                func = kthreadd_new_thd_func;
+                data = kthreadd_new_thd_data;

-            spin_unlock(&kthreadd_lock);
+                kthreadd_new_thd_func = nullptr;
+                kthreadd_new_thd_data = nullptr;
+            }
+
+            // TODO
+            (void)func, (void)data;
+            assert(false);

            // syscall_fork
-            return_value = syscall(0x00);
-
-            if (return_value != 0) {
-                // child
-                func(data);
-                for (;;)
-                    syscall(0x03);
-                // TODO: syscall_exit()
-            }
-            spin_unlock(&kthreadd_lock);
+            // int ret = syscall(0x00);
+
+            // if (ret == 0) {
+            //     // child process
+            //     func(data);
+            //     // the function shouldn't return here
+            //     assert(false);
+            // }
        }
+        // TODO: sleep here to wait for new_kernel_thread event
        asm_hlt();
    }
}

+void NORETURN _kernel_init(void)
+{
+    {
+        kernel::no_irq_guard grd;
+
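+        // with IRQs off, the timer interrupt cannot reschedule while the
+        // process list is being modified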
+ add_to_process_list(process { kernel_threadd_main, 1 });
+    }
+    hw::init_ata();
+
+    // TODO: parse kernel parameters
+    auto* _new_fs = fs::register_fs(types::kernel_allocator_new<fs::fat::fat32>(fs::vfs_open("/dev/hda1")->ind));
+    int ret = fs::fs_root->ind->fs->mount(fs::vfs_open("/mnt"), _new_fs);
+    assert_likely(ret == GB_OK);
+
+    current_process->attr.system = 0;
+    current_thread->attr.system = 0;
+
+    const char* argv[] = { "/mnt/INIT.ELF", nullptr };
+
+    types::elf::elf32_load_data d;
+    d.exec = "/mnt/INIT.ELF";
+    d.argv = argv;
+    d.system = false;
+
+    ret = types::elf::elf32_load(&d);
+    assert_likely(ret == GB_OK);
+
+    is_scheduler_ready = true;
+
+    go_user(d.eip, d.sp);
+}
+
void k_new_thread(void (*func)(void*), void* data)
{
-    spin_lock(&kthreadd_lock);
+    types::lock_guard lck(kthreadd_mtx);
    kthreadd_new_thd_func = func;
    kthreadd_new_thd_data = data;
-    spin_unlock(&kthreadd_lock);
}

void NORETURN init_scheduler()
{
-    processes = types::kernel_allocator_new<types::list<process>>();
-    ready_thds = types::kernel_allocator_new<types::list<thread*>>();
+    processes = types::kernel_allocator_pnew(processes);
+    ready_thds = types::kernel_allocator_pnew(ready_thds);
+    idx_processes = types::kernel_allocator_pnew(idx_processes);
+    idx_child_processes = types::kernel_allocator_pnew(idx_child_processes);

-    auto iter = processes->emplace_back((void*)kernel_threadd_main);
+    auto pid = add_to_process_list(process {});
+    auto init = findproc(pid);

    // interrupts must be enabled later for COW mapping, so for now we
    // disable them in case the timer interrupt messes things up
    asm_cli();

-    current_process = iter.ptr();
-    current_thread = iter->thds.begin().ptr();
+    current_process = init;
+    current_thread = init->thds.begin().ptr();

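+    // ss0/esp0: the kernel stack the CPU switches to on a ring-3 to ring-0
+    // transition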
tss.ss0 = KERNEL_DATA_SEGMENT;
-    tss.esp0 = (uint32_t)iter->k_esp;
+    tss.esp0 = current_thread->kstack;

-    asm_switch_pd(mms_get_pd(&current_process->mms));
+    asm_switch_pd(current_process->mms.m_pd);

-    is_scheduler_ready = true;
-
-    interrupt_stack intrpt_stack {};
-    process_context_load(&intrpt_stack, current_process);
-    thread_context_load(&intrpt_stack, current_thread);
-    to_kernel(&intrpt_stack);
+    go_kernel(current_thread->esp, _kernel_init);
}

-void thread_context_save(interrupt_stack* int_stack, thread* thd)
+pid_t add_to_process_list(process&& proc)
{
-    thd->eflags = int_stack->eflags;
-    thd->eip = int_stack->v_eip;
-    memcpy(&thd->regs, &int_stack->s_regs, sizeof(regs_32));
-    if (thd->attr.system)
-        thd->regs.esp = int_stack->s_regs.esp + 0x0c;
-    else
-        thd->regs.esp = int_stack->esp;
-}
+    auto iter = processes->emplace_back(types::move(proc));
+    idx_processes->insert(iter->pid, iter);

-void thread_context_load(interrupt_stack* int_stack, thread* thd)
-{
-    int_stack->eflags = (thd->eflags | 0x200); // OR $STI
-    int_stack->v_eip = thd->eip;
-    memcpy(&int_stack->s_regs, &thd->regs, sizeof(regs_32));
-    current_thread = thd;
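+    // record the new pid in its parent's child list, creating the list on
+    // first use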
+ auto children = idx_child_processes->find(iter->ppid);
+    auto children = idx_child_processes->find(iter->ppid);
+    if (!children) {
+        idx_child_processes->insert(iter->ppid, {});
+        children = idx_child_processes->find(iter->ppid);
+    }
+
+    children->value.push_back(iter->pid);
+
+    return iter->pid;
}

-void process_context_save(interrupt_stack*, process*)
+void remove_from_process_list(pid_t pid)
{
+    auto proc_iter = idx_processes->find(pid);
+    auto ppid = proc_iter->value->ppid;
+
+    auto& parent_children = idx_child_processes->find(ppid)->value;
+
+    auto i = parent_children.find(pid);
+    parent_children.erase(i);
+
+    processes->erase(proc_iter->value);
+    idx_processes->remove(proc_iter);
}

-void process_context_load(interrupt_stack*, process* proc)
+void add_to_ready_list(thread* thd)
{
-    if (!proc->attr.system)
-        tss.esp0 = (uint32_t)proc->k_esp;
-    asm_switch_pd(mms_get_pd(&proc->mms));
-    current_process = proc;
+    ready_thds->push_back(thd);
}

-void add_to_process_list(process&& proc)
+void remove_from_ready_list(thread* thd)
{
-    processes->push_back(types::move(proc));
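+    // a thread may have been queued more than once; erase every occurrence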
+ auto iter = ready_thds->find(thd);
+    auto iter = ready_thds->find(thd);
+    while (iter != ready_thds->end()) {
+        ready_thds->erase(iter);
+        iter = ready_thds->find(thd);
+    }
}

-void add_to_ready_list(thread* thd)
+types::list<thread*>::iterator_type query_next_thread(void)
{
-    ready_thds->push_back(thd);
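+    // drop threads that are no longer ready from the front of the queue and
+    // return the first runnable one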
+ auto iter_thd = ready_thds->begin();
+    auto iter_thd = ready_thds->begin();
+    while (!((*iter_thd)->attr.ready))
+        iter_thd = ready_thds->erase(iter_thd);
+    return iter_thd;
}

-static inline void next_task(const types::list<thread*>::iterator_type& iter_to_remove, thread* cur_thd)
+process* findproc(pid_t pid)
{
-    ready_thds->erase(iter_to_remove);
-    if (cur_thd->attr.ready)
-        ready_thds->push_back(cur_thd);
+    return idx_processes->find(pid)->value.ptr();
}

-void do_scheduling(interrupt_stack* intrpt_data)
+extern "C" void asm_ctx_switch(uint32_t** curr_esp, uint32_t* next_esp);
+void schedule()
{
    if (unlikely(!is_scheduler_ready))
        return;

-    auto iter_thd = ready_thds->begin();
-    while (!((*iter_thd)->attr.ready))
-        iter_thd = ready_thds->erase(iter_thd);
+    auto iter_thd = query_next_thread();
    auto thd = *iter_thd;

    if (current_thread == thd) {
-        next_task(iter_thd, thd);
+        next_task(iter_thd);
        return;
    }

    process* proc = thd->owner;
    if (current_process != proc) {
-        process_context_save(intrpt_data, current_process);
-        process_context_load(intrpt_data, proc);
+        asm_switch_pd(proc->mms.m_pd);
+        current_process = proc;
    }

-    thread_context_save(intrpt_data, current_thread);
-    thread_context_load(intrpt_data, thd);
+    auto* curr_thd = current_thread;

-    next_task(iter_thd, thd);
+    current_thread = thd;
+    tss.esp0 = current_thread->kstack;
+    next_task(iter_thd);

-    if (thd->attr.system)
-        to_kernel(intrpt_data);
-    else
-        to_user(intrpt_data);
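+    // swap kernel stacks; this call "returns" only when a later schedule()
+    // switches back to curr_thd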
+ asm_ctx_switch(&curr_thd->esp, thd->esp);
}