// process.cpp — process/thread lifecycle and the round-robin scheduler.
  1. #include <asm/port_io.h>
  2. #include <asm/sys.h>
  3. #include <fs/fat.hpp>
  4. #include <kernel/hw/ata.hpp>
  5. #include <kernel/interrupt.h>
  6. #include <kernel/mem.h>
  7. #include <kernel/mm.hpp>
  8. #include <kernel/process.hpp>
  9. #include <kernel/stdio.h>
  10. #include <kernel/syscall.hpp>
  11. #include <kernel/tty.h>
  12. #include <kernel/vfs.hpp>
  13. #include <kernel_main.h>
  14. #include <types/allocator.hpp>
  15. #include <types/elf.hpp>
  16. #include <types/lock.h>
  17. #include <types/status.h>
  18. #include <types/types.h>
// Context-switch exits implemented in assembly: both restore the given
// interrupt stack frame and never return here.
extern "C" void NORETURN to_kernel(interrupt_stack* ret_stack);
extern "C" void NORETURN to_user(interrupt_stack* ret_stack);
// Set once init_scheduler() has installed the first process;
// do_scheduling() is a no-op until then.
static bool is_scheduler_ready;
// Global process table and round-robin ready queue; both allocated in
// init_scheduler().
static types::list<process>* processes;
static types::list<thread*>* ready_thds;
// Next pid to hand out; monotonically increasing, never reused.
static pid_t max_pid = 1;
// One-slot mailbox used by k_new_thread() to pass a (func, data) request
// to the kernel thread daemon; guarded by kthreadd_lock.
static void (*volatile kthreadd_new_thd_func)(void*);
static void* volatile kthreadd_new_thd_data;
static uint32_t volatile kthreadd_lock = 0;
// The thread and process currently executing on the CPU.
thread* current_thread;
process* current_process;
  30. process::process(process&& val)
  31. : mms(types::move(val.mms))
  32. , thds(types::move(val.thds))
  33. , pid(val.pid)
  34. {
  35. if (current_process == &val)
  36. current_process = this;
  37. attr.system = val.attr.system;
  38. k_esp = val.k_esp;
  39. for (auto& item : thds)
  40. item.owner = this;
  41. val.k_esp = nullptr;
  42. val.attr.system = 0;
  43. }
// Fork-style copy constructor: build a new process whose single thread is
// a copy of `main_thd`, with a duplicated kernel stack and (for user
// processes) a mirrored address space.
process::process(const process& val, const thread& main_thd)
    : mms(*kernel_mms)
    , attr { .system = val.attr.system }
    , pid { max_pid++ }
{
    auto iter_thd = thds.emplace_back(main_thd);
    iter_thd->owner = this;
    // TODO: allocate low mem
    // Fresh 2-page kernel stack; copy the parent's stack contents so the
    // child resumes from the same kernel-mode state. k_esp ends up at the
    // stack top (x86 stacks grow down).
    k_esp = (void*)to_pp(alloc_n_raw_pages(2));
    memcpy(k_esp, (char*)main_thd.owner->k_esp - THREAD_KERNEL_STACK_SIZE, THREAD_KERNEL_STACK_SIZE);
    k_esp = (char*)k_esp + THREAD_KERNEL_STACK_SIZE;
    if (val.attr.system) {
        // System process: it runs on the kernel stack, so rebase the
        // copied thread's esp/ebp from the parent's stack onto the new one
        // (same offset from the top).
        auto orig_k_esp = (uint32_t)main_thd.owner->k_esp;
        iter_thd->regs.ebp -= orig_k_esp;
        iter_thd->regs.ebp += (uint32_t)k_esp;
        iter_thd->regs.esp -= orig_k_esp;
        iter_thd->regs.esp += (uint32_t)k_esp;
    } else {
        // User process: clone the kernel page directory, then mirror each
        // of the parent's memory areas into it.
        // NOTE(review): presumably mirror_mm_area sets up copy-on-write
        // mappings — confirm against mm.hpp.
        pd_t pd = alloc_pd();
        memcpy(pd, mms_get_pd(kernel_mms), PAGE_SIZE);
        mms.begin()->pd = pd;
        // skip kernel heap since it's already copied above
        for (auto iter_src = ++val.mms.cbegin(); iter_src != val.mms.cend(); ++iter_src)
            mm::mirror_mm_area(&mms, iter_src.ptr(), pd);
    }
}
// Construct a kernel (system) process whose single thread starts
// executing at `start_eip` on a freshly allocated, zeroed kernel stack,
// and enqueue that thread on the ready list.
process::process(void* start_eip)
    : mms(*kernel_mms)
    , thds {}
    , attr { .system = 1 }
    , pid { max_pid++ }
{
    // TODO: allocate low mem
    // Two raw pages for the kernel stack; k_esp points at the stack top
    // since x86 stacks grow downwards.
    k_esp = (void*)to_pp(alloc_n_raw_pages(2));
    memset((char*)k_esp, 0x00, THREAD_KERNEL_STACK_SIZE);
    k_esp = (char*)k_esp + THREAD_KERNEL_STACK_SIZE;
    // Initial thread state: all GP registers zero except esp/ebp, which
    // start at the top of the new stack; marked system + ready so the
    // scheduler will pick it up.
    auto thd = thds.emplace_back(thread {
        .eip = start_eip,
        .owner = this,
        .regs {
            .edi {},
            .esi {},
            .ebp = reinterpret_cast<uint32_t>(k_esp),
            .esp = reinterpret_cast<uint32_t>(k_esp),
            .ebx {},
            .edx {},
            .ecx {},
            .eax {},
        },
        .eflags {},
        .attr {
            .system = 1,
            .ready = 1,
            .wait = 0,
        },
    });
    ready_thds->push_back(thd.ptr());
}
// Userspace bootstrap, run as a kernel thread: mounts the FAT32 filesystem
// on /dev/hda1 at /mnt, loads /mnt/INIT.ELF, maps its stack, demotes this
// process to user mode and jumps to the ELF entry point. Never returns;
// on failure it invokes syscall 0x03 (presumably exit/crash — TODO confirm
// against syscall.hpp).
void NORETURN _kernel_init(void)
{
    // TODO: parse kernel parameters
    auto* _new_fs = fs::register_fs(types::kernel_allocator_new<fs::fat::fat32>(fs::vfs_open("/dev/hda1")->ind));
    int ret = fs::fs_root->ind->fs->mount(fs::vfs_open("/mnt"), _new_fs);
    if (unlikely(ret != GB_OK))
        syscall(0x03);
    // Give this process its own page directory (cloned from the kernel's)
    // so the ELF image can be mapped without affecting other processes.
    pd_t new_pd = alloc_pd();
    memcpy(new_pd, mms_get_pd(kernel_mms), PAGE_SIZE);
    // Swap the pd pointer with interrupts off so the scheduler never sees
    // a half-updated mm list.
    asm_cli();
    current_process->mms.begin()->pd = new_pd;
    asm_sti();
    interrupt_stack intrpt_stack {};
    intrpt_stack.eflags = 0x200; // STI
    types::elf::elf32_load("/mnt/INIT.ELF", &intrpt_stack, 0);
    // map stack area
    ret = mmap((void*)types::elf::ELF_STACK_TOP, types::elf::ELF_STACK_SIZE, fs::vfs_open("/dev/null")->ind, 0, 1, 0);
    if (unlikely(ret != GB_OK))
        syscall(0x03);
    // Demote to user mode and enter the loaded program; interrupts are
    // re-enabled by the eflags in the frame (0x200 above).
    asm_cli();
    current_process->attr.system = 0;
    current_thread->attr.system = 0;
    to_user(&intrpt_stack);
}
  126. void kernel_threadd_main(void)
  127. {
  128. tty_print(console, "kernel thread daemon started\n");
  129. k_new_thread(hw::init_ata, (void*)_kernel_init);
  130. for (;;) {
  131. if (kthreadd_new_thd_func) {
  132. spin_lock(&kthreadd_lock);
  133. int return_value = 0;
  134. void (*func)(void*) = kthreadd_new_thd_func;
  135. void* data = kthreadd_new_thd_data;
  136. kthreadd_new_thd_func = nullptr;
  137. kthreadd_new_thd_data = nullptr;
  138. spin_unlock(&kthreadd_lock);
  139. // syscall_fork
  140. return_value = syscall(0x00);
  141. if (return_value != 0) {
  142. // child
  143. func(data);
  144. for (;;)
  145. syscall(0x03);
  146. // TODO: syscall_exit()
  147. }
  148. spin_unlock(&kthreadd_lock);
  149. }
  150. asm_hlt();
  151. }
  152. }
  153. void k_new_thread(void (*func)(void*), void* data)
  154. {
  155. spin_lock(&kthreadd_lock);
  156. kthreadd_new_thd_func = func;
  157. kthreadd_new_thd_data = data;
  158. spin_unlock(&kthreadd_lock);
  159. }
// Bring up the scheduler: allocate the process table and ready queue,
// create the first kernel process (the kthread daemon), install it as the
// current process/thread and "return" into it through a synthetic
// interrupt frame. Never returns.
void NORETURN init_scheduler()
{
    processes = types::kernel_allocator_new<types::list<process>>();
    ready_thds = types::kernel_allocator_new<types::list<thread*>>();
    auto iter = processes->emplace_back((void*)kernel_threadd_main);
    // we need interrupts enabled for cow mapping so now we disable it
    // in case timer interrupt mess things up
    asm_cli();
    current_process = iter.ptr();
    current_thread = iter->thds.begin().ptr();
    // Kernel-mode stack the CPU switches to on ring transitions.
    tss.ss0 = KERNEL_DATA_SEGMENT;
    tss.esp0 = (uint32_t)iter->k_esp;
    asm_switch_pd(mms_get_pd(&current_process->mms));
    is_scheduler_ready = true;
    // Build an interrupt frame for the daemon thread and jump into it; the
    // frame's eflags (set in thread_context_load) re-enable interrupts.
    interrupt_stack intrpt_stack {};
    process_context_load(&intrpt_stack, current_process);
    thread_context_load(&intrpt_stack, current_thread);
    to_kernel(&intrpt_stack);
}
// Capture the interrupted CPU state from the interrupt stack frame into
// `thd` so it can later be resumed by thread_context_load().
void thread_context_save(interrupt_stack* int_stack, thread* thd)
{
    thd->eflags = int_stack->eflags;
    thd->eip = int_stack->v_eip;
    memcpy(&thd->regs, &int_stack->s_regs, sizeof(regs_32));
    if (thd->attr.system)
        // Kernel-mode interrupt: no esp was pushed by the CPU, so derive
        // the pre-interrupt esp from the saved-regs snapshot.
        // NOTE(review): +0x0c presumably skips the 3 dwords the CPU pushed
        // (eip, cs, eflags) — confirm against interrupt.h's frame layout.
        thd->regs.esp = int_stack->s_regs.esp + 0x0c;
    else
        // User-mode interrupt: the CPU pushed the user esp in the frame.
        thd->regs.esp = int_stack->esp;
}
  189. void thread_context_load(interrupt_stack* int_stack, thread* thd)
  190. {
  191. int_stack->eflags = (thd->eflags | 0x200); // OR $STI
  192. int_stack->v_eip = thd->eip;
  193. memcpy(&int_stack->s_regs, &thd->regs, sizeof(regs_32));
  194. current_thread = thd;
  195. }
// Intentionally a no-op: no per-process state needs saving on a switch
// yet. Kept for symmetry with process_context_load() and as a hook for
// future state (e.g. FPU context).
void process_context_save(interrupt_stack*, process*)
{
}
// Make `proc` the current process: point the TSS at its kernel stack and
// switch to its page directory.
void process_context_load(interrupt_stack*, process* proc)
{
    // Only user processes take ring3->ring0 transitions that use tss.esp0;
    // system processes stay in ring 0, so the field is left untouched.
    if (!proc->attr.system)
        tss.esp0 = (uint32_t)proc->k_esp;
    asm_switch_pd(mms_get_pd(&proc->mms));
    current_process = proc;
}
  206. void add_to_process_list(process&& proc)
  207. {
  208. processes->push_back(types::move(proc));
  209. }
  210. void add_to_ready_list(thread* thd)
  211. {
  212. ready_thds->push_back(thd);
  213. }
  214. static inline void next_task(const types::list<thread*>::iterator_type& iter_to_remove, thread* cur_thd)
  215. {
  216. ready_thds->erase(iter_to_remove);
  217. if (cur_thd->attr.ready)
  218. ready_thds->push_back(cur_thd);
  219. }
// Round-robin scheduler, called from interrupt context with the
// interrupted state in `intrpt_data`. Picks the next ready thread, saves
// the current context into the outgoing thread and resumes the chosen one
// (when a switch happens, control leaves via to_kernel/to_user and does
// not fall out of this function).
void do_scheduling(interrupt_stack* intrpt_data)
{
    if (unlikely(!is_scheduler_ready))
        return;
    // Drop non-ready threads from the head of the queue.
    // NOTE(review): no end() check — this assumes at least one ready
    // thread always exists (e.g. the kthread daemon); confirm that
    // invariant, otherwise this walks off the list.
    auto iter_thd = ready_thds->begin();
    while (!((*iter_thd)->attr.ready))
        iter_thd = ready_thds->erase(iter_thd);
    auto thd = *iter_thd;
    // Same thread selected again: just rotate the queue and keep running.
    if (current_thread == thd) {
        next_task(iter_thd, thd);
        return;
    }
    // Switch address space / TSS only when crossing a process boundary.
    process* proc = thd->owner;
    if (current_process != proc) {
        process_context_save(intrpt_data, current_process);
        process_context_load(intrpt_data, proc);
    }
    thread_context_save(intrpt_data, current_thread);
    thread_context_load(intrpt_data, thd);
    next_task(iter_thd, thd);
    // Resume at the thread's privilege level.
    if (thd->attr.system)
        to_kernel(intrpt_data);
    else
        to_user(intrpt_data);
}