process.cpp 8.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327
  1. #include <asm/port_io.h>
  2. #include <asm/sys.h>
  3. #include <fs/fat.hpp>
  4. #include <kernel/hw/ata.hpp>
  5. #include <kernel/interrupt.h>
  6. #include <kernel/mem.h>
  7. #include <kernel/mm.hpp>
  8. #include <kernel/process.hpp>
  9. #include <kernel/stdio.h>
  10. #include <kernel/syscall.hpp>
  11. #include <kernel/tty.h>
  12. #include <kernel/vfs.hpp>
  13. #include <kernel_main.h>
  14. #include <types/allocator.hpp>
  15. #include <types/elf.hpp>
  16. #include <types/hash_map.hpp>
  17. #include <types/list.hpp>
  18. #include <types/lock.hpp>
  19. #include <types/status.h>
  20. #include <types/types.h>
// Scheduler globals. All pointers below are heap-allocated once in
// init_scheduler() before any scheduling can happen.
static bool is_scheduler_ready;
// Every process in the system; iterators into this list stay valid and
// are indexed by pid in idx_processes.
static types::list<process>* processes;
static typename types::hash_map<pid_t, types::list<process>::iterator_type, types::linux_hasher<pid_t>>* idx_processes;
// Threads currently eligible to run; query_next_thread() scans from the front.
static types::list<thread*>* ready_thds;
// Highest pid handed out so far; new pids are assigned as ++max_pid.
static pid_t max_pid;
// Request mailbox for k_new_thread(): kernel_threadd_main() polls the
// function pointer and forks a kernel thread to run it. volatile because
// writer and reader are different threads of control.
static void (*volatile kthreadd_new_thd_func)(void*);
static void* volatile kthreadd_new_thd_data;
static types::mutex kthreadd_mtx;
// Move constructor: steal val's memory areas and threads and take over
// its identity (pid/ppid/attr/kernel stack).
process::process(process&& val)
    : mms(types::move(val.mms))
    , thds(types::move(val.thds))
    , pid(val.pid)
    , ppid(val.ppid)
{
    // If the process being moved from is the currently running one,
    // repoint the global so the scheduler keeps tracking it.
    if (current_process == &val)
        current_process = this;
    attr.system = val.attr.system;
    k_esp = val.k_esp;
    // The moved thread objects still name val as their owner; fix that up.
    for (auto& item : thds)
        item.owner = this;
    // Leave val in a harmless empty state (no stack, non-system).
    val.k_esp = nullptr;
    val.attr.system = 0;
}
// Fork-style constructor: duplicate val with main_thd as the new
// process's initial thread. Allocates a fresh kernel stack; for user
// processes it also builds a fresh page directory and mirrors the
// parent's address space (presumably copy-on-write via
// mm::mirror_mm_area — confirm in mm.hpp).
process::process(const process& val, const thread& main_thd)
    : mms(*kernel_mms)
    , attr { .system = val.attr.system }
    , pid { ++max_pid }
    , ppid { val.pid }
{
    auto iter_thd = thds.emplace_back(main_thd);
    iter_thd->owner = this;
    // TODO: allocate low mem
    k_esp = (void*)to_pp(alloc_n_raw_pages(2));
    // Copy the parent's whole kernel stack so the child resumes from the
    // same saved kernel state. k_esp points at the stack TOP, so the
    // source region starts THREAD_KERNEL_STACK_SIZE below it.
    memcpy(k_esp, (char*)main_thd.owner->k_esp - THREAD_KERNEL_STACK_SIZE, THREAD_KERNEL_STACK_SIZE);
    k_esp = (char*)k_esp + THREAD_KERNEL_STACK_SIZE;
    if (val.attr.system) {
        // Kernel process: relocate the saved esp/ebp from the parent's
        // stack to the same offset within the newly copied stack.
        auto orig_k_esp = (uint32_t)main_thd.owner->k_esp;
        iter_thd->regs.ebp -= orig_k_esp;
        iter_thd->regs.ebp += (uint32_t)k_esp;
        iter_thd->regs.esp -= orig_k_esp;
        iter_thd->regs.esp += (uint32_t)k_esp;
    } else {
        // User process: new page directory seeded from the kernel's,
        // then the parent's user mappings are mirrored into it.
        pd_t pd = alloc_pd();
        memcpy(pd, mms_get_pd(kernel_mms), PAGE_SIZE);
        mms.begin()->pd = pd;
        // skip kernel heap since it's already copied above
        for (auto iter_src = ++val.mms.cbegin(); iter_src != val.mms.cend(); ++iter_src)
            mm::mirror_mm_area(&mms, iter_src.ptr(), pd);
    }
}
// Constructor for a fresh kernel process with a single thread that
// starts executing at start_eip (used to create kthreadd, pid > 1,
// parented to pid 1 — see init_scheduler).
process::process(void* start_eip)
    : mms(*kernel_mms)
    , thds {}
    , attr { .system = 1 }
    , pid { ++max_pid }
    , ppid { 1 }
{
    // TODO: allocate low mem
    k_esp = (void*)to_pp(alloc_n_raw_pages(2));
    memset((char*)k_esp, 0x00, THREAD_KERNEL_STACK_SIZE);
    // k_esp points at the TOP of the freshly zeroed kernel stack.
    k_esp = (char*)k_esp + THREAD_KERNEL_STACK_SIZE;
    auto thd = thds.emplace_back(thread {
        .eip = start_eip,
        .owner = this,
        // Pristine register set: esp/ebp at the empty stack top,
        // everything else zero.
        .regs {
            .edi {},
            .esi {},
            .ebp = reinterpret_cast<uint32_t>(k_esp),
            .esp = reinterpret_cast<uint32_t>(k_esp),
            .ebx {},
            .edx {},
            .ecx {},
            .eax {},
        },
        .eflags {},
        .attr {
            .system = 1,
            .ready = 1,
            .wait = 0,
        },
    });
    // The new thread is immediately schedulable.
    ready_thds->push_back(thd.ptr());
}
// Late kernel initialization, run as pid 1 after the fork in
// kernel_threadd_main: mounts the FAT32 partition on /mnt, gives this
// process its own page directory, then loads /mnt/INIT.ELF and drops to
// user mode. Never returns.
void NORETURN _kernel_init(void)
{
    // TODO: parse kernel parameters
    auto* _new_fs = fs::register_fs(types::kernel_allocator_new<fs::fat::fat32>(fs::vfs_open("/dev/hda1")->ind));
    int ret = fs::fs_root->ind->fs->mount(fs::vfs_open("/mnt"), _new_fs);
    if (unlikely(ret != GB_OK))
        syscall(0x03); // 0x03 is used as the no-return/exit syscall in this file
    // Own copy of the kernel page directory so the ELF loader can map
    // user pages without touching the shared kernel_mms mappings.
    pd_t new_pd = alloc_pd();
    memcpy(new_pd, mms_get_pd(kernel_mms), PAGE_SIZE);
    asm_cli();
    current_process->mms.begin()->pd = new_pd;
    asm_sti();
    interrupt_stack intrpt_stack {};
    intrpt_stack.eflags = 0x200; // STI
    const char* argv[] = { "/mnt/INIT.ELF", nullptr };
    // NOTE(review): elf32_load's return value is not checked here.
    types::elf::elf32_load("/mnt/INIT.ELF", argv, &intrpt_stack, 0);
    // From here on this is a user process; "return" into the loaded ELF.
    asm_cli();
    current_process->attr.system = 0;
    current_thread->attr.system = 0;
    to_user(&intrpt_stack);
}
  125. void kernel_threadd_main(void)
  126. {
  127. tty_print(console, "kernel thread daemon started\n");
  128. // fork
  129. int ret = syscall(0x00);
  130. // pid 1
  131. if (ret) {
  132. hw::init_ata();
  133. _kernel_init();
  134. // noreturn
  135. syscall(0x03);
  136. }
  137. for (;;) {
  138. if (kthreadd_new_thd_func) {
  139. void (*func)(void*) = nullptr;
  140. void* data = nullptr;
  141. {
  142. types::lock_guard lck(kthreadd_mtx);
  143. func = kthreadd_new_thd_func;
  144. data = kthreadd_new_thd_data;
  145. }
  146. // syscall_fork
  147. int ret = syscall(0x00);
  148. if (ret == 0) {
  149. // child process
  150. func(data);
  151. // the function shouldn't return here
  152. syscall(0x03);
  153. }
  154. }
  155. // TODO: sleep here to wait for new_kernel_thread event
  156. asm_hlt();
  157. }
  158. }
  159. void k_new_thread(void (*func)(void*), void* data)
  160. {
  161. types::lock_guard lck(kthreadd_mtx);
  162. kthreadd_new_thd_func = func;
  163. kthreadd_new_thd_data = data;
  164. }
// Bootstrap the scheduler: allocate the global tables, create the first
// process (kthreadd, entry kernel_threadd_main), install it as current,
// and "return" into it through a synthesized interrupt frame. Never
// returns to the caller.
void NORETURN init_scheduler()
{
    processes = types::kernel_allocator_pnew(processes);
    ready_thds = types::kernel_allocator_pnew(ready_thds);
    idx_processes = types::kernel_allocator_pnew(idx_processes);
    idx_child_processes = types::kernel_allocator_pnew(idx_child_processes);
    add_to_process_list(process((void*)kernel_threadd_main));
    auto init = findproc(1);
    // we need interrupts enabled for cow mapping so now we disable it
    // in case timer interrupt mess things up
    asm_cli();
    current_process = init;
    current_thread = init->thds.begin().ptr();
    // Kernel stack the CPU switches to on a privilege-level change.
    tss.ss0 = KERNEL_DATA_SEGMENT;
    tss.esp0 = (uint32_t)init->k_esp;
    asm_switch_pd(mms_get_pd(&current_process->mms));
    // From this point on, do_scheduling() is allowed to switch tasks.
    is_scheduler_ready = true;
    // Build an interrupt frame describing the new task and jump into it.
    interrupt_stack intrpt_stack {};
    process_context_load(&intrpt_stack, current_process);
    thread_context_load(&intrpt_stack, current_thread);
    to_kernel(&intrpt_stack);
}
// Save the CPU state captured in the interrupt frame into thd, so it
// can be restored later by thread_context_load().
void thread_context_save(interrupt_stack* int_stack, thread* thd)
{
    thd->eflags = int_stack->eflags;
    thd->eip = int_stack->v_eip;
    memcpy(&thd->regs, &int_stack->s_regs, sizeof(regs_32));
    if (thd->attr.system)
        // Kernel thread: no esp/ss were pushed on a same-privilege
        // interrupt, so the pre-interrupt esp is the saved esp plus the
        // 3 dwords (eip, cs, eflags) the CPU pushed — 0x0c bytes.
        // NOTE(review): 0x0c assumes no error code on this frame — confirm.
        thd->regs.esp = int_stack->s_regs.esp + 0x0c;
    else
        // User thread: the CPU pushed the user esp explicitly.
        thd->regs.esp = int_stack->esp;
}
  197. void thread_context_load(interrupt_stack* int_stack, thread* thd)
  198. {
  199. int_stack->eflags = (thd->eflags | 0x200); // OR $STI
  200. int_stack->v_eip = thd->eip;
  201. memcpy(&int_stack->s_regs, &thd->regs, sizeof(regs_32));
  202. current_thread = thd;
  203. }
// Nothing to do: all per-process state lives in the process object
// itself. Kept as an empty hook for symmetry with process_context_load.
void process_context_save(interrupt_stack*, process*)
{
}
// Switch the CPU over to proc: set the TSS kernel stack (only needed
// for user processes, which enter the kernel via a privilege change),
// switch to its page directory, and mark it current.
void process_context_load(interrupt_stack*, process* proc)
{
    if (!proc->attr.system)
        tss.esp0 = (uint32_t)proc->k_esp;
    asm_switch_pd(mms_get_pd(&proc->mms));
    current_process = proc;
}
// Take ownership of proc: move it into the global process list, index
// it by pid, and record it in its parent's child-pid list.
void add_to_process_list(process&& proc)
{
    auto iter = processes->emplace_back(types::move(proc));
    idx_processes->insert(iter->pid, iter);
    // Create the parent's child-list entry on first use, then re-find it
    // to get a handle to the stored value.
    auto children = idx_child_processes->find(iter->ppid);
    if (!children) {
        idx_child_processes->insert(iter->ppid, {});
        children = idx_child_processes->find(iter->ppid);
    }
    children->value.push_back(iter->pid);
}
// Make thd eligible for scheduling by appending it to the ready list.
void add_to_ready_list(thread* thd)
{
    ready_thds->push_back(thd);
}
  229. void remove_from_ready_list(thread* thd)
  230. {
  231. auto iter = ready_thds->find(thd);
  232. while (iter != ready_thds->end()) {
  233. ready_thds->erase(iter);
  234. iter = ready_thds->find(thd);
  235. }
  236. }
// Drop non-ready threads from the front of the ready list and return an
// iterator to the first runnable one.
// NOTE(review): assumes the list always contains at least one ready
// thread; if it were empty (or held no ready thread) this would walk
// past end() — confirm that invariant is guaranteed elsewhere.
types::list<thread*>::iterator_type query_next_thread(void)
{
    auto iter_thd = ready_thds->begin();
    while (!((*iter_thd)->attr.ready))
        iter_thd = ready_thds->erase(iter_thd);
    return iter_thd;
}
  244. process* findproc(pid_t pid)
  245. {
  246. return idx_processes->find(pid)->value.ptr();
  247. }
// Scheduler entry point (called from interrupt context): pick the next
// ready thread, switch the interrupt frame — and the address space when
// the owning process changes — over to it, then return into it.
void do_scheduling(interrupt_stack* intrpt_data)
{
    // Do nothing until init_scheduler() has finished bootstrapping.
    if (unlikely(!is_scheduler_ready))
        return;
    auto iter_thd = query_next_thread();
    auto thd = *iter_thd;
    // Same thread chosen again: just advance the ready-list cursor.
    if (current_thread == thd) {
        next_task(iter_thd);
        return;
    }
    process* proc = thd->owner;
    // Process switch first (page directory, TSS), then thread switch.
    if (current_process != proc) {
        if (current_process)
            process_context_save(intrpt_data, current_process);
        process_context_load(intrpt_data, proc);
    }
    if (current_thread)
        thread_context_save(intrpt_data, current_thread);
    thread_context_load(intrpt_data, thd);
    next_task(iter_thd);
    // Resume at the thread's privilege level.
    if (thd->attr.system)
        to_kernel(intrpt_data);
    else
        to_user(intrpt_data);
}