process.cpp

#include <memory>
#include <queue>
#include <tuple> // for std::tie / std::ignore
#include <utility>

#include <assert.h>
#include <bits/alltypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/wait.h>

#include <types/allocator.hpp>
#include <types/cplusplus.hpp>
#include <types/elf.hpp>
#include <types/types.h>

#include <kernel/async/lock.hpp>
#include <kernel/log.hpp>
#include <kernel/mem/paging.hpp>
#include <kernel/module.hpp>
#include <kernel/process.hpp>
#include <kernel/signal.hpp>
#include <kernel/task/readyqueue.hpp>
#include <kernel/task/thread.hpp>
#include <kernel/user/thread_local.hpp>
#include <kernel/vfs.hpp>

using kernel::async::mutex;
using kernel::async::lock_guard, kernel::async::lock_guard_irq;

static void (*volatile kthreadd_new_thd_func)(void*);
static void* volatile kthreadd_new_thd_data;
static mutex kthreadd_mtx;

namespace kernel {

struct no_irq_guard {
    explicit no_irq_guard()
    {
        asm volatile("cli");
    }

    no_irq_guard(const no_irq_guard&) = delete;
    no_irq_guard& operator=(const no_irq_guard&) = delete;

    ~no_irq_guard()
    {
        asm volatile("sti");
    }
};

} // namespace kernel
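
// A minimal usage sketch for the RAII guard above (hypothetical call site,
// not present in this file): interrupts are disabled for the lifetime of the
// guard and unconditionally re-enabled when it goes out of scope, so it is
// only appropriate where interrupts are known to be enabled on entry.
//
//     {
//         kernel::no_irq_guard guard;
//         // ... touch data shared with interrupt handlers ...
//     } // "sti" executed here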

int filearr::allocate_fd(int from)
{
    if (from < min_avail)
        from = min_avail;

    if (from == min_avail) {
        int nextfd = min_avail + 1;
        auto iter = arr.find(nextfd);
        while (iter != arr.end() && nextfd == iter->first)
            ++nextfd, ++iter;

        int retval = min_avail;
        min_avail = nextfd;
        return retval;
    }

    int fd = from;
    auto iter = arr.find(fd);
    while (iter != arr.end() && fd == iter->first)
        ++fd, ++iter;

    return fd;
}
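
// A worked example of allocate_fd(), assuming `arr` behaves like an ordered
// map keyed by fd and `min_avail` tracks the lowest unused descriptor: with
// fds 0-3 open and min_avail == 4, allocate_fd(0) returns 4 and advances
// min_avail past any contiguously used descriptors above it; a request for a
// specific slot such as allocate_fd(10) scans upward from 10 and returns the
// first number not present in `arr`, without touching min_avail.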

void filearr::release_fd(int fd)
{
    if (fd < min_avail)
        min_avail = fd;
}

int filearr::dup(int old_fd)
{
    return dup2(old_fd, next_fd());
}

int filearr::dup2(int old_fd, int new_fd)
{
    close(new_fd);

    auto iter = arr.find(old_fd);
    if (!iter)
        return -EBADF;

    int fd = allocate_fd(new_fd);
    assert(fd == new_fd);

    auto [ newiter, inserted ] = this->arr.emplace(new_fd, iter->second);
    assert(inserted);

    newiter->second.flags = 0;
    return new_fd;
}
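
// Note on dup2() above: the new descriptor shares the underlying
// std::shared_ptr<fs::file> with the old one, but its flags are reset to 0,
// mirroring the POSIX rule that FD_CLOEXEC is not copied by dup2(). A
// hypothetical sketch of the intended effect:
//
//     int old_fd = files.open(proc, types::path{"/some/file"},
//                             O_RDONLY | O_CLOEXEC, 0);
//     int new_fd = files.dup2(old_fd, 10); // same file, FD_CLOEXEC cleared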

int filearr::dupfd(int fd, int minfd, int flags)
{
    auto iter = arr.find(fd);
    if (!iter)
        return -EBADF;

    int new_fd = allocate_fd(minfd);
    auto [ newiter, inserted ] = arr.emplace(new_fd, iter->second);
    assert(inserted);

    newiter->second.flags = flags;
    return new_fd;
}

int filearr::set_flags(int fd, int flags)
{
    auto iter = arr.find(fd);
    if (!iter)
        return -EBADF;

    iter->second.flags |= flags;
    return 0;
}

int filearr::clear_flags(int fd, int flags)
{
    auto iter = arr.find(fd);
    if (!iter)
        return -EBADF;

    iter->second.flags &= ~flags;
    return 0;
}

// TODO: file opening permissions check
int filearr::open(const process& current,
    const types::path& filepath, int flags, mode_t mode)
{
    auto* dentry = fs::vfs_open(*current.root, filepath);

    if (flags & O_CREAT) {
        if (!dentry) {
            // create file
            auto filename = filepath.last_name();
            auto parent_path = filepath;
            parent_path.remove_last();

            auto* parent = fs::vfs_open(*current.root, parent_path);
            if (!parent)
                return -EINVAL;

            int ret = fs::vfs_mkfile(parent, filename.c_str(), mode);
            if (ret != 0)
                return ret;

            dentry = fs::vfs_open(*current.root, filepath);
            assert(dentry);
        } else {
            // file already exists
            if (flags & O_EXCL)
                return -EEXIST;
        }
    } else {
        if (!dentry)
            return -ENOENT;
    }

    auto filemode = dentry->ind->mode;

    // if O_DIRECTORY is set, the dentry must be a directory
    if (flags & O_DIRECTORY) {
        if (!S_ISDIR(filemode))
            return -ENOTDIR;
    } else {
        if (S_ISDIR(filemode) && (flags & (O_WRONLY | O_RDWR)))
            return -EISDIR;
    }

    // truncate file
    if (flags & O_TRUNC) {
        if ((flags & (O_WRONLY | O_RDWR)) && S_ISREG(filemode)) {
            auto ret = fs::vfs_truncate(dentry->ind, 0);
            if (ret != 0)
                return ret;
        }
    }

    int fdflag = (flags & O_CLOEXEC) ? FD_CLOEXEC : 0;
    int fd = next_fd();

    auto [ _, inserted ] = arr.emplace(fd, fditem {
        fdflag, std::shared_ptr<fs::file> {
            new fs::regular_file(dentry->parent, {
                .read = !(flags & O_WRONLY),
                .write = !!(flags & (O_WRONLY | O_RDWR)),
                .append = !!(S_ISREG(filemode) && flags & O_APPEND),
            }, 0, dentry->ind),
        } });
    assert(inserted);

    return fd;
}
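
// Reading aid for the flag handling in open() above:
//   O_CREAT            create the file if the path does not resolve
//   O_CREAT | O_EXCL   fail with -EEXIST if it already exists
//   O_DIRECTORY        fail with -ENOTDIR unless the dentry is a directory
//   O_TRUNC            truncate regular files opened for writing
//   O_CLOEXEC          set FD_CLOEXEC on the new descriptor
// Note that this path only ever constructs fs::regular_file objects.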

process::process(const process& parent, pid_t pid)
    : mms { parent.mms }, attr { parent.attr }, files { parent.files }
    , pwd { parent.pwd }, umask { parent.umask }, pid { pid }
    , ppid { parent.pid }, pgid { parent.pgid }, sid { parent.sid }
    , control_tty { parent.control_tty }, root { parent.root } { }

process::process(pid_t pid, pid_t ppid)
    : attr { .system = true }
    , pwd { "/" }, pid { pid }, ppid { ppid }
{
    bool inserted;
    std::tie(std::ignore, inserted) = thds.emplace("", pid);
    assert(inserted);
}
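
// The two constructors above cover the two ways a process comes into being:
// the (parent, pid) form copies the parent's address space (mms), open file
// table, working directory, umask and controlling terminal, which is what a
// fork()-style copy needs, while the (pid, ppid) form builds a bare system
// process containing a single unnamed thread, used below for pid 1 (init)
// and pid 0 (the kernel thread daemon).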

using signo_type = kernel::signal_list::signo_type;

void process::send_signal(signo_type signal)
{
    for (auto& thd : thds)
        thd.send_signal(signal);
}

void kernel_threadd_main(void)
{
    kmsg("kernel thread daemon started");

    for (;;) {
        if (kthreadd_new_thd_func) {
            void (*func)(void*) = nullptr;
            void* data = nullptr;

            if (1) {
                lock_guard lck(kthreadd_mtx);

                if (kthreadd_new_thd_func) {
                    func = std::exchange(kthreadd_new_thd_func, nullptr);
                    data = std::exchange(kthreadd_new_thd_data, nullptr);
                }
            }

            // TODO
            (void)func, (void)data;
            assert(false);

            // syscall_fork
            // int ret = syscall(0x00);
            // if (ret == 0) {
            //     // child process
            //     func(data);
            //     // the function shouldn't return here
            //     assert(false);
            // }
        }

        // TODO: sleep here to wait for new_kernel_thread event
        asm volatile("hlt");
    }
}
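
// kernel_threadd_main() is the consumer half of a small hand-off protocol:
// k_new_thread() (defined later in this file) publishes a (function, data)
// pair under kthreadd_mtx, and the daemon polls for it, claims both pointers
// with std::exchange while holding the same mutex, and is then supposed to
// fork and run the function in the child. That last step is still
// unimplemented (the assert(false) above), and per the TODO the loop simply
// halts until the next interrupt instead of blocking on an event.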

SECTION(".text.kinit")
proclist::proclist()
{
    // init process has no parent
    auto& init = real_emplace(1, 0);
    assert(init.pid == 1 && init.ppid == 0);

    auto& thd = *init.thds.begin();
    thd.name.assign("[kernel init]");

    current_process = &init;
    current_thread = &thd;
    kernel::task::dispatcher::enqueue(current_thread);

    // TODO: LONG MODE
    // tss.ss0 = KERNEL_DATA_SEGMENT;
    // tss.esp0 = (uint32_t)current_thread->kstack.esp;

    current_process->mms.switch_pd();

    if (1) {
        // pid 0 is kernel thread daemon
        auto& proc = real_emplace(0, 0);
        assert(proc.pid == 0 && proc.ppid == 0);

        // create thread
        auto& thd = *proc.thds.begin();
        thd.name.assign("[kernel thread daemon]");

        // TODO: LONG MODE
        // auto* esp = &thd.kstack.esp;
        // auto old_esp = (uint32_t)thd.kstack.esp;
        // // return(start) address
        // push_stack(esp, (uint32_t)kernel_threadd_main);
        // // ebx
        // push_stack(esp, 0);
        // // edi
        // push_stack(esp, 0);
        // // esi
        // push_stack(esp, 0);
        // // ebp
        // push_stack(esp, 0);
        // // eflags
        // push_stack(esp, 0x200);
        // // original esp
        // push_stack(esp, old_esp);
        // kernel::task::dispatcher::enqueue(&thd);
    }
}

process& proclist::real_emplace(pid_t pid, pid_t ppid)
{
    auto [ iter, inserted ] = m_procs.try_emplace(pid, pid, ppid);
    assert(inserted);

    return iter->second;
}

void proclist::kill(pid_t pid, int exit_code)
{
    auto& proc = this->find(pid);

    // mark all threads as zombies
    for (auto& thd : proc.thds)
        thd.set_attr(kernel::task::thread::ZOMBIE);

    // write back mmap'ped files and close them
    proc.files.close_all();

    // unmap all user memory areas
    proc.mms.clear();

    // init should never exit
    if (proc.ppid == 0) {
        kmsg("kernel panic: init exited!");
        freeze();
    }

    // make child processes orphans (children of init)
    this->make_children_orphans(pid);

    proc.attr.zombie = 1;

    // notify parent process and init
    auto& parent = this->find(proc.ppid);
    auto& init = this->find(1);

    bool flag = false;
    if (1) {
        lock_guard_irq lck(init.mtx_waitprocs);

        if (1) {
            lock_guard_irq lck(proc.mtx_waitprocs);

            for (const auto& item : proc.waitprocs) {
                if (WIFSTOPPED(item.code) || WIFCONTINUED(item.code))
                    continue;

                init.waitprocs.push_back(item);
                flag = true;
            }

            proc.waitprocs.clear();
        }
    }

    if (flag)
        init.waitlist.notify_all();

    if (1) {
        lock_guard_irq lck(parent.mtx_waitprocs);
        parent.waitprocs.push_back({ pid, exit_code });
    }

    parent.waitlist.notify_all();
}
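
// Rough lifecycle implemented by kill() above: threads are marked ZOMBIE,
// the fd table and user mappings are released immediately, children are
// reparented to init, and any wait records the dying process had not yet
// consumed (other than stop/continue notifications) are handed to init
// before the parent receives a { pid, exit_code } record and is woken via
// its waitlist. The process table entry itself stays around as a zombie,
// presumably until the parent reaps it through the wait-family syscalls
// implemented elsewhere.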

static void release_kinit()
{
    // free .kinit
    using namespace kernel::mem::paging;

    extern uintptr_t KINIT_START_ADDR, KINIT_END_ADDR, KINIT_PAGES;

    auto range = vaddr_range{KERNEL_PAGE_TABLE_ADDR,
        KINIT_START_ADDR, KINIT_END_ADDR, true};
    for (auto pte : range)
        pte.clear();

    create_zone(0x2000, 0x2000 + 0x1000 * KINIT_PAGES);
}
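
// release_kinit() unmaps everything between KINIT_START_ADDR and
// KINIT_END_ADDR (extern symbols, presumably provided by the linker script)
// and then returns the freed physical range to the page allocator via
// create_zone(). The 0x2000 base and 0x1000 page size used for the zone are
// taken as-is from the code and presumably match where .kinit is placed
// physically.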

void NORETURN _kernel_init(void)
{
    release_kinit();

    asm volatile("sti");

    // ------------------------------------------
    // interrupt enabled
    // ------------------------------------------

    // load kmods
    for (auto loader = kernel::module::KMOD_LOADERS_START; *loader; ++loader) {
        auto* mod = (*loader)();
        if (!mod)
            continue;

        if (auto ret = insmod(mod); ret == kernel::module::MODULE_SUCCESS)
            continue;

        kmsgf("[kernel] An error occurred while loading \"%s\"", mod->name);
    }

    // mount fat32 /mnt directory
    // TODO: parse kernel parameters
    if (1) {
        auto* mount_point = fs::vfs_open(*fs::fs_root, types::path{"/mnt"});

        if (!mount_point) {
            int ret = fs::vfs_mkdir(fs::fs_root, "mnt", 0755);
            assert(ret == 0);

            mount_point = fs::vfs_open(*fs::fs_root, types::path{"/mnt"});
        }

        assert(mount_point);

        int ret = fs::fs_root->ind->fs->mount(mount_point, "/dev/sda", "/mnt",
            "fat32", MS_RDONLY | MS_NOATIME | MS_NODEV | MS_NOSUID, "ro,nodev");
        assert(ret == 0);
    }

    current_process->attr.system = 0;
    current_thread->attr &= ~kernel::task::thread::SYSTEM;

    types::elf::elf32_load_data d{
        .exec_dent{},
        .argv{ "/mnt/busybox", "sh", "/mnt/initsh" },
        .envp{ "LANG=C", "HOME=/root", "PATH=/mnt", "PWD=/" },
        .ip{}, .sp{}
    };

    d.exec_dent = fs::vfs_open(*fs::fs_root, types::path{d.argv[0].c_str()});
    if (!d.exec_dent) {
        kmsg("kernel panic: init not found!");
        freeze();
    }

    int ret = types::elf::elf32_load(d);
    assert(ret == 0);
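
    // The inline assembly below builds an iretq frame by hand to enter the
    // freshly loaded user program: it pushes SS, RSP, RFLAGS, CS and RIP in
    // that order and then executes iretq. 0x200 is RFLAGS.IF (interrupts
    // enabled); 0x23 and 0x1b are presumably the user data and user code
    // selectors (RPL 3) of this kernel's GDT. d.sp and d.ip come from the
    // ELF loader above.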
    asm volatile(
        "mov $0x23, %%ax\n"
        "mov %%ax, %%ds\n"
        "mov %%ax, %%es\n"
        "mov %%ax, %%fs\n"
        "mov %%ax, %%gs\n"

        "push $0x23\n"
        "push %0\n"
        "push $0x200\n"
        "push $0x1b\n"
        "push %1\n"

        "iretq\n"
        : : "g"(d.sp), "g"(d.ip) : "eax", "memory");

    freeze();
}

void k_new_thread(void (*func)(void*), void* data)
{
    lock_guard lck(kthreadd_mtx);
    kthreadd_new_thd_func = func;
    kthreadd_new_thd_data = data;
}
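
// k_new_thread() is the producer half of the hand-off consumed by
// kernel_threadd_main(): it only stores the two pointers under kthreadd_mtx
// and returns. Nothing wakes the daemon explicitly, so the request is picked
// up on its next poll iteration (see the TODO about waiting on an event
// there), and a second call made before the daemon runs would silently
// overwrite the first request.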

SECTION(".text.kinit")
void NORETURN init_scheduler(void)
{
    procs = new proclist;

    asm volatile(
        "mov %0, %%rsp\n"
        "sub $24, %%rsp\n"
        "mov %=f, %%rbx\n"
        "mov %%rbx, (%%rsp)\n"   // return address
        "mov %%rbx, 16(%%rsp)\n" // previous frame return address
        "xor %%rbx, %%rbx\n"
        "mov %%rbx, 8(%%rsp)\n"  // previous frame rbp
        "mov %%rsp, %%rbp\n"     // current frame rbp

        "push %1\n"

        "mov $0x10, %%ax\n"
        "mov %%ax, %%ss\n"
        "mov %%ax, %%ds\n"
        "mov %%ax, %%es\n"
        "mov %%ax, %%fs\n"
        "mov %%ax, %%gs\n"

        "push $0x0\n"
        "popf\n"

        "ret\n"

        "%=:\n"
        "ud2"
        :
        : "a"(current_thread->kstack.sp), "c"(_kernel_init)
        : "memory");

    freeze();
}

extern "C" void asm_ctx_switch(uint32_t** curr_esp, uint32_t** next_esp);

bool schedule()
{
    if (kernel::async::preempt_count() != 0)
        return true;

    auto* next_thd = kernel::task::dispatcher::next();
    process* proc = nullptr;
    kernel::task::thread* curr_thd = nullptr;

    if (current_thread == next_thd)
        goto _end;

    proc = &procs->find(next_thd->owner);
    if (current_process != proc) {
        proc->mms.switch_pd();
        current_process = proc;
    }

    curr_thd = current_thread;

    freeze();
    // TODO: LONG MODE
    // current_thread = next_thd;
    // tss.esp0 = (uint32_t)next_thd->kstack.esp;
    // next_thd->load_thread_area();
    // asm_ctx_switch(&curr_thd->kstack.esp, &next_thd->kstack.esp);
    // tss.esp0 = (uint32_t)curr_thd->kstack.esp;

_end:
    return current_thread->signals.pending_signal() == 0;
}
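
// schedule() returns true when the caller can simply continue (preemption is
// currently disabled, or no signal is pending for the thread that ends up
// current) and false when a pending signal needs to be handled. The actual
// context switch is still stubbed out for long mode, so when a switch would
// be required the freeze() above stops the machine instead.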

void NORETURN schedule_noreturn(void)
{
    schedule();
    freeze();
}

void NORETURN freeze(void)
{
    for (;;)
        asm volatile("cli\n\thlt");
}
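
// kill_current() (below) packs the terminating signal into the wait status
// with the usual "128 + signal number" convention in the high byte and the
// raw signal number in the low byte. For example, SIGKILL (9) yields
// (9 + 128) << 8 | 9 == 0x8909, so WEXITSTATUS() of that value is 137 while
// the low bits still carry the signal number.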

void NORETURN kill_current(int signo)
{
    procs->kill(current_process->pid,
        (signo + 128) << 8 | (signo & 0xff));
    schedule_noreturn();
}