mem.cpp

#include <asm/port_io.h>
#include <asm/sys.h>
#include <assert.h>
#include <errno.h>
#include <kernel/mem.h>
#include <kernel/mm.hpp>
#include <kernel/process.hpp>
#include <kernel/task.h>
#include <kernel/vga.hpp>
#include <stdint.h>
#include <stdio.h>
#include <types/allocator.hpp>
#include <types/bitmap.h>
#include <types/size.h>
#include <types/status.h>
// constant values
#define EMPTY_PAGE ((page_t)0)
// ---------------------

static size_t mem_size;
static uint8_t mem_bitmap[1024 * 1024 / 8];

// global
segment_descriptor gdt[6];

uint8_t e820_mem_map[1024];
uint32_t e820_mem_map_count;
uint32_t e820_mem_map_entry_size;
struct mem_size_info mem_size_info;
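
// Kernel-wide operator new/delete overloads: every dynamic allocation made by
// kernel C++ code is served by the kernel heap allocator (m_palloc).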
void* operator new(size_t sz)
{
    void* ptr = types::__allocator::m_palloc->alloc(sz);
    assert(ptr);
    return ptr;
}

void* operator new[](size_t sz)
{
    void* ptr = types::__allocator::m_palloc->alloc(sz);
    assert(ptr);
    return ptr;
}

void operator delete(void* ptr)
{
    types::__allocator::m_palloc->free(ptr);
}

void operator delete(void* ptr, size_t)
{
    types::__allocator::m_palloc->free(ptr);
}

void operator delete[](void* ptr)
{
    types::__allocator::m_palloc->free(ptr);
}

void operator delete[](void* ptr, size_t)
{
    types::__allocator::m_palloc->free(ptr);
}
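
// Physical page bitmap helpers: one bit in mem_bitmap per 4 KiB physical page,
// set while the page is in use and clear while it is free.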
inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}

constexpr void mark_addr_len(pptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = align_down<12>(start) >> 12;
    page_t end_page = align_up<12>(start + n) >> 12;
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

constexpr void free_addr_len(pptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = align_down<12>(start) >> 12;
    page_t end_page = align_up<12>(start + n) >> 12;
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

constexpr void mark_addr_range(pptr_t start, pptr_t end)
{
    mark_addr_len(start, end - start);
}

constexpr void free_addr_range(pptr_t start, pptr_t end)
{
    free_addr_len(start, end - start);
}
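
// Linear first-fit scan of the physical page bitmap: mark and return the first
// free page number, or -1 (cast to page_t) if the scan finds nothing.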
page_t __alloc_raw_page(void)
{
    for (size_t i = 0; i < sizeof(mem_bitmap); ++i) {
        if (bm_test(mem_bitmap, i) == 0) {
            bm_set(mem_bitmap, i);
            return i;
        }
    }
    return -1;
}

void __free_raw_page(page_t pg)
{
    bm_clear(mem_bitmap, pg);
}
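
// Reference-counted wrappers around the raw bitmap allocator: a page is only
// returned to the bitmap once its last reference is dropped.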
page allocate_page(void)
{
    return page {
        .phys_page_id = __alloc_raw_page(),
        .ref_count = types::_new<types::kernel_ident_allocator, size_t>(0),
        .pg_pteidx = 0,
        .attr = 0,
    };
}

void free_page(page* pg)
{
    if (*pg->ref_count == 1) {
        types::pdelete<types::kernel_ident_allocator>(pg->ref_count);
        __free_raw_page(pg->phys_page_id);
    } else {
        --*pg->ref_count;
    }
}
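
// Release a page directory: free every page table referenced by the first 768
// (user-space) directory entries, then free the directory page itself.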
void dealloc_pd(page_t pd)
{
    {
        kernel::paccess pa(pd);
        auto p_pd = (pd_t)pa.ptr();
        assert(p_pd);
        for (pde_t* ent = (*p_pd); ent < (*p_pd) + 768; ++ent) {
            if (!ent->in.p)
                continue;
            __free_raw_page(ent->in.pt_page);
        }
    }
    __free_raw_page(pd);
}
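
// Build the initial physical memory picture: compute total memory from the
// BIOS-reported block counts, reserve the pages occupied by early boot
// structures and the kernel image, and mark every E820 range that is not
// "usable" (type 1) as allocated.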
SECTION(".text.kinit")
static inline void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark empty page
    mark_addr_range(0x00000000, 0x00001000);
    // mark kernel page directory
    mark_addr_range(0x00001000, 0x00002000);
    // mark kernel page table
    mark_addr_range(0x00002000, 0x00006000);
    // mark kernel early stack
    mark_addr_range(0x00006000, 0x00008000);
    // mark EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0x100000);

    extern char __stage1_start[];
    extern char __kinit_end[];
    extern char __text_start[];
    extern char __data_end[];
    constexpr pptr_t PHYS_BSS_START = 0x100000;

    // mark .stage1 and .kinit
    mark_addr_range((pptr_t)__stage1_start, (pptr_t)__kinit_end);
    // mark kernel .text to .data
    mark_addr_len((pptr_t)__kinit_end, __data_end - __text_start);
    // mark kernel .bss
    mark_addr_len(PHYS_BSS_START, bss_len);

    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}
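
// Address space duplication: the mm_list copy constructor copies the list of
// memory areas and clones the source page directory into a freshly allocated
// physical page.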
using kernel::mm_list;

mm_list::mm_list(const mm_list& v)
    : m_areas(v.m_areas)
{
    m_pd = __alloc_raw_page();
    kernel::paccess pdst(m_pd), psrc(v.m_pd);
    auto* dst = pdst.ptr();
    auto* src = psrc.ptr();
    assert(dst && src);
    memcpy(dst, src, PAGE_SIZE);
}
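
// Helper to fill a raw page table entry from a physical page number and the
// requested protection flags.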
inline void map_raw_page_to_pte(
    pte_t* pte,
    page_t page,
    bool present,
    bool write,
    bool priv)
{
    // clear the entry, then fill in its fields
    pte->v = 0;
    pte->in.p = present;
    pte->in.rw = write;
    pte->in.us = !priv;
    pte->in.page = page;
}
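
// Map one more page at the end of this memory area. The page table is
// allocated on demand, and a page appended with PAGE_COW is also made
// read-only at its existing mapping so the next write to it faults and can be
// handled as copy-on-write.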
int mm::append_page(page& pg, uint32_t attr, bool priv)
{
    void* addr = this->end();
    kernel::paccess pa(this->owner->m_pd);
    auto pd = (pd_t)pa.ptr();
    assert(pd);
    pde_t* pde = *pd + v_to_pdi(addr);

    page_t pt_pg = 0;
    pte_t* pte = nullptr;
    // the page table does not exist yet
    if (unlikely(!pde->in.p)) {
        // allocate a page for the page table
        pt_pg = __alloc_raw_page();
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 1;
        pde->in.pt_page = pt_pg;

        auto pt = (pt_t)kernel::pmap(pt_pg);
        assert(pt);
        pte = *pt;

        memset(pt, 0x00, PAGE_SIZE);
    } else {
        pt_pg = pde->in.pt_page;

        auto pt = (pt_t)kernel::pmap(pt_pg);
        assert(pt);
        pte = *pt;
    }

    // map the page in the page table
    int pti = v_to_pti(addr);
    pte += pti;

    map_raw_page_to_pte(
        pte,
        pg.phys_page_id,
        !(attr & PAGE_MMAP),
        false,
        priv);

    kernel::pfree(pt_pg);

    if (unlikely((attr & PAGE_COW) && !(pg.attr & PAGE_COW))) {
        kernel::paccess pa(pg.pg_pteidx >> 12);
        auto* pg_pte = (pte_t*)pa.ptr();
        assert(pg_pte);
        pg_pte += (pg.pg_pteidx & 0xfff);
        pg.attr |= PAGE_COW;
        pg_pte->in.rw = 0;
        pg_pte->in.a = 0;
        invalidate_tlb(addr);
    }

    ++*pg.ref_count;

    auto iter = this->pgs->emplace_back(pg);
    iter->pg_pteidx = (pt_pg << 12) + pti;
    iter->attr = attr;

    return GB_OK;
}
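
// File-backed mmap: check that the inode is mappable and that the hint and
// offset are page aligned, reserve the range in the current process, record
// the backing file and offset on the new area, and populate it with
// copy-on-write mappings of the shared empty page.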
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv)
{
    auto& mms = current_process->mms;

    if (unlikely(!file->flags.in.file && !file->flags.in.special_node)) {
        errno = EINVAL;
        return GB_FAILED;
    }

    // TODO: find another address
    assert(((uint32_t)hint & 0xfff) == 0);
    // TODO: return failed
    assert((offset & 0xfff) == 0);

    size_t n_pgs = align_up<12>(len) >> 12;

    if (!mms.is_avail(hint, len)) {
        errno = EEXIST;
        return GB_FAILED;
    }

    auto mm = mms.addarea(hint, write, priv);
    mm->mapped_file = file;
    mm->file_offset = offset;

    for (size_t i = 0; i < n_pgs; ++i)
        mm->append_page(empty_page, PAGE_MMAP | PAGE_COW, priv);

    return GB_OK;
}
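
// Kernel memory bring-up: build the physical memory map, create the kernel
// mm_list from the early page directory, reserve the kernel heap area, back it
// with copy-on-write empty pages, and hand the range to the heap allocator.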
SECTION(".text.kinit")
void init_mem(void)
{
    init_mem_layout();

    // TODO: replace early kernel pd
    kernel_mms = types::pnew<types::kernel_ident_allocator>(kernel_mms, EARLY_KERNEL_PD_PAGE);
    auto heap_mm = kernel_mms->addarea(KERNEL_HEAP_START, true, true);

    // create empty_page struct
    empty_page.attr = 0;
    empty_page.phys_page_id = EMPTY_PAGE;
    empty_page.ref_count = types::_new<types::kernel_ident_allocator, size_t>(2);
    empty_page.pg_pteidx = 0x00002000;

    // kernel heap: 0xd0000000 to 0xd4000000 (starting at 3.25 GiB), size 64 MiB
    while (heap_mm->pgs->size() < 64 * 1024 * 1024 / PAGE_SIZE)
        heap_mm->append_page(empty_page, PAGE_COW, true);

    types::__allocator::init_kernel_heap(KERNEL_HEAP_START,
        vptrdiff(KERNEL_HEAP_LIMIT, KERNEL_HEAP_START));
}
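
// Encode a flat base/limit/flags/access description into the split fields of
// an x86 segment descriptor.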
SECTION(".text.kinit")
void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}
namespace __physmapper {

struct mapped_area {
    size_t ref;
    uint8_t* ptr;
};

static types::hash_map<page_t, mapped_area,
    types::linux_hasher<page_t>, types::kernel_ident_allocator>
    mapped;
static uint8_t freebm[0x400 / 8];

} // namespace __physmapper
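
// Temporarily map a physical page into the 4 MiB window at 0xff000000. Each
// mapping is reference counted; free window slots are tracked in freebm, and
// the page table for the window is accessed at 0xff001000. Slots 0 and 1 are
// never handed out.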
uint8_t* kernel::pmap(page_t pg)
{
    auto iter = __physmapper::mapped.find(pg);
    if (iter) {
        ++iter->value.ref;
        return iter->value.ptr;
    }

    for (int i = 2; i < 0x400; ++i) {
        if (bm_test(__physmapper::freebm, i) == 0) {
            auto* pte = (pte_t*)(0xff001000) + i;

            pte->v = 0x3;
            pte->in.page = pg;

            uint8_t* ptr = (uint8_t*)0xff000000 + 0x1000 * i;
            invalidate_tlb(ptr);

            bm_set(__physmapper::freebm, i);
            __physmapper::mapped.emplace(pg,
                __physmapper::mapped_area { 1, ptr });

            return ptr;
        }
    }

    return nullptr;
}
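
// Drop one reference to a pmap() mapping; when the last reference is released,
// clear the PTE, flush the TLB entry and return the window slot to freebm.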
void kernel::pfree(page_t pg)
{
    auto iter = __physmapper::mapped.find(pg);
    if (!iter)
        return;

    if (iter->value.ref > 1) {
        --iter->value.ref;
        return;
    }

    int i = (uint32_t)iter->value.ptr - 0xff000000;
    i /= 0x1000;

    auto* pte = (pte_t*)(0xff001000) + i;
    pte->v = 0;
    invalidate_tlb(iter->value.ptr);

    bm_clear(__physmapper::freebm, i);
    __physmapper::mapped.remove(iter);
}