mem.cpp

#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <assert.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/mm.hpp>
#include <kernel/process.hpp>
#include <kernel/task.h>
#include <kernel/vga.hpp>
#include <kernel_main.hpp>
#include <stdint.h>
#include <stdio.h>
#include <types/allocator.hpp>
#include <types/bitmap.h>
#include <types/size.h>
#include <types/status.h>

// constant values
#define EMPTY_PAGE_ADDR ((pptr_t)0x0000)
#define EMPTY_PAGE_END ((pptr_t)0x1000)

#define IDENTICALLY_MAPPED_HEAP_SIZE ((size_t)0x400000)
// ---------------------
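
// total physical memory size in bytes, and the physical page bitmap:
// one bit per 4 KiB page frame, 1024 * 1024 bits covering 4 GiB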
static size_t mem_size;
static char mem_bitmap[1024 * 1024 / 8];
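
// a simple break-style heap allocator: blocks form a singly linked list
// embedded in the managed region, allocation is first-fit, and the region
// grows through a private brk()/sbrk() pair bounded by p_limit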
class brk_memory_allocator {
public:
    using byte = uint8_t;
    using size_type = size_t;

    struct mem_blk_flags {
        uint8_t is_free;
        uint8_t has_next;
        uint8_t _unused2;
        uint8_t _unused3;
    };

    struct mem_blk {
        size_t size;
        struct mem_blk_flags flags;
        // the first byte of the memory space
        // the minimal allocated space is 4 bytes
        uint8_t data[4];
    };

private:
    byte* p_start;
    byte* p_break;
    byte* p_limit;

    brk_memory_allocator(void) = delete;
    brk_memory_allocator(const brk_memory_allocator&) = delete;
    brk_memory_allocator(brk_memory_allocator&&) = delete;

    inline constexpr int brk(byte* addr)
    {
        if (unlikely(addr >= p_limit))
            return GB_FAILED;
        p_break = addr;
        return GB_OK;
    }

    // sets errno
    inline byte* sbrk(size_type increment)
    {
        if (unlikely(brk(p_break + increment) != GB_OK)) {
            errno = ENOMEM;
            return nullptr;
        } else {
            errno = 0;
            return p_break;
        }
    }
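
    // compute the address of the next block header: step over this block's
    // header and payload, then back up the 4-byte data[] array that is
    // already counted in sizeof(mem_blk)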
    inline mem_blk* _find_next_mem_blk(mem_blk* blk, size_type blk_size)
    {
        byte* p = (byte*)blk;
        p += sizeof(mem_blk);
        p += blk_size;
        p -= (4 * sizeof(byte));
        return (mem_blk*)p;
    }

    // sets errno
    // @param start_pos position at which to start searching
    // @param size the size of the block we're looking for
    // @return a suitable block if one exists; otherwise the last block in the list
    mem_blk* find_blk(mem_blk* start_pos, size_type size)
    {
        while (1) {
            if (start_pos->flags.is_free && start_pos->size >= size) {
                errno = 0;
                return start_pos;
            } else {
                if (unlikely(!start_pos->flags.has_next)) {
                    errno = ENOTFOUND;
                    return start_pos;
                }
                start_pos = _find_next_mem_blk(start_pos, start_pos->size);
            }
        }
    }

    // sets errno
    mem_blk* allocate_new_block(mem_blk* blk_before, size_type size)
    {
        sbrk(sizeof(mem_blk) + size - 4 * sizeof(byte));
        // preserves errno
        if (unlikely(errno)) {
            return nullptr;
        }

        mem_blk* blk = _find_next_mem_blk(blk_before, blk_before->size);

        blk_before->flags.has_next = 1;

        blk->flags.has_next = 0;
        blk->flags.is_free = 1;
        blk->size = size;

        errno = 0;
        return blk;
    }

    void split_block(mem_blk* blk, size_type this_size)
    {
        // the block is too small to be split
        if (blk->size < sizeof(mem_blk) + this_size) {
            return;
        }

        mem_blk* blk_next = _find_next_mem_blk(blk, this_size);

        blk_next->size = blk->size
            - this_size
            - sizeof(mem_blk)
            + 4 * sizeof(byte);
        blk_next->flags.has_next = blk->flags.has_next;
        blk_next->flags.is_free = 1;

        blk->flags.has_next = 1;
        blk->size = this_size;
    }

public:
    brk_memory_allocator(void* start, size_type limit)
        : p_start((byte*)start)
        , p_limit(p_start + limit)
    {
        brk(p_start);
        mem_blk* p_blk = (mem_blk*)sbrk(0);
        p_blk->size = 4;
        p_blk->flags.has_next = 0;
        p_blk->flags.is_free = 1;
    }

    // sets errno
    void* alloc(size_type size)
    {
        struct mem_blk* block_allocated;

        block_allocated = find_blk((mem_blk*)p_start, size);
        if (errno == ENOTFOUND) {
            // find_blk returned the last block in the list;
            // grow the heap and append a new block after it
            block_allocated = allocate_new_block(block_allocated, size);
            if (errno) {
                // preserves errno
                return nullptr;
            }
        } else {
            split_block(block_allocated, size);
        }

        errno = 0;
        block_allocated->flags.is_free = 0;
        return block_allocated->data;
    }

    void free(void* ptr)
    {
        mem_blk* blk = (mem_blk*)((byte*)ptr - (sizeof(mem_blk_flags) + sizeof(size_t)));
        blk->flags.is_free = 1;
        // TODO: coalesce adjacent free blocks
    }
};

static brk_memory_allocator* kernel_heap_allocator;
static brk_memory_allocator
    kernel_ident_mapped_allocator((void*)bss_section_end_addr,
        IDENTICALLY_MAPPED_HEAP_SIZE);
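
// kernel_ident_mapped_allocator is usable as soon as static initialization
// has run; kernel_heap_allocator is constructed later, in init_mem(), once
// the kernel heap region is mapped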
void* k_malloc(size_t size)
{
    void* ptr = kernel_heap_allocator->alloc(size);
    assert(likely(ptr));
    return ptr;
}

void k_free(void* ptr)
{
    kernel_heap_allocator->free(ptr);
}

void* ki_malloc(size_t size)
{
    void* ptr = kernel_ident_mapped_allocator.alloc(size);
    assert(likely(ptr));
    return ptr;
}

void ki_free(void* ptr)
{
    kernel_ident_mapped_allocator.free(ptr);
}

void* ptovp(pptr_t p_ptr)
{
    // memory below 768 MiB is identically mapped
    // TODO: address translation for high mem
    assert(p_ptr <= 0x30000000);
    return (void*)p_ptr;
}

inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}

constexpr void mark_addr_len(pptr_t start, size_t n)
{
    if (unlikely(n == 0))
        return;
    page_t start_page = to_page(start);
    page_t end_page = to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

constexpr void free_addr_len(pptr_t start, size_t n)
{
    if (unlikely(n == 0))
        return;
    page_t start_page = to_page(start);
    page_t end_page = to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

inline constexpr void mark_addr_range(pptr_t start, pptr_t end)
{
    mark_addr_len(start, end - start);
}

inline constexpr void free_addr_range(pptr_t start, pptr_t end)
{
    free_addr_len(start, end - start);
}

// @return the number of pages continuously available starting at 'start',
// capped at n
static inline size_t _test_n_raw_pages(page_t start, size_t n)
{
    // *start is already allocated
    if (bm_test(mem_bitmap, start))
        return 0;

    return 1 + ((n > 1) ? _test_n_raw_pages(start + 1, n - 1) : 0);
}
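
// first-fit scan over the whole page bitmap: skip past any run that is too
// short and claim the first run of n consecutive free pages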
page_t alloc_n_raw_pages(size_t n)
{
    page_t first = 0;
    while (first <= 1024 * 1024 - n) {
        size_t max = _test_n_raw_pages(first, n);
        if (max != n) {
            first += (max + 1);
        } else {
            for (page_t i = first; i < first + n; ++i)
                mark_page(i);
            return first;
        }
    }
    assert(false);
    return 0xffffffff;
}

void free_n_raw_pages(page_t start_pg, size_t n)
{
    while (n--)
        free_page(start_pg++);
}

struct page allocate_page(void)
{
    return page {
        .phys_page_id = alloc_raw_page(),
        .pte = nullptr,
        .ref_count = types::_new<types::kernel_ident_allocator, size_t>(0),
        .attr { 0 },
    };
}

pd_t alloc_pd(void)
{
    // TODO: allocate the page in low memory and create a struct page for it
    page_t pd_page = alloc_raw_page();
    pd_t pd = to_pd(pd_page);
    memset(pd, 0x00, PAGE_SIZE);
    return pd;
}

pt_t alloc_pt(void)
{
    // TODO: allocate the page in low memory and create a struct page for it
    page_t pt_page = alloc_raw_page();
    pt_t pt = to_pt(pt_page);
    memset(pt, 0x00, PAGE_SIZE);
    return pt;
}

void dealloc_pd(pd_t pd)
{
    // the first 256 entries are skipped: they hold the kernel-space
    // mappings shared between page directories
    for (pde_t* ent = (*pd) + 256; ent < (*pd) + 1024; ++ent) {
        if (!ent->in.p)
            continue;
        dealloc_pt(to_pt(ent));
    }
    memset(pd, 0x00, sizeof(*pd));

    page_t pg = to_page((pptr_t)pd);
    free_page(pg);
}

void dealloc_pt(pt_t pt)
{
    memset(pt, 0x00, sizeof(*pt));

    page_t pg = to_page((pptr_t)pt);
    free_page(pg);
}
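
// build the physical memory layout: compute the total memory size from the
// BIOS-reported block counts, then mark the pages already in use (kernel
// image, page tables, EBDA, identically mapped heap) as well as every e820
// region not reported as usable (type != 1)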
static inline void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark kernel page directory
    mark_addr_range(0x00001000, 0x00006000);
    // mark empty page
    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
    // mark EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0xfffff);
    // mark kernel
    mark_addr_len(0x00100000, kernel_size);
    // mark identically mapped heap
    mark_addr_len(bss_section_end_addr, IDENTICALLY_MAPPED_HEAP_SIZE);

    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}

using kernel::mm_list;
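
// copying an mm_list clones the area list and duplicates the source's page
// directory so the copy can be modified independently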
mm_list::mm_list(const mm_list& v)
    : m_areas(v.m_areas)
{
    pd_t pd = alloc_pd();
    memcpy(pd, v.m_pd, PAGE_SIZE);
    m_pd = pd;
}

inline void map_raw_page_to_pte(
    pte_t* pte,
    page_t page,
    bool present,
    bool write,
    bool priv)
{
    // fill in the page table entry, starting from a cleared value
    pte->v = 0;
    pte->in.p = present;
    pte->in.rw = write;
    pte->in.us = !priv;
    pte->in.page = page;
}
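
// map one more page at the end of this memory area; when 'cow' is set the
// mapping is made read-only so that the first write faults, and the page
// fault handler (not part of this file) can then supply a private copy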
int mm::append_page(page* pg, bool present, bool write, bool priv, bool cow)
{
    void* addr = this->end();
    pde_t* pde = to_pde(this->owner->m_pd, addr);

    // the page table does not exist yet
    if (unlikely(!pde->in.p)) {
        // allocate a page for the page table
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 1;
        pde->in.pt_page = alloc_raw_page();

        memset(to_pt(pde), 0x00, PAGE_SIZE);
    }

    // map the page in the page table
    pte_t* pte = to_pte(pde, addr);
    map_raw_page_to_pte(pte, pg->phys_page_id, present, (write && !cow), priv);

    if (unlikely(cow && !pg->attr.in.cow)) {
        // also write-protect the original mapping of the shared page
        pg->attr.in.cow = 1;
        pg->pte->in.rw = 0;
        pg->pte->in.a = 0;
        invalidate_tlb(addr);
    }

    ++*pg->ref_count;

    auto iter = this->pgs->emplace_back(*pg);
    iter->pte = pte;
    return GB_OK;
}
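
// map 'len' bytes of 'file' at 'hint': every page is initially backed by
// the shared empty page, mapped non-present and copy-on-write, presumably
// to be filled from the file on first access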
static inline int _mmap(
    mm_list* mms,
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv)
{
    if (unlikely(!file->flags.in.file && !file->flags.in.special_node)) {
        errno = EINVAL;
        return GB_FAILED;
    }

    // TODO: find another address
    assert(((uint32_t)hint & 0xfff) == 0);
    // TODO: return failure instead of asserting
    assert((offset & 0xfff) == 0);

    size_t n_pgs = align_up<12>(len) >> 12;

    if (!mms->is_avail(hint, len)) {
        errno = EEXIST;
        return GB_FAILED;
    }

    auto mm = mms->addarea(hint, write, priv);
    mm->mapped_file = file;
    mm->file_offset = offset;

    for (size_t i = 0; i < n_pgs; ++i)
        mm->append_page(&empty_page, false, write, priv, true);

    return GB_OK;
}

int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv)
{
    return _mmap(&current_process->mms, hint, len, file, offset, write, priv);
}

// map a page identically
// this function is only meant to be used during initialization;
// it checks the PDE's P bit, which is set in advance of the recursive
// call below to avoid infinite recursion
static inline void _init_map_page_identically(page_t page)
{
    pde_t* pde = *KERNEL_PAGE_DIRECTORY_ADDR + to_pdi(page);
    // the page table does not exist yet
    if (unlikely(!pde->in.p)) {
        // allocate a page for the page table
        // and set the P bit of the pde in advance
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_raw_page();
        _init_map_page_identically(pde->in.pt_page);

        memset(to_pt(pde), 0x00, PAGE_SIZE);
    }
    // map the page in the page table
    pte_t* pt = to_pte(pde, page);
    pt->v = 0x00000003;
    pt->in.page = page;
}

static inline void init_paging_map_low_mem_identically(void)
{
    for (pptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
        // skip pages already marked as allocated in the bitmap
        if (bm_test(mem_bitmap, to_page(addr)))
            continue;
        _init_map_page_identically(to_page(addr));
    }
}
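
// overall memory initialization: build the page bitmap, identity-map low
// memory, create the kernel memory area list and the shared empty page,
// then bring up the kernel heap and its allocator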
void init_mem(void)
{
    init_mem_layout();

    // identity-map 16 MiB to 768 MiB
    init_paging_map_low_mem_identically();

    kernel_mms = types::pnew<types::kernel_ident_allocator>(kernel_mms, KERNEL_PAGE_DIRECTORY_ADDR);
    auto heap_mm = kernel_mms->addarea(KERNEL_HEAP_START, true, true);

    // create the empty_page struct
    empty_page.attr.in.cow = 0;
    empty_page.phys_page_id = to_page(EMPTY_PAGE_ADDR);
    empty_page.ref_count = types::_new<types::kernel_ident_allocator, size_t>(1);
    empty_page.pte = to_pte(*KERNEL_PAGE_DIRECTORY_ADDR, empty_page.phys_page_id);
    empty_page.pte->in.rw = 0;
    invalidate_tlb(0x00000000);

    // 0x30000000 to 0x40000000, i.e. 768 MiB to 1 GiB
    while (heap_mm->pgs->size() < 256 * 1024 * 1024 / PAGE_SIZE)
        heap_mm->append_page(&empty_page, true, true, true, true);

    kernel_heap_allocator = types::pnew<types::kernel_ident_allocator>(kernel_heap_allocator,
        KERNEL_HEAP_START, vptrdiff(KERNEL_HEAP_LIMIT, KERNEL_HEAP_START));
}
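
// pack a GDT segment descriptor: the 32-bit base and the 20-bit limit are
// split across the low/mid/high fields as required by the x86 descriptor
// layout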
void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}