// mem.cpp
#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/mm.hpp>
#include <kernel/process.hpp>
#include <kernel/task.h>
#include <kernel/vga.hpp>
#include <kernel_main.hpp>
#include <stdio.h>
#include <types/allocator.hpp>
#include <types/assert.h>
#include <types/bitmap.h>
#include <types/size.h>
#include <types/status.h>
// constant values
#define EMPTY_PAGE_ADDR ((pptr_t)0x0000)
#define EMPTY_PAGE_END ((pptr_t)0x1000)
#define IDENTICALLY_MAPPED_HEAP_SIZE ((size_t)0x400000)
// ---------------------

static size_t mem_size;
static char mem_bitmap[1024 * 1024 / 8];
class brk_memory_allocator {
public:
    using byte = uint8_t;
    using size_type = size_t;

    struct mem_blk_flags {
        uint8_t is_free;
        uint8_t has_next;
        uint8_t _unused2;
        uint8_t _unused3;
    };

    struct mem_blk {
        size_t size;
        struct mem_blk_flags flags;
        // the first byte of the memory space
        // the minimum allocation size is 4 bytes
        uint8_t data[4];
    };
private:
    byte* p_start;
    byte* p_break;
    byte* p_limit;

    brk_memory_allocator(void) = delete;
    brk_memory_allocator(const brk_memory_allocator&) = delete;
    brk_memory_allocator(brk_memory_allocator&&) = delete;

    // moves the break to addr; fails if addr would pass p_limit
    inline constexpr int brk(byte* addr)
    {
        if (unlikely(addr >= p_limit))
            return GB_FAILED;
        p_break = addr;
        return GB_OK;
    }

    // sets errno
    inline byte* sbrk(size_type increment)
    {
        if (unlikely(brk(p_break + increment) != GB_OK)) {
            errno = ENOMEM;
            return nullptr;
        } else {
            errno = 0;
            return p_break;
        }
    }
    inline mem_blk* _find_next_mem_blk(mem_blk* blk, size_type blk_size)
    {
        byte* p = (byte*)blk;
        p += sizeof(mem_blk);
        p += blk_size;
        // the 4-byte data[] field is already counted in sizeof(mem_blk)
        p -= (4 * sizeof(byte));
        return (mem_blk*)p;
    }
    // sets errno
    // @param start_pos the position from which to start searching
    // @param size the size of the block we're looking for
    // @return a suitable block if one exists; otherwise, the last block
    mem_blk* find_blk(mem_blk* start_pos, size_type size)
    {
        while (1) {
            if (start_pos->flags.is_free && start_pos->size >= size) {
                errno = 0;
                return start_pos;
            } else {
                if (unlikely(!start_pos->flags.has_next)) {
                    errno = ENOTFOUND;
                    return start_pos;
                }
                start_pos = _find_next_mem_blk(start_pos, start_pos->size);
            }
        }
    }
    // sets errno
    mem_blk* allocate_new_block(mem_blk* blk_before, size_type size)
    {
        sbrk(sizeof(mem_blk) + size - 4 * sizeof(byte));
        // preserves errno
        if (unlikely(errno)) {
            return nullptr;
        }

        mem_blk* blk = _find_next_mem_blk(blk_before, blk_before->size);

        blk_before->flags.has_next = 1;
        blk->flags.has_next = 0;
        blk->flags.is_free = 1;
        blk->size = size;

        errno = 0;
        return blk;
    }
    void split_block(mem_blk* blk, size_type this_size)
    {
        // block is too small to be split
        if (blk->size < sizeof(mem_blk) + this_size) {
            return;
        }

        mem_blk* blk_next = _find_next_mem_blk(blk, this_size);

        blk_next->size = blk->size
            - this_size
            - sizeof(mem_blk)
            + 4 * sizeof(byte);

        blk_next->flags.has_next = blk->flags.has_next;
        blk_next->flags.is_free = 1;

        blk->flags.has_next = 1;
        blk->size = this_size;
    }
public:
    brk_memory_allocator(void* start, size_type limit)
        : p_start((byte*)start)
        , p_limit(p_start + limit)
    {
        brk(p_start);
        mem_blk* p_blk = (mem_blk*)sbrk(0);
        p_blk->size = 4;
        p_blk->flags.has_next = 0;
        p_blk->flags.is_free = 1;
    }
    // sets errno
    void* alloc(size_type size)
    {
        struct mem_blk* block_allocated;

        block_allocated = find_blk((mem_blk*)p_start, size);
        if (errno == ENOTFOUND) {
            // find_blk() found no suitable block, so block_allocated
            // points to the last block; append a new block after it
            block_allocated = allocate_new_block(block_allocated, size);
            if (errno) {
                // preserves errno
                return nullptr;
            }
        } else {
            split_block(block_allocated, size);
        }

        errno = 0;
        block_allocated->flags.is_free = 0;
        return block_allocated->data;
    }
    void free(void* ptr)
    {
        // step back over the block header (size field and flags) to reach the mem_blk
        mem_blk* blk = (mem_blk*)((byte*)ptr - (sizeof(mem_blk_flags) + sizeof(size_t)));
        blk->flags.is_free = 1;
        // TODO: coalesce adjacent free blocks
    }
};
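// Example (editor's sketch, not from the original file): how this first-fit
// allocator is typically driven. The backing buffer here is hypothetical.
//
//     static char buf[0x1000];
//     brk_memory_allocator alloc((void*)buf, sizeof(buf));
//
//     void* p = alloc.alloc(16); // first fit: scans from p_start, extends
//                                // the break via sbrk() if nothing fits
//     alloc.free(p);             // only flips is_free; adjacent free blocks
//                                // are not yet merged (see the TODO above)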
static brk_memory_allocator* kernel_heap_allocator;
static brk_memory_allocator
    kernel_ident_mapped_allocator((void*)bss_section_end_addr,
        IDENTICALLY_MAPPED_HEAP_SIZE);
void* k_malloc(size_t size)
{
    void* ptr = kernel_heap_allocator->alloc(size);
    assert_likely(ptr);
    return ptr;
}

void k_free(void* ptr)
{
    kernel_heap_allocator->free(ptr);
}

void* ki_malloc(size_t size)
{
    void* ptr = kernel_ident_mapped_allocator.alloc(size);
    assert_likely(ptr);
    return ptr;
}

void ki_free(void* ptr)
{
    kernel_ident_mapped_allocator.free(ptr);
}
void* ptovp(pptr_t p_ptr)
{
    // memory below 768MiB is identically mapped
    // TODO: address translation for high mem
    assert(p_ptr <= 0x30000000);
    return (void*)p_ptr;
}
inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}
constexpr void mark_addr_len(pptr_t start, size_t n)
{
    if (unlikely(n == 0))
        return;
    page_t start_page = to_page(start);
    // round the end of the range up so partially covered pages are included
    page_t end_page = to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

constexpr void free_addr_len(pptr_t start, size_t n)
{
    if (unlikely(n == 0))
        return;
    page_t start_page = to_page(start);
    page_t end_page = to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

inline constexpr void mark_addr_range(pptr_t start, pptr_t end)
{
    mark_addr_len(start, end - start);
}

inline constexpr void free_addr_range(pptr_t start, pptr_t end)
{
    free_addr_len(start, end - start);
}
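// Worked example of the rounding above, assuming to_page(addr) is addr >> 12
// with 4KiB pages: mark_addr_len(0x1234, 0x2000) computes start_page = 1 and
// end_page = to_page(0x1234 + 0x2000 + 4095) = 4, so pages 1, 2 and 3 are
// marked: exactly the pages the byte range touches.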
// @return the number of pages continuously available starting at start, at most n
static inline size_t _test_n_raw_pages(page_t start, size_t n)
{
    // *start is already allocated
    if (bm_test(mem_bitmap, start))
        return 0;

    return 1 + ((n > 1) ? _test_n_raw_pages(start + 1, n - 1) : 0);
}
page_t alloc_n_raw_pages(size_t n)
{
    page_t first = 0;
    while (first <= 1024 * 1024 - n) {
        size_t max = _test_n_raw_pages(first, n);
        if (max != n) {
            // skip the free run and the allocated page that ended it
            first += (max + 1);
        } else {
            for (page_t i = first; i < first + n; ++i)
                mark_page(i);
            return first;
        }
    }
    assert(false);
    return 0xffffffff;
}

void free_n_raw_pages(page_t start_pg, size_t n)
{
    while (n--)
        free_page(start_pg++);
}
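// Usage sketch (editor's illustration, not from the original file):
//
//     page_t pg = alloc_n_raw_pages(4); // first run of 4 free pages in the bitmap
//     // ... use pages pg .. pg+3 ...
//     free_n_raw_pages(pg, 4);          // clear the same 4 bits
//
// Note that these functions operate on the physical page bitmap only;
// no page table entries are created or destroyed here.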
struct page allocate_page(void)
{
    return page {
        .phys_page_id = alloc_raw_page(),
        .pte = nullptr,
        .ref_count = types::_new<types::kernel_ident_allocator, size_t>(0),
        .attr { 0 },
    };
}
pd_t alloc_pd(void)
{
    // TODO: allocate the page in low memory and create a struct page for it
    page_t pd_page = alloc_raw_page();
    pd_t pd = to_pd(pd_page);
    memset(pd, 0x00, PAGE_SIZE);
    return pd;
}

pt_t alloc_pt(void)
{
    // TODO: allocate the page in low memory and create a struct page for it
    page_t pt_page = alloc_raw_page();
    pt_t pt = to_pt(pt_page);
    memset(pt, 0x00, PAGE_SIZE);
    return pt;
}
void dealloc_pd(pd_t pd)
{
    // free the page tables of entries 256..1023, i.e. addresses at and above 1GiB
    for (pde_t* ent = (*pd) + 256; ent < (*pd) + 1024; ++ent) {
        if (!ent->in.p)
            continue;
        dealloc_pt(to_pt(ent));
    }
    memset(pd, 0x00, sizeof(*pd));

    page_t pg = to_page((pptr_t)pd);
    free_page(pg);
}

void dealloc_pt(pt_t pt)
{
    memset(pt, 0x00, sizeof(*pt));

    page_t pg = to_page((pptr_t)pt);
    free_page(pg);
}
static inline void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark the kernel page directory
    mark_addr_range(0x00001000, 0x00006000);
    // mark the empty page
    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
    // mark the EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0xfffff);
    // mark the kernel
    mark_addr_len(0x00100000, kernel_size);
    // mark the identically mapped heap
    mark_addr_len(bss_section_end_addr, IDENTICALLY_MAPPED_HEAP_SIZE);

    // mark every e820 region that is not usable RAM (type != 1) as allocated
    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}
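// For reference, a 20-byte e820 entry as reported by BIOS INT 0x15/EAX=0xE820
// has this layout (the project's actual definitions live in the headers):
//
//     struct e820_mem_map_entry_20 {
//         uint64_t base; // start of the region
//         uint64_t len;  // length in bytes
//         uint32_t type; // 1 = usable RAM; everything else is reserved
//     };
//
// The 24-byte variant appends a uint32_t of ACPI 3.0 extended attributes.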
using kernel::mm_list;

mm_list::mm_list(const mm_list& v)
    : m_areas(v.m_areas)
{
    pd_t pd = alloc_pd();
    memcpy(pd, v.m_pd, PAGE_SIZE);
    m_pd = pd;
}
inline void map_raw_page_to_pte(
    pte_t* pte,
    page_t page,
    bool present,
    bool write,
    bool priv)
{
    // set the P, RW and US bits accordingly
    pte->v = 0;
    pte->in.p = present;
    pte->in.rw = write;
    pte->in.us = !priv;
    pte->in.page = page;
}
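// Worked example, using the standard i386 PTE layout (bit 0 = P, bit 1 = RW,
// bit 2 = US, bits 12..31 = physical page number): mapping physical page
// 0x1234 as present, writable and unprivileged yields
//
//     pte->v == (0x1234 << 12) | 0x7 == 0x01234007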
int mm::append_page(page* pg, bool present, bool write, bool priv, bool cow)
{
    void* addr = this->end();
    pde_t* pde = to_pde(this->owner->m_pd, addr);
    // the page table does not exist yet
    if (unlikely(!pde->in.p)) {
        // allocate a page for the page table
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 1;
        pde->in.pt_page = alloc_raw_page();

        memset(to_pt(pde), 0x00, PAGE_SIZE);
    }

    // map the page into the page table
    pte_t* pte = to_pte(pde, addr);
    map_raw_page_to_pte(pte, pg->phys_page_id, present, (write && !cow), priv);

    // when a page becomes copy-on-write for the first time,
    // downgrade the original mapping to read-only as well
    if (unlikely(cow && !pg->attr.in.cow)) {
        pg->attr.in.cow = 1;
        pg->pte->in.rw = 0;
        pg->pte->in.a = 0;
        invalidate_tlb(addr);
    }
    ++*pg->ref_count;

    auto iter = this->pgs->emplace_back(*pg);
    iter->pte = pte;
    return GB_OK;
}
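// Copy-on-write flow implied above (editor's summary): a CoW page is always
// mapped read-only (write && !cow), and the original mapping loses write
// access the first time the page is shared. A later write from either side
// faults, and the page fault handler (not in this file) is expected to copy
// the page and restore write access for the faulting mapping.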
static inline int _mmap(
    mm_list* mms,
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv)
{
    if (unlikely(!file->flags.in.file && !file->flags.in.special_node)) {
        errno = EINVAL;
        return GB_FAILED;
    }

    // round len up to a multiple of the page size
    len = (len + PAGE_SIZE - 1) & 0xfffff000;
    size_t n_pgs = len >> 12;

    for (const auto& mm_area : *mms)
        if (!mm_area.is_avail(hint, (char*)hint + len)) {
            errno = EEXIST;
            return GB_FAILED;
        }

    auto mm = mms->addarea(hint, write, priv);
    mm->mapped_file = file;
    mm->file_offset = offset;

    for (size_t i = 0; i < n_pgs; ++i)
        mm->append_page(&empty_page, false, write, priv, true);

    return GB_OK;
}
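// Worked example of the length rounding above, with PAGE_SIZE == 4096:
// a request of len = 0x1800 becomes (0x1800 + 0xfff) & 0xfffff000 == 0x2000,
// so n_pgs == 2 and two non-present CoW mappings of empty_page are appended.
// Since the file and offset are recorded on the area, the real contents are
// presumably filled in on a later page fault.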
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv)
{
    return _mmap(&current_process->mms, hint, len, file, offset, write, priv);
}
// map a page identically
// this function is only meant to be used during initialization;
// it checks the pde's P bit, so the caller must make sure that bit is
// already set for pages holding page tables, or we would recurse forever
static inline void _init_map_page_identically(page_t page)
{
    pde_t* pde = *KERNEL_PAGE_DIRECTORY_ADDR + to_pdi(page);
    // the page table does not exist yet
    if (unlikely(!pde->in.p)) {
        // allocate a page for the page table
        // and set the P bit of the pde in advance
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_raw_page();
        // identity-map the new page table itself; its pde's P bit
        // is already set, so this recursion terminates
        _init_map_page_identically(pde->in.pt_page);

        memset(to_pt(pde), 0x00, PAGE_SIZE);
    }
    // map the page in the page table
    pte_t* pt = to_pte(pde, page);
    pt->v = 0x00000003; // present, writable, supervisor
    pt->in.page = page;
}
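// Index arithmetic sketch (editor's illustration, assuming to_pdi(page) is
// page >> 10, i.e. one pde per 4MiB with 4KiB pages): the page at 16MiB is
// page number 0x1000, so it lands in pde 4. When that pde is fresh, the
// recursive call maps the new page table's own page first, so the memset
// that follows can write to it through the identity mapping.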
static inline void init_paging_map_low_mem_identically(void)
{
    for (pptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
        // skip pages already marked as allocated in the bitmap
        if (bm_test(mem_bitmap, to_page(addr)))
            continue;
        _init_map_page_identically(to_page(addr));
    }
}
void init_mem(void)
{
    init_mem_layout();

    // identically map 16MiB to 768MiB
    init_paging_map_low_mem_identically();

    kernel_mms = types::pnew<types::kernel_ident_allocator>(kernel_mms, KERNEL_PAGE_DIRECTORY_ADDR);
    auto heap_mm = kernel_mms->addarea(KERNEL_HEAP_START, true, true);

    // create the empty_page struct
    empty_page.attr.in.cow = 0;
    empty_page.phys_page_id = to_page(EMPTY_PAGE_ADDR);
    empty_page.ref_count = types::_new<types::kernel_ident_allocator, size_t>(1);
    empty_page.pte = to_pte(*KERNEL_PAGE_DIRECTORY_ADDR, empty_page.phys_page_id);
    empty_page.pte->in.rw = 0;
    invalidate_tlb(0x00000000);

    // map the kernel heap: 0x30000000 to 0x40000000, i.e. 768MiB to 1GiB
    while (heap_mm->pgs->size() < 256 * 1024 * 1024 / PAGE_SIZE)
        heap_mm->append_page(&empty_page, true, true, true, true);

    kernel_heap_allocator = types::pnew<types::kernel_ident_allocator>(kernel_heap_allocator,
        KERNEL_HEAP_START, vptrdiff(KERNEL_HEAP_LIMIT, KERNEL_HEAP_START));
}
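// Size check (editor's arithmetic): the heap area spans 768MiB to 1GiB,
// i.e. 256MiB, so with 4KiB pages the loop above appends
// 256 * 1024 * 1024 / 4096 == 65536 CoW mappings of empty_page before the
// brk allocator is handed the same range.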
void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}
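// Worked example (hypothetical values, standard GDT encoding): a flat 4GiB
// ring-0 code segment uses base = 0, limit = 0xfffff, access = 0x9a
// (present, ring 0, code, readable) and flags = 0xc (4KiB granularity,
// 32-bit), which splits into limit_low = 0xffff and limit_high = 0xf:
//
//     create_segment_descriptor(&sd, 0, 0xfffff, 0xc, 0x9a);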