mem.cpp

#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/mm.hpp>
#include <kernel/stdio.h>
#include <kernel/task.h>
#include <kernel/vga.h>
#include <kernel_main.h>
#include <types/bitmap.h>

// global objects
mm_list* kernel_mms;
// ---------------------

// constant values
#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
#define IDENTICALLY_MAPPED_HEAP_SIZE ((size_t)0x400000)
// ---------------------

static size_t mem_size;
static char mem_bitmap[1024 * 1024 / 8];

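// A minimal brk()/sbrk()-style heap allocator: the managed region grows from
// 'p_start' towards 'p_limit', and allocations are organized as a singly
// linked list of mem_blk headers laid out back to back inside that region.
// alloc() walks the list for a free block that is large enough, splitting it
// or appending a new block at the break as needed; free() only marks the
// block free again.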
class brk_memory_allocator {
public:
    using byte = uint8_t;
    using size_type = size_t;

    struct mem_blk_flags {
        uint8_t is_free;
        uint8_t has_next;
        uint8_t _unused2;
        uint8_t _unused3;
    };

    struct mem_blk {
        size_t size;
        struct mem_blk_flags flags;
        // the first byte of the memory space
        // the minimal allocated space is 4 bytes
        uint8_t data[4];
    };

private:
    byte* p_start;
    byte* p_break;
    byte* p_limit;

    brk_memory_allocator(void) = delete;
    brk_memory_allocator(const brk_memory_allocator&) = delete;
    brk_memory_allocator(brk_memory_allocator&&) = delete;

    // moves the break to addr; fails if addr would reach p_limit
    inline int brk(byte* addr)
    {
        if (addr >= p_limit)
            return GB_FAILED;
        p_break = addr;
        return GB_OK;
    }

    // sets errno
    inline byte* sbrk(size_type increment)
    {
        if (brk(p_break + increment) != GB_OK) {
            errno = ENOMEM;
            return nullptr;
        } else {
            errno = 0;
            return p_break;
        }
    }

    inline mem_blk* _find_next_mem_blk(mem_blk* blk, size_type blk_size)
    {
        byte* p = (byte*)blk;
        p += sizeof(mem_blk);
        p += blk_size;
        // data[4] is counted both in sizeof(mem_blk) and in blk_size,
        // so subtract it once
        p -= (4 * sizeof(byte));
        return (mem_blk*)p;
    }

    // sets errno
    // @param start_pos position from which to start searching
    // @param size the size of the block we're looking for
    // @return the found block if a suitable one exists; otherwise the last block
    mem_blk* find_blk(mem_blk* start_pos, size_type size)
    {
        while (1) {
            if (start_pos->flags.is_free && start_pos->size >= size) {
                errno = 0;
                return start_pos;
            } else {
                if (!start_pos->flags.has_next) {
                    errno = ENOTFOUND;
                    return start_pos;
                }
                start_pos = _find_next_mem_blk(start_pos, start_pos->size);
            }
        }
    }

    // sets errno
    mem_blk* allocate_new_block(mem_blk* blk_before, size_type size)
    {
        sbrk(sizeof(mem_blk) + size - 4 * sizeof(byte));
        // preserve errno set by sbrk
        if (errno) {
            return nullptr;
        }

        mem_blk* blk = _find_next_mem_blk(blk_before, blk_before->size);

        blk_before->flags.has_next = 1;

        blk->flags.has_next = 0;
        blk->flags.is_free = 1;
        blk->size = size;

        errno = 0;
        return blk;
    }

    void split_block(mem_blk* blk, size_type this_size)
    {
        // the block is too small to be split
        if (blk->size < sizeof(mem_blk) + this_size) {
            return;
        }

        mem_blk* blk_next = _find_next_mem_blk(blk, this_size);

        blk_next->size = blk->size
            - this_size
            - sizeof(mem_blk)
            + 4 * sizeof(byte);

        blk_next->flags.has_next = blk->flags.has_next;
        blk_next->flags.is_free = 1;

        blk->flags.has_next = 1;
        blk->size = this_size;
    }

public:
    brk_memory_allocator(void* start, size_type limit)
        : p_start((byte*)start)
        , p_limit(p_start + limit)
    {
        brk(p_start);
        mem_blk* p_blk = (mem_blk*)sbrk(0);
        p_blk->size = 4;
        p_blk->flags.has_next = 0;
        p_blk->flags.is_free = 1;
    }

    // sets errno
    void* alloc(size_type size)
    {
        struct mem_blk* block_allocated;

        block_allocated = find_blk((mem_blk*)p_start, size);
        if (errno == ENOTFOUND) {
            // no suitable block was found; 'block_allocated' now points
            // to the last block in the list, so append a new one after it
            block_allocated = allocate_new_block(block_allocated, size);
            if (errno) {
                // preserve errno set by allocate_new_block
                return nullptr;
            }
        } else {
            split_block(block_allocated, size);
        }

        errno = 0;
        block_allocated->flags.is_free = 0;
        return block_allocated->data;
    }

    void free(void* ptr)
    {
        mem_blk* blk = (mem_blk*)((byte*)ptr - (sizeof(mem_blk_flags) + sizeof(size_t)));
        blk->flags.is_free = 1;
        // TODO: merge adjacent free blocks
    }
};

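// Two allocator instances are used below: 'kernel_ident_mapped_allocator'
// lives in the identically mapped region right after the kernel's bss and
// backs ki_malloc()/ki_free(), while 'kernel_heap_allocator' is created later
// in init_mem() on top of the paged kernel heap and backs k_malloc()/k_free().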
static brk_memory_allocator* kernel_heap_allocator;
static brk_memory_allocator
    kernel_ident_mapped_allocator((void*)bss_section_end_addr,
        IDENTICALLY_MAPPED_HEAP_SIZE);

void* k_malloc(size_t size)
{
    return kernel_heap_allocator->alloc(size);
}

void k_free(void* ptr)
{
    kernel_heap_allocator->free(ptr);
}

void* ki_malloc(size_t size)
{
    void* ptr = kernel_ident_mapped_allocator.alloc(size);
    if (!ptr) {
        MAKE_BREAK_POINT();
    }
    return ptr;
}

void ki_free(void* ptr)
{
    kernel_ident_mapped_allocator.free(ptr);
}

void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
{
    if (p_ptr <= 0x30000000) {
        // memory below 768MiB is identically mapped
        return (void*)p_ptr;
    } else {
        // TODO: address translation
        MAKE_BREAK_POINT();
        return (void*)0xffffffff;
    }
}

phys_ptr_t l_ptr_to_p_ptr(const mm_list* mms, linr_ptr_t v_ptr)
{
    for (const mm& item : *mms) {
        if (v_ptr < item.start || v_ptr >= item.start + item.pgs->size() * PAGE_SIZE)
            continue;
        size_t offset = (size_t)(v_ptr - item.start);
        const page& p = item.pgs->at(offset / PAGE_SIZE);
        return page_to_phys_addr(p.phys_page_id) + (offset % PAGE_SIZE);
    }

    // TODO: handle error
    return 0xffffffff;
}

phys_ptr_t v_ptr_to_p_ptr(const void* v_ptr)
{
    if (v_ptr < KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
        return (phys_ptr_t)v_ptr;
    }
    return l_ptr_to_p_ptr(kernel_mms, (linr_ptr_t)v_ptr);
}

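// 'mem_bitmap' above tracks physical page usage with one bit per 4KiB page
// (1M bits cover the full 4GiB physical address space); the helpers below
// set and clear those bits for single pages and for address ranges.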
static inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

static inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}

static void mark_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

static void free_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

static inline void mark_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    mark_addr_len(start, end - start);
}

static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    free_addr_len(start, end - start);
}

page_t alloc_raw_page(void)
{
    return alloc_n_raw_pages(1);
}

// @return the number of pages (at most n) continuously available starting at 'start'
static inline size_t _test_n_raw_pages(page_t start, size_t n)
{
    // the page at 'start' is already allocated
    if (bm_test(mem_bitmap, start))
        return 0;

    return 1 + ((n > 1) ? _test_n_raw_pages(start + 1, n - 1) : 0);
}

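// first-fit scan over the physical page bitmap: find n consecutive free pages,
// mark them as used and return the first page number; on failure it triggers
// MAKE_BREAK_POINT() and returns 0xffffffff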
page_t alloc_n_raw_pages(size_t n)
{
    page_t first = 0;
    while (first <= 1024 * 1024 - n) {
        size_t max = _test_n_raw_pages(first, n);
        if (max != n) {
            first += (max + 1);
        } else {
            for (page_t i = first; i < first + n; ++i)
                bm_set(mem_bitmap, i);
            return first;
        }
    }
    MAKE_BREAK_POINT();
    return 0xffffffff;
}

struct page allocate_page(void)
{
    struct page p { };
    p.phys_page_id = alloc_raw_page();
    p.ref_count = types::kernel_ident_allocator_new<size_t>(0);
    return p;
}

static inline void make_page_table(page_table_entry* pt)
{
    memset(pt, 0x00, sizeof(page_table_entry) * 1024);
}

page_directory_entry* alloc_pd(void)
{
    // TODO: allocate the page in low memory and create a struct page for it
    page_t pd_page = alloc_raw_page();
    page_directory_entry* pd = (page_directory_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pd_page));
    memset(pd, 0x00, PAGE_SIZE);
    return pd;
}

page_table_entry* alloc_pt(void)
{
    // TODO: allocate the page in low memory and create a struct page for it
    page_t pt_page = alloc_raw_page();
    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pt_page));
    make_page_table(pt);
    return pt;
}

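// computes the total physical memory size from the boot-time memory size info
// (1KiB and 64KiB block counts) and marks every range that must never be handed
// out by the page allocator: the kernel page directory, the shared empty page,
// the EBDA and upper memory area, the kernel image, the identically mapped heap,
// and any e820 region not reported as usable (type != 1)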
static inline void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark kernel page directory
    mark_addr_range(0x00000000, 0x00005000);
    // mark empty page
    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
    // mark EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0xfffff);
    // mark kernel
    mark_addr_len(0x00100000, kernel_size);
    // mark identically mapped heap
    mark_addr_len(bss_section_end_addr, IDENTICALLY_MAPPED_HEAP_SIZE);

    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}

int is_l_ptr_valid(const mm_list* mms, linr_ptr_t l_ptr)
{
    for (const auto& item : *mms)
        if (l_ptr >= item.start && l_ptr < item.start + item.pgs->size() * PAGE_SIZE)
            return GB_OK;
    return GB_FAILED;
}

struct page* find_page_by_l_ptr(const mm_list* mms, linr_ptr_t l_ptr)
{
    for (const mm& item : *mms) {
        if (l_ptr >= item.start && l_ptr < item.start + item.pgs->size() * PAGE_SIZE) {
            size_t offset = (size_t)(l_ptr - item.start);
            return &item.pgs->at(offset / PAGE_SIZE);
        }
    }

    // TODO: error handling
    return nullptr;
}

static inline void map_raw_page_to_pte(
    page_table_entry* pte,
    page_t page,
    int rw,
    int priv)
{
    // set P bit
    pte->v = 0x00000001;
    pte->in.rw = (rw == 1);
    pte->in.us = (priv == 0);
    pte->in.page = page;
}

// map the page at the end of the mm_area in pd
int k_map(
    mm* mm_area,
    const struct page* page,
    int read,
    int write,
    int priv,
    int cow)
{
    linr_ptr_t addr = (linr_ptr_t)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
    page_directory_entry* pde = mm_area->pd + linr_addr_to_pd_i(addr);
    // the page table does not exist yet
    if (!pde->in.p) {
        // allocate a page for the page table
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = (priv == 0);
        pde->in.pt_page = alloc_raw_page();
        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
    }

    // map the page in the page table
    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
    pte += linr_addr_to_pt_i(addr);
    map_raw_page_to_pte(pte, page->phys_page_id, (write && !cow), priv);

    mm_area->pgs->push_back(*page);
    mm_area->pgs->back()->attr.cow = cow;
    ++*page->ref_count;

    return GB_OK;
}

// map a page identically
// this function is only meant to be used during initialization;
// when a page table has to be allocated, the P bit of the pde is set in
// advance before the recursive call that maps the new page table page
// itself, so the recursion cannot loop forever
static inline void _init_map_page_identically(page_t page)
{
    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);
    // the page table does not exist yet
    if (!pde->in.p) {
        // allocate a page for the page table
        // set the P bit of the pde in advance
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_raw_page();
        _init_map_page_identically(pde->in.pt_page);
        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
    }

    // map the page in the page table
    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
    pt += page_to_pt_i(page);
    pt->v = 0x00000003; // P and R/W bits set
    pt->in.page = page;
}

static inline void init_paging_map_low_mem_identically(void)
{
    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
        // skip pages already marked as used in the physical page bitmap
        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
            continue;
        _init_map_page_identically(phys_addr_to_page(addr));
    }
}

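// 'empty_page' is a single shared physical page (EMPTY_PAGE_ADDR) that
// init_mem() maps copy-on-write over most of the kernel heap area;
// init_mem() also sets up kernel_mms, maps and zeroes the first real heap
// page, and creates kernel_heap_allocator on top of it.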
page empty_page;

void init_mem(void)
{
    init_mem_layout();

    // identically map the 16MiB-768MiB range
    init_paging_map_low_mem_identically();

    kernel_mms = types::kernel_ident_allocator_new<mm_list>();
    auto heap_mm = kernel_mms->emplace_back((linr_ptr_t)KERNEL_HEAP_START, KERNEL_PAGE_DIRECTORY_ADDR, 1, 1);

    page heap_first_page {
        .phys_page_id = alloc_raw_page(),
        .ref_count = types::kernel_ident_allocator_new<size_t>(0),
        .attr = {
            .cow = 0,
        },
    };

    k_map(heap_mm.ptr(), &heap_first_page, 1, 1, 1, 0);
    memset(KERNEL_HEAP_START, 0x00, PAGE_SIZE);
    kernel_heap_allocator = types::kernel_ident_allocator_new<brk_memory_allocator>(KERNEL_HEAP_START,
        (uint32_t)KERNEL_HEAP_LIMIT - (uint32_t)KERNEL_HEAP_START);

    // create the empty_page struct
    empty_page.attr.cow = 0;
    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
    empty_page.ref_count = types::kernel_ident_allocator_new<size_t>(1);

    // TODO: improve the algorithm; this is extremely slow
    // while (kernel_mm_head->len < 256 * 1024 * 1024 / PAGE_SIZE) {
    while (heap_mm->pgs->size() < 256 * 1024 * 1024 / PAGE_SIZE) {
        k_map(
            heap_mm.ptr(), &empty_page,
            1, 1, 1, 1);
    }
}

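// fills in an x86 GDT segment descriptor: 'base' is split into a 16-bit low,
// 8-bit middle and 8-bit high part, 'limit' into a 16-bit low part and a
// 4-bit high part, while 'access' and 'flags' hold the access byte and the
// granularity/size flags respectively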
void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}

mm::mm(linr_ptr_t start, page_directory_entry* pd, bool write, bool system)
    : start(start)
    , attr({
          .read { 1 },
          .write { write },
          .system { system },
      })
    , pd(pd)
    , pgs(types::kernel_ident_allocator_new<page_arr>())
{
}

mm::mm(const mm& val)
    : start(val.start)
    , attr({
          .read { val.attr.read },
          .write { val.attr.write },
          .system { val.attr.system },
      })
    , pd(val.pd)
    , pgs(val.pgs)
{
}