mem.c

#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/stdio.h>
#include <kernel/task.h>
#include <kernel/vga.h>
#include <kernel_main.h>
#include <types/bitmap.h>
#include <types/list.h>

// static variables
struct mm kernel_mm;
struct mm* kernel_mm_head;
// ---------------------

// constant values
#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
// ---------------------

static void* p_start;
static void* p_break;

static size_t mem_size;

static char mem_bitmap[1024 * 1024 / 8];
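
// Note on mem_bitmap: one bit per 4 KiB physical page, so the
// 1024 * 1024 bits (128 KiB of bitmap) cover all 1024 * 1024 pages
// of the 32-bit physical address space, i.e. the full 4 GiB.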

static int32_t set_heap_start(void* start_addr)
{
    p_start = start_addr;
    return 0;
}

static int32_t brk(void* addr)
{
    if (addr >= KERNEL_HEAP_LIMIT) {
        return GB_FAILED;
    }
    p_break = addr;
    return 0;
}

// sets errno if the heap pointer cannot be increased
static void* sbrk(size_t increment)
{
    if (brk(p_break + increment) != 0) {
        errno = ENOMEM;
        return 0;
    } else {
        errno = 0;
        return p_break;
    }
}
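
/*
 * Heap block layout, as implied by the pointer arithmetic in the
 * functions below (struct mem_blk itself is defined in <kernel/mem.h>):
 *
 *   [ flags | size | data[4] ... payload continues past data ... ]
 *
 * The payload begins at the 4-byte data member and may extend beyond
 * the end of the struct, which is why traversal code advances by
 * sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t).
 */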

int init_heap(void)
{
    set_heap_start(KERNEL_HEAP_START);

    if (brk(KERNEL_HEAP_START) != 0) {
        return GB_FAILED;
    }
    struct mem_blk* p_blk = sbrk(0);
    p_blk->size = 4;
    p_blk->flags.has_next = 0;
    p_blk->flags.is_free = 1;
    return GB_OK;
}

// @param start_pos position where to start finding
// @param size the size of the block we're looking for
// @return the found block if a suitable block exists; otherwise, the last block
static struct mem_blk*
find_blk(
    struct mem_blk* start_pos,
    size_t size)
{
    while (1) {
        if (start_pos->flags.is_free && start_pos->size >= size) {
            errno = 0;
            return start_pos;
        } else {
            if (!start_pos->flags.has_next) {
                errno = ENOTFOUND;
                return start_pos;
            }
            start_pos = ((void*)start_pos)
                + sizeof(struct mem_blk)
                + start_pos->size
                - 4 * sizeof(uint8_t);
        }
    }
}

static struct mem_blk*
allocate_new_block(
    struct mem_blk* blk_before,
    size_t size)
{
    sbrk(sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t));
    if (errno) {
        return 0;
    }

    struct mem_blk* blk = ((void*)blk_before)
        + sizeof(struct mem_blk)
        + blk_before->size
        - 4 * sizeof(uint8_t);

    blk_before->flags.has_next = 1;

    blk->flags.has_next = 0;
    blk->flags.is_free = 1;
    blk->size = size;

    errno = 0;
    return blk;
}

static void split_block(
    struct mem_blk* blk,
    size_t this_size)
{
    // the block is too small to be split
    if (blk->size < sizeof(struct mem_blk) + this_size) {
        return;
    }

    struct mem_blk* blk_next = ((void*)blk)
        + sizeof(struct mem_blk)
        + this_size
        - 4 * sizeof(uint8_t);

    blk_next->size = blk->size
        - this_size
        - sizeof(struct mem_blk)
        + 4 * sizeof(uint8_t);

    blk_next->flags.has_next = blk->flags.has_next;
    blk_next->flags.is_free = 1;

    blk->flags.has_next = 1;
    blk->size = this_size;
}
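
/*
 * k_malloc below is a first-fit allocator: find_blk() scans the block
 * list from the heap start and returns the first free block that is
 * large enough; if none fits, the heap is grown with
 * allocate_new_block(), otherwise the found block is split so the
 * remainder stays allocatable.
 */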

void* k_malloc(size_t size)
{
    struct mem_blk* block_allocated;

    block_allocated = find_blk(p_start, size);

    if (errno == ENOTFOUND) {
        // 'block_allocated' points to the last block in the list
        block_allocated = allocate_new_block(block_allocated, size);
        // allocate_new_block() may fail; errno is preserved for the
        // caller, but we must not dereference a null result
        if (block_allocated == 0) {
            return 0;
        }
    } else {
        split_block(block_allocated, size);
    }

    block_allocated->flags.is_free = 0;

    return block_allocated->data;
}
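
// k_free recovers the block header from the payload pointer by stepping
// back over the size and flags fields; this assumes struct mem_blk is
// laid out as { flags, size, data[] } with no padding in between.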

void k_free(void* ptr)
{
    ptr -= (sizeof(struct mem_blk_flags) + sizeof(size_t));
    struct mem_blk* blk = (struct mem_blk*)ptr;
    blk->flags.is_free = 1;
    // TODO: coalesce adjacent free blocks
}

void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
{
    if (p_ptr <= 0x30000000) {
        // memory below 768MiB is identically mapped
        return (void*)p_ptr;
    } else {
        // TODO: address translation
        MAKE_BREAK_POINT();
        return (void*)0xffffffff;
    }
}

phys_ptr_t l_ptr_to_p_ptr(struct mm* mm, linr_ptr_t v_ptr)
{
    if (mm == kernel_mm_head && v_ptr < (linr_ptr_t)KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
        return (phys_ptr_t)v_ptr;
    }

    while (mm != NULL) {
        if (v_ptr < mm->start || v_ptr >= mm->start + mm->len * PAGE_SIZE) {
            goto next;
        }
        size_t offset = (size_t)(v_ptr - mm->start);
        LIST_LIKE_AT(struct page, mm->pgs, offset / PAGE_SIZE, result);
        return page_to_phys_addr(result->phys_page_id) + (offset % PAGE_SIZE);

    next:
        mm = mm->next;
    }

    // TODO: handle error
    return 0xffffffff;
}
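
// Translation example: with mm->start == 0x40000000 (a hypothetical
// value) and v_ptr == 0x40001234, offset is 0x1234, so the page index
// within the area is 0x1234 / PAGE_SIZE == 1 and the byte offset within
// that page is 0x1234 % PAGE_SIZE == 0x234.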

phys_ptr_t v_ptr_to_p_ptr(void* v_ptr)
{
    return l_ptr_to_p_ptr(kernel_mm_head, (linr_ptr_t)v_ptr);
}

static inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

static inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}

static void mark_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

static void free_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

static inline void mark_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    mark_addr_len(start, end - start);
}

static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    free_addr_len(start, end - start);
}
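
// Rounding example for the helpers above: adding 4095 before
// phys_addr_to_page() rounds the end address up to a page boundary,
// so a range that only partially covers its last page still marks that
// page. E.g. start = 0x1000, n = 0x1001: start_page = 1,
// end_page = phys_addr_to_page(0x3000) = 3, marking pages 1 and 2.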

page_t alloc_raw_page(void)
{
    for (page_t i = 0; i < 1024 * 1024; ++i) {
        if (bm_test(mem_bitmap, i) == 0) {
            mark_page(i);
            return i;
        }
    }
    return GB_FAILED;
}

struct page* allocate_page(void)
{
    // TODO: allocate memory on the identically mapped area
    struct page* p = (struct page*)k_malloc(sizeof(struct page));
    memset(p, 0x00, sizeof(struct page));
    p->phys_page_id = alloc_raw_page();
    p->ref_count = (size_t*)k_malloc(sizeof(size_t));
    // start with a zero reference count; k_map() increments it when
    // the page is actually mapped
    *p->ref_count = 0;
    return p;
}

static inline void make_page_table(page_table_entry* pt)
{
    memset(pt, 0x00, sizeof(page_table_entry) * 1024);
}

static inline void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark kernel page directory
    mark_addr_range(0x00000000, 0x00005000);
    // mark empty page
    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
    // mark EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0xfffff);
    // mark kernel
    mark_addr_len(0x00100000, kernel_size);

    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}
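
/*
 * Resulting low-memory layout after init_mem_layout() (addresses taken
 * from the code above):
 *
 *   0x00000000 - 0x00004fff   kernel page directory (reserved)
 *   0x00005000 - 0x00005fff   the empty page
 *   0x00080000 - 0x000fffff   EBDA and upper memory (reserved)
 *   0x00100000 - ...          kernel image (kernel_size bytes)
 *
 * plus every E820 region whose type is not 1 (usable RAM).
 */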

int is_l_ptr_valid(struct mm* mm_area, linr_ptr_t l_ptr)
{
    while (mm_area != NULL) {
        if (l_ptr >= mm_area->start && l_ptr < mm_area->start + mm_area->len * PAGE_SIZE) {
            return GB_OK;
        }
        mm_area = mm_area->next;
    }
    return GB_FAILED;
}

struct page* find_page_by_l_ptr(struct mm* mm, linr_ptr_t l_ptr)
{
    if (mm == kernel_mm_head && l_ptr < (linr_ptr_t)KERNEL_IDENTICALLY_MAPPED_AREA_LIMIT) {
        // TODO: make mm for identically mapped area
        MAKE_BREAK_POINT();
        return (struct page*)0xffffffff;
    }

    while (mm != NULL) {
        if (l_ptr >= mm->start && l_ptr < mm->start + mm->len * PAGE_SIZE) {
            size_t offset = (size_t)(l_ptr - mm->start);
            LIST_LIKE_AT(struct page, mm->pgs, offset / PAGE_SIZE, result);
            return result;
        }
        mm = mm->next;
    }

    // TODO: error handling
    return NULL;
}

void map_raw_page_to_pte(
    page_table_entry* pte,
    page_t page,
    int rw,
    int priv)
{
    // set P bit
    pte->v = 0x00000001;
    pte->in.rw = (rw == 1);
    pte->in.us = (priv == 1);
    pte->in.page = page;
}
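
// Note: page_table_entry appears to be a union of a raw uint32_t (.v)
// and bitfields (.in); writing pte->v = 0x00000001 first clears the
// whole entry and sets only the present bit, before the rw, us and
// page-number bitfields are filled in.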

static void _map_raw_page_to_addr(
    struct mm* mm_area,
    page_t page,
    int rw,
    int priv)
{
    linr_ptr_t addr = (linr_ptr_t)mm_area->start + mm_area->len * PAGE_SIZE;
    page_directory_entry* pde = mm_area->pd + linr_addr_to_pd_i(addr);
    // the page table does not exist yet
    if (!pde->in.p) {
        // allocate a page for the page table
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_raw_page();
        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
    }
    // map the page in the page table
    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
    pte += linr_addr_to_pt_i(addr);
    map_raw_page_to_pte(pte, page, rw, priv);
}

// map page to the end of mm_area in pd
int k_map(
    struct mm* mm_area,
    struct page* page,
    int read,
    int write,
    int priv,
    int cow)
{
    struct page* p_page_end = mm_area->pgs;
    while (p_page_end != NULL && p_page_end->next != NULL)
        p_page_end = p_page_end->next;

    if (cow) {
        // find its ancestor
        while (page->attr.cow)
            page = page->next;

        // create a new page node
        struct page* new_page = k_malloc(sizeof(struct page));

        new_page->attr.read = (read == 1);
        new_page->attr.write = (write == 1);
        new_page->attr.system = (priv == 1);
        new_page->attr.cow = 1;
        // TODO: move *next out of struct page
        new_page->next = NULL;

        new_page->phys_page_id = page->phys_page_id;
        new_page->ref_count = page->ref_count;

        if (p_page_end != NULL)
            p_page_end->next = new_page;
        else
            mm_area->pgs = new_page;
    } else {
        page->attr.read = (read == 1);
        page->attr.write = (write == 1);
        page->attr.system = (priv == 1);
        page->attr.cow = 0;
        // TODO: move *next out of struct page
        page->next = NULL;

        if (p_page_end != NULL)
            p_page_end->next = page;
        else
            mm_area->pgs = page;
    }
    _map_raw_page_to_addr(
        mm_area,
        page->phys_page_id,
        (write && !cow),
        priv);

    ++mm_area->len;
    ++*page->ref_count;
    return GB_OK;
}
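
/*
 * Copy-on-write semantics above: a cow mapping shares the ancestor's
 * physical page and reference count, and its page table entry is
 * created with the rw bit forced to 0 ((write && !cow) evaluates to
 * 0), so the first write faults and the page can be copied then.
 */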

// map a page identically
// this function is only meant to be used during initialization
// it checks the PDE's P bit, so you need to make sure the bit is
// already set for the page table containing the mapping, to avoid
// infinite recursion
static inline void _init_map_page_identically(page_t page)
{
    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);
    // the page table does not exist yet
    if (!pde->in.p) {
        // allocate a page for the page table
        // set the P bit of the pde in advance
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_raw_page();
        _init_map_page_identically(pde->in.pt_page);
        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
    }
    // map the page in the page table
    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
    pt += page_to_pt_i(page);

    // present and writable
    pt->v = 0x00000003;
    pt->in.page = page;
}

static inline void init_paging_map_low_mem_identically(void)
{
    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
        // skip pages that are already marked as in use
        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
            continue;
        _init_map_page_identically(phys_addr_to_page(addr));
    }
}

static struct page empty_page;
static struct page heap_first_page;
static size_t heap_first_page_ref_count;

void init_mem(void)
{
    init_mem_layout();

    // identically map 16MiB-768MiB
    init_paging_map_low_mem_identically();

    kernel_mm_head = &kernel_mm;

    kernel_mm.attr.read = 1;
    kernel_mm.attr.write = 1;
    kernel_mm.attr.system = 1;
    kernel_mm.len = 0;
    kernel_mm.next = NULL;
    kernel_mm.pd = KERNEL_PAGE_DIRECTORY_ADDR;
    kernel_mm.pgs = NULL;
    kernel_mm.start = (linr_ptr_t)KERNEL_HEAP_START;

    heap_first_page.attr.cow = 0;
    heap_first_page.attr.read = 1;
    heap_first_page.attr.write = 1;
    heap_first_page.attr.system = 1;
    heap_first_page.next = NULL;
    heap_first_page.phys_page_id = alloc_raw_page();
    heap_first_page.ref_count = &heap_first_page_ref_count;
    *heap_first_page.ref_count = 0;

    k_map(kernel_mm_head, &heap_first_page, 1, 1, 1, 0);

    init_heap();

    // create the empty_page struct
    empty_page.attr.cow = 0;
    empty_page.attr.read = 1;
    empty_page.attr.write = 0;
    empty_page.attr.system = 0;
    empty_page.next = NULL;
    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
    empty_page.ref_count = (size_t*)k_malloc(sizeof(size_t));
    *empty_page.ref_count = 1;

    // TODO: improve the algorithm SO FREAKING SLOW
    // while (kernel_mm_head->len < 256 * 1024 * 1024 / PAGE_SIZE) {
    while (kernel_mm_head->len < 16 * 1024 * 1024 / PAGE_SIZE) {
        k_map(
            kernel_mm_head, &empty_page,
            1, 1, 1, 1);
    }
}
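
/*
 * Note on the loop above: every heap page beyond the first one is
 * backed by the same physical empty page, mapped copy-on-write and
 * therefore read-only in the page table; a write to any of these pages
 * is meant to trigger a fault (handled elsewhere) that allocates a
 * private copy, keeping the initial physical footprint minimal.
 */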

void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}
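
/*
 * Usage sketch (hypothetical caller; `gdt` is not defined in this
 * file): a flat 4 GiB ring-0 code segment would be built as
 *
 *   create_segment_descriptor(&gdt[1], 0x00000000, 0xfffff, 0xc, 0x9a);
 *
 * where limit 0xfffff with flags 0xc (4 KiB granularity, 32-bit) spans
 * 4 GiB, and access byte 0x9a means present, ring 0, code, readable.
 */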