// mem.c
#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/stdio.h>
#include <kernel/task.h>
#include <kernel/vga.h>
#include <kernel_main.h>
#include <types/bitmap.h>
// static variables
static struct mm kernel_mm;
// ---------------------
// constant values
#define EMPTY_PAGE_ADDR ((phys_ptr_t)0x5000)
#define EMPTY_PAGE_END ((phys_ptr_t)0x6000)
// ---------------------
// forward declarations
static page_t alloc_page(void);

// map page to the end of mm_area in pd
int k_map(
    struct mm* mm_area,
    struct page* page,
    int read,
    int write,
    int priv,
    int cow);
// ---------------------
static void* p_start;
static void* p_break;

static segment_descriptor* gdt;

static size_t mem_size;

// one bit per 4 KiB page; 1 Mi bits covers the full 4 GiB address space
static char mem_bitmap[1024 * 1024 / 8];
static int32_t set_heap_start(void* start_addr)
{
    p_start = start_addr;
    return 0;
}
static int32_t brk(void* addr)
{
    if (addr >= KERNEL_HEAP_LIMIT) {
        return GB_FAILED;
    }
    p_break = addr;
    return 0;
}
// sets errno when it fails to move the heap pointer;
// note: unlike POSIX sbrk, this returns the NEW break, not the old one
static void* sbrk(size_t increment)
{
    if (brk(p_break + increment) != 0) {
        errno = ENOMEM;
        return NULL;
    } else {
        errno = 0;
        return p_break;
    }
}
int init_heap(void)
{
    set_heap_start(KERNEL_HEAP_START);

    if (brk(KERNEL_HEAP_START) != 0) {
        return GB_FAILED;
    }

    // create the initial free block at the start of the heap
    struct mem_blk* p_blk = sbrk(0);
    p_blk->size = 4;
    p_blk->flags.has_next = 0;
    p_blk->flags.is_free = 1;
    return GB_OK;
}
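/*
 * Implied heap block layout (a sketch, not authoritative; it assumes
 * struct mem_blk ends in a 4-byte `data` array, which is what the
 * recurring `sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t)`
 * stride in the functions below suggests):
 *
 *   | flags | size | data[0..3] ... data[size-1] | next block header ...
 *   |<--- header ->|<--------- size bytes ------>|
 *
 * The 4 bytes of `data` declared inside the struct overlap the payload,
 * hence the `- 4 * sizeof(uint8_t)` correction in every stride computation.
 */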
// @param start_pos position from which to start searching
// @param size the size of the block we're looking for
// @return the found block if a suitable one exists; otherwise the last
//         block in the list, with errno set to ENOTFOUND
static struct mem_blk*
find_blk(
    struct mem_blk* start_pos,
    size_t size)
{
    while (1) {
        if (start_pos->flags.is_free && start_pos->size >= size) {
            errno = 0;
            return start_pos;
        } else {
            if (!start_pos->flags.has_next) {
                errno = ENOTFOUND;
                return start_pos;
            }
            // advance to the next block header
            start_pos = ((void*)start_pos)
                + sizeof(struct mem_blk)
                + start_pos->size
                - 4 * sizeof(uint8_t);
        }
    }
}
static struct mem_blk*
allocate_new_block(
    struct mem_blk* blk_before,
    size_t size)
{
    sbrk(sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t));
    if (errno) {
        return NULL;
    }

    struct mem_blk* blk = ((void*)blk_before)
        + sizeof(struct mem_blk)
        + blk_before->size
        - 4 * sizeof(uint8_t);

    blk_before->flags.has_next = 1;

    blk->flags.has_next = 0;
    blk->flags.is_free = 1;
    blk->size = size;

    errno = 0;
    return blk;
}
static void split_block(
    struct mem_blk* blk,
    size_t this_size)
{
    // block is too small to be split
    if (blk->size < sizeof(struct mem_blk) + this_size) {
        return;
    }

    struct mem_blk* blk_next = ((void*)blk)
        + sizeof(struct mem_blk)
        + this_size
        - 4 * sizeof(uint8_t);

    blk_next->size = blk->size
        - this_size
        - sizeof(struct mem_blk)
        + 4 * sizeof(uint8_t);

    blk_next->flags.has_next = blk->flags.has_next;
    blk_next->flags.is_free = 1;

    blk->flags.has_next = 1;
    blk->size = this_size;
}
void* k_malloc(size_t size)
{
    struct mem_blk* block_allocated;

    block_allocated = find_blk(p_start, size);

    if (errno == ENOTFOUND) {
        // find_blk left 'block_allocated' pointing to the last block
        block_allocated = allocate_new_block(block_allocated, size);
        // allocate_new_block sets errno on failure; preserve it for the
        // caller, but don't dereference a NULL block
        if (block_allocated == NULL)
            return NULL;
    } else {
        split_block(block_allocated, size);
    }

    block_allocated->flags.is_free = 0;
    return block_allocated->data;
}
void k_free(void* ptr)
{
    // step back over the block header (flags + size) to reach the
    // enclosing struct mem_blk
    ptr -= (sizeof(struct mem_blk_flags) + sizeof(size_t));
    struct mem_blk* blk = (struct mem_blk*)ptr;
    blk->flags.is_free = 1;
    // TODO: merge adjacent free blocks
}
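/*
 * Usage sketch (hypothetical caller):
 *
 *   uint32_t* buf = k_malloc(16 * sizeof(uint32_t));
 *   if (buf != NULL) {
 *       // ... use buf ...
 *       k_free(buf);
 *   }
 */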
static inline page_t phys_addr_to_page(phys_ptr_t ptr)
{
    return ptr >> 12;
}

static inline pd_i_t page_to_pd_i(page_t p)
{
    return p >> 10;
}

static inline pt_i_t page_to_pt_i(page_t p)
{
    return p & (1024 - 1);
}

static inline phys_ptr_t page_to_phys_addr(page_t p)
{
    return p << 12;
}

static inline pd_i_t phys_addr_to_pd_i(phys_ptr_t ptr)
{
    return page_to_pd_i(phys_addr_to_page(ptr));
}

static inline pt_i_t phys_addr_to_pt_i(phys_ptr_t ptr)
{
    return page_to_pt_i(phys_addr_to_page(ptr));
}
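/*
 * The 32-bit address split the helpers above implement (standard x86
 * two-level paging):
 *
 *   31         22 21         12 11          0
 *   |  PD index  |  PT index   | page offset |
 *   |  10 bits   |  10 bits    |  12 bits    |
 *
 * A page_t is an address shifted right by 12, so its low 10 bits are the
 * page table index and the next 10 bits the page directory index.
 */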
void* p_ptr_to_v_ptr(phys_ptr_t p_ptr)
{
    if (p_ptr < 0x30000000) {
        // memory below 768MiB is identity mapped
        return (void*)p_ptr;
    } else {
        // TODO: address translation
        return (void*)0xffffffff;
    }
}
phys_ptr_t v_ptr_to_p_ptr(struct mm* mm, void* v_ptr)
{
    if (mm == &kernel_mm && v_ptr < (void*)0x30000000) {
        return (phys_ptr_t)v_ptr;
    }
    while (mm != NULL) {
        if (v_ptr < mm->start || v_ptr >= mm->start + mm->len * 4096) {
            goto next;
        }
        size_t offset = (size_t)(v_ptr - mm->start);
        return page_to_phys_addr(mm->pgs[offset / 4096].phys_page_id)
            + (offset % 4096);
    next:
        mm = mm->next;
    }

    // TODO: handle error
    return 0xffffffff;
}
static inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

static inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}
static void mark_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    // round the end of the range up to the next page boundary
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

static void free_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

static inline void mark_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    mark_addr_len(start, end - start);
}

static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    free_addr_len(start, end - start);
}
// first-fit scan of the physical page bitmap
static page_t alloc_page(void)
{
    for (page_t i = 0; i < 1024 * 1024; ++i) {
        if (bm_test(mem_bitmap, i) == 0) {
            mark_page(i);
            return i;
        }
    }
    return GB_FAILED;
}
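/*
 * Usage sketch: alloc_page() returns a page number, not an address;
 * convert with page_to_phys_addr() when a pointer is needed:
 *
 *   page_t pg = alloc_page();
 *   void* va = p_ptr_to_v_ptr(page_to_phys_addr(pg));
 *   // ... use the page ...
 *   free_page(pg);
 */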
static inline void make_page_table(page_table_entry* pt)
{
    memset(pt, 0x00, sizeof(page_table_entry) * 1024);
}
static inline void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark the kernel page directory
    mark_addr_range(0x00000000, 0x00005000);
    // mark the empty page
    mark_addr_range(EMPTY_PAGE_ADDR, EMPTY_PAGE_END);
    // mark EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0xfffff);
    // mark the kernel image
    mark_addr_len(0x00100000, kernel_size);

    // mark every e820 region that is not type 1 (usable RAM) as allocated
    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}
static void _map_raw_page_to_addr(
    struct mm* mm_area,
    page_t page,
    int rw,
    int priv)
{
    // although it's NOT a physical address, we treat it as one
    phys_ptr_t addr = (phys_ptr_t)mm_area->start + mm_area->len * 4096;
    page_directory_entry* pde = mm_area->pd + phys_addr_to_pd_i(addr);

    // the page table does not exist yet
    if (!pde->in.p) {
        // allocate a page for the page table
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_page();
        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
    }

    // map the page in the page table
    page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
    pte += phys_addr_to_pt_i(addr);

    // set the P bit
    pte->v = 0x00000001;
    pte->in.rw = (rw == 1);
    pte->in.us = !(priv == 1);
    pte->in.page = page;
}
// map page to the end of mm_area in pd
int k_map(
    struct mm* mm_area,
    struct page* page,
    int read,
    int write,
    int priv,
    int cow)
{
    struct page* p_page_end = mm_area->pgs;
    while (p_page_end != NULL && p_page_end->next != NULL)
        p_page_end = p_page_end->next;

    if (cow) {
        // find the ancestor this COW page was copied from
        while (page->attr.cow)
            page = page->next;

        // create a new page node sharing the ancestor's physical page
        // and reference counter; the raw mapping below is made read-only
        struct page* new_page = k_malloc(sizeof(struct page));

        new_page->attr.read = (read == 1);
        new_page->attr.write = (write == 1);
        new_page->attr.system = (priv == 1);
        new_page->attr.cow = 1;
        // TODO: move *next out of struct page
        new_page->next = NULL;

        new_page->phys_page_id = page->phys_page_id;
        new_page->ref_count = page->ref_count;

        if (p_page_end != NULL)
            p_page_end->next = new_page;
        else
            mm_area->pgs = new_page;
    } else {
        page->attr.read = (read == 1);
        page->attr.write = (write == 1);
        page->attr.system = (priv == 1);
        page->attr.cow = 0;
        // TODO: move *next out of struct page
        page->next = NULL;

        if (p_page_end != NULL)
            p_page_end->next = page;
        else
            mm_area->pgs = page;
    }

    _map_raw_page_to_addr(
        mm_area,
        page->phys_page_id,
        (write && !cow),
        priv);

    ++mm_area->len;
    ++*page->ref_count;
    return GB_OK;
}
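/*
 * Usage sketch (hypothetical; mirrors what init_mem() does below):
 *
 *   static size_t refs;
 *   struct page pg = { 0 };
 *   pg.phys_page_id = alloc_page();
 *   pg.ref_count = &refs;
 *   // readable, writable, privileged, no copy-on-write
 *   k_map(mm_head, &pg, 1, 1, 1, 0);
 */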
// map a page identically (virtual address == physical address)
// this function is only meant to be used during initialization;
// it sets the PDE's P bit before recursing to map the newly allocated
// page table itself, which is what prevents infinite recursion
static inline void _init_map_page_identically(page_t page)
{
    page_directory_entry* pde = KERNEL_PAGE_DIRECTORY_ADDR + page_to_pd_i(page);

    // the page table does not exist yet
    if (!pde->in.p) {
        // allocate a page for the page table,
        // setting the P bit of the pde in advance
        pde->in.p = 1;
        pde->in.rw = 1;
        pde->in.us = 0;
        pde->in.pt_page = alloc_page();
        _init_map_page_identically(pde->in.pt_page);
        make_page_table((page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page)));
    }

    // map the page in the page table
    page_table_entry* pt = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
    pt += page_to_pt_i(page);

    // present + read/write
    pt->v = 0x00000003;
    pt->in.page = page;
}
static inline void init_paging_map_low_mem_identically(void)
{
    for (phys_ptr_t addr = 0x01000000; addr < 0x30000000; addr += 0x1000) {
        // skip pages already marked as used or reserved in the bitmap
        if (bm_test(mem_bitmap, phys_addr_to_page(addr)))
            continue;
        _init_map_page_identically(phys_addr_to_page(addr));
    }
}
struct mm* mm_head;

static struct page empty_page;
static struct page heap_first_page;
static size_t heap_first_page_ref_count;
void init_mem(void)
{
    init_mem_layout();

    // identity map 16MiB-768MiB
    init_paging_map_low_mem_identically();

    mm_head = &kernel_mm;

    kernel_mm.attr.read = 1;
    kernel_mm.attr.write = 1;
    kernel_mm.attr.system = 1;
    kernel_mm.len = 0;
    kernel_mm.next = NULL;
    kernel_mm.pd = KERNEL_PAGE_DIRECTORY_ADDR;
    kernel_mm.pgs = NULL;
    kernel_mm.start = KERNEL_HEAP_START;

    heap_first_page.attr.cow = 0;
    heap_first_page.attr.read = 1;
    heap_first_page.attr.write = 1;
    heap_first_page.attr.system = 1;
    heap_first_page.next = NULL;
    heap_first_page.phys_page_id = alloc_page();
    heap_first_page.ref_count = &heap_first_page_ref_count;

    *heap_first_page.ref_count = 0;

    k_map(mm_head, &heap_first_page, 1, 1, 1, 0);

    init_heap();

    // create the empty_page struct
    empty_page.attr.cow = 0;
    empty_page.attr.read = 1;
    empty_page.attr.write = 0;
    empty_page.attr.system = 0;
    empty_page.next = NULL;
    empty_page.phys_page_id = phys_addr_to_page(EMPTY_PAGE_ADDR);
    empty_page.ref_count = (size_t*)k_malloc(sizeof(size_t));
    // k_malloc does not zero memory; initialize the counter explicitly
    *empty_page.ref_count = 0;
}
void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}
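/*
 * Usage sketch (hypothetical values, not taken from this file): a flat
 * 4 GiB ring-0 code segment in the classic x86 layout would be
 *
 *   // base 0, limit 0xfffff with 4 KiB granularity => covers 4 GiB;
 *   // flags 0xc = granularity + 32-bit; access 0x9a = present, ring 0,
 *   // code segment, readable
 *   create_segment_descriptor(&gdt[1], 0, 0xfffff, 0xc, 0x9a);
 */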