mem.c

#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/stdio.h>
#include <kernel/task.h>
#include <kernel/vga.h>
#include <kernel_main.h>
#include <types/bitmap.h>

static void* p_start;
static void* p_break;

static segment_descriptor* gdt;

// temporary
static struct tss32_t _tss;

static size_t mem_size;
static char mem_bitmap[1024 * 1024 / 8];

static int32_t set_heap_start(void* start_addr)
{
    p_start = start_addr;
    return 0;
}

static int32_t brk(void* addr)
{
    p_break = addr;
    return 0;
}

// Sets errno when the heap pointer cannot be increased.
// NOTE: unlike POSIX sbrk(), this returns the NEW program break,
// which is what the callers below rely on.
static void* sbrk(size_t increment)
{
    if (brk(p_break + increment) != 0) {
        errno = ENOMEM;
        return 0;
    } else {
        errno = 0;
        return p_break;
    }
}

int init_heap(void)
{
    // start of the available address space
    // TODO: adjust heap start address
    //       according to user's memory size
    set_heap_start(HEAP_START);

    if (brk(HEAP_START) != 0) {
        return GB_FAILED;
    }

    // create the initial free block, whose payload is just the
    // 4-byte data[] area embedded in the block header
    struct mem_blk* p_blk = sbrk(0);
    p_blk->size = 4;
    p_blk->flags.has_next = 0;
    p_blk->flags.is_free = 1;
    return GB_OK;
}

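/*
 * Heap block layout, a sketch inferred from the arithmetic below
 * (the authoritative definition of struct mem_blk is in
 * kernel/mem.h):
 *
 *     | size | flags | data[4] ... payload continues past struct |
 *
 * 'size' counts the whole payload, INCLUDING the trailing 4-byte
 * data[] array that is already part of sizeof(struct mem_blk).
 * Hence the distance from one block header to the next is
 *     sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t)
 */
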
// @param start_pos position at which to start searching
// @param size the size of the block we're looking for
// @return a suitable free block if one exists; otherwise the last
//         block in the list, with errno set to ENOTFOUND
static struct mem_blk*
find_blk(
    struct mem_blk* start_pos,
    size_t size)
{
    while (1) {
        if (start_pos->flags.is_free && start_pos->size >= size) {
            errno = 0;
            return start_pos;
        } else {
            if (!start_pos->flags.has_next) {
                errno = ENOTFOUND;
                return start_pos;
            }
            // advance to the next block header
            start_pos = ((void*)start_pos)
                + sizeof(struct mem_blk)
                + start_pos->size
                - 4 * sizeof(uint8_t);
        }
    }
}

// Grow the heap and append a new free block after blk_before,
// which must be the last block in the list.
static struct mem_blk*
allocate_new_block(
    struct mem_blk* blk_before,
    size_t size)
{
    sbrk(sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t));
    if (errno) {
        return 0;
    }

    struct mem_blk* blk = ((void*)blk_before)
        + sizeof(struct mem_blk)
        + blk_before->size
        - 4 * sizeof(uint8_t);

    blk_before->flags.has_next = 1;

    blk->flags.has_next = 0;
    blk->flags.is_free = 1;
    blk->size = size;

    errno = 0;
    return blk;
}

// Split blk so that its payload is this_size bytes, turning the
// remainder into a new free block.
static void split_block(
    struct mem_blk* blk,
    size_t this_size)
{
    // block is too small to be split
    if (blk->size < sizeof(struct mem_blk) + this_size) {
        return;
    }

    struct mem_blk* blk_next = ((void*)blk)
        + sizeof(struct mem_blk)
        + this_size
        - 4 * sizeof(uint8_t);

    blk_next->size = blk->size
        - this_size
        - sizeof(struct mem_blk)
        + 4 * sizeof(uint8_t);
    blk_next->flags.has_next = blk->flags.has_next;
    blk_next->flags.is_free = 1;

    blk->flags.has_next = 1;
    blk->size = this_size;
}

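/*
 * Worked example: splitting a free block with size == 64 for a
 * 16-byte request leaves
 *     blk->size      = 16
 *     blk_next->size = 64 - 16 - sizeof(struct mem_blk) + 4
 * i.e. the remainder minus the space taken by the new header.
 */
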
void* k_malloc(size_t size)
{
    struct mem_blk* block_allocated;

    block_allocated = find_blk(p_start, size);
    if (errno == ENOTFOUND) {
        // find_blk() returned the last block in the list;
        // append a new block after it
        block_allocated = allocate_new_block(block_allocated, size);
        // allocate_new_block() sets errno on failure;
        // preserve it for the caller
        if (!block_allocated) {
            return 0;
        }
    } else {
        split_block(block_allocated, size);
    }

    block_allocated->flags.is_free = 0;
    return block_allocated->data;
}

void k_free(void* ptr)
{
    // step back from the payload to the block header
    ptr -= (sizeof(struct mem_blk_flags) + sizeof(size_t));
    struct mem_blk* blk = (struct mem_blk*)ptr;
    blk->flags.is_free = 1;
    // TODO: coalesce adjacent free blocks
}

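/*
 * Usage sketch (hypothetical caller):
 *
 *     char* buf = k_malloc(128); // pointer to the block's data area
 *     if (buf) {
 *         // ... use up to 128 bytes ...
 *         k_free(buf);           // mark the owning block free again
 *     }
 *
 * On failure k_malloc() returns 0 with errno left set by the
 * failing allocation path.
 */
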
// 4 KiB pages: the low 12 bits of an address are the page offset
static inline page_t phys_addr_to_page(phys_ptr_t ptr)
{
    return ptr >> 12;
}

// each page directory entry covers 1024 pages (4 MiB)
static inline pd_i_t page_to_pd_i(page_t p)
{
    return p >> 10;
}

static inline pt_i_t page_to_pt_i(page_t p)
{
    return p & (1024 - 1);
}

static inline phys_ptr_t page_to_phys_addr(page_t p)
{
    return p << 12;
}

static inline pd_i_t phys_addr_to_pd_i(phys_ptr_t ptr)
{
    return page_to_pd_i(phys_addr_to_page(ptr));
}

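/*
 * Worked example for the helpers above, physical address 0x00403abc:
 *     page = 0x00403abc >> 12 = 0x403   (4 KiB page frame number)
 *     pd_i = 0x403 >> 10      = 1       (page directory index)
 *     pt_i = 0x403 & 0x3ff    = 3       (page table index)
 */
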
static inline void mark_page(page_t n)
{
    bm_set(mem_bitmap, n);
}

static inline void free_page(page_t n)
{
    bm_clear(mem_bitmap, n);
}

static void mark_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    // round the end of the range up to a page boundary
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        mark_page(i);
}

static void free_addr_len(phys_ptr_t start, size_t n)
{
    if (n == 0)
        return;
    page_t start_page = phys_addr_to_page(start);
    page_t end_page = phys_addr_to_page(start + n + 4095);
    for (page_t i = start_page; i < end_page; ++i)
        free_page(i);
}

static inline void mark_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    mark_addr_len(start, end - start);
}

static inline void free_addr_range(phys_ptr_t start, phys_ptr_t end)
{
    free_addr_len(start, end - start);
}

// scan the bitmap for the first free page and claim it
static int alloc_page(void)
{
    for (page_t i = 0; i < 1024 * 1024; ++i) {
        if (bm_test(mem_bitmap, i) == 0) {
            mark_page(i);
            return i;
        }
    }
    return GB_FAILED;
}

// allocate ONE whole page
static phys_ptr_t _k_p_malloc(void)
{
    return page_to_phys_addr(alloc_page());
}

static void _k_p_free(phys_ptr_t ptr)
{
    free_page(phys_addr_to_page(ptr));
}

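// TODO: handle GB_FAILED from alloc_page(); _k_p_malloc() and
//       p_map() currently assume page allocation always succeeds
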
// zero out all 1024 entries of a page directory
static inline void create_pd(page_directory_entry* pde)
{
    for (int i = 0; i < 1024; ++i) {
        pde->v = 0;
        ++pde;
    }
}

static page_directory_entry* _kernel_pd = KERNEL_PAGE_DIRECTORY_ADDR;

// map n pages from p_ptr to v_ptr
// p_ptr and v_ptr need to be 4 KiB-aligned
static int p_map(
    page_directory_entry* pd,
    phys_ptr_t p_ptr,
    virt_ptr_t v_ptr,
    size_t n,
    int rw,
    int priv);

// map n bytes
static inline int p_n_map(
    page_directory_entry* pd,
    phys_ptr_t p_ptr,
    virt_ptr_t v_ptr,
    size_t n,
    int rw,
    int priv);

// map n bytes identically
static inline int _p_ident_n_map(
    page_directory_entry* pd,
    phys_ptr_t p_ptr,
    size_t n,
    int rw,
    int priv);

// zero-fill the page table at page p and identity-map it, so it
// stays addressable after paging is enabled
static inline void make_page_table(page_directory_entry* pd, page_t p)
{
    phys_ptr_t pp_pt = page_to_phys_addr(p);
    page_table_entry* pt = (page_table_entry*)pp_pt;
    memset(pt, 0x00, sizeof(page_table_entry) * 1024);
    _p_ident_n_map(
        pd,
        pp_pt,
        sizeof(page_table_entry) * 1024, 1, 1);
}

// map n pages from p_ptr to v_ptr
// p_ptr and v_ptr need to be 4 KiB-aligned
static int p_map(
    page_directory_entry* pd,
    phys_ptr_t p_ptr,
    virt_ptr_t v_ptr,
    size_t n,
    int rw,
    int priv)
{
    // pages to be mapped
    page_t v_page_start = phys_addr_to_page(v_ptr);
    page_t v_page_end = v_page_start + n;

    // make sure a page table exists for every directory entry
    // the range touches
    for (pd_i_t pde_index = page_to_pd_i(v_page_start); pde_index <= page_to_pd_i(v_page_end); ++pde_index) {
        // page table not present
        if (pd[pde_index].in.p != 1) {
            pd[pde_index].in.p = 1;
            pd[pde_index].in.a = 0;
            pd[pde_index].in.rw = 1;

            page_t p_page = alloc_page();
            pd[pde_index].in.addr = p_page;
            make_page_table(pd, p_page);
        }
    }

    for (size_t i = 0; i < n; ++i) {
        page_t v_page = v_page_start + i;
        pd_i_t pd_i = page_to_pd_i(v_page);
        page_table_entry* pt = (page_table_entry*)page_to_phys_addr(pd[pd_i].in.addr);
        pt += page_to_pt_i(v_page);

        // refuse to overwrite an existing mapping
        if (pt->in.p == 1) {
            errno = EEXIST;
            return GB_FAILED;
        }

        pt->in.p = 1;
        pt->in.rw = (rw == 1);
        pt->in.us = !(priv == 1); // supervisor pages are not user-accessible
        pt->in.a = 0;
        pt->in.d = 0;
        pt->in.addr = phys_addr_to_page(p_ptr) + i;
    }
    return GB_OK;
}

// map n bytes, rounded up to whole pages
static inline int p_n_map(
    page_directory_entry* pd,
    phys_ptr_t p_ptr,
    virt_ptr_t v_ptr,
    size_t n,
    int rw,
    int priv)
{
    return p_map(
        pd,
        p_ptr,
        v_ptr,
        (n + 4096 - 1) >> 12,
        rw,
        priv);
}

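/*
 * Example: p_n_map(pd, p, v, 8193, 1, 1) maps
 *     (8193 + 4095) >> 12 = 3
 * pages, since 8193 bytes straddle three 4 KiB pages.
 */
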
// identity-map n bytes (virtual address == physical address)
static inline int _p_ident_n_map(
    page_directory_entry* pd,
    phys_ptr_t p_ptr,
    size_t n,
    int rw,
    int priv)
{
    return p_n_map(
        pd,
        p_ptr,
        p_ptr,
        n,
        rw,
        priv);
}

static inline void _create_kernel_pd(void)
{
    create_pd(_kernel_pd);

    // the page directory itself
    _p_ident_n_map(_kernel_pd,
        (phys_ptr_t)KERNEL_PAGE_DIRECTORY_ADDR,
        sizeof(page_directory_entry) * 1024, 1, 1);
    // EBDA and upper memory (0x80000-0xfffff)
    _p_ident_n_map(_kernel_pd,
        (0x00080000),
        (0xfffff - 0x80000 + 1), 1, 1);
    // the kernel image
    _p_ident_n_map(_kernel_pd,
        KERNEL_START_ADDR,
        kernel_size, 1, 1);
    // the early kernel stack
    _p_ident_n_map(_kernel_pd,
        KERNEL_EARLY_STACK_ADDR - KERNEL_EARLY_STACK_SIZE,
        KERNEL_EARLY_STACK_SIZE, 1, 1);
}

static void init_mem_layout(void)
{
    mem_size = 1024 * mem_size_info.n_1k_blks;
    mem_size += 64 * 1024 * mem_size_info.n_64k_blks;

    // mark kernel page directory
    mark_addr_range(0x00000000, 0x00001000);
    // mark EBDA and upper memory as allocated
    mark_addr_range(0x80000, 0xfffff);
    // mark kernel
    mark_addr_len(0x00100000, kernel_size);

    // mark every e820 region that is not type 1 (usable RAM)
    if (e820_mem_map_entry_size == 20) {
        struct e820_mem_map_entry_20* entry = (struct e820_mem_map_entry_20*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->type != 1) {
                mark_addr_len(entry->base, entry->len);
            }
        }
    } else {
        struct e820_mem_map_entry_24* entry = (struct e820_mem_map_entry_24*)e820_mem_map;
        for (uint32_t i = 0; i < e820_mem_map_count; ++i, ++entry) {
            if (entry->in.type != 1) {
                mark_addr_len(entry->in.base, entry->in.len);
            }
        }
    }
}

void init_paging(void)
{
    init_mem_layout();
    _create_kernel_pd();
    asm_enable_paging(_kernel_pd);
}

static inline void
set_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint8_t access,
    uint8_t flags)
{
    sd->access = access;
    sd->flags = flags;
    // the narrower struct fields truncate base and limit
    // to the right bit widths
    sd->base_low = base;
    sd->base_mid = base >> 16;
    sd->base_high = base >> 24;
    sd->limit_low = limit;
    sd->limit_high = limit >> 16;
}

void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss)
{
    // TODO: fix this
    // NOTE: everything below the early return is currently dead code
    return;

    gdt = k_malloc(sizeof(segment_descriptor) * 6);

    // the size field in the GDT descriptor holds (size - 1),
    // so add one to recover the real byte count
    uint16_t asm_gdt_size = (asm_gdt_descriptor.size + 1) / 8;
    segment_descriptor* asm_gdt = (segment_descriptor*)asm_gdt_descriptor.address;

    // copy the descriptors set up by the assembly bootstrap
    for (int i = 0; i < asm_gdt_size; ++i) {
        gdt[i] = asm_gdt[i];
    }

    set_segment_descriptor(gdt + 5, (uint32_t)&_tss, sizeof(struct tss32_t), SD_TYPE_TSS, 0b0000);

    _tss.esp0 = (uint32_t)kernel_esp;
    _tss.ss0 = kernel_ss;

    // the +1 tells the asm routine to re-enable interrupts
    asm_load_gdt(((6 * sizeof(segment_descriptor) - 1) << 16) + 1, (uint32_t)gdt);
    asm_load_tr((6 - 1) * 8);
}

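/*
 * Worked example for the packed argument of the asm_load_gdt() call
 * above: six 8-byte descriptors occupy 48 bytes, so the GDT limit
 * is 48 - 1 = 47 (0x2f) and the packed value is
 *     (0x2f << 16) + 1 = 0x002f0001
 */
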
void create_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint32_t flags,
    uint32_t access)
{
    sd->base_low = base & 0x0000ffff;
    sd->base_mid = ((base & 0x00ff0000) >> 16);
    sd->base_high = ((base & 0xff000000) >> 24);
    sd->limit_low = limit & 0x0000ffff;
    sd->limit_high = ((limit & 0x000f0000) >> 16);
    sd->access = access;
    sd->flags = flags;
}
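
/*
 * Worked example: base = 0x00100000, limit = 0xfffff gives
 *     base_low = 0x0000, base_mid = 0x10, base_high = 0x00,
 *     limit_low = 0xffff, limit_high = 0xf
 */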