mem.c

#include <asm/boot.h>
#include <asm/port_io.h>
#include <asm/sys.h>
#include <kernel/errno.h>
#include <kernel/mem.h>
#include <kernel/task.h>
#include <kernel/vga.h>
#include <kernel_main.h>
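
/*
 * mem.c: early kernel memory management.
 *
 * This file contains a simple first-fit heap allocator (k_malloc/k_free),
 * the initial page directory and page table setup (init_paging), and the
 * GDT + TSS setup (init_gdt_with_tss).
 */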
static void* p_start;
static void* p_break;

static segment_descriptor* gdt;

// temporary
static struct tss32_t _tss;

static int32_t set_heap_start(void* start_addr)
{
    p_start = start_addr;
    return 0;
}

static int32_t brk(void* addr)
{
    p_break = addr;
    return 0;
}

// sets errno when it fails to increase the heap pointer
static void* sbrk(size_t increment)
{
    if (brk(p_break + increment) != 0) {
        errno = ENOMEM;
        return 0;
    } else {
        errno = 0;
        return p_break;
    }
}
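
/*
 * The allocator below assumes each block starts with a header immediately
 * followed by its data area. The real definition lives in kernel/mem.h; the
 * sketch here is only illustrative:
 *
 *     struct mem_blk_flags {
 *         uint8_t has_next;
 *         uint8_t is_free;
 *         // ...
 *     };
 *
 *     struct mem_blk {
 *         size_t size;                 // size of the data area
 *         struct mem_blk_flags flags;
 *         uint8_t data[4];             // first 4 bytes of the data area
 *     };
 *
 * Because 4 bytes of the data area are already counted inside
 * sizeof(struct mem_blk), the pointer arithmetic below subtracts
 * 4 * sizeof(uint8_t) whenever it steps from one block header to the next.
 */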
void init_heap(void)
{
    // start of the available address space
    // TODO: adjust heap start address
    //       according to user's memory size
    set_heap_start(HEAP_START);

    if (brk(HEAP_START) != 0) {
        vga_printk("Failed to initialize heap, halting...", 0x0fu);
        MAKE_BREAK_POINT();
        asm_cli();
        asm_hlt();
    }

    struct mem_blk* p_blk = sbrk(0);
    p_blk->size = 4;
    p_blk->flags.has_next = 0;
    p_blk->flags.is_free = 1;
}
// @param start_pos position where to start searching
// @param size the size of the block we're looking for
// @return the found block if a suitable one exists; otherwise the last block
static struct mem_blk*
find_blk(
    struct mem_blk* start_pos,
    size_t size)
{
    while (1) {
        if (start_pos->flags.is_free && start_pos->size >= size) {
            errno = 0;
            return start_pos;
        } else {
            if (!start_pos->flags.has_next) {
                errno = ENOTFOUND;
                return start_pos;
            }
            // step to the next block header
            start_pos = ((void*)start_pos)
                + sizeof(struct mem_blk)
                + start_pos->size
                - 4 * sizeof(uint8_t);
        }
    }
}
static struct mem_blk*
allocate_new_block(
    struct mem_blk* blk_before,
    size_t size)
{
    // extend the heap by one block header plus the requested data area
    sbrk(sizeof(struct mem_blk) + size - 4 * sizeof(uint8_t));
    if (errno) {
        return 0;
    }

    struct mem_blk* blk = ((void*)blk_before)
        + sizeof(struct mem_blk)
        + blk_before->size
        - 4 * sizeof(uint8_t);

    blk_before->flags.has_next = 1;

    blk->flags.has_next = 0;
    blk->flags.is_free = 1;
    blk->size = size;

    errno = 0;
    return blk;
}
static void split_block(
    struct mem_blk* blk,
    size_t this_size)
{
    // block is too small to get split
    if (blk->size < sizeof(struct mem_blk) + this_size) {
        return;
    }

    struct mem_blk* blk_next = ((void*)blk)
        + sizeof(struct mem_blk)
        + this_size
        - 4 * sizeof(uint8_t);

    blk_next->size = blk->size
        - this_size
        - sizeof(struct mem_blk)
        + 4 * sizeof(uint8_t);
    blk_next->flags.has_next = blk->flags.has_next;
    blk_next->flags.is_free = 1;

    blk->flags.has_next = 1;
    blk->size = this_size;
}
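
// k_malloc: first-fit allocation.
// Walk the block list from the start of the heap and reuse the first free
// block that is large enough (splitting off the tail when it can hold another
// block); otherwise extend the heap with a newly allocated block.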
void* k_malloc(size_t size)
{
    struct mem_blk* block_allocated;

    block_allocated = find_blk(p_start, size);

    if (errno == ENOTFOUND) {
        // 'block_allocated' now points to the last block in the list;
        // append a new block after it
        block_allocated = allocate_new_block(block_allocated, size);
        if (!block_allocated) {
            // the heap could not be extended; errno is preserved for the caller
            return 0;
        }
    } else {
        split_block(block_allocated, size);
    }

    block_allocated->flags.is_free = 0;

    return block_allocated->data;
}
void k_free(void* ptr)
{
    // step back from the data area to the beginning of the block header
    ptr -= (sizeof(struct mem_blk_flags) + sizeof(size_t));
    struct mem_blk* blk = (struct mem_blk*)ptr;
    blk->flags.is_free = 1;
    // TODO: merge neighbouring free blocks
}
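
// Illustrative usage from other kernel code built against kernel/mem.h:
//     uint32_t* buf = k_malloc(16 * sizeof(uint32_t));
//     if (buf) {
//         /* ... use buf ... */
//         k_free(buf);
//     }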
static inline void _create_pd(page_directory_entry* pde)
{
}

static page_directory_entry* _kernel_pd = KERNEL_PAGE_DIRECTORY_ADDR;
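
// The 1024 page tables are laid out contiguously starting at
// KERNEL_PAGE_TABLE_START_ADDR, one 4 KiB table per page directory entry.
// Entry flag bits follow the x86 format: bit 0 = present, bit 1 = read/write,
// bit 2 = user/supervisor, so 0b011 marks a supervisor-only mapping and
// 0b111 additionally allows user-mode access.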
static inline void _create_kernel_pt(int32_t index)
{
    page_table_entry* pt = KERNEL_PAGE_TABLE_START_ADDR + index * 0x1000;

    // 0xc0000000 ~ 0xffffffff is mapped as kernel space,
    // backed by physical addresses starting from 0
    int32_t is_kernel = (index >= 768);
    if (is_kernel) {
        index -= 768;
    }

    for (int32_t i = 0; i < 1024; ++i) {
        if (is_kernel) {
            pt[i].v = 0b00000011;
        } else {
            pt[i].v = 0b00000111;
        }
        pt[i].in.addr = ((index * 0x400000) + i * 0x1000) >> 12;
    }
}
static inline void _create_kernel_pd(void)
{
    for (int32_t i = 0; i < 1024; ++i) {
        if (i >= 768) {
            _kernel_pd[i].v = 0b00000011;
        } else {
            _kernel_pd[i].v = 0b00000111;
        }
        _kernel_pd[i].in.addr = ((uint32_t)(KERNEL_PAGE_TABLE_START_ADDR + i * 0x1000) >> 12);
        _create_kernel_pt(i);
    }
}
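
// init_paging builds the kernel page directory and turns paging on.
// asm_enable_paging is assumed to load _kernel_pd into CR3 and set the
// paging bit in CR0 (it is defined in the assembly sources, not here).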
void init_paging(void)
{
    _create_kernel_pd();
    asm_enable_paging(_kernel_pd);
}
static inline void
set_segment_descriptor(
    segment_descriptor* sd,
    uint32_t base,
    uint32_t limit,
    uint8_t access,
    uint8_t flags)
{
    sd->access = access;
    sd->flags = flags;
    sd->base_low = base;
    sd->base_mid = base >> 16;
    sd->base_high = base >> 24;
    sd->limit_low = limit;
    sd->limit_high = limit >> 16;
}
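
// The new GDT keeps the entries from the GDT set up in assembly (described by
// asm_gdt_descriptor) and appends a TSS descriptor at index 5, so the task
// register is loaded with selector 5 * 8 = 0x28.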
void init_gdt_with_tss(void* kernel_esp, uint16_t kernel_ss)
{
    gdt = k_malloc(sizeof(segment_descriptor) * 6);

    // the size stored in the GDT descriptor is an offset (limit), so one must
    // be added to get the real size in bytes; dividing by 8 gives the number
    // of entries
    uint16_t asm_gdt_size = (asm_gdt_descriptor.size + 1) / 8;
    segment_descriptor* asm_gdt = (segment_descriptor*)asm_gdt_descriptor.address;

    for (int i = 0; i < asm_gdt_size; ++i) {
        gdt[i] = asm_gdt[i];
    }

    set_segment_descriptor(gdt + 5, (uint32_t)&_tss, sizeof(struct tss32_t), SD_TYPE_TSS, 0b0000);

    _tss.esp0 = (uint32_t)kernel_esp;
    _tss.ss0 = kernel_ss;

    asm_load_gdt((6 * sizeof(segment_descriptor) - 1) << 16, (uint32_t)gdt);
    asm_load_tr((6 - 1) * 8);
}