// mm.hpp
#pragma once

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/cplusplus.hpp>
#include <types/list.hpp>
#include <types/size.h>
#include <types/status.h>
#include <types/types.h>
#include <types/vector.hpp>

#define invalidate_tlb(addr) asm("invlpg (%0)" \
                                 :             \
                                 : "r"(addr)   \
                                 : "memory")
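
// after modifying a live pte, flush the stale translation before relying on
// the new mapping. a typical call site (illustrative only, `new_pte` is
// hypothetical):
//
//     *pte = new_pte;
//     invalidate_tlb(addr);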

constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;

struct page {
    page_t phys_page_id;
    pte_t* pte;
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            uint32_t cow : 1;
        } in;
    } attr;
};

struct mm;

// map the page to the end of the mm_area in pd
int k_map(
    mm* mm_area,
    page* page,
    int read,
    int write,
    int priv,
    int cow);
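
// note: when `cow` is set, the mapping is presumably installed write-protected
// so that the first write faults and the page can be copied lazily; cf.
// page::attr.in.cow and page::ref_count.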

// private memory mapping
// changes will neither be written back to the file nor shared between
// processes
// TODO: shared mapping
// @param len is aligned to the 4KiB boundary automatically; the exceeding
// part will be filled with '0's and not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
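
// usage sketch (hypothetical; assumes `file` is a valid fs::inode* and the
// range at the hint address is unused):
//
//     // map the first two pages of the file, read-only, user-accessible
//     if (mmap((void*)0x40000000, 2 * PAGE_SIZE, file, 0, 0, 0) != GB_OK)
//         ; // handle the failure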

using page_arr = types::vector<page, types::kernel_ident_allocator>;
using mm_list = types::list<mm, types::kernel_ident_allocator>;

struct mm {
public:
    void* start;
    union {
        uint32_t v;
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1;
        } in;
    } attr;
    pd_t pd;
    page_arr* pgs = types::kernel_ident_allocator_new<page_arr>();
    fs::inode* mapped_file = nullptr;
    size_t file_offset = 0;

public:
    static constexpr int mirror_mm_area(mm_list* dst, const mm* src, pd_t pd)
    {
        mm new_mm {
            .start = src->start,
            .attr { src->attr.v },
            .pd = pd,
            .mapped_file = src->mapped_file,
            .file_offset = src->file_offset,
        };

        for (auto iter = src->pgs->begin(); iter != src->pgs->end(); ++iter) {
            if (k_map(&new_mm, &*iter,
                    src->attr.in.read, src->attr.in.write, src->attr.in.system, 1)
                != GB_OK) {
                return GB_FAILED;
            }
        }

        dst->emplace_back(types::move(new_mm));
        return GB_OK;
    }
};
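
// usage sketch (hypothetical fork path; assumes `parent` is the source
// mm_list and `child_pd` is a freshly allocated page directory):
//
//     mm_list* child = types::kernel_ident_allocator_new<mm_list>();
//     for (auto iter = parent->begin(); iter != parent->end(); ++iter)
//         if (mm::mirror_mm_area(child, &*iter, child_pd) != GB_OK)
//             ; // handle the failure
//
// every page is remapped with cow = 1, so parent and child share physical
// pages until either of them writes.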

// in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;

// translate physical address to virtual(mapped) address
void* ptovp(pptr_t p_ptr);

// @return the pointer to the mm_area containing l_ptr,
//         nullptr if not found
mm* find_mm_area(mm_list* mms, void* l_ptr);
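
// usage sketch (hypothetical page fault path):
//
//     mm* area = find_mm_area(mms, fault_addr);
//     if (!area)
//         ; // not a mapped address: raise a segmentation fault
//     page* pg = lto_page(area, fault_addr);
//     if (pg->attr.in.cow)
//         ; // duplicate the page, clear cow, remap writable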

inline constexpr size_t vptrdiff(void* p1, void* p2)
{
    return (uint8_t*)p1 - (uint8_t*)p2;
}

inline constexpr page* lto_page(const mm* mm_area, void* l_ptr)
{
    size_t offset = vptrdiff(l_ptr, mm_area->start);
    return &mm_area->pgs->at(offset / PAGE_SIZE);
}

inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}

inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}

inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}

inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}

inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}

inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}
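
// worked example of the 32-bit two-level split (4KiB pages, 1024 entries per
// table): for the address 0xC0100000,
//
//     to_page(0xC0100000) == 0xC0100000 >> 12 == 0xC0100
//     to_pdi(0xC0100)     == 0xC0100 >> 10    == 0x300  (pd index 768)
//     to_pti(0xC0100)     == 0xC0100 & 0x3ff  == 0x100  (pt index 256)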

inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}

inline pd_t mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}

inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}

inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}

inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}

inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}

inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}

inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}

inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}

inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}
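
// to_pte(pd, addr) walks the same path the MMU does: pd -> pde (via the
// upper 10 bits of addr) -> pt -> pte (via the middle 10 bits).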

inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}

inline constexpr void* mmend(const mm* mm_area)
{
    return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
}

// allocate a raw page
page_t alloc_raw_page(void);

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a struct page together with the raw page
struct page allocate_page(void);

pd_t alloc_pd(void);
pt_t alloc_pt(void);
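
// usage sketch (hypothetical; ties THREAD_KERNEL_STACK_SIZE above to the raw
// page allocator):
//
//     // allocate a contiguous kernel stack for a new thread
//     page_t first = alloc_n_raw_pages(THREAD_KERNEL_STACK_SIZE / PAGE_SIZE);
//     void* stack_bottom = to_vp(first);
//     void* stack_top = (char*)stack_bottom + THREAD_KERNEL_STACK_SIZE;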