mm.hpp
#pragma once

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/cplusplus.hpp>
#include <types/list.hpp>
#include <types/size.h>
#include <types/status.h>
#include <types/types.h>
#include <types/vector.hpp>

#define invalidate_tlb(addr) asm("invlpg (%0)" \
        :                                      \
        : "r"(addr)                            \
        : "memory")
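
// Usage sketch (illustrative, not part of this header): after changing a
// page table entry, flush the stale TLB entry for that linear address:
//   *pte = new_pte_value;   // hypothetical PTE update
//   invalidate_tlb(addr);
// `invlpg` invalidates the cached translation for the page containing addr.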

constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;

struct page {
    page_t phys_page_id;
    pte_t* pte;
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            uint32_t cow : 1;
        } in;
    } attr;
};

struct mm;

// map the page to the end of the mm_area in pd
int k_map(
    mm* mm_area,
    page* page,
    int read,
    int write,
    int priv,
    int cow);
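
// Usage sketch (hypothetical call site): append a freshly allocated page to
// an area as user-readable, copy-on-write:
//   page pg = allocate_page();
//   k_map(area, &pg, 1 /* read */, 0 /* write */, 0 /* priv */, 1 /* cow */);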

// unmap an entire memory area, making sure it will never be used again
int k_unmap(mm* mm_area);

// private memory mapping
// changes will be neither written back to the file nor shared between processes
// TODO: shared mapping
// @param len is rounded up to a 4 KiB boundary automatically; the excess part
// will be zero-filled and not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
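
// Usage sketch (hypothetical values): privately map the first 4 KiB of a
// file, read-only, in user space:
//   mmap(nullptr, PAGE_SIZE, file_inode, 0 /* offset */, 0 /* write */, 0 /* priv */);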

using page_arr = types::vector<page, types::kernel_ident_allocator>;
using mm_list = types::list<mm, types::kernel_ident_allocator>;

struct mm {
public:
    void* start;
    union {
        uint32_t v;
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1;
        } in;
    } attr;
    pd_t pd;
    page_arr* pgs = types::kernel_ident_allocator_new<page_arr>();
    fs::inode* mapped_file = nullptr;
    size_t file_offset = 0;

public:
    static constexpr int mirror_mm_area(mm_list* dst, const mm* src, pd_t pd)
    {
        mm new_mm {
            .start = src->start,
            .attr { src->attr.v },
            .pd = pd,
            .mapped_file = src->mapped_file,
            .file_offset = src->file_offset,
        };

        for (auto iter = src->pgs->begin(); iter != src->pgs->end(); ++iter) {
            // TODO: preserve dirty flag, clear accessed flag
            if (k_map(&new_mm, &*iter,
                    src->attr.in.read, src->attr.in.write, src->attr.in.system, 1)
                != GB_OK) {
                return GB_FAILED;
            }
        }

        dst->emplace_back(types::move(new_mm));
        return GB_OK;
    }
};
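
// Usage sketch (illustrative, e.g. during a fork-style clone): mirror every
// area of a parent mm_list into a child's list; mirror_mm_area passes
// cow = 1 to k_map, so the pages end up shared copy-on-write:
//   for (auto iter = parent_mms->begin(); iter != parent_mms->end(); ++iter)
//       mm::mirror_mm_area(child_mms, &*iter, child_pd);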

inline constexpr void unmap_user_space_memory(mm_list& mms)
{
    // skip kernel heap
    for (auto iter = ++mms.begin(); iter != mms.end();) {
        k_unmap(iter.ptr());
        types::kernel_ident_allocator_delete(iter->pgs);
        iter = mms.erase(iter);
    }
}

// in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;

// translate a physical address to its mapped virtual address
void* ptovp(pptr_t p_ptr);

// @return pointer to the mm_area containing l_ptr,
// or nullptr if no area contains it
mm* find_mm_area(mm_list* mms, void* l_ptr);

inline constexpr size_t vptrdiff(void* p1, void* p2)
{
    return (uint8_t*)p1 - (uint8_t*)p2;
}

inline constexpr page* lto_page(const mm* mm_area, void* l_ptr)
{
    size_t offset = vptrdiff(l_ptr, mm_area->start);
    return &mm_area->pgs->at(offset / PAGE_SIZE);
}

inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}

inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}

inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}

inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}
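
// Worked example (illustrative numbers): for the 32-bit address 0x00402000,
//   to_page(0x00402000) == 0x402        // address >> 12
//   to_pdi(0x402)       == 0x1          // page >> 10: page directory index
//   to_pti(0x402)       == 0x2          // page & 0x3ff: page table index
//   to_pp(0x402)        == 0x00402000   // back to a physical pointer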

inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}

inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}

inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}

inline pd_t mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}

inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}

inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}

inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}

inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}

inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}

inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}

inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}

inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}

inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}

inline constexpr void* mmend(const mm* mm_area)
{
    return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
}
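
// Illustrative containment test: a linear pointer p lies inside `area` iff
//   area->start <= p && p < mmend(area)
// which is the per-area check a lookup like find_mm_area() boils down to.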

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a raw page
inline page_t alloc_raw_page(void)
{
    return alloc_n_raw_pages(1);
}

// allocate a struct page together with the raw page
struct page allocate_page(void);

pd_t alloc_pd(void);
pt_t alloc_pt(void);

void dealloc_pd(pd_t pd);
void dealloc_pt(pt_t pt);
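
// Usage sketch (illustrative pairing of the allocators above): build a fresh
// page directory with one page table, then release both:
//   pd_t pd = alloc_pd();
//   pt_t pt = alloc_pt();
//   /* ... fill PDEs/PTEs, k_map() pages ... */
//   dealloc_pt(pt);
//   dealloc_pd(pd);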