mm.hpp

#pragma once

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/cplusplus.hpp>
#include <types/list.hpp>
#include <types/size.h>
#include <types/status.h>
#include <types/types.h>
#include <types/vector.hpp>
#define invalidate_tlb(addr) asm("invlpg (%0)" \
                                 :             \
                                 : "r"(addr)   \
                                 : "memory")
constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;

struct page {
    page_t phys_page_id;
    pte_t* pte;
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            uint32_t cow : 1;
        } in;
    } attr;
};
struct mm;

// map the page to the end of the mm_area in pd
int k_map(
    mm* mm_area,
    page* page,
    int read,
    int write,
    int priv,
    int cow);

// unmap a whole mem area, making sure that we will never use it again
int k_unmap(mm* mm_area);
// private memory mapping
// changes will be neither written back to the file nor shared between
// processes
// TODO: shared mapping
// @param len is aligned to the 4 KiB boundary automatically; the exceeding
// part will be filled with '0's and not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
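// A hypothetical call, assuming `file` is a valid fs::inode* from the VFS
// and that a non-zero `priv` marks a system (kernel) mapping, as suggested
// by mm::mirror_mm_area below: map the first two pages of the file
// read-write as a user mapping near the hint address:
//   mmap((void*)0x40000000, 2 * PAGE_SIZE, file, 0, 1, 0);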
using page_arr = types::vector<page, types::kernel_ident_allocator>;
using mm_list = types::list<mm, types::kernel_ident_allocator>;
struct mm {
public:
    void* start;
    union {
        uint32_t v;
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1;
        } in;
    } attr;
    pd_t pd;
    page_arr* pgs = types::kernel_ident_allocator_new<page_arr>();
    fs::inode* mapped_file = nullptr;
    size_t file_offset = 0;

public:
    static constexpr int mirror_mm_area(mm_list* dst, const mm* src, pd_t pd)
    {
        mm new_mm {
            .start = src->start,
            .attr { src->attr.v },
            .pd = pd,
            .mapped_file = src->mapped_file,
            .file_offset = src->file_offset,
        };

        for (auto iter = src->pgs->begin(); iter != src->pgs->end(); ++iter) {
            // TODO: preserve dirty flag, clear accessed flag
            if (k_map(&new_mm, &*iter,
                    src->attr.in.read, src->attr.in.write, src->attr.in.system, 1)
                != GB_OK) {
                return GB_FAILED;
            }
        }

        dst->emplace_back(types::move(new_mm));
        return GB_OK;
    }
};
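// Sketch of a hypothetical fork-style caller: mirror every area of the
// parent list into `child_mms` under the child's page directory. Pages are
// remapped with cow = 1, so both sides fault and copy on write:
//   for (auto iter = parent_mms->begin(); iter != parent_mms->end(); ++iter)
//       if (mm::mirror_mm_area(child_mms, &*iter, child_pd) != GB_OK)
//           break; // handle the error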
// in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;

// translate a physical address to its virtual (mapped) address
void* ptovp(pptr_t p_ptr);

// @return the pointer to the mm_area containing l_ptr,
// or nullptr if there is none
mm* find_mm_area(mm_list* mms, void* l_ptr);
inline constexpr size_t vptrdiff(void* p1, void* p2)
{
    return (uint8_t*)p1 - (uint8_t*)p2;
}

inline constexpr page* lto_page(const mm* mm_area, void* l_ptr)
{
    size_t offset = vptrdiff(l_ptr, mm_area->start);
    return &mm_area->pgs->at(offset / PAGE_SIZE);
}
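// Example (illustrative): resolve which area and which backing page a
// linear address belongs to:
//   mm* area = find_mm_area(kernel_mms, l_ptr);
//   if (area) {
//       page* pg = lto_page(area, l_ptr);
//       // ...
//   }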
inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}

inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}

inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}

inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}
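// Worked example of the 10+10+12 bit split implied by the shifts above
// (32-bit two-level paging): for the page-aligned address 0xc0401000,
//   to_page(0xc0401000) == 0xc0401    (strip the 12-bit page offset)
//   to_pdi(0xc0401)     == 0x301      (top 10 bits: page directory index)
//   to_pti(0xc0401)     == 0x001      (low 10 bits: page table index)
//   to_pp(0xc0401)      == 0xc0401000 (page id back to physical address)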
inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}

inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}

inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}
inline pd_t mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}

inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}

inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}

inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}

inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}

inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}

inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}

inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}

inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}

inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}
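// The overloads above compose into the full two-level walk; resolving the
// pte for a linear address `addr` under directory `pd` is equivalent to:
//   pde_t* pde = to_pde(pd, addr);
//   pt_t pt = to_pt(pde);
//   pte_t* pte = to_pte(pt, addr); // same result as to_pte(pd, addr)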
inline constexpr void* mmend(const mm* mm_area)
{
    return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
}
// allocate a raw page
page_t alloc_raw_page(void);

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a struct page together with the raw page
struct page allocate_page(void);

pd_t alloc_pd(void);
pt_t alloc_pt(void);

void dealloc_pd(pd_t pd);
void dealloc_pt(pt_t pt);
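// A minimal usage sketch (assumption: the caller owns the returned tables
// and releases them with the matching dealloc_* once unmapped everywhere):
//   pd_t pd = alloc_pd();
//   pt_t pt = alloc_pt();
//   // ... install pt into pd, map pages ...
//   dealloc_pt(pt);
//   dealloc_pd(pd);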