mm.hpp

#pragma once

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/cplusplus.hpp>
#include <types/list.hpp>
#include <types/size.h>
#include <types/status.h>
#include <types/types.h>
#include <types/vector.hpp>
#define invalidate_tlb(addr) asm("invlpg (%0)" \
                                 :             \
                                 : "r"(addr)   \
                                 : "memory")

constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;
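
// describes one physical page: its frame number, the PTE that maps it,
// a shared reference count, and attribute bits (currently only COW)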
struct page {
    page_t phys_page_id;
    pte_t* pte;
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            uint32_t cow : 1;
        } in;
    } attr;
};

struct mm;

// map the page at the end of the mm_area, within its page directory
int k_map(
    mm* mm_area,
    page* page,
    int read,
    int write,
    int priv,
    int cow);
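// e.g. (sketch; `area` is an illustrative mm*): map a freshly allocated
// page readable and writable, non-privileged, without copy-on-write:
//   page pg = allocate_page();
//   k_map(area, &pg, 1, 1, 0, 0);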

// unmap a whole memory area, making sure it is never used again
int k_unmap(mm* mm_area);

// private memory mapping
// changes are neither written back to the file nor shared between processes
// TODO: shared mapping
// @param len is automatically aligned to a 4 KiB boundary; the exceeding
// part is filled with zeros and not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
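// e.g. (sketch; `file` and the length are illustrative):
//   mmap(hint, 8192, file, 0, 1, 0);
// would privately map the first two pages of `file`, writable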

using page_arr = types::vector<page, types::kernel_ident_allocator>;
using mm_list = types::list<mm, types::kernel_ident_allocator>;

struct mm {
public:
    void* start;
    union {
        uint32_t v;
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1;
        } in;
    } attr;
    pd_t pd;
    page_arr* pgs = types::kernel_ident_allocator_new<page_arr>();
    fs::inode* mapped_file = nullptr;
    size_t file_offset = 0;

public:
    // copy the mm_area into dst under the given page directory,
    // remapping every page copy-on-write
    static constexpr int mirror_mm_area(mm_list* dst, const mm* src, pd_t pd)
    {
        mm new_mm {
            .start = src->start,
            .attr { src->attr.v },
            .pd = pd,
            .mapped_file = src->mapped_file,
            .file_offset = src->file_offset,
        };

        for (auto iter = src->pgs->begin(); iter != src->pgs->end(); ++iter) {
            if (k_map(&new_mm, &*iter,
                    src->attr.in.read, src->attr.in.write, src->attr.in.system, 1)
                != GB_OK) {
                return GB_FAILED;
            }
        }

        dst->emplace_back(types::move(new_mm));
        return GB_OK;
    }
};
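
// e.g. (sketch; `mms` and `new_mms` are illustrative mm_list*): an address
// space is duplicated by mirroring every area copy-on-write into a new pd:
//   pd_t new_pd = alloc_pd();
//   for (auto iter = mms->begin(); iter != mms->end(); ++iter)
//       mm::mirror_mm_area(new_mms, &*iter, new_pd);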

// defined in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;

// translate a physical address to its virtual (mapped) address
void* ptovp(pptr_t p_ptr);

// @return pointer to the mm_area containing l_ptr,
// or nullptr if there is none
mm* find_mm_area(mm_list* mms, void* l_ptr);

inline constexpr size_t vptrdiff(void* p1, void* p2)
{
    return (uint8_t*)p1 - (uint8_t*)p2;
}

inline constexpr page* lto_page(const mm* mm_area, void* l_ptr)
{
    size_t offset = vptrdiff(l_ptr, mm_area->start);
    return &mm_area->pgs->at(offset / PAGE_SIZE);
}
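
// x86 two-level paging splits a 32-bit linear address into
// [31:22] page directory index | [21:12] page table index | [11:0] offset;
// the helpers below implement exactly that arithmetic,
// e.g. address 0x00402123 -> page 0x00402, pdi 1, pti 2, offset 0x123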
inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}

inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}

inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}

inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}

inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}

inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}

inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}

inline pd_t mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}
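
// the conversions below go through ptovp, i.e. they assume the physical
// page is mapped into the kernel's address space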
inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}

inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}

inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}

inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}
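
// page-table walk: to_pte(pd, addr) resolves the PTE mapping addr by
// indexing the page directory first (to_pde), then the table it points to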
inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}

inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}

inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}

inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}

inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}

// @return one past the last mapped byte of the mm_area
inline constexpr void* mmend(const mm* mm_area)
{
    return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
}

// allocate a raw page
page_t alloc_raw_page(void);

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a struct page together with the raw page
struct page allocate_page(void);

pd_t alloc_pd(void);
pt_t alloc_pt(void);

void dealloc_pd(pd_t pd);
void dealloc_pt(pt_t pt);
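
// e.g. (sketch): paging structures for a new address space come from
// these primitives and are returned with the matching dealloc_* calls:
//   pd_t pd = alloc_pd();
//   pt_t pt = alloc_pt();
//   ...
//   dealloc_pt(pt);
//   dealloc_pd(pd);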