mm.hpp

#pragma once

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/cplusplus.hpp>
#include <types/list.hpp>
#include <types/size.h>
#include <types/status.h>
#include <types/types.h>
#include <types/vector.hpp>

constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;

struct page {
    page_t phys_page_id;
    pte_t* pte;
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            uint32_t cow : 1;
        } in;
    } attr;
};
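
// Illustrative note (an assumption, not stated in this header): ref_count is a
// pointer rather than a plain counter so that every `struct page` referring to
// the same physical frame can share one count. A hypothetical copy-on-write
// aliasing step might look like this, where `orig` is an existing descriptor:
//
//     page copy = orig;            // second descriptor for the same frame
//     ++*copy.ref_count;           // shared count now reflects both users
//     copy.attr.in.cow = 1;        // fault handler would copy on first write
//     orig.attr.in.cow = 1;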

struct mm;

// map the page to the end of the mm_area in pd
int k_map(
    mm* mm_area,
    page* page,
    int read,
    int write,
    int priv,
    int cow);

// private memory mapping
// changes will be neither written back to the file nor shared between processes
// TODO: shared mapping
// @param len is aligned up to a 4 KiB boundary automatically; the excess part
//        will be zero-filled and not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
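
// Usage sketch (illustrative only; not part of the original header). Maps the
// first 8 KiB of a file privately near `hint`; a length that is not a multiple
// of 4 KiB would be rounded up and the tail zero-filled, per the comment above.
// `ino` is a placeholder for an inode obtained from the VFS layer, and `priv`
// is assumed here to select a kernel/system mapping when set.
//
//     fs::inode* ino = /* obtained from the VFS layer (lookup not shown) */;
//     void* hint = (void*)0x40000000;
//     if (mmap(hint, 8192, ino, 0, 1 /* write */, 0 /* priv */) != GB_OK) {
//         /* handle mapping failure */
//     }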

using page_arr = types::vector<page, types::kernel_ident_allocator>;
using mm_list = types::list<mm, types::kernel_ident_allocator>;

struct mm {
public:
    void* start;
    union {
        uint32_t v;
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1;
        } in;
    } attr;
    pd_t pd;
    page_arr* pgs = types::kernel_ident_allocator_new<page_arr>();
    fs::inode* mapped_file = nullptr;
    size_t file_offset = 0;

public:
    static constexpr int mirror_mm_area(mm_list* dst, const mm* src, pd_t pd)
    {
        mm new_nn {
            .start = src->start,
            .attr { src->attr.v },
            .pd = pd,
            .mapped_file = src->mapped_file,
            .file_offset = src->file_offset,
        };

        for (auto iter = src->pgs->begin(); iter != src->pgs->end(); ++iter) {
            if (k_map(&new_nn, &*iter,
                    src->attr.in.read, src->attr.in.write, src->attr.in.system, 1)
                != GB_OK) {
                return GB_FAILED;
            }
        }

        dst->emplace_back(types::move(new_nn));
        return GB_OK;
    }
};
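
// Illustrative sketch (not from this header): how a fork-like routine might
// duplicate an existing address space with mirror_mm_area(), assuming
// `old_mms` is the source list and `new_pd` a freshly allocated page
// directory (see alloc_pd() below):
//
//     mm_list* new_mms = types::kernel_ident_allocator_new<mm_list>();
//     pd_t new_pd = alloc_pd();
//     for (auto iter = old_mms->begin(); iter != old_mms->end(); ++iter) {
//         if (mm::mirror_mm_area(new_mms, &*iter, new_pd) != GB_OK) {
//             /* roll back / report failure */
//         }
//     }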

// in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;

// translate a physical address to its virtual (mapped) address
void* ptovp(pptr_t p_ptr);

// @return pointer to the mm_area containing l_ptr,
//         or nullptr if no such area exists
mm* find_mm_area(mm_list* mms, void* l_ptr);

inline constexpr size_t vptrdiff(void* p1, void* p2)
{
    return (uint8_t*)p1 - (uint8_t*)p2;
}
inline constexpr page* lto_page(const mm* mm_area, void* l_ptr)
{
    size_t offset = vptrdiff(l_ptr, mm_area->start);
    return &mm_area->pgs->at(offset / PAGE_SIZE);
}
inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}
inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}
inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}
inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}
inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}
inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}
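
// Worked example (illustrative): decomposing the address 0xC0103000 under the
// two-level, 4 KiB paging scheme these helpers assume.
//
//     to_page(0xC0103000) == 0xC0103        // drop the 12-bit page offset
//     to_pdi(0xC0103)     == 0x300 (768)    // top 10 bits: page directory index
//     to_pti(0xC0103)     == 0x103 (259)    // low 10 bits: page table index
//     to_pp(0xC0103)      == 0xC0103000     // back to the frame's physical address
//
//     lto_pdi(0xC0103000) == 768            // shorthand for to_pdi(to_page(...))
//     lto_pti(0xC0103000) == 259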

inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}
inline pd_t mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}
inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}
inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}
inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}
inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}
inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}
inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}
inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}
inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}
inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}
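
// Usage sketch (illustrative): resolving the page table entry that maps a
// linear address, using only helpers declared above.
//
//     pd_t pd = mms_get_pd(kernel_mms);
//     void* addr = (void*)0xC0103000;
//     pde_t* pde = to_pde(pd, addr);     // directory entry for addr
//     pte_t* pte = to_pte(pde, addr);    // follows pde->in.pt_page, then indexes
//     pte_t* same = to_pte(pd, addr);    // equivalent one-step shorthand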

inline constexpr void* mmend(const mm* mm_area)
{
    return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
}

// allocate a raw page
page_t alloc_raw_page(void);

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a struct page together with the raw page
struct page allocate_page(void);

pd_t alloc_pd(void);
pt_t alloc_pt(void);
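
// Illustrative sketch (not part of the original header): appending one freshly
// allocated page to the end of an existing area with k_map(), assuming `area`
// already belongs to a valid mm_list and its attributes describe the mapping:
//
//     page pg = allocate_page();
//     int ret = k_map(area, &pg,
//         area->attr.in.read, area->attr.in.write, area->attr.in.system,
//         0 /* not copy-on-write */);
//     if (ret != GB_OK) { /* out of memory or mapping failure */ }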