// mm.hpp — kernel memory-management declarations (pages, VMAs, address spaces)
  1. #pragma once
  2. #include <kernel/mem.h>
  3. #include <kernel/vfs.hpp>
  4. #include <types/allocator.hpp>
  5. #include <types/cplusplus.hpp>
  6. #include <types/list.hpp>
  7. #include <types/size.h>
  8. #include <types/status.h>
  9. #include <types/types.h>
  10. #include <types/vector.hpp>
  11. #define invalidate_tlb(addr) asm("invlpg (%0)" \
  12. : \
  13. : "r"(addr) \
  14. : "memory")
  15. constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;
// Descriptor for one mapped physical page within an mm area.
struct page {
    page_t phys_page_id;  // physical page frame number
    pte_t* pte;           // page-table entry that maps this page
    // Shared mapping count: mm_list::unmap frees the counter and the
    // physical frame when it reaches 1, otherwise just decrements it.
    size_t* ref_count;
    union {
        uint32_t v;  // raw access to all attribute bits at once
        struct {
            uint32_t cow : 1;  // copy-on-write flag (set when mirrored into a child)
        } in;
    } attr;
};
// private memory mapping
// changes won't be neither written back to file nor shared between processes
// TODO: shared mapping
// @param hint   preferred virtual start address for the mapping
// @param len    aligned to 4kb boundary automatically, exceeding part will
//               be filled with '0's and not written back to the file
// @param file   source inode to map from
// @param offset byte offset into the file where the mapping starts
// @param write  non-zero to create a writable mapping — TODO confirm against definition
// @param priv   non-zero for private (CoW) semantics — TODO confirm against definition
// @return status code — presumably GB_OK / GB_FAILED; verify in the .cpp
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
// Per-area page descriptor array, stored with the kernel
// identity-mapped allocator so it is reachable from any address space.
using page_arr = types::vector<page, types::kernel_ident_allocator>;

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);
void free_n_raw_pages(page_t start_pg, size_t n);

// Allocate / release page-directory and page-table structures.
pd_t alloc_pd(void);
pt_t alloc_pt(void);
void dealloc_pd(pd_t pd);
void dealloc_pt(pt_t pt);

// forward declaration
namespace kernel {
class mm_list;
} // namespace kernel
  52. struct mm {
  53. public:
  54. void* start;
  55. union {
  56. uint32_t v;
  57. struct {
  58. uint32_t read : 1;
  59. uint32_t write : 1;
  60. uint32_t system : 1;
  61. } in;
  62. } attr;
  63. kernel::mm_list* owner;
  64. page_arr* pgs = nullptr;
  65. fs::inode* mapped_file = nullptr;
  66. size_t file_offset = 0;
  67. public:
  68. constexpr void* end(void) const
  69. {
  70. return (char*)this->start + this->pgs->size() * PAGE_SIZE;
  71. }
  72. inline bool is_ident(void) const
  73. {
  74. return this->end() <= (void*)0x40000000U;
  75. }
  76. constexpr bool is_avail(void* start, void* end) const
  77. {
  78. void* m_start = this->start;
  79. void* m_end = this->end();
  80. return (start >= m_end || end <= m_start);
  81. }
  82. int append_page(page* pg, bool present, bool write, bool priv, bool cow);
  83. };
namespace kernel {

// An address space: the list of mapped areas (mm) together with the
// hardware page directory they are installed into.
class mm_list {
public:
    using list_type = ::types::list<mm, types::kernel_ident_allocator>;
    using iterator_type = list_type::iterator_type;
    using const_iterator_type = list_type::const_iterator_type;

private:
    list_type m_areas;  // all areas; identity (kernel) and user areas are mixed

public:
    pd_t m_pd;  // owned page directory; nullptr after being moved-from

public:
    // Takes ownership of an already-allocated page directory.
    explicit constexpr mm_list(pd_t pd)
        : m_pd(pd)
    {
    }
    // Deep copy (fork-style duplication); defined out of line.
    mm_list(const mm_list& v);
    // Move: steal the area list and the page directory. The source's
    // m_pd is nulled so its destructor becomes a no-op.
    constexpr mm_list(mm_list&& v)
        : m_areas(::types::move(v.m_areas))
        , m_pd(v.m_pd)
    {
        v.m_pd = nullptr;
    }
    // Unmap all user areas, then release the page directory.
    ~mm_list()
    {
        if (!m_pd)
            return;
        this->clear_user();
        dealloc_pd(m_pd);
    }
    constexpr iterator_type begin(void)
    {
        return m_areas.begin();
    }
    constexpr iterator_type end(void)
    {
        return m_areas.end();
    }
    constexpr const_iterator_type begin(void) const
    {
        return m_areas.begin();
    }
    constexpr const_iterator_type end(void) const
    {
        return m_areas.end();
    }
    constexpr const_iterator_type cbegin(void) const
    {
        return m_areas.cbegin();
    }
    constexpr const_iterator_type cend(void) const
    {
        return m_areas.cend();
    }
    // Append a new, initially empty area starting at `start`.
    // Read permission is always granted; write/system come from the
    // arguments. Returns an iterator to the created mm.
    constexpr iterator_type addarea(void* start, bool w, bool system)
    {
        return m_areas.emplace_back(mm {
            .start = start,
            .attr {
                .in {
                    .read = 1,
                    .write = w,
                    .system = system,
                },
            },
            .owner = this,
            // page array lives in identity-mapped storage so it stays
            // reachable regardless of the active address space
            .pgs = types::_new<types::kernel_ident_allocator, page_arr>(),
        });
    }
    // Unmap and erase every non-identity (user) area, leaving the
    // kernel identity mappings intact.
    constexpr void clear_user()
    {
        for (auto iter = this->begin(); iter != this->end();) {
            if (iter->is_ident()) {
                ++iter;
                continue;
            }
            this->unmap(iter);
            iter = m_areas.erase(iter);  // erase yields the next iterator
        }
    }
    // Mirror `src` into this address space, sharing its physical pages
    // copy-on-write (append_page is called with cow = true).
    // @return GB_OK on success, GB_FAILED otherwise.
    // NOTE(review): on failure the partially mirrored area is left in
    // the list — no rollback is performed.
    constexpr int mirror_area(mm& src)
    {
        auto area = this->addarea(
            src.start, src.attr.in.write, src.attr.in.system);
        if (src.mapped_file) {
            area->mapped_file = src.mapped_file;
            area->file_offset = src.file_offset;
        }
        for (auto& pg : *src.pgs) {
            if (area->append_page(&pg,
                    true,
                    src.attr.in.write,
                    src.attr.in.system,
                    true)
                != GB_OK) {
                return GB_FAILED;
            }
        }
        return GB_OK;
    }
    // Tear down one area: for each page, drop its reference (freeing
    // the counter and the physical frame when this was the last user),
    // clear its PTE, and flush the stale TLB entry. The mm node itself
    // remains in m_areas — callers are expected to erase it.
    constexpr void unmap(iterator_type area)
    {
        int i = 0;
        // TODO:
        // if there are more than 4 pages, calling invlpg
        // should be faster. otherwise, we use movl cr3
        // bool should_invlpg = (area->pgs->size() > 4);
        for (auto& pg : *area->pgs) {
            if (*pg.ref_count == 1) {
                // last reference: release counter storage and the frame
                ki_free(pg.ref_count);
                free_n_raw_pages(pg.phys_page_id, 1);
            } else {
                --*pg.ref_count;
            }
            pg.phys_page_id = 0;
            pg.attr.v = 0;
            pg.pte->v = 0;  // mark not-present in the page table
            invalidate_tlb((uint32_t)area->start + (i++) * PAGE_SIZE);
        }
        types::pdelete<types::kernel_ident_allocator>(area->pgs);
        area->attr.v = 0;
        area->start = 0;
    }
    // Area containing linear address `lp`, or end() if it is unmapped.
    constexpr iterator_type find(void* lp)
    {
        for (auto iter = this->begin(); iter != this->end(); ++iter)
            if (lp >= iter->start && lp < iter->end())
                return iter;
        return this->end();
    }
};
} // namespace kernel
// global variables
// Address-space list used by the kernel itself — NOTE(review): presumably
// installed during early memory init; confirm the initializer site.
inline kernel::mm_list* kernel_mms;
// Shared placeholder page — presumably a zero page for lazy/CoW
// allocation; verify against its users.
inline page empty_page;
// --------------------------------
// translate physical address to virtual(mapped) address
void* ptovp(pptr_t p_ptr);
  221. inline constexpr size_t vptrdiff(void* p1, void* p2)
  222. {
  223. return (uint8_t*)p1 - (uint8_t*)p2;
  224. }
  225. inline constexpr page* lto_page(mm* mm_area, void* l_ptr)
  226. {
  227. size_t offset = vptrdiff(l_ptr, mm_area->start);
  228. return &mm_area->pgs->at(offset / PAGE_SIZE);
  229. }
// Physical address -> page number (4 KiB pages, hence the shift by 12).
inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}
// Page number -> page-directory index (each PDE covers 1024 pages).
inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}
// Page number -> page-table index (low 10 bits, 1024 PTEs per table).
inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}
// Page number -> physical address of the page's first byte.
inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}
// Address -> page-directory index.
inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}
// Address -> page-table index.
inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}
// Pointer to the PTE for page `pg` within table `pt`.
inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}
// Page number -> kernel-mapped virtual address of that page.
inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}
// Reinterpret the page's mapped memory as a page directory.
inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}
// Reinterpret the page's mapped memory as a page table.
inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}
// Page table referenced by a page-directory entry.
inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}
// PDE within `pd` covering virtual address `addr`.
inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}
// PTE within `pt` covering virtual address `addr`.
inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}
// PTE for `addr` in the table referenced by `pde`.
inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}
// Full two-level walk: pd -> pde -> pte for virtual address `addr`.
inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}
// PTE for page `pg` in the table referenced by `pde`.
inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}
// allocate a raw page
// Convenience wrapper over alloc_n_raw_pages for the single-page case.
inline page_t alloc_raw_page(void)
{
    return alloc_n_raw_pages(1);
}
// allocate a struct page together with the raw page
// (i.e. a ready-to-append descriptor with a fresh frame); defined out of line.
struct page allocate_page(void);