// mm.hpp — kernel memory management declarations
  1. #pragma once
  2. #include <kernel/mem.h>
  3. #include <kernel/vfs.hpp>
  4. #include <stdint.h>
  5. #include <types/allocator.hpp>
  6. #include <types/cplusplus.hpp>
  7. #include <types/list.hpp>
  8. #include <types/size.h>
  9. #include <types/status.h>
  10. #include <types/types.h>
  11. #include <types/vector.hpp>
// flush the TLB entry for the single virtual page containing `addr`
// (x86 `invlpg`; "memory" clobber keeps the compiler from reordering
// memory accesses across the flush)
#define invalidate_tlb(addr) asm("invlpg (%0)" \
                                 :            \
                                 : "r"(addr)  \
                                 : "memory")

// kernel-mode stack size of every thread: two pages
constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;
// descriptor of one physical page mapped into a memory area
struct page {
    // physical page number (physical address >> 12)
    page_t phys_page_id;
    // page table entry through which this page is currently mapped
    pte_t* pte;
    // shared reference count; the raw page is freed when the last
    // reference is unmapped (see kernel::mm_list::unmap)
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            // copy-on-write: the page is shared until first write
            uint32_t cow : 1;
        } in;
    } attr;
};
// private memory mapping
// changes will neither be written back to the file nor shared
// between processes
// TODO: shared mapping
// @param hint desired starting virtual address of the mapping
// @param len is aligned to 4kb boundary automatically, the exceeding part
// will be filled with '0's and not written back to the file
// @param file inode providing the mapped contents
// @param offset MUST be aligned to 4kb
// @param write non-zero to make the mapping writable
// @param priv presumably selects the kernel-only (`system`) attribute
//   of the area — verify against the definition
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
// array of page descriptors backing one memory area
using page_arr = types::vector<page, types::kernel_ident_allocator>;

// allocate n consecutive raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);
// release n consecutive raw pages starting at start_pg
void free_n_raw_pages(page_t start_pg, size_t n);

// allocate / deallocate a page directory or page table
pd_t alloc_pd(void);
pt_t alloc_pt(void);
void dealloc_pd(pd_t pd);
void dealloc_pt(pt_t pt);

// forward declaration
namespace kernel {
class mm_list;
} // namespace kernel
  54. template <uint32_t base, uint32_t expo>
  55. inline constexpr uint32_t pow()
  56. {
  57. if constexpr (expo == 0)
  58. return 1;
  59. if constexpr (expo == 1)
  60. return base;
  61. if constexpr (expo % 2 == 0)
  62. return pow<base, expo / 2>() * pow<base, expo / 2>();
  63. else
  64. return pow<base, expo / 2>() * pow<base, expo / 2 + 1>();
  65. }
  66. template <int n>
  67. inline constexpr uint32_t align_down(uint32_t v)
  68. {
  69. return v & ~(pow<2, n>() - 1);
  70. }
  71. template <int n>
  72. inline constexpr uint32_t align_up(uint32_t v)
  73. {
  74. return align_down<n>(v + pow<2, n>() - 1);
  75. }
  76. struct mm {
  77. public:
  78. void* start;
  79. union {
  80. uint32_t v;
  81. struct {
  82. uint32_t read : 1;
  83. uint32_t write : 1;
  84. uint32_t system : 1;
  85. } in;
  86. } attr;
  87. kernel::mm_list* owner;
  88. page_arr* pgs = nullptr;
  89. fs::inode* mapped_file = nullptr;
  90. size_t file_offset = 0;
  91. public:
  92. constexpr void* end(void) const
  93. {
  94. return (char*)this->start + this->pgs->size() * PAGE_SIZE;
  95. }
  96. inline bool is_ident(void) const
  97. {
  98. return this->end() <= (void*)0x40000000U;
  99. }
  100. constexpr bool is_avail(void* start, void* end) const
  101. {
  102. void* m_start = this->start;
  103. void* m_end = this->end();
  104. return (start >= m_end || end <= m_start);
  105. }
  106. int append_page(page* pg, bool present, bool write, bool priv, bool cow);
  107. };
namespace kernel {
// an address space: the list of its memory areas plus the hardware
// page directory that maps them
class mm_list {
public:
    using list_type = ::types::list<mm, types::kernel_ident_allocator>;
    using iterator_type = list_type::iterator_type;
    using const_iterator_type = list_type::const_iterator_type;

private:
    list_type m_areas;

public:
    // page directory of this address space
    pd_t m_pd;

public:
    // take ownership of an existing page directory
    explicit constexpr mm_list(pd_t pd)
        : m_pd(pd)
    {
    }
    // deep copy; defined out of line
    mm_list(const mm_list& v);
    constexpr mm_list(mm_list&& v)
        : m_areas(::types::move(v.m_areas))
        , m_pd(v.m_pd)
    {
        // steal the page directory so v's destructor won't free it
        v.m_pd = nullptr;
    }
    ~mm_list()
    {
        // moved-from objects have a null m_pd and own nothing
        if (!m_pd)
            return;
        this->clear_user();
        dealloc_pd(m_pd);
    }

    // iteration over the areas
    constexpr iterator_type begin(void)
    {
        return m_areas.begin();
    }
    constexpr iterator_type end(void)
    {
        return m_areas.end();
    }
    constexpr const_iterator_type begin(void) const
    {
        return m_areas.begin();
    }
    constexpr const_iterator_type end(void) const
    {
        return m_areas.end();
    }
    constexpr const_iterator_type cbegin(void) const
    {
        return m_areas.cbegin();
    }
    constexpr const_iterator_type cend(void) const
    {
        return m_areas.cend();
    }

    // append a new, initially empty, readable area starting at `start`
    // @param w whether the area is writable
    // @param system whether the area is kernel-only
    // @return iterator to the new area
    constexpr iterator_type addarea(void* start, bool w, bool system)
    {
        return m_areas.emplace_back(mm {
            .start = start,
            .attr {
                .in {
                    .read = 1,
                    .write = w,
                    .system = system,
                },
            },
            .owner = this,
            .pgs = types::_new<types::kernel_ident_allocator, page_arr>(),
        });
    }

    // unmap and remove every area except the identity-mapped
    // kernel areas (see mm::is_ident)
    constexpr void clear_user()
    {
        for (auto iter = this->begin(); iter != this->end();) {
            if (iter->is_ident()) {
                ++iter;
                continue;
            }
            this->unmap(iter);
            // erase returns the next valid iterator
            iter = m_areas.erase(iter);
        }
    }

    // duplicate src into this address space, appending every page
    // with the cow flag set so contents are shared until written
    // NOTE(review): pages appended before a failure are not rolled back
    constexpr int mirror_area(mm& src)
    {
        auto area = this->addarea(
            src.start, src.attr.in.write, src.attr.in.system);
        if (src.mapped_file) {
            area->mapped_file = src.mapped_file;
            area->file_offset = src.file_offset;
        }
        for (auto& pg : *src.pgs) {
            if (area->append_page(&pg,
                    true,
                    src.attr.in.write,
                    src.attr.in.system,
                    true)
                != GB_OK) {
                return GB_FAILED;
            }
        }
        return GB_OK;
    }

    // release every page of the area and clear its page table entries;
    // the caller removes the area from the list (see clear_user)
    constexpr void unmap(iterator_type area)
    {
        int i = 0;
        // TODO:
        // if there are more than 4 pages, calling invlpg
        // should be faster. otherwise, we use movl cr3
        // bool should_invlpg = (area->pgs->size() > 4);
        for (auto& pg : *area->pgs) {
            // free the physical page only when we hold the last reference
            if (*pg.ref_count == 1) {
                ki_free(pg.ref_count);
                free_n_raw_pages(pg.phys_page_id, 1);
            } else {
                --*pg.ref_count;
            }
            pg.phys_page_id = 0;
            pg.attr.v = 0;
            pg.pte->v = 0;
            // flush the stale translation for this virtual page
            invalidate_tlb((uint32_t)area->start + (i++) * PAGE_SIZE);
        }
        types::pdelete<types::kernel_ident_allocator>(area->pgs);
        area->attr.v = 0;
        area->start = 0;
    }

    // find the area containing linear address lp, or end() if none
    constexpr iterator_type find(void* lp)
    {
        for (auto iter = this->begin(); iter != this->end(); ++iter)
            if (lp >= iter->start && lp < iter->end())
                return iter;
        return this->end();
    }

    // check that [start, start + len), rounded out to 4kb page
    // boundaries, overlaps none of the existing areas
    bool is_avail(void* start, size_t len)
    {
        start = (void*)align_down<12>((uint32_t)start);
        len = align_up<12>((uint32_t)start + len)
            - (uint32_t)start;
        for (const auto& area : *this) {
            if (!area.is_avail(start, (char*)start + len))
                return false;
        }
        return true;
    }
};
} // namespace kernel
// global variables

// presumably the kernel's own address space — verify against the
// code that initializes it
inline kernel::mm_list* kernel_mms;
// NOTE(review): purpose of this shared page is not visible here;
// confirm against its users
inline page empty_page;
// --------------------------------

// translate physical address to virtual(mapped) address
void* ptovp(pptr_t p_ptr);
  256. inline constexpr size_t vptrdiff(void* p1, void* p2)
  257. {
  258. return (uint8_t*)p1 - (uint8_t*)p2;
  259. }
  260. inline constexpr page* lto_page(mm* mm_area, void* l_ptr)
  261. {
  262. size_t offset = vptrdiff(l_ptr, mm_area->start);
  263. return &mm_area->pgs->at(offset / PAGE_SIZE);
  264. }
// --- address translation helpers (4kb pages, 1024-entry tables) ---

// page number of the page containing physical address ptr
inline constexpr page_t to_page(pptr_t ptr)
{
    return ptr >> 12;
}
// page directory index covering page pg
inline constexpr size_t to_pdi(page_t pg)
{
    return pg >> 10;
}
// index of page pg within its page table
inline constexpr size_t to_pti(page_t pg)
{
    return pg & (1024 - 1);
}
// physical address of the first byte of page p
inline constexpr pptr_t to_pp(page_t p)
{
    return p << 12;
}
// page directory index for linear/physical address ptr
inline constexpr size_t lto_pdi(pptr_t ptr)
{
    return to_pdi(to_page(ptr));
}
// page table index for linear/physical address ptr
inline constexpr size_t lto_pti(pptr_t ptr)
{
    return to_pti(to_page(ptr));
}
// entry of pt that maps page pg
inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + to_pti(pg);
}
// mapped virtual address of page pg (via ptovp)
inline void* to_vp(page_t pg)
{
    return ptovp(to_pp(pg));
}
// treat page pg as a page directory
inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}
// treat page pg as a page table
inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}
// page table referenced by directory entry pde
inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}
// directory entry of pd covering address addr
inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + lto_pdi((pptr_t)addr);
}
// entry of pt mapping address addr
inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + lto_pti((pptr_t)addr);
}
// table entry for addr, going through directory entry pde
inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}
// table entry for addr, walking from page directory pd
inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}
// entry mapping page pg, going through directory entry pde
inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}
// allocate a raw page
// @return the id of the page allocated
inline page_t alloc_raw_page(void)
{
    return alloc_n_raw_pages(1);
}

// allocate a struct page together with the raw page it describes
struct page allocate_page(void);