// mm.hpp — kernel memory-management declarations (pages, VMAs, address spaces)
  1. #pragma once
  2. #include <kernel/mem.h>
  3. #include <kernel/vfs.hpp>
  4. #include <stdint.h>
  5. #include <types/allocator.hpp>
  6. #include <types/cplusplus.hpp>
  7. #include <types/list.hpp>
  8. #include <types/size.h>
  9. #include <types/status.h>
  10. #include <types/types.h>
  11. #include <types/vector.hpp>
// Flush the TLB entry covering a single virtual address. The "memory"
// clobber keeps the compiler from reordering page-table stores across
// the flush.
#define invalidate_tlb(addr) asm("invlpg (%0)" \
                                 :             \
                                 : "r"(addr)   \
                                 : "memory")
// Compiler-level barrier only: forbids reordering of memory accesses
// around this point; emits no instruction.
#define memory_fence asm volatile("" ::: "memory")
// Every thread gets two pages of kernel stack.
constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;
// Attribute bits stored in page::attr.
constexpr uint32_t PAGE_COW = (1 << 0);
constexpr uint32_t PAGE_MMAP = (1 << 1);
// Self-referential defines keep `#ifdef PAGE_COW`-style feature tests
// working while the real values remain typed constexpr constants.
#define PAGE_COW PAGE_COW
#define PAGE_MMAP PAGE_MMAP
// Bookkeeping for one physical page frame mapped into some VMA.
struct page {
    page_t phys_page_id; // physical frame number
    size_t* ref_count;   // presumably shared between all struct pages that
                         // reference the same frame (COW) — confirm in .cpp
    // Location of the PTE that maps this page:
    // 0 :11 : pte_index
    // 12:31 : pt_page
    uint32_t pg_pteidx;
    uint32_t attr;       // PAGE_COW / PAGE_MMAP bits
};
// private memory mapping
// changes are neither written back to the file nor shared between processes
// TODO: shared mapping
// @param hint preferred start address for the mapping
// @param len is aligned to 4kb boundary automatically, exceeding part will
// be filled with '0's and not written back to the file
// @param file inode supplying the mapped data
// @param offset MUST be aligned to 4kb
// @param write non-zero requests a writable mapping
// @param priv NOTE(review): appears to mean "system/kernel mapping" (cf.
// mm::attr.in.system) — confirm against the definition
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
// Ordered array of the pages backing one VMA, allocated with the
// kernel identity allocator.
using page_arr = types::vector<page, types::kernel_ident_allocator>;
// forward declaration
namespace kernel {
class mm_list;
} // namespace kernel
  48. template <uint32_t base, uint32_t expo>
  49. inline constexpr uint32_t pow()
  50. {
  51. if constexpr (expo == 0)
  52. return 1;
  53. if constexpr (expo == 1)
  54. return base;
  55. if constexpr (expo % 2 == 0)
  56. return pow<base, expo / 2>() * pow<base, expo / 2>();
  57. else
  58. return pow<base, expo / 2>() * pow<base, expo / 2 + 1>();
  59. }
  60. template <int n>
  61. inline constexpr uint32_t align_down(uint32_t v)
  62. {
  63. return v & ~(pow<2, n>() - 1);
  64. }
  65. template <int n>
  66. inline constexpr uint32_t align_up(uint32_t v)
  67. {
  68. return align_down<n>(v + pow<2, n>() - 1);
  69. }
// Release the page directory `pd`.
void dealloc_pd(page_t pd);
// allocate a struct page together with the raw page
page allocate_page(void);
// Release `pg` — NOTE(review): whether this drops a refcount or frees
// unconditionally is decided in the definition; confirm there.
void free_page(page* pg);
// TODO: this is for alloc_kstack()
// CHANGE THIS
page_t __alloc_raw_page(void);
void __free_raw_page(page_t pg);
// One contiguous virtual-memory area (VMA) belonging to an mm_list.
struct mm {
public:
    void* start; // first virtual address covered by this area
    union {
        uint32_t v; // raw attribute word
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1; // kernel-space area
        } in;
    } attr;
    kernel::mm_list* owner;           // address space containing this area
    page_arr* pgs = nullptr;          // backing pages, in address order
    fs::inode* mapped_file = nullptr; // non-null for file-backed mappings
    size_t file_offset = 0;           // offset into mapped_file

public:
    // One-past-the-end virtual address: the area spans exactly
    // pgs->size() pages starting at `start`.
    constexpr void* end(void) const
    {
        return (char*)this->start + this->pgs->size() * PAGE_SIZE;
    }
    // Kernel space begins at the 3 GiB mark (higher-half layout).
    inline bool is_kernel_space(void) const
    {
        return this->start >= (void*)0xc0000000;
    }
    // True when [start, end) does NOT overlap this area.
    constexpr bool is_avail(void* start, void* end) const
    {
        void* m_start = this->start;
        void* m_end = this->end();
        return (start >= m_end || end <= m_start);
    }
    // Map `pg` at the end of this area with attribute bits `attr`;
    // defined elsewhere.
    int append_page(page& pg, uint32_t attr, bool priv);
};
  110. namespace kernel {
// Temporarily map physical page `pg` into kernel address space and
// return the mapped pointer; release the mapping with pfree().
// Prefer the RAII wrapper `paccess` over calling these directly.
uint8_t* pmap(page_t pg);
void pfree(page_t pg);
  113. class paccess : public types::non_copyable {
  114. private:
  115. page_t m_pg;
  116. void* m_ptr;
  117. public:
  118. paccess(void) = delete;
  119. paccess(paccess&&) = delete;
  120. paccess& operator=(paccess&&) = delete;
  121. inline explicit paccess(page_t pg)
  122. : m_pg(pg)
  123. {
  124. m_ptr = pmap(pg);
  125. }
  126. constexpr void* ptr(void) const
  127. {
  128. return m_ptr;
  129. }
  130. ~paccess()
  131. {
  132. pfree(m_pg);
  133. }
  134. };
// The complete set of memory areas (VMAs) of one address space,
// together with the physical page directory backing it.
class mm_list {
public:
    using list_type = ::types::list<mm, types::kernel_ident_allocator>;
    using iterator_type = list_type::iterator_type;
    using const_iterator_type = list_type::const_iterator_type;

private:
    list_type m_areas; // all areas, kernel and user

public:
    page_t m_pd; // physical page of the page directory; 0 after move-out

public:
    explicit constexpr mm_list(page_t pd)
        : m_pd(pd)
    {
    }
    // Deep copy; defined elsewhere.
    mm_list(const mm_list& v);
    // Move: steal the area list and page directory. The moved-from
    // list is left with m_pd == 0 so its destructor does nothing.
    constexpr mm_list(mm_list&& v)
        : m_areas(::types::move(v.m_areas))
        , m_pd(v.m_pd)
    {
        v.m_pd = 0;
        // the list nodes changed owner; fix up each area's back-pointer
        for (auto& area : m_areas)
            area.owner = this;
    }
    ~mm_list()
    {
        if (!m_pd)
            return; // moved-from: nothing to release
        this->clear_user();
        dealloc_pd(m_pd);
    }
    constexpr iterator_type begin(void)
    {
        return m_areas.begin();
    }
    constexpr iterator_type end(void)
    {
        return m_areas.end();
    }
    constexpr const_iterator_type begin(void) const
    {
        return m_areas.begin();
    }
    constexpr const_iterator_type end(void) const
    {
        return m_areas.end();
    }
    constexpr const_iterator_type cbegin(void) const
    {
        return m_areas.cbegin();
    }
    constexpr const_iterator_type cend(void) const
    {
        return m_areas.cend();
    }
    // Create a new empty readable area at `start` (writable iff `w`,
    // kernel-space iff `system`) and return an iterator to it.
    constexpr iterator_type addarea(void* start, bool w, bool system)
    {
        return m_areas.emplace_back(mm {
            .start = start,
            .attr {
                .in {
                    .read = 1,
                    .write = w,
                    .system = system,
                },
            },
            .owner = this,
            .pgs = types::_new<types::kernel_ident_allocator, page_arr>(),
        });
    }
    // Unmap and erase every user-space area; kernel areas are kept.
    constexpr void clear_user()
    {
        for (auto iter = this->begin(); iter != this->end();) {
            if (iter->is_kernel_space()) {
                ++iter;
                continue;
            }
            this->unmap(iter);
            iter = m_areas.erase(iter);
        }
    }
    // Add a copy-on-write mirror of `src` (an area of another address
    // space) to this list: the backing pages are shared and marked
    // PAGE_COW; file-mapping info is carried over.
    // NOTE(review): on append_page failure the partially-built area is
    // left in m_areas — consider cleaning it up before returning.
    constexpr int mirror_area(mm& src)
    {
        auto area = this->addarea(
            src.start, src.attr.in.write, src.attr.in.system);
        if (src.mapped_file) {
            area->mapped_file = src.mapped_file;
            area->file_offset = src.file_offset;
        }
        for (auto& pg : *src.pgs) {
            if (area->append_page(pg,
                    PAGE_COW | (pg.attr & PAGE_MMAP),
                    src.attr.in.system)
                != GB_OK) {
                return GB_FAILED;
            }
        }
        return GB_OK;
    }
    // Tear down an area: clear each PTE, release each page, flush the
    // TLB per page, then free the page array. The area itself is left
    // zeroed but not erased from the list (clear_user() does that).
    inline void unmap(iterator_type area)
    {
        int i = 0;
        // TODO:
        // if there are more than 4 pages, calling invlpg
        // should be faster. otherwise, we use movl cr3
        // bool should_invlpg = (area->pgs->size() > 4);
        for (auto& pg : *area->pgs) {
            // pg_pteidx layout (see struct page): high 20 bits are the
            // page-table page, low 12 bits the PTE index within it
            kernel::paccess pa(pg.pg_pteidx >> 12);
            auto pt = (pt_t)pa.ptr();
            assert(pt);
            auto* pte = *pt + (pg.pg_pteidx & 0xfff);
            // clear the mapping before releasing the frame
            pte->v = 0;
            free_page(&pg);
            invalidate_tlb((uint32_t)area->start + (i++) * PAGE_SIZE);
        }
        types::pdelete<types::kernel_ident_allocator>(area->pgs);
        area->attr.v = 0;
        area->start = 0;
    }
    // Find the area containing virtual address `lp`; a zero-length
    // area matches only its exact start. Returns end() if none.
    constexpr iterator_type find(void* lp)
    {
        for (auto iter = this->begin(); iter != this->end(); ++iter) {
            void* start = iter->start;
            void* end = iter->end();
            if (start == end && lp == start)
                return iter;
            if (lp >= start && lp < end)
                return iter;
        }
        return this->end();
    }
    // True when the page-aligned range covering [start, start+len)
    // overlaps none of the existing areas.
    bool is_avail(void* start, size_t len)
    {
        start = (void*)align_down<12>((uint32_t)start);
        len = align_up<12>((uint32_t)start + len)
            - (uint32_t)start;
        for (const auto& area : *this) {
            if (!area.is_avail(start, (char*)start + len))
                return false;
        }
        return true;
    }
};
  277. } // namespace kernel
// global variables
// Address space used by the kernel — NOTE(review): initialization
// happens outside this header; confirm where it is assigned.
inline kernel::mm_list* kernel_mms;
// A shared zero/default page — NOTE(review): its use lives outside
// this header; confirm the intended semantics at the use sites.
inline page empty_page;
  281. // --------------------------------
  282. inline constexpr size_t vptrdiff(void* p1, void* p2)
  283. {
  284. return (uint8_t*)p1 - (uint8_t*)p2;
  285. }
  286. // inline constexpr page* lto_page(mm* mm_area, void* l_ptr)
  287. // {
  288. // size_t offset = vptrdiff(l_ptr, mm_area->start);
  289. // return &mm_area->pgs->at(offset / PAGE_SIZE);
  290. // }
  291. // inline constexpr page_t to_page(pptr_t ptr)
  292. // {
  293. // return ptr >> 12;
  294. // }
  295. // inline constexpr size_t to_pdi(page_t pg)
  296. // {
  297. // return pg >> 10;
  298. // }
  299. // inline constexpr size_t to_pti(page_t pg)
  300. // {
  301. // return pg & (1024 - 1);
  302. // }
  303. // inline constexpr pptr_t to_pp(page_t p)
  304. // {
  305. // return p << 12;
  306. // }
  307. inline size_t v_to_pdi(void* addr)
  308. {
  309. return (uint32_t)addr >> 22;
  310. }
  311. inline size_t v_to_pti(void* addr)
  312. {
  313. return ((uint32_t)addr >> 12) & 0x3ff;
  314. }
  315. // inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
  316. // {
  317. // return *pt + to_pti(pg);
  318. // }
  319. // inline void* to_vp(page_t pg)
  320. // {
  321. // return ptovp(to_pp(pg));
  322. // }
  323. // inline pd_t to_pd(page_t pg)
  324. // {
  325. // return reinterpret_cast<pd_t>(to_vp(pg));
  326. // }
  327. // inline pt_t to_pt(page_t pg)
  328. // {
  329. // return reinterpret_cast<pt_t>(to_vp(pg));
  330. // }
  331. // inline pt_t to_pt(pde_t* pde)
  332. // {
  333. // return to_pt(pde->in.pt_page);
  334. // }
  335. // inline pde_t* to_pde(pd_t pd, void* addr)
  336. // {
  337. // return *pd + lto_pdi((pptr_t)addr);
  338. // }
  339. // inline pte_t* to_pte(pt_t pt, void* addr)
  340. // {
  341. // return *pt + lto_pti((pptr_t)addr);
  342. // }
  343. // inline pte_t* to_pte(pde_t* pde, void* addr)
  344. // {
  345. // return to_pte(to_pt(pde), addr);
  346. // }
  347. // inline pte_t* to_pte(pd_t pd, void* addr)
  348. // {
  349. // return to_pte(to_pde(pd, addr), addr);
  350. // }
  351. // inline pte_t* to_pte(pde_t* pde, page_t pg)
  352. // {
  353. // return to_pte(to_pt(pde), pg);
  354. // }