// mm.hpp — kernel memory-management declarations
  1. #pragma once
  2. #include <set>
  3. #include <vector>
  4. #include <bit>
  5. #include <cstddef>
  6. #include <utility>
  7. #include <kernel/mem.h>
  8. #include <kernel/vfs.hpp>
  9. #include <stdint.h>
  10. #include <types/allocator.hpp>
  11. #include <types/cplusplus.hpp>
  12. #include <types/size.h>
  13. #include <types/status.h>
  14. #include <types/types.h>
// Flush the TLB entry for a single virtual address.  (An asm statement with
// no output operands is implicitly volatile; the "memory" clobber keeps the
// compiler from reordering memory accesses across it.)
#define invalidate_tlb(addr) asm("invlpg (%0)" \
                                 : \
                                 : "r"(addr) \
                                 : "memory")

// Compiler-only barrier: forbids reordering of memory accesses across it.
// Emits no instruction.
#define memory_fence asm volatile("" ::: "memory")

// Size of each thread's kernel-mode stack: two pages.
constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;

// Flag bits stored in page::attr.
constexpr uint32_t PAGE_COW = (1 << 0);   // presumably marks a pending copy-on-write — TODO confirm at the fault handler
constexpr uint32_t PAGE_MMAP = (1 << 1);  // presumably marks a file-mapped page — TODO confirm
// Self-referential defines so other code can test availability with #ifdef
// while the actual values stay type-safe constexpr constants.
#define PAGE_COW PAGE_COW
#define PAGE_MMAP PAGE_MMAP
// Bookkeeping record for one physical page frame.
struct page {
    page_t phys_page_id;  // physical frame number of the page itself
    size_t* ref_count;    // shared reference count; presumably shared between aliases of the same frame — TODO confirm
    // Location of the PTE that currently maps this page, packed in one word:
    // 0 :11 : pte_index
    // 12:31 : pt_page
    // (consumed in mm_list::unmap(): high bits select the page-table frame,
    // low bits index the PTE within it)
    uint32_t pg_pteidx;
    // PAGE_COW / PAGE_MMAP flag bits; mutable so flags can be updated through
    // const iterators/references to the containing structures.
    mutable uint32_t attr;
};
// Establish a private file-backed memory mapping.
// Changes are neither written back to the file nor shared between processes.
// TODO: shared mapping
// @param hint   preferred virtual address for the mapping
// @param len    aligned to a 4 KiB boundary automatically; the exceeding part
//               is filled with '0's and not written back to the file
// @param file   inode whose contents back the mapping
// @param offset file offset of the mapping; MUST be aligned to 4 KiB
// @param write  non-zero to create a writable mapping — TODO confirm exact semantics
// @param priv   presumably selects a privileged/kernel mapping — TODO confirm
// @return status code, presumably from types/status.h — TODO confirm
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
  46. template <uint32_t base, uint32_t expo>
  47. constexpr uint32_t pow()
  48. {
  49. if constexpr (expo == 0)
  50. return 1;
  51. if constexpr (expo == 1)
  52. return base;
  53. if constexpr (expo % 2 == 0)
  54. return pow<base, expo / 2>() * pow<base, expo / 2>();
  55. else
  56. return pow<base, expo / 2>() * pow<base, expo / 2 + 1>();
  57. }
  58. template <int N>
  59. constexpr uint32_t align_down(uint32_t v)
  60. {
  61. return v & ~(pow<2, N>() - 1);
  62. }
  63. template <int N>
  64. constexpr void* align_down(void* v)
  65. {
  66. return std::bit_cast<void*>(align_down<N>(std::bit_cast<uint32_t>(v)));
  67. }
  68. template <int N>
  69. constexpr uint32_t align_up(uint32_t v)
  70. {
  71. return align_down<N>(v + pow<2, N>() - 1);
  72. }
  73. template <int N>
  74. constexpr void* align_up(void* v)
  75. {
  76. return std::bit_cast<void*>(align_up<N>(std::bit_cast<uint32_t>(v)));
  77. }
  78. constexpr size_t vptrdiff(void* p1, void* p2)
  79. {
  80. auto* _p1 = static_cast<std::byte*>(p1);
  81. auto* _p2 = static_cast<std::byte*>(p2);
  82. return _p1 - _p2;
  83. }
  84. constexpr void* vptradd(void* p, std::size_t off)
  85. {
  86. auto* _p = static_cast<std::byte*>(p);
  87. return _p + off;
  88. }
// Release the page directory frame `pd`; presumably also frees the page
// tables it references — TODO confirm against the definition.
void dealloc_pd(page_t pd);

// allocate a struct page together with the raw page
page allocate_page(void);
// Release a page obtained from allocate_page(); presumably drops one
// reference and frees the frame at zero — TODO confirm.
void free_page(page* pg);

// TODO: this is for alloc_kstack()
// CHANGE THIS
// Raw physical-frame allocation with no struct page bookkeeping.
page_t __alloc_raw_page(void);
void __free_raw_page(page_t pg);
  97. namespace kernel {
// Map physical page `pg` into kernel address space and return the virtual
// address; `cached` presumably controls cacheability of the mapping — TODO
// confirm.  Paired with pfree() (see the paccess RAII wrapper below).
void* pmap(page_t pg, bool cached = true);
// Release a mapping created by pmap().
void pfree(page_t pg);
// RAII accessor for a physical page: maps it with pmap() on construction and
// releases the mapping with pfree() on destruction.  Neither copyable
// (via types::non_copyable) nor movable.
class paccess : public types::non_copyable {
private:
    page_t m_pg;  // page whose mapping we own
    void* m_ptr;  // virtual address returned by pmap(); valid for our lifetime

public:
    paccess(void) = delete;
    paccess(paccess&&) = delete;
    paccess& operator=(paccess&&) = delete;
    // Map page `pg`; `cached` is forwarded to pmap().
    // NOTE(review): declared constexpr yet calls the non-constexpr pmap(), so
    // it can never actually be evaluated at compile time — confirm intent.
    constexpr explicit paccess(page_t pg, bool cached = true)
        : m_pg(pg)
    {
        m_ptr = pmap(pg, cached);
    }
    // Virtual address of the mapped page.
    constexpr void* ptr(void) const
    {
        return m_ptr;
    }
    ~paccess()
    {
        pfree(m_pg);
    }
};
  122. namespace memory {
// One contiguous virtual memory area (VMA): a start address plus the vector
// of physical pages that back it, one entry per PAGE_SIZE.
struct mm {
public:
    using pages_vector = std::vector<page,
        types::allocator_adapter<page, types::kernel_ident_allocator>>;

public:
    void* start {};  // first virtual address covered by this area
    struct mm_attr {
        uint32_t write : 1;   // area is writable
        uint32_t system : 1;  // kernel-space area (see is_kernel_space())
        uint32_t mapped : 1;  // area is a file mapping
    } attr {};
    pages_vector* pgs {};       // backing pages; heap-allocated, owned by the area
    fs::inode* mapped_file {};  // backing file when attr.mapped is set
    size_t file_offset {};      // presumably the file offset of `start` — TODO confirm

public:
    // One-past-the-end virtual address: start + page_count * PAGE_SIZE.
    constexpr void* end() const noexcept
    { return vptradd(start, pgs->size() * PAGE_SIZE); }

    constexpr bool is_kernel_space() const noexcept
    { return attr.system; }

    // True if the range [ostart, oend) does not overlap this area.
    constexpr bool is_avail(void* ostart, void* oend) const noexcept
    {
        void* m_start = start;
        void* m_end = end();

        return (ostart >= m_end || oend <= m_start);
    }

    void append_page(pd_t pd, const page& pg, uint32_t attr, bool priv);

    /**
     * @brief Splits the memory block at the specified address.
     *
     * @param addr The address at which the memory block will be split.
     * @return The new memory block created after splitting.
     */
    mm split(void* addr);

    // Strict weak ordering by address range: an area is "less" only when it
    // ends at or before the other begins, so overlapping areas compare
    // equivalent.  This is the mm_list set's key ordering.
    constexpr bool operator<(const mm& rhs) const noexcept
    { return end() <= rhs.start; }
    // Heterogeneous comparisons against a raw address: an address inside the
    // area is "not less" in either direction, i.e. equivalent to the area —
    // that is what makes lookup-by-address through the set comparator work.
    constexpr bool operator<(void* rhs) const noexcept
    { return end() <= rhs; }
    friend constexpr bool operator<(void* lhs, const mm& rhs) noexcept
    { return lhs < rhs.start; }
};
// Address-ordered collection of the memory areas (mm) that make up one
// address space, together with the page directory that realizes it.
class mm_list {
private:
    // Forwards to mm's operator< overloads; an area and any address inside it
    // compare equivalent, which is what find(void*) relies on.
    // NOTE(review): standard std::set heterogeneous lookup (find(void*))
    // requires an `is_transparent` typedef here — presumably the kernel's own
    // <set> implementation does not; confirm this compiles as intended.
    struct comparator {
        constexpr bool operator()(const mm& lhs, const mm& rhs) const noexcept
        { return lhs < rhs; }
        constexpr bool operator()(const mm& lhs, void* rhs) const noexcept
        { return lhs < rhs; }
        constexpr bool operator()(void* lhs, const mm& rhs) const noexcept
        { return lhs < rhs; }
    };

public:
    using list_type = std::set<mm, comparator,
        types::allocator_adapter<mm, types::kernel_ident_allocator>>;
    using iterator = list_type::iterator;
    using const_iterator = list_type::const_iterator;

public:
    // The kernel's own address space; the constructors below copy it into
    // newly created lists.
    static inline mm_list* s_kernel_mms;

private:
    list_type m_areas;  // all areas, ordered by start address
    page_t m_pd;        // page frame of this space's page directory
    mm* m_brk {};       // program-break area registered via register_brk()

public:
    // for system initialization only
    explicit constexpr mm_list(page_t pd)
        : m_pd(pd) { }

    // default constructor copies kernel_mms
    explicit mm_list();
    // copies kernel_mms and mirrors user space
    explicit mm_list(const mm_list& other);

    // Move: takes the areas and the page directory (source pd zeroed).
    // NOTE(review): m_brk is left null in the destination and untouched in the
    // source — confirm no caller moves a list with a live program break.
    constexpr mm_list(mm_list&& v)
        : m_areas(std::move(v.m_areas))
        , m_pd(std::exchange(v.m_pd, 0)) { }
    ~mm_list();

    // Activate this address space (presumably loads m_pd into CR3 — TODO confirm).
    void switch_pd() const;

    int register_brk(void* addr);
    void* set_brk(void* addr);

    // Find an unused gap of `len` bytes, preferring `hint`; `priv` presumably
    // selects the kernel/user search region — TODO confirm in the definition.
    void* find_avail(void* hint, size_t len, bool priv) const;

    int unmap(void* start, size_t len, bool priv);

    // Insert a new, initially page-less area starting at `start`.
    // NOTE(review): returns a mutable reference obtained through a set
    // iterator; with a standard std::set (const iterators) this would be
    // ill-formed — presumably the kernel's <set> permits it.
    constexpr mm& addarea(void* start, bool w, bool system)
    {
        auto [ iter, inserted ] = m_areas.emplace(mm {
            .start = start,
            .attr {
                .write = w,
                .system = system,
                .mapped = 0,
            },
            .pgs = types::_new<types::kernel_ident_allocator, mm::pages_vector>(),
        });
        assert(inserted);
        return *iter;
    }

    mm& add_empty_area(void* start, std::size_t page_count,
        uint32_t page_attr, bool w, bool system);

    // Unmap and erase every non-kernel area and forget the program break.
    constexpr void clear_user()
    {
        for (auto iter = m_areas.begin(); iter != m_areas.end(); ) {
            if (iter->is_kernel_space()) {
                ++iter;
                continue;
            }
            this->unmap(*iter);
            iter = m_areas.erase(iter);
        }
        m_brk = nullptr;
    }

    // Tear down one area: clear each backing PTE, flush its TLB entry, free
    // the page, then delete the pages vector.  The mm node itself stays in
    // m_areas; callers erase it afterwards (see clear_user()).
    inline void unmap(mm& area)
    {
        int i = 0;

        // TODO:
        // if there are more than 4 pages, calling invlpg
        // should be faster. otherwise, we use movl cr3
        // bool should_invlpg = (area->pgs->size() > 4);

        for (auto& pg : *area.pgs) {
            // pg_pteidx encodes where this page is mapped:
            // high 20 bits = frame of the page table, low 12 bits = PTE index.
            kernel::paccess pa(pg.pg_pteidx >> 12);
            auto pt = (pt_t)pa.ptr();
            assert(pt);
            auto* pte = *pt + (pg.pg_pteidx & 0xfff);
            pte->v = 0;  // clear the mapping before releasing the frame

            free_page(&pg);
            invalidate_tlb((uint32_t)area.start + (i++) * PAGE_SIZE);
        }
        types::pdelete<types::kernel_ident_allocator>(area.pgs);
    }

    // Area containing address `lp`, or nullptr.  Works because the comparator
    // treats contained addresses as equivalent to their area.
    constexpr mm* find(void* lp)
    {
        auto iter = m_areas.find(lp);
        if (iter == m_areas.end())
            return nullptr;
        return &*iter;
    }

    constexpr const mm* find(void* lp) const
    {
        auto iter = m_areas.find(lp);
        if (iter == m_areas.end())
            return nullptr;
        return &*iter;
    }

    // True if [start, start+len), expanded to whole pages, overlaps no
    // existing area.
    constexpr bool is_avail(void* start, size_t len) const noexcept
    {
        start = align_down<12>(start);
        len = vptrdiff(align_up<12>(vptradd(start, len)), start);
        for (const auto& area : m_areas) {
            if (!area.is_avail(start, vptradd(start, len)))
                return false;
        }
        return true;
    }

    // True if no area contains `addr`.
    constexpr bool is_avail(void* addr) const
    {
        auto iter = m_areas.find(addr);
        return iter == m_areas.end();
    }
};
  277. } // namespace memory
  278. } // namespace kernel
// global variables

// Shared `page` instance; presumably used as a common zero/placeholder page —
// TODO confirm usage at the definition site.
inline page empty_page;
  281. // --------------------------------
  282. // inline constexpr page* lto_page(mm* mm_area, void* l_ptr)
  283. // {
  284. // size_t offset = vptrdiff(l_ptr, mm_area->start);
  285. // return &mm_area->pgs->at(offset / PAGE_SIZE);
  286. // }
  287. // inline constexpr page_t to_page(pptr_t ptr)
  288. // {
  289. // return ptr >> 12;
  290. // }
  291. // inline constexpr size_t to_pdi(page_t pg)
  292. // {
  293. // return pg >> 10;
  294. // }
  295. // inline constexpr size_t to_pti(page_t pg)
  296. // {
  297. // return pg & (1024 - 1);
  298. // }
  299. // inline constexpr pptr_t to_pp(page_t p)
  300. // {
  301. // return p << 12;
  302. // }
  303. constexpr size_t v_to_pdi(void* addr)
  304. {
  305. return std::bit_cast<uint32_t>(addr) >> 22;
  306. }
  307. constexpr size_t v_to_pti(void* addr)
  308. {
  309. return (std::bit_cast<uint32_t>(addr) >> 12) & 0x3ff;
  310. }
  311. // inline constexpr pte_t* to_pte(pt_t pt, page_t pg)
  312. // {
  313. // return *pt + to_pti(pg);
  314. // }
  315. // inline void* to_vp(page_t pg)
  316. // {
  317. // return ptovp(to_pp(pg));
  318. // }
  319. // inline pd_t to_pd(page_t pg)
  320. // {
  321. // return reinterpret_cast<pd_t>(to_vp(pg));
  322. // }
  323. // inline pt_t to_pt(page_t pg)
  324. // {
  325. // return reinterpret_cast<pt_t>(to_vp(pg));
  326. // }
  327. // inline pt_t to_pt(pde_t* pde)
  328. // {
  329. // return to_pt(pde->in.pt_page);
  330. // }
  331. // inline pde_t* to_pde(pd_t pd, void* addr)
  332. // {
  333. // return *pd + lto_pdi((pptr_t)addr);
  334. // }
  335. // inline pte_t* to_pte(pt_t pt, void* addr)
  336. // {
  337. // return *pt + lto_pti((pptr_t)addr);
  338. // }
  339. // inline pte_t* to_pte(pde_t* pde, void* addr)
  340. // {
  341. // return to_pte(to_pt(pde), addr);
  342. // }
  343. // inline pte_t* to_pte(pd_t pd, void* addr)
  344. // {
  345. // return to_pte(to_pde(pd, addr), addr);
  346. // }
  347. // inline pte_t* to_pte(pde_t* pde, page_t pg)
  348. // {
  349. // return to_pte(to_pt(pde), pg);
  350. // }