mm.hpp

#pragma once

#include "types/size.h"

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/list.hpp>
#include <types/types.h>
#include <types/vector.hpp>

constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;
struct page {
    page_t phys_page_id;
    pte_t* pte;
    size_t* ref_count;
    union {
        uint32_t v;
        struct {
            uint32_t cow : 1;
        } in;
    } attr;
};

using page_arr = types::vector<page, types::kernel_ident_allocator>;
class mm {
public:
    void* start;
    union {
        uint32_t v;
        struct {
            uint32_t read : 1;
            uint32_t write : 1;
            uint32_t system : 1;
        } in;
    } attr;
    pd_t pd;
    page_arr* pgs;
    fs::inode* mapped_file;
    size_t file_offset;

public:
    mm(const mm& val);
    mm(void* start, pd_t pd, bool write, bool system);
};
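
// Illustrative sketch (not part of the original header): an mm object
// describes one contiguous mapped area: its start address, protection
// attributes, the backing pages and, for file mappings, the mapped inode
// and offset. The constructor arguments mirror this; the address below is
// hypothetical.
//
//     mm area((void*)0x40000000, pd, /* write */ true, /* system */ false);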
using mm_list = types::list<mm, types::kernel_ident_allocator>;

// defined in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;
// translate a physical address to a virtual (mapped) address
void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);

// translate a linear address to a physical address
phys_ptr_t l_ptr_to_p_ptr(const mm_list* mms, void* v_ptr);

// translate a virtual (mapped) address to a physical address
phys_ptr_t v_ptr_to_p_ptr(void* v_ptr);

// @return the pointer to the mm area containing l_ptr,
//         or nullptr if not found
mm* find_mm_area(mm_list* mms, void* l_ptr);

// find the page that l_ptr points to
// @return the pointer to the struct page if found, NULL if not found
struct page* find_page_by_l_ptr(const mm_list* mms, void* l_ptr);
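
// Usage sketch (illustrative, not from the original source): resolve a
// faulting linear address to its mm area and backing page, e.g. inside a
// page-fault handler; `mms` and `fault_addr` are assumed to be supplied by
// the caller.
//
//     mm* area = find_mm_area(mms, fault_addr);
//     if (!area)
//         return; // address is not inside any mapped area
//     page* pg = find_page_by_l_ptr(mms, fault_addr);
//     if (pg && pg->attr.in.cow) {
//         // copy-on-write handling would go here
//     }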
inline size_t vptrdiff(void* p1, void* p2)
{
    return (uint8_t*)p1 - (uint8_t*)p2;
}

inline page_t phys_addr_to_page(phys_ptr_t ptr)
{
    return ptr >> 12;
}

inline pd_i_t page_to_pd_i(page_t p)
{
    return p >> 10;
}

inline constexpr pt_i_t page_to_pt_i(page_t p)
{
    return p & (1024 - 1);
}

inline phys_ptr_t page_to_phys_addr(page_t p)
{
    return p << 12;
}
inline pd_i_t linr_addr_to_pd_i(void* ptr)
{
    return page_to_pd_i(phys_addr_to_page((phys_ptr_t)ptr));
}

inline pt_i_t linr_addr_to_pt_i(void* ptr)
{
    return page_to_pt_i(phys_addr_to_page((phys_ptr_t)ptr));
}
inline pd_t mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}

inline void* to_vp(page_t pg)
{
    return p_ptr_to_v_ptr(page_to_phys_addr(pg));
}

inline pd_t to_pd(page_t pg)
{
    return reinterpret_cast<pd_t>(to_vp(pg));
}

inline pt_t to_pt(page_t pg)
{
    return reinterpret_cast<pt_t>(to_vp(pg));
}

inline pt_t to_pt(pde_t* pde)
{
    return to_pt(pde->in.pt_page);
}

inline pde_t* to_pde(pd_t pd, void* addr)
{
    return *pd + linr_addr_to_pd_i(addr);
}

inline pte_t* to_pte(pt_t pt, void* addr)
{
    return *pt + linr_addr_to_pt_i(addr);
}

inline pte_t* to_pte(pt_t pt, page_t pg)
{
    return *pt + page_to_pt_i(pg);
}

inline pte_t* to_pte(pde_t* pde, page_t pg)
{
    return to_pte(to_pt(pde), pg);
}

inline pte_t* to_pte(pde_t* pde, void* addr)
{
    return to_pte(to_pt(pde), addr);
}

inline pte_t* to_pte(pd_t pd, void* addr)
{
    return to_pte(to_pde(pd, addr), addr);
}
// @return the first address past the end of the mm_area
inline void* mmend(const mm* mm_area)
{
    return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
}
// map the page to the end of the mm_area in pd
int k_map(
    mm* mm_area,
    page* page,
    int read,
    int write,
    int priv,
    int cow);
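
// Usage sketch (illustrative, not from the original source): grow an area
// by one anonymous page; allocate_page() is declared below and the flag
// values here are assumptions.
//
//     struct page pg = allocate_page();
//     k_map(area, &pg, /* read */ 1, /* write */ 1, /* priv */ 0, /* cow */ 0);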
// @param len is aligned to a 4 KiB boundary automatically; the exceeding part
//        will be filled with '0's and not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
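
// Usage sketch (illustrative; the inode and address are hypothetical):
// map the first 8 KiB of a file, writable, as a non-privileged mapping.
//
//     fs::inode* ino = /* obtained from the VFS */;
//     mmap((void*)0x40000000, 2 * PAGE_SIZE, ino, 0, /* write */ 1, /* priv */ 0);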
// allocate a raw page
page_t alloc_raw_page(void);

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a struct page together with the raw page
struct page allocate_page(void);

pd_t alloc_pd(void);
pt_t alloc_pt(void);
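
// Usage sketch (illustrative): a kernel thread stack of
// THREAD_KERNEL_STACK_SIZE (two pages) could be backed by consecutive raw
// pages and reached through the mapped physical address; the variable names
// are assumptions.
//
//     page_t first = alloc_n_raw_pages(THREAD_KERNEL_STACK_SIZE / PAGE_SIZE);
//     void* stack_bottom = to_vp(first);
//     void* stack_top = (char*)stack_bottom + THREAD_KERNEL_STACK_SIZE;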