mm.hpp 3.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154
  1. #pragma once
  2. #include <kernel/mem.h>
  3. #include <types/allocator.hpp>
  4. #include <types/list.hpp>
  5. #include <types/types.h>
  6. #include <types/vector.hpp>
// size of each thread's kernel-mode stack (two pages)
constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;
// per-page attribute flags
struct page_attr {
    // set when the page is mapped copy-on-write and must be
    // duplicated before the first write to it
    uint32_t cow : 1;
};
// bookkeeping record for one physical page frame
struct page {
    page_t phys_page_id;   // physical page frame number
    size_t* ref_count;     // shared counter; presumably the number of
                           // mappings referencing this frame (for COW) —
                           // TODO confirm against mem.cpp
    struct page_attr attr; // per-mapping flags (cow)
};
// ordered collection of pages backing one memory area,
// allocated with the kernel identity allocator
using page_arr = types::vector<page, types::kernel_ident_allocator>;
// access attributes of a memory area
struct mm_attr {
    uint32_t read : 1;   // area is readable
    uint32_t write : 1;  // area is writable
    uint32_t system : 1; // presumably a kernel (non-user) mapping — TODO confirm
};
// one contiguous mapped memory area of an address space
class mm {
public:
    linr_ptr_t start;         // starting linear address of the area
    struct mm_attr attr;      // access attributes (read/write/system)
    page_directory_entry* pd; // page directory this area is mapped in
    page_arr* pgs;            // pages backing the area
public:
    // copy constructor (defined in mem.cpp)
    mm(const mm& val);
    // construct an empty area starting at `start`, mapped in `pd`,
    // with write/system attribute flags
    mm(linr_ptr_t start, page_directory_entry* pd, bool write, bool system);
};
// list of memory areas forming one address space
using mm_list = types::list<mm, types::kernel_ident_allocator>;
// defined in mem.cpp
extern mm_list* kernel_mms; // memory areas of the kernel address space
extern page empty_page;     // shared page; presumably the zero page handed
                            // out copy-on-write — TODO confirm in mem.cpp
// translate physical address to virtual(mapped) address
void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);
// translate linear address to physical address within the
// address space described by mms
phys_ptr_t l_ptr_to_p_ptr(const mm_list* mms, linr_ptr_t v_ptr);
// translate virtual(mapped) address to physical address
phys_ptr_t v_ptr_to_p_ptr(void* v_ptr);
// check if the l_ptr is contained in one of the areas of mms
// @return GB_OK if l_ptr is in an area,
//         GB_FAILED if not
int is_l_ptr_valid(const mm_list* mms, linr_ptr_t l_ptr);
// find the corresponding page the l_ptr pointing to
// @return the pointer to the struct if found, NULL if not found
struct page* find_page_by_l_ptr(const mm_list* mms, linr_ptr_t l_ptr);
  49. static inline page_t phys_addr_to_page(phys_ptr_t ptr)
  50. {
  51. return ptr >> 12;
  52. }
  53. static inline pd_i_t page_to_pd_i(page_t p)
  54. {
  55. return p >> 10;
  56. }
  57. static inline pt_i_t page_to_pt_i(page_t p)
  58. {
  59. return p & (1024 - 1);
  60. }
  61. static inline phys_ptr_t page_to_phys_addr(page_t p)
  62. {
  63. return p << 12;
  64. }
  65. static inline pd_i_t linr_addr_to_pd_i(linr_ptr_t ptr)
  66. {
  67. return page_to_pd_i(phys_addr_to_page(ptr));
  68. }
  69. static inline pd_i_t linr_addr_to_pt_i(linr_ptr_t ptr)
  70. {
  71. return page_to_pt_i(phys_addr_to_page(ptr));
  72. }
// page directory of the address space described by mms, read from the
// first mm area in the list.
// NOTE(review): assumes mms is non-empty and that all areas share the
// same pd — confirm against mem.cpp
static inline page_directory_entry* mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}
  77. static inline page_directory_entry* lptr_to_pde(const mm_list* mms, linr_ptr_t l_ptr)
  78. {
  79. return mms_get_pd(mms) + linr_addr_to_pd_i((phys_ptr_t)l_ptr);
  80. }
  81. static inline page_table_entry* lptr_to_pte(const mm_list* mms, linr_ptr_t l_ptr)
  82. {
  83. page_directory_entry* pde = lptr_to_pde(mms, l_ptr);
  84. page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(page_to_phys_addr(pde->in.pt_page));
  85. return pte + linr_addr_to_pt_i((phys_ptr_t)l_ptr);
  86. }
// page directory entry for a linear address, indexed by its translated
// *physical* address.
// NOTE(review): unlike lptr_to_pde, the directory index here is derived
// from the physical address after translation, not from l_ptr itself —
// confirm this is intentional (the two only agree for identity-style
// mappings).
static inline page_directory_entry* lp_to_pde(const mm_list* mms, linr_ptr_t l_ptr)
{
    phys_ptr_t p_ptr = l_ptr_to_p_ptr(mms, l_ptr);
    page_directory_entry* pde = mms_get_pd(mms) + linr_addr_to_pd_i(p_ptr);
    return pde;
}
  93. // get the corresponding pte for the linear address
  94. // for example: l_ptr = 0x30001000 will return the pte including the page it is mapped to
  95. static inline page_table_entry* lp_to_pte(const mm_list* mms, linr_ptr_t l_ptr)
  96. {
  97. phys_ptr_t p_ptr = l_ptr_to_p_ptr(mms, l_ptr);
  98. page_directory_entry* pde = lp_to_pde(mms, l_ptr);
  99. phys_ptr_t p_pt = page_to_phys_addr(pde->in.pt_page);
  100. page_table_entry* pte = (page_table_entry*)p_ptr_to_v_ptr(p_pt);
  101. pte += linr_addr_to_pt_i(p_ptr);
  102. return pte;
  103. }
// map the page to the end of the mm_area in pd
// @param mm_area  area to append the mapping to
// @param page     physical page to map
// @param read/write/priv/cow  attribute flags for the new mapping
// @return presumably GB_OK / GB_FAILED — confirm in mem.cpp
int k_map(
    mm* mm_area,
    const struct page* page,
    int read,
    int write,
    int priv,
    int cow);
// allocate a raw page
// @return the page frame number of the allocated page
page_t alloc_raw_page(void);
// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);
// allocate a struct page together with the raw page
struct page allocate_page(void);
// allocate a page directory / page table
page_directory_entry* alloc_pd(void);
page_table_entry* alloc_pt(void);