mm.hpp

#pragma once

#include <kernel/mem.h>
#include <kernel/vfs.hpp>
#include <types/allocator.hpp>
#include <types/list.hpp>
#include <types/types.h>
#include <types/vector.hpp>

constexpr size_t THREAD_KERNEL_STACK_SIZE = 2 * PAGE_SIZE;

struct page_attr {
    uint32_t cow : 1;
};

struct page {
    page_t phys_page_id;
    size_t* ref_count;
    struct page_attr attr;
};

using page_arr = types::vector<page, types::kernel_ident_allocator>;

struct mm_attr {
    uint32_t read : 1;
    uint32_t write : 1;
    uint32_t system : 1;
};

class mm {
public:
    linr_ptr_t start;
    struct mm_attr attr;
    page_directory_entry* pd;
    page_arr* pgs;
    fs::inode* mapped_file;
    size_t file_offset;

public:
    mm(const mm& val);
    mm(linr_ptr_t start, page_directory_entry* pd, bool write, bool system);
};
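
// Construction sketch (hypothetical call site, not taken from this kernel;
// the start address and page directory are assumptions): create a writable,
// non-system mapping area.
//
//   page_directory_entry* pd = alloc_pd();
//   mm area((linr_ptr_t)0x40000000, pd, true /* write */, false /* system */);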

using mm_list = types::list<mm, types::kernel_ident_allocator>;

// in mem.cpp
extern mm_list* kernel_mms;
extern page empty_page;

// translate a physical address to a virtual (mapped) address
void* p_ptr_to_v_ptr(phys_ptr_t p_ptr);
// translate a linear address to a physical address
phys_ptr_t l_ptr_to_p_ptr(const mm_list* mms, linr_ptr_t v_ptr);
// translate a virtual (mapped) address to a physical address
phys_ptr_t v_ptr_to_p_ptr(void* v_ptr);
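
// Round-trip sketch of the translation helpers above (illustrative only;
// the linear address is made up):
//
//   phys_ptr_t pa = l_ptr_to_p_ptr(kernel_mms, (linr_ptr_t)0xc0100000);
//   void* va = p_ptr_to_v_ptr(pa);
//   // v_ptr_to_p_ptr(va) should yield pa again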

// find the mm_area containing l_ptr
// @return the pointer to the mm_area, or nullptr if no area contains it
mm* find_mm_area(mm_list* mms, linr_ptr_t l_ptr);

// find the page that l_ptr points to
// @return the pointer to the struct page if found, NULL if not found
struct page* find_page_by_l_ptr(const mm_list* mms, linr_ptr_t l_ptr);

static inline page_t phys_addr_to_page(phys_ptr_t ptr)
{
    return ptr >> 12;
}

static inline pd_i_t page_to_pd_i(page_t p)
{
    return p >> 10;
}

static inline pt_i_t page_to_pt_i(page_t p)
{
    return p & (1024 - 1);
}

static inline phys_ptr_t page_to_phys_addr(page_t p)
{
    return p << 12;
}

static inline pd_i_t linr_addr_to_pd_i(linr_ptr_t ptr)
{
    return page_to_pd_i(phys_addr_to_page(ptr));
}

static inline pt_i_t linr_addr_to_pt_i(linr_ptr_t ptr)
{
    return page_to_pt_i(phys_addr_to_page(ptr));
}
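
// Worked example of the page arithmetic above, assuming the classic
// 4 KiB-page, two-level x86 layout these helpers imply (values are made up):
//
//   phys_addr_to_page(0x00403000) == 0x403   // drop the 12-bit page offset
//   page_to_pd_i(0x403)           == 0x1     // top 10 bits of the page number
//   page_to_pt_i(0x403)           == 0x3     // low 10 bits of the page number
//   page_to_phys_addr(0x403)      == 0x00403000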

static inline page_directory_entry* mms_get_pd(const mm_list* mms)
{
    return mms->begin()->pd;
}

// map the page to the end of the mm_area in pd
int k_map(
    mm* mm_area,
    const struct page* page,
    int read,
    int write,
    int priv,
    int cow);
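
// Usage sketch for k_map (hypothetical call site; the meaning of the flag
// values is an assumption): append a freshly allocated page to an mm_area
// as readable, writable, non-privileged and not copy-on-write.
//
//   struct page pg = allocate_page();
//   k_map(area, &pg, 1 /* read */, 1 /* write */, 0 /* priv */, 0 /* cow */);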

// @param len is rounded up to a 4 KiB boundary automatically; the exceeding
// part is zero-filled and is not written back to the file
int mmap(
    void* hint,
    size_t len,
    fs::inode* file,
    size_t offset,
    int write,
    int priv);
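
// Usage sketch for mmap (hypothetical; the hint address, length and inode,
// and the flag meanings, are assumptions): map 8 KiB of a file read-write.
//
//   fs::inode* file = /* looked up through the VFS */;
//   mmap((void*)0x40000000, 8192, file, 0, 1 /* write */, 0 /* priv */);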

// allocate a raw page
page_t alloc_raw_page(void);

// allocate n raw page(s)
// @return the id of the first page allocated
page_t alloc_n_raw_pages(size_t n);

// allocate a struct page together with the raw page
struct page allocate_page(void);

page_directory_entry* alloc_pd(void);
page_table_entry* alloc_pt(void);
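
// Allocation sketch (illustrative only): the raw allocators hand out page
// ids, while allocate_page() also fills in a struct page.
//
//   page_t first = alloc_n_raw_pages(4);    // id of the first of four raw pages
//   struct page pg = allocate_page();       // raw page plus its struct page
//   page_directory_entry* pd = alloc_pd();  // a new page directory
//   page_table_entry* pt = alloc_pt();      // a new page table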