Heinz, 8 months ago
parent commit 641eb7d8da

+ 4 - 5
crates/eonix_mm/src/page_table/page_table.rs

@@ -7,7 +7,7 @@ use super::{
 use crate::{
     address::{PAddr, VRange},
     page_table::PageTableIterator,
-    paging::{GlobalPageAlloc, Page, PageAccess, PageAlloc, PageBlock},
+    paging::{GlobalPageAlloc, Page, PageAccess, PageAlloc, PageBlock, PageSize},
 };
 use core::{marker::PhantomData, ptr::NonNull};
 
@@ -84,11 +84,10 @@ where
         };
 
         // Default: 4 KiB pages (table level 3).
-        PageTableIterator::<M, A, X, UserIterator>::new(root_page_table, range, alloc.clone(), M::LEVELS.len() - 1)
+        PageTableIterator::<M, A, X, UserIterator>::new(root_page_table, range, alloc.clone(), PageSize::_4KbPage)
     }
 
-    pub fn iter_kernel(&self, range: VRange, level_in_array: usize) -> impl Iterator<Item = &mut M::Entry> {
-        assert!(0 < level_in_array && level_in_array < M::LEVELS.len(), "Invalid page table level");
+    pub fn iter_kernel(&self, range: VRange, page_size: PageSize) -> impl Iterator<Item = &mut M::Entry> {
         let alloc = self.root_table_page.allocator();
         let page_table_ptr = X::get_ptr_for_page(&self.root_table_page);
         let root_page_table = unsafe {
@@ -96,7 +95,7 @@ where
             M::RawTable::from_ptr(page_table_ptr)
         };
 
-        PageTableIterator::<M, A, X, KernelIterator>::new(root_page_table, range, alloc.clone(), level_in_array)
+        PageTableIterator::<M, A, X, KernelIterator>::new(root_page_table, range, alloc.clone(), page_size)
     }
 
     fn drop_page_table_recursive(page_table: &Page<A>, levels: &[PageTableLevel]) {

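Note (illustrative sketch, not part of the commit): replacing the raw level_in_array argument with PageSize is what makes the dropped assert unnecessary, since every value a caller can construct maps to a valid table level. The level_for helper below is hypothetical; it only mirrors the mapping that PageTableIterator::table_level() introduces in the next file.

// Standalone sketch, not code from this commit.
#[derive(Clone, Copy)]
enum PageSize {
    _4KbPage = 4096,
    _2MbPage = 2 * 1024 * 1024,
    _1GbPage = 1024 * 1024 * 1024,
}

// Hypothetical helper with the same mapping as table_level():
// exhaustive over the enum, so no "Invalid page table level" panic path.
fn level_for(page_size: PageSize) -> usize {
    match page_size {
        PageSize::_4KbPage => 3,
        PageSize::_2MbPage => 2,
        PageSize::_1GbPage => 1,
    }
}

fn main() {
    assert_eq!(level_for(PageSize::_4KbPage), 3);
    assert_eq!(level_for(PageSize::_2MbPage), 2);
    assert_eq!(level_for(PageSize::_1GbPage), 1);
}
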
+ 21 - 19
crates/eonix_mm/src/page_table/pte_iterator.rs

@@ -4,9 +4,9 @@ use super::{
 };
 use crate::{
     address::{AddrOps as _, VRange},
-    paging::{Page, PageAccess, PageAlloc, LEVEL0_PAGE_SIZE, LEVEL1_PAGE_SIZE, LEVEL2_PAGE_SIZE},
+    paging::{Page, PageAccess, PageAlloc, PageSize},
 };
-use core::{marker::PhantomData, panic};
+use core::marker::PhantomData;
 
 pub struct KernelIterator;
 pub struct UserIterator;
@@ -60,8 +60,7 @@ where
     X: PageAccess,
     K: IteratorType<M>,
 {
-    // from root to down: 0 1 2 3
-    level_in_array: usize,
+    page_size: PageSize,
     remaining: usize,
 
     indicies: [u16; 8],
@@ -79,13 +78,22 @@ where
     X: PageAccess,
     K: IteratorType<M>,
 {
+    fn table_level(&self) -> usize {
+        match self.page_size {
+            PageSize::_4KbPage => 3,
+            PageSize::_2MbPage => 2,
+            PageSize::_1GbPage => 1,
+        }
+    }
+
     fn parse_tables_starting_from(&mut self, idx_level: usize) {
+        let table_level = self.table_level();
 
         for (idx, &pt_idx) in self
             .indicies
             .iter()
             .enumerate()
-            .take(self.level_in_array)
+            .take(table_level)
             .skip(idx_level)
         {
             let [parent_table, child_table] = unsafe {
@@ -100,20 +108,13 @@ where
         }
     }
 
-    pub fn new(page_table: M::RawTable<'a>, range: VRange, alloc: A, level_in_array: usize) -> Self {
+    pub fn new(page_table: M::RawTable<'a>, range: VRange, alloc: A, page_size: PageSize) -> Self {
         let start = range.start().floor();
         let end = range.end().ceil();
 
-        // not allow to modify root page table
-        let page_size = match level_in_array {
-            1 => LEVEL2_PAGE_SIZE,
-            2 => LEVEL1_PAGE_SIZE,
-            3 => LEVEL0_PAGE_SIZE,
-            _ => panic!("Out of index"),
-        };
         let mut me = Self {
-            level_in_array,
-            remaining: (end - start) / page_size,
+            page_size,
+            remaining: (end - start) / (page_size as usize),
             indicies: [0; 8],
             tables: [const { None }; 8],
             alloc,
@@ -148,10 +149,11 @@ where
             self.remaining -= 1;
         }
 
-        let retval = self.tables[self.level_in_array]
+        let table_level = self.table_level();
+        let retval = self.tables[table_level]
             .as_mut()
             .unwrap()
-            .index_mut(self.indicies[self.level_in_array]);
+            .index_mut(self.indicies[table_level]);
 
         let idx_level_start_updating = M::LEVELS
             .iter()
@@ -159,14 +161,14 @@ where
             .enumerate()
             .rev()
             .skip_while(|(i, (level, idx))| {
-                *i >= self.level_in_array && **idx == level.max_index()
+                *i >= table_level && **idx == level.max_index()
             })
             .map(|(i, _)| i)
             .next()
             .expect("Index out of bounds");
 
         self.indicies[idx_level_start_updating] += 1;
-        self.indicies[idx_level_start_updating + 1..self.level_in_array].fill(0);
+        self.indicies[idx_level_start_updating + 1..table_level].fill(0);
         self.parse_tables_starting_from(idx_level_start_updating);
 
         Some(retval)

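Note (standalone sketch, not commit code): remaining is now computed as (end - start) / (page_size as usize). A fieldless enum with explicit discriminants casts to exactly those discriminant values, so the arithmetic is identical to the old LEVEL0/1/2_PAGE_SIZE constants. The 16 MiB range below is made up for illustration.

// Illustrative sketch only, not part of the commit.
#[derive(Clone, Copy)]
enum PageSize {
    _4KbPage = 4096,
    _2MbPage = 2 * 1024 * 1024,
    _1GbPage = 1024 * 1024 * 1024,
}

fn main() {
    // Hypothetical 16 MiB virtual range, already floor/ceil aligned.
    let (start, end) = (0usize, 16 * 1024 * 1024);

    // Same expression as in PageTableIterator::new().
    let remaining = (end - start) / (PageSize::_2MbPage as usize);
    assert_eq!(remaining, 8); // eight 2 MiB entries cover 16 MiB

    let remaining = (end - start) / (PageSize::_4KbPage as usize);
    assert_eq!(remaining, 4096); // 4096 entries of 4 KiB cover 16 MiB

    assert_eq!(PageSize::_1GbPage as usize, 1usize << 30);
}

Keeping PageSize C-like (fieldless, with explicit discriminants) is what makes the as usize cast in PageTableIterator::new() valid.
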
+ 1 - 1
crates/eonix_mm/src/paging.rs

@@ -3,7 +3,7 @@ mod page_alloc;
 mod pfn;
 mod raw_page;
 
-pub use page::{Page, PageAccess, PageBlock, PAGE_SIZE, LEVEL0_PAGE_SIZE, LEVEL1_PAGE_SIZE, LEVEL2_PAGE_SIZE, PAGE_SIZE_BITS};
+pub use page::{Page, PageAccess, PageBlock, PAGE_SIZE, PageSize, PAGE_SIZE_BITS};
 pub use page_alloc::{GlobalPageAlloc, PageAlloc};
 pub use pfn::PFN;
 pub use raw_page::RawPage;

+ 7 - 3
crates/eonix_mm/src/paging/page.rs

@@ -3,9 +3,13 @@ use crate::address::{AddrRange, PAddr};
 use core::{fmt, mem::ManuallyDrop, ptr::NonNull, sync::atomic::Ordering};
 
 pub const PAGE_SIZE: usize = 4096;
-pub const LEVEL0_PAGE_SIZE: usize = 4096;
-pub const LEVEL1_PAGE_SIZE: usize = 2 * 1024 * 1024;
-pub const LEVEL2_PAGE_SIZE: usize = 1 * 1024 * 1024 * 1024;
+
+#[derive(Clone, Copy)]
+pub enum PageSize {
+    _4KbPage = 4096,
+    _2MbPage = 2 * 1024 * 1024,
+    _1GbPage = 1 * 1024 * 1024 * 1024,
+}
 pub const PAGE_SIZE_BITS: u32 = PAGE_SIZE.trailing_zeros();
 
 /// A block of memory that is aligned to the page size and can be used for
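Note (standalone sketch, not part of the commit): the new discriminants can be sanity-checked against the PAGE_SIZE and PAGE_SIZE_BITS definitions that remain in this file; each larger variant sits exactly one 512-entry (9 index bits) paging level above the previous one.

// Illustrative sketch only; constants copied from paging/page.rs above.
const PAGE_SIZE: usize = 4096;
const PAGE_SIZE_BITS: u32 = PAGE_SIZE.trailing_zeros();

#[derive(Clone, Copy)]
enum PageSize {
    _4KbPage = 4096,
    _2MbPage = 2 * 1024 * 1024,
    _1GbPage = 1024 * 1024 * 1024,
}

fn main() {
    assert_eq!(PAGE_SIZE_BITS, 12);
    assert_eq!(PageSize::_4KbPage as usize, PAGE_SIZE);
    // Each step up skips one level of a 512-entry page table (9 index bits).
    assert_eq!(PageSize::_2MbPage as usize, PAGE_SIZE << 9);
    assert_eq!(PageSize::_1GbPage as usize, PAGE_SIZE << 18);
}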