refactor: refactor page alloc in rust

shao, 2 weeks ago
commit 5b3f887a65

+ 0 - 1
CMakeLists.txt

@@ -42,7 +42,6 @@ set(KERNEL_MAIN_SOURCES src/kinit.cpp
                         src/kernel/async/lock.cc
                         src/kernel/allocator.cc
                         src/kernel/process.cpp
-                        src/kernel/mem/paging.cc
                         src/kernel/mem/slab.cc
                         src/kernel/vga.cpp
                         src/kernel/hw/acpi.cc

+ 3 - 3
arch/src/x86_64/context.rs

@@ -1,4 +1,4 @@
-use core::arch::asm;
+use core::arch::naked_asm;
 
 #[repr(C)]
 #[derive(Debug, Default)]
@@ -48,7 +48,7 @@ impl TaskContext {
 
 #[naked]
 unsafe extern "C" fn _switch_to(current_context_sp: &mut u64, next_context_sp: &mut u64) {
-    asm!(
+    naked_asm!(
         "pushf",
         "push %rbp",
         "push %rbx",
@@ -66,6 +66,6 @@ unsafe extern "C" fn _switch_to(current_context_sp: &mut u64, next_context_sp: &
         "pop %rbp",
         "popf",
         "ret",
-        options(att_syntax, noreturn),
+        options(att_syntax),
     );
 }
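
The switch from `asm!` to `naked_asm!` tracks the upstream Rust change where `#[naked]` functions must consist of a single `naked_asm!` invocation; `naked_asm!` never falls through, so the old `options(noreturn)` marker is dropped. A minimal sketch of the pattern (not from this commit; it assumes the crate enables naked-function support, as this kernel does):

use core::arch::naked_asm;

// A no-op naked function: the body is exactly one naked_asm! block and the
// asm itself is responsible for returning.
#[naked]
unsafe extern "C" fn naked_nop() {
    naked_asm!("ret", options(att_syntax));
}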

+ 0 - 23
include/kernel/mem/paging.hpp

@@ -96,29 +96,6 @@ struct page {
 
 inline page* PAGE_ARRAY;
 
-void create_zone(uintptr_t start, uintptr_t end);
-void mark_present(uintptr_t start, uintptr_t end);
-
-[[nodiscard]] page* alloc_page();
-// order represents power of 2
-[[nodiscard]] page* alloc_pages(unsigned order);
-
-// order represents power of 2
-void free_pages(page* page, unsigned order);
-void free_page(page* page);
-
-// order represents power of 2
-void free_pages(pfn_t pfn, unsigned order);
-void free_page(pfn_t pfn);
-
-// clear the page all zero
-[[nodiscard]] pfn_t alloc_page_table();
-
-pfn_t page_to_pfn(page* page);
-page* pfn_to_page(pfn_t pfn);
-
-void increase_refcount(page* page);
-
 constexpr unsigned long PAGE_FAULT_P = 0x00000001;
 constexpr unsigned long PAGE_FAULT_W = 0x00000002;
 constexpr unsigned long PAGE_FAULT_U = 0x00000004;

+ 11 - 2
src/kernel/allocator.cc

@@ -16,6 +16,15 @@ constexpr uintptr_t KERNEL_HEAP_SIZE = KERNEL_HEAP_END - KERNEL_HEAP_START;
 
 namespace types::memory {
 
+extern "C" {
+    struct Page {
+       char item[32]; 
+    };
+    Page* c_alloc_pages(uint32_t order);
+    uintptr_t page_to_pfn(Page* page);
+    uintptr_t c_alloc_page_table();
+}
+
 struct mem_blk_flags {
     unsigned long is_free : 8;
     unsigned long has_next : 8;
@@ -117,11 +126,11 @@ std::byte* brk_memory_allocator::brk(byte* addr) {
 
         auto pdpte = pdpt[std::get<2>(idx)];
         if (!pdpte.pfn())
-            pdpte.set(PA_KERNEL_PAGE_TABLE, alloc_page_table());
+            pdpte.set(PA_KERNEL_PAGE_TABLE, c_alloc_page_table());
 
         auto pde = pdpte.parse()[std::get<3>(idx)];
         assert(!(pde.attributes() & PA_P));
-        pde.set(PA_KERNEL_DATA_HUGE, page_to_pfn(alloc_pages(9)));
+        pde.set(PA_KERNEL_DATA_HUGE, page_to_pfn(c_alloc_pages(9)) << 12);
 
         current_allocated += 0x200000;
     }
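
Two conventions these call sites rely on: the Rust `page_to_pfn` exported from page_alloc.rs returns a bare frame index (the removed C++ version returned the index already multiplied by 0x1000), so C++ callers now shift left by 12 to recover a physical address; and the opaque `struct Page { char item[32]; }` is only a size stand-in for the `#[repr(C)]` `Page` defined on the Rust side. A hedged sketch of that layout assumption, using a hypothetical mirror type just to show the arithmetic:

use core::ptr::NonNull;
use core::sync::atomic::AtomicU32;

// Field-by-field mirror of page_alloc.rs's Page on x86_64:
// two list pointers (16) + usize flags (8) + u32 order (4) + AtomicU32 refcount (4) = 32 bytes,
// which is why the C++ side can declare it as an opaque 32-byte blob.
#[repr(C)]
struct PageMirror {
    prev: Option<NonNull<PageMirror>>,
    next: Option<NonNull<PageMirror>>,
    flags: usize,
    order: u32,
    refcount: AtomicU32,
}

fn main() {
    assert_eq!(core::mem::size_of::<PageMirror>(), 32);
    // Frame index -> physical address, as done at the call sites above.
    let pfn: usize = 0x1a3;
    assert_eq!(pfn << 12, 0x1a3000);
}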

+ 4 - 2
src/kernel/mem.rs

@@ -4,9 +4,11 @@ pub mod phys;
 mod mm_area;
 mod mm_list;
 mod page_table;
-mod vrange;
+mod page_alloc;
+mod address;
 
 pub(self) use mm_area::MMArea;
 pub use mm_list::{handle_page_fault, FileMapping, MMList, Mapping, PageFaultError, Permission};
 pub(self) use page_table::{PageTable, PTE};
-pub use vrange::{VAddr, VRange};
+pub use address::{VAddr, PAddr, VPN, PFN, VRange};
+pub use page_alloc::{alloc_page, alloc_pages, free_pages, mark_present, create_pages};

+ 392 - 0
src/kernel/mem/address.rs

@@ -0,0 +1,392 @@
+use core::{
+    cmp::Ordering,
+    fmt::{self, Debug, Formatter},
+    ops::{Add, Sub, RangeBounds},
+};
+
+#[repr(C)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+pub struct PAddr(pub usize);
+
+#[repr(C)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+pub struct VAddr(pub usize);
+
+#[repr(C)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+pub struct PFN(pub usize);
+
+#[repr(C)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+pub struct VPN(pub usize);
+
+const PAGE_SIZE: usize = 4096;
+const PAGE_SIZE_BITS: usize = 12;
+const USER_SPACE_MEMORY_TOP: VAddr = VAddr(0x8000_0000_0000);
+
+impl From<PAddr> for usize {
+    fn from(v: PAddr) -> Self {
+        v.0
+    }
+}
+
+impl From<PFN> for usize {
+    fn from(v: PFN) -> Self {
+        v.0
+    }
+}
+
+impl From<VAddr> for usize {
+    fn from(v: VAddr) -> Self {
+       v.0 
+    }
+}
+
+impl From<VPN> for usize {
+    fn from(v: VPN) -> Self {
+        v.0
+    }
+}
+
+impl From<usize> for PAddr {
+    fn from(v: usize) -> Self {
+        Self(v)
+    }
+}
+
+impl From<usize> for PFN {
+    fn from(v: usize) -> Self {
+        Self(v)
+    }
+}
+
+impl From<usize> for VAddr {
+    fn from(v: usize) -> Self {
+        Self(v)
+    }
+}
+
+impl From<usize> for VPN {
+    fn from(v: usize) -> Self {
+        Self(v)
+    }
+}
+
+
+impl From<VPN> for VAddr {
+    fn from(v: VPN) -> Self {
+        Self(v.0 << PAGE_SIZE_BITS)
+    }
+}
+
+impl From<VAddr> for VPN {
+    fn from(v: VAddr) -> Self {
+        assert_eq!(v.page_offset(), 0);
+        v.floor_vpn()
+    }
+}
+
+impl From<PAddr> for PFN {
+    fn from(v: PAddr) -> Self {
+        assert_eq!(v.page_offset(), 0);
+        v.floor_pfn()
+    }
+}
+
+impl From<PFN> for PAddr {
+    fn from(v: PFN) -> Self {
+        Self(v.0 << PAGE_SIZE_BITS)
+    }
+}
+
+impl PAddr {
+    pub fn floor_pfn(&self) -> PFN {
+        PFN(self.0 / PAGE_SIZE)
+    }
+
+    pub fn ceil_pfn(&self) -> PFN {
+        PFN((self.0 + PAGE_SIZE - 1) / PAGE_SIZE)
+    }
+
+    pub fn page_offset(&self) -> usize {
+        self.0 & (PAGE_SIZE - 1)
+    }
+
+    pub fn is_aligned(&self) -> bool {
+        self.page_offset() == 0
+    }
+}
+
+impl PFN {
+    pub fn buddy_pfn(&self, order: u32) -> PFN {
+        PFN::from(self.0 ^ (1 << order))
+    }
+
+    pub fn combined_pfn(&self, buddy_pfn: PFN) -> PFN {
+        PFN::from(self.0 & buddy_pfn.0)
+    }
+}
+
+impl VAddr {
+    pub const NULL: Self = Self(0);
+
+    pub fn floor_vpn(&self) -> VPN {
+        VPN(self.0 / PAGE_SIZE)
+    }
+
+    pub fn ceil_vpn(&self) -> VPN {
+        VPN((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
+    }
+
+    pub fn page_offset(&self) -> usize {
+        self.0 & (PAGE_SIZE - 1)
+    }
+
+    pub fn is_aligned(&self) -> bool {
+        self.page_offset() == 0
+    }
+
+    pub fn is_user(&self) -> bool {
+        self.0 != 0 && self < &USER_SPACE_MEMORY_TOP
+    }
+
+    pub fn floor(&self) -> Self {
+        VAddr(self.0 & !(PAGE_SIZE - 1))
+    }
+
+    pub fn ceil(&self) -> Self {
+        VAddr((self.0 + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1))
+    }
+}
+
+impl Sub for VAddr {
+    type Output = usize;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        self.0 - rhs.0
+    }
+}
+
+impl Sub<usize> for VAddr {
+    type Output = Self;
+
+    fn sub(self, rhs: usize) -> Self::Output {
+        VAddr(self.0 - rhs)
+    }
+}
+
+impl Add<usize> for VAddr {
+    type Output = Self;
+
+    fn add(self, rhs: usize) -> Self::Output {
+        VAddr(self.0 + rhs)
+    }
+}
+
+impl Sub for PAddr {
+    type Output = usize;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        self.0 - rhs.0
+    }
+}
+
+impl Sub<usize> for PAddr {
+    type Output = Self;
+
+    fn sub(self, rhs: usize) -> Self::Output {
+        PAddr(self.0 - rhs)
+    }
+}
+
+impl Add<usize> for PAddr {
+    type Output = Self;
+
+    fn add(self, rhs: usize) -> Self::Output {
+        PAddr(self.0 + rhs)
+    }
+}
+
+impl Debug for VAddr {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "VAddr{:#x}", self.0)
+    }
+}
+
+impl Debug for PAddr {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "PAddr{:#x}", self.0)
+    }
+}
+
+impl Add<usize> for PFN {
+    type Output = Self;
+
+    fn add(self, rhs: usize) -> Self::Output {
+        PFN(self.0 + rhs)
+    } 
+}
+
+impl Sub for PFN {
+    type Output = usize;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        self.0 - rhs.0
+    }
+}
+
+impl Sub<usize> for PFN {
+    type Output = Self;
+
+    fn sub(self, rhs: usize) -> Self::Output {
+        PFN(self.0 - rhs)
+    }
+}
+
+impl Add<usize> for VPN {
+    type Output = Self;
+
+    fn add(self, rhs: usize) -> Self::Output {
+        VPN(self.0 + rhs)
+    } 
+}
+
+impl Sub for VPN {
+    type Output = usize;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        self.0 - rhs.0
+    }
+}
+
+impl Sub<usize> for VPN {
+    type Output = Self;
+
+    fn sub(self, rhs: usize) -> Self::Output {
+        VPN(self.0 - rhs)
+    }
+}
+
+impl Debug for VPN {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "VPN{:#x}", self.0)
+    }
+}
+
+impl Debug for PFN {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "PFN{:#x}", self.0)
+    }
+}
+
+#[derive(Clone, Copy)]
+pub struct VRange {
+    start: VAddr,
+    end: VAddr,
+}
+
+impl Debug for VRange {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "[{:?}, {:?})", self.start, self.end)
+    }
+}
+
+impl Eq for VRange {}
+impl PartialOrd for VRange {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl PartialEq for VRange {
+    fn eq(&self, other: &Self) -> bool {
+        self.cmp(other) == Ordering::Equal
+    }
+}
+
+/// Any two ranges that have one of them containing the other are considered equal.
+impl Ord for VRange {
+    fn cmp(&self, other: &Self) -> Ordering {
+        if self.start == other.start {
+            return Ordering::Equal;
+        }
+
+        if self.end == other.end {
+            if self.start == self.end {
+                return Ordering::Greater;
+            }
+            if other.start == other.end {
+                return Ordering::Less;
+            }
+            return Ordering::Equal;
+        }
+
+        if self.start < other.start {
+            if other.end < self.end {
+                return Ordering::Equal;
+            } else {
+                return Ordering::Less;
+            }
+        }
+
+        if other.start < self.start {
+            if self.end < other.end {
+                return Ordering::Equal;
+            } else {
+                return Ordering::Greater;
+            }
+        }
+
+        unreachable!()
+    }
+}
+
+impl From<VAddr> for VRange {
+    fn from(addr: VAddr) -> Self {
+        VRange::new(addr, addr)
+    }
+}
+
+impl VRange {
+    pub fn new(start: VAddr, end: VAddr) -> Self {
+        assert!(start <= end);
+        VRange { start, end }
+    }
+
+    pub fn is_overlapped(&self, other: &Self) -> bool {
+        self == other
+    }
+
+    pub fn is_user(&self) -> bool {
+        self.start < USER_SPACE_MEMORY_TOP && self.end <= USER_SPACE_MEMORY_TOP
+    }
+
+    pub fn start(&self) -> VAddr {
+        self.start
+    }
+
+    pub fn end(&self) -> VAddr {
+        self.end
+    }
+
+    pub fn len(&self) -> usize {
+        self.end.0 - self.start.0
+    }
+
+    pub fn shrink(&self, count: usize) -> Self {
+        assert!(count <= self.len());
+        VRange::new(self.start, self.end - count)
+    }
+
+    pub fn grow(&self, count: usize) -> Self {
+        VRange::new(self.start, self.end + count)
+    }
+
+    pub fn into_range(self) -> impl RangeBounds<Self> {
+        if self.len() == 0 {
+            VRange::from(self.start())..=VRange::from(self.start())
+        } else {
+            VRange::from(self.start())..=VRange::from(self.end() - 1)
+        }
+    }
+}
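
The two `PFN` helpers above carry the buddy-allocator arithmetic: `buddy_pfn` flips the bit at position `order`, and `combined_pfn` clears it, yielding the first frame of the merged block. A standalone illustration (the helpers are re-implemented here so the snippet runs on its own):

// Buddy-frame arithmetic, mirroring PFN::buddy_pfn / PFN::combined_pfn.
fn buddy(pfn: usize, order: u32) -> usize {
    pfn ^ (1 << order)
}

fn combined(pfn: usize, buddy_pfn: usize) -> usize {
    pfn & buddy_pfn
}

fn main() {
    let pfn = 0xd0; // an order-4 block starting at frame 0xd0
    assert_eq!(buddy(pfn, 4), 0xc0); // its buddy is the block at 0xc0
    // Merging the pair gives one order-5 block starting at the lower frame.
    assert_eq!(combined(pfn, 0xc0), 0xc0);
}

The `VRange` ordering (nested ranges compare as equal) is carried over unchanged from the removed vrange.rs, so address-keyed set lookups still resolve to the containing range.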

+ 0 - 323
src/kernel/mem/mm_list.cc

@@ -1,323 +0,0 @@
-#include <assert.h>
-#include <errno.h>
-#include <stdint.h>
-
-#include <kernel/mem/mm_list.hpp>
-#include <kernel/mem/paging.hpp>
-#include <kernel/mem/vm_area.hpp>
-
-using namespace kernel::mem;
-
-static inline void __invalidate_all_tlb() {
-    asm volatile(
-        "mov %%cr3, %%rax\n\t"
-        "mov %%rax, %%cr3\n\t"
-        :
-        :
-        : "rax", "memory");
-}
-
-static inline void __dealloc_page_table_all(paging::pfn_t pt, int depth, int from, int to) {
-    using namespace paging;
-
-    if (depth > 1) {
-        for (int i = from; i < to; ++i) {
-            auto pse = PSE{pt}[i];
-            if (!(pse.attributes() & PA_P))
-                continue;
-
-            int pfn = pse.pfn();
-            __dealloc_page_table_all(pfn, depth - 1, 0, 512);
-        }
-    }
-
-    free_page(pt);
-}
-
-static inline void __dealloc_page_table(paging::pfn_t pt) {
-    using namespace paging;
-    auto start_idx = idx_p4(0);
-    auto end_idx = idx_p4(KERNEL_SPACE_START);
-
-    __dealloc_page_table_all(pt, 4, start_idx, end_idx);
-}
-
-mm_list::mm_list() : m_pt{paging::alloc_page_table()}, m_brk{m_areas.end()} {
-    // copy only kernel space
-    memcpy(physaddr<void>{m_pt + 0x800}, physaddr<void>{KERNEL_PML4 + 0x800}, 0x800);
-}
-
-mm_list::mm_list(const mm_list& other) : mm_list{} {
-    m_areas = other.m_areas;
-
-    using namespace paging;
-    for (auto iter = m_areas.begin(); iter != m_areas.end(); ++iter) {
-        auto& area = *iter;
-
-        if (area.flags & MM_BREAK)
-            m_brk = iter;
-
-        auto this_iter = vaddr_range{m_pt, area.start, area.end};
-        auto other_iter = vaddr_range{other.m_pt, area.start, area.end};
-
-        while (this_iter) {
-            auto this_pte = *this_iter, other_pte = *other_iter;
-            auto attributes = other_pte.attributes();
-            auto pfn = other_pte.pfn();
-
-            attributes &= ~(PA_RW | PA_A | PA_D);
-            attributes |= PA_COW;
-            this_pte.set(attributes, pfn);
-
-            increase_refcount(pfn_to_page(pfn));
-
-            // TODO: create a function to set COW mappings
-            attributes = other_pte.attributes();
-            attributes &= ~PA_RW;
-            attributes |= PA_COW;
-            other_pte.set(attributes, pfn);
-
-            ++this_iter, ++other_iter;
-        }
-    }
-
-    __invalidate_all_tlb();
-}
-
-mm_list::~mm_list() {
-    if (!m_pt)
-        return;
-
-    clear();
-    __dealloc_page_table(m_pt);
-}
-
-bool mm_list::is_avail(uintptr_t start, std::size_t len) const noexcept {
-    start &= ~0xfff;
-    uintptr_t end = (start + len + 0xfff) & ~0xfff;
-    len = end - start;
-
-    if (end > USER_SPACE_MEMORY_TOP)
-        return false;
-
-    for (const auto& area : m_areas) {
-        if (!area.is_avail(start, end))
-            return false;
-    }
-    return true;
-}
-
-bool mm_list::is_avail(uintptr_t addr) const {
-    if (addr >= USER_SPACE_MEMORY_TOP)
-        return false;
-
-    auto iter = m_areas.find(addr);
-    return iter == m_areas.end();
-}
-
-uintptr_t mm_list::find_avail(uintptr_t hint, size_t len) const {
-    auto addr = std::max(hint, MMAP_MIN_ADDR);
-
-    while (!is_avail(addr, len)) {
-        auto iter = m_areas.lower_bound(addr);
-        if (iter == m_areas.end())
-            return 0;
-
-        addr = iter->end;
-    }
-
-    return addr;
-}
-
-void mm_list::switch_pd() const noexcept {
-    asm volatile("mov %0, %%cr3" : : "r"(m_pt) : "memory");
-}
-
-int mm_list::register_brk(uintptr_t addr) {
-    assert(m_brk == m_areas.end());
-    if (!is_avail(addr))
-        return -ENOMEM;
-
-    bool inserted;
-    std::tie(m_brk, inserted) = m_areas.emplace(addr, MM_ANONYMOUS | MM_WRITE | MM_BREAK);
-
-    assert(inserted);
-    return 0;
-}
-
-uintptr_t mm_list::set_brk(uintptr_t addr) {
-    using namespace paging;
-    assert(m_brk != m_areas.end());
-    uintptr_t curbrk = m_brk->end;
-
-    addr += 4096 - 1;
-    addr &= ~0xfff;
-
-    if (addr <= curbrk || !is_avail(curbrk, addr - curbrk))
-        return curbrk;
-
-    for (auto pte : vaddr_range{m_pt, curbrk, addr})
-        pte.set(PA_ANONYMOUS_PAGE | PA_NXE, EMPTY_PAGE_PFN);
-
-    m_brk->end = addr;
-    return m_brk->end;
-}
-
-void mm_list::clear() {
-    for (auto iter = m_areas.begin(); iter != m_areas.end(); ++iter)
-        unmap(iter, false);
-
-    __invalidate_all_tlb();
-
-    m_areas.clear();
-    m_brk = m_areas.end();
-}
-
-mm_list::iterator mm_list::split(iterator area, uintptr_t addr) {
-    assert(!(addr & 0xfff));
-    assert(addr > area->start && addr < area->end);
-
-    std::size_t old_len = addr - area->start;
-    std::size_t new_file_offset = 0;
-
-    if (area->mapped_file)
-        new_file_offset = area->file_offset + old_len;
-
-    auto new_end = area->end;
-    area->end = addr;
-
-    auto [iter, inserted] =
-        m_areas.emplace(addr, area->flags, new_end, d_get(area->mapped_file), new_file_offset);
-
-    assert(inserted);
-    return iter;
-}
-
-int mm_list::unmap(iterator area, bool should_invalidate_tlb) {
-    using namespace paging;
-
-    bool should_use_invlpg = area->end - area->start <= 0x4000;
-    auto range = vaddr_range{m_pt, area->start, area->end};
-    uintptr_t cur_addr = area->start;
-
-    // TODO: write back dirty pages
-    for (auto pte : range) {
-        free_page(pte.pfn());
-        pte.clear();
-
-        if (should_invalidate_tlb && should_use_invlpg) {
-            asm volatile("invlpg (%0)" : : "r"(cur_addr) : "memory");
-            cur_addr += 0x1000;
-        }
-    }
-
-    if (should_invalidate_tlb && !should_use_invlpg)
-        __invalidate_all_tlb();
-
-    return 0;
-}
-
-int mm_list::unmap(uintptr_t start, std::size_t length, bool should_invalidate_tlb) {
-    // standard says that addr and len MUST be
-    // page-aligned or the call is invalid
-    if (start & 0xfff)
-        return -EINVAL;
-
-    uintptr_t end = (start + length + 0xfff) & ~0xfff;
-
-    // check address validity
-    if (end > KERNEL_SPACE_START)
-        return -EINVAL;
-    if (end > USER_SPACE_MEMORY_TOP)
-        return -ENOMEM;
-
-    auto iter = m_areas.lower_bound(start);
-    auto iter_end = m_areas.upper_bound(end);
-
-    // start <= iter <= end a.k.a. !(start > *iter) && !(*iter > end)
-    while (iter != iter_end) {
-        // start == iter:
-        // start is between (iter->start, iter->end)
-        //
-        // strip out the area before start
-        if (!(start < *iter) && start != iter->start)
-            iter = split(iter, start);
-
-        // iter.end <= end
-        // it is safe to unmap the area directly
-        if (*iter < end) {
-            if (int ret = unmap(iter, should_invalidate_tlb); ret != 0)
-                return ret;
-
-            iter = m_areas.erase(iter);
-            continue;
-        }
-
-        // end == iter:
-        // end is between [iter->start, iter->end)
-        //
-        // if end == iter->start, no need to strip the area
-        if (end == iter->start) {
-            ++iter;
-            continue;
-        }
-
-        (void)split(iter, end);
-        if (int ret = unmap(iter, should_invalidate_tlb); ret != 0)
-            return ret;
-
-        iter = m_areas.erase(iter);
-
-        // no need to check areas after this
-        break;
-    }
-
-    return 0;
-}
-
-int mm_list::mmap(const map_args& args) {
-    auto& vaddr = args.vaddr;
-    auto& length = args.length;
-    auto& file = args.file;
-    auto& foff = args.file_offset;
-    auto& flags = args.flags;
-
-    assert((vaddr & 0xfff) == 0 && (foff & 0xfff) == 0);
-    assert((length & 0xfff) == 0 && length != 0);
-
-    if (!is_avail(vaddr, length))
-        return -EEXIST;
-
-    using namespace kernel::mem::paging;
-
-    // PA_RW is set during page fault while PA_NXE is preserved
-    // so we set PA_NXE now
-    psattr_t attributes = PA_US;
-    if (!(flags & MM_EXECUTE))
-        attributes |= PA_NXE;
-
-    if (flags & MM_MAPPED) {
-        assert(file);
-
-        auto [area, inserted] =
-            m_areas.emplace(vaddr, flags & ~MM_INTERNAL_MASK, vaddr + length, d_get(file), foff);
-        assert(inserted);
-
-        attributes |= PA_MMAPPED_PAGE;
-        for (auto pte : vaddr_range{m_pt, vaddr, vaddr + length})
-            pte.set(attributes, EMPTY_PAGE_PFN);
-    } else if (flags & MM_ANONYMOUS) {
-        // private mapping of zero-filled pages
-        // TODO: shared mapping
-        auto [area, inserted] = m_areas.emplace(vaddr, (flags & ~MM_INTERNAL_MASK), vaddr + length);
-        assert(inserted);
-
-        attributes |= PA_ANONYMOUS_PAGE;
-        for (auto pte : vaddr_range{m_pt, vaddr, vaddr + length})
-            pte.set(attributes, EMPTY_PAGE_PFN);
-    } else {
-        return -EINVAL;
-    }
-
-    return 0;
-}

+ 1 - 1
src/kernel/mem/mm_list/page_fault.rs

@@ -86,7 +86,7 @@ impl MMList {
             }
 
             let page = unsafe { Page::take_pfn(pfn, 0) };
-            if unsafe { page.load_refcount() } == 1 {
+            if page.load_refcount() == 1 {
                 // SAFETY: This is actually safe. If we read `1` here and we have `MMList` lock
                 // held, there couldn't be neither other processes sharing the page, nor other
                 // threads making the page COW at the same time.

+ 431 - 0
src/kernel/mem/page_alloc.rs

@@ -0,0 +1,431 @@
+use core::panic;
+use core::{ptr::NonNull, sync::atomic::AtomicU32};
+use core::sync::atomic::Ordering;
+use crate::prelude::*;
+use bitflags::bitflags;
+use lazy_static::lazy_static;
+use super::address::{PAddr, PFN};
+
+const MAX_PAGE_ORDER: u32 = 10;
+const PAGE_ARRAY: usize = 0xffffff8040000000;
+
+#[macro_export]
+macro_rules! container_of {
+    ($ptr:expr, $type:ty, $($f:tt)*) => {{
+        let ptr = $ptr as *const _ as *const u8;
+        let offset: usize = ::core::mem::offset_of!($type, $($f)*);
+        ptr.sub(offset) as *const $type
+    }}
+}
+
+pub type PagePtr = Ptr<Page>;
+
+#[repr(C)]
+struct Link {
+    prev: Option<NonNull<Link>>,
+    next: Option<NonNull<Link>>,
+}
+
+impl Link {
+    pub const fn new() -> Self {
+        Self {
+            prev: None,
+            next: None
+        }
+    }
+
+    pub fn insert(&mut self, node: &mut Self) {
+        unsafe {
+            let insert_node = NonNull::new(node as *mut Self);
+            if let Some(next) = self.next {
+                (*next.as_ptr()).prev = insert_node;
+            } 
+            node.next = self.next;
+            node.prev = NonNull::new(self as *mut Self); 
+            self.next = insert_node;
+        }
+    }
+
+    pub fn remove(&mut self) -> &Self {
+        unsafe {
+            if let Some(next) = self.next {
+                (*next.as_ptr()).prev = self.prev;
+            }  
+            if let Some(prev) = self.prev {
+                (*prev.as_ptr()).next = self.next;
+            } 
+        }
+        self
+    }
+}
+
+#[repr(C)]
+pub struct Ptr<T> (Option<NonNull<T>>);
+
+impl<T> Clone for Ptr<T> {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl<T> Copy for Ptr<T> {}
+
+impl<T> Ptr<T> {
+    pub fn is_none(&self) -> bool {
+        self.0.is_none()
+    }
+
+    pub fn is_some(&self) -> bool {
+        self.0.is_some()
+    }
+
+    pub fn as_ptr(&self) -> *mut T {
+        match self.0 {
+            Some(non_null_ptr) => {
+               non_null_ptr.as_ptr() 
+            }
+            None => panic!(),
+        }
+    }
+
+    pub fn as_ref(&self) -> &T {
+        match self.0 {
+            Some(non_null_ptr) => {
+                unsafe { non_null_ptr.as_ref() }
+            }
+            None => panic!(),
+        }
+    }
+
+    pub fn as_mut(&self) -> &mut T {
+        match self.0 {
+            Some(non_null_ptr) => {
+                unsafe { &mut *non_null_ptr.as_ptr() }
+            }
+            None => panic!(),
+        }
+    }
+}
+
+impl PagePtr {
+    pub fn add_refcount(&self) -> u32 {
+        self.as_mut().add_refcount()
+    } 
+
+    pub fn sub_refcount(&self) -> u32 {
+        self.as_mut().sub_refcount()
+    } 
+
+    pub fn load_refcount(&self) -> u32 {
+        self.as_ref().refcount.load(Ordering::Acquire)
+    }
+
+    pub fn get_order(&self) -> u32 {
+        self.as_ref().order
+    }
+
+    fn new(ptr: Option<NonNull<Page>>) -> Self {
+        Ptr::<Page>(ptr)
+    }
+
+    fn offset(&self, size: usize) -> PagePtr {
+        match self.0 {
+            Some(non_null_ptr) => {
+                let new_raw_ptr = unsafe { non_null_ptr.as_ptr().add(size) };
+                PagePtr::new(NonNull::new(new_raw_ptr)) 
+            }
+            None => PagePtr::new(None),
+        }
+    }
+}
+
+impl Into<PFN> for PagePtr {
+    fn into(self) -> PFN {
+        unsafe {
+            PFN::from(self.as_ptr().offset_from(PAGE_ARRAY as *const Page) as usize)
+        }
+    }
+}
+
+impl From<PFN> for PagePtr {
+    fn from(pfn: PFN) -> Self {
+        unsafe {
+            PagePtr::new(NonNull::new((PAGE_ARRAY as *mut Page).add(pfn.0)))
+        }
+    }
+}
+
+bitflags! {
+    // TODO: atomic !
+    struct PageFlags: usize {
+        const PRESENT = 1 << 0;
+        const LOCKED  = 1 << 1;
+        const BUDDY   = 1 << 2;
+        const SLAB    = 1 << 3;
+        const DIRTY   = 1 << 4;
+        const FREE    = 1 << 5;
+    }
+}
+
+#[repr(C)]
+pub struct Page {
+    // Now only use for free pages link for buddy system
+    // can be used for lru page swap in future
+    link: Link,
+    flags: PageFlags,   // atomic!!!
+    order: u32,
+    refcount: AtomicU32,
+}
+
+struct FreeArea {
+    free_list: Link,
+    count: usize,
+}
+
+/// Safety: TODO
+unsafe impl Send for Zone {}
+/// Safety: TODO
+unsafe impl Sync for Zone {}
+
+struct Zone {
+    free_areas: [FreeArea; MAX_PAGE_ORDER as usize + 1],
+}
+
+impl Page {
+    fn set_flags(&mut self, flags: PageFlags) {
+        self.flags.insert(flags);
+    }
+
+    fn remove_flags(&mut self, flags: PageFlags) {
+        self.flags.remove(flags);
+    }
+
+    pub fn set_order(&mut self, order: u32) {
+       self.order = order; 
+    }
+
+    pub fn add_refcount(&mut self) -> u32 {
+        self.refcount.fetch_add(1, Ordering::Relaxed)
+    }
+
+    pub fn sub_refcount(&mut self) -> u32 {
+        self.refcount.fetch_sub(1, Ordering::AcqRel)
+    }
+
+    pub fn is_buddy(&self) -> bool {
+        self.flags.contains(PageFlags::BUDDY)
+    }
+
+    pub fn is_slab(&self) -> bool {
+        self.flags.contains(PageFlags::SLAB)
+    }
+
+    pub fn is_present(&self) -> bool {
+        self.flags.contains(PageFlags::PRESENT)
+    }
+
+    pub fn is_free(&self) -> bool {
+        self.flags.contains(PageFlags::FREE)
+    }
+}
+
+impl FreeArea {
+    const fn new() -> Self {
+        Self {
+            free_list: Link::new(),
+            count: 0,
+        }
+    }
+
+    fn alloc_pages(&mut self) -> PagePtr {
+        if let Some(pages_link) = self.free_list.next {
+            assert!(self.count > 0);
+            unsafe {
+                let link_ptr = (&mut *pages_link.as_ptr()).remove();
+                let page_ptr = container_of!(link_ptr, Page, link) as *mut Page;
+                self.count -= 1;
+                (&mut *page_ptr).remove_flags(PageFlags::FREE);
+                return PagePtr::new(NonNull::new(page_ptr as *mut _)); 
+            }
+        }
+        PagePtr::new(None)
+    }
+
+    fn add_pages(&mut self, pages_ptr: PagePtr) {
+        self.count += 1;
+        pages_ptr.as_mut().set_flags(PageFlags::FREE);
+        self.free_list.insert( &mut pages_ptr.as_mut().link)
+    }
+
+    fn del_pages(&mut self, pages_ptr: PagePtr) {
+        assert!(self.count >= 1 && pages_ptr.as_ref().is_free());
+        self.count -= 1;
+        pages_ptr.as_mut().remove_flags(PageFlags::FREE);
+        pages_ptr.as_mut().link.remove();
+    }
+}
+
+impl Zone {
+    const fn new() -> Self {
+        Self {
+           free_areas: [const { FreeArea::new() }; MAX_PAGE_ORDER as usize + 1], 
+        }
+    }
+
+    fn alloc_pages(&mut self, order: u32) -> PagePtr {
+        for current_order in order..=MAX_PAGE_ORDER {
+            let pages_ptr = self.free_areas[current_order as usize].alloc_pages();
+            if pages_ptr.is_none() {
+                continue;
+            }
+
+            pages_ptr.as_mut().add_refcount();
+            pages_ptr.as_mut().set_order(order);
+
+            if current_order > order {
+                self.expand(pages_ptr, current_order, order);            
+            }
+            assert!(pages_ptr.as_ref().is_present() && !pages_ptr.as_ref().is_free());
+            return pages_ptr;
+        }
+        PagePtr::new(None)
+    }
+
+    fn expand(&mut self, pages_ptr: PagePtr, order: u32, target_order: u32) {
+        assert!(pages_ptr.is_some());
+        let mut offset = 1 << order;
+
+        for order in (target_order..order).rev() {
+            offset >>= 1;
+            let split_pages_ptr = pages_ptr.offset(offset);
+            split_pages_ptr.as_mut().set_order(order);
+            split_pages_ptr.as_mut().set_flags(PageFlags::BUDDY);
+            self.free_areas[order as usize].add_pages(split_pages_ptr);
+        }
+    }
+
+    fn free_pages(&mut self, mut pages_ptr: PagePtr, order: u32) {
+        assert_eq!(pages_ptr.load_refcount(), 0);
+        assert_eq!(pages_ptr.get_order(), order);
+
+        let mut pfn: PFN = pages_ptr.into();
+        let mut current_order = order;
+        
+        while current_order < MAX_PAGE_ORDER {
+            let buddy_pfn = pfn.buddy_pfn(current_order);
+            let buddy_pages_ptr = PagePtr::from(buddy_pfn);
+
+            if !self.buddy_check(buddy_pages_ptr, current_order) {
+                break;
+            }
+
+            assert_eq!(buddy_pages_ptr.load_refcount(), 0);
+            pages_ptr.as_mut().remove_flags(PageFlags::BUDDY);
+            buddy_pages_ptr.as_mut().remove_flags(PageFlags::BUDDY);
+            self.free_areas[current_order as usize].del_pages(buddy_pages_ptr);
+            pages_ptr = PagePtr::from(pfn.combined_pfn(buddy_pfn));
+            pages_ptr.as_mut().set_flags(PageFlags::BUDDY); 
+            pfn = pfn.combined_pfn(buddy_pfn);
+            current_order += 1;
+        }
+
+        pages_ptr.as_mut().set_order(current_order);
+        self.free_areas[current_order as usize].add_pages(pages_ptr);
+    }
+    
+    /// This function checks whether a page is free && is the buddy
+    /// we can coalesce a page and its buddy if
+    /// - the buddy is valid(present) &&
+    /// - the buddy is right now in free_areas &&
+    /// - a page and its buddy have the same order &&
+    /// - a page and its buddy are in the same zone.    // check when smp 
+    fn buddy_check(&self, pages_ptr: PagePtr, order: u32) -> bool {
+        if !pages_ptr.as_ref().is_present() {
+            return false;
+        } 
+        if !(pages_ptr.as_ref().is_free()) {
+            return false;
+        }
+        if pages_ptr.as_ref().order != order {
+            return false;
+        }
+        true
+    }
+
+    // only used when buddy init
+    fn create_pages(&mut self, start: usize, end: usize) {
+        let mut start_pfn = PAddr::from(start).ceil_pfn();
+        let end_pfn = PAddr::from(end).floor_pfn();
+
+        while start_pfn < end_pfn {
+            let mut order = usize::from(start_pfn).trailing_zeros().min(MAX_PAGE_ORDER);
+         
+            while start_pfn + order as usize > end_pfn {
+                order -= 1;
+            }
+            let page_ptr: PagePtr = start_pfn.into();
+            page_ptr.as_mut().set_flags(PageFlags::BUDDY);
+            self.free_areas[order as usize].add_pages(page_ptr);
+            start_pfn = start_pfn + (1 << order) as usize;
+        }
+    }
+}
+
+lazy_static! {
+    static ref ZONE: Spin<Zone> = Spin::new(Zone::new());
+}
+
+pub fn alloc_page() -> PagePtr {
+    ZONE.lock().alloc_pages(0)
+}
+
+pub fn alloc_pages(order: u32) -> PagePtr {
+    ZONE.lock().alloc_pages(order)
+}
+
+pub fn free_pages(page_ptr: PagePtr, order: u32) {
+    assert_eq!(page_ptr.get_order(), order);
+    ZONE.lock().free_pages(page_ptr, order)
+}
+
+#[no_mangle]
+pub extern "C" fn mark_present(start: usize, end: usize) {
+    let mut start_pfn = PAddr::from(start).ceil_pfn();
+    let end_pfn = PAddr::from(end).floor_pfn();
+    while start_pfn < end_pfn {
+        PagePtr::from(start_pfn).as_mut().set_flags(PageFlags::PRESENT);
+        start_pfn = start_pfn + 1;
+    }
+}
+
+#[no_mangle]
+pub extern "C" fn create_pages(start: usize, end: usize) {
+    ZONE.lock().create_pages(start, end);
+}
+
+#[no_mangle]
+pub extern "C" fn page_to_pfn(page: *const Page) -> usize {
+    unsafe {
+        page.offset_from(PAGE_ARRAY as *const Page) as usize
+    }
+}
+
+#[no_mangle]
+pub extern "C" fn c_alloc_page() -> *const Page {
+    ZONE.lock().alloc_pages(0).as_ptr() as *const Page
+}
+
+#[no_mangle]
+pub extern "C" fn c_alloc_pages(order: u32) -> *const Page {
+    ZONE.lock().alloc_pages(order).as_ptr() as *const Page
+}
+
+#[no_mangle]
+pub extern "C" fn c_alloc_page_table() -> usize {
+    let pfn: PFN = ZONE.lock().alloc_pages(0).into();
+    let paddr: usize = usize::from(pfn) << 12;
+    unsafe {
+        core::ptr::write_bytes(paddr as *mut u8, 0, 4096);
+    }
+    paddr
+}
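
Together, the `pub fn` and `extern "C"` items above are the allocator's whole public surface. A hedged usage sketch (assuming it runs inside the kernel crate with `alloc_pages`, `free_pages` and `PFN` in scope), mirroring the refcount discipline that `Page` in paging.rs follows:

fn demo() {
    let pages = alloc_pages(3);             // 2^3 = 8 contiguous frames
    assert_eq!(pages.get_order(), 3);
    assert_eq!(pages.load_refcount(), 1);   // the owner holds the only reference

    let pfn: PFN = pages.into();            // frame index of the first page
    let _paddr = usize::from(pfn) << 12;    // physical address of the block

    if pages.sub_refcount() == 1 {          // last reference just dropped
        free_pages(pages, 3);               // coalesces with free buddies
    }
}

The intrusive `Link` plus the `container_of!` macro is what turns a free-list node back into its owning `Page`; keeping `Page` as `#[repr(C)]` keeps that offset (and the 32-byte size the C++ externs assume) stable.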

+ 0 - 256
src/kernel/mem/paging.cc

@@ -1,256 +0,0 @@
-#include <assert.h>
-#include <string.h>
-
-#include <types/list.hpp>
-
-#include <kernel/async/lock.hpp>
-#include <kernel/log.hpp>
-#include <kernel/mem/paging.hpp>
-#include <kernel/mem/slab.hpp>
-#include <kernel/process.hpp>
-
-using namespace types::list;
-
-using namespace kernel::async;
-using namespace kernel::mem::paging;
-
-static struct zone_info {
-    page* next;
-    std::size_t count;
-} zones[52];
-
-static mutex zone_lock;
-
-constexpr unsigned _msb(std::size_t x) {
-    unsigned n = 0;
-    while (x >>= 1)
-        n++;
-    return n;
-}
-
-constexpr pfn_t buddy(pfn_t pfn, unsigned order) {
-    return pfn ^ (1 << (order + 12));
-}
-
-constexpr pfn_t parent(pfn_t pfn, unsigned order) {
-    return pfn & ~(1 << (order + 12));
-}
-
-// call with zone_lock held
-static inline void _zone_list_insert(unsigned order, page* zone) {
-    assert(zone->flags & PAGE_PRESENT && zone->flags & PAGE_BUDDY);
-    assert((zone->flags & 0xff) == 0);
-    zone->flags |= order;
-
-    zones[order].count++;
-    list_insert(&zones[order].next, zone);
-}
-
-// call with zone_lock held
-static inline void _zone_list_remove(unsigned order, page* zone) {
-    assert(zone->flags & PAGE_PRESENT && zone->flags & PAGE_BUDDY);
-    assert(zones[order].count > 0 && (zone->flags & 0xff) == order);
-    zone->flags &= ~0xff;
-
-    zones[order].count--;
-    list_remove(&zones[order].next, zone);
-}
-
-// call with zone_lock held
-static inline page* _zone_list_get(unsigned order) {
-    if (zones[order].count == 0)
-        return nullptr;
-
-    zones[order].count--;
-    auto* pg = list_get(&zones[order].next);
-
-    assert((pg->flags & 0xff) == order);
-    return pg;
-}
-
-// where order represents power of 2
-// call with zone_lock held
-static inline page* _create_zone(pfn_t pfn, unsigned order) {
-    page* zone = pfn_to_page(pfn);
-
-    assert(zone->flags & PAGE_PRESENT);
-    zone->flags |= PAGE_BUDDY;
-
-    _zone_list_insert(order, zone);
-    return zone;
-}
-
-// call with zone_lock held
-static inline void _split_zone(page* zone, unsigned order, unsigned target_order) {
-    while (order > target_order) {
-        pfn_t pfn = page_to_pfn(zone);
-        _create_zone(buddy(pfn, order - 1), order - 1);
-
-        order--;
-    }
-
-    zone->flags &= ~0xff;
-    zone->flags |= target_order;
-}
-
-// call with zone_lock held
-static inline page* _alloc_zone(unsigned order) {
-    for (unsigned i = order; i < 52; ++i) {
-        auto zone = _zone_list_get(i);
-        if (!zone)
-            continue;
-
-        zone->refcount++;
-
-        if (i > order)
-            _split_zone(zone, i, order);
-
-        assert(zone->flags & PAGE_PRESENT && zone->flags & PAGE_BUDDY);
-        return zone;
-    }
-
-    return nullptr;
-}
-
-constexpr uintptr_t _find_mid(uintptr_t l, uintptr_t r) {
-    if (l == r)
-        return l;
-    uintptr_t bit = 1 << _msb(l ^ r);
-
-    return (l & r & ~(bit - 1)) | bit;
-}
-
-static void _recur_create_zone(uintptr_t l, uintptr_t r) {
-    auto mid = _find_mid(l, r);
-    assert(l <= mid);
-
-    // empty zone
-    if (l == mid) {
-        assert(l == r);
-        return;
-    }
-
-    // create [l, r) directly
-    if (r == mid) {
-        auto diff = r - l;
-        int order = 0;
-        while ((1u << order) <= diff) {
-            while (!(diff & (1 << order)))
-                order++;
-            _create_zone(l << 12, order);
-
-            l += (1 << order);
-            diff &= ~(1 << order);
-        }
-
-        return;
-    }
-
-    // split into halves
-    _recur_create_zone(l, mid);
-    _recur_create_zone(mid, r);
-}
-
-void kernel::mem::paging::create_zone(uintptr_t start, uintptr_t end) {
-    start += (4096 - 1);
-    start >>= 12;
-    end >>= 12;
-
-    if (start >= end)
-        return;
-
-    lock_guard_irq lock{zone_lock};
-
-    _recur_create_zone(start, end);
-}
-
-void kernel::mem::paging::mark_present(uintptr_t start, uintptr_t end) {
-    start >>= 12;
-
-    end += (4096 - 1);
-    end >>= 12;
-
-    while (start < end)
-        PAGE_ARRAY[start++].flags |= PAGE_PRESENT;
-}
-
-page* kernel::mem::paging::alloc_pages(unsigned order) {
-    lock_guard_irq lock{zone_lock};
-    auto* zone = _alloc_zone(order);
-    if (!zone)
-        freeze();
-
-    return zone;
-}
-
-page* kernel::mem::paging::alloc_page() {
-    return alloc_pages(0);
-}
-
-pfn_t kernel::mem::paging::alloc_page_table() {
-    page* zone = alloc_page();
-    pfn_t pfn = page_to_pfn(zone);
-
-    memset(physaddr<void>{pfn}, 0x00, 0x1000);
-
-    return pfn;
-}
-
-void kernel::mem::paging::free_pages(page* pg, unsigned order) {
-    lock_guard_irq lock{zone_lock};
-    assert((pg->flags & 0xff) == order);
-
-    if (!(pg->flags & PAGE_BUDDY) || --pg->refcount)
-        return;
-
-    while (order < 52) {
-        pfn_t pfn = page_to_pfn(pg);
-        pfn_t buddy_pfn = buddy(pfn, order);
-        page* buddy_page = pfn_to_page(buddy_pfn);
-
-        if (!(buddy_page->flags & PAGE_BUDDY))
-            break;
-
-        if ((buddy_page->flags & 0xff) != order)
-            break;
-
-        if (buddy_page->refcount)
-            break;
-
-        _zone_list_remove(order, buddy_page);
-
-        if (buddy_page < pg)
-            std::swap(buddy_page, pg);
-
-        buddy_page->flags &= ~(PAGE_BUDDY | 0xff);
-        order++;
-    }
-
-    pg->flags &= ~0xff;
-    _zone_list_insert(order, pg);
-}
-
-void kernel::mem::paging::free_page(page* page) {
-    return free_pages(page, 0);
-}
-
-void kernel::mem::paging::free_pages(pfn_t pfn, unsigned order) {
-    return free_pages(pfn_to_page(pfn), order);
-}
-
-void kernel::mem::paging::free_page(pfn_t pfn) {
-    return free_page(pfn_to_page(pfn));
-}
-
-pfn_t kernel::mem::paging::page_to_pfn(page* _page) {
-    return (pfn_t)(_page - PAGE_ARRAY) * 0x1000;
-}
-
-page* kernel::mem::paging::pfn_to_page(pfn_t pfn) {
-    return PAGE_ARRAY + pfn / 0x1000;
-}
-
-void kernel::mem::paging::increase_refcount(page* pg) {
-    lock_guard_irq lock{zone_lock};
-    pg->refcount++;
-}

+ 30 - 87
src/kernel/mem/paging.rs

@@ -1,89 +1,33 @@
-use crate::bindings::root::kernel::mem::paging::{
-    alloc_page as c_alloc_page, alloc_pages as c_alloc_pages, free_pages as c_free_pages,
-    increase_refcount as c_increase_refcount, page as c_page, page_to_pfn as c_page_to_pfn,
-    pfn_to_page as c_pfn_to_page, PAGE_BUDDY,
-};
 use crate::bindings::root::EFAULT;
 use crate::io::{Buffer, FillResult};
 use crate::kernel::mem::phys;
-use core::fmt;
+use super::page_alloc::{alloc_page, alloc_pages, free_pages, PagePtr};
+use core::fmt; 
 use core::sync::atomic::{AtomicU64, Ordering};
-
+use super::address::PFN;
 use super::phys::PhysPtr;
 
-fn msb(x: u64) -> u64 {
-    // What the ****.
-    let mut x = x;
-    x |= x >> 1;
-    x |= x >> 2;
-    x |= x >> 4;
-    x |= x >> 8;
-    x |= x >> 16;
-    x |= x >> 32;
-    x -= x >> 1;
-    x
-}
-
-fn msb_position(x: u64) -> Option<u32> {
-    if x == 0 {
-        return None;
-    }
-
-    let mut pos = 0;
-    let mut x = x;
-    if x >= 1 << 32 {
-        x >>= 32;
-        pos += 32;
-    }
-    if x >= 1 << 16 {
-        x >>= 16;
-        pos += 16;
-    }
-    if x >= 1 << 8 {
-        x >>= 8;
-        pos += 8;
-    }
-    if x >= 1 << 4 {
-        x >>= 4;
-        pos += 4;
-    }
-    if x >= 1 << 2 {
-        x >>= 2;
-        pos += 2;
-    }
-    if x >= 1 {
-        pos += 1;
-    }
-
-    Some(pos)
-}
-
 pub struct Page {
-    page_ptr: *mut c_page,
+    page_ptr: PagePtr,
     order: u32,
 }
 
 #[allow(dead_code)]
 impl Page {
     pub fn alloc_one() -> Self {
-        let page_ptr = unsafe { c_alloc_page() };
-
+        let page_ptr = alloc_page();
         Self { page_ptr, order: 0 }
     }
 
-    pub fn alloc_many(order: u32) -> Self {
-        let page_ptr = unsafe { c_alloc_pages(order) };
-
+    pub fn alloc_many(order: u32) -> Self {        
+        let page_ptr = alloc_pages(order);
         Self { page_ptr, order }
     }
 
     /// Allocate a contiguous block of pages that can contain at least `count` pages.
     pub fn alloc_ceil(count: usize) -> Self {
         assert_ne!(count, 0);
-        let count_msb = msb(count as u64) as usize;
-        let order = msb_position((count + count_msb - 1) as u64)
-            .expect("`count` can't be 0, so can't `order`");
-
+        let order = count.next_power_of_two().trailing_zeros(); 
         Self::alloc_many(order)
     }
 
@@ -92,15 +36,15 @@ impl Page {
     /// # Safety
     /// Caller must ensure that the pfn is no longer referenced by any other code.
     pub unsafe fn take_pfn(pfn: usize, order: u32) -> Self {
-        let page_ptr = unsafe { c_pfn_to_page(pfn) };
+        let page_ptr: PagePtr = PFN::from(pfn >> 12).into(); 
 
         // Only buddy pages can be used here.
-        assert!(unsafe { page_ptr.as_ref().unwrap() }.flags & PAGE_BUDDY != 0);
+        assert!(page_ptr.as_ref().is_buddy());
 
         // Check if the order is correct.
         assert_eq!(
-            unsafe { page_ptr.as_ref().unwrap() }.flags & 0xff,
-            order as u64
+            page_ptr.get_order(),
+            order
         );
 
         Self { page_ptr, order }
@@ -112,7 +56,7 @@ impl Page {
     /// Caller must ensure that `pfn` refers to a valid physical frame number with `refcount` > 0.
     pub unsafe fn from_pfn(pfn: usize, order: u32) -> Self {
         // SAFETY: `pfn` is a valid physical frame number with refcount > 0.
-        unsafe { Self::increase_refcount(pfn) };
+        Self::increase_refcount(pfn); 
 
         // SAFETY: `pfn` has an increased refcount.
         unsafe { Self::take_pfn(pfn, order) }
@@ -121,9 +65,9 @@ impl Page {
     /// Consumes the `Page` and returns the physical frame number without dropping the reference
     /// count the page holds.
     pub fn into_pfn(self) -> usize {
-        let pfn = unsafe { c_page_to_pfn(self.page_ptr) };
+        let pfn: PFN = self.page_ptr.into();
         core::mem::forget(self);
-        pfn
+        usize::from(pfn) << 12 
     }
 
     pub fn len(&self) -> usize {
@@ -131,7 +75,8 @@ impl Page {
     }
 
     pub fn as_phys(&self) -> usize {
-        unsafe { c_page_to_pfn(self.page_ptr) }
+        let pfn: PFN = self.page_ptr.into();
+        usize::from(pfn) << 12 
     }
 
     pub fn as_cached(&self) -> phys::CachedPP {
@@ -152,24 +97,19 @@ impl Page {
 
     /// # Safety
     /// Caller must ensure that the page is properly freed.
-    pub unsafe fn increase_refcount(pfn: usize) {
-        let page = unsafe { c_pfn_to_page(pfn) };
-
-        unsafe {
-            c_increase_refcount(page);
-        }
+    pub fn increase_refcount(pfn: usize) {
+        let page_ptr: PagePtr = PFN::from(pfn >> 12).into();
+        page_ptr.add_refcount();
     }
 
-    pub unsafe fn load_refcount(&self) -> usize {
-        AtomicU64::from_ptr(&mut (*self.page_ptr).refcount).load(Ordering::Acquire) as usize
+    pub fn load_refcount(&self) -> usize {
+        self.page_ptr.load_refcount() as usize
     }
 }
 
 impl Clone for Page {
     fn clone(&self) -> Self {
-        unsafe {
-            c_increase_refcount(self.page_ptr);
-        }
+        assert_ne!(self.page_ptr.add_refcount(), 0);
 
         Self {
             page_ptr: self.page_ptr,
@@ -180,17 +120,20 @@ impl Clone for Page {
 
 impl Drop for Page {
     fn drop(&mut self) {
-        unsafe {
-            c_free_pages(self.page_ptr, self.order);
+        // not right here
+        match self.page_ptr.sub_refcount() {
+            0 => panic!(),
+            1 => free_pages(self.page_ptr, self.order),
+            _ => {}
         }
     }
 }
 
 impl PartialEq for Page {
     fn eq(&self, other: &Self) -> bool {
-        assert!(self.page_ptr != other.page_ptr || self.order == other.order);
+        // assert!(self.page_ptr != other.page_ptr || self.order == other.order);
 
-        self.page_ptr == other.page_ptr
+        self.page_ptr.as_ptr() == other.page_ptr.as_ptr()
     }
 }
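
`alloc_ceil` now derives the order arithmetically instead of via the removed `msb`/`msb_position` helpers: the order is the exponent of the smallest power of two that covers `count`. A small worked sketch of that expression:

// Order chosen by the new alloc_ceil() for a given page count.
fn order_for(count: usize) -> u32 {
    count.next_power_of_two().trailing_zeros()
}

fn main() {
    assert_eq!(order_for(1), 0); // 1 page             -> order 0
    assert_eq!(order_for(5), 3); // 5 pages round to 8 -> order 3
    assert_eq!(order_for(8), 3); // exact power of two -> order 3
}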
 

+ 11 - 3
src/kernel/mem/slab.cc

@@ -13,6 +13,14 @@ using namespace types::list;
 
 constexpr std::size_t SLAB_PAGE_SIZE = 0x1000; // 4K
 
+extern "C" {
+    struct Page {
+       char item[32]; 
+    };
+    Page* c_alloc_page();
+    uintptr_t page_to_pfn(Page* page);
+}
+
 kernel::async::mutex slab_lock;
 
 std::ptrdiff_t _slab_data_start_offset(std::size_t size) {
@@ -58,10 +66,10 @@ slab_head* _make_slab(uintptr_t start, std::size_t size) {
 }
 
 void _slab_add_page(slab_cache* cache) {
-    auto* new_page = paging::alloc_page();
-    auto new_page_pfn = paging::page_to_pfn(new_page);
+    auto new_page_pfn = page_to_pfn(c_alloc_page()) << 12;
 
-    new_page->flags |= paging::PAGE_SLAB;
+    // TODO!!!
+    // new_page->flags |= paging::PAGE_SLAB;
 
     auto* slab = _make_slab(new_page_pfn, cache->obj_size);
     slab->cache = cache;
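
The commented-out flag update records that slab pages are no longer marked as such: the Rust `Page` has a `SLAB` flag, but nothing is exported to set it yet. A hypothetical export (not part of this commit) sketching what the TODO would need if it lived in page_alloc.rs:

// Hypothetical FFI helper for slab.cc; name and existence are assumptions.
#[no_mangle]
pub extern "C" fn c_set_page_slab(page: *mut Page) {
    // SAFETY: caller passes a pointer it obtained from c_alloc_page().
    unsafe { (*page).set_flags(PageFlags::SLAB) };
}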

+ 0 - 168
src/kernel/mem/vrange.rs

@@ -1,168 +0,0 @@
-use core::{
-    cmp::Ordering,
-    fmt::{self, Debug, Formatter},
-    ops::{Add, RangeBounds, Sub},
-};
-
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
-pub struct VAddr(pub usize);
-
-#[derive(Clone, Copy)]
-pub struct VRange {
-    start: VAddr,
-    end: VAddr,
-}
-
-const USER_SPACE_MEMORY_TOP: VAddr = VAddr(0x8000_0000_0000);
-
-impl VAddr {
-    pub const NULL: Self = Self(0);
-
-    pub fn floor(&self) -> Self {
-        VAddr(self.0 & !0xfff)
-    }
-
-    pub fn ceil(&self) -> Self {
-        VAddr((self.0 + 0xfff) & !0xfff)
-    }
-
-    pub fn is_user(&self) -> bool {
-        self.0 != 0 && self < &USER_SPACE_MEMORY_TOP
-    }
-}
-
-impl Sub for VAddr {
-    type Output = usize;
-
-    fn sub(self, rhs: Self) -> Self::Output {
-        self.0 - rhs.0
-    }
-}
-
-impl Add<usize> for VAddr {
-    type Output = Self;
-
-    fn add(self, rhs: usize) -> Self::Output {
-        VAddr(self.0 + rhs)
-    }
-}
-
-impl Sub<usize> for VAddr {
-    type Output = Self;
-
-    fn sub(self, rhs: usize) -> Self::Output {
-        VAddr(self.0 - rhs)
-    }
-}
-
-impl Debug for VAddr {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "V{:#x}", self.0)
-    }
-}
-
-impl Debug for VRange {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        write!(f, "[{:?}, {:?})", self.start, self.end)
-    }
-}
-
-impl Eq for VRange {}
-impl PartialOrd for VRange {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl PartialEq for VRange {
-    fn eq(&self, other: &Self) -> bool {
-        self.cmp(other) == Ordering::Equal
-    }
-}
-
-/// Any two ranges that have one of them containing the other are considered equal.
-impl Ord for VRange {
-    fn cmp(&self, other: &Self) -> Ordering {
-        if self.start == other.start {
-            return Ordering::Equal;
-        }
-
-        if self.end == other.end {
-            if self.start == self.end {
-                return Ordering::Greater;
-            }
-            if other.start == other.end {
-                return Ordering::Less;
-            }
-            return Ordering::Equal;
-        }
-
-        if self.start < other.start {
-            if other.end < self.end {
-                return Ordering::Equal;
-            } else {
-                return Ordering::Less;
-            }
-        }
-
-        if other.start < self.start {
-            if self.end < other.end {
-                return Ordering::Equal;
-            } else {
-                return Ordering::Greater;
-            }
-        }
-
-        unreachable!()
-    }
-}
-
-impl From<VAddr> for VRange {
-    fn from(addr: VAddr) -> Self {
-        VRange::new(addr, addr)
-    }
-}
-
-impl VRange {
-    pub fn new(start: VAddr, end: VAddr) -> Self {
-        assert!(start <= end);
-        VRange { start, end }
-    }
-
-    pub fn is_overlapped(&self, other: &Self) -> bool {
-        self == other
-    }
-
-    pub fn is_user(&self) -> bool {
-        self.start < USER_SPACE_MEMORY_TOP && self.end <= USER_SPACE_MEMORY_TOP
-    }
-
-    pub fn start(&self) -> VAddr {
-        self.start
-    }
-
-    pub fn end(&self) -> VAddr {
-        self.end
-    }
-
-    pub fn len(&self) -> usize {
-        self.end.0 - self.start.0
-    }
-
-    pub fn shrink(&self, count: usize) -> Self {
-        assert!(count <= self.len());
-        VRange::new(self.start, self.end - count)
-    }
-
-    pub fn grow(&self, count: usize) -> Self {
-        VRange::new(self.start, self.end + count)
-    }
-
-    pub fn into_range(self) -> impl RangeBounds<Self> {
-        if self.len() == 0 {
-            VRange::from(self.start())..=VRange::from(self.start())
-        } else {
-            VRange::from(self.start())..=VRange::from(self.end() - 1)
-        }
-    }
-}

+ 14 - 5
src/kinit.cpp

@@ -48,6 +48,15 @@ static inline void setup_early_kernel_page_table() {
 }
 
 extern "C" char KIMAGE_PAGES[];
+extern "C" {
+    void create_pages(uintptr_t start, uintptr_t end);
+    void mark_present(uintptr_t start, uintptr_t end);
+    struct Page {
+       char fuck[32]; 
+    };
+    Page* c_alloc_pages(uint32_t order);
+    uintptr_t page_to_pfn(Page* page);
+}
 
 static inline void setup_buddy(uintptr_t addr_max) {
     using namespace kernel::mem;
@@ -95,13 +104,13 @@ static inline void setup_buddy(uintptr_t addr_max) {
         if (start > end)
             continue;
 
-        mem::paging::create_zone(start, end);
+        create_pages(start, end);
     }
 
     // unused space
-    create_zone(0x9000, 0x80000);
-    create_zone(0x100000, 0x200000);
-    create_zone(real_start_pfn, saved_start_pfn);
+    create_pages(0x9000, 0x80000);
+    create_pages(0x100000, 0x200000);
+    create_pages(real_start_pfn, saved_start_pfn);
 }
 
 static inline void save_memory_info(bootloader_data* data) {
@@ -133,7 +142,7 @@ extern "C" void NORETURN kernel_init(bootloader_data* data) {
     init_allocator();
 
     using namespace mem::paging;
-    auto kernel_stack_pfn = page_to_pfn(alloc_pages(9));
+    auto kernel_stack_pfn = page_to_pfn(c_alloc_pages(9)) << 12;
     auto kernel_stack_ptr = mem::physaddr<std::byte>{kernel_stack_pfn} + (1 << 9) * 0x1000;
 
     asm volatile(