mem, paging: introduce page locks and exclusive pages

Introduce a new page locking mechanism to ensure exclusive access to
pages while we use them. The underlying locks are not implemented yet,
since the paging structs will change over the next few patches.
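
The call sites updated below suggest the intended shape of the API:
`Page::lock()` returns a guard exposing `as_bytes()` and
`as_bytes_mut()`. A minimal self-contained mock of that pattern (the
`PageGuard` name is an assumption; only `lock()` and the byte accessors
appear in this patch):

```rust
use core::cell::UnsafeCell;

// Mock of the locking pattern; the real lock is a no-op in this patch.
struct Page(UnsafeCell<[u8; 4096]>);

// Hypothetical guard type granting access while the page lock is held.
struct PageGuard<'a>(&'a Page);

impl Page {
    fn lock(&self) -> PageGuard<'_> {
        // TODO in later patches: actually take the per-page lock here.
        PageGuard(self)
    }
}

impl PageGuard<'_> {
    fn as_bytes(&self) -> &[u8] {
        unsafe { &*self.0 .0.get() }
    }

    fn as_bytes_mut(&mut self) -> &mut [u8] {
        unsafe { &mut *self.0 .0.get() }
    }
}
```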

Introduce a new `PageExcl` struct representing an exclusively owned
page that conforms to Rust's ownership rules. A page owned exclusively
can be accessed without taking page locks.
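
An illustrative sketch of the intended usage, pieced together from call
sites in this patch (`alloc`, `as_bytes_mut`, `into_page`, and
`Page::lock` all appear below; the byte values are arbitrary):

```rust
// `PageExcl` is exclusively owned, so byte access needs neither
// a lock nor an unsafe block.
let mut page = PageExcl::alloc();
page.as_bytes_mut()[..4].copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);

// Giving up exclusivity yields a shared `Page`; from then on,
// access goes through the page lock.
let shared: Page = page.into_page();
let pg = shared.lock();
assert_eq!(&pg.as_bytes()[..4], &[0xde, 0xad, 0xbe, 0xef]);
```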

Remove the `MemoryBlock` struct and the `AsMemoryBlock` trait, as they
are not easy to use and carry barely any semantic meaning.

Signed-off-by: greatbridf <greatbridf@icloud.com>
greatbridf committed 4 months ago
parent
commit 210a6693c7

+ 2 - 2
.rustfmt.toml

@@ -29,8 +29,8 @@ fn_single_line = false
 where_single_line = false
 imports_indent = "Block"
 imports_layout = "Mixed"
-imports_granularity = "Preserve"
-group_imports = "Preserve"
+imports_granularity = "Module"
+group_imports = "StdExternalCrate"
 reorder_imports = true
 reorder_modules = true
 reorder_impl_items = false

+ 7 - 0
Cargo.lock

@@ -180,6 +180,7 @@ dependencies = [
  "posix_types",
  "slab_allocator",
  "stalloc",
+ "static_assertions",
  "unwinding",
  "virtio-drivers",
  "xmas-elf",
@@ -525,6 +526,12 @@ version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a37f0ead4094eeb54c6893316aa139e48b252f1c07511e5124fa1f9414df5b6c"
 
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
 [[package]]
 name = "syn"
 version = "2.0.104"

+ 1 - 0
Cargo.toml

@@ -42,6 +42,7 @@ futures = { version = "0.3.31", features = [
     "alloc",
     "async-await",
 ], default-features = false }
+static_assertions = "1.1.0"
 
 [target.'cfg(any(target_arch = "riscv64", target_arch = "loongarch64"))'.dependencies]
 virtio-drivers = { version = "0.11.0" }

+ 35 - 26
src/driver/ahci/command_table.rs

@@ -1,45 +1,54 @@
-use super::{command::Command, PRDTEntry, FISH2D};
-use crate::kernel::mem::{AsMemoryBlock as _, Page};
+use core::ptr::NonNull;
+
 use eonix_mm::address::PAddr;
 
-pub struct CommandTable<'a> {
-    page: Page,
-    command_fis: &'a mut FISH2D,
+use super::command::Command;
+use super::{PRDTEntry, FISH2D};
+use crate::kernel::mem::{Page, PageExt};
 
-    prdt: &'a mut [PRDTEntry; 248],
-    prdt_entries: Option<u16>,
+pub struct CommandTable {
+    page: Page,
+    cmd_fis: NonNull<FISH2D>,
+    prdt: NonNull<[PRDTEntry; 248]>,
+    prdt_entries: usize,
 }
 
-impl CommandTable<'_> {
+unsafe impl Send for CommandTable {}
+unsafe impl Sync for CommandTable {}
+
+impl CommandTable {
     pub fn new() -> Self {
         let page = Page::alloc();
-        let memory = page.as_memblk();
-
-        let (lhs, prdt) = memory.split_at(0x80);
-
-        let (command_fis, _) = lhs.split_at(size_of::<FISH2D>());
-        let command_fis = unsafe { command_fis.as_ptr().as_mut() };
-        let prdt = unsafe { prdt.as_ptr().as_mut() };
-
-        Self {
-            page,
-            command_fis,
-            prdt,
-            prdt_entries: None,
+        let base = page.get_ptr();
+
+        unsafe {
+            Self {
+                page,
+                cmd_fis: base.cast(),
+                prdt: base.byte_add(0x80).cast(),
+                prdt_entries: 0,
+            }
         }
     }
 
     pub fn setup(&mut self, cmd: &impl Command) {
-        self.command_fis.setup(cmd.cmd(), cmd.lba(), cmd.count());
-        self.prdt_entries = Some(cmd.pages().len() as u16);
+        unsafe {
+            self.cmd_fis
+                .as_mut()
+                .setup(cmd.cmd(), cmd.lba(), cmd.count());
+        }
+
+        self.prdt_entries = cmd.pages().len();
 
         for (idx, page) in cmd.pages().iter().enumerate() {
-            self.prdt[idx].setup(page);
+            unsafe {
+                self.prdt.as_mut()[idx].setup(page);
+            }
         }
     }
 
-    pub fn prdt_len(&self) -> u16 {
-        self.prdt_entries.unwrap()
+    pub fn prdt_len(&self) -> usize {
+        self.prdt_entries
     }
 
     pub fn base(&self) -> PAddr {

+ 20 - 22
src/driver/ahci/mod.rs

@@ -1,25 +1,23 @@
-use crate::{
-    fs::procfs,
-    io::Buffer as _,
-    kernel::{
-        block::BlockDevice,
-        constants::{EINVAL, EIO},
-        interrupt::register_irq_handler,
-        pcie::{self, Header, PCIDevice, PCIDriver, PciError},
-        vfs::types::DeviceId,
-    },
-    prelude::*,
-};
-use alloc::{format, sync::Arc};
+use alloc::format;
+use alloc::sync::Arc;
+
 use async_trait::async_trait;
 use control::AdapterControl;
 use defs::*;
 use eonix_mm::address::{AddrOps as _, PAddr};
 use eonix_sync::SpinIrq as _;
 use port::AdapterPort;
-
 pub(self) use register::Register;
 
+use crate::fs::procfs;
+use crate::io::Buffer as _;
+use crate::kernel::block::BlockDevice;
+use crate::kernel::constants::{EINVAL, EIO};
+use crate::kernel::interrupt::register_irq_handler;
+use crate::kernel::pcie::{self, Header, PCIDevice, PCIDriver, PciError};
+use crate::kernel::vfs::types::DeviceId;
+use crate::prelude::*;
+
 mod command;
 mod command_table;
 mod control;
@@ -30,7 +28,7 @@ pub(self) mod slot;
 mod stats;
 
 pub struct AHCIDriver {
-    devices: Spin<Vec<Arc<Device<'static>>>>,
+    devices: Spin<Vec<Arc<Device>>>,
 }
 
 pub struct BitsIterator {
@@ -64,22 +62,22 @@ impl Iterator for BitsIterator {
     }
 }
 
-struct Device<'a> {
+struct Device {
     control_base: PAddr,
     control: AdapterControl,
     _pcidev: Arc<PCIDevice<'static>>,
     /// # Lock
     /// Might be accessed from irq handler, use with `lock_irq()`
-    ports: Spin<[Option<Arc<AdapterPort<'a>>>; 32]>,
+    ports: Spin<[Option<Arc<AdapterPort>>; 32]>,
 }
 
 /// # Safety
 /// `pcidev` is never accessed from Rust code
 /// TODO!!!: place *mut pci_device in a safe wrapper
-unsafe impl Send for Device<'_> {}
-unsafe impl Sync for Device<'_> {}
+unsafe impl Send for Device {}
+unsafe impl Sync for Device {}
 
-impl Device<'_> {
+impl Device {
     fn handle_interrupt(&self) {
         // Safety
         // `self.ports` is accessed inside irq handler
@@ -108,8 +106,8 @@ impl Device<'_> {
     }
 }
 
-impl Device<'static> {
-    async fn probe_port(&self, port: Arc<AdapterPort<'static>>) -> KResult<()> {
+impl Device {
+    async fn probe_port(&self, port: Arc<AdapterPort>) -> KResult<()> {
         port.init().await?;
 
         {

+ 36 - 68
src/driver/ahci/port.rs

@@ -1,20 +1,18 @@
+use alloc::collections::vec_deque::VecDeque;
+use core::task::{Poll, Waker};
+
+use async_trait::async_trait;
+use eonix_mm::address::{Addr as _, PAddr};
+use eonix_sync::SpinIrq as _;
+
 use super::command::{Command, IdentifyCommand, ReadLBACommand, WriteLBACommand};
-use super::slot::CommandSlot;
+use super::slot::CommandList;
 use super::stats::AdapterPortStats;
-use super::{
-    CommandHeader, Register, PORT_CMD_CR, PORT_CMD_FR, PORT_CMD_FRE, PORT_CMD_ST, PORT_IE_DEFAULT,
-};
+use super::{Register, PORT_CMD_CR, PORT_CMD_FR, PORT_CMD_FRE, PORT_CMD_ST, PORT_IE_DEFAULT};
 use crate::driver::ahci::command_table::CommandTable;
 use crate::kernel::block::{BlockDeviceRequest, BlockRequestQueue};
 use crate::kernel::constants::{EINVAL, EIO};
-use crate::kernel::mem::paging::Page;
-use crate::kernel::mem::AsMemoryBlock as _;
 use crate::prelude::*;
-use alloc::collections::vec_deque::VecDeque;
-use async_trait::async_trait;
-use core::pin::pin;
-use eonix_mm::address::{Addr as _, PAddr};
-use eonix_sync::{SpinIrq as _, WaitList};
 
 /// An `AdapterPort` is an HBA device in AHCI mode.
 ///
@@ -55,6 +53,8 @@ pub struct AdapterPortData {
 struct FreeList {
     free: VecDeque<u32>,
     working: VecDeque<u32>,
+
+    wakers: VecDeque<Waker>,
 }
 
 impl FreeList {
@@ -62,57 +62,32 @@ impl FreeList {
         Self {
             free: (0..32).collect(),
             working: VecDeque::new(),
+            wakers: VecDeque::new(),
         }
     }
 }
 
-pub struct AdapterPort<'a> {
+pub struct AdapterPort {
     pub nport: u32,
     regs_base: PAddr,
 
-    slots: [CommandSlot<'a>; 32],
+    cmdlist: CommandList,
     free_list: Spin<FreeList>,
-    free_list_wait: WaitList,
-
-    /// Holds the command list.
-    /// **DO NOT USE IT DIRECTLY**
-    _page: Page,
-
-    cmdlist_base: PAddr,
-    fis_base: PAddr,
 
     stats: AdapterPortStats,
 }
 
-impl<'a> AdapterPort<'a> {
+impl AdapterPort {
     pub fn new(base: PAddr, nport: u32) -> Self {
-        let page = Page::alloc();
-        let cmdlist_base = page.start();
-        let cmdlist_size = 32 * size_of::<CommandHeader>();
-        let fis_base = cmdlist_base + cmdlist_size;
-
-        let (mut cmdheaders, _) = page.as_memblk().split_at(cmdlist_size);
-        let slots = core::array::from_fn(move |_| {
-            let (cmdheader, next) = cmdheaders.split_at(size_of::<CommandHeader>());
-            cmdheaders = next;
-            CommandSlot::new(unsafe { cmdheader.as_ptr().as_mut() })
-        });
-
         Self {
             nport,
             regs_base: base + 0x100 + 0x80 * nport as usize,
-            slots,
+            cmdlist: CommandList::new(),
             free_list: Spin::new(FreeList::new()),
-            free_list_wait: WaitList::new(),
-            _page: page,
             stats: AdapterPortStats::new(),
-            cmdlist_base,
-            fis_base,
         }
     }
-}
 
-impl AdapterPort<'_> {
     fn command_list_base(&self) -> Register<u64> {
         Register::new(self.regs_base + 0x00)
     }
@@ -146,25 +121,16 @@ impl AdapterPort<'_> {
     }
 
     async fn get_free_slot(&self) -> u32 {
-        loop {
-            let mut wait = pin!(self.free_list_wait.prepare_to_wait());
-
-            {
-                let mut free_list = self.free_list.lock_irq();
-
-                if let Some(slot) = free_list.free.pop_front() {
-                    return slot;
-                }
-
-                wait.as_mut().add_to_wait_list();
-
-                if let Some(slot) = free_list.free.pop_front() {
-                    return slot;
-                }
+        core::future::poll_fn(|ctx| {
+            let mut free_list = self.free_list.lock_irq();
+            if let Some(slot) = free_list.free.pop_front() {
+                return Poll::Ready(slot);
             }
 
-            wait.await;
-        }
+            free_list.wakers.push_back(ctx.waker().clone());
+            Poll::Pending
+        })
+        .await
     }
 
     fn save_working(&self, slot: u32) {
@@ -172,8 +138,10 @@ impl AdapterPort<'_> {
     }
 
     fn release_free_slot(&self, slot: u32) {
-        self.free_list.lock_irq().free.push_back(slot);
-        self.free_list_wait.notify_one();
+        let mut free_list = self.free_list.lock_irq();
+
+        free_list.free.push_back(slot);
+        free_list.wakers.drain(..).for_each(|waker| waker.wake());
     }
 
     pub fn handle_interrupt(&self) {
@@ -187,7 +155,7 @@ impl AdapterPort<'_> {
                 return true;
             }
 
-            self.slots[n as usize].handle_irq();
+            self.cmdlist.get(n as usize).handle_irq();
             self.stats.inc_int_fired();
 
             false
@@ -216,7 +184,7 @@ impl AdapterPort<'_> {
         cmdtable.setup(cmd);
 
         let slot_index = self.get_free_slot().await;
-        let slot = &self.slots[slot_index as usize];
+        let slot = self.cmdlist.get(slot_index as usize);
 
         slot.prepare_command(&cmdtable, cmd.write());
         self.save_working(slot_index);
@@ -229,10 +197,9 @@ impl AdapterPort<'_> {
 
         self.stats.inc_cmd_sent();
 
-        if let Err(_) = slot.wait_finish().await {
+        slot.wait_finish().await.inspect_err(|_| {
             self.stats.inc_cmd_error();
-            return Err(EIO);
-        };
+        })?;
 
         self.release_free_slot(slot_index);
         Ok(())
@@ -251,8 +218,9 @@ impl AdapterPort<'_> {
         self.stop_command()?;
 
         self.command_list_base()
-            .write(self.cmdlist_base.addr() as u64);
-        self.fis_base().write(self.fis_base.addr() as u64);
+            .write(self.cmdlist.cmdlist_base().addr() as u64);
+        self.fis_base()
+            .write(self.cmdlist.recv_fis_base().addr() as u64);
 
         self.interrupt_enable().write_once(PORT_IE_DEFAULT);
 
@@ -277,7 +245,7 @@ impl AdapterPort<'_> {
 }
 
 #[async_trait]
-impl BlockRequestQueue for AdapterPort<'_> {
+impl BlockRequestQueue for AdapterPort {
     fn max_request_pages(&self) -> u64 {
         1024
     }
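
The old `WaitList`-based loop in `get_free_slot` is replaced above by
the standard `poll_fn` + `Waker` idiom. A self-contained sketch of the
same pattern, using `std::sync::Mutex` in place of the kernel's `Spin`
(all names here are illustrative, not the driver's):

```rust
use core::task::{Poll, Waker};
use std::collections::VecDeque;
use std::sync::Mutex;

/// Illustrative free-slot allocator: pop a slot if one is free,
/// otherwise park the current task's waker and return `Pending`.
struct FreeSlots {
    inner: Mutex<(VecDeque<u32>, VecDeque<Waker>)>,
}

impl FreeSlots {
    async fn get(&self) -> u32 {
        core::future::poll_fn(|ctx| {
            let (free, wakers) = &mut *self.inner.lock().unwrap();
            if let Some(slot) = free.pop_front() {
                return Poll::Ready(slot);
            }
            // No free slot: register for wakeup and yield to the executor.
            wakers.push_back(ctx.waker().clone());
            Poll::Pending
        })
        .await
    }

    fn put(&self, slot: u32) {
        let (free, wakers) = &mut *self.inner.lock().unwrap();
        free.push_back(slot);
        // Wake every waiter; each re-checks the queue under the lock.
        wakers.drain(..).for_each(Waker::wake);
    }
}
```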

+ 134 - 45
src/driver/ahci/slot.rs

@@ -1,20 +1,37 @@
-use super::{command_table::CommandTable, CommandHeader};
+use core::cell::UnsafeCell;
+use core::ptr::NonNull;
+use core::task::{Poll, Waker};
+
+use eonix_mm::address::{Addr as _, PAddr};
+use eonix_sync::{Spin, SpinIrq as _};
+
+use super::command_table::CommandTable;
+use super::CommandHeader;
+use crate::kernel::constants::EIO;
+use crate::kernel::mem::paging::AllocZeroed;
+use crate::kernel::mem::{Page, PageExt};
 use crate::KResult;
-use core::pin::pin;
-use eonix_mm::address::Addr as _;
-use eonix_sync::{Spin, SpinIrq as _, WaitList};
+
+pub struct CommandList {
+    base: NonNull<u8>,
+    _page: Page,
+}
+
+unsafe impl Send for CommandList {}
+unsafe impl Sync for CommandList {}
 
 pub struct CommandSlot<'a> {
-    /// # Usage
-    /// `inner.cmdheader` might be used in irq handler. So in order to wait for
-    /// commands to finish, we should use `lock_irq` on `inner`
-    inner: Spin<CommandSlotInner<'a>>,
-    wait_list: WaitList,
+    cmdheader: &'a UnsafeCell<CommandHeader>,
+    /// [`Self::control`] might be used in irq handlers.
+    control: &'a Spin<SlotControl>,
 }
 
-struct CommandSlotInner<'a> {
+unsafe impl Send for CommandSlot<'_> {}
+unsafe impl Sync for CommandSlot<'_> {}
+
+struct SlotControl {
     state: SlotState,
-    cmdheader: &'a mut CommandHeader,
+    waker: Option<Waker>,
 }
 
 #[derive(Debug, PartialEq, Eq, Clone, Copy)]
@@ -25,32 +42,103 @@ enum SlotState {
     Error,
 }
 
-impl<'a> CommandSlot<'a> {
-    pub fn new(cmdheader: &'a mut CommandHeader) -> Self {
+impl CommandList {
+    fn cmdheaders(&self) -> &[UnsafeCell<CommandHeader>; 32] {
+        unsafe { self.base.cast().as_ref() }
+    }
+
+    fn controls_ptr(base: NonNull<u8>) -> NonNull<Spin<SlotControl>> {
+        // 24 bytes for SlotControl and extra 8 bytes for Spin.
+        const_assert_eq!(size_of::<Spin<SlotControl>>(), 32);
+
+        unsafe { base.add(size_of::<UnsafeCell<CommandHeader>>() * 32).cast() }
+    }
+
+    fn controls(&self) -> &[Spin<SlotControl>; 32] {
+        unsafe { Self::controls_ptr(self.base).cast().as_ref() }
+    }
+
+    pub fn cmdlist_base(&self) -> PAddr {
+        self._page.start()
+    }
+
+    pub fn recv_fis_base(&self) -> PAddr {
+        self._page.start()
+            + (size_of::<UnsafeCell<CommandHeader>>() + size_of::<Spin<SlotControl>>()) * 32
+    }
+
+    pub fn get(&self, index: usize) -> CommandSlot {
+        CommandSlot {
+            cmdheader: &self.cmdheaders()[index],
+            control: &self.controls()[index],
+        }
+    }
+
+    pub fn new() -> Self {
+        let page = Page::zeroed();
+        let base = page.get_ptr();
+
+        let controls_ptr = Self::controls_ptr(base);
+
+        for i in 0..32 {
+            unsafe {
+                controls_ptr.add(i).write(Spin::new(SlotControl {
+                    state: SlotState::Idle,
+                    waker: None,
+                }));
+            }
+        }
+
         Self {
-            inner: Spin::new(CommandSlotInner {
-                state: SlotState::Idle,
-                cmdheader,
-            }),
-            wait_list: WaitList::new(),
+            base: page.get_ptr(),
+            _page: page,
         }
     }
+}
 
+impl Drop for CommandList {
+    fn drop(&mut self) {
+        let controls_ptr = Self::controls_ptr(self.base);
+
+        for i in 0..32 {
+            unsafe {
+                controls_ptr.add(i).drop_in_place();
+            }
+        }
+    }
+}
+
+impl CommandSlot<'_> {
     pub fn handle_irq(&self) {
-        let mut inner = self.inner.lock();
-        debug_assert_eq!(inner.state, SlotState::Working);
+        // We are already in the IRQ handler.
+        let mut control = self.control.lock();
+        assert_eq!(control.state, SlotState::Working);
+
+        let cmdheader = unsafe {
+            // SAFETY: The IRQ handler is only called after the command
+            //         is finished.
+            &mut *self.cmdheader.get()
+        };
 
         // TODO: Check errors.
-        inner.state = SlotState::Finished;
-        inner.cmdheader.bytes_transferred = 0;
-        inner.cmdheader.prdt_length = 0;
+        cmdheader.bytes_transferred = 0;
+        cmdheader.prdt_length = 0;
 
-        self.wait_list.notify_all();
+        control.state = SlotState::Finished;
+
+        if let Some(waker) = control.waker.take() {
+            waker.wake();
+        }
     }
 
     pub fn prepare_command(&self, cmdtable: &CommandTable, write: bool) {
-        let mut inner = self.inner.lock_irq();
-        let cmdheader = &mut inner.cmdheader;
+        let mut control = self.control.lock_irq();
+        assert_eq!(control.state, SlotState::Idle);
+
+        let cmdheader = unsafe {
+            // SAFETY: We are in the idle state.
+            &mut *self.cmdheader.get()
+        };
 
         cmdheader.first = 0x05; // FIS type
 
@@ -60,36 +148,37 @@ impl<'a> CommandSlot<'a> {
 
         cmdheader.second = 0x00;
 
-        cmdheader.prdt_length = cmdtable.prdt_len();
+        cmdheader.prdt_length = cmdtable.prdt_len() as u16;
         cmdheader.bytes_transferred = 0;
         cmdheader.command_table_base = cmdtable.base().addr() as u64;
 
         cmdheader._reserved = [0; 4];
 
-        inner.state = SlotState::Working;
+        control.state = SlotState::Working;
     }
 
     pub async fn wait_finish(&self) -> KResult<()> {
-        let mut inner = loop {
-            let mut wait = pin!(self.wait_list.prepare_to_wait());
-
-            {
-                let inner = self.inner.lock_irq();
-                if inner.state != SlotState::Working {
-                    break inner;
+        core::future::poll_fn(|ctx| {
+            let mut control = self.control.lock_irq();
+
+            match control.state {
+                SlotState::Idle => unreachable!("Poll called in idle state"),
+                SlotState::Working => {
+                    control.waker = Some(ctx.waker().clone());
+                    Poll::Pending
                 }
-                wait.as_mut().add_to_wait_list();
+                SlotState::Finished => {
+                    control.state = SlotState::Idle;
+                    Poll::Ready(Ok(()))
+                }
+                SlotState::Error => {
+                    control.state = SlotState::Idle;
 
-                if inner.state != SlotState::Working {
-                    break inner;
+                    // TODO: Report errors.
+                    Poll::Ready(Err(EIO))
                 }
             }
-
-            wait.await;
-        };
-
-        inner.state = SlotState::Idle;
-
-        Ok(())
+        })
+        .await
     }
 }
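
For orientation, the derived layout of the single page backing
`CommandList`, assuming the standard 32-byte AHCI command header
(consistent with the `const_assert_eq!` on `Spin<SlotControl>` above;
these constants are illustrative and do not exist in the patch):

```rust
// Sketch of offsets within the CommandList page.
const CMD_HEADERS: usize = 0x000; // [UnsafeCell<CommandHeader>; 32], 32 * 32 bytes
const SLOT_CONTROLS: usize = 0x400; // [Spin<SlotControl>; 32], 32 * 32 bytes
const RECV_FIS: usize = 0x800; // received FIS area, cf. recv_fis_base()
```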

+ 52 - 83
src/driver/e1000e.rs

@@ -1,19 +1,18 @@
-use crate::kernel::constants::{EAGAIN, EFAULT, EINVAL, EIO};
-use crate::kernel::interrupt::register_irq_handler;
-use crate::kernel::mem::paging::{self, AllocZeroed};
-use crate::kernel::mem::{AsMemoryBlock, PhysAccess};
-use crate::kernel::pcie::{self, Header, PCIDevice, PCIDriver, PciError};
-use crate::net::netdev;
-use crate::prelude::*;
 use alloc::boxed::Box;
 use alloc::sync::Arc;
-use alloc::vec::Vec;
-use async_trait::async_trait;
 use core::ptr::NonNull;
+
+use async_trait::async_trait;
 use eonix_hal::fence::memory_barrier;
 use eonix_mm::address::{Addr, PAddr};
 use eonix_sync::SpinIrq;
-use paging::Page;
+
+use crate::kernel::constants::{EAGAIN, EFAULT, EINVAL, EIO};
+use crate::kernel::interrupt::register_irq_handler;
+use crate::kernel::mem::{PageExcl, PageExt, PhysAccess};
+use crate::kernel::pcie::{self, Header, PCIDevice, PCIDriver, PciError};
+use crate::net::netdev;
+use crate::prelude::*;
 
 mod defs;
 
@@ -55,13 +54,13 @@ struct E1000eDev {
     id: u32,
 
     regs: Registers,
-    rt_desc_page: Page,
+    rt_desc_page: PageExcl,
     rx_head: Option<u32>,
     rx_tail: Option<u32>,
     tx_tail: Option<u32>,
 
-    rx_buffers: Option<Box<Vec<Page>>>,
-    tx_buffers: Option<Box<Vec<Page>>>,
+    rx_buffers: Box<[PageExcl; RX_DESC_SIZE]>,
+    tx_buffers: Box<[Option<PageExcl>; TX_DESC_SIZE]>,
 }
 
 fn test(val: u32, bit: u32) -> bool {
@@ -196,7 +195,7 @@ impl netdev::Netdev for E1000eDev {
                 break;
             }
 
-            let ref mut desc = self.rx_desc_table()[next_tail as usize];
+            let desc = unsafe { &mut self.rx_desc_table()[next_tail as usize] };
             if !test(desc.status as u32, defs::RXD_STAT_DD as u32) {
                 Err(EIO)?;
             }
@@ -204,11 +203,8 @@ impl netdev::Netdev for E1000eDev {
             desc.status = 0;
             let len = desc.length as usize;
 
-            let buffers = self.rx_buffers.as_mut().ok_or(EIO)?;
-            let data = unsafe {
-                // SAFETY: No one could be writing to the buffer at this point.
-                &buffers[next_tail as usize].as_memblk().as_bytes()[..len]
-            };
+            let buffer = &self.rx_buffers[next_tail as usize];
+            let data = &buffer.as_bytes()[..len];
 
             println_debug!("e1000e: received {len} bytes, {:?}", PrintableBytes(data));
             self.rx_tail = Some(next_tail);
@@ -226,20 +222,17 @@ impl netdev::Netdev for E1000eDev {
             return Err(EAGAIN);
         }
 
-        let ref mut desc = self.tx_desc_table()[tail as usize];
+        let desc = unsafe { &mut self.tx_desc_table()[tail as usize] };
         if !test(desc.status as u32, defs::TXD_STAT_DD as u32) {
             return Err(EIO);
         }
 
-        let buffer_page = Page::alloc();
+        let mut buffer_page = PageExcl::alloc();
         if buf.len() > buffer_page.len() {
             return Err(EFAULT);
         }
 
-        unsafe {
-            // SAFETY: We are the only one writing to this memory block.
-            buffer_page.as_memblk().as_bytes_mut()[..buf.len()].copy_from_slice(buf);
-        }
+        buffer_page.as_bytes_mut()[..buf.len()].copy_from_slice(buf);
 
         desc.buffer = PAddr::from(buffer_page.pfn()).addr() as u64;
         desc.length = buf.len() as u16;
@@ -249,9 +242,8 @@ impl netdev::Netdev for E1000eDev {
         self.tx_tail = Some(next_tail);
         self.regs.write(defs::REG_TDT, next_tail);
 
-        // TODO: check if the packets are sent and update self.tx_head state
-
-        Ok(())
+        unimplemented!("Check if the packets are sent and update self.tx_head state");
+        // Ok(())
     }
 }
 
@@ -324,26 +316,26 @@ impl E1000eDev {
         Ok(())
     }
 
-    fn reset(&self) -> Result<(), u32> {
+    fn reset(regs: &Registers) -> Result<(), u32> {
         // disable interrupts so we won't mess things up
-        self.regs.write(defs::REG_IMC, 0xffffffff);
+        regs.write(defs::REG_IMC, 0xffffffff);
 
-        let ctrl = self.regs.read(defs::REG_CTRL);
-        self.regs.write(defs::REG_CTRL, ctrl | defs::CTRL_GIOD);
+        let ctrl = regs.read(defs::REG_CTRL);
+        regs.write(defs::REG_CTRL, ctrl | defs::CTRL_GIOD);
 
-        while self.regs.read(defs::REG_STAT) & defs::STAT_GIOE != 0 {
+        while regs.read(defs::REG_STAT) & defs::STAT_GIOE != 0 {
             // wait for link up
         }
 
-        let ctrl = self.regs.read(defs::REG_CTRL);
-        self.regs.write(defs::REG_CTRL, ctrl | defs::CTRL_RST);
+        let ctrl = regs.read(defs::REG_CTRL);
+        regs.write(defs::REG_CTRL, ctrl | defs::CTRL_RST);
 
-        while self.regs.read(defs::REG_CTRL) & defs::CTRL_RST != 0 {
+        while regs.read(defs::REG_CTRL) & defs::CTRL_RST != 0 {
             // wait for reset
         }
 
         // disable interrupts again
-        self.regs.write(defs::REG_IMC, 0xffffffff);
+        regs.write(defs::REG_IMC, 0xffffffff);
 
         Ok(())
     }
@@ -360,64 +352,45 @@ impl E1000eDev {
         Ok(())
     }
 
-    pub fn new(base: PAddr, irq_no: usize) -> Result<Self, u32> {
-        let page = Page::zeroed();
+    pub fn new(base: PAddr, irq_no: usize) -> KResult<Self> {
+        let regs = Registers::new(base);
+        Self::reset(&regs)?;
 
-        let mut dev = Self {
+        let dev = Self {
             irq_no,
-            mac: [0; 6],
+            mac: regs.read_as(0x5400),
             status: netdev::LinkStatus::Down,
             speed: netdev::LinkSpeed::SpeedUnknown,
             id: netdev::alloc_id(),
-            regs: Registers::new(base),
-            rt_desc_page: page,
+            regs,
+            rt_desc_page: PageExcl::zeroed(),
             rx_head: None,
             rx_tail: None,
             tx_tail: None,
-            rx_buffers: None,
-            tx_buffers: None,
+            rx_buffers: Box::new(core::array::from_fn(|_| PageExcl::alloc_order(2))),
+            tx_buffers: Box::new([const { None }; 32]),
         };
 
-        dev.reset()?;
-
-        dev.mac = dev.regs.read_as(0x5400);
-        dev.tx_buffers = Some(Box::new(Vec::with_capacity(TX_DESC_SIZE)));
-
-        let mut rx_buffers = Box::new(Vec::with_capacity(RX_DESC_SIZE));
-
-        for index in 0..RX_DESC_SIZE {
-            let page = Page::alloc_order(2);
-
-            let ref mut desc = dev.rx_desc_table()[index];
-            desc.buffer = PAddr::from(page.pfn()).addr() as u64;
-            desc.status = 0;
-
-            rx_buffers.push(page);
-        }
+        unsafe {
+            for (desc, page) in dev.rx_desc_table().into_iter().zip(dev.rx_buffers.iter()) {
+                desc.buffer = page.start().addr() as u64;
+                desc.status = 0;
+            }
 
-        for index in 0..TX_DESC_SIZE {
-            let ref mut desc = dev.tx_desc_table()[index];
-            desc.status = defs::TXD_STAT_DD;
+            for desc in dev.tx_desc_table() {
+                desc.status = defs::TXD_STAT_DD;
+            }
         }
 
-        dev.rx_buffers = Some(rx_buffers);
-
         Ok(dev)
     }
 
-    fn rx_desc_table(&self) -> &mut [RxDescriptor; RX_DESC_SIZE] {
-        unsafe {
-            // SAFETY: TODO
-            self.rt_desc_page.as_memblk().as_ptr().as_mut()
-        }
+    unsafe fn rx_desc_table(&self) -> &mut [RxDescriptor; RX_DESC_SIZE] {
+        self.rt_desc_page.get_ptr().cast().as_mut()
     }
 
-    fn tx_desc_table(&self) -> &mut [TxDescriptor; TX_DESC_SIZE] {
-        let (_, right) = self.rt_desc_page.as_memblk().split_at(0x200);
-        unsafe {
-            // SAFETY: TODO
-            right.as_ptr().as_mut()
-        }
+    unsafe fn tx_desc_table(&self) -> &mut [TxDescriptor; TX_DESC_SIZE] {
+        self.rt_desc_page.get_ptr().add(0x200).cast().as_mut()
     }
 }
 
@@ -425,12 +398,8 @@ impl Drop for E1000eDev {
     fn drop(&mut self) {
         assert_eq!(self.status, netdev::LinkStatus::Down);
 
-        if let Some(_) = self.rx_buffers.take() {}
-
-        // TODO: we should wait until all packets are sent
-        if let Some(_) = self.tx_buffers.take() {}
-
-        let _ = self.rt_desc_page;
+        // TODO: we should wait until all packets are sent before dropping
+        //       tx buffers.
     }
 }
 

+ 25 - 30
src/driver/virtio/virtio_blk.rs

@@ -1,21 +1,19 @@
-use crate::{
-    io::Chunks,
-    kernel::{
-        block::{BlockDeviceRequest, BlockRequestQueue},
-        constants::EIO,
-        mem::{AsMemoryBlock, Page},
-    },
-    prelude::KResult,
-};
 use alloc::boxed::Box;
+
 use async_trait::async_trait;
 use eonix_hal::mm::ArchPhysAccess;
-use eonix_mm::{
-    address::{Addr, PAddr, PhysAccess},
-    paging::PFN,
-};
+use eonix_mm::address::{Addr, PAddr, PhysAccess};
+use eonix_mm::paging::PFN;
 use eonix_sync::Spin;
-use virtio_drivers::{device::blk::VirtIOBlk, transport::Transport, Hal};
+use virtio_drivers::device::blk::VirtIOBlk;
+use virtio_drivers::transport::Transport;
+use virtio_drivers::Hal;
+
+use crate::io::Chunks;
+use crate::kernel::block::{BlockDeviceRequest, BlockRequestQueue};
+use crate::kernel::constants::EIO;
+use crate::kernel::mem::{Page, PageExt};
+use crate::prelude::KResult;
 
 pub struct HAL;
 
@@ -26,11 +24,10 @@ unsafe impl Hal for HAL {
     ) -> (virtio_drivers::PhysAddr, core::ptr::NonNull<u8>) {
         let page = Page::alloc_at_least(pages);
 
-        let paddr = page.start().addr();
-        let ptr = page.as_memblk().as_byte_ptr();
-        page.into_raw();
+        let ptr = page.get_ptr();
+        let pfn = page.into_raw();
 
-        (paddr, ptr)
+        (PAddr::from(pfn).addr(), ptr)
     }
 
     unsafe fn dma_dealloc(
@@ -93,15 +90,14 @@ where
                 buffer,
             } => {
                 let mut dev = self.lock();
-                for ((start, len), buffer_page) in
+                for ((start, sectors), buffer_page) in
                     Chunks::new(sector as usize, count as usize, 8).zip(buffer.iter())
                 {
-                    let buffer = unsafe {
-                        // SAFETY: Pages in `req.buffer` are guaranteed to be exclusively owned by us.
-                        &buffer_page.as_memblk().as_bytes()[..len as usize * 512]
-                    };
+                    let len = sectors * 512;
+                    let pg = buffer_page.lock();
 
-                    dev.write_blocks(start, buffer).map_err(|_| EIO)?;
+                    dev.write_blocks(start, &pg.as_bytes()[..len])
+                        .map_err(|_| EIO)?;
                 }
             }
             BlockDeviceRequest::Read {
@@ -110,15 +106,14 @@ where
                 buffer,
             } => {
                 let mut dev = self.lock();
-                for ((start, len), buffer_page) in
+                for ((start, sectors), buffer_page) in
                     Chunks::new(sector as usize, count as usize, 8).zip(buffer.iter())
                 {
-                    let buffer = unsafe {
-                        // SAFETY: Pages in `req.buffer` are guaranteed to be exclusively owned by us.
-                        &mut buffer_page.as_memblk().as_bytes_mut()[..len as usize * 512]
-                    };
+                    let len = sectors * 512;
+                    let mut pg = buffer_page.lock();
 
-                    dev.read_blocks(start, buffer).map_err(|_| EIO)?;
+                    dev.read_blocks(start, &mut pg.as_bytes_mut()[..len])
+                        .map_err(|_| EIO)?;
                 }
             }
         }

+ 24 - 40
src/fs/fat32.rs

@@ -1,45 +1,38 @@
 mod dir;
 mod file;
 
+use alloc::sync::{Arc, Weak};
 use core::future::Future;
 use core::ops::Deref;
 
-use alloc::sync::{Arc, Weak};
 use async_trait::async_trait;
 use dir::{as_raw_dirents, ParseDirent};
 use eonix_sync::RwLock;
 use itertools::Itertools;
 
+use crate::io::{Buffer, ByteBuffer, UninitBuffer};
+use crate::kernel::block::{BlockDevice, BlockDeviceRequest};
 use crate::kernel::constants::{EINVAL, EIO};
-use crate::kernel::mem::{AsMemoryBlock, CachePageStream};
+use crate::kernel::mem::{
+    CachePage, CachePageStream, Page, PageCache, PageCacheBackendOps, PageExcl, PageExt,
+};
 use crate::kernel::timer::Instant;
-use crate::kernel::vfs::inode::{InodeDirOps, InodeFileOps, InodeInfo, InodeOps, InodeUse};
+use crate::kernel::vfs::dentry::Dentry;
+use crate::kernel::vfs::inode::{
+    Ino, Inode, InodeDirOps, InodeFileOps, InodeInfo, InodeOps, InodeUse,
+};
+use crate::kernel::vfs::mount::{register_filesystem, Mount, MountCreator};
 use crate::kernel::vfs::types::{DeviceId, Format, Permission};
 use crate::kernel::vfs::{SbRef, SbUse, SuperBlock, SuperBlockInfo};
 use crate::prelude::*;
-use crate::{
-    io::{Buffer, ByteBuffer, UninitBuffer},
-    kernel::{
-        block::{BlockDevice, BlockDeviceRequest},
-        mem::{
-            paging::Page,
-            {CachePage, PageCache, PageCacheBackendOps},
-        },
-        vfs::{
-            dentry::Dentry,
-            inode::{Ino, Inode},
-            mount::{register_filesystem, Mount, MountCreator},
-        },
-    },
-    KResult,
-};
+use crate::KResult;
 
 #[repr(transparent)]
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 struct Cluster(u32);
 
 #[repr(transparent)]
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
 struct RawCluster(pub u32);
 
 impl RawCluster {
@@ -70,7 +63,7 @@ impl Cluster {
 
 const SECTOR_SIZE: usize = 512;
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 #[repr(C, packed)]
 struct Bootsector {
     jmp: [u8; 3],
@@ -302,10 +295,8 @@ impl InodeFileOps for FileInode {
         for cluster in cluster_iter {
             fs.read_cluster(cluster, &buffer_page).await?;
 
-            let data = unsafe {
-                // SAFETY: We are the only one holding this page.
-                &buffer_page.as_memblk().as_bytes()[inner_offset..]
-            };
+            let pg = buffer_page.lock();
+            let data = &pg.as_bytes()[inner_offset..];
 
             let end = offset + data.len();
             let real_end = end.min(self.info.lock().size as usize);
@@ -340,7 +331,7 @@ struct DirInode {
     sb: SbRef<FatFs>,
 
     // TODO: Use the new PageCache...
-    dir_pages: RwLock<Vec<Page>>,
+    dir_pages: RwLock<Vec<PageExcl>>,
 }
 
 impl DirInode {
@@ -375,7 +366,7 @@ impl DirInode {
         let clusters = ClusterIterator::new(fat.as_ref(), self.cluster);
 
         for cluster in clusters {
-            let page = Page::alloc();
+            let page = PageExcl::alloc();
             fs.read_cluster(cluster, &page).await?;
 
             dir_pages.push(page);
@@ -384,7 +375,7 @@ impl DirInode {
         Ok(())
     }
 
-    async fn get_dir_pages(&self) -> KResult<impl Deref<Target = Vec<Page>> + use<'_>> {
+    async fn get_dir_pages(&self) -> KResult<impl Deref<Target = Vec<PageExcl>> + use<'_>> {
         {
             let dir_pages = self.dir_pages.read().await;
             if !dir_pages.is_empty() {
@@ -432,12 +423,7 @@ impl InodeDirOps for DirInode {
         let sb = self.sb.get()?;
         let dir_pages = self.get_dir_pages().await?;
 
-        let dir_data = dir_pages.iter().map(|page| {
-            unsafe {
-                // SAFETY: No one could be writing to it.
-                page.as_memblk().as_bytes()
-            }
-        });
+        let dir_data = dir_pages.iter().map(|pg| pg.as_bytes());
 
         let raw_dirents = dir_data
             .map(as_raw_dirents)
@@ -481,12 +467,10 @@ impl InodeDirOps for DirInode {
             let inner_offset = offset % cluster_size;
             let inner_raw_dirent_offset = inner_offset / core::mem::size_of::<dir::RawDirEntry>();
 
-            let dir_data = dir_pages.iter().skip(cluster_offset).map(|page| {
-                unsafe {
-                    // SAFETY: No one could be writing to it.
-                    page.as_memblk().as_bytes()
-                }
-            });
+            let dir_data = dir_pages
+                .iter()
+                .skip(cluster_offset)
+                .map(|pg| pg.as_bytes());
 
             let raw_dirents = dir_data
                 .map(as_raw_dirents)

+ 20 - 10
src/io.rs

@@ -1,6 +1,8 @@
+use core::mem::MaybeUninit;
+use core::ops::{Add, AddAssign, Sub};
+
 use crate::kernel::constants::EFAULT;
 use crate::prelude::*;
-use core::{cmp, mem::MaybeUninit};
 
 #[must_use]
 #[derive(Debug)]
@@ -236,18 +238,26 @@ impl Buffer for ByteBuffer<'_> {
     }
 }
 
+pub trait Integer:
+    Add<Output = Self> + Sub<Output = Self> + AddAssign + Copy + PartialOrd + Ord
+{
+}
+
+impl Integer for u64 {}
+impl Integer for usize {}
+
 /// Iterator that generates chunks of a given length from a start index
 /// until the end of the total length.
 ///
 /// The iterator returns a tuple of (start, len) for each chunk.
-pub struct Chunks {
-    end: usize,
-    cur: usize,
-    chunk_len: usize,
+pub struct Chunks<T: Integer> {
+    end: T,
+    cur: T,
+    chunk_len: T,
 }
 
-impl Chunks {
-    pub const fn new(start: usize, total_len: usize, chunk_len: usize) -> Self {
+impl<T: Integer> Chunks<T> {
+    pub fn new(start: T, total_len: T, chunk_len: T) -> Self {
         Self {
             end: start + total_len,
             cur: start,
@@ -256,8 +266,8 @@ impl Chunks {
     }
 }
 
-impl Iterator for Chunks {
-    type Item = (usize, usize);
+impl<T: Integer> Iterator for Chunks<T> {
+    type Item = (T, T);
 
     fn next(&mut self) -> Option<Self::Item> {
         if self.cur >= self.end {
@@ -265,7 +275,7 @@ impl Iterator for Chunks {
         }
 
         let start = self.cur;
-        let len = cmp::min(self.chunk_len, self.end - start);
+        let len = self.chunk_len.min(self.end - start);
 
         self.cur += self.chunk_len;
         Some((start, len))
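
As a quick sanity check of the generified iterator (values chosen
arbitrarily):

```rust
// Chunks::new(start, total_len, chunk_len) yields (start, len)
// pairs; the final chunk is truncated to the remaining length.
let chunks: Vec<(u64, u64)> = Chunks::new(10u64, 20, 8).collect();
assert_eq!(chunks, [(10, 8), (18, 8), (26, 4)]);
```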

+ 57 - 165
src/kernel/block.rs

@@ -1,23 +1,20 @@
 mod mbr;
 
-use super::{
-    constants::ENOENT,
-    mem::{paging::Page, AsMemoryBlock as _},
-    vfs::types::DeviceId,
-};
-use crate::kernel::constants::{EEXIST, EINVAL};
-use crate::{
-    io::{Buffer, FillResult},
-    prelude::*,
-};
-use alloc::{
-    collections::btree_map::{BTreeMap, Entry},
-    sync::Arc,
-};
-use async_trait::async_trait;
+use alloc::collections::btree_map::{BTreeMap, Entry};
+use alloc::sync::Arc;
 use core::cmp::Ordering;
+
+use async_trait::async_trait;
 use mbr::MBRPartTable;
 
+use super::constants::ENOENT;
+use super::mem::paging::Page;
+use super::mem::PageExt;
+use super::vfs::types::DeviceId;
+use crate::io::{Buffer, Chunks, FillResult};
+use crate::kernel::constants::{EEXIST, EINVAL};
+use crate::prelude::*;
+
 pub struct Partition {
     pub lba_offset: u64,
     pub sector_count: u64,
@@ -193,177 +190,72 @@ impl BlockDevice {
     /// `offset` - offset in bytes
     ///
     pub async fn read_some(&self, offset: usize, buffer: &mut dyn Buffer) -> KResult<FillResult> {
-        let mut sector_start = offset as u64 / 512;
-        let mut first_sector_offset = offset as u64 % 512;
-        let mut sector_count = (first_sector_offset + buffer.total() as u64 + 511) / 512;
-
-        let mut nfilled = 0;
-        'outer: while sector_count != 0 {
-            let pages: &[Page];
-            let page: Option<Page>;
-            let page_vec: Option<Vec<Page>>;
-
-            let nread;
-
-            match sector_count {
-                count if count <= 8 => {
-                    nread = count;
-
-                    let _page = Page::alloc();
-                    page = Some(_page);
-                    pages = core::slice::from_ref(page.as_ref().unwrap());
+        let sector_start = offset as u64 / 512;
+        let mut first_sector_offset = offset % 512;
+        let nr_sectors = (first_sector_offset + buffer.total() + 511) / 512;
+
+        let nr_sectors_per_batch = self.queue().max_request_pages() / 2 * 2 * 8;
+
+        let mut nr_filled = 0;
+        for (start, nr_batch) in Chunks::new(sector_start, nr_sectors as u64, nr_sectors_per_batch)
+        {
+            let (page_slice, page, mut page_vec);
+            match nr_batch {
+                ..=8 => {
+                    page = Page::alloc();
+                    page_slice = core::slice::from_ref(&page);
                 }
-                count if count <= 16 => {
-                    nread = count;
-
-                    let _pages = Page::alloc_order(1);
-                    page = Some(_pages);
-                    pages = core::slice::from_ref(page.as_ref().unwrap());
+                ..=16 => {
+                    page = Page::alloc_order(1);
+                    page_slice = core::slice::from_ref(&page);
+                }
+                ..=32 => {
+                    page = Page::alloc_order(2);
+                    page_slice = core::slice::from_ref(&page);
                 }
                 count => {
-                    nread = count.min(self.queue().max_request_pages());
+                    let nr_huge_pages = count as usize / 32;
+                    let nr_small_pages = ((count as usize % 32) + 7) / 8;
 
-                    let npages = (nread + 15) / 16;
-                    let mut _page_vec = Vec::with_capacity(npages as usize);
-                    for _ in 0..npages {
-                        _page_vec.push(Page::alloc_order(1));
-                    }
-                    page_vec = Some(_page_vec);
-                    pages = page_vec.as_ref().unwrap().as_slice();
+                    let nr_pages = nr_huge_pages + nr_small_pages;
+                    page_vec = Vec::with_capacity(nr_pages);
+
+                    page_vec.resize_with(nr_huge_pages, || Page::alloc_order(2));
+                    page_vec.resize_with(nr_pages, || Page::alloc());
+                    page_slice = &page_vec;
                 }
             }
 
             let req = BlockDeviceRequest::Read {
-                sector: sector_start,
-                count: nread,
-                buffer: &pages,
+                sector: start,
+                count: nr_batch,
+                buffer: page_slice,
             };
 
             self.commit_request(req).await?;
 
-            for page in pages.iter() {
-                // SAFETY: We are the only owner of the page so no one could be mutating it.
-                let data = unsafe { &page.as_memblk().as_bytes()[first_sector_offset as usize..] };
+            for page in page_slice {
+                let pg = page.lock();
+                let data = &pg.as_bytes()[first_sector_offset..];
                 first_sector_offset = 0;
 
-                match buffer.fill(data)? {
-                    FillResult::Done(n) => nfilled += n,
-                    FillResult::Partial(n) => {
-                        nfilled += n;
-                        break 'outer;
-                    }
-                    FillResult::Full => {
-                        break 'outer;
-                    }
-                }
-            }
-
-            sector_start += nread;
-            sector_count -= nread;
-        }
-
-        if nfilled == buffer.total() {
-            Ok(FillResult::Done(nfilled))
-        } else {
-            Ok(FillResult::Partial(nfilled))
-        }
-    }
-
-    /// Write some data to the block device, may involve some copy and fragmentation
-    ///
-    /// # Arguments
-    /// `offset` - offset in bytes
-    /// `data` - data to write
-    ///
-    pub async fn write_some(&self, offset: usize, data: &[u8]) -> KResult<usize> {
-        let mut sector_start = offset as u64 / 512;
-        let mut first_sector_offset = offset as u64 % 512;
-        let mut remaining_data = data;
-        let mut nwritten = 0;
-
-        while !remaining_data.is_empty() {
-            let pages: &[Page];
-            let page: Option<Page>;
-            let page_vec: Option<Vec<Page>>;
-
-            // Calculate sectors needed for this write
-            let write_end = first_sector_offset + remaining_data.len() as u64;
-            let sector_count = ((write_end + 511) / 512).min(self.queue().max_request_pages());
-
-            match sector_count {
-                count if count <= 8 => {
-                    let _page = Page::alloc();
-                    page = Some(_page);
-                    pages = core::slice::from_ref(page.as_ref().unwrap());
-                }
-                count if count <= 16 => {
-                    let _pages = Page::alloc_order(1);
-                    page = Some(_pages);
-                    pages = core::slice::from_ref(page.as_ref().unwrap());
-                }
-                count => {
-                    let npages = (count + 15) / 16;
-                    let mut _page_vec = Vec::with_capacity(npages as usize);
-                    for _ in 0..npages {
-                        _page_vec.push(Page::alloc_order(1));
-                    }
-                    page_vec = Some(_page_vec);
-                    pages = page_vec.as_ref().unwrap().as_slice();
-                }
-            }
-
-            if first_sector_offset != 0 || remaining_data.len() < (sector_count * 512) as usize {
-                let read_req = BlockDeviceRequest::Read {
-                    sector: sector_start,
-                    count: sector_count,
-                    buffer: pages,
-                };
-                self.commit_request(read_req).await?;
-            }
-
-            let mut data_offset = 0;
-            let mut page_offset = first_sector_offset as usize;
+                nr_filled += buffer.fill(data)?.allow_partial();
 
-            for page in pages.iter() {
-                // SAFETY: We own the page and can modify it
-                let page_data = unsafe {
-                    let memblk = page.as_memblk();
-                    core::slice::from_raw_parts_mut(memblk.addr().get() as *mut u8, memblk.len())
-                };
-
-                let copy_len =
-                    (remaining_data.len() - data_offset).min(page_data.len() - page_offset);
-
-                if copy_len == 0 {
-                    break;
-                }
-
-                page_data[page_offset..page_offset + copy_len]
-                    .copy_from_slice(&remaining_data[data_offset..data_offset + copy_len]);
-
-                data_offset += copy_len;
-                page_offset = 0; // Only first page has offset
-
-                if data_offset >= remaining_data.len() {
+                if buffer.available() == 0 {
                     break;
                 }
             }
 
-            let write_req = BlockDeviceRequest::Write {
-                sector: sector_start,
-                count: sector_count,
-                buffer: pages,
-            };
-            self.commit_request(write_req).await?;
-
-            let bytes_written = data_offset;
-            nwritten += bytes_written;
-            remaining_data = &remaining_data[bytes_written..];
-            sector_start += sector_count;
-            first_sector_offset = 0;
+            if buffer.available() == 0 {
+                break;
+            }
         }
 
-        Ok(nwritten)
+        if buffer.available() == 0 {
+            Ok(FillResult::Done(nr_filled))
+        } else {
+            Ok(FillResult::Partial(nr_filled))
+        }
     }
 }
 

+ 2 - 2
src/kernel/mem.rs

@@ -8,9 +8,9 @@ mod mm_list;
 mod page_alloc;
 mod page_cache;
 
-pub use access::{AsMemoryBlock, MemoryBlock, PhysAccess};
+pub use access::PhysAccess;
 pub(self) use mm_area::MMArea;
 pub use mm_list::{handle_kernel_page_fault, FileMapping, MMList, Mapping, Permission};
 pub use page_alloc::{GlobalPageAlloc, RawPage};
 pub use page_cache::{CachePage, CachePageStream, PageCache, PageCacheBackendOps};
-pub use paging::{Page, PageBuffer};
+pub use paging::{Page, PageBuffer, PageExcl, PageExt};

+ 1 - 117
src/kernel/mem/access.rs

@@ -1,22 +1,7 @@
-use core::{num::NonZero, ptr::NonNull};
+use core::ptr::NonNull;
 use eonix_hal::mm::ArchPhysAccess;
 use eonix_mm::address::{PAddr, PhysAccess as _PhysAccess};
 
-/// A block of memory starting at a non-zero address and having a specific length.
-///
-/// This struct is used to represent a memory block that can be accessed
-/// in the kernel space.
-pub struct MemoryBlock {
-    addr: NonZero<usize>,
-    len: usize,
-}
-
-pub trait AsMemoryBlock {
-    /// Translate the physical page the page object pointing to into kernel
-    /// accessible pointer. Use it with care.
-    fn as_memblk(&self) -> MemoryBlock;
-}
-
 pub trait PhysAccess {
     /// Translate the data that this address is pointing to into kernel
     /// accessible pointer. Use it with care.
@@ -30,107 +15,6 @@ pub trait PhysAccess {
     unsafe fn as_ptr<T>(&self) -> NonNull<T>;
 }
 
-impl MemoryBlock {
-    /// Create a new `MemoryBlock` with the given address and length.
-    ///
-    /// # Safety
-    /// The caller must ensure that the address is valid.
-    /// Otherwise, it may lead to undefined behavior.
-    pub unsafe fn new(addr: NonZero<usize>, len: usize) -> Self {
-        Self { addr, len }
-    }
-
-    /// Get the start address of the memory block.
-    #[allow(dead_code)]
-    pub fn addr(&self) -> NonZero<usize> {
-        self.addr
-    }
-
-    /// Get the length of the memory block.
-    #[allow(dead_code)]
-    pub fn len(&self) -> usize {
-        self.len
-    }
-
-    /// Split the memory block into two parts at the given offset.
-    pub fn split_at(&self, at: usize) -> (Self, Self) {
-        if at > self.len {
-            panic!("Out of bounds");
-        }
-
-        let rhs_start = self.addr.checked_add(at).expect("Overflow");
-
-        let lhs = unsafe { Self::new(self.addr, at) };
-        let rhs = unsafe { Self::new(rhs_start, self.len - at) };
-
-        (lhs, rhs)
-    }
-
-    /// Provide a pointer to the data.
-    ///
-    /// # Safety
-    /// Using the returned pointer is undefined behavior if the address is not
-    ///  properly aligned or the size is not equal to the size of `T`.
-    pub unsafe fn as_ptr_unchecked<T>(&self) -> NonNull<T> {
-        // SAFETY: `self.addr` is a non-zero value.
-        NonNull::new_unchecked(self.addr.get() as *mut T)
-    }
-
-    /// Provide a pointer to the data.
-    ///
-    /// # Panic
-    /// Panic if the address is not properly aligned.
-    pub fn as_ptr<T>(&self) -> NonNull<T> {
-        let alignment = align_of::<T>();
-
-        if self.addr.get() % alignment != 0 {
-            panic!("Alignment error");
-        }
-
-        unsafe {
-            // SAFETY: We've checked that `self.addr` is properly aligned.
-            self.as_ptr_unchecked()
-        }
-    }
-
-    /// Provide a pointer to the bytes.
-    pub fn as_byte_ptr(&self) -> NonNull<u8> {
-        unsafe {
-            // SAFETY: No alignment check is needed for bytes.
-            self.as_ptr_unchecked()
-        }
-    }
-
-    /// Provide immutable access to the data it pointed to.
-    ///
-    /// # Safety
-    /// This function is unsafe because it returns an immutable reference with
-    /// a created lifetime.
-    ///
-    /// The caller must ensure that the data has no other mutable aliases while
-    /// the reference is in use. Otherwise, it may lead to undefined behavior.
-    pub unsafe fn as_bytes<'a>(&self) -> &'a [u8] {
-        core::slice::from_raw_parts(self.as_ptr_unchecked().as_ptr(), self.len)
-    }
-
-    /// Provide mutable access to the data it pointed to.
-    ///
-    /// # Panic
-    /// Panic if the address is not properly aligned or the size is not
-    /// equal to the size of `T`.
-    ///
-    /// # Safety
-    /// This function is unsafe because it returns a mutable reference with a
-    /// created lifetime.
-    ///
-    /// The caller must ensure that the data has no other immutable or mutable
-    /// aliases while the reference is in use.
-    /// Otherwise, it may lead to undefined behavior.
-    pub unsafe fn as_bytes_mut<'a>(&mut self) -> &'a mut [u8] {
-        core::slice::from_raw_parts_mut(self.as_ptr_unchecked().as_ptr(), self.len)
-    }
-}
-
 impl PhysAccess for PAddr {
     unsafe fn as_ptr<T>(&self) -> NonNull<T> {
         ArchPhysAccess::as_ptr(*self)

+ 5 - 3
src/kernel/mem/allocator.rs

@@ -1,13 +1,15 @@
-use super::page_alloc::RawPagePtr;
-use super::{AsMemoryBlock, GlobalPageAlloc, Page};
 use core::alloc::{GlobalAlloc, Layout};
 use core::ptr::NonNull;
+
 use eonix_hal::mm::ArchPhysAccess;
 use eonix_mm::address::PhysAccess;
 use eonix_mm::paging::{PAGE_SIZE_BITS, PFN};
 use eonix_sync::LazyLock;
 use slab_allocator::SlabAllocator;
 
+use super::page_alloc::RawPagePtr;
+use super::{GlobalPageAlloc, Page, PageExt};
+
 static SLAB_ALLOCATOR: LazyLock<SlabAllocator<RawPagePtr, GlobalPageAlloc, 9>> =
     LazyLock::new(|| SlabAllocator::new_in(GlobalPageAlloc));
 
@@ -23,7 +25,7 @@ unsafe impl GlobalAlloc for Allocator {
             let page_count = size >> PAGE_SIZE_BITS;
             let page = Page::alloc_at_least(page_count);
 
-            let ptr = page.as_memblk().as_ptr();
+            let ptr = page.get_ptr();
             page.into_raw();
 
             ptr.as_ptr()

+ 25 - 27
src/kernel/mem/mm_area.rs

@@ -1,15 +1,17 @@
-use super::mm_list::EMPTY_PAGE;
-use super::paging::AllocZeroed as _;
-use super::{AsMemoryBlock, Mapping, Page, Permission};
-use crate::kernel::constants::EINVAL;
-use crate::prelude::KResult;
 use core::borrow::Borrow;
 use core::cell::UnsafeCell;
 use core::cmp;
+
 use eonix_mm::address::{AddrOps as _, VAddr, VRange};
 use eonix_mm::page_table::{PageAttribute, RawAttribute, PTE};
 use eonix_mm::paging::{PAGE_SIZE, PFN};
 
+use super::mm_list::EMPTY_PAGE;
+use super::{Mapping, Page, Permission};
+use crate::kernel::constants::EINVAL;
+use crate::kernel::mem::{PageExcl, PageExt};
+use crate::prelude::KResult;
+
 #[derive(Debug)]
 pub struct MMArea {
     range: UnsafeCell<VRange>,
@@ -105,25 +107,23 @@ impl MMArea {
             return;
         }
 
-        let new_page;
+        let mut new_page;
         if *pfn == EMPTY_PAGE.pfn() {
-            new_page = Page::zeroed();
+            new_page = PageExcl::zeroed();
         } else {
-            new_page = Page::alloc();
+            new_page = PageExcl::alloc();
 
             unsafe {
                 // SAFETY: `page` is CoW, which means that others won't write to it.
-                let old_page_data = page.as_memblk().as_bytes();
-
-                // SAFETY: `new_page` is exclusive owned by us.
-                let new_page_data = new_page.as_memblk().as_bytes_mut();
+                let old_page_data = page.get_bytes_ptr().as_ref();
+                let new_page_data = new_page.as_bytes_mut();
 
                 new_page_data.copy_from_slice(old_page_data);
             };
         }
 
         attr.remove(PageAttribute::ACCESSED);
-        *pfn = new_page.into_raw();
+        *pfn = new_page.into_page().into_raw();
     }
 
     /// # Arguments
@@ -156,13 +156,12 @@ impl MMArea {
                     // Bss is embarrassing in pagecache!
                     // We have to assume cnt_to_read < PAGE_SIZE all bss
                     if cnt_to_read < PAGE_SIZE {
-                        let new_page = Page::zeroed();
-                        unsafe {
-                            let page_data = new_page.as_memblk().as_bytes_mut();
-                            page_data[..cnt_to_read]
-                                .copy_from_slice(&page.as_memblk().as_bytes()[..cnt_to_read]);
-                        }
-                        *pfn = new_page.into_raw();
+                        let mut new_page = PageExcl::zeroed();
+
+                        new_page.as_bytes_mut()[..cnt_to_read]
+                            .copy_from_slice(&page.lock().as_bytes()[..cnt_to_read]);
+
+                        *pfn = new_page.into_page().into_raw();
                     } else {
                         *pfn = page.clone().into_raw();
                     }
@@ -182,13 +181,12 @@ impl MMArea {
                         cache_page.set_dirty();
                         *pfn = page.clone().into_raw();
                     } else {
-                        let new_page = Page::zeroed();
-                        unsafe {
-                            let page_data = new_page.as_memblk().as_bytes_mut();
-                            page_data[..cnt_to_read]
-                                .copy_from_slice(&page.as_memblk().as_bytes()[..cnt_to_read]);
-                        }
-                        *pfn = new_page.into_raw();
+                        let mut new_page = PageExcl::zeroed();
+
+                        new_page.as_bytes_mut()[..cnt_to_read]
+                            .copy_from_slice(&page.lock().as_bytes()[..cnt_to_read]);
+
+                        *pfn = new_page.into_page().into_raw();
                     }
 
                     attr.insert(PageAttribute::WRITE);

+ 17 - 22
src/kernel/mem/mm_list.rs

@@ -1,33 +1,30 @@
 mod mapping;
 mod page_fault;
 
-use super::address::{VAddrExt as _, VRangeExt as _};
-use super::page_alloc::GlobalPageAlloc;
-use super::paging::AllocZeroed as _;
-use super::{AsMemoryBlock, MMArea, Page};
-use crate::kernel::constants::{EEXIST, EFAULT, EINVAL, ENOMEM};
-use crate::kernel::mem::page_alloc::RawPagePtr;
-use crate::{prelude::*, sync::ArcSwap};
 use alloc::collections::btree_set::BTreeSet;
 use core::fmt;
 use core::sync::atomic::{AtomicUsize, Ordering};
+
 use eonix_hal::mm::{
     flush_tlb_all, get_root_page_table_pfn, set_root_page_table_pfn, ArchPagingMode,
     ArchPhysAccess, GLOBAL_PAGE_TABLE,
 };
-use eonix_mm::address::{Addr as _, PAddr};
-use eonix_mm::page_table::PageAttribute;
-use eonix_mm::paging::PFN;
-use eonix_mm::{
-    address::{AddrOps as _, VAddr, VRange},
-    page_table::{PageTable, RawAttribute, PTE},
-    paging::PAGE_SIZE,
-};
+use eonix_mm::address::{Addr as _, AddrOps as _, PAddr, VAddr, VRange};
+use eonix_mm::page_table::{PageAttribute, PageTable, RawAttribute, PTE};
+use eonix_mm::paging::{PAGE_SIZE, PFN};
 use eonix_sync::{LazyLock, Mutex};
-
 pub use mapping::{FileMapping, Mapping};
 pub use page_fault::handle_kernel_page_fault;
 
+use super::address::{VAddrExt as _, VRangeExt as _};
+use super::page_alloc::GlobalPageAlloc;
+use super::paging::AllocZeroed as _;
+use super::{MMArea, Page, PageExt};
+use crate::kernel::constants::{EEXIST, EFAULT, EINVAL, ENOMEM};
+use crate::kernel::mem::page_alloc::RawPagePtr;
+use crate::prelude::*;
+use crate::sync::ArcSwap;
+
 pub static EMPTY_PAGE: LazyLock<Page> = LazyLock::new(|| Page::zeroed());
 
 #[derive(Debug, Clone, Copy)]
@@ -697,12 +694,10 @@ impl MMList {
                 unsafe {
                     // SAFETY: We are sure that the page is valid and we have the right to access it.
                     Page::with_raw(pte.get_pfn(), |page| {
-                        // SAFETY: The caller guarantees that no one else is using the page.
-                        let page_data = page.as_memblk().as_bytes_mut();
-                        func(
-                            offset + idx * 0x1000,
-                            &mut page_data[start_offset..end_offset],
-                        );
+                        let mut pg = page.lock();
+                        let page_data = &mut pg.as_bytes_mut()[start_offset..end_offset];
+
+                        func(offset + idx * 0x1000, page_data);
                     });
                 }
             }

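With `MemoryBlock` gone, byte access to a possibly shared page now goes through `lock()`, which returns a `PageLocked` guard that derefs to the page and hands out slices without per-call-site `unsafe`. A hypothetical caller might look like the sketch below; note that `lock()` is still a stub in this commit (see the TODO in src/kernel/mem/paging.rs), so the exclusion it implies is not yet enforced:

```rust
use crate::kernel::mem::{Page, PageExt};

/// Zero a sub-range of a possibly shared page.
/// Illustrative helper, not part of this commit.
fn zero_range(page: &Page, start: usize, end: usize) {
    let mut guard = page.lock();
    guard.as_bytes_mut()[start..end].fill(0);
    // The page lock (once implemented) is released when `guard` drops.
}
```
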
+ 8 - 1
src/kernel/mem/page_alloc/raw_page.rs

@@ -81,7 +81,7 @@ pub struct RawPagePtr(NonNull<RawPage>);
 
 impl PageFlags {
     pub const PRESENT: u32 = 1 << 0;
-    // pub const LOCKED: u32 = 1 << 1;
+    pub const LOCKED: u32 = 1 << 1;
     pub const BUDDY: u32 = 1 << 2;
     pub const SLAB: u32 = 1 << 3;
     pub const DIRTY: u32 = 1 << 4;
@@ -99,6 +99,13 @@ impl PageFlags {
     pub fn clear(&self, flag: u32) {
         self.0.fetch_and(!flag, Ordering::Relaxed);
     }
+
+    /// Set the flag and return whether it was already set.
+    ///
+    /// If multiple flags are given, returns true if any of them were already set.
+    pub fn test_and_set(&self, flag: u32) -> bool {
+        (self.0.fetch_or(flag, Ordering::Relaxed) & flag) != 0
+    }
 }
 
 impl RawPagePtr {

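Re-enabling `LOCKED` alongside `test_and_set` provides the atomic primitive a page lock can be built on: set the bit and learn, in one step, whether another holder already had it. One plausible spin-acquire shape is sketched below; it is speculative, since the commit message says the lock itself lands in later patches, and a real lock would also need acquire/release ordering rather than the `Relaxed` that `test_and_set` uses today:

```rust
use core::hint::spin_loop;

/// Speculative sketch only, not part of this commit.
fn lock_page(flags: &PageFlags) {
    // Loop until we are the one who flips LOCKED from 0 to 1.
    while flags.test_and_set(PageFlags::LOCKED) {
        spin_loop();
    }
}

fn unlock_page(flags: &PageFlags) {
    flags.clear(PageFlags::LOCKED);
}
```
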
+ 15 - 22
src/kernel/mem/page_cache.rs

@@ -1,22 +1,23 @@
-use super::{paging::AllocZeroed, Page};
-use crate::{
-    io::{Buffer, FillResult, Stream},
-    kernel::mem::page_alloc::RawPagePtr,
-    prelude::KResult,
-    GlobalPageAlloc,
-};
-use align_ext::AlignExt;
 use alloc::boxed::Box;
-use alloc::{collections::btree_map::BTreeMap, sync::Weak};
+use alloc::collections::btree_map::BTreeMap;
+use alloc::sync::Weak;
+use core::future::Future;
+use core::mem::ManuallyDrop;
+
+use align_ext::AlignExt;
 use async_trait::async_trait;
-use core::{future::Future, mem::ManuallyDrop};
 use eonix_hal::mm::ArchPhysAccess;
-use eonix_mm::{
-    address::{PAddr, PhysAccess},
-    paging::{PageAlloc, RawPage, PAGE_SIZE, PAGE_SIZE_BITS, PFN},
-};
+use eonix_mm::address::{PAddr, PhysAccess};
+use eonix_mm::paging::{PageAlloc, RawPage, PAGE_SIZE, PAGE_SIZE_BITS, PFN};
 use eonix_sync::Mutex;
 
+use super::paging::AllocZeroed;
+use super::Page;
+use crate::io::{Buffer, FillResult, Stream};
+use crate::kernel::mem::page_alloc::RawPagePtr;
+use crate::prelude::KResult;
+use crate::GlobalPageAlloc;
+
 pub struct PageCache {
     pages: Mutex<BTreeMap<usize, CachePage>>,
     backend: Weak<dyn PageCacheBackend>,
@@ -315,14 +316,6 @@ impl CachePageStream {
     pub fn new(page: CachePage) -> Self {
         Self { page, cur: 0 }
     }
-
-    pub fn remaining(&self) -> usize {
-        self.page.valid_size().saturating_sub(self.cur)
-    }
-
-    pub fn is_drained(&self) -> bool {
-        self.cur >= self.page.valid_size()
-    }
 }
 
 impl Stream for CachePageStream {

+ 108 - 21
src/kernel/mem/paging.rs

@@ -1,41 +1,54 @@
-use super::{access::AsMemoryBlock, page_alloc::GlobalPageAlloc, MemoryBlock, PhysAccess};
+use core::ops::Deref;
+use core::ptr::NonNull;
+
+use eonix_mm::paging::Page as GenericPage;
+
+use super::page_alloc::GlobalPageAlloc;
+use super::PhysAccess;
 use crate::io::{Buffer, FillResult};
-use eonix_mm::paging::{Page as GenericPage, PageAlloc};
 
 pub type Page = GenericPage<GlobalPageAlloc>;
 
 /// A buffer that wraps a page and provides a `Buffer` interface.
 pub struct PageBuffer {
-    page: Page,
+    page: PageExcl,
     offset: usize,
 }
 
+/// A guard for a page whose per-page lock is held, granting
+/// byte-level access for the guard's lifetime.
+pub struct PageLocked<'a> {
+    page: &'a Page,
+}
+
+/// A page that is exclusively owned.
+#[repr(transparent)]
+pub struct PageExcl(Page);
+
 pub trait AllocZeroed {
     fn zeroed() -> Self;
 }
 
-impl<A: PageAlloc> AsMemoryBlock for GenericPage<A> {
-    fn as_memblk(&self) -> MemoryBlock {
-        unsafe {
-            // SAFETY: `self.start()` points to valid memory of length `self.len()`.
-            MemoryBlock::new(self.start().as_ptr::<()>().addr(), self.len())
-        }
+pub trait PageExt {
+    /// Acquire the page lock, returning a guard that grants byte access.
+    fn lock(&self) -> PageLocked;
+
+    /// Get a vmem pointer to the page data as a byte slice.
+    fn get_bytes_ptr(&self) -> NonNull<[u8]>;
+
+    /// Get a vmem pointer to the start of the page.
+    fn get_ptr(&self) -> NonNull<u8> {
+        self.get_bytes_ptr().cast()
     }
 }
 
 impl PageBuffer {
     pub fn new() -> Self {
         Self {
-            page: Page::alloc(),
+            page: PageExcl::alloc(),
             offset: 0,
         }
     }
 
     pub fn all(&self) -> &[u8] {
-        unsafe {
-            // SAFETY: The page is exclusivly owned by us.
-            self.page.as_memblk().as_bytes()
-        }
+        self.page.as_bytes()
     }
 
     pub fn data(&self) -> &[u8] {
@@ -43,10 +56,7 @@ impl PageBuffer {
     }
 
     pub fn available_mut(&mut self) -> &mut [u8] {
-        unsafe {
-            // SAFETY: The page is exclusivly owned by us.
-            &mut self.page.as_memblk().as_bytes_mut()[self.offset..]
-        }
+        &mut self.page.as_bytes_mut()[self.offset..]
     }
 }
 
@@ -80,10 +90,87 @@ impl Buffer for PageBuffer {
 impl AllocZeroed for Page {
     fn zeroed() -> Self {
         let page = Self::alloc();
+
+        page.lock().as_bytes_mut().fill(0);
+
+        page
+    }
+}
+
+impl PageExt for Page {
+    fn lock(&self) -> PageLocked {
+        // TODO: Actually perform the lock.
+        PageLocked { page: self }
+    }
+
+    fn get_bytes_ptr(&self) -> NonNull<[u8]> {
         unsafe {
-            // SAFETY: The page is exclusivly owned by us.
-            page.as_memblk().as_bytes_mut().fill(0);
+            // SAFETY: `self.start()` can't be null.
+            NonNull::slice_from_raw_parts(self.start().as_ptr(), self.len())
         }
-        page
+    }
+}
+
+impl PageLocked<'_> {
+    pub fn as_bytes(&self) -> &[u8] {
+        unsafe {
+            // SAFETY: `self.start()` points to valid memory of length `self.len()`.
+            core::slice::from_raw_parts(self.start().as_ptr().as_ptr(), self.len())
+        }
+    }
+
+    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
+        unsafe {
+            // SAFETY: `self.start()` points to valid memory of length `self.len()`.
+            core::slice::from_raw_parts_mut(self.start().as_ptr().as_ptr(), self.len())
+        }
+    }
+}
+
+impl Deref for PageLocked<'_> {
+    type Target = Page;
+
+    fn deref(&self) -> &Self::Target {
+        self.page
+    }
+}
+
+impl PageExcl {
+    pub fn alloc() -> Self {
+        Self(Page::alloc())
+    }
+
+    pub fn alloc_order(order: u32) -> Self {
+        Self(Page::alloc_order(order))
+    }
+
+    pub fn zeroed() -> Self {
+        Self(Page::zeroed())
+    }
+
+    pub fn as_bytes(&self) -> &[u8] {
+        unsafe {
+            // SAFETY: The page is exclusively owned by us.
+            self.get_bytes_ptr().as_ref()
+        }
+    }
+
+    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
+        unsafe {
+            // SAFETY: The page is exclusively owned by us.
+            self.get_bytes_ptr().as_mut()
+        }
+    }
+
+    pub fn into_page(self) -> Page {
+        self.0
+    }
+}
+
+impl Deref for PageExcl {
+    type Target = Page;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
     }
 }

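`paging.rs` now exposes the two access disciplines side by side: `PageExcl` for pages whose exclusivity Rust's ownership already guarantees (no lock, no `unsafe` at the call site), and `PageExt::lock()` for everything that may be shared. The intended lifecycle, sketched with the APIs above (the payload bytes are illustrative):

```rust
use crate::kernel::mem::{Page, PageExcl};

fn build_and_publish() -> Page {
    // Exclusively owned: mutate freely, no lock required.
    let mut page = PageExcl::zeroed();
    page.as_bytes_mut()[..4].copy_from_slice(b"data");

    // Beyond this point the page may gain other references,
    // so further byte access must go through `lock()`.
    page.into_page()
}
```

The newly added `static_assertions` dependency could back the `#[repr(transparent)]` claim with, say, `assert_eq_size!(PageExcl, Page)`, though no use site appears in the hunks quoted here.
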
+ 14 - 20
src/kernel/vfs/file/mod.rs

@@ -2,29 +2,24 @@ mod inode_file;
 mod pipe;
 mod terminal_file;
 
-use crate::{
-    io::{Buffer, ByteBuffer, Chunks, IntoStream, Stream},
-    kernel::{
-        constants::{EBADF, EINTR, EINVAL, ENOTTY},
-        mem::{AsMemoryBlock, Page},
-        task::Thread,
-        CharDevice,
-    },
-    prelude::KResult,
-};
 use alloc::sync::Arc;
-use bitflags::bitflags;
-use core::{
-    ops::Deref,
-    sync::atomic::{AtomicI32, AtomicU32, Ordering},
-};
-use pipe::{PipeReadEnd, PipeWriteEnd};
-use posix_types::open::OpenFlags;
+use core::ops::Deref;
+use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};
 
+use bitflags::bitflags;
 pub use inode_file::InodeFile;
 pub use pipe::Pipe;
+use pipe::{PipeReadEnd, PipeWriteEnd};
+use posix_types::open::OpenFlags;
 pub use terminal_file::TerminalFile;
 
+use crate::io::{Buffer, ByteBuffer, Chunks, IntoStream, Stream};
+use crate::kernel::constants::{EBADF, EINTR, EINVAL, ENOTTY};
+use crate::kernel::mem::PageExcl;
+use crate::kernel::task::Thread;
+use crate::kernel::CharDevice;
+use crate::prelude::KResult;
+
 pub enum FileType {
     Inode(InodeFile),
     PipeRead(PipeReadEnd),
@@ -99,9 +94,8 @@ impl FileType {
     }
 
     pub async fn sendfile(&self, dest_file: &Self, count: usize) -> KResult<usize> {
-        let buffer_page = Page::alloc();
-        // SAFETY: We are the only owner of the page.
-        let buffer = unsafe { buffer_page.as_memblk().as_bytes_mut() };
+        let mut buffer_page = PageExcl::alloc();
+        let buffer = buffer_page.as_bytes_mut();
 
         self.sendfile_check()?;
 

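`sendfile` shows the payoff at a call site: the old `unsafe` block and its SAFETY comment collapse into an ordinary `&mut [u8]`, because `PageExcl` encodes the "we are the only owner" argument in the type. The same idiom suits any transient kernel scratch buffer; a hedged sketch (the helper itself is hypothetical):

```rust
use crate::kernel::mem::PageExcl;

/// Copy as much of `src` as fits into a page-sized scratch buffer.
/// Hypothetical helper, for illustration only.
fn stage_into_scratch(src: &[u8]) -> (PageExcl, usize) {
    let mut scratch = PageExcl::alloc();
    let buf = scratch.as_bytes_mut();
    let n = src.len().min(buf.len());
    buf[..n].copy_from_slice(&src[..n]);
    (scratch, n)
}
```
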
+ 25 - 25
src/lib.rs

@@ -11,6 +11,9 @@
 
 extern crate alloc;
 
+#[macro_use]
+extern crate static_assertions;
+
 #[cfg(any(target_arch = "riscv64", target_arch = "x86_64"))]
 extern crate unwinding;
 
@@ -28,36 +31,33 @@ mod prelude;
 mod rcu;
 mod sync;
 
-use crate::kernel::task::alloc_pid;
-use alloc::{ffi::CString, sync::Arc};
-use core::{
-    hint::spin_loop,
-    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
-};
-use eonix_hal::{
-    arch_exported::bootstrap::shutdown,
-    context::TaskContext,
-    processor::{halt, CPU, CPU_COUNT},
-    traits::{context::RawTaskContext, trap::IrqState},
-    trap::disable_irqs_save,
-};
+use alloc::ffi::CString;
+use alloc::sync::Arc;
+use core::hint::spin_loop;
+use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+
+use eonix_hal::arch_exported::bootstrap::shutdown;
+use eonix_hal::context::TaskContext;
+use eonix_hal::processor::{halt, CPU, CPU_COUNT};
+use eonix_hal::traits::context::RawTaskContext;
+use eonix_hal::traits::trap::IrqState;
+use eonix_hal::trap::disable_irqs_save;
 use eonix_mm::address::PRange;
-use eonix_runtime::{executor::Stack, scheduler::RUNTIME};
-use kernel::{
-    mem::GlobalPageAlloc,
-    task::{KernelStack, ProcessBuilder, ProcessList, ProgramLoader, ThreadBuilder},
-    vfs::{
-        dentry::Dentry,
-        mount::{do_mount, MS_NOATIME, MS_NODEV, MS_NOSUID, MS_RDONLY},
-        types::Permission,
-        FsContext,
-    },
-    CharDevice,
-};
+use eonix_runtime::executor::Stack;
+use eonix_runtime::scheduler::RUNTIME;
+use kernel::mem::GlobalPageAlloc;
+use kernel::task::{KernelStack, ProcessBuilder, ProcessList, ProgramLoader, ThreadBuilder};
+use kernel::vfs::dentry::Dentry;
+use kernel::vfs::mount::{do_mount, MS_NOATIME, MS_NODEV, MS_NOSUID, MS_RDONLY};
+use kernel::vfs::types::Permission;
+use kernel::vfs::FsContext;
+use kernel::CharDevice;
 use kernel_init::setup_memory;
 use path::Path;
 use prelude::*;
 
+use crate::kernel::task::alloc_pid;
+
 #[cfg(any(target_arch = "riscv64", target_arch = "loongarch64"))]
 fn do_panic() -> ! {
     #[cfg(target_arch = "riscv64")]