// virtio_blk.rs

use crate::{
    io::Chunks,
    kernel::{
        block::{BlockDeviceRequest, BlockRequestQueue},
        constants::EIO,
        mem::{AsMemoryBlock, Page},
    },
    prelude::KResult,
};
use eonix_hal::mm::ArchPhysAccess;
use eonix_mm::{
    address::{Addr, PAddr, PhysAccess},
    paging::PFN,
};
use eonix_sync::Spin;
use virtio_drivers::{device::blk::VirtIOBlk, transport::Transport, Hal};
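
/// HAL implementation for the `virtio_drivers` crate, backed by the kernel's
/// physical page allocator.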
pub struct HAL;

unsafe impl Hal for HAL {
    fn dma_alloc(
        pages: usize,
        _direction: virtio_drivers::BufferDirection,
    ) -> (virtio_drivers::PhysAddr, core::ptr::NonNull<u8>) {
        let page = Page::alloc_at_least(pages);
        let paddr = page.start().addr();
        let ptr = page.as_memblk().as_byte_ptr();
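        // Leak the page so the allocation outlives this call; `dma_dealloc`
        // reconstructs it from the physical address and frees it.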
        page.into_raw();
        (paddr, ptr)
    }

    unsafe fn dma_dealloc(
        paddr: virtio_drivers::PhysAddr,
        _vaddr: core::ptr::NonNull<u8>,
        _pages: usize,
    ) -> i32 {
        let pfn = PFN::from(PAddr::from(paddr));
        unsafe {
            // SAFETY: The caller ensures that the pfn corresponds to a valid
            // page allocated by `dma_alloc`.
            Page::from_raw(pfn);
        }
        0
    }

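    // MMIO regions are reached through `ArchPhysAccess`'s physical-to-virtual
    // translation; no additional mapping is set up here.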
    unsafe fn mmio_phys_to_virt(
        paddr: virtio_drivers::PhysAddr,
        _size: usize,
    ) -> core::ptr::NonNull<u8> {
        unsafe { ArchPhysAccess::as_ptr(PAddr::from(paddr)) }
    }

    unsafe fn share(
        buffer: core::ptr::NonNull<[u8]>,
        _direction: virtio_drivers::BufferDirection,
    ) -> virtio_drivers::PhysAddr {
        let paddr = unsafe {
            // SAFETY: The caller ensures that the buffer is valid.
            ArchPhysAccess::from_ptr(buffer.cast::<u8>())
        };
        paddr.addr()
    }

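    // `share` hands out the buffer's physical address directly (no bounce
    // buffer), so there is nothing to copy back or release here.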
    unsafe fn unshare(
        _paddr: virtio_drivers::PhysAddr,
        _buffer: core::ptr::NonNull<[u8]>,
        _direction: virtio_drivers::BufferDirection,
    ) {
    }
}
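
// Serve block requests by locking the `VirtIOBlk` device for each submission;
// the spinlock serializes access to the underlying virtqueue.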
impl<T> BlockRequestQueue for Spin<VirtIOBlk<HAL, T>>
where
    T: Transport + Send,
{
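    // Upper bound on a single request, in pages (4 MiB assuming 4 KiB pages).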
    fn max_request_pages(&self) -> u64 {
        1024
    }

    fn submit(&self, req: BlockDeviceRequest) -> KResult<()> {
        match req {
            BlockDeviceRequest::Write { .. } => todo!(),
            BlockDeviceRequest::Read {
                sector,
                count,
                buffer,
            } => {
                let mut dev = self.lock();
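                // Split the request into 8-sector chunks (8 × 512 B = 4 KiB,
                // one chunk per buffer page) and read each chunk in turn.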
                for ((start, len), buffer_page) in
                    Chunks::new(sector as usize, count as usize, 8).zip(buffer.iter())
                {
                    let buffer = unsafe {
                        // SAFETY: Pages in `req.buffer` are guaranteed to be exclusively owned by us.
                        &mut buffer_page.as_memblk().as_bytes_mut()[..len as usize * 512]
                    };
                    dev.read_blocks(start, buffer).map_err(|_| EIO)?;
                }
            }
        }
        Ok(())
    }
}