block.rs

use super::{
    constants::ENOENT,
    mem::{paging::Page, AsMemoryBlock as _},
    vfs::DevId,
};
use crate::kernel::constants::{EEXIST, EINVAL, EIO};
use crate::{
    io::{Buffer, FillResult, UninitBuffer},
    prelude::*,
};
use alloc::{
    collections::btree_map::{BTreeMap, Entry},
    sync::Arc,
};
use core::cmp::Ordering;
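
/// Pack a `(major, minor)` pair into a `DevId`: the major number occupies
/// bits 8..16 and the minor number bits 0..8, so e.g. `make_device(8, 1)`
/// yields `0x0801`.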
pub fn make_device(major: u32, minor: u32) -> DevId {
    (major << 8) & 0xff00u32 | minor & 0xffu32
}

pub trait BlockRequestQueue: Send + Sync {
    /// Maximum number of pages that can be read in one request
    fn max_request_pages(&self) -> u64;

    fn submit(&self, req: BlockDeviceRequest) -> KResult<()>;
}
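
// Implementor's sketch (hypothetical, not part of this module): a block
// driver wraps its hardware queue and turns each submitted request into
// device commands.
//
//     struct VirtioBlkQueue { /* device state */ }
//
//     impl BlockRequestQueue for VirtioBlkQueue {
//         fn max_request_pages(&self) -> u64 {
//             128 // whatever the device's DMA limits allow
//         }
//
//         fn submit(&self, req: BlockDeviceRequest) -> KResult<()> {
//             // Build a device command from `req.sector` / `req.count` and
//             // DMA the data into `req.buffer`'s pages.
//             Ok(())
//         }
//     }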

struct BlockDeviceDisk {
    queue: Arc<dyn BlockRequestQueue>,
}

#[allow(dead_code)]
struct BlockDevicePartition {
    disk_dev: DevId,
    offset: u64,
    queue: Arc<dyn BlockRequestQueue>,
}

enum BlockDeviceType {
    Disk(BlockDeviceDisk),
    Partition(BlockDevicePartition),
}
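
/// A block device registered with the kernel: either a whole disk or a
/// single partition on one. Sizes and sector numbers are in 512-byte units.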
pub struct BlockDevice {
    devid: DevId,
    size: u64,
    max_pages: u64,
    dev_type: BlockDeviceType,
}

impl PartialEq for BlockDevice {
    fn eq(&self, other: &Self) -> bool {
        self.devid == other.devid
    }
}

impl PartialOrd for BlockDevice {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.devid.cmp(&other.devid))
    }
}

impl Eq for BlockDevice {}

impl Ord for BlockDevice {
    fn cmp(&self, other: &Self) -> Ordering {
        self.devid.cmp(&other.devid)
    }
}

static BLOCK_DEVICE_LIST: Spin<BTreeMap<DevId, Arc<BlockDevice>>> = Spin::new(BTreeMap::new());
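
// Legacy MBR boot sector layout: 446 bytes of boot code, four 16-byte
// partition entries, and the two-byte 0x55 0xaa signature, 512 bytes total.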
#[derive(Debug, Clone, Copy)]
#[repr(C)]
struct MBREntry {
    attr: u8,
    chs_start: [u8; 3],
    part_type: u8,
    chs_end: [u8; 3],
    lba_start: u32,
    cnt: u32,
}

#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct MBR {
    code: [u8; 446],
    entries: [MBREntry; 4],
    magic: [u8; 2],
}

impl BlockDevice {
    pub fn register_disk(
        devid: DevId,
        size: u64,
        queue: Arc<dyn BlockRequestQueue>,
    ) -> KResult<Arc<Self>> {
        let max_pages = queue.max_request_pages();
        let device = Arc::new(Self {
            devid,
            size,
            max_pages,
            dev_type: BlockDeviceType::Disk(BlockDeviceDisk { queue }),
        });

        match BLOCK_DEVICE_LIST.lock().entry(devid) {
            Entry::Vacant(entry) => Ok(entry.insert(device).clone()),
            Entry::Occupied(_) => Err(EEXIST),
        }
    }

    pub fn get(devid: DevId) -> KResult<Arc<Self>> {
        BLOCK_DEVICE_LIST.lock().get(&devid).cloned().ok_or(ENOENT)
    }
}
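
// Usage sketch (hypothetical driver code; `nr_sectors` and `queue` are
// assumed inputs): register a whole disk, then scan its partition table.
//
//     let disk = BlockDevice::register_disk(make_device(8, 0), nr_sectors, queue)?;
//     disk.partprobe()?;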

impl BlockDevice {
    pub fn devid(&self) -> DevId {
        self.devid
    }

    pub fn register_partition(&self, idx: u32, offset: u64, size: u64) -> KResult<Arc<Self>> {
        let queue = match self.dev_type {
            BlockDeviceType::Disk(ref disk) => disk.queue.clone(),
            BlockDeviceType::Partition(_) => return Err(EINVAL),
        };

        let device = Arc::new(BlockDevice {
            devid: make_device(self.devid >> 8, idx),
            size,
            max_pages: self.max_pages,
            dev_type: BlockDeviceType::Partition(BlockDevicePartition {
                disk_dev: self.devid,
                offset,
                queue,
            }),
        });

        match BLOCK_DEVICE_LIST.lock().entry(device.devid()) {
            Entry::Vacant(entry) => Ok(entry.insert(device).clone()),
            Entry::Occupied(_) => Err(EEXIST),
        }
    }

    pub fn partprobe(&self) -> KResult<()> {
        match self.dev_type {
            BlockDeviceType::Partition(_) => Err(EINVAL),
            BlockDeviceType::Disk(_) => {
                let mut mbr: UninitBuffer<MBR> = UninitBuffer::new();
                self.read_some(0, &mut mbr)?.ok_or(EIO)?;
                let mbr = mbr.assume_filled_ref()?;

                if mbr.magic != [0x55, 0xaa] {
                    return Ok(());
                }

                let entries = mbr.entries;
                for (idx, entry) in entries.iter().enumerate() {
                    if entry.part_type == 0 {
                        continue;
                    }

                    let offset = entry.lba_start as u64;
                    let size = entry.cnt as u64;
                    self.register_partition(idx as u32 + 1, offset, size)?;
                }

                Ok(())
            }
        }
    }

    /// Send the request directly to the queue, with no extra overhead.
    /// If any of the parameters fails to meet the requirements below, the
    /// operation will fail.
    ///
    /// # Requirements
    /// - `req.count` must not exceed the disk size or the maximum request size
    /// - `req.sector` must be within the disk size
    /// - `req.buffer` must be large enough to hold the data
    pub fn read_raw(&self, mut req: BlockDeviceRequest) -> KResult<()> {
        // TODO: check disk size limit
        if req.sector + req.count > self.size {
            return Err(EINVAL);
        }

        match self.dev_type {
            BlockDeviceType::Disk(ref disk) => disk.queue.submit(req),
            BlockDeviceType::Partition(ref part) => {
                req.sector += part.offset;
                part.queue.submit(req)
            }
        }
    }

    /// Read from the block device. May involve copying and fragmentation.
    ///
    /// Further optimization may be needed, including caching, read-ahead
    /// and request reordering.
    ///
    /// # Arguments
    /// `offset` - offset in bytes
    pub fn read_some(&self, offset: usize, buffer: &mut dyn Buffer) -> KResult<FillResult> {
        let mut sector_start = offset as u64 / 512;
        let mut first_sector_offset = offset as u64 % 512;
        let mut sector_count = (first_sector_offset + buffer.total() as u64 + 511) / 512;
        let mut nfilled = 0;

        'outer: while sector_count != 0 {
            let pages: &[Page];
            let page: Option<Page>;
            let page_vec: Option<Vec<Page>>;
            let nread;
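
            // Allocation strategy (one 4 KiB page holds eight 512-byte
            // sectors): up to 8 sectors fit in a single page, up to 16 in one
            // order-1 (8 KiB) block, and anything larger goes into a vector
            // of order-1 blocks, capped at the queue's per-request limit.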
            match sector_count {
                count if count <= 8 => {
                    nread = count;
                    let _page = Page::alloc();
                    page = Some(_page);
                    pages = core::slice::from_ref(page.as_ref().unwrap());
                }
                count if count <= 16 => {
                    nread = count;
                    let _page = Page::alloc_order(1);
                    page = Some(_page);
                    pages = core::slice::from_ref(page.as_ref().unwrap());
                }
                count => {
                    nread = count.min(self.max_pages);
                    let npages = (nread + 15) / 16;
                    let mut _page_vec = Vec::with_capacity(npages as usize);
                    for _ in 0..npages {
                        _page_vec.push(Page::alloc_order(1));
                    }
                    page_vec = Some(_page_vec);
                    pages = page_vec.as_ref().unwrap().as_slice();
                }
            }

            let req = BlockDeviceRequest {
                sector: sector_start,
                count: nread,
                buffer: pages,
            };
            self.read_raw(req)?;

            for page in pages.iter() {
                // SAFETY: We are the only owner of the page, so no one else
                // could be mutating it.
                let data = unsafe { &page.as_memblk().as_bytes()[first_sector_offset as usize..] };
                first_sector_offset = 0;

                match buffer.fill(data)? {
                    FillResult::Done(n) => nfilled += n,
                    FillResult::Partial(n) => {
                        nfilled += n;
                        break 'outer;
                    }
                    FillResult::Full => break 'outer,
                }
            }

            sector_start += nread;
            sector_count -= nread;
        }

        if nfilled == buffer.total() {
            Ok(FillResult::Done(nfilled))
        } else {
            Ok(FillResult::Partial(nfilled))
        }
    }
}

pub struct BlockDeviceRequest<'lt> {
    pub sector: u64, // Sector to read from, in 512-byte blocks
    pub count: u64,  // Number of sectors to read
    pub buffer: &'lt [Page],
}
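
// Caller's sketch (hypothetical; assumes `UninitBuffer<[u8; 512]>` implements
// `Buffer` the way `UninitBuffer<MBR>` does in `partprobe` above): read one
// sector's worth of data at byte offset 4096.
//
//     let dev = BlockDevice::get(make_device(8, 0))?;
//     let mut buf: UninitBuffer<[u8; 512]> = UninitBuffer::new();
//     dev.read_some(4096, &mut buf)?.ok_or(EIO)?;
//     let data = buf.assume_filled_ref()?;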