// block.rs

use core::cmp::Ordering;

use crate::{
    io::{Buffer, FillResult, UninitBuffer},
    prelude::*,
};

use alloc::{
    collections::btree_map::{BTreeMap, Entry},
    sync::Arc,
};

use bindings::{EEXIST, EINVAL, EIO, ENOENT};
use lazy_static::lazy_static;

use super::{
    mem::{paging::Page, phys::PhysPtr},
    vfs::DevId,
};

/// Pack a (major, minor) pair into a `DevId`: the major number occupies the
/// high byte, the minor number the low byte.
pub fn make_device(major: u32, minor: u32) -> DevId {
    ((major << 8) & 0xff00u32) | (minor & 0xffu32)
}
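
// Illustrative sketch (added, not part of the original file): the inverse of
// `make_device`, assuming `DevId` is a plain `u32` alias as the bit
// arithmetic above suggests.
#[allow(dead_code)]
pub fn split_device(devid: DevId) -> (u32, u32) {
    (devid >> 8, devid & 0xffu32)
}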
pub trait BlockRequestQueue: Send + Sync {
    /// Maximum number of sectors that can be transferred in one request.
    ///
    /// Note: despite the name, this value is compared against sector counts
    /// (see `BlockDevice::read_some`).
    fn max_request_pages(&self) -> u64;

    fn submit(&self, req: BlockDeviceRequest) -> KResult<()>;
}
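
// Minimal sketch (added, not part of the original file): what an
// implementation of `BlockRequestQueue` could look like. `NullQueue` and its
// capacity field are hypothetical; a real driver would program the hardware
// in `submit` and copy the result into `req.buffer`.
#[allow(dead_code)]
struct NullQueue {
    /// Capacity of the backing device, in 512-byte sectors.
    sectors: u64,
}

impl BlockRequestQueue for NullQueue {
    fn max_request_pages(&self) -> u64 {
        1024
    }

    fn submit(&self, req: BlockDeviceRequest) -> KResult<()> {
        // Reject requests that run past the end of the device.
        if req.sector + req.count > self.sectors {
            return Err(EINVAL);
        }
        // A real driver would start the transfer here.
        Ok(())
    }
}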
struct BlockDeviceDisk {
    queue: Arc<dyn BlockRequestQueue>,
}

struct BlockDevicePartition {
    disk_dev: DevId,
    /// Partition start, in 512-byte sectors from the beginning of the disk.
    offset: u64,
    queue: Arc<dyn BlockRequestQueue>,
}

enum BlockDeviceType {
    Disk(BlockDeviceDisk),
    Partition(BlockDevicePartition),
}

pub struct BlockDevice {
    devid: DevId,
    /// Device size, in 512-byte sectors.
    size: u64,
    max_pages: u64,
    dev_type: BlockDeviceType,
}
impl PartialEq for BlockDevice {
    fn eq(&self, other: &Self) -> bool {
        self.devid == other.devid
    }
}

impl PartialOrd for BlockDevice {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.devid.cmp(&other.devid))
    }
}

impl Eq for BlockDevice {}

impl Ord for BlockDevice {
    fn cmp(&self, other: &Self) -> Ordering {
        self.devid.cmp(&other.devid)
    }
}
lazy_static! {
    static ref BLOCK_DEVICE_LIST: Spin<BTreeMap<DevId, Arc<BlockDevice>>> =
        Spin::new(BTreeMap::new());
}
/// One entry of the classic MBR partition table.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
struct MBREntry {
    attr: u8,
    chs_start: [u8; 3],
    part_type: u8,
    chs_end: [u8; 3],
    /// First sector of the partition (LBA).
    lba_start: u32,
    /// Number of sectors in the partition.
    cnt: u32,
}

#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct MBR {
    code: [u8; 446],
    entries: [MBREntry; 4],
    magic: [u8; 2],
}
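
// Sanity check (added): a legacy MBR occupies exactly one 512-byte sector
// (446 bytes of boot code + 4 x 16-byte entries + 2-byte magic).
const _: () = assert!(core::mem::size_of::<MBR>() == 512);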
impl BlockDevice {
    pub fn register_disk(
        devid: DevId,
        size: u64,
        queue: Arc<dyn BlockRequestQueue>,
    ) -> KResult<Arc<Self>> {
        let max_pages = queue.max_request_pages();
        let device = Arc::new(Self {
            devid,
            size,
            max_pages,
            dev_type: BlockDeviceType::Disk(BlockDeviceDisk { queue }),
        });

        match BLOCK_DEVICE_LIST.lock().entry(devid) {
            Entry::Vacant(entry) => Ok(entry.insert(device).clone()),
            Entry::Occupied(_) => Err(EEXIST),
        }
    }

    pub fn get(devid: DevId) -> KResult<Arc<Self>> {
        BLOCK_DEVICE_LIST.lock().get(&devid).cloned().ok_or(ENOENT)
    }
}
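
// Hedged usage sketch (added, not part of the original file): how a driver is
// expected to bring a disk up. `major`, `nr_sectors` and `queue` would be
// supplied by the actual driver.
#[allow(dead_code)]
fn register_disk_example(
    major: u32,
    nr_sectors: u64,
    queue: Arc<dyn BlockRequestQueue>,
) -> KResult<()> {
    let disk = BlockDevice::register_disk(make_device(major, 0), nr_sectors, queue)?;
    // Scan the MBR and register any primary partitions found.
    disk.partprobe()
}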
impl BlockDevice {
    pub fn devid(&self) -> DevId {
        self.devid
    }

    pub fn register_partition(
        &self,
        idx: u32,
        offset: u64,
        size: u64,
    ) -> KResult<Arc<Self>> {
        let queue = match self.dev_type {
            BlockDeviceType::Disk(ref disk) => disk.queue.clone(),
            BlockDeviceType::Partition(_) => return Err(EINVAL),
        };

        let device = Arc::new(BlockDevice {
            devid: make_device(self.devid >> 8, idx),
            size,
            max_pages: self.max_pages,
            dev_type: BlockDeviceType::Partition(BlockDevicePartition {
                disk_dev: self.devid,
                offset,
                queue,
            }),
        });

        match BLOCK_DEVICE_LIST.lock().entry(device.devid()) {
            Entry::Vacant(entry) => Ok(entry.insert(device).clone()),
            Entry::Occupied(_) => Err(EEXIST),
        }
    }
    pub fn partprobe(&self) -> KResult<()> {
        match self.dev_type {
            BlockDeviceType::Partition(_) => Err(EINVAL),
            BlockDeviceType::Disk(_) => {
                let mut mbr: UninitBuffer<MBR> = UninitBuffer::new();
                self.read_some(0, &mut mbr)?.ok_or(EIO)?;
                let mbr = mbr.assume_filled_ref()?;

                if mbr.magic != [0x55, 0xaa] {
                    return Ok(());
                }

                // Copy the entries out of the packed struct to avoid taking
                // unaligned references into it.
                let entries = mbr.entries;
                for (idx, entry) in entries.iter().enumerate() {
                    if entry.part_type == 0 {
                        continue;
                    }

                    let offset = entry.lba_start as u64;
                    let size = entry.cnt as u64;

                    // MBR partition numbering starts at 1.
                    self.register_partition(idx as u32 + 1, offset, size)?;
                }

                Ok(())
            }
        }
    }
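
    // Worked example (added): a disk registered as make_device(8, 0) will
    // have its non-empty primary MBR entries registered as make_device(8, 1)
    // through make_device(8, 4).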
    /// Send the request directly to the queue, with no extra overhead.
    /// If any parameter does not meet the requirements below, the operation
    /// fails.
    ///
    /// # Requirements
    /// - `req.count` must not exceed the device size or the maximum request size
    /// - `req.sector` must be within the device size
    /// - `req.buffer` must be large enough to hold the data
    pub fn read_raw(&self, mut req: BlockDeviceRequest) -> KResult<()> {
        // TODO: also check `req.count` against the maximum request size
        if req.sector + req.count > self.size {
            return Err(EINVAL);
        }

        match self.dev_type {
            BlockDeviceType::Disk(ref disk) => disk.queue.submit(req),
            BlockDeviceType::Partition(ref part) => {
                // Translate the partition-relative sector to a disk-absolute one.
                req.sector += part.offset;
                part.queue.submit(req)
            }
        }
    }
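
    // Worked example (added): for a partition at offset 2048, a request with
    // sector = 0 and count = 8 is submitted to the disk queue with
    // sector = 2048 and count = 8.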
    /// Read some data from the block device. This may involve copying and
    /// fragmenting the request.
    ///
    /// Further optimization may be needed, including caching, read-ahead
    /// and request reordering.
    ///
    /// # Arguments
    /// `offset` - offset into the device, in bytes
    pub fn read_some(
        &self,
        offset: usize,
        buffer: &mut dyn Buffer,
    ) -> KResult<FillResult> {
        let mut sector_start = offset as u64 / 512;
        let mut first_sector_offset = offset as u64 % 512;
        let mut sector_count =
            (first_sector_offset + buffer.total() as u64 + 511) / 512;

        let mut nfilled = 0;
        'outer: while sector_count != 0 {
            let pages: &[Page];
            let page: Option<Page>;
            let page_vec: Option<Vec<Page>>;
            let nread;

            match sector_count {
                // Up to 8 sectors (4 KiB) fit in a single page.
                count if count <= 8 => {
                    nread = count;
                    let _page = Page::alloc_one();
                    page = Some(_page);
                    pages = core::slice::from_ref(page.as_ref().unwrap());
                }
                // Up to 16 sectors (8 KiB) fit in one order-1 allocation.
                count if count <= 16 => {
                    nread = count;
                    let _page = Page::alloc_many(1);
                    page = Some(_page);
                    pages = core::slice::from_ref(page.as_ref().unwrap());
                }
                // Larger requests are capped at the queue limit and split
                // across several order-1 allocations.
                count => {
                    nread = count.min(self.max_pages);
                    let npages = (nread + 15) / 16;
                    let mut _page_vec = Vec::with_capacity(npages as usize);
                    for _ in 0..npages {
                        _page_vec.push(Page::alloc_many(1));
                    }
                    page_vec = Some(_page_vec);
                    pages = page_vec.as_ref().unwrap().as_slice();
                }
            }

            let req = BlockDeviceRequest {
                sector: sector_start,
                count: nread,
                buffer: pages,
            };
            self.read_raw(req)?;

            for page in pages.iter() {
                // Skip the in-sector offset on the very first page only.
                let data = if first_sector_offset != 0 {
                    let ret = page
                        .as_cached()
                        .as_slice(page.len())
                        .split_at(first_sector_offset as usize)
                        .1;
                    first_sector_offset = 0;
                    ret
                } else {
                    page.as_cached().as_slice(page.len())
                };

                match buffer.fill(data)? {
                    FillResult::Done(n) => nfilled += n,
                    FillResult::Partial(n) => {
                        nfilled += n;
                        break 'outer;
                    }
                    FillResult::Full => break 'outer,
                }
            }

            sector_start += nread;
            sector_count -= nread;
        }

        if nfilled == buffer.total() {
            Ok(FillResult::Done(nfilled))
        } else {
            Ok(FillResult::Partial(nfilled))
        }
    }
}
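
// Hedged usage sketch (added, not part of the original file): reading the
// first sector of a registered device through `read_some`. The
// `UninitBuffer` / `ok_or` / `assume_filled_ref` pattern mirrors the
// `partprobe` path above; using `UninitBuffer` with a plain byte array is an
// assumption.
#[allow(dead_code)]
fn read_first_sector_example(dev: &BlockDevice) -> KResult<()> {
    let mut buf: UninitBuffer<[u8; 512]> = UninitBuffer::new();
    dev.read_some(0, &mut buf)?.ok_or(EIO)?;
    let _first_sector = buf.assume_filled_ref()?;
    Ok(())
}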
pub struct BlockDeviceRequest<'lt> {
    /// First sector to read, in 512-byte units.
    pub sector: u64,
    /// Number of sectors to read.
    pub count: u64,
    pub buffer: &'lt [Page],
}
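
// Worked example (added): the arithmetic `read_some` uses to build these
// requests. For offset = 1000 bytes and a 2000-byte buffer:
//   sector_start        = 1000 / 512              = 1
//   first_sector_offset = 1000 % 512              = 488
//   sector_count        = (488 + 2000 + 511) / 512 = 5
// i.e. sectors 1..=5 are read and the first 488 bytes are skipped.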