port.rs 10.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394
  1. use super::command::{Command, IdentifyCommand, ReadLBACommand};
  2. use super::{
  3. vread, vwrite, CommandHeader, PRDTEntry, FISH2D, PORT_CMD_CR, PORT_CMD_FR, PORT_CMD_FRE,
  4. PORT_CMD_ST, PORT_IE_DEFAULT,
  5. };
  6. use crate::kernel::block::{BlockDeviceRequest, BlockRequestQueue};
  7. use crate::kernel::mem::paging::Page;
  8. use crate::kernel::mem::phys::{NoCachePP, PhysPtr};
  9. use crate::prelude::*;
  10. use crate::sync::UCondVar;
  11. use alloc::collections::vec_deque::VecDeque;
  12. use bindings::{EINVAL, EIO};
  13. use eonix_preempt::assert_preempt_enabled;
  14. use eonix_spin_irq::SpinIrq as _;
  15. fn spinwait_clear(refval: *const u32, mask: u32) -> KResult<()> {
  16. const SPINWAIT_MAX: usize = 1000;
  17. let mut spins = 0;
  18. while vread(refval) & mask != 0 {
  19. if spins == SPINWAIT_MAX {
  20. return Err(EIO);
  21. }
  22. spins += 1;
  23. }
  24. Ok(())
  25. }
  26. /// An `AdapterPort` is an HBA device in AHCI mode.
  27. ///
  28. /// # Access
  29. ///
  30. /// All reads and writes to this struct is volatile
  31. ///
  32. #[allow(dead_code)]
  33. #[repr(C)]
  34. pub struct AdapterPortData {
  35. pub command_list_base: u64,
  36. pub fis_base: u64,
  37. pub interrupt_status: u32,
  38. pub interrupt_enable: u32,
  39. pub command_status: u32,
  40. _reserved2: u32,
  41. pub task_file_data: u32,
  42. pub signature: u32,
  43. pub sata_status: u32,
  44. pub sata_control: u32,
  45. pub sata_error: u32,
  46. pub sata_active: u32,
  47. pub command_issue: u32,
  48. pub sata_notification: u32,
  49. pub fis_based_switch_control: u32,
  50. _reserved1: [u32; 11],
  51. vendor: [u32; 4],
  52. }
  53. #[allow(dead_code)]
  54. #[derive(Debug, PartialEq, Eq, Clone, Copy)]
  55. enum SlotState {
  56. Idle,
  57. Working,
  58. Finished,
  59. Error,
  60. }
  61. struct CommandSlotInner {
  62. state: SlotState,
  63. /// # Usage
  64. /// `cmdheader` might be used in irq handler. So in order to wait for
  65. /// commands to finish, we should use `lock_irq` on `cmdheader`
  66. cmdheader: *mut CommandHeader,
  67. }
  68. /// # Safety
  69. /// This is safe because the `cmdheader` is not shared between threads
  70. unsafe impl Send for CommandSlotInner {}
  71. impl CommandSlotInner {
  72. pub fn setup(&mut self, cmdtable_base: u64, prdtlen: u16, write: bool) {
  73. let cmdheader = unsafe { self.cmdheader.as_mut().unwrap() };
  74. cmdheader.first = 0x05; // FIS type
  75. if write {
  76. cmdheader.first |= 0x40;
  77. }
  78. cmdheader.second = 0x00;
  79. cmdheader.prdt_length = prdtlen;
  80. cmdheader.bytes_transferred = 0;
  81. cmdheader.command_table_base = cmdtable_base;
  82. cmdheader._reserved = [0; 4];
  83. }
  84. }
  85. struct CommandSlot {
  86. inner: Spin<CommandSlotInner>,
  87. cv: UCondVar,
  88. }
  89. impl CommandSlot {
  90. fn new(cmdheader: *mut CommandHeader) -> Self {
  91. Self {
  92. inner: Spin::new(CommandSlotInner {
  93. state: SlotState::Idle,
  94. cmdheader,
  95. }),
  96. cv: UCondVar::new(),
  97. }
  98. }
  99. }
  100. struct FreeList {
  101. free: VecDeque<u32>,
  102. working: VecDeque<u32>,
  103. }
  104. impl FreeList {
  105. fn new() -> Self {
  106. Self {
  107. free: (0..32).collect(),
  108. working: VecDeque::new(),
  109. }
  110. }
  111. }
  112. #[derive(Default, Debug)]
  113. pub struct AdapterPortStats {
  114. /// Number of commands sent
  115. cmd_sent: u64,
  116. /// Number of transmission errors
  117. cmd_error: u64,
  118. /// Number of interrupts fired
  119. int_fired: u64,
  120. }
  121. pub struct AdapterPort {
  122. pub nport: u32,
  123. regs: *mut (),
  124. page: Page,
  125. slots: [CommandSlot; 32],
  126. free_list: Spin<FreeList>,
  127. free_list_cv: UCondVar,
  128. /// Statistics for this port
  129. pub stats: Spin<AdapterPortStats>,
  130. }
  131. /// # Safety
  132. /// This is safe because the `AdapterPort` can be accessed by only one thread at the same time
  133. unsafe impl Send for AdapterPort {}
  134. unsafe impl Sync for AdapterPort {}
  135. impl AdapterPort {
  136. pub fn new(base: usize, nport: u32) -> Self {
  137. let page = Page::alloc_one();
  138. let cmdheaders_start = page.as_cached().as_ptr::<CommandHeader>();
  139. Self {
  140. nport,
  141. regs: NoCachePP::new(base + 0x100 + 0x80 * nport as usize).as_ptr(),
  142. slots: core::array::from_fn(|index| {
  143. CommandSlot::new(unsafe { cmdheaders_start.offset(index as isize) })
  144. }),
  145. free_list: Spin::new(FreeList::new()),
  146. free_list_cv: UCondVar::new(),
  147. page,
  148. stats: Spin::default(),
  149. }
  150. }
  151. }
  152. impl AdapterPort {
  153. fn command_list_base(&self) -> *mut u64 {
  154. unsafe { self.regs.byte_offset(0x00).cast() }
  155. }
  156. fn fis_base(&self) -> *mut u64 {
  157. unsafe { self.regs.byte_offset(0x08).cast() }
  158. }
  159. fn sata_status(&self) -> *mut u32 {
  160. unsafe { self.regs.byte_offset(0x28).cast() }
  161. }
  162. fn command_status(&self) -> *mut u32 {
  163. unsafe { self.regs.byte_offset(0x18).cast() }
  164. }
  165. fn command_issue(&self) -> *mut u32 {
  166. unsafe { self.regs.byte_offset(0x38).cast() }
  167. }
  168. pub fn interrupt_status(&self) -> *mut u32 {
  169. unsafe { self.regs.byte_offset(0x10).cast() }
  170. }
  171. pub fn interrupt_enable(&self) -> *mut u32 {
  172. unsafe { self.regs.byte_offset(0x14).cast() }
  173. }
  174. pub fn status_ok(&self) -> bool {
  175. vread(self.sata_status()) & 0xf == 0x3
  176. }
  177. fn get_free_slot(&self) -> u32 {
  178. let mut free_list = self.free_list.lock_irq();
  179. loop {
  180. match free_list.free.pop_front() {
  181. Some(slot) => break slot,
  182. None => self.free_list_cv.wait(&mut free_list),
  183. };
  184. }
  185. }
  186. fn save_working(&self, slot: u32) {
  187. self.free_list.lock().working.push_back(slot);
  188. }
  189. fn release_free_slot(&self, slot: u32) {
  190. self.free_list.lock().free.push_back(slot);
  191. self.free_list_cv.notify_one();
  192. }
  193. pub fn handle_interrupt(&self) {
  194. let ci = vread(self.command_issue());
  195. // no need to use `lock_irq()` inside interrupt handler
  196. let mut free_list = self.free_list.lock();
  197. free_list.working.retain(|&n| {
  198. if ci & (1 << n) != 0 {
  199. return true;
  200. }
  201. let slot = &self.slots[n as usize];
  202. // TODO: check error
  203. let mut slot_inner = slot.inner.lock();
  204. debug_assert_eq!(slot_inner.state, SlotState::Working);
  205. slot_inner.state = SlotState::Finished;
  206. slot.cv.notify_all();
  207. self.stats.lock().int_fired += 1;
  208. false
  209. });
  210. }
  211. fn stop_command(&self) -> KResult<()> {
  212. vwrite(
  213. self.command_status(),
  214. vread(self.command_status()) & !(PORT_CMD_ST | PORT_CMD_FRE),
  215. );
  216. spinwait_clear(self.command_status(), PORT_CMD_CR | PORT_CMD_FR)
  217. }
  218. fn start_command(&self) -> KResult<()> {
  219. spinwait_clear(self.command_status(), PORT_CMD_CR)?;
  220. let cmd_status = vread(self.command_status());
  221. vwrite(
  222. self.command_status(),
  223. cmd_status | PORT_CMD_ST | PORT_CMD_FRE,
  224. );
  225. Ok(())
  226. }
  227. /// # Might Sleep
  228. /// This function **might sleep**, so call it in a preemptible context
  229. fn send_command(&self, cmd: &impl Command) -> KResult<()> {
  230. assert_preempt_enabled!("AdapterPort::send_command");
  231. let pages = cmd.pages();
  232. let cmdtable_page = Page::alloc_one();
  233. let command_fis: &mut FISH2D = cmdtable_page.as_cached().as_mut();
  234. command_fis.setup(cmd.cmd(), cmd.lba(), cmd.count());
  235. let prdt: &mut [PRDTEntry; 248] = cmdtable_page.as_cached().offset(0x80).as_mut();
  236. for (idx, page) in pages.iter().enumerate() {
  237. prdt[idx].setup(page);
  238. }
  239. let slot_index = self.get_free_slot() as usize;
  240. let slot_object = &self.slots[slot_index];
  241. let mut slot = slot_object.inner.lock_irq();
  242. slot.setup(
  243. cmdtable_page.as_phys() as u64,
  244. pages.len() as u16,
  245. cmd.write(),
  246. );
  247. slot.state = SlotState::Working;
  248. // should we clear received fis here?
  249. debug_assert!(vread(self.command_issue()) & (1 << slot_index) == 0);
  250. vwrite(self.command_issue(), 1 << slot_index);
  251. if spinwait_clear(self.command_issue(), 1 << slot_index).is_err() {
  252. let mut saved = false;
  253. while slot.state == SlotState::Working {
  254. if !saved {
  255. saved = true;
  256. self.save_working(slot_index as u32);
  257. }
  258. slot_object.cv.wait(&mut slot);
  259. }
  260. } else {
  261. // TODO: check error
  262. slot.state = SlotState::Finished;
  263. }
  264. let state = slot.state;
  265. slot.state = SlotState::Idle;
  266. debug_assert_ne!(state, SlotState::Working);
  267. self.release_free_slot(slot_index as u32);
  268. match state {
  269. SlotState::Finished => {
  270. self.stats.lock().cmd_sent += 1;
  271. Ok(())
  272. }
  273. SlotState::Error => {
  274. self.stats.lock().cmd_error += 1;
  275. Err(EIO)
  276. }
  277. _ => panic!("Invalid slot state"),
  278. }
  279. }
  280. fn identify(&self) -> KResult<()> {
  281. let cmd = IdentifyCommand::new();
  282. // TODO: check returned data
  283. self.send_command(&cmd)?;
  284. Ok(())
  285. }
  286. pub fn init(&self) -> KResult<()> {
  287. self.stop_command()?;
  288. vwrite(self.interrupt_enable(), PORT_IE_DEFAULT);
  289. vwrite(self.command_list_base(), self.page.as_phys() as u64);
  290. vwrite(self.fis_base(), self.page.as_phys() as u64 + 0x400);
  291. self.start_command()?;
  292. match self.identify() {
  293. Err(err) => {
  294. self.stop_command()?;
  295. Err(err)
  296. }
  297. Ok(_) => Ok(()),
  298. }
  299. }
  300. }
  301. impl BlockRequestQueue for AdapterPort {
  302. fn max_request_pages(&self) -> u64 {
  303. 1024
  304. }
  305. fn submit(&self, req: BlockDeviceRequest) -> KResult<()> {
  306. // TODO: check disk size limit using newtype
  307. if req.count > 65535 {
  308. return Err(EINVAL);
  309. }
  310. let command = ReadLBACommand::new(req.buffer, req.sector, req.count as u16)?;
  311. self.send_command(&command)
  312. }
  313. }