port.rs 9.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397
  1. use alloc::collections::vec_deque::VecDeque;
  2. use bindings::{EINVAL, EIO};
  3. use eonix_preempt::assert_preempt_enabled;
  4. use crate::prelude::*;
  5. use crate::kernel::block::{BlockDeviceRequest, BlockRequestQueue};
  6. use crate::kernel::mem::paging::Page;
  7. use crate::kernel::mem::phys::{NoCachePP, PhysPtr};
  8. use crate::sync::UCondVar;
  9. use super::command::{Command, IdentifyCommand, ReadLBACommand};
  10. use super::{
  11. vread, vwrite, CommandHeader, PRDTEntry, FISH2D, PORT_CMD_CR, PORT_CMD_FR, PORT_CMD_FRE,
  12. PORT_CMD_ST, PORT_IE_DEFAULT,
  13. };
  14. fn spinwait_clear(refval: *const u32, mask: u32) -> KResult<()> {
  15. const SPINWAIT_MAX: usize = 1000;
  16. let mut spins = 0;
  17. while vread(refval) & mask != 0 {
  18. if spins == SPINWAIT_MAX {
  19. return Err(EIO);
  20. }
  21. spins += 1;
  22. }
  23. Ok(())
  24. }
/// An `AdapterPort` is an HBA device in AHCI mode.
///
/// Register layout of one AHCI port: the HBA exposes one such block per
/// port, 0x80 bytes apart. Field offsets follow the AHCI specification,
/// so this struct is `repr(C)` and must not be reordered.
///
/// # Access
///
/// All reads and writes to this struct are volatile
///
#[allow(dead_code)]
#[repr(C)]
pub struct AdapterPortData {
    /// 0x00: PxCLB/PxCLBU — command list base address (low + high dwords).
    pub command_list_base: u64,
    /// 0x08: PxFB/PxFBU — received FIS base address (low + high dwords).
    pub fis_base: u64,
    /// 0x10: PxIS — interrupt status.
    pub interrupt_status: u32,
    /// 0x14: PxIE — interrupt enable.
    pub interrupt_enable: u32,
    /// 0x18: PxCMD — port command and status (ST/FRE/CR/FR live here).
    pub command_status: u32,
    /// 0x1C: reserved.
    _reserved2: u32,
    /// 0x20: PxTFD — task file data.
    pub task_file_data: u32,
    /// 0x24: PxSIG — device signature.
    pub signature: u32,
    /// 0x28: PxSSTS — SATA status (SStatus).
    pub sata_status: u32,
    /// 0x2C: PxSCTL — SATA control (SControl).
    pub sata_control: u32,
    /// 0x30: PxSERR — SATA error (SError).
    pub sata_error: u32,
    /// 0x34: PxSACT — SATA active.
    pub sata_active: u32,
    /// 0x38: PxCI — command issue, one bit per command slot.
    pub command_issue: u32,
    /// 0x3C: PxSNTF — SATA notification.
    pub sata_notification: u32,
    /// 0x40: PxFBS — FIS-based switching control.
    pub fis_based_switch_control: u32,
    /// 0x44..0x70: reserved.
    _reserved1: [u32; 11],
    /// 0x70..0x80: vendor specific.
    vendor: [u32; 4],
}
/// Lifecycle state of a single command slot.
#[allow(dead_code)]
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum SlotState {
    /// Free; may be claimed for a new command.
    Idle,
    /// Issued to the hardware; completion pending.
    Working,
    /// Completed (CI bit observed clear, or interrupt handler marked it).
    Finished,
    /// Completed with an error.
    Error,
}
/// Mutable state of one command slot, always accessed through the
/// slot's `Spin` lock.
struct CommandSlotInner {
    // Current position in the command lifecycle.
    state: SlotState,
    /// Pointer to this slot's entry in the port's command list page.
    ///
    /// # Usage
    /// `cmdheader` might be used in irq handler. So in order to wait for
    /// commands to finish, we should use `lock_irq` on `cmdheader`
    cmdheader: *mut CommandHeader,
}
/// # Safety
/// This is safe because the `cmdheader` is not shared between threads:
/// each slot owns exactly one command-list entry, and all access goes
/// through the `Spin` lock wrapping `CommandSlotInner`.
unsafe impl Send for CommandSlotInner {}
  70. impl CommandSlotInner {
  71. pub fn setup(&mut self, cmdtable_base: u64, prdtlen: u16, write: bool) {
  72. let cmdheader = unsafe { self.cmdheader.as_mut().unwrap() };
  73. cmdheader.first = 0x05; // FIS type
  74. if write {
  75. cmdheader.first |= 0x40;
  76. }
  77. cmdheader.second = 0x00;
  78. cmdheader.prdt_length = prdtlen;
  79. cmdheader.bytes_transferred = 0;
  80. cmdheader.command_table_base = cmdtable_base;
  81. cmdheader._reserved = [0; 4];
  82. }
  83. }
/// One of the port's 32 command slots: the locked slot state plus a
/// condition variable used to sleep until the command finishes.
struct CommandSlot {
    // Slot state and command-header pointer, guarded by a spinlock.
    inner: Spin<CommandSlotInner>,
    // Signalled by the interrupt handler when the command completes.
    cv: UCondVar,
}
  88. impl CommandSlot {
  89. fn new(cmdheader: *mut CommandHeader) -> Self {
  90. Self {
  91. inner: Spin::new(CommandSlotInner {
  92. state: SlotState::Idle,
  93. cmdheader,
  94. }),
  95. cv: UCondVar::new(),
  96. }
  97. }
  98. }
  99. struct FreeList {
  100. free: VecDeque<u32>,
  101. working: VecDeque<u32>,
  102. }
  103. impl FreeList {
  104. fn new() -> Self {
  105. Self {
  106. free: (0..32).collect(),
  107. working: VecDeque::new(),
  108. }
  109. }
  110. }
/// Per-port I/O statistics, updated under the `stats` spinlock.
#[derive(Default, Debug)]
pub struct AdapterPortStats {
    /// Number of commands sent
    cmd_sent: u64,
    /// Number of transmission errors
    cmd_error: u64,
    /// Number of interrupts fired
    int_fired: u64,
}
/// One port of an AHCI HBA, together with its command slots and the
/// page backing the command list / received-FIS area.
pub struct AdapterPort {
    /// Port index on the HBA.
    pub nport: u32,
    /// Uncached pointer to this port's register block; all register
    /// accessors offset from here.
    regs: *mut (),
    /// Backing page: command headers at offset 0, received-FIS area at
    /// offset 0x400 (programmed in `init`).
    page: Page,
    /// One slot per command header.
    slots: [CommandSlot; 32],
    /// Which slot indices are free / issued-and-parked.
    free_list: Spin<FreeList>,
    /// Signalled when a slot is returned to the free list.
    free_list_cv: UCondVar,
    /// Statistics for this port
    pub stats: Spin<AdapterPortStats>,
}
/// # Safety
/// `regs` and `page` refer to memory that lives as long as the port and
/// is only touched through volatile accessors; the shared mutable state
/// (`slots`, `free_list`, `stats`) is protected by `Spin` locks.
/// NOTE(review): the original comment claimed single-threaded access,
/// which contradicts implementing `Sync`; safety actually rests on the
/// internal locking — confirm every field is covered by it.
unsafe impl Send for AdapterPort {}
unsafe impl Sync for AdapterPort {}
  134. impl AdapterPort {
  135. pub fn new(base: usize, nport: u32) -> Self {
  136. let page = Page::alloc_one();
  137. let cmdheaders_start = page.as_cached().as_ptr::<CommandHeader>();
  138. Self {
  139. nport,
  140. regs: NoCachePP::new(base + 0x100 + 0x80 * nport as usize).as_ptr(),
  141. slots: core::array::from_fn(|index| {
  142. CommandSlot::new(unsafe { cmdheaders_start.offset(index as isize) })
  143. }),
  144. free_list: Spin::new(FreeList::new()),
  145. free_list_cv: UCondVar::new(),
  146. page,
  147. stats: Spin::default(),
  148. }
  149. }
  150. }
  151. impl AdapterPort {
  152. fn command_list_base(&self) -> *mut u64 {
  153. unsafe { self.regs.byte_offset(0x00).cast() }
  154. }
  155. fn fis_base(&self) -> *mut u64 {
  156. unsafe { self.regs.byte_offset(0x08).cast() }
  157. }
  158. fn sata_status(&self) -> *mut u32 {
  159. unsafe { self.regs.byte_offset(0x28).cast() }
  160. }
  161. fn command_status(&self) -> *mut u32 {
  162. unsafe { self.regs.byte_offset(0x18).cast() }
  163. }
  164. fn command_issue(&self) -> *mut u32 {
  165. unsafe { self.regs.byte_offset(0x38).cast() }
  166. }
  167. pub fn interrupt_status(&self) -> *mut u32 {
  168. unsafe { self.regs.byte_offset(0x10).cast() }
  169. }
  170. pub fn interrupt_enable(&self) -> *mut u32 {
  171. unsafe { self.regs.byte_offset(0x14).cast() }
  172. }
  173. pub fn status_ok(&self) -> bool {
  174. vread(self.sata_status()) & 0xf == 0x3
  175. }
  176. fn get_free_slot(&self) -> u32 {
  177. let mut free_list = self.free_list.lock_irq();
  178. loop {
  179. match free_list.free.pop_front() {
  180. Some(slot) => break slot,
  181. None => self.free_list_cv.wait(&mut free_list),
  182. };
  183. }
  184. }
  185. fn save_working(&self, slot: u32) {
  186. self.free_list.lock().working.push_back(slot);
  187. }
  188. fn release_free_slot(&self, slot: u32) {
  189. self.free_list.lock().free.push_back(slot);
  190. self.free_list_cv.notify_one();
  191. }
  192. pub fn handle_interrupt(&self) {
  193. let ci = vread(self.command_issue());
  194. // no need to use `lock_irq()` inside interrupt handler
  195. let mut free_list = self.free_list.lock();
  196. free_list.working.retain(|&n| {
  197. if ci & (1 << n) != 0 {
  198. return true;
  199. }
  200. let slot = &self.slots[n as usize];
  201. // TODO: check error
  202. let mut slot_inner = slot.inner.lock();
  203. debug_assert_eq!(slot_inner.state, SlotState::Working);
  204. slot_inner.state = SlotState::Finished;
  205. slot.cv.notify_all();
  206. self.stats.lock().int_fired += 1;
  207. false
  208. });
  209. }
  210. fn stop_command(&self) -> KResult<()> {
  211. vwrite(
  212. self.command_status(),
  213. vread(self.command_status()) & !(PORT_CMD_ST | PORT_CMD_FRE),
  214. );
  215. spinwait_clear(self.command_status(), PORT_CMD_CR | PORT_CMD_FR)
  216. }
  217. fn start_command(&self) -> KResult<()> {
  218. spinwait_clear(self.command_status(), PORT_CMD_CR)?;
  219. let cmd_status = vread(self.command_status());
  220. vwrite(
  221. self.command_status(),
  222. cmd_status | PORT_CMD_ST | PORT_CMD_FRE,
  223. );
  224. Ok(())
  225. }
  226. /// # Might Sleep
  227. /// This function **might sleep**, so call it in a preemptible context
  228. fn send_command(&self, cmd: &impl Command) -> KResult<()> {
  229. assert_preempt_enabled!("AdapterPort::send_command");
  230. let pages = cmd.pages();
  231. let cmdtable_page = Page::alloc_one();
  232. let command_fis: &mut FISH2D = cmdtable_page.as_cached().as_mut();
  233. command_fis.setup(cmd.cmd(), cmd.lba(), cmd.count());
  234. let prdt: &mut [PRDTEntry; 248] = cmdtable_page.as_cached().offset(0x80).as_mut();
  235. for (idx, page) in pages.iter().enumerate() {
  236. prdt[idx].setup(page);
  237. }
  238. let slot_index = self.get_free_slot() as usize;
  239. let slot_object = &self.slots[slot_index];
  240. let mut slot = slot_object.inner.lock_irq();
  241. slot.setup(
  242. cmdtable_page.as_phys() as u64,
  243. pages.len() as u16,
  244. cmd.write(),
  245. );
  246. slot.state = SlotState::Working;
  247. // should we clear received fis here?
  248. debug_assert!(vread(self.command_issue()) & (1 << slot_index) == 0);
  249. vwrite(self.command_issue(), 1 << slot_index);
  250. if spinwait_clear(self.command_issue(), 1 << slot_index).is_err() {
  251. let mut saved = false;
  252. while slot.state == SlotState::Working {
  253. if !saved {
  254. saved = true;
  255. self.save_working(slot_index as u32);
  256. }
  257. slot_object.cv.wait(&mut slot);
  258. }
  259. } else {
  260. // TODO: check error
  261. slot.state = SlotState::Finished;
  262. }
  263. let state = slot.state;
  264. slot.state = SlotState::Idle;
  265. debug_assert_ne!(state, SlotState::Working);
  266. self.release_free_slot(slot_index as u32);
  267. match state {
  268. SlotState::Finished => {
  269. self.stats.lock().cmd_sent += 1;
  270. Ok(())
  271. }
  272. SlotState::Error => {
  273. self.stats.lock().cmd_error += 1;
  274. Err(EIO)
  275. }
  276. _ => panic!("Invalid slot state"),
  277. }
  278. }
  279. fn identify(&self) -> KResult<()> {
  280. let cmd = IdentifyCommand::new();
  281. // TODO: check returned data
  282. self.send_command(&cmd)?;
  283. Ok(())
  284. }
  285. pub fn init(&self) -> KResult<()> {
  286. self.stop_command()?;
  287. vwrite(self.interrupt_enable(), PORT_IE_DEFAULT);
  288. vwrite(self.command_list_base(), self.page.as_phys() as u64);
  289. vwrite(self.fis_base(), self.page.as_phys() as u64 + 0x400);
  290. self.start_command()?;
  291. match self.identify() {
  292. Err(err) => {
  293. self.stop_command()?;
  294. Err(err)
  295. }
  296. Ok(_) => Ok(()),
  297. }
  298. }
  299. }
  300. impl BlockRequestQueue for AdapterPort {
  301. fn max_request_pages(&self) -> u64 {
  302. 1024
  303. }
  304. fn submit(&self, req: BlockDeviceRequest) -> KResult<()> {
  305. // TODO: check disk size limit using newtype
  306. if req.count > 65535 {
  307. return Err(EINVAL);
  308. }
  309. let command = ReadLBACommand::new(req.buffer, req.sector, req.count as u16)?;
  310. self.send_command(&command)
  311. }
  312. }