// page_cache.rs

use super::access::AsMemoryBlock;
use crate::{
    io::{Buffer, FillResult, Stream},
    kernel::mem::page_alloc::RawPagePtr,
    prelude::KResult,
    GlobalPageAlloc,
};
use align_ext::AlignExt;
use alloc::{collections::btree_map::BTreeMap, sync::Weak};
use eonix_mm::paging::{PageAlloc, RawPage, PAGE_SIZE, PAGE_SIZE_BITS};
use eonix_sync::Mutex;

pub struct PageCache {
    pages: Mutex<BTreeMap<usize, CachePage>>,
    backend: Weak<dyn PageCacheBackend>,
}

unsafe impl Send for PageCache {}
unsafe impl Sync for PageCache {}

#[derive(Clone, Copy)]
pub struct CachePage(RawPagePtr);

impl Buffer for CachePage {
    fn total(&self) -> usize {
        PAGE_SIZE
    }

    fn wrote(&self) -> usize {
        self.valid_size()
    }

    fn fill(&mut self, data: &[u8]) -> KResult<FillResult> {
        let valid_size = self.valid_size();
        let available = &mut self.all_mut()[valid_size..];
        if available.is_empty() {
            return Ok(FillResult::Full);
        }
        let len = core::cmp::min(data.len(), available.len());
        available[..len].copy_from_slice(&data[..len]);
        *self.0.valid_size() += len;
        if len < data.len() {
            Ok(FillResult::Partial(len))
        } else {
            Ok(FillResult::Done(len))
        }
    }
}

impl CachePage {
    pub fn new() -> Self {
        let page = GlobalPageAlloc.alloc().unwrap();
        page.cache_init();
        Self(page)
    }

    pub fn new_zeroed() -> Self {
        let page = GlobalPageAlloc.alloc().unwrap();
        // SAFETY: We own the page exclusively, so we can safely zero it.
        unsafe {
            page.as_memblk().as_bytes_mut().fill(0);
        }
        page.cache_init();
        Self(page)
    }

    pub fn valid_size(&self) -> usize {
        *self.0.valid_size()
    }

    pub fn set_valid_size(&mut self, valid_size: usize) {
        *self.0.valid_size() = valid_size;
    }

    pub fn all(&self) -> &[u8] {
        unsafe { self.0.as_memblk().as_bytes() }
    }

    pub fn all_mut(&mut self) -> &mut [u8] {
        unsafe { self.0.as_memblk().as_bytes_mut() }
    }

    pub fn valid_data(&self) -> &[u8] {
        &self.all()[..self.valid_size()]
    }

    pub fn is_dirty(&self) -> bool {
        self.0.is_dirty()
    }

    pub fn set_dirty(&self) {
        self.0.set_dirty();
    }

    pub fn clear_dirty(&self) {
        self.0.clear_dirty();
    }
}
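
// Illustrative usage, not from the original code: `CachePage` tracks how many bytes
// at the start of the page are valid, so filling it as a `Buffer` appends after the
// already-valid data. A hypothetical sequence:
//
//     let mut page = CachePage::new();            // valid_size == 0
//     let _ = page.fill(b"hello")?;               // copies 5 bytes
//     assert_eq!(page.valid_data(), b"hello");    // valid_size == 5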

impl PageCache {
    pub fn new(backend: Weak<dyn PageCacheBackend>) -> Self {
        Self {
            pages: Mutex::new(BTreeMap::new()),
            backend,
        }
    }

    pub async fn read(&self, buffer: &mut dyn Buffer, mut offset: usize) -> KResult<usize> {
        let mut pages = self.pages.lock().await;
        loop {
            let page_id = offset >> PAGE_SIZE_BITS;
            let page = pages.get(&page_id);
            match page {
                Some(page) => {
                    let inner_offset = offset % PAGE_SIZE;
                    // TODO: This still causes unnecessary IO when valid_size < PAGE_SIZE
                    // and the fill result is Done.
                    if page.valid_size() == 0
                        || buffer
                            .fill(&page.valid_data()[inner_offset..])?
                            .should_stop()
                        || buffer.available() == 0
                    {
                        break;
                    }
                    offset += PAGE_SIZE - inner_offset;
                }
                None => {
                    // Cache miss: read the page from the backend, then retry this offset.
                    let mut new_page = CachePage::new();
                    self.backend
                        .upgrade()
                        .unwrap()
                        .read_page(&mut new_page, offset.align_down(PAGE_SIZE))?;
                    pages.insert(page_id, new_page);
                }
            }
        }
        Ok(buffer.wrote())
    }

    pub async fn write(&self, stream: &mut dyn Stream, mut offset: usize) -> KResult<usize> {
        let mut pages = self.pages.lock().await;
        let old_size = self.backend.upgrade().unwrap().size();
        let mut wrote = 0;
        loop {
            let page_id = offset >> PAGE_SIZE_BITS;
            let page = pages.get_mut(&page_id);
            match page {
                Some(page) => {
                    let inner_offset = offset % PAGE_SIZE;
                    let cursor_end =
                        match stream.poll_data(&mut page.all_mut()[inner_offset..])? {
                            Some(buf) => {
                                wrote += buf.len();
                                inner_offset + buf.len()
                            }
                            None => break,
                        };
                    if page.valid_size() < cursor_end {
                        page.set_valid_size(cursor_end);
                    }
                    page.set_dirty();
                    offset += PAGE_SIZE - inner_offset;
                }
                None => {
                    // Cache miss: a page beyond the one containing the old end of file
                    // starts out zeroed; otherwise its contents are read from the backend.
                    let new_page = if (offset >> PAGE_SIZE_BITS) > (old_size >> PAGE_SIZE_BITS) {
                        CachePage::new_zeroed()
                    } else {
                        let mut new_page = CachePage::new();
                        self.backend
                            .upgrade()
                            .unwrap()
                            .read_page(&mut new_page, offset.align_down(PAGE_SIZE))?;
                        new_page
                    };
                    pages.insert(page_id, new_page);
                }
            }
        }
        Ok(wrote)
    }

    pub async fn fsync(&self) -> KResult<()> {
        let pages = self.pages.lock().await;
        for (&page_id, page) in pages.iter() {
            if page.is_dirty() {
                self.backend
                    .upgrade()
                    .unwrap()
                    .write_page(page, page_id << PAGE_SIZE_BITS)?;
                page.clear_dirty();
            }
        }
        Ok(())
    }

    // This function is used for extending writes and for truncation.
    pub async fn resize(&self, new_size: usize) -> KResult<()> {
        let mut pages = self.pages.lock().await;
        let old_size = self.backend.upgrade().unwrap().size();
        if new_size < old_size {
            // Truncation: drop every cached page from the one containing `new_size`
            // up to the old end of file.
            let begin = new_size.align_down(PAGE_SIZE) >> PAGE_SIZE_BITS;
            let end = old_size.align_up(PAGE_SIZE) >> PAGE_SIZE_BITS;
            for page_id in begin..end {
                pages.remove(&page_id);
            }
        } else if new_size > old_size {
            // Extension: the old last page and every new page become zeroed, dirty pages.
            // NOTE: if `old_size` is not page-aligned, the data cached in the old partial
            // last page is dropped here.
            let begin = old_size.align_down(PAGE_SIZE) >> PAGE_SIZE_BITS;
            let end = new_size.align_up(PAGE_SIZE) >> PAGE_SIZE_BITS;
            pages.remove(&begin);
            for page_id in begin..end {
                let mut new_page = CachePage::new_zeroed();
                if page_id != end - 1 {
                    new_page.set_valid_size(PAGE_SIZE);
                } else {
                    // Also handles a page-aligned `new_size`, where `new_size % PAGE_SIZE`
                    // would incorrectly give 0.
                    new_page.set_valid_size(new_size - (page_id << PAGE_SIZE_BITS));
                }
                new_page.set_dirty();
                pages.insert(page_id, new_page);
            }
        }
        Ok(())
    }

    pub async fn get_page(&self, offset: usize) -> KResult<Option<RawPagePtr>> {
        let offset_aligned = offset.align_down(PAGE_SIZE);
        let page_id = offset_aligned >> PAGE_SIZE_BITS;
        let size = self.backend.upgrade().unwrap().size();
        if offset_aligned > size {
            return Ok(None);
        }
        let mut pages = self.pages.lock().await;
        if let Some(page) = pages.get(&page_id) {
            Ok(Some(page.0))
        } else {
            let mut new_page = CachePage::new();
            self.backend
                .upgrade()
                .unwrap()
                .read_page(&mut new_page, offset_aligned)?;
            pages.insert(page_id, new_page);
            Ok(Some(new_page.0))
        }
    }
}

// With this trait, "page cache" and "block cache" are unified:
// - for a filesystem, `offset` is the file offset (floor-aligned to PAGE_SIZE);
// - for a block device, `offset` is the block index (floor-aligned to PAGE_SIZE / BLK_SIZE).
// Note that this can also create unnecessary cache entries.
pub trait PageCacheBackend {
    fn read_page(&self, page: &mut CachePage, offset: usize) -> KResult<usize>;
    fn write_page(&self, page: &CachePage, offset: usize) -> KResult<usize>;
    fn size(&self) -> usize;
}
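
// A minimal sketch, not part of the original code: one way a backend could implement
// `PageCacheBackend`. `ZeroBackend` is hypothetical; it serves zero-filled pages and
// discards writes, so real device I/O, locking, and error handling are out of scope.
#[cfg(test)]
mod backend_example {
    #![allow(dead_code)]

    use super::*;

    struct ZeroBackend {
        size: usize,
    }

    impl PageCacheBackend for ZeroBackend {
        fn read_page(&self, page: &mut CachePage, _offset: usize) -> KResult<usize> {
            // Pretend the device returned a full page of zeroes.
            page.all_mut().fill(0);
            page.set_valid_size(PAGE_SIZE);
            Ok(PAGE_SIZE)
        }

        fn write_page(&self, page: &CachePage, _offset: usize) -> KResult<usize> {
            // Discard the data, reporting the whole valid range as written.
            Ok(page.valid_data().len())
        }

        fn size(&self) -> usize {
            self.size
        }
    }
}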

pub trait PageCacheRawPage: RawPage {
    fn valid_size(&self) -> &mut usize;
    fn is_dirty(&self) -> bool;
    fn set_dirty(&self);
    fn clear_dirty(&self);
    fn cache_init(&self);
}

impl Drop for PageCache {
    fn drop(&mut self) {
        // NOTE: `fsync` is async; the returned future is dropped here without being
        // polled, so dirty pages are not actually written back on drop.
        let _ = self.fsync();
    }
}