// scheduler.rs

use crate::{
    context::ExecutionContext,
    executor::{ExecuteStatus, OutputHandle, Stack},
    ready_queue::{cpu_rq, local_rq},
    run::{Contexted, Run},
    task::{Task, TaskAdapter, TaskHandle},
};
use alloc::sync::Arc;
use core::{
    mem::forget,
    ptr::NonNull,
    sync::atomic::{compiler_fence, Ordering},
    task::Waker,
};
use eonix_log::println_trace;
use eonix_preempt::assert_preempt_count_eq;
use eonix_sync::{LazyLock, Spin, SpinIrq as _};
use intrusive_collections::RBTree;
use pointers::BorrowedArc;

#[arch::define_percpu]
static CURRENT_TASK: Option<NonNull<Task>> = None;

#[arch::define_percpu]
static LOCAL_SCHEDULER_CONTEXT: ExecutionContext = ExecutionContext::new();

static TASKS: LazyLock<Spin<RBTree<TaskAdapter>>> =
    LazyLock::new(|| Spin::new(RBTree::new(TaskAdapter::new())));

pub struct Scheduler;

pub struct JoinHandle<Output>(Arc<Spin<OutputHandle<Output>>>)
where
    Output: Send;

impl Task {
    pub fn current<'a>() -> BorrowedArc<'a, Task> {
        unsafe {
            // SAFETY:
            // `CURRENT_TASK` is only changed by the scheduler, so we never observe it
            // changing under us. Even if we are preempted, by the time we return here
            // `CURRENT_TASK` will hold the same value and still be valid.
            BorrowedArc::from_raw(CURRENT_TASK.get().expect("Current task should be present"))
        }
    }
}

impl<O> JoinHandle<O>
where
    O: Send,
{
    /// Wait until the spawned task's output is available and return it.
    ///
    /// The current task is registered as a waiter the first time the output turns
    /// out not to be ready yet.
    pub fn join(self) -> O {
        let Self(output) = self;
        let mut waker = Some(Waker::from(Task::current().clone()));

        loop {
            let mut locked = output.lock();
            match locked.try_resolve() {
                Some(output) => break output,
                None => {
                    if let Some(waker) = waker.take() {
                        locked.register_waiter(waker);
                    }
                }
            }
        }
    }
}

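// Usage sketch (not part of this module): spawn a runnable on a fresh stack and
// block on its output. `KernelStack` and `my_runnable` are hypothetical stand-ins
// for types satisfying the `Stack` and `Run + Contexted + Send` bounds.
//
//     let handle = Scheduler::get().spawn::<KernelStack, _>(my_runnable);
//     let output = handle.join();
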
impl Scheduler {
    /// `Scheduler` might be used in various places. Do not hold it for a long time.
    ///
    /// # Safety
    /// The lock returned by this function should be locked with `lock_irq` to prevent
    /// rescheduling during access to the scheduler. Disabling preemption will do the same.
    ///
    /// Drop the lock before calling `schedule`.
    pub fn get() -> &'static Self {
        static GLOBAL_SCHEDULER: Scheduler = Scheduler;
        &GLOBAL_SCHEDULER
    }

    pub fn init_local_scheduler<S>()
    where
        S: Stack,
    {
        let stack = S::new();

        unsafe {
            eonix_preempt::disable();
            // SAFETY: Preemption is disabled.
            let context: &mut ExecutionContext = LOCAL_SCHEDULER_CONTEXT.as_mut();
            context.set_ip(local_scheduler as _);
            context.set_sp(stack.get_bottom().addr().get() as usize);
            eonix_preempt::enable();
        }

        // Leak the stack on purpose: the scheduler context keeps using it, so it
        // must never be freed.
        forget(stack);
    }

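    // Per-CPU bootstrap sketch (assumed call order; `KernelStack` is a hypothetical
    // `Stack` implementation): set up the local scheduler context once, then enter
    // the scheduler loop and never return.
    //
    //     Scheduler::init_local_scheduler::<KernelStack>();
    //     eonix_preempt::disable(); // the `goto_*` functions below expect preempt_count == 1
    //     unsafe { Scheduler::goto_scheduler_noreturn() };
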
    /// # Safety
    /// This function must not be called inside of the scheduler context.
    ///
    /// The caller must ensure that `preempt::count` == 1.
    pub unsafe fn go_from_scheduler(to: &ExecutionContext) {
        // SAFETY: Preemption is disabled.
        unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref() }.switch_to(to);
    }

    /// # Safety
    /// This function must not be called inside of the scheduler context.
    ///
    /// The caller must ensure that `preempt::count` == 1.
    pub unsafe fn goto_scheduler(from: &ExecutionContext) {
        // SAFETY: Preemption is disabled.
        from.switch_to(unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref() });
    }

    /// # Safety
    /// This function must not be called inside of the scheduler context.
    ///
    /// The caller must ensure that `preempt::count` == 1.
    pub unsafe fn goto_scheduler_noreturn() -> ! {
        // SAFETY: Preemption is disabled.
        unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref().switch_noreturn() }
    }

    fn add_task(task: Arc<Task>) {
        TASKS.lock().insert(task);
    }

    fn remove_task(task: &Task) {
        unsafe { TASKS.lock().cursor_mut_from_ptr(task as *const _).remove() };
    }

    fn select_cpu_for_task(&self, task: &Task) -> usize {
        task.cpu.load(Ordering::Relaxed) as _
    }

    pub fn activate(&self, task: &Arc<Task>) {
        // Only one cpu can be activating the task at a time.
        // TODO: Add some checks.
        if task.on_rq.swap(true, Ordering::Acquire) {
            // Lock the rq and check whether the task is still on it.
            let cpuid = task.cpu.load(Ordering::Acquire);
            let mut rq = cpu_rq(cpuid as _).lock_irq();

            if !task.on_rq.load(Ordering::Acquire) {
                // The task has just been taken off the rq. Put it back.
                rq.put(task.clone());
            } else {
                // The task is already on the rq. Nothing to do.
                return;
            }
        } else {
            // The task is not on any rq. Select one and put the task there.
            let cpu = self.select_cpu_for_task(task);
            let mut rq = cpu_rq(cpu).lock_irq();

            task.cpu.store(cpu as _, Ordering::Release);
            rq.put(task.clone());
        }
    }

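    // Note on `activate` (a reading of the code above, not a guarantee): the `on_rq`
    // swap lets a waker race with `local_scheduler`, which clears `on_rq` under its
    // rq lock when parking a task. Re-checking `on_rq` here under that same per-CPU
    // rq lock resolves the race: a task observed as just parked is put back on the
    // run queue it was parked on.
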
    pub fn spawn<S, R>(&self, runnable: R) -> JoinHandle<R::Output>
    where
        S: Stack + 'static,
        R: Run + Contexted + Send + 'static,
        R::Output: Send + 'static,
    {
        let TaskHandle {
            task,
            output_handle,
        } = Task::new::<S, _>(runnable);

        Self::add_task(task.clone());
        self.activate(&task);

        JoinHandle(output_handle)
    }

    /// Go to the idle task. Call this with `preempt_count == 1`.
    /// The preempt count will be decremented by this function.
    ///
    /// # Safety
    /// We might never return from here.
    /// Drop all variables that take ownership of some resource before calling this function.
    pub fn schedule() {
        assert_preempt_count_eq!(1, "Scheduler::schedule");

        // Make sure all work is done before scheduling.
        compiler_fence(Ordering::SeqCst);

        // TODO!!!!!: Use of reference here needs further consideration.
        //
        // Since we might never return to here, we can't take ownership of `current()`.
        // Is it safe to believe that `current()` will never change across calls?
        unsafe {
            // SAFETY: Preemption is disabled.
            Scheduler::goto_scheduler(&Task::current().execution_context);
        }

        eonix_preempt::enable();
    }
}

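// Typical call pattern for `Scheduler::schedule` (a sketch based on the assertions
// above, not a prescribed API): the caller raises the preempt count to exactly 1,
// then hands control to the per-CPU scheduler loop; `schedule` re-enables
// preemption before returning.
//
//     eonix_preempt::disable(); // preempt_count: 0 -> 1
//     Scheduler::schedule();    // may park or re-queue the current task
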
extern "C" fn local_scheduler() -> ! {
    loop {
        assert_preempt_count_eq!(1, "Scheduler::idle_task");

        let mut rq = local_rq().lock_irq();

        let previous_task = CURRENT_TASK
            .get()
            .map(|ptr| unsafe { Arc::from_raw(ptr.as_ptr()) });
        let next_task = rq.get();

        match (previous_task, next_task) {
            (None, None) => {
                // Nothing to do, halt the cpu and rerun the loop.
                drop(rq);
                arch::halt();
                continue;
            }
            (None, Some(next)) => {
                CURRENT_TASK.set(NonNull::new(Arc::into_raw(next) as *mut _));
            }
            (Some(previous), None) => {
                if previous.state.is_running() {
                    // The previous task is still `Running`; return to it without a
                    // context switch.
                    println_trace!(
                        "trace_scheduler",
                        "Returning to task id({}) without doing context switch",
                        previous.id
                    );
                    CURRENT_TASK.set(NonNull::new(Arc::into_raw(previous) as *mut _));
                } else {
                    // Nothing to do, halt the cpu and rerun the loop.
                    CURRENT_TASK.set(NonNull::new(Arc::into_raw(previous) as *mut _));
                    drop(rq);
                    arch::halt();
                    continue;
                }
            }
            (Some(previous), Some(next)) => {
                println_trace!(
                    "trace_scheduler",
                    "Switching from task id({}) to task id({})",
                    previous.id,
                    next.id
                );

                debug_assert_ne!(previous.id, next.id, "Switching to the same task");

                if previous.state.is_running() || !previous.state.try_park() {
                    // The previous task is still runnable (or refused to park);
                    // keep it on the run queue.
                    rq.put(previous);
                } else {
                    // The previous task has been parked; mark it as off the rq.
                    previous.on_rq.store(false, Ordering::Release);
                }

                CURRENT_TASK.set(NonNull::new(Arc::into_raw(next) as *mut _));
            }
        }

        drop(rq);

        // TODO: We can move the release of finished tasks to some worker thread.
        if let ExecuteStatus::Finished = Task::current().run() {
            let current = CURRENT_TASK
                .swap(None)
                .map(|ptr| unsafe { Arc::from_raw(ptr.as_ptr()) })
                .expect("Current task should be present");

            Scheduler::remove_task(&current);
        }
    }
}