scheduler.rs

use crate::{
    context::ExecutionContext,
    executor::{ExecuteStatus, OutputHandle, Stack},
    ready_queue::{cpu_rq, local_rq},
    run::{Contexted, Run},
    task::{Task, TaskAdapter, TaskHandle},
};
use alloc::sync::Arc;
use core::{
    mem::forget,
    ptr::NonNull,
    sync::atomic::{compiler_fence, Ordering},
    task::Waker,
};
use eonix_log::println_trace;
use eonix_preempt::assert_preempt_count_eq;
use eonix_sync::{LazyLock, Spin, SpinIrq as _};
use intrusive_collections::RBTree;
use pointers::BorrowedArc;

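// Raw pointer to the `Arc<Task>` currently running on this CPU, if any.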
#[eonix_percpu::define_percpu]
static CURRENT_TASK: Option<NonNull<Task>> = None;

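// Saved execution context of this CPU's scheduler loop (`local_scheduler`).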
#[eonix_percpu::define_percpu]
static LOCAL_SCHEDULER_CONTEXT: ExecutionContext = ExecutionContext::new();

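// Global registry of all live tasks, kept in an intrusive red-black tree.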
static TASKS: LazyLock<Spin<RBTree<TaskAdapter>>> =
    LazyLock::new(|| Spin::new(RBTree::new(TaskAdapter::new())));

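/// Zero-sized handle to the global scheduler; obtain it with [`Scheduler::get`].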
pub struct Scheduler;

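/// Handle returned by [`Scheduler::spawn`] that can be used to wait for the
/// spawned runnable's output.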
pub struct JoinHandle<Output>(Arc<Spin<OutputHandle<Output>>>)
where
    Output: Send;

impl Task {
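    /// Returns a borrowed reference to the task currently running on this CPU.
    ///
    /// Panics if no current task has been installed yet (e.g. inside the
    /// scheduler loop before the first task has been picked).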
    pub fn current<'a>() -> BorrowedArc<'a, Task> {
        unsafe {
            // SAFETY:
            // We should never "inspect" a change in `current`.
            // The change of `CURRENT` will only happen in the scheduler. And if we are
            // preempted, when we DO return, the `CURRENT` will be the same and remain valid.
            BorrowedArc::from_raw(CURRENT_TASK.get().expect("Current task should be present"))
        }
    }
}

impl<O> JoinHandle<O>
where
    O: Send,
{
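    /// Wait until the spawned runnable has produced its output and return it.
    ///
    /// If the output is not ready yet, the current task is registered as a
    /// waiter on the output handle and the loop retries until it resolves.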
    pub fn join(self) -> O {
        let Self(output) = self;
        let mut waker = Some(Waker::from(Task::current().clone()));

        loop {
            let mut locked = output.lock();

            match locked.try_resolve() {
                Some(output) => break output,
                None => {
                    if let Some(waker) = waker.take() {
                        locked.register_waiter(waker);
                    }
                }
            }
        }
    }
}

impl Scheduler {
    /// `Scheduler` might be used in various places. Do not hold it for a long time.
    ///
    /// # Safety
    /// The lock returned by this function should be locked with `lock_irq` to prevent
    /// rescheduling during access to the scheduler. Disabling preemption will do the same.
    ///
    /// Drop the lock before calling `schedule`.
    pub fn get() -> &'static Self {
        static GLOBAL_SCHEDULER: Scheduler = Scheduler;
        &GLOBAL_SCHEDULER
    }

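    /// Initialize this CPU's scheduler context: allocate a fresh stack of type
    /// `S` and point the saved context at [`local_scheduler`].
    ///
    /// The stack is deliberately leaked with `forget` because the scheduler
    /// context lives for the rest of the CPU's lifetime.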
    pub fn init_local_scheduler<S>()
    where
        S: Stack,
    {
        let stack = S::new();

        unsafe {
            eonix_preempt::disable();

            // SAFETY: Preemption is disabled.
            let context: &mut ExecutionContext = LOCAL_SCHEDULER_CONTEXT.as_mut();

            context.set_ip(local_scheduler as _);
            context.set_sp(stack.get_bottom().addr().get() as usize);
            context.set_interrupt(true);

            eonix_preempt::enable();
        }

        // We don't need to keep the stack around.
        forget(stack);
    }

    /// # Safety
    /// This function must not be called inside the scheduler context.
    ///
    /// The caller must ensure that `preempt::count` == 1.
    pub unsafe fn go_from_scheduler(to: &ExecutionContext) {
        // SAFETY: Preemption is disabled.
        unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref() }.switch_to(to);
    }

    /// # Safety
    /// This function must not be called inside the scheduler context.
    ///
    /// The caller must ensure that `preempt::count` == 1.
    pub unsafe fn goto_scheduler(from: &ExecutionContext) {
        // SAFETY: Preemption is disabled.
        from.switch_to(unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref() });
    }

    /// # Safety
    /// This function must not be called inside the scheduler context.
    ///
    /// The caller must ensure that `preempt::count` == 1.
    pub unsafe fn goto_scheduler_noreturn() -> ! {
        // SAFETY: Preemption is disabled.
        unsafe { LOCAL_SCHEDULER_CONTEXT.as_ref().switch_noreturn() }
    }

    fn add_task(task: Arc<Task>) {
        TASKS.lock().insert(task);
    }

    fn remove_task(task: &Task) {
        unsafe { TASKS.lock().cursor_mut_from_ptr(task as *const _).remove() };
    }

    fn select_cpu_for_task(&self, task: &Task) -> usize {
        task.cpu.load(Ordering::Relaxed) as _
    }

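    /// Make `task` runnable by putting it on a run queue.
    ///
    /// If the task already claims to be on a run queue, that queue is locked and
    /// the `on_rq` flag is re-checked so that a task which has just been taken
    /// off the queue is put back; otherwise a CPU is selected and the task is
    /// enqueued on that CPU's run queue.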
    pub fn activate(&self, task: &Arc<Task>) {
        // Only one cpu can be activating the task at a time.
        // TODO: Add some checks.
        if task.on_rq.swap(true, Ordering::Acquire) {
            // Lock the rq and check again whether the task is on the rq.
            let cpuid = task.cpu.load(Ordering::Acquire);
            let mut rq = cpu_rq(cpuid as _).lock_irq();

            if !task.on_rq.load(Ordering::Acquire) {
                // The task has just come off the rq. Put it back.
                rq.put(task.clone());
            } else {
                // The task is already on the rq. Do nothing.
                return;
            }
        } else {
            // The task is not on any rq. Select one and put it there.
            let cpu = self.select_cpu_for_task(&task);
            let mut rq = cpu_rq(cpu).lock_irq();

            task.cpu.store(cpu as _, Ordering::Release);
            rq.put(task.clone());
        }
    }

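    /// Create a new task running `runnable` on a freshly allocated stack of type
    /// `S`, register it in the global task list and activate it. The returned
    /// [`JoinHandle`] can be used to wait for the runnable's output.
    ///
    /// # Example
    ///
    /// A minimal sketch; `KernelStack` and `my_runnable` stand in for whatever
    /// `Stack` and `Run + Contexted` implementations the caller provides.
    ///
    /// ```ignore
    /// let handle = Scheduler::get().spawn::<KernelStack, _>(my_runnable);
    /// let output = handle.join();
    /// ```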
    pub fn spawn<S, R>(&self, runnable: R) -> JoinHandle<R::Output>
    where
        S: Stack + 'static,
        R: Run + Contexted + Send + 'static,
        R::Output: Send + 'static,
    {
        let TaskHandle {
            task,
            output_handle,
        } = Task::new::<S, _>(runnable);

        Self::add_task(task.clone());
        self.activate(&task);

        JoinHandle(output_handle)
    }

    /// Go to the idle task. Call this with `preempt_count == 1`.
    /// The preempt count will be decremented by this function.
    ///
    /// # Safety
    /// We might never return from here.
    /// Drop all variables that take ownership of some resource before calling this function.
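    ///
    /// # Example
    ///
    /// A minimal sketch of yielding the CPU, assuming the preempt count is 0 on
    /// entry:
    ///
    /// ```ignore
    /// eonix_preempt::disable(); // preempt count: 0 -> 1
    /// Scheduler::schedule();    // switch away; preemption is re-enabled on return
    /// ```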
    pub fn schedule() {
        assert_preempt_count_eq!(1, "Scheduler::schedule");

        // Make sure all work is done before scheduling.
        compiler_fence(Ordering::SeqCst);

        // TODO!!!!!: Use of reference here needs further consideration.
        //
        // Since we might never return here, we can't take ownership of `current()`.
        // Is it safe to believe that `current()` will never change across calls?
        unsafe {
            // SAFETY: Preemption is disabled.
            Scheduler::goto_scheduler(&Task::current().execution_context);
        }

        eonix_preempt::enable();
    }
}

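/// Per-CPU scheduler loop, entered on the stack installed by
/// [`Scheduler::init_local_scheduler`].
///
/// Each iteration locks the local run queue, decides which task to run next
/// (halting the CPU when there is nothing runnable), runs it, and removes the
/// task from the global task list once it has finished.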
extern "C" fn local_scheduler() -> ! {
    loop {
        assert_preempt_count_eq!(1, "Scheduler::idle_task");

        let mut rq = local_rq().lock_irq();

        let previous_task = CURRENT_TASK
            .get()
            .map(|ptr| unsafe { Arc::from_raw(ptr.as_ptr()) });
        let next_task = rq.get();

        match (previous_task, next_task) {
            (None, None) => {
                // Nothing to do, halt the cpu and rerun the loop.
                drop(rq);
                arch::halt();
                continue;
            }
            (None, Some(next)) => {
                CURRENT_TASK.set(NonNull::new(Arc::into_raw(next) as *mut _));
            }
            (Some(previous), None) => {
                if previous.state.is_running() {
                    // The previous task is still `Running`; return to it without a context switch.
                    println_trace!(
                        "trace_scheduler",
                        "Returning to task id({}) without doing context switch",
                        previous.id
                    );
                    CURRENT_TASK.set(NonNull::new(Arc::into_raw(previous) as *mut _));
                } else {
                    // Nothing to do, halt the cpu and rerun the loop.
                    CURRENT_TASK.set(NonNull::new(Arc::into_raw(previous) as *mut _));
                    drop(rq);
                    arch::halt();
                    continue;
                }
            }
            (Some(previous), Some(next)) => {
                println_trace!(
                    "trace_scheduler",
                    "Switching from task id({}) to task id({})",
                    previous.id,
                    next.id
                );
                debug_assert_ne!(previous.id, next.id, "Switching to the same task");

                if previous.state.is_running() || !previous.state.try_park() {
                    rq.put(previous);
                } else {
                    previous.on_rq.store(false, Ordering::Release);
                }

                CURRENT_TASK.set(NonNull::new(Arc::into_raw(next) as *mut _));
            }
        }

        drop(rq);

        // TODO: We can move the release of finished tasks to some worker thread.
        if let ExecuteStatus::Finished = Task::current().run() {
            let current = CURRENT_TASK
                .swap(None)
                .map(|ptr| unsafe { Arc::from_raw(ptr.as_ptr()) })
                .expect("Current task should be present");

            Scheduler::remove_task(&current);
        }
    }
}