/* kernel/sched/stop_task.c */
#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system, it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
#ifdef CONFIG_SMP
/*
 * Wakeup CPU selection for stop tasks: they are pinned to their CPU,
 * so placement is always the CPU the task is already on.
 */
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */
/*
 * Preemption check for a wakeup while a stop task is current.
 * The stop task is the highest priority task in the system, so no
 * wakeup can ever preempt it — intentionally a no-op.
 */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
  22. static struct task_struct *
  23. pick_next_task_stop(struct rq *rq, struct task_struct *prev)
  24. {
  25. struct task_struct *stop = rq->stop;
  26. if (!stop || !task_on_rq_queued(stop))
  27. return NULL;
  28. put_prev_task(rq, prev);
  29. stop->se.exec_start = rq_clock_task(rq);
  30. return stop;
  31. }
/*
 * Enqueue the stop task: rq->stop is managed outside the normal
 * runqueue structures, so only the nr_running count is maintained.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}
/*
 * Dequeue the stop task: mirror of enqueue_task_stop(), only the
 * nr_running count needs updating.
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}
/* A stop task yielding the CPU makes no sense; treat it as a bug. */
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
/*
 * Account the CPU time consumed while the stop task was current:
 * compute the runtime delta since exec_start, fold it into the
 * task's sum_exec_runtime, the schedstat exec_max high-water mark,
 * group accounting and cpuacct, then restart the accounting window.
 *
 * NOTE(review): charges rq->curr rather than @prev — presumably the
 * caller invokes this while prev is still current; confirm at call sites.
 */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	/* Clamp negative deltas (clock skew across CPUs) to zero. */
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	/* Start a new accounting window before charging cpuacct. */
	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}
/*
 * Scheduler tick for a running stop task: no timeslice or preemption
 * bookkeeping applies, so this is intentionally empty.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
/*
 * Called when the stop task becomes rq->curr without going through
 * pick_next_task_stop(); restart its runtime accounting window.
 */
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}
/* Tasks cannot be switched into the stop class; treat it as a bug. */
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}
/* Stop tasks have no adjustable priority; a change here is a bug. */
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority? */
}
/*
 * Round-robin interval query: stop tasks have no timeslice,
 * so report zero.
 */
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}
/*
 * Runtime accounting hook: for this class the accounting is done in
 * put_prev_task_stop(), so this callback is intentionally empty.
 */
static void update_curr_stop(struct rq *rq)
{
}
/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,	/* next-lower class in priority order */

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};