cpudeadline.c

/*
 * kernel/sched/cpudl.c
 *
 * Global CPU deadline management
 *
 * Author: Juri Lelli <j.lelli@sssup.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "cpudeadline.h"
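
/*
 * The cp->elements[] array is used in two ways at once: entry i acts as
 * heap slot i (holding a CPU number and that CPU's earliest deadline),
 * while entry cpu holds, in its ->idx field, the heap slot currently
 * occupied by that CPU (or IDX_INVALID if the CPU is not in the heap).
 * The heap is a max-heap ordered by deadline, so the root is the CPU
 * whose earliest deadline is the latest in the system.
 *
 * The helpers below implement the usual array-embedded binary heap
 * indexing; for example, for idx == 3: parent(3) == 1, left_child(3) == 7
 * and right_child(3) == 8.
 */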
static inline int parent(int i)
{
	return (i - 1) >> 1;
}

static inline int left_child(int i)
{
	return (i << 1) + 1;
}

static inline int right_child(int i)
{
	return (i << 1) + 2;
}
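
/*
 * Swap heap slots @a and @b and keep the reverse mapping consistent:
 * after the swap, elements[cpu].idx still points at the slot where
 * each of the two affected CPUs now lives.
 */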
static void cpudl_exchange(struct cpudl *cp, int a, int b)
{
	int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;

	swap(cp->elements[a].cpu, cp->elements[b].cpu);
	swap(cp->elements[a].dl, cp->elements[b].dl);

	swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
}
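
/*
 * Sift the entry at @idx down the max-heap until neither child has a
 * later deadline than it does.
 */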
static void cpudl_heapify(struct cpudl *cp, int idx)
{
	int l, r, largest;

	/* adapted from lib/prio_heap.c */
	while (1) {
		l = left_child(idx);
		r = right_child(idx);
		largest = idx;

		if ((l < cp->size) && dl_time_before(cp->elements[idx].dl,
							cp->elements[l].dl))
			largest = l;
		if ((r < cp->size) && dl_time_before(cp->elements[largest].dl,
							cp->elements[r].dl))
			largest = r;
		if (largest == idx)
			break;

		/* Push idx down the heap one level and bump one up */
		cpudl_exchange(cp, largest, idx);
		idx = largest;
	}
}
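
/*
 * Change the deadline key stored at heap slot @idx and restore the
 * max-heap property: sift down when the new deadline is earlier than
 * the old one, bubble up towards the root otherwise.
 */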
static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
{
	WARN_ON(idx == IDX_INVALID || !cpu_present(idx));

	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
		cp->elements[idx].dl = new_dl;
		cpudl_heapify(cp, idx);
	} else {
		cp->elements[idx].dl = new_dl;
		while (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
					cp->elements[idx].dl)) {
			cpudl_exchange(cp, idx, parent(idx));
			idx = parent(idx);
		}
	}
}
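
/*
 * Return the CPU at the heap root, i.e. the CPU whose earliest deadline
 * is the latest in the system.
 */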
static inline int cpudl_maximum(struct cpudl *cp)
{
	return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
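
/*
 * Illustrative usage sketch (not part of this file): the -deadline
 * scheduling class is expected to ask this heap where a task could be
 * pushed, roughly along these lines; the rq->rd->cpudl and later_mask
 * names are assumptions about how the caller is structured:
 *
 *	int cpu = cpudl_find(&rq->rd->cpudl, p, later_mask);
 *
 *	if (cpu != -1) {
 *		// Either 'cpu' carries no -dl tasks (free_cpus hit) or its
 *		// earliest deadline is later than p's, so p can run there.
 *	}
 */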

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target cpu
 * @dl: the new earliest deadline for this cpu
 * @is_valid: non-zero if @dl is a valid deadline for @cpu; when zero,
 *            @cpu is removed from the heap instead of being updated
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
{
	int old_idx, new_cpu;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);
	old_idx = cp->elements[cpu].idx;
	if (!is_valid) {
		/* remove item */
		if (old_idx == IDX_INVALID) {
			/*
			 * Nothing to remove if old_idx was invalid.
			 * This could happen if a rq_offline_dl is
			 * called for a CPU without -dl tasks running.
			 */
			goto out;
		}
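		/*
		 * Move the last heap entry into the slot being vacated,
		 * shrink the heap and rebalance: the moved entry may need
		 * to bubble up towards the root or sift back down.
		 */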
		new_cpu = cp->elements[cp->size - 1].cpu;
		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
		cp->elements[old_idx].cpu = new_cpu;
		cp->size--;
		cp->elements[new_cpu].idx = old_idx;
		cp->elements[cpu].idx = IDX_INVALID;
		while (old_idx > 0 && dl_time_before(
				cp->elements[parent(old_idx)].dl,
				cp->elements[old_idx].dl)) {
			cpudl_exchange(cp, old_idx, parent(old_idx));
			old_idx = parent(old_idx);
		}
		cpumask_set_cpu(cpu, cp->free_cpus);
		cpudl_heapify(cp, old_idx);

		goto out;
	}

	if (old_idx == IDX_INVALID) {
		cp->size++;
		cp->elements[cp->size - 1].dl = 0;
		cp->elements[cp->size - 1].cpu = cpu;
		cp->elements[cpu].idx = cp->size - 1;
		cpudl_change_key(cp, cp->size - 1, dl);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cpudl_change_key(cp, old_idx, dl);
	}

out:
	raw_spin_unlock_irqrestore(&cp->lock, flags);
}

/*
 * cpudl_set_freecpu - Set the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached cpu
 */
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_set_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached cpu
 */
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_clear_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));
	raw_spin_lock_init(&cp->lock);
	cp->size = 0;

	cp->elements = kcalloc(nr_cpu_ids,
			       sizeof(struct cpudl_item),
			       GFP_KERNEL);
	if (!cp->elements)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
		kfree(cp->elements);
		return -ENOMEM;
	}

	for_each_possible_cpu(i)
		cp->elements[i].idx = IDX_INVALID;

	return 0;
}
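
/*
 * Note: elements[] is sized nr_cpu_ids rather than the current heap
 * size because every possible CPU needs a slot for its reverse-mapping
 * ->idx field, even while that CPU is not present in the heap.
 */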

/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
	free_cpumask_var(cp->free_cpus);
	kfree(cp->elements);
}