/* kernel/sched/cpuacct.c */
  1. #include <linux/cgroup.h>
  2. #include <linux/slab.h>
  3. #include <linux/percpu.h>
  4. #include <linux/spinlock.h>
  5. #include <linux/cpumask.h>
  6. #include <linux/seq_file.h>
  7. #include <linux/rcupdate.h>
  8. #include <linux/kernel_stat.h>
  9. #include <linux/err.h>
  10. #include "sched.h"
  11. /*
  12. * CPU accounting code for task groups.
  13. *
  14. * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
  15. * (balbir@in.ibm.com).
  16. */
/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */
	CPUACCT_STAT_NSTATS,	/* count of entries above, not a real index */
};
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	/* embedded cgroup state; container_of() recovers the cpuacct */
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	/* per-cpu user/system time buckets reported via cpuacct.stat */
	struct kernel_cpustat __percpu *cpustat;
};
  30. static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
  31. {
  32. return css ? container_of(css, struct cpuacct, css) : NULL;
  33. }
  34. /* return cpu accounting group to which this task belongs */
  35. static inline struct cpuacct *task_ca(struct task_struct *tsk)
  36. {
  37. return css_ca(task_css(tsk, cpuacct_cgrp_id));
  38. }
  39. static inline struct cpuacct *parent_ca(struct cpuacct *ca)
  40. {
  41. return css_ca(ca->css.parent);
  42. }
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);

/*
 * The root group is statically allocated (early_init) and shares the
 * system-wide kernel_cpustat rather than a private per-cpu area.
 */
static struct cpuacct root_cpuacct = {
	.cpustat	= &kernel_cpustat,
	.cpuusage	= &root_cpuacct_cpuusage,
};
  48. /* create a new cpu accounting group */
  49. static struct cgroup_subsys_state *
  50. cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
  51. {
  52. struct cpuacct *ca;
  53. if (!parent_css)
  54. return &root_cpuacct.css;
  55. ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  56. if (!ca)
  57. goto out;
  58. ca->cpuusage = alloc_percpu(u64);
  59. if (!ca->cpuusage)
  60. goto out_free_ca;
  61. ca->cpustat = alloc_percpu(struct kernel_cpustat);
  62. if (!ca->cpustat)
  63. goto out_free_cpuusage;
  64. return &ca->css;
  65. out_free_cpuusage:
  66. free_percpu(ca->cpuusage);
  67. out_free_ca:
  68. kfree(ca);
  69. out:
  70. return ERR_PTR(-ENOMEM);
  71. }
  72. /* destroy an existing cpu accounting group */
  73. static void cpuacct_css_free(struct cgroup_subsys_state *css)
  74. {
  75. struct cpuacct *ca = css_ca(css);
  76. free_percpu(ca->cpustat);
  77. free_percpu(ca->cpuusage);
  78. kfree(ca);
  79. }
  80. static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
  81. {
  82. u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  83. u64 data;
  84. #ifndef CONFIG_64BIT
  85. /*
  86. * Take rq->lock to make 64-bit read safe on 32-bit platforms.
  87. */
  88. raw_spin_lock_irq(&cpu_rq(cpu)->lock);
  89. data = *cpuusage;
  90. raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
  91. #else
  92. data = *cpuusage;
  93. #endif
  94. return data;
  95. }
  96. static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
  97. {
  98. u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  99. #ifndef CONFIG_64BIT
  100. /*
  101. * Take rq->lock to make 64-bit write safe on 32-bit platforms.
  102. */
  103. raw_spin_lock_irq(&cpu_rq(cpu)->lock);
  104. *cpuusage = val;
  105. raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
  106. #else
  107. *cpuusage = val;
  108. #endif
  109. }
  110. /* return total cpu usage (in nanoseconds) of a group */
  111. static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
  112. {
  113. struct cpuacct *ca = css_ca(css);
  114. u64 totalcpuusage = 0;
  115. int i;
  116. for_each_present_cpu(i)
  117. totalcpuusage += cpuacct_cpuusage_read(ca, i);
  118. return totalcpuusage;
  119. }
  120. static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
  121. u64 reset)
  122. {
  123. struct cpuacct *ca = css_ca(css);
  124. int err = 0;
  125. int i;
  126. if (reset) {
  127. err = -EINVAL;
  128. goto out;
  129. }
  130. for_each_present_cpu(i)
  131. cpuacct_cpuusage_write(ca, i, 0);
  132. out:
  133. return err;
  134. }
  135. static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
  136. {
  137. struct cpuacct *ca = css_ca(seq_css(m));
  138. u64 percpu;
  139. int i;
  140. for_each_present_cpu(i) {
  141. percpu = cpuacct_cpuusage_read(ca, i);
  142. seq_printf(m, "%llu ", (unsigned long long) percpu);
  143. }
  144. seq_printf(m, "\n");
  145. return 0;
  146. }
/* Field labels for cpuacct.stat, indexed by enum cpuacct_stat_index. */
static const char * const cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER]	= "user",
	[CPUACCT_STAT_SYSTEM]	= "system",
};
  151. static int cpuacct_stats_show(struct seq_file *sf, void *v)
  152. {
  153. struct cpuacct *ca = css_ca(seq_css(sf));
  154. int cpu;
  155. s64 val = 0;
  156. for_each_online_cpu(cpu) {
  157. struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
  158. val += kcpustat->cpustat[CPUTIME_USER];
  159. val += kcpustat->cpustat[CPUTIME_NICE];
  160. }
  161. val = cputime64_to_clock_t(val);
  162. seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_USER], val);
  163. val = 0;
  164. for_each_online_cpu(cpu) {
  165. struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
  166. val += kcpustat->cpustat[CPUTIME_SYSTEM];
  167. val += kcpustat->cpustat[CPUTIME_IRQ];
  168. val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
  169. }
  170. val = cputime64_to_clock_t(val);
  171. seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
  172. return 0;
  173. }
/* Control files exposed by the cpuacct controller (legacy hierarchy). */
static struct cftype files[] = {
	{
		/* total usage in ns; writing 0 resets all per-cpu counters */
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		/* per-cpu usage breakdown, one value per present CPU */
		.name = "usage_percpu",
		.seq_show = cpuacct_percpu_seq_show,
	},
	{
		/* user/system split in USER_HZ units */
		.name = "stat",
		.seq_show = cpuacct_stats_show,
	},
	{ }	/* terminate */
};
  190. /*
  191. * charge this task's execution time to its accounting group.
  192. *
  193. * called with rq->lock held.
  194. */
  195. void cpuacct_charge(struct task_struct *tsk, u64 cputime)
  196. {
  197. struct cpuacct *ca;
  198. int cpu;
  199. cpu = task_cpu(tsk);
  200. rcu_read_lock();
  201. ca = task_ca(tsk);
  202. while (true) {
  203. u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  204. *cpuusage += cputime;
  205. ca = parent_ca(ca);
  206. if (!ca)
  207. break;
  208. }
  209. rcu_read_unlock();
  210. }
  211. /*
  212. * Add user/system time to cpuacct.
  213. *
  214. * Note: it's the caller that updates the account of the root cgroup.
  215. */
  216. void cpuacct_account_field(struct task_struct *p, int index, u64 val)
  217. {
  218. struct kernel_cpustat *kcpustat;
  219. struct cpuacct *ca;
  220. rcu_read_lock();
  221. ca = task_ca(p);
  222. while (ca != &root_cpuacct) {
  223. kcpustat = this_cpu_ptr(ca->cpustat);
  224. kcpustat->cpustat[index] += val;
  225. ca = parent_ca(ca);
  226. }
  227. rcu_read_unlock();
  228. }
/*
 * Controller registration. early_init is required because the root
 * group (and its statically allocated per-cpu data) must be usable
 * before the allocators are up.
 */
struct cgroup_subsys cpuacct_cgrp_subsys = {
	.css_alloc	= cpuacct_css_alloc,
	.css_free	= cpuacct_css_free,
	.legacy_cftypes	= files,
	.early_init	= 1,
};