/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
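
/*
 * Example interaction from userspace (a sketch of what the latencytop
 * tool does periodically; the written string is arbitrary, since any
 * write resets the table, see lstats_write() below):
 *
 *	# cat /proc/latency_stats
 *	# echo clear > /proc/latency_stats
 */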

#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/list.h>

#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];	/* protected by latency_lock */

int latencytop_enabled;
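
/*
 * Clear the per-task latency records of @p; latency_lock also guards the
 * per-task arrays, so concurrent accounting never sees a half-cleared record.
 */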
void clear_all_latency_tracing(struct task_struct *p)
{
        unsigned long flags;

        if (!latencytop_enabled)
                return;

        raw_spin_lock_irqsave(&latency_lock, flags);
        memset(&p->latency_record, 0, sizeof(p->latency_record));
        p->latency_record_count = 0;
        raw_spin_unlock_irqrestore(&latency_lock, flags);
}
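
/* Reset the system-wide table; triggered by a write to /proc/latency_stats. */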
static void clear_global_latency_tracing(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&latency_lock, flags);
        memset(&latency_record, 0, sizeof(latency_record));
        raw_spin_unlock_irqrestore(&latency_lock, flags);
}
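
/*
 * Fold one latency record into the system-wide table: accumulate into an
 * existing entry with an identical backtrace if there is one, otherwise
 * copy the record into the first free slot. Called with latency_lock held.
 */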
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
                                 struct latency_record *lat)
{
        int firstnonnull = MAXLR + 1;
        int i;

        if (!latencytop_enabled)
                return;

        /* skip kernel threads for now */
        if (!tsk->mm)
                return;

        for (i = 0; i < MAXLR; i++) {
                int q, same = 1;

                /* Nothing stored: */
                if (!latency_record[i].backtrace[0]) {
                        if (firstnonnull > i)
                                firstnonnull = i;
                        continue;
                }
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat->backtrace[q];

                        if (latency_record[i].backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        latency_record[i].count++;
                        latency_record[i].time += lat->time;
                        if (lat->time > latency_record[i].max)
                                latency_record[i].max = lat->time;
                        return;
                }
        }

        i = firstnonnull;
        if (i >= MAXLR - 1)
                return;

        /* Allocated a new one: */
        memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/*
 * Store the current backtrace of @tsk into a latency record entry.
 */
static inline void store_stacktrace(struct task_struct *tsk,
                                    struct latency_record *lat)
{
        struct stack_trace trace;

        memset(&trace, 0, sizeof(trace));
        trace.max_entries = LT_BACKTRACEDEPTH;
        trace.entries = &lat->backtrace[0];
        save_stack_trace_tsk(tsk, &trace);
}

/**
 * __account_scheduler_latency - record a latency that just occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
        unsigned long flags;
        int i, q;
        struct latency_record lat;

        /* Long interruptible waits are generally user requested... */
        if (inter && usecs > 5000)
                return;

        /* Negative sleeps are time going backwards */
        /* Zero-time sleeps are non-interesting */
        if (usecs <= 0)
                return;

        memset(&lat, 0, sizeof(lat));
        lat.count = 1;
        lat.time = usecs;
        lat.max = usecs;
        store_stacktrace(tsk, &lat);

        raw_spin_lock_irqsave(&latency_lock, flags);

        account_global_scheduler_latency(tsk, &lat);

        for (i = 0; i < tsk->latency_record_count; i++) {
                struct latency_record *mylat;
                int same = 1;

                mylat = &tsk->latency_record[i];
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat.backtrace[q];

                        if (mylat->backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        mylat->count++;
                        mylat->time += lat.time;
                        if (lat.time > mylat->max)
                                mylat->max = lat.time;
                        goto out_unlock;
                }
        }

        /*
         * Short term hack: once the per-task table is full we stop;
         * in the future we could recycle entries instead:
         */
        if (tsk->latency_record_count >= LT_SAVECOUNT)
                goto out_unlock;

        /* Allocated a new one: */
        i = tsk->latency_record_count++;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
        raw_spin_unlock_irqrestore(&latency_lock, flags);
}
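
/* Emit the table in the format documented at the top of this file. */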
static int lstats_show(struct seq_file *m, void *v)
{
        int i;

        seq_puts(m, "Latency Top version : v0.1\n");

        for (i = 0; i < MAXLR; i++) {
                struct latency_record *lr = &latency_record[i];

                if (lr->backtrace[0]) {
                        int q;

                        seq_printf(m, "%i %lu %lu",
                                   lr->count, lr->time, lr->max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                unsigned long bt = lr->backtrace[q];

                                if (!bt)
                                        break;
                                if (bt == ULONG_MAX)
                                        break;
                                seq_printf(m, " %ps", (void *)bt);
                        }
                        seq_puts(m, "\n");
                }
        }
        return 0;
}
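
/* Any write, regardless of its contents, flushes the accumulated records. */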
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
             loff_t *offs)
{
        clear_global_latency_tracing();

        return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
        .open           = lstats_open,
        .read           = seq_read,
        .write          = lstats_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init init_lstats_procfs(void)
{
        proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
}
device_initcall(init_lstats_procfs);