/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */
#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"
/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
/* The /proc/irq directory; parent of every per-interrupt entry below. */
static struct proc_dir_entry *root_irq_dir;
  34. #ifdef CONFIG_SMP
  35. static int show_irq_affinity(int type, struct seq_file *m, void *v)
  36. {
  37. struct irq_desc *desc = irq_to_desc((long)m->private);
  38. const struct cpumask *mask = desc->irq_common_data.affinity;
  39. #ifdef CONFIG_GENERIC_PENDING_IRQ
  40. if (irqd_is_setaffinity_pending(&desc->irq_data))
  41. mask = desc->pending_mask;
  42. #endif
  43. if (type)
  44. seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
  45. else
  46. seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
  47. return 0;
  48. }
  49. static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
  50. {
  51. struct irq_desc *desc = irq_to_desc((long)m->private);
  52. unsigned long flags;
  53. cpumask_var_t mask;
  54. if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
  55. return -ENOMEM;
  56. raw_spin_lock_irqsave(&desc->lock, flags);
  57. if (desc->affinity_hint)
  58. cpumask_copy(mask, desc->affinity_hint);
  59. raw_spin_unlock_irqrestore(&desc->lock, flags);
  60. seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
  61. free_cpumask_var(mask);
  62. return 0;
  63. }
/* Architectures may veto user-supplied masks; the default accepts all. */
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

/* Non-zero disables user-initiated IRQ affinity changes (arch/cmdline). */
int no_irq_affinity;
/* /proc/irq/N/smp_affinity read: hex bitmask format. */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(0, m, v);
}

/* /proc/irq/N/smp_affinity_list read: cpulist format. */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(1, m, v);
}
  76. static ssize_t write_irq_affinity(int type, struct file *file,
  77. const char __user *buffer, size_t count, loff_t *pos)
  78. {
  79. unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
  80. cpumask_var_t new_value;
  81. int err;
  82. if (!irq_can_set_affinity(irq) || no_irq_affinity)
  83. return -EIO;
  84. if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
  85. return -ENOMEM;
  86. if (type)
  87. err = cpumask_parselist_user(buffer, count, new_value);
  88. else
  89. err = cpumask_parse_user(buffer, count, new_value);
  90. if (err)
  91. goto free_cpumask;
  92. if (!is_affinity_mask_valid(new_value)) {
  93. err = -EINVAL;
  94. goto free_cpumask;
  95. }
  96. /*
  97. * Do not allow disabling IRQs completely - it's a too easy
  98. * way to make the system unusable accidentally :-) At least
  99. * one online CPU still has to be targeted.
  100. */
  101. if (!cpumask_intersects(new_value, cpu_online_mask)) {
  102. /* Special case for empty set - allow the architecture
  103. code to set default SMP affinity. */
  104. err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
  105. } else {
  106. irq_set_affinity(irq, new_value);
  107. err = count;
  108. }
  109. free_cpumask:
  110. free_cpumask_var(new_value);
  111. return err;
  112. }
/* /proc/irq/N/smp_affinity write: expects a hex cpu bitmask. */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

/* /proc/irq/N/smp_affinity_list write: expects a cpulist, e.g. "0-3,8". */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
/*
 * seq_file open helpers. PDE_DATA() carries the irq number that was
 * stashed as (void *)(long)irq at proc_create_data() time; the show
 * callbacks read it back from m->private.
 */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}
/* /proc/irq/N/smp_affinity: readable and writable (0644). */
static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

/* /proc/irq/N/affinity_hint: read-only (0444). */
static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/irq/N/smp_affinity_list: readable and writable (0644). */
static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};
/* /proc/irq/default_smp_affinity read: hex bitmask of the default mask. */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}
  160. static ssize_t default_affinity_write(struct file *file,
  161. const char __user *buffer, size_t count, loff_t *ppos)
  162. {
  163. cpumask_var_t new_value;
  164. int err;
  165. if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
  166. return -ENOMEM;
  167. err = cpumask_parse_user(buffer, count, new_value);
  168. if (err)
  169. goto out;
  170. if (!is_affinity_mask_valid(new_value)) {
  171. err = -EINVAL;
  172. goto out;
  173. }
  174. /*
  175. * Do not allow disabling IRQs completely - it's a too easy
  176. * way to make the system unusable accidentally :-) At least
  177. * one online CPU still has to be targeted.
  178. */
  179. if (!cpumask_intersects(new_value, cpu_online_mask)) {
  180. err = -EINVAL;
  181. goto out;
  182. }
  183. cpumask_copy(irq_default_affinity, new_value);
  184. err = count;
  185. out:
  186. free_cpumask_var(new_value);
  187. return err;
  188. }
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

/* /proc/irq/default_smp_affinity: readable and writable (0644). */
static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};
/* /proc/irq/N/node read: NUMA node this interrupt's descriptor lives on. */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

/* /proc/irq/N/node: read-only (0444). */
static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  216. #endif
/*
 * /proc/irq/N/spurious read: total interrupt count, unhandled count,
 * and milliseconds since the last unhandled occurrence.
 */
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

/* /proc/irq/N/spurious: read-only (0444). */
static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  235. #define MAX_NAMELEN 128
  236. static int name_unique(unsigned int irq, struct irqaction *new_action)
  237. {
  238. struct irq_desc *desc = irq_to_desc(irq);
  239. struct irqaction *action;
  240. unsigned long flags;
  241. int ret = 1;
  242. raw_spin_lock_irqsave(&desc->lock, flags);
  243. for (action = desc->action ; action; action = action->next) {
  244. if ((action != new_action) && action->name &&
  245. !strcmp(new_action->name, action->name)) {
  246. ret = 0;
  247. break;
  248. }
  249. }
  250. raw_spin_unlock_irqrestore(&desc->lock, flags);
  251. return ret;
  252. }
  253. void register_handler_proc(unsigned int irq, struct irqaction *action)
  254. {
  255. char name [MAX_NAMELEN];
  256. struct irq_desc *desc = irq_to_desc(irq);
  257. if (!desc->dir || action->dir || !action->name ||
  258. !name_unique(irq, action))
  259. return;
  260. memset(name, 0, MAX_NAMELEN);
  261. snprintf(name, MAX_NAMELEN, "%s", action->name);
  262. /* create /proc/irq/1234/handler/ */
  263. action->dir = proc_mkdir(name, desc->dir);
  264. }
  265. #undef MAX_NAMELEN
  266. #define MAX_NAMELEN 10
  267. void register_irq_proc(unsigned int irq, struct irq_desc *desc)
  268. {
  269. static DEFINE_MUTEX(register_lock);
  270. char name [MAX_NAMELEN];
  271. if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
  272. return;
  273. /*
  274. * irq directories are registered only when a handler is
  275. * added, not when the descriptor is created, so multiple
  276. * tasks might try to register at the same time.
  277. */
  278. mutex_lock(&register_lock);
  279. if (desc->dir)
  280. goto out_unlock;
  281. memset(name, 0, MAX_NAMELEN);
  282. sprintf(name, "%d", irq);
  283. /* create /proc/irq/1234 */
  284. desc->dir = proc_mkdir(name, root_irq_dir);
  285. if (!desc->dir)
  286. goto out_unlock;
  287. #ifdef CONFIG_SMP
  288. /* create /proc/irq/<irq>/smp_affinity */
  289. proc_create_data("smp_affinity", 0644, desc->dir,
  290. &irq_affinity_proc_fops, (void *)(long)irq);
  291. /* create /proc/irq/<irq>/affinity_hint */
  292. proc_create_data("affinity_hint", 0444, desc->dir,
  293. &irq_affinity_hint_proc_fops, (void *)(long)irq);
  294. /* create /proc/irq/<irq>/smp_affinity_list */
  295. proc_create_data("smp_affinity_list", 0644, desc->dir,
  296. &irq_affinity_list_proc_fops, (void *)(long)irq);
  297. proc_create_data("node", 0444, desc->dir,
  298. &irq_node_proc_fops, (void *)(long)irq);
  299. #endif
  300. proc_create_data("spurious", 0444, desc->dir,
  301. &irq_spurious_proc_fops, (void *)(long)irq);
  302. out_unlock:
  303. mutex_unlock(&register_lock);
  304. }
  305. void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
  306. {
  307. char name [MAX_NAMELEN];
  308. if (!root_irq_dir || !desc->dir)
  309. return;
  310. #ifdef CONFIG_SMP
  311. remove_proc_entry("smp_affinity", desc->dir);
  312. remove_proc_entry("affinity_hint", desc->dir);
  313. remove_proc_entry("smp_affinity_list", desc->dir);
  314. remove_proc_entry("node", desc->dir);
  315. #endif
  316. remove_proc_entry("spurious", desc->dir);
  317. memset(name, 0, MAX_NAMELEN);
  318. sprintf(name, "%u", irq);
  319. remove_proc_entry(name, root_irq_dir);
  320. }
  321. #undef MAX_NAMELEN
/* Remove the /proc/irq/<irq>/<name>/ dir made by register_handler_proc(). */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	/* proc_remove() tolerates a NULL entry, so no guard is needed. */
	proc_remove(action->dir);
}
/* Create /proc/irq/default_smp_affinity (SMP only; a no-op on UP). */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}
  333. void init_irq_proc(void)
  334. {
  335. unsigned int irq;
  336. struct irq_desc *desc;
  337. /* create /proc/irq */
  338. root_irq_dir = proc_mkdir("irq", NULL);
  339. if (!root_irq_dir)
  340. return;
  341. register_default_affinity_proc();
  342. /*
  343. * Create entries for all existing IRQs.
  344. */
  345. for_each_irq_desc(irq, desc) {
  346. if (!desc)
  347. continue;
  348. register_irq_proc(irq, desc);
  349. }
  350. }
  351. #ifdef CONFIG_GENERIC_IRQ_SHOW
/*
 * Weak default: architectures override this to append their own rows
 * (IPIs, NMIs, ...) at the end of /proc/interrupts. @prec is the
 * column width computed by show_interrupts().
 */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

/* Architectures may cap the number of rows shown; default is nr_irqs. */
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
/*
 * seq_file ->show() for /proc/interrupts. Called once per row with the
 * irq number in *v; row ACTUAL_NR_IRQS is delegated to the arch hook.
 * The descriptor lookup and all accesses are done under the sparse-irq
 * lock (see the access-rules comment at the top of this file), and the
 * descriptor's own lock pins the action chain while it is printed.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* Column width; computed on row 0 and reused for later rows. */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* OR the per-cpu counts: non-zero iff the irq ever fired anywhere. */
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	/* Skip rows for unused, never-fired interrupts (chained irqs too). */
	if ((!action || irq_desc_is_chained(desc)) && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	/* Chip column: custom printer, chip name, "-", or "None". */
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	/* With an irqdomain, also show the hardware irq number. */
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	/* Comma-separated names of all actions sharing this interrupt. */
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
  421. #endif