irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

void *irq_stack[NR_CPUS];

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this for itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk("unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
        return 0;
}

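/*
 * Illustrative note, not part of the original file: arch_show_interrupts()
 * is the hook through which /proc/interrupts prints architecture-specific
 * counters.  With the seq_printf() above, and assuming the generic code
 * passes a field width (prec) of 3, the extra line would look like:
 *
 *      ERR:          0
 */
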
asmlinkage void spurious_interrupt(void)
{
        atomic_inc(&irq_err_count);
}

void __init init_IRQ(void)
{
        int i;
        unsigned int order = get_order(IRQ_STACK_SIZE);

        for (i = 0; i < NR_IRQS; i++)
                irq_set_noprobe(i);

        arch_init_irq();

        for_each_possible_cpu(i) {
                void *s = (void *)__get_free_pages(GFP_KERNEL, order);

                irq_stack[i] = s;
                pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
                         irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
        }
}

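/*
 * A worked example, not in the original source: get_order() converts a
 * byte count into the power-of-two page order that __get_free_pages()
 * expects.  Assuming a 4 KB PAGE_SIZE and IRQ_STACK_SIZE == 16 KB,
 * get_order(16384) == 2, so each CPU's IRQ stack above is one block of
 * four physically contiguous pages.
 */
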
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
        unsigned long sp;

        __asm__ __volatile__("move %0, $sp" : "=r" (sp));
        sp &= THREAD_MASK;

        /*
         * Check for stack overflow: is there less than STACK_WARN free?
         * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
         */
        if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                printk("do_IRQ: stack overflow: %ld\n",
                       sp - sizeof(struct thread_info));
                dump_stack();
        }
}
#else
static inline void check_stack_overflow(void) {}
#endif

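/*
 * A worked example, not in the original source: the kernel stack and
 * struct thread_info share one THREAD_SIZE-aligned block, with thread_info
 * at the bottom and the stack growing down toward it.  Masking $sp with
 * THREAD_MASK therefore yields how many bytes remain below the stack
 * pointer.  Assuming THREAD_SIZE == 16 KB (so STACK_WARN == 2 KB), the
 * check above warns once fewer than sizeof(struct thread_info) + 2048
 * bytes are left before the stack would run into thread_info.
 */
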
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
        irq_enter();
        check_stack_overflow();
        generic_handle_irq(irq);
        irq_exit();
}

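/*
 * A minimal sketch, not part of this file, of how a MIPS platform usually
 * reaches do_IRQ(): the low-level exception vector calls the board's
 * plat_irq_dispatch(), which decodes the pending bits of the CP0 Cause
 * register into Linux IRQ numbers.  The specific IP-bit-to-IRQ mapping
 * below is an assumption for illustration, not any particular board's
 * implementation.
 */
#if 0   /* example only, never compiled */
asmlinkage void plat_irq_dispatch(void)
{
        unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

        if (pending & CAUSEF_IP7)               /* CPU timer interrupt */
                do_IRQ(MIPS_CPU_IRQ_BASE + 7);
        else if (pending & CAUSEF_IP2)          /* cascaded board controller */
                do_IRQ(MIPS_CPU_IRQ_BASE + 2);
        else
                spurious_interrupt();
}
#endif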