uv_irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	unsigned long	offset;
	int		pnode;
};

static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= cfg->dest_apicid;

	uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
}

static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}
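
/*
 * Forward the affinity change to the parent (x86 vector) domain first, then
 * reprogram the hub MMR so that future interrupts target the new vector and
 * destination APIC id.
 */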
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0) {
		uv_program_mmr(cfg, data->chip_data);
		send_cleanup_vector(cfg);
	}

	return ret;
}

static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};

static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	struct uv_irq_2_mmr_pnode *chip_data;
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
		return -EINVAL;

	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
				 irq_data_get_node(irq_data));
	if (!chip_data)
		return -ENOMEM;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret >= 0) {
		if (info->uv_limit == UV_AFFINITY_CPU)
			irq_set_status_flags(virq, IRQ_NO_BALANCING);
		else
			irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

		chip_data->pnode = uv_blade_to_pnode(info->uv_blade);
		chip_data->offset = info->uv_offset;
		irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
				    handle_percpu_irq, NULL, info->uv_name);
	} else {
		kfree(chip_data);
	}

	return ret;
}

static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	BUG_ON(nr_irqs != 1);
	kfree(irq_data->chip_data);
	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
	irq_clear_status_flags(virq, IRQ_NO_BALANCING);
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static void uv_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data)
{
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void uv_domain_deactivate(struct irq_domain *domain,
				 struct irq_data *irq_data)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}

static const struct irq_domain_ops uv_domain_ops = {
	.alloc		= uv_domain_alloc,
	.free		= uv_domain_free,
	.activate	= uv_domain_activate,
	.deactivate	= uv_domain_deactivate,
};
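
/*
 * The UV domain is created lazily on first use and stacked on top of the
 * x86 vector domain, which owns vector and destination-CPU assignment.
 */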
static struct irq_domain *uv_get_irq_domain(void)
{
	static struct irq_domain *uv_domain;
	static DEFINE_MUTEX(uv_lock);

	mutex_lock(&uv_lock);
	if (uv_domain == NULL) {
		uv_domain = irq_domain_add_tree(NULL, &uv_domain_ops, NULL);
		if (uv_domain)
			uv_domain->parent = x86_vector_domain;
	}
	mutex_unlock(&uv_lock);

	return uv_domain;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	struct irq_alloc_info info;
	struct irq_domain *domain = uv_get_irq_domain();

	if (!domain)
		return -ENOMEM;

	init_irq_alloc_info(&info, cpumask_of(cpu));
	info.type = X86_IRQ_ALLOC_TYPE_UV;
	info.uv_limit = limit;
	info.uv_blade = mmr_blade;
	info.uv_offset = mmr_offset;
	info.uv_name = irq_name;

	return irq_domain_alloc_irqs(domain, 1,
				     uv_blade_to_memory_nid(mmr_blade), &info);
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
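
/*
 * Usage sketch (not part of the original file): a minimal example of how a
 * driver might pair uv_setup_irq()/uv_teardown_irq() with
 * request_irq()/free_irq(). The handler, the "my_uv_dev" name, and the MMR
 * offset parameter are hypothetical; in-tree callers such as the sgi-xp
 * driver derive the offset from their device's hardware description.
 */
#include <linux/interrupt.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

static irqreturn_t my_uv_handler(int irq, void *dev_id)
{
	/* Handle and acknowledge the device event here. */
	return IRQ_HANDLED;
}

static int my_uv_attach(int cpu, unsigned long mmr_offset)
{
	int blade = uv_cpu_to_blade_id(cpu);	/* blade hosting the target CPU */
	int irq;

	/* UV_AFFINITY_CPU pins the irq to one CPU (IRQ_NO_BALANCING). */
	irq = uv_setup_irq("my_uv_dev", cpu, blade, mmr_offset, UV_AFFINITY_CPU);
	if (irq < 0)
		return irq;

	if (request_irq(irq, my_uv_handler, 0, "my_uv_dev", NULL)) {
		uv_teardown_irq(irq);
		return -EBUSY;
	}

	return irq;
}

static void my_uv_detach(int irq)
{
	free_irq(irq, NULL);
	uv_teardown_irq(irq);
}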