/*
 * MSI hooks for IA-64
 * (adapted from the standard x86 APIC MSI code)
 */
  4. #include <linux/pci.h>
  5. #include <linux/irq.h>
  6. #include <linux/msi.h>
  7. #include <linux/dmar.h>
  8. #include <asm/smp.h>
  9. #include <asm/msidef.h>
/* Forward declaration; the chip itself is defined below, after its ops. */
static struct irq_chip ia64_msi_chip;
  11. #ifdef CONFIG_SMP
  12. static int ia64_set_msi_irq_affinity(struct irq_data *idata,
  13. const cpumask_t *cpu_mask, bool force)
  14. {
  15. struct msi_msg msg;
  16. u32 addr, data;
  17. int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
  18. unsigned int irq = idata->irq;
  19. if (irq_prepare_move(irq, cpu))
  20. return -1;
  21. __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);
  22. addr = msg.address_lo;
  23. addr &= MSI_ADDR_DEST_ID_MASK;
  24. addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
  25. msg.address_lo = addr;
  26. data = msg.data;
  27. data &= MSI_DATA_VECTOR_MASK;
  28. data |= MSI_DATA_VECTOR(irq_to_vector(irq));
  29. msg.data = data;
  30. pci_write_msi_msg(irq, &msg);
  31. cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
  32. return 0;
  33. }
  34. #endif /* CONFIG_SMP */
  35. int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  36. {
  37. struct msi_msg msg;
  38. unsigned long dest_phys_id;
  39. int irq, vector;
  40. irq = create_irq();
  41. if (irq < 0)
  42. return irq;
  43. irq_set_msi_desc(irq, desc);
  44. dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
  45. cpu_online_mask));
  46. vector = irq_to_vector(irq);
  47. msg.address_hi = 0;
  48. msg.address_lo =
  49. MSI_ADDR_HEADER |
  50. MSI_ADDR_DEST_MODE_PHYS |
  51. MSI_ADDR_REDIRECTION_CPU |
  52. MSI_ADDR_DEST_ID_CPU(dest_phys_id);
  53. msg.data =
  54. MSI_DATA_TRIGGER_EDGE |
  55. MSI_DATA_LEVEL_ASSERT |
  56. MSI_DATA_DELIVERY_FIXED |
  57. MSI_DATA_VECTOR(vector);
  58. pci_write_msi_msg(irq, &msg);
  59. irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
  60. return 0;
  61. }
/* Release an irq previously allocated by ia64_setup_msi_irq(). */
void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
/*
 * Ack an MSI: finish any in-progress migration for this irq, perform a
 * deferred move if one is pending, then issue the processor EOI.  The
 * ordering (complete move -> move -> EOI) is deliberate.
 */
static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}
  72. static int ia64_msi_retrigger_irq(struct irq_data *data)
  73. {
  74. unsigned int vector = irq_to_vector(data->irq);
  75. ia64_resend_irq(vector);
  76. return 1;
  77. }
/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name = "PCI-MSI",
	/* mask/unmask at the PCI MSI capability */
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
	.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger = ia64_msi_retrigger_irq,
};
  91. int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  92. {
  93. if (platform_setup_msi_irq)
  94. return platform_setup_msi_irq(pdev, desc);
  95. return ia64_setup_msi_irq(pdev, desc);
  96. }
  97. void arch_teardown_msi_irq(unsigned int irq)
  98. {
  99. if (platform_teardown_msi_irq)
  100. return platform_teardown_msi_irq(irq);
  101. return ia64_teardown_msi_irq(irq);
  102. }
  103. #ifdef CONFIG_INTEL_IOMMU
  104. #ifdef CONFIG_SMP
  105. static int dmar_msi_set_affinity(struct irq_data *data,
  106. const struct cpumask *mask, bool force)
  107. {
  108. unsigned int irq = data->irq;
  109. struct irq_cfg *cfg = irq_cfg + irq;
  110. struct msi_msg msg;
  111. int cpu = cpumask_first_and(mask, cpu_online_mask);
  112. if (irq_prepare_move(irq, cpu))
  113. return -1;
  114. dmar_msi_read(irq, &msg);
  115. msg.data &= ~MSI_DATA_VECTOR_MASK;
  116. msg.data |= MSI_DATA_VECTOR(cfg->vector);
  117. msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
  118. msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
  119. dmar_msi_write(irq, &msg);
  120. cpumask_copy(irq_data_get_affinity_mask(data), mask);
  121. return 0;
  122. }
  123. #endif /* CONFIG_SMP */
/* irq_chip for DMAR (interrupt-remapping) MSIs; shares ack/retrigger
 * with the generic IA64 MSI chip above. */
static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = dmar_msi_set_affinity,
#endif
	.irq_retrigger = ia64_msi_retrigger_irq,
};
  134. static void
  135. msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
  136. {
  137. struct irq_cfg *cfg = irq_cfg + irq;
  138. unsigned dest;
  139. dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
  140. cpu_online_mask));
  141. msg->address_hi = 0;
  142. msg->address_lo =
  143. MSI_ADDR_HEADER |
  144. MSI_ADDR_DEST_MODE_PHYS |
  145. MSI_ADDR_REDIRECTION_CPU |
  146. MSI_ADDR_DEST_ID_CPU(dest);
  147. msg->data =
  148. MSI_DATA_TRIGGER_EDGE |
  149. MSI_DATA_LEVEL_ASSERT |
  150. MSI_DATA_DELIVERY_FIXED |
  151. MSI_DATA_VECTOR(cfg->vector);
  152. }
  153. int dmar_alloc_hwirq(int id, int node, void *arg)
  154. {
  155. int irq;
  156. struct msi_msg msg;
  157. irq = create_irq();
  158. if (irq > 0) {
  159. irq_set_handler_data(irq, arg);
  160. irq_set_chip_and_handler_name(irq, &dmar_msi_type,
  161. handle_edge_irq, "edge");
  162. msi_compose_msg(NULL, irq, &msg);
  163. dmar_msi_write(irq, &msg);
  164. }
  165. return irq;
  166. }
/* Undo dmar_alloc_hwirq(): clear the handler data and free the irq. */
void dmar_free_hwirq(int irq)
{
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);
}
  172. #endif /* CONFIG_INTEL_IOMMU */