/* arch/x86/kernel/vsmp_64.c */
/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 * Ravikiran Thirumalai <kiran@scalemp.com>
 */
  14. #include <linux/init.h>
  15. #include <linux/pci_ids.h>
  16. #include <linux/pci_regs.h>
  17. #include <linux/smp.h>
  18. #include <linux/irq.h>
  19. #include <asm/apic.h>
  20. #include <asm/pci-direct.h>
  21. #include <asm/io.h>
  22. #include <asm/paravirt.h>
  23. #include <asm/setup.h>
  24. #define TOPOLOGY_REGISTER_OFFSET 0x10
/*
 * Flag below is initialized once during vSMP PCI initialization
 * (set_vsmp_pv_ops).  It is cleared to 0 when the vSMP foundation is told
 * to ignore the kernel's interrupt routing (capability/control bit 8), in
 * which case vsmp_apic_post_init() makes every CPU a valid vector target.
 */
static int irq_routing_comply = 1;
  27. #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
  28. /*
  29. * Interrupt control on vSMPowered systems:
  30. * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
  31. * and vice versa.
  32. */
  33. asmlinkage __visible unsigned long vsmp_save_fl(void)
  34. {
  35. unsigned long flags = native_save_fl();
  36. if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
  37. flags &= ~X86_EFLAGS_IF;
  38. return flags;
  39. }
  40. PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
  41. __visible void vsmp_restore_fl(unsigned long flags)
  42. {
  43. if (flags & X86_EFLAGS_IF)
  44. flags &= ~X86_EFLAGS_AC;
  45. else
  46. flags |= X86_EFLAGS_AC;
  47. native_restore_fl(flags);
  48. }
  49. PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
  50. asmlinkage __visible void vsmp_irq_disable(void)
  51. {
  52. unsigned long flags = native_save_fl();
  53. native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
  54. }
  55. PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
  56. asmlinkage __visible void vsmp_irq_enable(void)
  57. {
  58. unsigned long flags = native_save_fl();
  59. native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
  60. }
  61. PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
  62. static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
  63. unsigned long addr, unsigned len)
  64. {
  65. switch (type) {
  66. case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
  67. case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
  68. case PARAVIRT_PATCH(pv_irq_ops.save_fl):
  69. case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
  70. return paravirt_patch_default(type, clobbers, ibuf, addr, len);
  71. default:
  72. return native_patch(type, clobbers, ibuf, addr, len);
  73. }
  74. }
  75. static void __init set_vsmp_pv_ops(void)
  76. {
  77. void __iomem *address;
  78. unsigned int cap, ctl, cfg;
  79. /* set vSMP magic bits to indicate vSMP capable kernel */
  80. cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
  81. address = early_ioremap(cfg, 8);
  82. cap = readl(address);
  83. ctl = readl(address + 4);
  84. printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
  85. cap, ctl);
  86. /* If possible, let the vSMP foundation route the interrupt optimally */
  87. #ifdef CONFIG_SMP
  88. if (cap & ctl & BIT(8)) {
  89. ctl &= ~BIT(8);
  90. /* Interrupt routing set to ignore */
  91. irq_routing_comply = 0;
  92. #ifdef CONFIG_PROC_FS
  93. /* Don't let users change irq affinity via procfs */
  94. no_irq_affinity = 1;
  95. #endif
  96. }
  97. #endif
  98. if (cap & ctl & (1 << 4)) {
  99. /* Setup irq ops and turn on vSMP IRQ fastpath handling */
  100. pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
  101. pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
  102. pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
  103. pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
  104. pv_init_ops.patch = vsmp_patch;
  105. ctl &= ~(1 << 4);
  106. }
  107. writel(ctl, address + 4);
  108. ctl = readl(address + 4);
  109. pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
  110. early_iounmap(address, 8);
  111. }
#else
/* Without CONFIG_PCI + CONFIG_PARAVIRT there is nothing to set up. */
static void __init set_vsmp_pv_ops(void)
{
}
#endif
  117. #ifdef CONFIG_PCI
  118. static int is_vsmp = -1;
  119. static void __init detect_vsmp_box(void)
  120. {
  121. is_vsmp = 0;
  122. if (!early_pci_allowed())
  123. return;
  124. /* Check if we are running on a ScaleMP vSMPowered box */
  125. if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
  126. (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
  127. is_vsmp = 1;
  128. }
  129. static int is_vsmp_box(void)
  130. {
  131. if (is_vsmp != -1)
  132. return is_vsmp;
  133. else {
  134. WARN_ON_ONCE(1);
  135. return 0;
  136. }
  137. }
#else
/* Without CONFIG_PCI the control device cannot be probed ... */
static void __init detect_vsmp_box(void)
{
}
/* ... so never report a vSMP box. */
static int is_vsmp_box(void)
{
	return 0;
}
#endif
  147. static void __init vsmp_cap_cpus(void)
  148. {
  149. #if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
  150. void __iomem *address;
  151. unsigned int cfg, topology, node_shift, maxcpus;
  152. /*
  153. * CONFIG_X86_VSMP is not configured, so limit the number CPUs to the
  154. * ones present in the first board, unless explicitly overridden by
  155. * setup_max_cpus
  156. */
  157. if (setup_max_cpus != NR_CPUS)
  158. return;
  159. /* Read the vSMP Foundation topology register */
  160. cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
  161. address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
  162. if (WARN_ON(!address))
  163. return;
  164. topology = readl(address);
  165. node_shift = (topology >> 16) & 0x7;
  166. if (!node_shift)
  167. /* The value 0 should be decoded as 8 */
  168. node_shift = 8;
  169. maxcpus = (topology & ((1 << node_shift) - 1)) + 1;
  170. pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
  171. maxcpus);
  172. setup_max_cpus = maxcpus;
  173. early_iounmap(address, 4);
  174. #endif
  175. }
/*
 * Physical package id for vSMP: derived from the current CPU's hardware
 * APIC id shifted down by index_msb.  Note the initial_apic_id argument
 * required by the apic->phys_pkg_id signature is deliberately ignored.
 */
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}
/*
 * In vSMP, all cpus should be capable of handling interrupts, regardless of
 * the APIC used.
 */
static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
					  const struct cpumask *mask)
{
	/* Ignore cpu/mask and report every CPU as a valid vector target. */
	cpumask_setall(retmask);
}
/* Late APIC fixups; installed as x86_platform.apic_post_init by vsmp_init(). */
static void vsmp_apic_post_init(void)
{
	/* need to update phys_pkg_id */
	apic->phys_pkg_id = apicid_phys_pkg_id;
	/*
	 * If the vSMP foundation was told to ignore kernel interrupt routing
	 * (irq_routing_comply cleared), every CPU must be an allowed target.
	 */
	if (!irq_routing_comply)
		apic->vector_allocation_domain = fill_vector_allocation_domain;
}
  196. void __init vsmp_init(void)
  197. {
  198. detect_vsmp_box();
  199. if (!is_vsmp_box())
  200. return;
  201. x86_platform.apic_post_init = vsmp_apic_post_init;
  202. vsmp_cap_cpus();
  203. set_vsmp_pv_ops();
  204. return;
  205. }