/*
 *  linux/arch/alpha/kernel/sys_titan.c
 *
 *  Copyright (C) 1995 David A Rusling
 *  Copyright (C) 1996, 1999 Jay A Estabrook
 *  Copyright (C) 1998, 1999 Richard Henderson
 *  Copyright (C) 1999, 2000 Jeff Wiedemeier
 *
 *  Code supporting TITAN systems (EV6+TITAN), currently:
 *      Privateer
 *      Falcon
 *      Granite
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"

/*
 * Titan generic
 */

/*
 * Titan supports up to 4 CPUs
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

/*
 * Mask is set (1) if enabled
 */
static unsigned long titan_cached_irq_mask;

/*
 * Need SMP-safe access to interrupt CSRs
 */
DEFINE_SPINLOCK(titan_irq_lock);

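/*
 * Push the cached enable mask out to the hardware.  Each present CPU
 * gets the subset of the mask permitted by its affinity word written
 * into its Cchip DIMx CSR; the ISA enable bit (bit 55) is always
 * forced on for the boot CPU, which services the legacy interrupts.
 */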
static void
titan_update_irq_hw(unsigned long mask)
{
        register titan_cchip *cchip = TITAN_cchip;
        unsigned long isa_enable = 1UL << 55;
        register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
        cpumask_t cpm;
        volatile unsigned long *dim0, *dim1, *dim2, *dim3;
        unsigned long mask0, mask1, mask2, mask3, dummy;

        cpumask_copy(&cpm, cpu_present_mask);
        mask &= ~isa_enable;
        mask0 = mask & titan_cpu_irq_affinity[0];
        mask1 = mask & titan_cpu_irq_affinity[1];
        mask2 = mask & titan_cpu_irq_affinity[2];
        mask3 = mask & titan_cpu_irq_affinity[3];

        if (bcpu == 0) mask0 |= isa_enable;
        else if (bcpu == 1) mask1 |= isa_enable;
        else if (bcpu == 2) mask2 |= isa_enable;
        else mask3 |= isa_enable;

        dim0 = &cchip->dim0.csr;
        dim1 = &cchip->dim1.csr;
        dim2 = &cchip->dim2.csr;
        dim3 = &cchip->dim3.csr;
        if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
        if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
        if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
        if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;

        *dim0 = mask0;
        *dim1 = mask1;
        *dim2 = mask2;
        *dim3 = mask3;
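        /* Order the DIM writes and read the CSRs back so the new masks
           have reached the Cchip before anyone relies on them.  */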
        mb();
        *dim0;
        *dim1;
        *dim2;
        *dim3;
#else
        volatile unsigned long *dimB;

        dimB = &cchip->dim0.csr;
        if (bcpu == 1) dimB = &cchip->dim1.csr;
        else if (bcpu == 2) dimB = &cchip->dim2.csr;
        else if (bcpu == 3) dimB = &cchip->dim3.csr;

        *dimB = mask | isa_enable;
        mb();
        *dimB;
#endif
}

static inline void
titan_enable_irq(struct irq_data *d)
{
        unsigned int irq = d->irq;

        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask |= 1UL << (irq - 16);
        titan_update_irq_hw(titan_cached_irq_mask);
        spin_unlock(&titan_irq_lock);
}

static inline void
titan_disable_irq(struct irq_data *d)
{
        unsigned int irq = d->irq;

        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask &= ~(1UL << (irq - 16));
        titan_update_irq_hw(titan_cached_irq_mask);
        spin_unlock(&titan_irq_lock);
}

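/*
 * Record, for each of the four possible CPUs, whether it should see
 * the given device interrupt.  Only the bookkeeping is done here;
 * titan_update_irq_hw() pushes the result out to the DIM CSRs.
 */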
static void
titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
        int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                if (cpumask_test_cpu(cpu, &affinity))
                        titan_cpu_irq_affinity[cpu] |= 1UL << irq;
                else
                        titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
        }
}

static int
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
                       bool force)
{
        unsigned int irq = d->irq;

        spin_lock(&titan_irq_lock);
        titan_cpu_set_irq_affinity(irq - 16, *affinity);
        titan_update_irq_hw(titan_cached_irq_mask);
        spin_unlock(&titan_irq_lock);

        return 0;
}

static void
titan_device_interrupt(unsigned long vector)
{
        printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}

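/*
 * SRM delivers device interrupts as vectors spaced 0x10 apart starting
 * at 0x800, so the irq number is recovered as (vector - 0x800) >> 4.
 */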
static void
titan_srm_device_interrupt(unsigned long vector)
{
        int irq;

        irq = (vector - 0x800) >> 4;
        handle_irq(irq);
}

static void __init
init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
        long i;

        for (i = imin; i <= imax; ++i) {
                irq_set_chip_and_handler(i, ops, handle_level_irq);
                irq_set_status_flags(i, IRQ_LEVEL);
        }
}

static struct irq_chip titan_irq_type = {
        .name              = "TITAN",
        .irq_unmask        = titan_enable_irq,
        .irq_mask          = titan_disable_irq,
        .irq_mask_ack      = titan_disable_irq,
        .irq_set_affinity  = titan_set_irq_affinity,
};

static irqreturn_t
titan_intr_nop(int irq, void *dev_id)
{
        /*
         * This is a NOP interrupt handler for the purposes of
         * event counting -- just return.
         */
        return IRQ_HANDLED;
}

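/*
 * The 64 Titan device interrupts are mapped to Linux irqs 16..79,
 * leaving 0..15 for the legacy ISA (i8259) interrupts.
 */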
static void __init
titan_init_irq(void)
{
        if (alpha_using_srm && !alpha_mv.device_interrupt)
                alpha_mv.device_interrupt = titan_srm_device_interrupt;
        if (!alpha_mv.device_interrupt)
                alpha_mv.device_interrupt = titan_device_interrupt;

        titan_update_irq_hw(0);

        init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}

static void __init
titan_legacy_init_irq(void)
{
        /* init the legacy dma controller */
        outb(0, DMA1_RESET_REG);
        outb(0, DMA2_RESET_REG);
        outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
        outb(0, DMA2_MASK_REG);

        /* init the legacy irq controller */
        init_i8259a_irqs();

        /* init the titan irqs */
        titan_init_irq();
}

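/*
 * Walk a 64-bit request mask, highest bit first, and hand each pending
 * interrupt to the machine vector's device_interrupt handler as the
 * corresponding SRM vector (0x900 + bit * 0x10).
 */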
void
titan_dispatch_irqs(u64 mask)
{
        unsigned long vector;

        /*
         * Mask down to those interrupts which are enabled on this processor.
         */
        mask &= titan_cpu_irq_affinity[smp_processor_id()];

        /*
         * Dispatch all requested interrupts.
         */
        while (mask) {
                /* convert to SRM vector... priority is <63> -> <0> */
                vector = 63 - __kernel_ctlz(mask);
                mask &= ~(1UL << vector);       /* clear it out */
                vector = 0x900 + (vector << 4); /* convert to SRM vector */

                /* dispatch it */
                alpha_mv.device_interrupt(vector);
        }
}

/*
 * Titan Family
 */
static void __init
titan_request_irq(unsigned int irq, irq_handler_t handler,
                  unsigned long irqflags, const char *devname,
                  void *dev_id)
{
        int err;

        err = request_irq(irq, handler, irqflags, devname, dev_id);
        if (err) {
                printk("titan_request_irq for IRQ %d returned %d; ignoring\n",
                       irq, err);
        }
}

static void __init
titan_late_init(void)
{
        /*
         * Enable the system error interrupts. These interrupts are
         * all reported to the kernel as machine checks, so the handler
         * is a nop; it exists only so the individual events get counted.
         */
        titan_request_irq(63+16, titan_intr_nop, 0,
                          "CChip Error", NULL);
        titan_request_irq(62+16, titan_intr_nop, 0,
                          "PChip 0 H_Error", NULL);
        titan_request_irq(61+16, titan_intr_nop, 0,
                          "PChip 1 H_Error", NULL);
        titan_request_irq(60+16, titan_intr_nop, 0,
                          "PChip 0 C_Error", NULL);
        titan_request_irq(59+16, titan_intr_nop, 0,
                          "PChip 1 C_Error", NULL);

        /*
         * Register our error handlers.
         */
        titan_register_error_handlers();

        /*
         * Check if the console left us any error logs.
         */
        cdl_check_console_data_log();
}

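/*
 * The console has already routed the interrupt and left the result in
 * PCI_INTERRUPT_LINE; values of the form 0xEx presumably denote an
 * interrupt routed through the legacy ISA path and are returned as-is.
 */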
static int
titan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        u8 intline;
        int irq;

        /* Get the current intline.  */
        pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
        irq = intline;

        /* Is it explicitly routed through ISA?  */
        if ((irq & 0xF0) == 0xE0)
                return irq;

        /* Offset by 16 to make room for ISA interrupts 0 - 15.  */
        return irq + 16;
}

static void __init
titan_init_pci(void)
{
        /*
         * This isn't really the right place, but there's some init
         * that needs to be done after everything is basically up.
         */
        titan_late_init();

        /* Indicate that we trust the console to configure things properly.  */
        pci_set_flags(PCI_PROBE_ONLY);
        common_init_pci();
        SMC669_Init(0);
        locate_and_init_vga(NULL);
}

/*
 * Privateer
 */
static void __init
privateer_init_pci(void)
{
        /*
         * Hook a couple of extra err interrupts that the
         * common titan code won't.
         */
        titan_request_irq(53+16, titan_intr_nop, 0,
                          "NMI", NULL);
        titan_request_irq(50+16, titan_intr_nop, 0,
                          "Temperature Warning", NULL);

        /*
         * Finish with the common version.
         */
        return titan_init_pci();
}

/*
 * The System Vectors.
 */
struct alpha_machine_vector titan_mv __initmv = {
        .vector_name            = "TITAN",
        DO_EV6_MMU,
        DO_DEFAULT_RTC,
        DO_TITAN_IO,
        .machine_check          = titan_machine_check,
        .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
        .min_io_address         = DEFAULT_IO_BASE,
        .min_mem_address        = DEFAULT_MEM_BASE,
        .pci_dac_offset         = TITAN_DAC_OFFSET,

        .nr_irqs                = 80,   /* 64 + 16 */
        /* device_interrupt will be filled in by titan_init_irq */

        .agp_info               = titan_agp_info,

        .init_arch              = titan_init_arch,
        .init_irq               = titan_legacy_init_irq,
        .init_rtc               = common_init_rtc,
        .init_pci               = titan_init_pci,
        .kill_arch              = titan_kill_arch,
        .pci_map_irq            = titan_map_irq,
        .pci_swizzle            = common_swizzle,
};
ALIAS_MV(titan)

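/*
 * Privateer shares the Titan machine vector wholesale; only the machine
 * check handler and the PCI init hook differ.
 */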
struct alpha_machine_vector privateer_mv __initmv = {
        .vector_name            = "PRIVATEER",
        DO_EV6_MMU,
        DO_DEFAULT_RTC,
        DO_TITAN_IO,
        .machine_check          = privateer_machine_check,
        .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
        .min_io_address         = DEFAULT_IO_BASE,
        .min_mem_address        = DEFAULT_MEM_BASE,
        .pci_dac_offset         = TITAN_DAC_OFFSET,

        .nr_irqs                = 80,   /* 64 + 16 */
        /* device_interrupt will be filled in by titan_init_irq */

        .agp_info               = titan_agp_info,

        .init_arch              = titan_init_arch,
        .init_irq               = titan_legacy_init_irq,
        .init_rtc               = common_init_rtc,
        .init_pci               = privateer_init_pci,
        .kill_arch              = titan_kill_arch,
        .pci_map_irq            = titan_map_irq,
        .pci_swizzle            = common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
   in unconditionally with titan; setup_arch knows how to cope.  */