/*
 * SMP support for power macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Note that we don't support the very first rev. of
 * Apple/DayStar 2-CPU board, the one with the funky
 * watchdog. Hopefully, none of these should be there except
 * maybe internally to Apple. I should probably still add some
 * code to detect this card though and disable SMP. --BenH.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h>

#include "pmac.h"

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
extern void __secondary_start_pmac_0(void);
extern int pmac_pfunc_base_install(void);

static void (*pmac_tb_freeze)(int freeze);
static u64 timebase;
static int tb_req;
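
/*
 * Timebase sync handshake shared by the PowerSurge dual and Core99
 * code below: the secondary sets tb_req and spins until the primary
 * publishes its TB value in 'timebase'; the secondary then copies it
 * into its own TB and clears 'timebase' again, which releases the
 * primary. The timebase is kept frozen (by whatever mechanism the
 * platform provides) while the value is being transferred.
 */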
#ifdef CONFIG_PPC_PMAC32_PSURGE

/*
 * Powersurge (old powermac SMP) support.
 */

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE		0xf8000000
#define HHEAD_CONFIG		0x90
#define HHEAD_SEC_INTR		0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR		0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START		0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR	0xf8800000

#define PSURGE_QUAD_IRQ_SET	0
#define PSURGE_QUAD_IRQ_CLR	1
#define PSURGE_QUAD_IRQ_PRIMARY	2
#define PSURGE_QUAD_CKSTOP_CTL	3
#define PSURGE_QUAD_PRIMARY_ARB	4
#define PSURGE_QUAD_BOARD_ID	6
#define PSURGE_QUAD_WHICH_CPU	7
#define PSURGE_QUAD_CKSTOP_RDBK	8
#define PSURGE_QUAD_RESET_CTL	11

#define PSURGE_QUAD_OUT(r, v)	(out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r)	(in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
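
/*
 * Note on the accessors above: the quad card registers are spaced 16
 * bytes apart and are accessed through the byte at offset 4 within
 * each 16-byte window; only the low nibble of that byte appears to be
 * meaningful, hence the masking in PSURGE_QUAD_IN().
 */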
/* virtual addresses for the above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;

/* values for psurge_type */
#define PSURGE_NONE		-1
#define PSURGE_DUAL		0
#define PSURGE_QUAD_OKEE	1
#define PSURGE_QUAD_COTTON	2
#define PSURGE_QUAD_ICEGRASS	3

/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;

/* irq for secondary cpus to report */
static struct irq_domain *psurge_host;
int psurge_secondary_virq;

/*
 * Set and clear IPIs for powersurge.
 */
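/*
 * The primary is interrupted simply by performing a read of
 * PSURGE_PRI_INTR (the ethernet ROM location mapped above); on a dual
 * board the secondary is poked through the hammerhead HHEAD_SEC_INTR
 * register, while the quad cards use their IRQ_SET/IRQ_CLR registers
 * with one bit per CPU.
 */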
static inline void psurge_set_ipi(int cpu)
{
	if (psurge_type == PSURGE_NONE)
		return;
	if (cpu == 0)
		in_be32(psurge_pri_intr);
	else if (psurge_type == PSURGE_DUAL)
		out_8(psurge_sec_intr, 0);
	else
		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
		case PSURGE_NONE:
			break;
		default:
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}
/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does. Instead
 * we use the generic demux helpers.
 *  -- paulus.
 */
static irqreturn_t psurge_ipi_intr(int irq, void *d)
{
	psurge_clr_ipi(smp_processor_id());
	smp_ipi_demux();

	return IRQ_HANDLED;
}

static void smp_psurge_cause_ipi(int cpu, unsigned long data)
{
	psurge_set_ipi(cpu);
}

static int psurge_host_map(struct irq_domain *h, unsigned int virq,
			   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops psurge_host_ops = {
	.map	= psurge_host_map,
};

static int psurge_secondary_ipi_init(void)
{
	int rc = -ENOMEM;

	psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);

	if (psurge_host)
		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);

	if (psurge_secondary_virq)
		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
				 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);

	if (rc)
		pr_err("Failed to setup secondary cpu IPI\n");

	return rc;
}
/*
 * Determine whether a quad card is present. We read the board ID
 * register, force the data bus to change to something else, and read
 * it again. If it's stable, then the register probably exists (ugh !)
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters. -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}
static void __init smp_psurge_probe(void)
{
	int i, ncpus;
	struct device_node *dn;

	/* We don't do SMP on the PPC601 -- paulus */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		return;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board.  -- paulus.
	 */
	dn = of_find_node_by_name(NULL, "hammerhead");
	if (dn == NULL)
		return;
	of_node_put(dn);

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
		/* Not sure how timebase sync works on those, let's use SW */
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
	} else {
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return;
		}
		ncpus = 2;
	}

	if (psurge_secondary_ipi_init())
		return;

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* This is necessary because OF doesn't know about the
	 * secondary cpu(s), and thus there aren't nodes in the
	 * device tree for them, and smp_setup_cpu_maps hasn't
	 * set their bits in cpu_present_mask.
	 */
	if (ncpus > NR_CPUS)
		ncpus = NR_CPUS;
	for (i = 1; i < ncpus; ++i)
		set_cpu_present(i, true);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
}
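
/*
 * Kicking a secondary: we write the physical address of its entry
 * stub (an 8-byte slot per CPU inside __secondary_start_pmac_0) into
 * the PSURGE_START register and poke the CPU with an IPI so it starts
 * executing there. Writing PSURGE_START freezes the timebase, so we
 * must busy-wait here for the CPU to call in (udelay would hang), and
 * on dual cards we also perform the timebase handoff below before the
 * timebase is restarted.
 */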
static int __init smp_psurge_kick_cpu(int nr)
{
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a, flags;
	int i, j;

	/* Defining this here is evil ... but I prefer hiding that
	 * crap to avoid giving people ideas that they can do the
	 * same.
	 */
	extern volatile unsigned int cpu_callin_map[NR_CPUS];

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	/* This is going to freeze the timebase, so we disable interrupts */
	local_irq_save(flags);

	out_be32(psurge_start, start);
	mb();

	psurge_set_ipi(nr);

	/*
	 * We can't use udelay here because the timebase is now frozen.
	 */
	for (i = 0; i < 2000; ++i)
		asm volatile("nop" : : : "memory");
	psurge_clr_ipi(nr);

	/*
	 * Also, because the timebase is frozen, we must not return to the
	 * caller which will try to do udelay's etc... Instead, we wait -here-
	 * for the CPU to call in.
	 */
	for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
		for (j = 1; j < 10000; j++)
			asm volatile("nop" : : : "memory");
		asm volatile("sync" : : : "memory");
	}

	if (!cpu_callin_map[nr])
		goto stuck;

	/* And we do the TB sync here too for standard dual CPU cards */
	if (psurge_type == PSURGE_DUAL) {
		while(!tb_req)
			barrier();
		tb_req = 0;
		mb();
		timebase = get_tb();
		mb();
		while (timebase)
			barrier();
		mb();
	}
 stuck:
	/* now interrupt the secondary, restarting both TBs */
	if (psurge_type == PSURGE_DUAL)
		psurge_set_ipi(1);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);

	return 0;
}
static struct irqaction psurge_irqaction = {
	.handler = psurge_ipi_intr,
	.flags = IRQF_PERCPU | IRQF_NO_THREAD,
	.name = "primary IPI",
};

static void __init smp_psurge_setup_cpu(int cpu_nr)
{
	if (cpu_nr != 0 || !psurge_start)
		return;

	/* reset the entry point so if we get another intr we won't
	 * try to start up again */
	out_be32(psurge_start, 0x100);
	if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
		printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
void __init smp_psurge_take_timebase(void)
{
	if (psurge_type != PSURGE_DUAL)
		return;

	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	mb();
	set_dec(tb_ticks_per_jiffy/2);
}

void __init smp_psurge_give_timebase(void)
{
	/* Nothing to do here */
}

/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
	.message_pass	= NULL,	/* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= smp_psurge_cause_ipi,
	.probe		= smp_psurge_probe,
	.kick_cpu	= smp_psurge_kick_cpu,
	.setup_cpu	= smp_psurge_setup_cpu,
	.give_timebase	= smp_psurge_give_timebase,
	.take_timebase	= smp_psurge_take_timebase,
};
#endif /* CONFIG_PPC_PMAC32_PSURGE */
/*
 * Core 99 and later support
 */
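/*
 * give/take_timebase below implement the primary and the secondary
 * side of the tb_req/timebase handshake described above;
 * pmac_tb_freeze() is the platform-specific hook (i2c clock chip,
 * platform function or GPIO) used to stop and restart the timebase
 * around the transfer.
 */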
static void smp_core99_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	while(!tb_req)
		barrier();
	tb_req = 0;
	(*pmac_tb_freeze)(1);
	mb();
	timebase = get_tb();
	mb();
	while (timebase)
		barrier();
	mb();
	(*pmac_tb_freeze)(0);
	mb();

	local_irq_restore(flags);
}

static void smp_core99_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	mb();

	local_irq_restore(flags);
}
#ifdef CONFIG_PPC64
/*
 * G5s enable/disable the timebase via an i2c-connected clock chip.
 */
static struct pmac_i2c_bus *pmac_tb_clock_chip_host;
static u8 pmac_tb_pulsar_addr;

static void smp_core99_cypress_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	/* Strangely, the device-tree says address is 0xd2, but darwin
	 * accesses 0xd0 ...
	 */
	pmac_i2c_setmode(pmac_tb_clock_chip_host,
			 pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   0xd0 | pmac_i2c_read,
			   1, 0x81, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);

	pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   0xd0 | pmac_i2c_write,
			   1, 0x81, &data, 1);

 bail:
	if (rc != 0) {
		printk("Cypress Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}

static void smp_core99_pulsar_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	pmac_i2c_setmode(pmac_tb_clock_chip_host,
			 pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_read,
			   1, 0x2e, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0x88) | (freeze ? 0x11 : 0x22);

	pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_write,
			   1, 0x2e, &data, 1);

 bail:
	if (rc != 0) {
		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}
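
/*
 * Pick the timebase freeze method on i2c-sync G5s: walk the
 * "i2c-hwclock" nodes hanging off a uni-n i2c bus and select the
 * Pulsar or Cypress routine above based on the chip's "reg" address
 * and compatible property. If the bus can't be opened we fall back
 * to the generic software sync.
 */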
static void __init smp_core99_setup_i2c_hwsync(int ncpus)
{
	struct device_node *cc = NULL;
	struct device_node *p;
	const char *name = NULL;
	const u32 *reg;
	int ok;

	/* Look for the clock chip */
	for_each_node_by_name(cc, "i2c-hwclock") {
		p = of_get_parent(cc);
		ok = p && of_device_is_compatible(p, "uni-n-i2c");
		of_node_put(p);
		if (!ok)
			continue;

		pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
		if (pmac_tb_clock_chip_host == NULL)
			continue;
		reg = of_get_property(cc, "reg", NULL);
		if (reg == NULL)
			continue;
		switch (*reg) {
		case 0xd2:
			if (of_device_is_compatible(cc,"pulsar-legacy-slewing")) {
				pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
				pmac_tb_pulsar_addr = 0xd2;
				name = "Pulsar";
			} else if (of_device_is_compatible(cc, "cy28508")) {
				pmac_tb_freeze = smp_core99_cypress_tb_freeze;
				name = "Cypress";
			}
			break;
		case 0xd4:
			pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
			pmac_tb_pulsar_addr = 0xd4;
			name = "Pulsar";
			break;
		}
		if (pmac_tb_freeze != NULL)
			break;
	}
	if (pmac_tb_freeze != NULL) {
		/* Open i2c bus for synchronous access */
		if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
			printk(KERN_ERR "Failed to open i2c bus for clock"
			       " sync, falling back to software sync !\n");
			goto no_i2c_sync;
		}
		printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
		       name);
		return;
	}
 no_i2c_sync:
	pmac_tb_freeze = NULL;
	pmac_tb_clock_chip_host = NULL;
}
/*
 * Newer G5s use a platform function
 */
static void smp_core99_pfunc_tb_freeze(int freeze)
{
	struct device_node *cpus;
	struct pmf_args args;

	cpus = of_find_node_by_path("/cpus");
	BUG_ON(cpus == NULL);
	args.count = 1;
	args.u[0].v = !freeze;
	pmf_call_function(cpus, "cpu-timebase", &args);
	of_node_put(cpus);
}

#else /* CONFIG_PPC64 */

/*
 * SMP G4s use a GPIO to enable/disable the timebase.
 */

static unsigned int core99_tb_gpio;	/* Timebase freeze GPIO */

static void smp_core99_gpio_tb_freeze(int freeze)
{
	if (freeze)
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	else
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
}

#endif /* !CONFIG_PPC64 */
/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
static volatile long int core99_l2_cache;
static volatile long int core99_l3_cache;
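
/*
 * CPU 0 records its L2CR/L3CR values here when core99_init_caches(0)
 * is called at probe time; each secondary then clears and reprograms
 * its own cache control registers with the same values when it comes
 * up (see smp_core99_setup_cpu()).
 */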
static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0) {
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
#endif /* !CONFIG_PPC64 */
}
static void __init smp_core99_setup(int ncpus)
{
#ifdef CONFIG_PPC64

	/* i2c based HW sync on some G5s */
	if (of_machine_is_compatible("PowerMac7,2") ||
	    of_machine_is_compatible("PowerMac7,3") ||
	    of_machine_is_compatible("RackMac3,1"))
		smp_core99_setup_i2c_hwsync(ncpus);

	/* pfunc based HW sync on recent G5s */
	if (pmac_tb_freeze == NULL) {
		struct device_node *cpus =
			of_find_node_by_path("/cpus");
		if (cpus &&
		    of_get_property(cpus, "platform-cpu-timebase", NULL)) {
			pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
			printk(KERN_INFO "Processor timebase sync using"
			       " platform function\n");
		}
	}

#else /* CONFIG_PPC64 */

	/* GPIO based HW sync on ppc32 Core99 */
	if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
		struct device_node *cpu;
		const u32 *tbprop = NULL;

		core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
		cpu = of_find_node_by_type(NULL, "cpu");
		if (cpu != NULL) {
			tbprop = of_get_property(cpu, "timebase-enable", NULL);
			if (tbprop)
				core99_tb_gpio = *tbprop;
			of_node_put(cpu);
		}
		pmac_tb_freeze = smp_core99_gpio_tb_freeze;
		printk(KERN_INFO "Processor timebase sync using"
		       " GPIO 0x%02x\n", core99_tb_gpio);
	}

#endif /* CONFIG_PPC64 */

	/* No timebase sync, fallback to software */
	if (pmac_tb_freeze == NULL) {
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
		printk(KERN_INFO "Processor timebase sync using software\n");
	}

#ifndef CONFIG_PPC64
	{
		int i;

		/* XXX should get this from reg properties */
		for (i = 1; i < ncpus; ++i)
			set_hard_smp_processor_id(i, i);
	}
#endif

	/* 32-bit SMP can't NAP */
	if (!of_machine_is_compatible("MacRISC4"))
		powersave_nap = 0;
}
static void __init smp_core99_probe(void)
{
	struct device_node *cpus;
	int ncpus = 0;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);

	/* Count CPUs in the device-tree */
	for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
		++ncpus;

	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);

	/* Nothing more to do if less than 2 of them */
	if (ncpus <= 1)
		return;

	/* We need to perform some early initialisations before we can start
	 * setting up SMP as we are running before initcalls
	 */
	pmac_pfunc_base_install();
	pmac_i2c_init();

	/* Setup various bits like timebase sync method, ability to nap, ... */
	smp_core99_setup(ncpus);

	/* Install IPIs */
	mpic_request_ipis();

	/* Collect l2cr and l3cr values from CPU 0 */
	core99_init_caches(0);
}
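
/*
 * To kick a secondary we temporarily patch the exception vector at
 * physical 0x100 (the system reset vector) with a branch into the
 * per-CPU entry stub, then poke the CPU through the platform's
 * RESET_CPU feature call so it takes a reset exception and lands in
 * our stub; once it is on its way we restore the original vector.
 */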
static int smp_core99_kick_cpu(int nr)
{
	unsigned int save_vector;
	unsigned long target, flags;
	unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);

	if (nr < 0 || nr > 3)
		return -ENOENT;

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_kick_cpu", 0x346);

	local_irq_save(flags);

	/* Save reset vector */
	save_vector = *vector;

	/* Setup fake reset vector that does
	 *   b __secondary_start_pmac_0 + nr*8
	 */
	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
	patch_branch(vector, target, BRANCH_SET_LINK);

	/* Put some life in our friend */
	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

	/* FIXME: We wait a bit for the CPU to take the exception, I should
	 * instead wait for the entry code to set something for me. Well,
	 * ideally, all that crap will be done in prom.c and the CPU left
	 * in a RAM-based wait loop like CHRP.
	 */
	mdelay(1);

	/* Restore our exception vector */
	*vector = save_vector;
	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

	local_irq_restore(flags);
	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);

	return 0;
}
static void smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();
}

#ifdef CONFIG_PPC64
#ifdef CONFIG_HOTPLUG_CPU
static int smp_core99_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int rc;

	switch(action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Open i2c bus if it was used for tb sync */
		if (pmac_tb_clock_chip_host) {
			rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
			if (rc) {
				pr_err("Failed to open i2c bus for time sync\n");
				return notifier_from_errno(rc);
			}
		}
		break;
	case CPU_ONLINE:
	case CPU_UP_CANCELED:
		/* Close i2c bus if it was used for tb sync */
		if (pmac_tb_clock_chip_host)
			pmac_i2c_close(pmac_tb_clock_chip_host);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block smp_core99_cpu_nb = {
	.notifier_call	= smp_core99_cpu_notify,
};
#endif /* CONFIG_HOTPLUG_CPU */

static void __init smp_core99_bringup_done(void)
{
	extern void g5_phy_disable_cpu1(void);

	/* Close i2c bus if it was used for tb sync */
	if (pmac_tb_clock_chip_host)
		pmac_i2c_close(pmac_tb_clock_chip_host);

	/* If we didn't start the second CPU, we must take
	 * it off the bus.
	 */
	if (of_machine_is_compatible("MacRISC4") &&
	    num_online_cpus() < 2) {
		set_cpu_present(1, false);
		g5_phy_disable_cpu1();
	}
#ifdef CONFIG_HOTPLUG_CPU
	register_cpu_notifier(&smp_core99_cpu_nb);
#endif

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_bringup_done", 0x349);
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_HOTPLUG_CPU

static int smp_core99_cpu_disable(void)
{
	int rc = generic_cpu_disable();
	if (rc)
		return rc;

	mpic_cpu_set_priority(0xf);

	return 0;
}

#ifdef CONFIG_PPC32

static void pmac_cpu_die(void)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	pr_debug("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();
	mb();
	low_cpu_die();
}

#else /* CONFIG_PPC32 */

static void pmac_cpu_die(void)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * turn off as much as possible, we'll be
	 * kicked out as this will only be invoked
	 * on core99 platforms for now ...
	 */

	printk(KERN_INFO "CPU#%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	/*
	 * Re-enable interrupts. The NAP code needs to enable them
	 * anyways, do it now so we deal with the case where one already
	 * happened while soft-disabled.
	 * We shouldn't get any external interrupts, only decrementer, and the
	 * decrementer handler is safe for use on offline CPUs
	 */
	local_irq_enable();

	while (1) {
		/* let's not take timer interrupts too often ... */
		set_dec(0x7fffffff);

		/* Enter NAP mode */
		power4_idle();
	}
}

#endif /* else CONFIG_PPC32 */
#endif /* CONFIG_HOTPLUG_CPU */
/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
	.message_pass	= smp_mpic_message_pass,
	.probe		= smp_core99_probe,
#ifdef CONFIG_PPC64
	.bringup_done	= smp_core99_bringup_done,
#endif
	.kick_cpu	= smp_core99_kick_cpu,
	.setup_cpu	= smp_core99_setup_cpu,
	.give_timebase	= smp_core99_give_timebase,
	.take_timebase	= smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU)
	.cpu_disable	= smp_core99_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
};
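
/*
 * Pick the SMP ops at boot: a "uni-n", "u3" or "u4" system controller
 * node identifies a Core99-or-later machine; otherwise, on 32-bit
 * kernels with PSURGE support, assume an old PowerSurge-style box and
 * mark CPUs 1-3 possible by hand since Open Firmware doesn't describe
 * them in the device tree.
 */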
void __init pmac_setup_smp(void)
{
	struct device_node *np;

	/* Check for Core99 */
	np = of_find_node_by_name(NULL, "uni-n");
	if (!np)
		np = of_find_node_by_name(NULL, "u3");
	if (!np)
		np = of_find_node_by_name(NULL, "u4");
	if (np) {
		of_node_put(np);
		smp_ops = &core99_smp_ops;
	}
#ifdef CONFIG_PPC_PMAC32_PSURGE
	else {
		/* We have to set bits in cpu_possible_mask here since the
		 * secondary CPU(s) aren't in the device tree. Various
		 * things won't be initialized for CPUs not in the possible
		 * map, so we really need to fix it up here.
		 */
		int cpu;

		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
			set_cpu_possible(cpu, true);
		smp_ops = &psurge_smp_ops;
	}
#endif /* CONFIG_PPC_PMAC32_PSURGE */

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die = pmac_cpu_die;
#endif
}