/* cache.S */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is over
 * paranoid, but I've had enough issues with various CPU revs and
 * bugs that I decided it was worth being over cautious.
 */
/*
 * void flush_disable_caches(void)
 *
 * Entry point: dispatch to the CPU-family-specific flush/disable
 * routine based on runtime feature bits.  No-op on non-6xx kernels.
 * Feature sections are patched at boot, so at most one branch below
 * is live on any given CPU.
 */
_GLOBAL(flush_disable_caches)
#ifndef CONFIG_6xx
	blr					/* nothing to do on non-6xx */
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x		/* 745x: HW-assisted L2/L3 flush */
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x		/* G3 / 750 / 74[01]0: SW L2 flush */
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1		/* no L2CR: L1 only */
  37. /* This is the code for G3 and 74[01]0 */
  38. flush_disable_75x:
  39. mflr r10
  40. /* Turn off EE and DR in MSR */
  41. mfmsr r11
  42. rlwinm r0,r11,0,~MSR_EE
  43. rlwinm r0,r0,0,~MSR_DR
  44. sync
  45. mtmsr r0
  46. isync
  47. /* Stop DST streams */
  48. BEGIN_FTR_SECTION
  49. DSSALL
  50. sync
  51. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  52. /* Stop DPM */
  53. mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
  54. rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
  55. sync
  56. mtspr SPRN_HID0,r4 /* Disable DPM */
  57. sync
  58. /* Disp-flush L1. We have a weird problem here that I never
  59. * totally figured out. On 750FX, using the ROM for the flush
  60. * results in a non-working flush. We use that workaround for
  61. * now until I finally understand what's going on. --BenH
  62. */
  63. /* ROM base by default */
  64. lis r4,0xfff0
  65. mfpvr r3
  66. srwi r3,r3,16
  67. cmplwi cr0,r3,0x7000
  68. bne+ 1f
  69. /* RAM base on 750FX */
  70. li r4,0
  71. 1: li r4,0x4000
  72. mtctr r4
  73. 1: lwz r0,0(r4)
  74. addi r4,r4,32
  75. bdnz 1b
  76. sync
  77. isync
  78. /* Disable / invalidate / enable L1 data */
  79. mfspr r3,SPRN_HID0
  80. rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE)
  81. mtspr SPRN_HID0,r3
  82. sync
  83. isync
  84. ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
  85. sync
  86. isync
  87. mtspr SPRN_HID0,r3
  88. xori r3,r3,(HID0_DCI|HID0_ICFI)
  89. mtspr SPRN_HID0,r3
  90. sync
  91. /* Get the current enable bit of the L2CR into r4 */
  92. mfspr r5,SPRN_L2CR
  93. /* Set to data-only (pre-745x bit) */
  94. oris r3,r5,L2CR_L2DO@h
  95. b 2f
  96. /* When disabling L2, code must be in L1 */
  97. .balign 32
  98. 1: mtspr SPRN_L2CR,r3
  99. 3: sync
  100. isync
  101. b 1f
  102. 2: b 3f
  103. 3: sync
  104. isync
  105. b 1b
  106. 1: /* disp-flush L2. The interesting thing here is that the L2 can be
  107. * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
  108. * but that is probbaly fine. We disp-flush over 4Mb to be safe
  109. */
  110. lis r4,2
  111. mtctr r4
  112. lis r4,0xfff0
  113. 1: lwz r0,0(r4)
  114. addi r4,r4,32
  115. bdnz 1b
  116. sync
  117. isync
  118. lis r4,2
  119. mtctr r4
  120. lis r4,0xfff0
  121. 1: dcbf 0,r4
  122. addi r4,r4,32
  123. bdnz 1b
  124. sync
  125. isync
  126. /* now disable L2 */
  127. rlwinm r5,r5,0,~L2CR_L2E
  128. b 2f
  129. /* When disabling L2, code must be in L1 */
  130. .balign 32
  131. 1: mtspr SPRN_L2CR,r5
  132. 3: sync
  133. isync
  134. b 1f
  135. 2: b 3f
  136. 3: sync
  137. isync
  138. b 1b
  139. 1: sync
  140. isync
  141. /* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
  142. oris r4,r5,L2CR_L2I@h
  143. mtspr SPRN_L2CR,r4
  144. sync
  145. isync
  146. /* Wait for the invalidation to complete */
  147. 1: mfspr r3,SPRN_L2CR
  148. rlwinm. r0,r3,0,31,31
  149. bne 1b
  150. /* Clear L2I */
  151. xoris r4,r4,L2CR_L2I@h
  152. sync
  153. mtspr SPRN_L2CR,r4
  154. sync
  155. /* now disable the L1 data cache */
  156. mfspr r0,SPRN_HID0
  157. rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE)
  158. mtspr SPRN_HID0,r0
  159. sync
  160. isync
  161. /* Restore HID0[DPM] to whatever it was before */
  162. sync
  163. mfspr r0,SPRN_HID0
  164. rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */
  165. mtspr SPRN_HID0,r0
  166. sync
  167. /* restore DR and EE */
  168. sync
  169. mtmsr r11
  170. isync
  171. mtlr r10
  172. blr
/* This code is for 745x processors */
/*
 * flush_disable_745x: flush then disable L1, L2 and (if present) L3
 * data caches on 745x CPUs using the hardware flush assist bits
 * (L2HWF / L3HWF) plus a belt-and-braces displacement flush of the
 * first 4MB of RAM.
 *
 * Clobbers: r0, r3, r4, r5, r6, r11, ctr, cr0;
 *           MSSCR0, LDSTCR, L2CR, L3CR, HID0.
 * Makes no calls, so LR is not saved.
 */
flush_disable_745x:
	/* Turn off EE and DR in MSR (no interrupts, real-mode data) */
	mfmsr	r11				/* r11 = saved MSR */
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29			/* clear low two MSSCR0 bits */
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	/* NOTE(review): eight back-to-back dcbf of the same address —
	 * presumably a CPU-rev errata workaround; confirm against the
	 * 745x errata sheets before touching.
	 */
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */
	lis	r4,0x0002			/* ctr = 0x20000 lines = 4MB */
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32			/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32			/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache: lock seven of the eight
	 * ways via LDSTCR and displacement-flush the remaining way from
	 * ROM, rotating through all eight ways.
	 */
	mfspr	r6,SPRN_LDSTCR			/* r6 = saved LDSTCR */
	lis	r3,0xfff0			/* read from ROM for displacement flush */
	li	r4,0xfe				/* start with only way 0 unlocked */
	li	r5,128				/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31			/* insert way mask into LDSTCR[DCWL] */
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)			/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30			/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff				/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR			/* r3 = saved L2CR */
	cmpwi	r3,0				/* check if it is enabled first */
	bge	4f				/* L2E is the MSB: enabled => negative */
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1: the 2f/3f/1b
	 * dance pre-loads the trampoline into the I-cache, then runs
	 * the mtspr from cache.
	 */
	.balign	32
1:	mtspr	SPRN_L2CR,r0			/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0			/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR			/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r3			/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h		/* global invalidate the L2 */
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR			/* wait for invalidate to complete */
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR			/* r3 = saved L3CR */
	cmpwi	r3,0				/* check if it is enabled */
	bge	6f				/* L3E is the MSB: enabled => negative */
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0			/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0			/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR			/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3			/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I			/* global invalidate the L3 */
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR			/* wait for invalidate to complete */
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0			/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11				/* restore DR and EE */
	isync
	blr
#endif	/* CONFIG_6xx */