/* swsusp_32.S */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
	.section .data
	.align	5
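/*
 * Save area used by swsusp_arch_suspend()/swsusp_arch_resume(), laid
 * out according to the SL_* offsets above.
 */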
_GLOBAL(swsusp_save_area)
	.space	SL_SIZE

	.section .text
	.align	5
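/*
 * swsusp_arch_suspend():
 * Save the CPU context (non-volatile GPRs, MSR, SDR1, timebase, SPRGs
 * and BATs) into swsusp_save_area, then call swsusp_save() to snapshot
 * memory.
 */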
_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
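	/* Save LR, CR, the stack pointer, r2 and the non-volatile GPRs */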
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
#if 0
	/* Back up various CPU configuration state */
	bl	__save_cpu_setup
#endif

	/* Call the low level suspend stuff (we should probably have
	 * made a stackframe...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
/* Resume code */
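/*
 * swsusp_arch_resume():
 * With data translation off, copy every page on restore_pblist back to
 * its original location, flush the caches, then restore the MMU state
 * and the CPU context saved by swsusp_arch_suspend().
 */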
_GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
	/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings,
	 * since disabling the MMU completely isn't a good option
	 * for performance reasons.
	 * (Note that 750's may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26	/* clear MSR_DR */
	mtmsr	r0
	sync
	isync
	/* Load a pointer to the list of pages to copy into r10 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
1:
	tophys(r3,r10)
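	/* one page: 256 iterations x 16 bytes = 4096 bytes */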
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
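	/* Touch 0x20000 32-byte lines (4MB) starting at address 0; this
	 * is assumed to be more than enough to displace the whole L1 on
	 * these CPUs
	 */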
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
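	/* Data translation is still off, so use the physical address
	 * of the save area
	 */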
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration state */
	bl	__restore_cpu_setup
#endif

	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are executing through those very
	 * BATs, but first, our code is probably in the icache, and we
	 * are writing the same values back to the BATs, so that should
	 * be fine, though a better solution will have to be found
	 * long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
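	/* Restore the SPRGs */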
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4
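	/* The BAT restore below is compiled out, presumably because,
	 * as the comment above notes, the BATs already hold the saved
	 * values
	 */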
#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif
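	/* Zero the high BATs (4..7) on CPUs that implement them */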
BEGIN_MMU_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

	/* Flush all TLBs */
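	/* tlbie one 4KB page at a time across 256MB of effective
	 * addresses, which is assumed to cover every TLB set on these
	 * CPUs
	 */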
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)
	/* Restore the timebase: clear TBL first so it cannot carry
	 * into TBU while we write the two halves
	 */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4
	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr
/* FIXME: This construct is not actually useful, since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
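/*
 * turn_on_mmu: load SRR0 with our return address and SRR1 with the MSR
 * image passed in r3, then rfi so that the MSR update (re-enabling
 * translation) and the jump back happen atomically.
 */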
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi