booke_interrupts.S
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)     (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)       __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE  (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE      (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR        (HOST_STACK_SIZE + 4) /* In caller stack frame. */
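
/* Exit types for which extra guest state must be captured in this low-level
 * path, before the host can clobber it: the faulting instruction, the
 * faulting data address (DEAR), and the exception syndrome (ESR),
 * respectively. */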
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                       (1<<BOOKE_INTERRUPT_ALIGNMENT))
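
/* Per-interrupt entry stub: stash guest r4 in the given scratch SPR, fetch
 * the vcpu pointer from the current thread, save the GPRs used as scratch
 * below plus CTR and the guest PC, put the exit number in r5, and branch to
 * the common exit path in kvmppc_resume_host. */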
.macro __KVM_HANDLER ivor_nr scratch srr0
        /* Get pointer to vcpu and record exit number. */
        mtspr \scratch, r4
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        stw r3, VCPU_GPR(R3)(r4)
        stw r5, VCPU_GPR(R5)(r4)
        stw r6, VCPU_GPR(R6)(r4)
        mfspr r3, \scratch
        mfctr r5
        stw r3, VCPU_GPR(R4)(r4)
        stw r5, VCPU_CTR(r4)
        mfspr r3, \srr0
        lis r6, kvmppc_resume_host@h
        stw r3, VCPU_PC(r4)
        li r5, \ivor_nr
        ori r6, r6, kvmppc_resume_host@l
        mtctr r6
        bctr
.endm
.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
        __KVM_HANDLER \ivor_nr \scratch \srr0
.endm
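
/* Debug interrupts need a special stub: if the debug event was raised while
 * we were already in this guest entry/exit code (MSR[PR] clear in CSRR1), it
 * is acknowledged on the spot by clearing DBSR and the MSR[DE] bit in CSRR1
 * and returning with rfci, rather than being reflected as a guest exit.
 * Only events that hit while the guest itself was running fall through to
 * the common handler. */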
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
        mtspr \scratch, r4
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        stw r3, VCPU_CRIT_SAVE(r4)
        mfcr r3
        mfspr r4, SPRN_CSRR1
        andi. r4, r4, MSR_PR
        bne 1f
        /* debug interrupt happened in enter/exit path */
        mfspr r4, SPRN_CSRR1
        rlwinm r4, r4, 0, ~MSR_DE
        mtspr SPRN_CSRR1, r4
        lis r4, 0xffff
        ori r4, r4, 0xffff
        mtspr SPRN_DBSR, r4
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        mtcr r3
        lwz r3, VCPU_CRIT_SAVE(r4)
        mfspr r4, \scratch
        rfci

1:      /* debug interrupt happened in guest */
        mtcr r3
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        lwz r3, VCPU_CRIT_SAVE(r4)
        mfspr r4, \scratch
        __KVM_HANDLER \ivor_nr \scratch \srr0
.endm
.macro KVM_HANDLER_ADDR ivor_nr
        .long kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
        .long kvmppc_handlers_end
.endm
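
/* One entry stub per IVOR. These appear to be relocated at runtime into the
 * area that kvmppc_booke_handlers points at, which is what IVPR is switched
 * to while a guest runs (see lightweight_exit below); the address table in
 * .data at the end of this file records each stub's link-time address for
 * that purpose. */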
_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        mfcr r3
        stw r3, VCPU_CR(r4)
        stw r7, VCPU_GPR(R7)(r4)
        stw r8, VCPU_GPR(R8)(r4)
        stw r9, VCPU_GPR(R9)(r4)
        li r6, 1
        slw r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save exit time */
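        /* Read the 64-bit timebase without a torn value on this 32-bit core:
         * re-read TBU after TBL and retry if the upper half rolled over. */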
1:
        mfspr r7, SPRN_TBRU
        mfspr r8, SPRN_TBRL
        mfspr r9, SPRN_TBRU
        cmpw r9, r7
        bne 1b
        stw r8, VCPU_TIMING_EXIT_TBL(r4)
        stw r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

        /* Save the faulting instruction and all GPRs for emulation. */
        andi. r7, r6, NEED_INST_MASK
        beq ..skip_inst_copy
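        /* Fetch the instruction the guest faulted on. The lwz is done with
         * MSR[DS] set so the data access uses the guest's translation
         * context (presumably address space 1, where the guest's TLB entries
         * live) rather than the host's. */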
        mfspr r9, SPRN_SRR0
        mfmsr r8
        ori r7, r8, MSR_DS
        mtmsr r7
        isync
        lwz r9, 0(r9)
        mtmsr r8
        isync
        stw r9, VCPU_LAST_INST(r4)

        stw r15, VCPU_GPR(R15)(r4)
        stw r16, VCPU_GPR(R16)(r4)
        stw r17, VCPU_GPR(R17)(r4)
        stw r18, VCPU_GPR(R18)(r4)
        stw r19, VCPU_GPR(R19)(r4)
        stw r20, VCPU_GPR(R20)(r4)
        stw r21, VCPU_GPR(R21)(r4)
        stw r22, VCPU_GPR(R22)(r4)
        stw r23, VCPU_GPR(R23)(r4)
        stw r24, VCPU_GPR(R24)(r4)
        stw r25, VCPU_GPR(R25)(r4)
        stw r26, VCPU_GPR(R26)(r4)
        stw r27, VCPU_GPR(R27)(r4)
        stw r28, VCPU_GPR(R28)(r4)
        stw r29, VCPU_GPR(R29)(r4)
        stw r30, VCPU_GPR(R30)(r4)
        stw r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */

        andi. r7, r6, NEED_DEAR_MASK
        beq ..skip_dear
        mfspr r9, SPRN_DEAR
        stw r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi. r7, r6, NEED_ESR_MASK
        beq ..skip_esr
        mfspr r9, SPRN_ESR
        stw r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw r0, VCPU_GPR(R0)(r4)
        stw r1, VCPU_GPR(R1)(r4)
        stw r2, VCPU_GPR(R2)(r4)
        stw r10, VCPU_GPR(R10)(r4)
        stw r11, VCPU_GPR(R11)(r4)
        stw r12, VCPU_GPR(R12)(r4)
        stw r13, VCPU_GPR(R13)(r4)
        stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
        mflr r3
        stw r3, VCPU_LR(r4)
        mfxer r3
        stw r3, VCPU_XER(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz r1, VCPU_HOST_STACK(r4)
        lwz r3, VCPU_HOST_PID(r4)
        mtspr SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        /* we cheat and know that Linux doesn't use PID1 which is always 0 */
        lis r3, 0
        mtspr SPRN_PID1, r3
#endif

        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis r3, 0xc000
        mtspr SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr r3
        lwz r3, HOST_RUN(r1)
        lwz r2, HOST_R2(r1)
        mr r14, r4 /* Save vcpu pointer. */
        bctrl /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr r4, r14
        lwz r14, VCPU_GPR(R14)(r4)

        /* Sometimes instruction emulation must restore complete GPR state. */
        andi. r5, r3, RESUME_FLAG_NV
        beq ..skip_nv_load
        lwz r15, VCPU_GPR(R15)(r4)
        lwz r16, VCPU_GPR(R16)(r4)
        lwz r17, VCPU_GPR(R17)(r4)
        lwz r18, VCPU_GPR(R18)(r4)
        lwz r19, VCPU_GPR(R19)(r4)
        lwz r20, VCPU_GPR(R20)(r4)
        lwz r21, VCPU_GPR(R21)(r4)
        lwz r22, VCPU_GPR(R22)(r4)
        lwz r23, VCPU_GPR(R23)(r4)
        lwz r24, VCPU_GPR(R24)(r4)
        lwz r25, VCPU_GPR(R25)(r4)
        lwz r26, VCPU_GPR(R26)(r4)
        lwz r27, VCPU_GPR(R27)(r4)
        lwz r28, VCPU_GPR(R28)(r4)
        lwz r29, VCPU_GPR(R29)(r4)
        lwz r30, VCPU_GPR(R30)(r4)
        lwz r31, VCPU_GPR(R31)(r4)
..skip_nv_load:
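        /* kvmppc_handle_exit() returns a resume code whose low bits are the
         * RESUME_FLAG_NV/RESUME_FLAG_HOST flags tested here; for a
         * host-bound exit the value handed back to kvm_vcpu_run() is
         * presumably packed above those flag bits, which is why it is
         * shifted back down before the heavyweight exit. */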
        /* Should we return to the guest? */
        andi. r5, r3, RESUME_FLAG_HOST
        beq lightweight_exit

        srawi r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */

#ifdef CONFIG_SPE
        /* save guest SPEFSCR and load host SPEFSCR */
        mfspr r9, SPRN_SPEFSCR
        stw r9, VCPU_SPEFSCR(r4)
        lwz r9, VCPU_HOST_SPEFSCR(r4)
        mtspr SPRN_SPEFSCR, r9
#endif

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw r15, VCPU_GPR(R15)(r4)
        stw r16, VCPU_GPR(R16)(r4)
        stw r17, VCPU_GPR(R17)(r4)
        stw r18, VCPU_GPR(R18)(r4)
        stw r19, VCPU_GPR(R19)(r4)
        stw r20, VCPU_GPR(R20)(r4)
        stw r21, VCPU_GPR(R21)(r4)
        stw r22, VCPU_GPR(R22)(r4)
        stw r23, VCPU_GPR(R23)(r4)
        stw r24, VCPU_GPR(R24)(r4)
        stw r25, VCPU_GPR(R25)(r4)
        stw r26, VCPU_GPR(R26)(r4)
        stw r27, VCPU_GPR(R27)(r4)
        stw r28, VCPU_GPR(R28)(r4)
        stw r29, VCPU_GPR(R29)(r4)
        stw r30, VCPU_GPR(R30)(r4)
        stw r31, VCPU_GPR(R31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz r14, HOST_NV_GPR(R14)(r1)
        lwz r15, HOST_NV_GPR(R15)(r1)
        lwz r16, HOST_NV_GPR(R16)(r1)
        lwz r17, HOST_NV_GPR(R17)(r1)
        lwz r18, HOST_NV_GPR(R18)(r1)
        lwz r19, HOST_NV_GPR(R19)(r1)
        lwz r20, HOST_NV_GPR(R20)(r1)
        lwz r21, HOST_NV_GPR(R21)(r1)
        lwz r22, HOST_NV_GPR(R22)(r1)
        lwz r23, HOST_NV_GPR(R23)(r1)
        lwz r24, HOST_NV_GPR(R24)(r1)
        lwz r25, HOST_NV_GPR(R25)(r1)
        lwz r26, HOST_NV_GPR(R26)(r1)
        lwz r27, HOST_NV_GPR(R27)(r1)
        lwz r28, HOST_NV_GPR(R28)(r1)
        lwz r29, HOST_NV_GPR(R29)(r1)
        lwz r30, HOST_NV_GPR(R30)(r1)
        lwz r31, HOST_NV_GPR(R31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz r4, HOST_STACK_LR(r1)
        lwz r5, HOST_CR(r1)
        addi r1, r1, HOST_STACK_SIZE
        mtlr r4
        mtcr r5
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu r1, -HOST_STACK_SIZE(r1)
        stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw r3, HOST_RUN(r1)
        mflr r3
        stw r3, HOST_STACK_LR(r1)
        mfcr r5
        stw r5, HOST_CR(r1)

        /* Save host non-volatile register state to stack. */
        stw r14, HOST_NV_GPR(R14)(r1)
        stw r15, HOST_NV_GPR(R15)(r1)
        stw r16, HOST_NV_GPR(R16)(r1)
        stw r17, HOST_NV_GPR(R17)(r1)
        stw r18, HOST_NV_GPR(R18)(r1)
        stw r19, HOST_NV_GPR(R19)(r1)
        stw r20, HOST_NV_GPR(R20)(r1)
        stw r21, HOST_NV_GPR(R21)(r1)
        stw r22, HOST_NV_GPR(R22)(r1)
        stw r23, HOST_NV_GPR(R23)(r1)
        stw r24, HOST_NV_GPR(R24)(r1)
        stw r25, HOST_NV_GPR(R25)(r1)
        stw r26, HOST_NV_GPR(R26)(r1)
        stw r27, HOST_NV_GPR(R27)(r1)
        stw r28, HOST_NV_GPR(R28)(r1)
        stw r29, HOST_NV_GPR(R29)(r1)
        stw r30, HOST_NV_GPR(R30)(r1)
        stw r31, HOST_NV_GPR(R31)(r1)

        /* Load guest non-volatiles. */
        lwz r14, VCPU_GPR(R14)(r4)
        lwz r15, VCPU_GPR(R15)(r4)
        lwz r16, VCPU_GPR(R16)(r4)
        lwz r17, VCPU_GPR(R17)(r4)
        lwz r18, VCPU_GPR(R18)(r4)
        lwz r19, VCPU_GPR(R19)(r4)
        lwz r20, VCPU_GPR(R20)(r4)
        lwz r21, VCPU_GPR(R21)(r4)
        lwz r22, VCPU_GPR(R22)(r4)
        lwz r23, VCPU_GPR(R23)(r4)
        lwz r24, VCPU_GPR(R24)(r4)
        lwz r25, VCPU_GPR(R25)(r4)
        lwz r26, VCPU_GPR(R26)(r4)
        lwz r27, VCPU_GPR(R27)(r4)
        lwz r28, VCPU_GPR(R28)(r4)
        lwz r29, VCPU_GPR(R29)(r4)
        lwz r30, VCPU_GPR(R30)(r4)
        lwz r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
        /* save host SPEFSCR and load guest SPEFSCR */
        mfspr r3, SPRN_SPEFSCR
        stw r3, VCPU_HOST_SPEFSCR(r4)
        lwz r3, VCPU_SPEFSCR(r4)
        mtspr SPRN_SPEFSCR, r3
#endif
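
/* Guest-entry path shared by the first entry (fall-through from
 * __kvmppc_vcpu_run above) and by re-entry from the exit path, when
 * kvmppc_handle_exit() decided the guest can simply be resumed. */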
lightweight_exit:
        stw r2, HOST_R2(r1)

        mfspr r3, SPRN_PID
        stw r3, VCPU_HOST_PID(r4)
        lwz r3, VCPU_SHADOW_PID(r4)
        mtspr SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        lwz r3, VCPU_SHADOW_PID1(r4)
        mtspr SPRN_PID1, r3
#endif

        /* Load some guest volatiles. */
        lwz r0, VCPU_GPR(R0)(r4)
        lwz r2, VCPU_GPR(R2)(r4)
        lwz r9, VCPU_GPR(R9)(r4)
        lwz r10, VCPU_GPR(R10)(r4)
        lwz r11, VCPU_GPR(R11)(r4)
        lwz r12, VCPU_GPR(R12)(r4)
        lwz r13, VCPU_GPR(R13)(r4)
        lwz r3, VCPU_LR(r4)
        mtlr r3
        lwz r3, VCPU_XER(r4)
        mtxer r3

        /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
         * so how do we make sure vcpu won't fault? */
        lis r8, kvmppc_booke_handlers@ha
        lwz r8, kvmppc_booke_handlers@l(r8)
        mtspr SPRN_IVPR, r8

        lwz r5, VCPU_SHARED(r4)

        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz r1, VCPU_GPR(R1)(r4)

        /*
         * Host interrupt handlers may have clobbered these
         * guest-readable SPRGs, or the guest kernel may have
         * written directly to the shared area, so we
         * need to reload them here with the guest's values.
         */
        PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
        mtspr SPRN_SPRG4W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
        mtspr SPRN_SPRG5W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
        mtspr SPRN_SPRG6W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
        mtspr SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save enter time */
1:
        mfspr r6, SPRN_TBRU
        mfspr r7, SPRN_TBRL
        mfspr r8, SPRN_TBRU
        cmpw r8, r6
        bne 1b
        stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
        stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /* Finish loading guest volatiles and jump to guest. */
        lwz r3, VCPU_CTR(r4)
        lwz r5, VCPU_CR(r4)
        lwz r6, VCPU_PC(r4)
        lwz r7, VCPU_SHADOW_MSR(r4)
        mtctr r3
        mtcr r5
        mtsrr0 r6
        mtsrr1 r7
        lwz r5, VCPU_GPR(R5)(r4)
        lwz r6, VCPU_GPR(R6)(r4)
        lwz r7, VCPU_GPR(R7)(r4)
        lwz r8, VCPU_GPR(R8)(r4)

        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint
         * intended for guest context could fire in the host instead. */
        lis r3, 0xffff
        ori r3, r3, 0xffff
        mtspr SPRN_DBSR, r3

        lwz r3, VCPU_GPR(R3)(r4)
        lwz r4, VCPU_GPR(R4)(r4)
        rfi

        .data
        .align 4
        .globl kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this at the end. */
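
/* Helpers called from C to spill and reload the guest's SPE state: the 32
 * upper GPR halves (the EVR array) plus the 64-bit accumulator. r3 is
 * presumably the vcpu pointer; both routines return immediately if it is
 * NULL. */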
#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
        cmpi 0,r3,0
        beqlr-
        SAVE_32EVRS(0, r4, r3, VCPU_EVR)
        evxor evr6, evr6, evr6
        evmwumiaa evr6, evr6, evr6
        li r4,VCPU_ACC
        evstddx evr6, r4, r3 /* save acc */
        blr

_GLOBAL(kvmppc_load_guest_spe)
        cmpi 0,r3,0
        beqlr-
        li r4,VCPU_ACC
        evlddx evr6,r4,r3
        evmra evr6,evr6 /* load acc */
        REST_32EVRS(0, r4, r3, VCPU_EVR)
        blr
#endif