/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
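
/*
 * kvm_emul.S: instruction templates for the PPC KVM paravirtualization
 * layer.  The in-kernel patching code (arch/powerpc/kernel/kvm.c) uses
 * these templates to rewrite selected privileged instructions in the
 * guest, so that the common case is handled through the shared magic
 * page instead of trapping into the hypervisor.
 */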

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVM_MAGIC_PAGE		(-4096)
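
/*
 * The magic page sits in the last 4k of the effective address space.
 * The accesses below use 0 as the base register, which the Power ISA
 * treats as a literal zero, so each displacement resolves to an
 * absolute address inside that page.
 */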

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif

#define SCRATCH_SAVE						\
	/* Enable critical section. We are critical if		\
	   shared->critical == r1 */				\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);	\
								\
	/* Save state */					\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
	mfcr	r31;						\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE						\
	/* Restore state */					\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);	\
	mtcr	r30;						\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
								\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */	\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
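
/*
 * Between SCRATCH_SAVE and SCRATCH_RESTORE the templates below may
 * freely clobber r30, r31 and CR; the original values are kept in the
 * magic page scratch slots and put back on restore.
 */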

.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync
	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:
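
/*
 * Word offsets into the template above.  The patching code uses these
 * to locate the instructions it has to fix up: the register-move
 * placeholders (*_reg), the slot that receives the original
 * instruction (*_orig_ins), and the final branch back to the caller
 * (*_branch).  *_len is the template length in instructions.
 */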

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4

#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
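
/*
 * Only MSR[EE] and MSR[RI] can be updated purely in the magic page.
 * If any other ("critical") bit changes, the template below falls back
 * to executing the original mtmsr.
 */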

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0
	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0
	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	/* If the guest has translation enabled, run the original mtsrin */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 (SR number * 4), offset into the magic page SR array */
	rlwinm	r30,r0,6,26,29

	/* Translation is off: just update the SR copy in the magic page */
kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end: