/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
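
/*
 * The host maps a single shared "magic page" at the very top of the guest
 * effective address space, so its fields are reachable with a negative
 * displacement from register 0 (which the load/store encodings below treat
 * as the literal value zero).  magic_var() yields the effective address of
 * one field of struct kvm_vcpu_arch_shared inside that page.
 */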
#define KVM_MAGIC_PAGE      (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ        0x80000000
#define KVM_INST_STW        0x90000000
#define KVM_INST_LD         0xe8000000
#define KVM_INST_STD        0xf8000000
#define KVM_INST_NOP        0x60000000
#define KVM_INST_B          0x48000000
#define KVM_INST_B_MASK     0x03ffffff
#define KVM_INST_B_MAX      0x01ffffff
#define KVM_INST_LI         0x38000000

#define KVM_MASK_RT         0x03e00000
#define KVM_RT_30           0x03c00000
#define KVM_MASK_RB         0x0000f800
#define KVM_INST_MFMSR      0x7c0000a6

#define SPR_FROM            0
#define SPR_TO              0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
                                    (((sprn) & 0x1f) << 16) | \
                                    (((sprn) & 0x3e0) << 6) | \
                                    (moveto))
#define KVM_INST_MFSPR(sprn)    KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)    KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC    0x7c00046c
#define KVM_INST_MTMSRD_L0  0x7c000164
#define KVM_INST_MTMSRD_L1  0x7c010164
#define KVM_INST_MTMSR      0x7c000124
#define KVM_INST_WRTEE      0x7c000106
#define KVM_INST_WRTEEI_0   0x7c000146
#define KVM_INST_WRTEEI_1   0x7c008146
#define KVM_INST_MTSRIN     0x7c0001e4
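
/*
 * kvm_patching_worked records whether every patch so far has succeeded.
 * kvm_tmp is scratch space that kvm_alloc() carves trampolines out of;
 * whatever ends up unused is handed back to the page allocator at boot.
 */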
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
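
/* Swap one instruction in place, then make the icache see the new copy. */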
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
        *inst = new_inst;
        flush_icache_range((ulong)inst, (ulong)inst + 4);
}
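
/*
 * Helpers that rewrite an instruction into a load from, or store to, the
 * magic page field at addr.  RA is 0 (the literal value zero), so the
 * sign-extended displacement lands at the top of the address space.  The
 * _ll variant accesses the native word size; _ld/_std access a 64-bit
 * field, using its low word (addr + 4) on 32-bit kernels.
 */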
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
        kvm_patch_ins(inst, KVM_INST_NOP);
}
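
/* Turn an instruction into an unconditional relative branch. */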
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
        /* On relocatable kernels interrupt handlers and our code
           can be in different regions, so we don't patch them */
        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
#endif
        kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
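
/* Carve len bytes of trampoline space out of kvm_tmp. */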
static u32 *kvm_alloc(int len)
{
        u32 *p;

        if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
                printk(KERN_ERR "KVM: No more space (%d + %d)\n",
                                kvm_tmp_index, len);
                kvm_patching_worked = false;
                return NULL;
        }

        p = (void*)&kvm_tmp[kvm_tmp_index];
        kvm_tmp_index += len;

        return p;
}
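
/*
 * The kvm_emulate_* arrays and their *_offs/*_len companions are emulation
 * templates exported by the assembly side of this code (kvm_emul.S).  Each
 * patch routine below copies a template into kvm_tmp, fills in the
 * instruction slots named by the offsets, and finally replaces the original
 * privileged instruction with a branch into the copy.
 */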
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
        p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsrd_reg_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}
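
/*
 * The mtmsr template reads the source register at two points, hence the
 * reg1 and reg2 slots.  r30 and r31 are clobbered by the trampoline itself,
 * so guest instructions using them are redirected to the scratch fields
 * that the trampoline saved into the magic page.
 */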
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
        p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

        /* Make clobbered registers work too */
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch2), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch1), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsr_reg1_offs] |= rt;
                p[kvm_emulate_mtmsr_reg2_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}
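
/*
 * BookE guests toggle MSR[EE] with wrtee/wrteei rather than mtmsr.  wrteei
 * with EE=1 reuses the wrtee template, planting "li r30, MSR_EE" into the
 * register slot; wrteei with EE=0 gets a dedicated, simpler template.
 */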
#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrtee_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
        p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

        if (imm_one) {
                p[kvm_emulate_wrtee_reg_offs] =
                        KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
        } else {
                /* Make clobbered registers work too */
                switch (get_rt(rt)) {
                case 30:
                        kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
                                         magic_var(scratch2), KVM_RT_30);
                        break;
                case 31:
                        kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
                                         magic_var(scratch1), KVM_RT_30);
                        break;
                default:
                        p[kvm_emulate_wrtee_reg_offs] |= rt;
                        break;
                }
        }

        p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
        p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}
#endif
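
/*
 * On 32-bit Book3S, guests update segment registers with mtsrin.  When the
 * host advertises KVM_MAGIC_FEAT_SR, the template (see kvm_emulate_mtsrin
 * in kvm_emul.S) can often satisfy the update through the magic page
 * instead of trapping to the host.
 */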
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
        p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
        p[kvm_emulate_mtsrin_reg2_offs] |= rt;
        p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}
#endif
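
/*
 * Ask the host, via an ePAPR hypercall, to map the magic page at the top
 * of this CPU's effective address space; out[0] reports which optional
 * paravirt features the host supports.
 */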
static void kvm_map_magic_page(void *data)
{
        u32 *features = data;

        ulong in[8] = {0};
        ulong out[8];

        in[0] = KVM_MAGIC_PAGE;
        in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

        epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

        *features = out[0];
}
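
/*
 * Inspect one kernel-text instruction and patch it if we know a cheaper
 * paravirtualized equivalent.  Plain SPR reads/writes become loads/stores
 * against the magic page; heavier instructions branch into a trampoline.
 *
 * An illustrative sketch (not part of the original source): "mfmsr r5"
 * encodes as 0x7ca000a6; masking off the RT field leaves KVM_INST_MFMSR,
 * so on a 64-bit kernel the instruction is rewritten to roughly
 *
 *      ld      r5, magic_var(msr)(0)
 *
 * i.e. a load of the shared MSR copy instead of a privileged instruction
 * the host would have to emulate.
 */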
static void kvm_check_ins(u32 *inst, u32 features)
{
        u32 _inst = *inst;
        u32 inst_no_rt = _inst & ~KVM_MASK_RT;
        u32 inst_rt = _inst & KVM_MASK_RT;

        switch (inst_no_rt) {
        /* Loads */
        case KVM_INST_MFMSR:
                kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG0):
                kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG1):
                kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG2):
                kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG3):
                kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SRR0):
                kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SRR1):
                kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_DEAR):
#else
        case KVM_INST_MFSPR(SPRN_DAR):
#endif
                kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_DSISR):
                kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
                break;

#ifdef CONFIG_PPC_BOOK3E_MMU
        case KVM_INST_MFSPR(SPRN_MAS0):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS1):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS2):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS3):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS4):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS6):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS7):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
                break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

        case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
                break;

#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_ESR):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
                break;
#endif

        case KVM_INST_MFSPR(SPRN_PIR):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
                break;

        /* Stores */
        case KVM_INST_MTSPR(SPRN_SPRG0):
                kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG1):
                kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG2):
                kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG3):
                kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SRR0):
                kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SRR1):
                kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_MTSPR(SPRN_DEAR):
#else
        case KVM_INST_MTSPR(SPRN_DAR):
#endif
                kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_DSISR):
                kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
                break;

#ifdef CONFIG_PPC_BOOK3E_MMU
        case KVM_INST_MTSPR(SPRN_MAS0):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS1):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS2):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS3):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS4):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS6):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS7):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
                break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

        case KVM_INST_MTSPR(SPRN_SPRG4):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG5):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG6):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG7):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
                break;

#ifdef CONFIG_BOOKE
        case KVM_INST_MTSPR(SPRN_ESR):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
                break;
#endif

        /* Nops */
        case KVM_INST_TLBSYNC:
                kvm_patch_ins_nop(inst);
                break;

        /* Rewrites */
        case KVM_INST_MTMSRD_L1:
                kvm_patch_ins_mtmsrd(inst, inst_rt);
                break;
        case KVM_INST_MTMSR:
        case KVM_INST_MTMSRD_L0:
                kvm_patch_ins_mtmsr(inst, inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEE:
                kvm_patch_ins_wrtee(inst, inst_rt, 0);
                break;
#endif
        }

        switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
        case KVM_INST_MTSRIN:
                if (features & KVM_MAGIC_FEAT_SR) {
                        u32 inst_rb = _inst & KVM_MASK_RB;
                        kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
                }
                break;
#endif
        }

        switch (_inst) {
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEEI_0:
                kvm_patch_ins_wrteei_0(inst);
                break;

        case KVM_INST_WRTEEI_1:
                kvm_patch_ins_wrtee(inst, 0, 1);
                break;
#endif
        }
}
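
/*
 * kvm_template_start/kvm_template_end bracket the assembly templates in
 * the kernel image so the scan below can step over them rather than
 * patching the patcher.
 */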
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
        u32 *p;
        u32 *start, *end;
        u32 tmp;
        u32 features;

        /* Tell the host to map the magic page to -4096 on all CPUs */
        on_each_cpu(kvm_map_magic_page, &features, 1);

        /* Quick self-test to see if the mapping works */
        if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
                kvm_patching_worked = false;
                return;
        }

        /* Now loop through all code and find instructions */
        start = (void*)_stext;
        end = (void*)_etext;

        /*
         * Being interrupted in the middle of patching would
         * be bad for SPRG4-7, which KVM can't keep in sync
         * with emulated accesses because reads don't trap.
         */
        local_irq_disable();

        for (p = start; p < end; p++) {
                /* Avoid patching the template code */
                if (p >= kvm_template_start && p < kvm_template_end) {
                        p = kvm_template_end - 1;
                        continue;
                }
                kvm_check_ins(p, features);
        }

        local_irq_enable();

        printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
                         kvm_patching_worked ? "worked" : "failed");
}
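
/* Return the unused tail of kvm_tmp to the page allocator. */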
static __init void kvm_free_tmp(void)
{
        free_reserved_area(&kvm_tmp[kvm_tmp_index],
                           &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}
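
/*
 * Runs early in boot: patch only when we are actually a KVM guest (the
 * hypervisor node is present and ePAPR hypercalls are set up) and the
 * host offers the magic page feature.
 */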
static int __init kvm_guest_init(void)
{
        if (!kvm_para_available())
                goto free_tmp;

        if (!epapr_paravirt_enabled)
                goto free_tmp;

        if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
                kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
        /* Enable napping */
        powersave_nap = 1;
#endif

free_tmp:
        kvm_free_tmp();

        return 0;
}

postcore_initcall(kvm_guest_init);