kvm_host.h

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_HOST_H__
#define __POWERPC_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kvm_para.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/hvcall.h>

#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
#define KVM_USER_MEM_SLOTS 32
#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif

#define KVM_HALT_POLL_NS_DEFAULT 500000

/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 256

#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range(struct kvm *kvm,
                               unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                         unsigned long address)
{
}

#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
#define HPTEG_HASH_BITS_VPTE_64K 11
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K)

/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL

struct lppaca;
struct slb_shadow;
struct dtl_entry;

struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;

struct kvm_vm_stat {
        u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
        u32 sum_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 light_exits;
        /* Account for special types of light exits: */
        u32 itlb_real_miss_exits;
        u32 itlb_virt_miss_exits;
        u32 dtlb_real_miss_exits;
        u32 dtlb_virt_miss_exits;
        u32 syscall_exits;
        u32 isi_exits;
        u32 dsi_exits;
        u32 emulated_inst_exits;
        u32 dec_exits;
        u32 ext_intr_exits;
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_wakeup;
        u32 dbell_exits;
        u32 gdbell_exits;
        u32 ld;
        u32 st;
#ifdef CONFIG_PPC_BOOK3S
        u32 pf_storage;
        u32 pf_instruc;
        u32 sp_storage;
        u32 sp_instruc;
        u32 queue_intr;
        u32 ld_slow;
        u32 st_slow;
#endif
};

enum kvm_exit_types {
        MMIO_EXITS,
        SIGNAL_EXITS,
        ITLB_REAL_MISS_EXITS,
        ITLB_VIRT_MISS_EXITS,
        DTLB_REAL_MISS_EXITS,
        DTLB_VIRT_MISS_EXITS,
        SYSCALL_EXITS,
        ISI_EXITS,
        DSI_EXITS,
        EMULATED_INST_EXITS,
        EMULATED_MTMSRWE_EXITS,
        EMULATED_WRTEE_EXITS,
        EMULATED_MTSPR_EXITS,
        EMULATED_MFSPR_EXITS,
        EMULATED_MTMSR_EXITS,
        EMULATED_MFMSR_EXITS,
        EMULATED_TLBSX_EXITS,
        EMULATED_TLBWE_EXITS,
        EMULATED_RFI_EXITS,
        EMULATED_RFCI_EXITS,
        EMULATED_RFDI_EXITS,
        DEC_EXITS,
        EXT_INTR_EXITS,
        HALT_WAKEUP,
        USR_PR_INST,
        FP_UNAVAIL,
        DEBUG_EXITS,
        TIMEINGUEST,
        DBELL_EXITS,
        GDBELL_EXITS,
        __NUMBER_OF_KVM_EXIT_TYPES
};

/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
        union {
                u64 tv64;
                struct {
                        u32 tbu, tbl;
                } tv32;
        };
};
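
/*
 * Minimal illustrative sketch (a hypothetical helper, not one this header
 * defines): the union above lets a single 64-bit timebase sample be stored
 * once while still exposing the upper/lower 32-bit halves that big-endian
 * layouts expect.
 */
static inline struct kvmppc_exit_timing kvmppc_exit_timing_from_tb(u64 tb)
{
        struct kvmppc_exit_timing t;

        t.tv64 = tb;    /* on a big-endian host, t.tv32.tbu/tbl are the halves */
        return t;
}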

struct kvmppc_pginfo {
        unsigned long pfn;
        atomic_t refcnt;
};

struct kvmppc_spapr_tce_table {
        struct list_head list;
        struct kvm *kvm;
        u64 liobn;
        u32 window_size;
        struct page *pages[0];
};

/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page. The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
        unsigned long guest_rpte;
        unsigned int forw, back;
};
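
/*
 * Illustrative sketch of the ring described above: starting from one HPTE
 * index, visit every HPTE that maps the same host page by following the
 * 32-bit forward pointers until the walk returns to the starting entry.
 * kvmppc_revmap_for_each() and its callback type are assumptions made for
 * the example, not helpers this header actually provides.
 */
static inline void kvmppc_revmap_for_each(struct revmap_entry *revmap,
                                          unsigned int start,
                                          void (*fn)(struct revmap_entry *rev))
{
        unsigned int i = start;

        do {
                fn(&revmap[i]);
                i = revmap[i].forw;
        } while (i != start);
}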

/*
 * We use the top bit of each memslot->arch.rmap entry as a lock bit,
 * and bit 32 as a present flag. The bottom 32 bits are the
 * index in the guest HPT of a HPTE that points to the page.
 */
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
#define KVMPPC_RMAP_CHG_SHIFT 48
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHG_ORDER (0x3ful << KVMPPC_RMAP_CHG_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
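
/*
 * Illustrative sketch of decoding one memslot->arch.rmap word with the masks
 * above (a hypothetical helper; it assumes the caller already holds the
 * KVMPPC_RMAP_LOCK_BIT lock): returns the guest HPT index of the mapping
 * HPTE if one is present, or -1 otherwise. The R/C state can likewise be
 * tested with KVMPPC_RMAP_REFERENCED and KVMPPC_RMAP_CHANGED.
 */
static inline long kvmppc_rmap_to_hpt_index(unsigned long rmap)
{
        if (!(rmap & KVMPPC_RMAP_PRESENT))
                return -1;
        return rmap & KVMPPC_RMAP_INDEX;
}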

struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        unsigned long *rmap;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};

struct kvm_arch {
        unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        unsigned long hpt_virt;
        struct revmap_entry *revmap;
        unsigned int host_lpid;
        unsigned long host_lpcr;
        unsigned long sdr1;
        unsigned long host_sdr1;
        int tlbie_lock;
        unsigned long lpcr;
        unsigned long vrma_slb_v;
        int hpte_setup_done;
        u32 hpt_order;
        atomic_t vcpus_running;
        u32 online_vcores;
        unsigned long hpt_npte;
        unsigned long hpt_mask;
        atomic_t hpte_mod_interest;
        cpumask_t need_tlb_flush;
        int hpt_cma_alloc;
        struct dentry *debugfs_dir;
        struct dentry *htab_dentry;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        struct mutex hpt_mutex;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        struct list_head spapr_tce_tables;
        struct list_head rtas_tokens;
        DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
        struct openpic *mpic;
#endif
#ifdef CONFIG_KVM_XICS
        struct kvmppc_xics *xics;
#endif
        struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        /* This array can grow quite large, keep it at the end */
        struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits. This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
        int n_runnable;
        int num_threads;
        int entry_exit_map;
        int napping_threads;
        int first_vcpuid;
        u16 pcpu;
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
        struct kvmppc_vcore *master_vcore;
        struct list_head runnable_threads;
        struct list_head preempt_list;
        spinlock_t lock;
        wait_queue_head_t wq;
        spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
        u64 stolen_tb;
        u64 preempt_tb;
        struct kvm_vcpu *runner;
        struct kvm *kvm;
        u64 tb_offset; /* guest timebase - host timebase */
        ulong lpcr;
        u32 arch_compat;
        ulong pcr;
        ulong dpdes; /* doorbell state (POWER8) */
        ulong conferring_threads;
};

#define VCORE_ENTRY_MAP(vc) ((vc)->entry_exit_map & 0xff)
#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)

/* This bit is used when a vcore exit is triggered from outside the vcore */
#define VCORE_EXIT_REQ 0x10000
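
/*
 * Illustrative sketch of the lockless entry protocol described above: try to
 * set one thread's entry bit atomically, but only while no exit bit has been
 * set yet. This is a hypothetical helper for the example; the real entry
 * path lives in the HV run code, not in this header.
 */
static inline bool kvmppc_try_enter_vcore(struct kvmppc_vcore *vc, int thread)
{
        int old, new;

        do {
                old = vc->entry_exit_map;
                if (old >> 8)   /* some thread is already exiting */
                        return false;
                new = old | (1 << thread);
        } while (cmpxchg(&vc->entry_exit_map, old, new) != old);

        return true;
}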

/*
 * Values for vcore_state.
 * Note that these are arranged such that lower values
 * (< VCORE_SLEEPING) don't require stolen time accounting
 * on load/unload, and higher values do.
 */
#define VCORE_INACTIVE 0
#define VCORE_PREEMPT 1
#define VCORE_PIGGYBACK 2
#define VCORE_SLEEPING 3
#define VCORE_RUNNING 4
#define VCORE_EXITING 5
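
/*
 * Illustrative use of the ordering documented above (a hypothetical helper,
 * not one the HV code defines here): only states at or above VCORE_SLEEPING
 * require stolen-time accounting when the vcore is loaded or unloaded.
 */
static inline bool kvmppc_vcore_state_accounts_stolen(u8 vcore_state)
{
        return vcore_state >= VCORE_SLEEPING;
}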

/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest. There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
        unsigned long gpa; /* Current guest phys addr */
        void *pinned_addr; /* Address in kernel linear mapping */
        void *pinned_end; /* End of region */
        unsigned long next_gpa; /* Guest phys addr for update */
        unsigned long len; /* Number of bytes required */
        u8 update_pending; /* 1 => update pinned_addr from next_gpa */
        bool dirty; /* true => area has been modified by kernel */
};

struct kvmppc_pte {
        ulong eaddr;
        u64 vpage;
        ulong raddr;
        bool may_read : 1;
        bool may_write : 1;
        bool may_execute : 1;
        u8 page_size; /* MMU_PAGE_xxx */
};

struct kvmppc_mmu {
        /* book3s_64 only */
        void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
        u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
        u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
        void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
        void (*slbia)(struct kvm_vcpu *vcpu);
        /* book3s */
        void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
        u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
        int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
                     struct kvmppc_pte *pte, bool data, bool iswrite);
        void (*reset_msr)(struct kvm_vcpu *vcpu);
        void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
        int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
        u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
        bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};

struct kvmppc_slb {
        u64 esid;
        u64 vsid;
        u64 orige;
        u64 origv;
        bool valid : 1;
        bool Ks : 1;
        bool Kp : 1;
        bool nx : 1;
        bool large : 1; /* PTEs are 16MB */
        bool tb : 1; /* 1TB segment */
        bool class : 1;
        u8 base_page_size; /* MMU_PAGE_xxx */
};

/* Struct used to accumulate timing information in HV real mode code */
struct kvmhv_tb_accumulator {
        u64 seqcount; /* used to synchronize access, also count * 2 */
        u64 tb_total; /* total time in timebase ticks */
        u64 tb_min; /* min time */
        u64 tb_max; /* max time */
};
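
/*
 * Illustrative sketch of a consistent read of an accumulator using the
 * seqcount field described above (an odd value means an update is in
 * progress). A hypothetical helper; the real readers and writers live in
 * the HV exit-timing code and add the barriers needed in real mode.
 */
static inline u64 kvmhv_tb_total_snapshot(struct kvmhv_tb_accumulator *acc)
{
        u64 seq, total;

        do {
                seq = READ_ONCE(acc->seqcount);
                total = READ_ONCE(acc->tb_total);
        } while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);

        return total;
}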

# ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
# else
#define KVMPPC_BOOKE_IAC_NUM 4
#define KVMPPC_BOOKE_DAC_NUM 2
# endif
#define KVMPPC_BOOKE_MAX_IAC 4
#define KVMPPC_BOOKE_MAX_DAC 2

/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
#define KVMPPC_EPR_NONE 0 /* EPR not supported */
#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */

#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2

struct openpic;

struct kvm_vcpu_arch {
        ulong host_stack;
        u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
        struct kvmppc_slb slb[64];
        int slb_max; /* 1 + index of last valid entry in slb[] */
        int slb_nr; /* total number of entries in SLB */
        struct kvmppc_mmu mmu;
        struct kvmppc_vcpu_book3s *book3s;
#endif
#ifdef CONFIG_PPC_BOOK3S_32
        struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
        ulong gpr[32];
        struct thread_fp_state fp;
#ifdef CONFIG_SPE
        ulong evr[32];
        ulong spefscr;
        ulong host_spefscr;
        u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
        struct thread_vr_state vr;
#endif
#ifdef CONFIG_KVM_BOOKE_HV
        u32 host_mas4;
        u32 host_mas6;
        u32 shadow_epcr;
        u32 shadow_msrp;
        u32 eplc;
        u32 epsc;
        u32 oldpir;
#endif
#if defined(CONFIG_BOOKE)
#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
        u32 epcr;
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S
        /* For Gekko paired singles */
        u32 qpr[32];
#endif
        ulong pc;
        ulong ctr;
        ulong lr;
#ifdef CONFIG_PPC_BOOK3S
        ulong tar;
#endif
        ulong xer;
        u32 cr;
#ifdef CONFIG_PPC_BOOK3S
        ulong hflags;
        ulong guest_owned_ext;
        ulong purr;
        ulong spurr;
        ulong ic;
        ulong vtb;
        ulong dscr;
        ulong amr;
        ulong uamor;
        ulong iamr;
        u32 ctrl;
        u32 dabrx;
        ulong dabr;
        ulong dawr;
        ulong dawrx;
        ulong ciabr;
        ulong cfar;
        ulong ppr;
        u32 pspb;
        ulong fscr;
        ulong shadow_fscr;
        ulong ebbhr;
        ulong ebbrr;
        ulong bescr;
        ulong csigr;
        ulong tacr;
        ulong tcscr;
        ulong acop;
        ulong wort;
        ulong shadow_srr1;
#endif
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
        /* shadow_msr is unused for BookE HV */
        ulong shadow_msr;
        ulong csrr0;
        ulong csrr1;
        ulong dsrr0;
        ulong dsrr1;
        ulong mcsrr0;
        ulong mcsrr1;
        ulong mcsr;
        u32 dec;
#ifdef CONFIG_BOOKE
        u32 decar;
#endif
        /* Time base value when we entered the guest */
        u64 entry_tb;
        u64 entry_vtb;
        u64 entry_ic;
        u32 tcr;
        ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
        u32 ivor[64];
        ulong ivpr;
        u32 pvr;
        u32 shadow_pid;
        u32 shadow_pid1;
        u32 pid;
        u32 swap_pid;
        u32 ccr0;
        u32 ccr1;
        u32 dbsr;
        u64 mmcr[5];
        u32 pmc[8];
        u32 spmc[2];
        u64 siar;
        u64 sdar;
        u64 sier;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        u64 tfhar;
        u64 texasr;
        u64 tfiar;
        u32 cr_tm;
        u64 xer_tm;
        u64 lr_tm;
        u64 ctr_tm;
        u64 amr_tm;
        u64 ppr_tm;
        u64 dscr_tm;
        u64 tar_tm;
        ulong gpr_tm[32];
        struct thread_fp_state fp_tm;
        struct thread_vr_state vr_tm;
        u32 vrsave_tm; /* also USPRG0 */
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
        struct mutex exit_timing_lock;
        struct kvmppc_exit_timing timing_exit;
        struct kvmppc_exit_timing timing_last_enter;
        u32 last_exit_type;
        u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_last_exit;
        struct dentry *debugfs_exit_timing;
#endif
#ifdef CONFIG_PPC_BOOK3S
        ulong fault_dar;
        u32 fault_dsisr;
        unsigned long intr_msr;
#endif
#ifdef CONFIG_BOOKE
        ulong fault_dear;
        ulong fault_esr;
        ulong queued_dear;
        ulong queued_esr;
        spinlock_t wdt_lock;
        struct timer_list wdt_timer;
        u32 tlbcfg[4];
        u32 tlbps[4];
        u32 mmucfg;
        u32 eptcfg;
        u32 epr;
        u64 sprg9;
        u32 pwrmgtcr0;
        u32 crit_save;
        /* guest debug registers */
        struct debug_reg dbg_reg;
#endif
        gpa_t paddr_accessed;
        gva_t vaddr_accessed;
        pgd_t *pgdir;
        u8 io_gpr; /* GPR used as IO source/target */
        u8 mmio_host_swabbed;
        u8 mmio_sign_extend;
        u8 osi_needed;
        u8 osi_enabled;
        u8 papr_enabled;
        u8 watchdog_enabled;
        u8 sane;
        u8 cpu_type;
        u8 hcall_needed;
        u8 epr_flags; /* KVMPPC_EPR_xxx */
        u8 epr_needed;
        u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
        struct hrtimer dec_timer;
        u64 dec_jiffies;
        u64 dec_expires;
        unsigned long pending_exceptions;
        u8 ceded;
        u8 prodded;
        u32 last_inst;
        wait_queue_head_t *wqp;
        struct kvmppc_vcore *vcore;
        int ret;
        int trap;
        int state;
        int ptid;
        int thread_cpu;
        bool timer_running;
        wait_queue_head_t cpu_run;
        struct kvm_vcpu_arch_shared *shared;
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        bool shared_big_endian;
#endif
        unsigned long magic_page_pa; /* phys addr to map the magic page to */
        unsigned long magic_page_ea; /* effect. addr to map the magic page to */
        bool disable_kernel_nx;
        int irq_type; /* one of KVM_IRQ_* */
        int irq_cpu_id;
        struct openpic *mpic; /* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
        struct kvmppc_icp *icp; /* XICS presentation controller */
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        struct kvm_vcpu_arch_shared shregs;
        unsigned long pgfault_addr;
        long pgfault_index;
        unsigned long pgfault_hpte[2];
        struct list_head run_list;
        struct task_struct *run_task;
        struct kvm_run *kvm_run;
        spinlock_t vpa_update_lock;
        struct kvmppc_vpa vpa;
        struct kvmppc_vpa dtl;
        struct dtl_entry *dtl_ptr;
        unsigned long dtl_index;
        u64 stolen_logged;
        struct kvmppc_vpa slb_shadow;
        spinlock_t tbacct_lock;
        u64 busy_stolen;
        u64 busy_preempt;
        u32 emul_inst;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
        u64 cur_tb_start; /* when it started */
        struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
        struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
        struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
        struct kvmhv_tb_accumulator guest_time; /* guest execution */
        struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
        struct dentry *debugfs_dir;
        struct dentry *debugfs_timings;
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};

#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
#define KVMPPC_VCPU_BUSY_IN_HOST 2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
#define KVM_MMIO_REG_EXT_MASK 0xffe0
#define KVM_MMIO_REG_GPR 0x0000
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
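
/*
 * Illustrative sketch of decoding vcpu->arch.io_gpr with the masks above:
 * the low bits select a register index within a class, the extension bits
 * select the class (GPR/FPR/QPR/FQPR). Hypothetical helpers added for the
 * example only, not part of the MMIO emulation code itself.
 */
static inline unsigned int kvm_mmio_reg_index(u8 io_gpr)
{
        return io_gpr & KVM_MMIO_REG_MASK;
}

static inline unsigned int kvm_mmio_reg_class(u8 io_gpr)
{
        return io_gpr & KVM_MMIO_REG_EXT_MASK; /* e.g. KVM_MMIO_REG_FPR */
}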

#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#endif /* __POWERPC_KVM_HOST_H__ */