/* kvm_cache_regs.h */

#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS                               \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

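/*
 * General-purpose registers are cached lazily: regs_avail tracks which
 * entries of vcpu->arch.regs currently hold a valid value.  On a cache
 * miss the vendor module (VMX/SVM) fills the entry via ->cache_reg().
 */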
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                              enum kvm_reg reg)
{
        if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, reg);

        return vcpu->arch.regs[reg];
}

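/*
 * Writes mark the register both available and dirty; dirty registers
 * are flushed back to the hardware state before the next VM entry.
 */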
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      enum kvm_reg reg,
                                      unsigned long val)
{
        vcpu->arch.regs[reg] = val;
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

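/* Convenience wrappers for the guest instruction pointer. */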
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

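/*
 * PDPTRs (PAE page-directory pointers) are cached under the
 * VCPU_EXREG_PDPTR pseudo-register.  Refilling the cache may require
 * reading guest memory, which on SVM can sleep.
 */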
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on svm */
        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}

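/*
 * CR0/CR4 bits listed in KVM_POSSIBLE_CR*_GUEST_BITS may be guest-owned,
 * i.e. changed by the guest without a VM exit.  Reading such a bit first
 * asks the vendor module to decache the current value from hardware.
 */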
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

        if (tmask & vcpu->arch.cr0_guest_owned_bits)
                kvm_x86_ops->decache_cr0_guest_bits(vcpu);

        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

        if (tmask & vcpu->arch.cr4_guest_owned_bits)
                kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        return vcpu->arch.cr4 & mask;
}

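/* CR3 is cached under the VCPU_EXREG_CR3 pseudo-register. */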
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
                kvm_x86_ops->decache_cr3(vcpu);

        return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

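/*
 * Combine the low 32 bits of RAX and RDX into the EDX:EAX pair used by
 * instructions such as RDMSR and RDTSC.
 */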
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
                | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

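/*
 * HF_GUEST_MASK is set while the vCPU is running a nested (L2) guest
 * on behalf of its hypervisor (L1).
 */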
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

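/* True while the vCPU is in System Management Mode. */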
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */