/* arch/x86/kvm/cpuid.h — guest CPUID helpers for KVM on x86. */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

/* Refresh vcpu state derived from its CPUID entries (e.g. maxphyaddr). */
int kvm_update_cpuid(struct kvm_vcpu *vcpu);

/* NOTE(review): presumably a host-capability query for MPX — confirm in the definition. */
bool kvm_mpx_supported(void);

/*
 * Look up the guest CPUID entry for @function/@index; returns NULL when
 * no matching entry exists (all callers below NULL-check the result).
 */
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

/* ioctl backends for the KVM_GET/SET_CPUID* userspace interfaces. */
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);

/* Guest CPUID execution: inputs and results are passed through *eax..*edx. */
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

/* NOTE(review): presumably recomputes MAXPHYADDR from the CPUID table (uncached). */
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
/* Return the guest's MAXPHYADDR as cached in vcpu->arch.maxphyaddr. */
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
  26. static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
  27. {
  28. struct kvm_cpuid_entry2 *best;
  29. if (!static_cpu_has(X86_FEATURE_XSAVE))
  30. return false;
  31. best = kvm_find_cpuid_entry(vcpu, 1, 0);
  32. return best && (best->ecx & bit(X86_FEATURE_XSAVE));
  33. }
  34. static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
  35. {
  36. struct kvm_cpuid_entry2 *best;
  37. best = kvm_find_cpuid_entry(vcpu, 1, 0);
  38. return best && (best->edx & bit(X86_FEATURE_MTRR));
  39. }
  40. static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
  41. {
  42. struct kvm_cpuid_entry2 *best;
  43. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  44. return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
  45. }
  46. static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
  47. {
  48. struct kvm_cpuid_entry2 *best;
  49. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  50. return best && (best->ebx & bit(X86_FEATURE_SMEP));
  51. }
  52. static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
  53. {
  54. struct kvm_cpuid_entry2 *best;
  55. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  56. return best && (best->ebx & bit(X86_FEATURE_SMAP));
  57. }
  58. static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
  59. {
  60. struct kvm_cpuid_entry2 *best;
  61. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  62. return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
  63. }
  64. static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
  65. {
  66. struct kvm_cpuid_entry2 *best;
  67. best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
  68. return best && (best->edx & bit(X86_FEATURE_LM));
  69. }
  70. static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
  71. {
  72. struct kvm_cpuid_entry2 *best;
  73. best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
  74. return best && (best->ecx & bit(X86_FEATURE_OSVW));
  75. }
  76. static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
  77. {
  78. struct kvm_cpuid_entry2 *best;
  79. best = kvm_find_cpuid_entry(vcpu, 1, 0);
  80. return best && (best->ecx & bit(X86_FEATURE_PCID));
  81. }
  82. static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
  83. {
  84. struct kvm_cpuid_entry2 *best;
  85. best = kvm_find_cpuid_entry(vcpu, 1, 0);
  86. return best && (best->ecx & bit(X86_FEATURE_X2APIC));
  87. }
  88. static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
  89. {
  90. struct kvm_cpuid_entry2 *best;
  91. best = kvm_find_cpuid_entry(vcpu, 0, 0);
  92. return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
  93. }
  94. static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
  95. {
  96. struct kvm_cpuid_entry2 *best;
  97. best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
  98. return best && (best->edx & bit(X86_FEATURE_GBPAGES));
  99. }
  100. static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
  101. {
  102. struct kvm_cpuid_entry2 *best;
  103. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  104. return best && (best->ebx & bit(X86_FEATURE_RTM));
  105. }
  106. static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
  107. {
  108. struct kvm_cpuid_entry2 *best;
  109. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  110. return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
  111. }
  112. static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
  113. {
  114. struct kvm_cpuid_entry2 *best;
  115. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  116. return best && (best->ebx & bit(X86_FEATURE_MPX));
  117. }
  118. static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
  119. {
  120. struct kvm_cpuid_entry2 *best;
  121. best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
  122. return best && (best->edx & bit(X86_FEATURE_RDTSCP));
  123. }
  124. static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
  125. {
  126. struct kvm_cpuid_entry2 *best;
  127. best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
  128. if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
  129. return true;
  130. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  131. return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
  132. }
  133. static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
  134. {
  135. struct kvm_cpuid_entry2 *best;
  136. best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
  137. if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
  138. return true;
  139. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  140. return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
  141. }
  142. static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
  143. {
  144. struct kvm_cpuid_entry2 *best;
  145. best = kvm_find_cpuid_entry(vcpu, 7, 0);
  146. return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
  147. }
  148. static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
  149. {
  150. struct kvm_cpuid_entry2 *best;
  151. best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
  152. return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
  153. }
  154. /*
  155. * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
  156. */
  157. #define BIT_NRIPS 3
  158. static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
  159. {
  160. struct kvm_cpuid_entry2 *best;
  161. best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
  162. /*
  163. * NRIPS is a scattered cpuid feature, so we can't use
  164. * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
  165. * position 8, not 3).
  166. */
  167. return best && (best->edx & bit(BIT_NRIPS));
  168. }
  169. #undef BIT_NRIPS
  170. #endif