pmu_intel.c

/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

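/*
 * Reprogram only the fixed counters whose per-counter control field in the
 * guest's IA32_FIXED_CTR_CTRL value actually changed, then cache the new
 * value.
 */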
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                struct kvm_pmc *pmc;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                if (old_ctrl == new_ctrl)
                        continue;

                reprogram_fixed_counter(pmc, new_ctrl, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_counter(pmu, bit);
}

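/*
 * Translate a guest event_select/unit_mask pair into a generic perf event
 * type, provided CPUID 0x0A reports the corresponding architectural event
 * as available; otherwise return PERF_COUNT_HW_MAX.
 */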
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
                if (intel_arch_events[i].eventsel == event_select
                    && intel_arch_events[i].unit_mask == unit_mask
                    && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[i].event_type;
}

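/* Map a fixed counter index to the generic perf event it counts. */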
static unsigned intel_find_fixed_event(int idx)
{
        if (idx >= ARRAY_SIZE(fixed_pmc_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

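/*
 * Translate a global PMC index into the matching kvm_pmc: indices below
 * INTEL_PMC_IDX_FIXED are general-purpose counters, the rest are fixed.
 */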
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
                (fixed && idx >= pmu->nr_arch_fixed_counters);
}

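/*
 * Decode an RDPMC index: bit 30 selects the fixed counters, the low bits
 * select the counter itself.  Returns NULL if the index is out of range.
 */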
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;

        idx &= ~(3u << 30);
        if (!fixed && idx >= pmu->nr_arch_gp_counters)
                return NULL;
        if (fixed && idx >= pmu->nr_arch_fixed_counters)
                return NULL;
        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;

        return &counters[idx];
}

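/*
 * An MSR is handled by the vPMU if it is one of the global control/status
 * MSRs (PMU version > 1 only) or it maps to a counter or event-select MSR.
 */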
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr);
                break;
        }

        return ret;
}

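/* Emulate a guest RDMSR of a PMU MSR; returns 0 on success, 1 if unhandled. */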
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                *data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                *data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                *data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                *data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_fixed_pmc(pmu, msr))) {
                        *data = pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
                        return 0;
                }
        }

        return 1;
}

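/*
 * Emulate a guest WRMSR to a PMU MSR.  Writes that set reserved bits fall
 * through and return 1 so the caller can inject #GP.
 */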
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
                if (!(data & 0xfffffffffffff444ull)) {
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (!(data & pmu->global_ctrl_mask)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_fixed_pmc(pmu, msr))) {
                        if (!msr_info->host_initiated)
                                data = (s64)(s32)data;
                        pmc->counter += data - pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }

        return 1;
}

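/*
 * Size the vPMU from the guest's CPUID leaf 0x0A: number and width of
 * general-purpose and fixed counters, the available-events mask, and the
 * derived global_ctrl and reserved-bit masks.
 */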
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;

        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

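/* One-time setup of the per-vCPU counter arrays: type, owning vCPU and index. */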
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
}

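/* Stop all counters and clear the PMU's control and status state. */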
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
                pmc_stop_counter(&pmu->fixed_counters[i]);

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}

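/* Intel implementation of the kvm_pmu_ops callbacks used by the common PMU code. */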
struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_msr_idx = intel_is_valid_msr_idx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
};