/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index used to access perf counters (PMC):
 *   1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC
 *      instruction. For instance, the AMD RDPMC instruction uses 0000_0003h
 *      in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
 *      mechanism, except that it also supports fixed counters. idx can be
 *      used as an index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to the PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
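
/*
 * irq_work callback: deliver a pending PMI to the vcpu. Queued from the
 * overflow NMI handler when the vcpu was not in guest mode, since waking
 * a vcpu is not possible from NMI context.
 */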
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}
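
/*
 * Overflow handler for a counter programmed without PMI delivery: mark the
 * counter for reprogramming, record the overflow in the guest's global
 * status, and request KVM_REQ_PMU so the counter is serviced before the
 * next guest entry. The test_and_set_bit() guard skips the request when a
 * reprogram is already pending for this counter.
 */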
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}
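
/*
 * As kvm_perf_overflow(), but additionally delivers a PMI to the guest:
 * directly via KVM_REQ_PMI if the overflow NMI hit while the vcpu was in
 * guest mode, otherwise deferred through irq_work (see the comment below).
 */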
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest mode re-entry. Otherwise
		 * we can't be sure that the vcpu wasn't executing a hlt
		 * instruction at the time of the vmexit, and it is not going
		 * to re-enter guest mode until woken up. So we should wake
		 * it, but that is impossible from NMI context. Do it from
		 * irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}
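
/*
 * Back a guest PMC with a host perf_event. The event is pinned, counts only
 * while the guest is running (exclude_host), and its sample period is set
 * to the number of events left before the guest counter wraps, so the host
 * overflow callback fires exactly when the guest counter would overflow.
 */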
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm_pmu: event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}
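
/*
 * Program a general-purpose counter from the guest-written event selector
 * MSR. If the event uses none of the edge/invert/cmask/TSX modifiers and
 * maps to a generic perf hardware event, program it as PERF_TYPE_HARDWARE;
 * otherwise fall back to a raw event.
 */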
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							       event_select,
							       unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
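
/*
 * Program a fixed counter from its 4-bit control field: bits 0-1 select the
 * ring levels to count (OS and/or user) and bit 3 enables PMI on overflow.
 */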
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
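
/*
 * Reprogram the counter identified by a global PMC index: gp counters are
 * reprogrammed from their event selector, fixed counters from their field
 * in the fixed counter control MSR.
 */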
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);
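
/*
 * Handle a pending KVM_REQ_PMU: reprogram every counter whose bit is set
 * in reprogram_pmi. The loop walks a local copy of the bitmask because
 * reprogramming clears bits in pmu->reprogram_pmi as it goes; counters
 * without a perf_event just get their bit cleared.
 */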
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}
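
/*
 * Emulate RDPMC. Bit 31 of idx selects "fast" mode, which returns only the
 * low 32 bits of the counter. A non-zero return tells the caller the index
 * was invalid.
 */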
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmc *pmc;
	u64 ctr_val;

	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
	if (!pmc)
		return 1;

	ctr_val = pmc_read_counter(pmc);
	if (fast_mode)
		ctr_val = (u32)ctr_val;

	*data = ctr_val;
	return 0;
}
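
/* Deliver the PMI through the local APIC's performance counter LVT entry. */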
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/* Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as changes to the guest's PMU CPUID),
 * which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->pmu_ops->refresh(vcpu);
}
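
/*
 * Reset the PMU: wait for any outstanding PMI irq_work to finish, then let
 * the vendor implementation clear the counter state.
 */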
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops->pmu_ops->reset(vcpu);
}
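
/*
 * One-time PMU setup at vcpu creation: zero the pmu state, run the vendor
 * init hook, set up the PMI irq_work, and then size the PMU from the
 * current guest CPUID via kvm_pmu_refresh().
 */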
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops->pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}