perf_event.c

/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL		= 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS		= 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES		= 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED		= 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED		= 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC			= 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ			= 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE			= 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE			= 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE			= 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN		= 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS	= 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE			= 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL		= 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB		= 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS		= 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL		= 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR			= 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES			= 0x1D,
};
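/*
 * The values above are the architectural PMUv3 event numbers. Besides the
 * symbolic perf mappings below, they can be requested directly as raw
 * events, e.g. "perf stat -e r11" counts ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES
 * (0x11). Illustrative usage, not part of the original code.
 */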
/* ARMv8 Cortex-A53 specific event types. */
enum armv8_a53_pmu_perf_types {
	ARMV8_A53_PERFCTR_PREFETCH_LINEFILL		= 0xC2,
};

/* ARMv8 Cortex-A57 specific event types. */
enum armv8_a57_perf_types {
	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD		= 0x40,
	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST		= 0x41,
	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD		= 0x42,
	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST		= 0x43,
	ARMV8_A57_PERFCTR_DTLB_REFILL_LD		= 0x4c,
	ARMV8_A57_PERFCTR_DTLB_REFILL_ST		= 0x4d,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
/* ARM Cortex-A57 HW events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_ST,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS	32
#define ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
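/*
 * Worked example of the mapping above: the cycle counter
 * (ARMV8_IDX_CYCLE_COUNTER, idx 0) maps to counter (0 - 1) & 0x1f = 31,
 * which is the architectural bit position reserved for the cycle counter
 * in PMCNTENSET_EL0/PMOVSCLR_EL0, while the first event counter (idx 1)
 * maps to counter 0.
 */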
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK	0x1f
#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */
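/*
 * For example, the number of implemented event counters is recovered with
 * (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK, which is exactly what
 * armv8pmu_read_num_pmnc_events() below does.
 */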
/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1	(1 << 31)
#define ARMV8_EXCLUDE_EL0	(1 << 30)
#define ARMV8_INCLUDE_EL2	(1 << 27)
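/*
 * These correspond to the P (bit 31), U (bit 30) and NSH (bit 27) fields
 * of PMEVTYPERn_EL0: setting P or U suppresses counting at EL1 or EL0
 * respectively, while setting NSH enables counting at EL2.
 */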
static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;

	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline int armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}
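/*
 * PMSELR_EL0 selects which event counter the indirect PMXEVCNTR_EL0 and
 * PMXEVTYPER_EL0 accessors operate on; the isb() above ensures the new
 * selection has taken effect before the access that follows.
 */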
static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
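/*
 * For example, a plain "cycles" event (0x11) always claims the dedicated
 * cycle counter, whereas an L1D refill event (0x03) is placed in the
 * first PMNx counter whose bit is still clear in cpuc->used_mask.
 */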
/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
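/*
 * Illustrative example: a user-space-only event such as
 * "perf stat -e cycles:u" has attr->exclude_kernel and attr->exclude_hv
 * set, so config_base becomes ARMV8_EXCLUDE_EL1 and the counter only
 * increments at EL0.
 */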
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a53_perf_map,
				&armv8_a53_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a57_perf_map,
				&armv8_a57_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}
static void armv8pmu_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				    armv8pmu_read_num_pmnc_events,
				    &arm_pmu->num_events, 1);
}
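/*
 * For example, on a Cortex-A53, which implements six event counters,
 * PMCR.N reads as 6 and num_events becomes 7 once the cycle counter is
 * added.
 */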
static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->max_period		= (1LLU << 32) - 1;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name			= "armv8_pmuv3";
	cpu_pmu->map_event		= armv8_pmuv3_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name			= "armv8_cortex_a53";
	cpu_pmu->map_event		= armv8_a53_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name			= "armv8_cortex_a57";
	cpu_pmu->map_event		= armv8_a57_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{},
};
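/*
 * Example (illustrative) device tree node matched by the table above,
 * using the standard three-cell GIC interrupt specifier (here PPI 7,
 * level-high):
 *
 *	pmu {
 *		compatible = "arm,cortex-a53-pmu";
 *		interrupts = <1 7 4>;
 *	};
 */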
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= "armv8-pmu",
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe		= armv8_pmu_device_probe,
};
static int __init register_armv8_pmu_driver(void)
{
	return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);
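/*
 * Once this driver has probed, the PMU is registered with the perf core
 * under cpu_pmu->name (e.g. "armv8_pmuv3") and appears in sysfs under
 * /sys/bus/event_source/devices/, from where the perf tool can target it.
 */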