/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014 ARM Limited
 */

#include <linux/ctype.h>
#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CCN_NUM_XP_PORTS 2
#define CCN_NUM_VCS 4

#define CCN_NUM_REGIONS 256
#define CCN_REGION_SIZE 0x10000

#define CCN_ALL_OLY_ID 0xff00
#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0
#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f
#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8
#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f

#define CCN_MN_ERRINT_STATUS 0x0008
#define CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT 0x11
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88
#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0
#define CCN_MN_ERR_SIG_VAL_63_0 0x0300
#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1)

#define CCN_DT_ACTIVE_DSM 0x0000
#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8)
#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff
#define CCN_DT_CTL 0x0028
#define CCN_DT_CTL__DT_EN (1 << 0)
#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8)
#define CCN_DT_PMCCNTR 0x0140
#define CCN_DT_PMCCNTRSR 0x0190
#define CCN_DT_PMOVSR 0x0198
#define CCN_DT_PMOVSR_CLR 0x01a0
#define CCN_DT_PMOVSR_CLR__MASK 0x1f
#define CCN_DT_PMCR 0x01a8
#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6)
#define CCN_DT_PMCR__PMU_EN (1 << 0)
#define CCN_DT_PMSR 0x01b0
#define CCN_DT_PMSR_REQ 0x01b8
#define CCN_DT_PMSR_CLR 0x01c0

#define CCN_HNF_PMU_EVENT_SEL 0x0600
#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf

#define CCN_XP_DT_CONFIG 0x0300
#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4)
#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf
#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
#define CCN_XP_DT_INTERFACE_SEL 0x0308
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3
#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40)
#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40)
#define CCN_XP_DT_CONTROL 0x0370
#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf
#define CCN_XP_PMU_EVENT_SEL 0x0600
#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7)
#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f

#define CCN_SBAS_PMU_EVENT_SEL 0x0600
#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf

#define CCN_RNI_PMU_EVENT_SEL 0x0600
#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf

#define CCN_TYPE_MN 0x01
#define CCN_TYPE_DT 0x02
#define CCN_TYPE_HNF 0x04
#define CCN_TYPE_HNI 0x05
#define CCN_TYPE_XP 0x08
#define CCN_TYPE_SBSX 0x0c
#define CCN_TYPE_SBAS 0x10
#define CCN_TYPE_RNI_1P 0x14
#define CCN_TYPE_RNI_2P 0x15
#define CCN_TYPE_RNI_3P 0x16
#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */
#define CCN_TYPE_RND_2P 0x19
#define CCN_TYPE_RND_3P 0x1a
#define CCN_TYPE_CYCLES 0xff /* Pseudotype */

#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */

#define CCN_NUM_PMU_EVENTS 4
#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS

#define CCN_NUM_PREDEFINED_MASKS 4
#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0)
#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1)
#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2)
#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3)

struct arm_ccn_component {
        void __iomem *base;
        u32 type;

        DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
        union {
                struct {
                        DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
                } xp;
        };
};

#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
                struct arm_ccn_dt, pmu), struct arm_ccn, dt)

struct arm_ccn_dt {
        int id;
        void __iomem *base;

        spinlock_t config_lock;

        DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
        struct {
                struct arm_ccn_component *source;
                struct perf_event *event;
        } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];

        struct {
                u64 l, h;
        } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];

        struct hrtimer hrtimer;

        cpumask_t cpu;
        struct notifier_block cpu_nb;

        struct pmu pmu;
};

struct arm_ccn {
        struct device *dev;
        void __iomem *base;
        unsigned int irq;

        unsigned sbas_present:1;
        unsigned sbsx_present:1;

        int num_nodes;
        struct arm_ccn_component *node;

        int num_xps;
        struct arm_ccn_component *xp;

        struct arm_ccn_dt dt;
        int mn_id;
};

static int arm_ccn_node_to_xp(int node)
{
        return node / CCN_NUM_XP_PORTS;
}

static int arm_ccn_node_to_xp_port(int node)
{
        return node % CCN_NUM_XP_PORTS;
}
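
/*
 * Worked example for the two helpers above: with CCN_NUM_XP_PORTS == 2,
 * nodes are numbered consecutively along the crosspoints, so node 5 is
 * reached through XP 5 / 2 = 2, device port 5 % 2 = 1.
 */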

/*
 * Bit shifts and masks in these defines must be kept in sync with
 * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
 */
#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff)
#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff)
#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)

static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
{
        *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
        *config |= (node_xp << 0) | (type << 8) | (port << 24);
}

static ssize_t arm_ccn_pmu_format_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *ea = container_of(attr,
                        struct dev_ext_attribute, attr);

        return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
}

#define CCN_FORMAT_ATTR(_name, _config) \
        struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
                        { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
                        NULL), _config }

static CCN_FORMAT_ATTR(node, "config:0-7");
static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");

static struct attribute *arm_ccn_pmu_format_attrs[] = {
        &arm_ccn_pmu_format_attr_node.attr.attr,
        &arm_ccn_pmu_format_attr_xp.attr.attr,
        &arm_ccn_pmu_format_attr_type.attr.attr,
        &arm_ccn_pmu_format_attr_event.attr.attr,
        &arm_ccn_pmu_format_attr_port.attr.attr,
        &arm_ccn_pmu_format_attr_vc.attr.attr,
        &arm_ccn_pmu_format_attr_dir.attr.attr,
        &arm_ccn_pmu_format_attr_mask.attr.attr,
        &arm_ccn_pmu_format_attr_cmp_l.attr.attr,
        &arm_ccn_pmu_format_attr_cmp_h.attr.attr,
        NULL
};

static struct attribute_group arm_ccn_pmu_format_attr_group = {
        .name = "format",
        .attrs = arm_ccn_pmu_format_attrs,
};
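
/*
 * These format strings let perf parse an event specification into the
 * event->attr.config* bit fields defined above. For example (using the
 * "ccn" name registered by the first instance, see arm_ccn_pmu_init()):
 *
 *   perf stat -a -e ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ sleep 1
 *
 * places xp=1 in config[0:7], port=0 in config[24:25], vc=1 in
 * config[26:28] and dir=1 in config[29].
 */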

struct arm_ccn_pmu_event {
        struct device_attribute attr;
        u32 type;
        u32 event;
        int num_ports;
        int num_vcs;
        const char *def;
        int mask;
};

#define CCN_EVENT_ATTR(_name) \
        __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)

/*
 * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
 * their ports in XP they are connected to. For the sake of usability they are
 * explicitly defined here (and translated into a relevant watchpoint in
 * arm_ccn_pmu_event_init()) so the user can easily request them without deep
 * knowledge of the flit format.
 */

#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
                .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
                .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
                .def = _def, .mask = _mask, }

#define CCN_EVENT_HNI(_name, _def, _mask) { \
                .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
                .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
                .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_SBSX(_name, _def, _mask) { \
                .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
                .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
                .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
                .type = CCN_TYPE_HNF, .event = _event, }

#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
                .type = CCN_TYPE_XP, .event = _event, \
                .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }

/*
 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
 * on configuration. One of them is picked to represent the whole group,
 * as they all share the same event types.
 */
#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
                .type = CCN_TYPE_RNI_3P, .event = _event, }

#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
                .type = CCN_TYPE_SBAS, .event = _event, }

#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
                .type = CCN_TYPE_CYCLES }

static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        struct arm_ccn_pmu_event *event = container_of(attr,
                        struct arm_ccn_pmu_event, attr);
        ssize_t res;

        res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
        if (event->event)
                res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
                                event->event);
        if (event->def)
                res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
                                event->def);
        if (event->mask)
                res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
                                event->mask);

        /* Arguments required by an event */
        switch (event->type) {
        case CCN_TYPE_CYCLES:
                break;
        case CCN_TYPE_XP:
                res += snprintf(buf + res, PAGE_SIZE - res,
                                ",xp=?,port=?,vc=?,dir=?");
                if (event->event == CCN_EVENT_WATCHPOINT)
                        res += snprintf(buf + res, PAGE_SIZE - res,
                                        ",cmp_l=?,cmp_h=?,mask=?");
                break;
        case CCN_TYPE_MN:
                res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d",
                                ccn->mn_id);
                break;
        default:
                res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
                break;
        }

        res += snprintf(buf + res, PAGE_SIZE - res, "\n");

        return res;
}

static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
                struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        struct device_attribute *dev_attr = container_of(attr,
                        struct device_attribute, attr);
        struct arm_ccn_pmu_event *event = container_of(dev_attr,
                        struct arm_ccn_pmu_event, attr);

        if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
                return 0;
        if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
                return 0;

        return attr->mode;
}

static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
        CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
        CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
        CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
        CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
                        CCN_IDX_MASK_ORDER),
        CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
        CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
        CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
                        CCN_IDX_MASK_ORDER),
        CCN_EVENT_HNF(cache_miss, 0x1),
        CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
        CCN_EVENT_HNF(cache_fill, 0x3),
        CCN_EVENT_HNF(pocq_retry, 0x4),
        CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
        CCN_EVENT_HNF(sf_hit, 0x6),
        CCN_EVENT_HNF(sf_evictions, 0x7),
        CCN_EVENT_HNF(snoops_sent, 0x8),
        CCN_EVENT_HNF(snoops_broadcast, 0x9),
        CCN_EVENT_HNF(l3_eviction, 0xa),
        CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
        CCN_EVENT_HNF(mc_retries, 0xc),
        CCN_EVENT_HNF(mc_reqs, 0xd),
        CCN_EVENT_HNF(qos_hh_retry, 0xe),
        CCN_EVENT_RNI(rdata_beats_p0, 0x1),
        CCN_EVENT_RNI(rdata_beats_p1, 0x2),
        CCN_EVENT_RNI(rdata_beats_p2, 0x3),
        CCN_EVENT_RNI(rxdat_flits, 0x4),
        CCN_EVENT_RNI(txdat_flits, 0x5),
        CCN_EVENT_RNI(txreq_flits, 0x6),
        CCN_EVENT_RNI(txreq_flits_retried, 0x7),
        CCN_EVENT_RNI(rrt_full, 0x8),
        CCN_EVENT_RNI(wrt_full, 0x9),
        CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
        CCN_EVENT_XP(upload_starvation, 0x1),
        CCN_EVENT_XP(download_starvation, 0x2),
        CCN_EVENT_XP(respin, 0x3),
        CCN_EVENT_XP(valid_flit, 0x4),
        CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
        CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
        CCN_EVENT_SBAS(rxdat_flits, 0x4),
        CCN_EVENT_SBAS(txdat_flits, 0x5),
        CCN_EVENT_SBAS(txreq_flits, 0x6),
        CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
        CCN_EVENT_SBAS(rrt_full, 0x8),
        CCN_EVENT_SBAS(wrt_full, 0x9),
        CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
        CCN_EVENT_CYCLES(cycles),
};
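
/*
 * Each entry above becomes a sysfs attribute whose value is a template
 * for the user to complete, as built by arm_ccn_pmu_event_show(), e.g.:
 *
 *   # cat /sys/bus/event_source/devices/ccn/events/hnf_cache_miss
 *   type=0x4,event=0x1,node=?
 *
 * where "node=?" has to be replaced with an actual HN-F node ID.
 */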

/* Populated in arm_ccn_init() */
static struct attribute
                *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];

static struct attribute_group arm_ccn_pmu_events_attr_group = {
        .name = "events",
        .is_visible = arm_ccn_pmu_events_is_visible,
        .attrs = arm_ccn_pmu_events_attrs,
};

static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
{
        unsigned long i;

        if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
                return NULL;

        i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';

        switch (name[1]) {
        case 'l':
                return &ccn->dt.cmp_mask[i].l;
        case 'h':
                return &ccn->dt.cmp_mask[i].h;
        default:
                return NULL;
        }
}

static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);

        return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
}

static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
        int err = -EINVAL;

        if (mask)
                err = kstrtoull(buf, 0, mask);

        return err ? err : count;
}

#define CCN_CMP_MASK_ATTR(_name) \
        struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
                        __ATTR(_name, S_IRUGO | S_IWUSR, \
                        arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)

#define CCN_CMP_MASK_ATTR_RO(_name) \
        struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
                        __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)

static CCN_CMP_MASK_ATTR(0l);
static CCN_CMP_MASK_ATTR(0h);
static CCN_CMP_MASK_ATTR(1l);
static CCN_CMP_MASK_ATTR(1h);
static CCN_CMP_MASK_ATTR(2l);
static CCN_CMP_MASK_ATTR(2h);
static CCN_CMP_MASK_ATTR(3l);
static CCN_CMP_MASK_ATTR(3h);
static CCN_CMP_MASK_ATTR(4l);
static CCN_CMP_MASK_ATTR(4h);
static CCN_CMP_MASK_ATTR(5l);
static CCN_CMP_MASK_ATTR(5h);
static CCN_CMP_MASK_ATTR(6l);
static CCN_CMP_MASK_ATTR(6h);
static CCN_CMP_MASK_ATTR(7l);
static CCN_CMP_MASK_ATTR(7h);
static CCN_CMP_MASK_ATTR_RO(8l);
static CCN_CMP_MASK_ATTR_RO(8h);
static CCN_CMP_MASK_ATTR_RO(9l);
static CCN_CMP_MASK_ATTR_RO(9h);
static CCN_CMP_MASK_ATTR_RO(al);
static CCN_CMP_MASK_ATTR_RO(ah);
static CCN_CMP_MASK_ATTR_RO(bl);
static CCN_CMP_MASK_ATTR_RO(bh);

static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
        &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
        &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
        &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
        &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
        &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
        &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
        &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
        &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
        &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
        &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
        &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
        &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
        NULL
};

static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
        .name = "cmp_mask",
        .attrs = arm_ccn_pmu_cmp_mask_attrs,
};
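
/*
 * Example use of the "cmp_mask" group: entries 0l-7h are writable
 * scratch masks, 8l-bh expose the predefined ANY/EXACT/ORDER/OPCODE
 * masks read-only. A set mask bit means "ignore this flit bit" (cf. the
 * all-ones ANY mask in arm_ccn_pmu_init()), so e.g. (a sketch, values
 * chosen only for illustration):
 *
 *   # echo 0xffffffffffff0000 > /sys/bus/event_source/devices/ccn/cmp_mask/0l
 *   # echo 0xffffffffffffffff > /sys/bus/event_source/devices/ccn/cmp_mask/0h
 *
 * makes a watchpoint event with mask=0x0 compare only cmp_l[15:0].
 */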

static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
}

static struct device_attribute arm_ccn_pmu_cpumask_attr =
                __ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);

static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
        &arm_ccn_pmu_cpumask_attr.attr,
        NULL,
};

static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
        .attrs = arm_ccn_pmu_cpumask_attrs,
};

/*
 * Default poll period is 10ms, which is way over the top anyway,
 * as in the worst case scenario (an event every cycle), with 1GHz
 * clocked bus, the smallest, 32 bit counter will overflow in
 * more than 4s.
 */
static unsigned int arm_ccn_pmu_poll_period_us = 10000;
module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
                S_IRUGO | S_IWUSR);

static ktime_t arm_ccn_pmu_timer_period(void)
{
        return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
}
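
/* The "more than 4s" figure above: 2^32 cycles / 10^9 Hz ~= 4.29 s. */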

static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
        &arm_ccn_pmu_events_attr_group,
        &arm_ccn_pmu_format_attr_group,
        &arm_ccn_pmu_cmp_mask_attr_group,
        &arm_ccn_pmu_cpumask_attr_group,
        NULL
};

static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
{
        int bit;

        do {
                bit = find_first_zero_bit(bitmap, size);
                if (bit >= size)
                        return -EAGAIN;
        } while (test_and_set_bit(bit, bitmap));

        return bit;
}
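
/*
 * find_first_zero_bit() and test_and_set_bit() are not atomic as a pair,
 * so the loop above simply retries when test_and_set_bit() reveals that
 * somebody else claimed the chosen bit in the meantime.
 */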

/* All RN-I and RN-D nodes have identical PMUs */
static int arm_ccn_pmu_type_eq(u32 a, u32 b)
{
        if (a == b)
                return 1;

        switch (a) {
        case CCN_TYPE_RNI_1P:
        case CCN_TYPE_RNI_2P:
        case CCN_TYPE_RNI_3P:
        case CCN_TYPE_RND_1P:
        case CCN_TYPE_RND_2P:
        case CCN_TYPE_RND_3P:
                switch (b) {
                case CCN_TYPE_RNI_1P:
                case CCN_TYPE_RNI_2P:
                case CCN_TYPE_RNI_3P:
                case CCN_TYPE_RND_1P:
                case CCN_TYPE_RND_2P:
                case CCN_TYPE_RND_3P:
                        return 1;
                }
                break;
        }

        return 0;
}

static int arm_ccn_pmu_event_alloc(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        u32 node_xp, type, event_id;
        struct arm_ccn_component *source;
        int bit;

        node_xp = CCN_CONFIG_NODE(event->attr.config);
        type = CCN_CONFIG_TYPE(event->attr.config);
        event_id = CCN_CONFIG_EVENT(event->attr.config);

        /* Allocate the cycle counter */
        if (type == CCN_TYPE_CYCLES) {
                if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
                                ccn->dt.pmu_counters_mask))
                        return -EAGAIN;

                hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
                ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;

                return 0;
        }

        /* Allocate an event counter */
        hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
                        CCN_NUM_PMU_EVENT_COUNTERS);
        if (hw->idx < 0) {
                dev_dbg(ccn->dev, "No more counters available!\n");
                return -EAGAIN;
        }

        if (type == CCN_TYPE_XP)
                source = &ccn->xp[node_xp];
        else
                source = &ccn->node[node_xp];
        ccn->dt.pmu_counters[hw->idx].source = source;

        /* Allocate an event source or a watchpoint */
        if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
                bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
                                CCN_NUM_XP_WATCHPOINTS);
        else
                bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
                                CCN_NUM_PMU_EVENTS);
        if (bit < 0) {
                dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
                                node_xp);
                clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
                return -EAGAIN;
        }
        hw->config_base = bit;

        ccn->dt.pmu_counters[hw->idx].event = event;

        return 0;
}

static void arm_ccn_pmu_event_release(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
                clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
        } else {
                struct arm_ccn_component *source =
                                ccn->dt.pmu_counters[hw->idx].source;

                if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
                                CCN_CONFIG_EVENT(event->attr.config) ==
                                CCN_EVENT_WATCHPOINT)
                        clear_bit(hw->config_base, source->xp.dt_cmp_mask);
                else
                        clear_bit(hw->config_base, source->pmu_events_mask);
                clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
        }

        ccn->dt.pmu_counters[hw->idx].source = NULL;
        ccn->dt.pmu_counters[hw->idx].event = NULL;
}

static int arm_ccn_pmu_event_init(struct perf_event *event)
{
        struct arm_ccn *ccn;
        struct hw_perf_event *hw = &event->hw;
        u32 node_xp, type, event_id;
        int valid;
        int i;
        struct perf_event *sibling;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        ccn = pmu_to_arm_ccn(event->pmu);

        if (hw->sample_period) {
                dev_warn(ccn->dev, "Sampling not supported!\n");
                return -EOPNOTSUPP;
        }

        if (has_branch_stack(event) || event->attr.exclude_user ||
                        event->attr.exclude_kernel || event->attr.exclude_hv ||
                        event->attr.exclude_idle) {
                dev_warn(ccn->dev, "Can't exclude execution levels!\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_warn(ccn->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }
        /*
         * Many perf core operations (eg. events rotation) operate on a
         * single CPU context. This is obvious for CPU PMUs, where one
         * expects the same sets of events being observed on all CPUs,
         * but can lead to issues for off-core PMUs, like CCN, where each
         * event could be theoretically assigned to a different CPU. To
         * mitigate this, we enforce CPU assignment to one, selected
         * processor (the one described in the "cpumask" attribute).
         */
        event->cpu = cpumask_first(&ccn->dt.cpu);

        node_xp = CCN_CONFIG_NODE(event->attr.config);
        type = CCN_CONFIG_TYPE(event->attr.config);
        event_id = CCN_CONFIG_EVENT(event->attr.config);

        /* Validate node/xp vs topology */
        switch (type) {
        case CCN_TYPE_MN:
                if (node_xp != ccn->mn_id) {
                        dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
                        return -EINVAL;
                }
                break;
        case CCN_TYPE_XP:
                if (node_xp >= ccn->num_xps) {
                        dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
                        return -EINVAL;
                }
                break;
        case CCN_TYPE_CYCLES:
                break;
        default:
                if (node_xp >= ccn->num_nodes) {
                        dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
                        return -EINVAL;
                }
                if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
                        dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
                                        type, node_xp);
                        return -EINVAL;
                }
                break;
        }

        /* Validate event ID vs available for the type */
        for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
                        i++) {
                struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
                u32 port = CCN_CONFIG_PORT(event->attr.config);
                u32 vc = CCN_CONFIG_VC(event->attr.config);

                if (!arm_ccn_pmu_type_eq(type, e->type))
                        continue;
                if (event_id != e->event)
                        continue;
                if (e->num_ports && port >= e->num_ports) {
                        dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
                                        port, node_xp);
                        return -EINVAL;
                }
                if (e->num_vcs && vc >= e->num_vcs) {
                        dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
                                        vc, node_xp);
                        return -EINVAL;
                }
                valid = 1;
        }
        if (!valid) {
                dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
                                event_id, node_xp);
                return -EINVAL;
        }

        /* Watchpoint-based event for a node is actually set on XP */
        if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
                u32 port;

                type = CCN_TYPE_XP;
                port = arm_ccn_node_to_xp_port(node_xp);
                node_xp = arm_ccn_node_to_xp(node_xp);

                arm_ccn_pmu_config_set(&event->attr.config,
                                node_xp, type, port);
        }

        /*
         * We must NOT create groups containing mixed PMUs, although software
         * events are acceptable (for example to create a CCN group
         * periodically read when a hrtimer aka cpu-clock leader triggers).
         */
        if (event->group_leader->pmu != event->pmu &&
                        !is_software_event(event->group_leader))
                return -EINVAL;

        list_for_each_entry(sibling, &event->group_leader->sibling_list,
                        group_entry)
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;

        return 0;
}

static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
{
        u64 res;

        if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
#ifdef readq
                res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
#else
                /* 40 bit counter, can do snapshot and read in two parts */
                writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
                while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
                        ;
                writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
                res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
                res <<= 32;
                res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
#endif
        } else {
                res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
        }

        return res;
}

static void arm_ccn_pmu_event_update(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        u64 prev_count, new_count, mask;

        do {
                prev_count = local64_read(&hw->prev_count);
                new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
        } while (local64_xchg(&hw->prev_count, new_count) != prev_count);

        mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;

        local64_add((new_count - prev_count) & mask, &event->count);
}
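
/*
 * Worked example of the masking above: if a 32 bit event counter wrapped
 * from prev_count = 0xfffffff0 to new_count = 0x10, the delta
 * (0x10 - 0xfffffff0) & 0xffffffff = 0x20 is still the correct number
 * of events.
 */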

static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        struct arm_ccn_component *xp;
        u32 val, dt_cfg;

        /* Nothing to do for cycle counter */
        if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
                return;

        if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
                xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
        else
                xp = &ccn->xp[arm_ccn_node_to_xp(
                                CCN_CONFIG_NODE(event->attr.config))];

        if (enable)
                dt_cfg = hw->event_base;
        else
                dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;

        spin_lock(&ccn->dt.config_lock);

        val = readl(xp->base + CCN_XP_DT_CONFIG);
        val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
                        CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
        val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
        writel(val, xp->base + CCN_XP_DT_CONFIG);

        spin_unlock(&ccn->dt.config_lock);
}

static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        local64_set(&event->hw.prev_count,
                        arm_ccn_pmu_read_counter(ccn, hw->idx));
        hw->state = 0;

        /*
         * Pin the timer, so that the overflows are handled by the chosen
         * event->cpu (this is the same one as presented in "cpumask"
         * attribute).
         */
        if (!ccn->irq)
                hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
                                HRTIMER_MODE_REL_PINNED);

        /* Set the DT bus input, engaging the counter */
        arm_ccn_pmu_xp_dt_config(event, 1);
}

static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        u64 timeout;

        /* Disable counting, setting the DT bus to pass-through mode */
        arm_ccn_pmu_xp_dt_config(event, 0);

        if (!ccn->irq)
                hrtimer_cancel(&ccn->dt.hrtimer);

        /* Let the DT bus drain */
        timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
                        ccn->num_xps;
        while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
                        timeout)
                cpu_relax();

        if (flags & PERF_EF_UPDATE)
                arm_ccn_pmu_event_update(event);

        hw->state |= PERF_HES_STOPPED;
}
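
/*
 * The drain loop above uses the free-running cycle counter as a clock,
 * busy-waiting for num_xps bus cycles as a rough upper bound for
 * anything still in flight on the DT bus before the final update.
 */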

static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        struct arm_ccn_component *source =
                        ccn->dt.pmu_counters[hw->idx].source;
        unsigned long wp = hw->config_base;
        u32 val;
        u64 cmp_l = event->attr.config1;
        u64 cmp_h = event->attr.config2;
        u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
        u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;

        hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);

        /* Direction (RX/TX), device (port) & virtual channel */
        val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
        val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
                        CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
        val |= CCN_CONFIG_DIR(event->attr.config) <<
                        CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
        val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
                        CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
        val |= CCN_CONFIG_PORT(event->attr.config) <<
                        CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
        val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
                        CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
        val |= CCN_CONFIG_VC(event->attr.config) <<
                        CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
        writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);

        /* Comparison values */
        writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
        writel((cmp_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
        writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
        writel((cmp_h >> 32) & 0x0fffffff,
                        source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);

        /* Mask */
        writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
        writel((mask_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
        writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
        writel((mask_h >> 32) & 0x0fffffff,
                        source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
}

static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        struct arm_ccn_component *source =
                        ccn->dt.pmu_counters[hw->idx].source;
        u32 val, id;

        hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);

        id = (CCN_CONFIG_VC(event->attr.config) << 4) |
                        (CCN_CONFIG_PORT(event->attr.config) << 3) |
                        (CCN_CONFIG_EVENT(event->attr.config) << 0);

        val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
        val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
                        CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
        val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
        writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_node_event_config(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        struct arm_ccn_component *source =
                        ccn->dt.pmu_counters[hw->idx].source;
        u32 type = CCN_CONFIG_TYPE(event->attr.config);
        u32 val, port;

        port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
        hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
                        hw->config_base);

        /* These *_event_sel regs should be identical, but let's make sure... */
        BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
        BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
        BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
                        CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
        BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
                        CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
        BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
                        CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
        BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
                        CCN_RNI_PMU_EVENT_SEL__ID__MASK);

        if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
                        !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
                return;

        /* Set the event id for the pre-allocated counter */
        val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
        val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
                        CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
        val |= CCN_CONFIG_EVENT(event->attr.config) <<
                        CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
        writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_event_config(struct perf_event *event)
{
        struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        u32 xp, offset, val;

        /* Cycle counter requires no setup */
        if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
                return;

        if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
                xp = CCN_CONFIG_XP(event->attr.config);
        else
                xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));

        spin_lock(&ccn->dt.config_lock);

        /* Set the DT bus "distance" register */
        offset = (hw->idx / 4) * 4;
        val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
        val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
                        CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
        val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
        writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);

        if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
                if (CCN_CONFIG_EVENT(event->attr.config) ==
                                CCN_EVENT_WATCHPOINT)
                        arm_ccn_pmu_xp_watchpoint_config(event);
                else
                        arm_ccn_pmu_xp_event_config(event);
        } else {
                arm_ccn_pmu_node_event_config(event);
        }

        spin_unlock(&ccn->dt.config_lock);
}
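
/*
 * ACTIVE_DSM layout used above: each 32 bit register holds four 8 bit
 * DSM (XP) IDs, one per counter, so counter idx maps to register offset
 * (idx / 4) * 4, byte lane idx % 4. E.g. counter 5 lands in the second
 * register, bits [15:8].
 */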

static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
        int err;
        struct hw_perf_event *hw = &event->hw;

        err = arm_ccn_pmu_event_alloc(event);
        if (err)
                return err;

        arm_ccn_pmu_event_config(event);

        hw->state = PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);

        return 0;
}

static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
        arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);

        arm_ccn_pmu_event_release(event);
}

static void arm_ccn_pmu_event_read(struct perf_event *event)
{
        arm_ccn_pmu_event_update(event);
}

static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
        u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
        int idx;

        if (!pmovsr)
                return IRQ_NONE;

        writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);

        BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);

        for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
                struct perf_event *event = dt->pmu_counters[idx].event;
                int overflowed = pmovsr & BIT(idx);

                WARN_ON_ONCE(overflowed && !event &&
                                idx != CCN_IDX_PMU_CYCLE_COUNTER);

                if (!event || !overflowed)
                        continue;

                arm_ccn_pmu_event_update(event);
        }

        return IRQ_HANDLED;
}

static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
{
        struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
                        hrtimer);
        unsigned long flags;

        local_irq_save(flags);
        arm_ccn_pmu_overflow_handler(dt);
        local_irq_restore(flags);

        hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
        return HRTIMER_RESTART;
}

static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
                unsigned long action, void *hcpu)
{
        struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
        struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
        unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
        unsigned int target;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
                        break;
                target = cpumask_any_but(cpu_online_mask, cpu);
                if (target >= nr_cpu_ids)
                        break;
                perf_pmu_migrate_context(&dt->pmu, cpu, target);
                cpumask_set_cpu(target, &dt->cpu);
                if (ccn->irq)
                        WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
        default:
                break;
        }

        return NOTIFY_OK;
}

static DEFINE_IDA(arm_ccn_pmu_ida);

static int arm_ccn_pmu_init(struct arm_ccn *ccn)
{
        int i;
        char *name;
        int err;

        /* Initialize DT subsystem */
        ccn->dt.base = ccn->base + CCN_REGION_SIZE;
        spin_lock_init(&ccn->dt.config_lock);
        writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
        writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
        writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
                        ccn->dt.base + CCN_DT_PMCR);
        writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
        for (i = 0; i < ccn->num_xps; i++) {
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
                writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
                                CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
                                (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
                                CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
                                CCN_XP_DT_CONTROL__DT_ENABLE,
                                ccn->xp[i].base + CCN_XP_DT_CONTROL);
        }
        ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
        ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
        ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
        ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
        ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
        ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
        ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
        ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);

        /* Get a convenient /sys/event_source/devices/ name */
        ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
        if (ccn->dt.id == 0) {
                name = "ccn";
        } else {
                int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);

                name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
                snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
        }

        /* Perf driver registration */
        ccn->dt.pmu = (struct pmu) {
                .module = THIS_MODULE,
                .attr_groups = arm_ccn_pmu_attr_groups,
                .task_ctx_nr = perf_invalid_context,
                .event_init = arm_ccn_pmu_event_init,
                .add = arm_ccn_pmu_event_add,
                .del = arm_ccn_pmu_event_del,
                .start = arm_ccn_pmu_event_start,
                .stop = arm_ccn_pmu_event_stop,
                .read = arm_ccn_pmu_event_read,
        };

        /* No overflow interrupt? Have to use a timer instead. */
        if (!ccn->irq) {
                dev_info(ccn->dev, "No access to interrupts, using timer.\n");
                hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
                                HRTIMER_MODE_REL);
                ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
        }

        /* Pick one CPU which we will use to collect data from CCN... */
        cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);

        /*
         * ... and change the selection when it goes offline. Priority is
         * picked to have a chance to migrate events before perf is notified.
         */
        ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
        ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1;
        err = register_cpu_notifier(&ccn->dt.cpu_nb);
        if (err)
                goto error_cpu_notifier;

        /* Also make sure that the overflow interrupt is handled by this CPU */
        if (ccn->irq) {
                err = irq_set_affinity(ccn->irq, &ccn->dt.cpu);
                if (err) {
                        dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
                        goto error_set_affinity;
                }
        }

        err = perf_pmu_register(&ccn->dt.pmu, name, -1);
        if (err)
                goto error_pmu_register;

        return 0;

error_pmu_register:
error_set_affinity:
        unregister_cpu_notifier(&ccn->dt.cpu_nb);
error_cpu_notifier:
        ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
        return err;
}

static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
        int i;

        irq_set_affinity(ccn->irq, cpu_possible_mask);
        unregister_cpu_notifier(&ccn->dt.cpu_nb);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
        perf_pmu_unregister(&ccn->dt.pmu);
        ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
}

static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
                int (*callback)(struct arm_ccn *ccn, int region,
                void __iomem *base, u32 type, u32 id))
{
        int region;

        for (region = 0; region < CCN_NUM_REGIONS; region++) {
                u32 val, type, id;
                void __iomem *base;
                int err;

                val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
                                4 * (region / 32));
                if (!(val & (1 << (region % 32))))
                        continue;

                base = ccn->base + region * CCN_REGION_SIZE;
                val = readl(base + CCN_ALL_OLY_ID);
                type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
                                CCN_ALL_OLY_ID__OLY_ID__MASK;
                id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
                                CCN_ALL_OLY_ID__NODE_ID__MASK;

                err = callback(ccn, region, base, type, id);
                if (err)
                        return err;
        }

        return 0;
}

static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
                void __iomem *base, u32 type, u32 id)
{
        if (type == CCN_TYPE_XP && id >= ccn->num_xps)
                ccn->num_xps = id + 1;
        else if (id >= ccn->num_nodes)
                ccn->num_nodes = id + 1;

        return 0;
}

static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
                void __iomem *base, u32 type, u32 id)
{
        struct arm_ccn_component *component;

        dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);

        switch (type) {
        case CCN_TYPE_MN:
                ccn->mn_id = id;
                return 0;
        case CCN_TYPE_DT:
                return 0;
        case CCN_TYPE_XP:
                component = &ccn->xp[id];
                break;
        case CCN_TYPE_SBSX:
                ccn->sbsx_present = 1;
                component = &ccn->node[id];
                break;
        case CCN_TYPE_SBAS:
                ccn->sbas_present = 1;
                /* Fall-through */
        default:
                component = &ccn->node[id];
                break;
        }

        component->base = base;
        component->type = type;

        return 0;
}

static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
                const u32 *err_sig_val)
{
        /* This should be really handled by firmware... */
        dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
                        err_sig_val[5], err_sig_val[4], err_sig_val[3],
                        err_sig_val[2], err_sig_val[1], err_sig_val[0]);
        dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
        writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
                        ccn->base + CCN_MN_ERRINT_STATUS);

        return IRQ_HANDLED;
}

static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
{
        irqreturn_t res = IRQ_NONE;
        struct arm_ccn *ccn = dev_id;
        u32 err_sig_val[6];
        u32 err_or;
        int i;

        /* PMU overflow is a special case */
        err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
        if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
                err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
                res = arm_ccn_pmu_overflow_handler(&ccn->dt);
        }

        /* Have to read all err_sig_vals to clear them */
        for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
                err_sig_val[i] = readl(ccn->base +
                                CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
                err_or |= err_sig_val[i];
        }
        if (err_or)
                res |= arm_ccn_error_handler(ccn, err_sig_val);

        if (res != IRQ_NONE)
                writel(CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT,
                                ccn->base + CCN_MN_ERRINT_STATUS);

        return res;
}

static int arm_ccn_probe(struct platform_device *pdev)
{
        struct arm_ccn *ccn;
        struct resource *res;
        unsigned int irq;
        int err;

        ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
        if (!ccn)
                return -ENOMEM;
        ccn->dev = &pdev->dev;
        platform_set_drvdata(pdev, ccn);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;

        if (!devm_request_mem_region(ccn->dev, res->start,
                        resource_size(res), pdev->name))
                return -EBUSY;

        ccn->base = devm_ioremap(ccn->dev, res->start,
                        resource_size(res));
        if (!ccn->base)
                return -EFAULT;

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res)
                return -EINVAL;
        irq = res->start;

        /* Check if we can use the interrupt */
        writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
                        ccn->base + CCN_MN_ERRINT_STATUS);
        if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
                        CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
                /* Can set 'disable' bits, so can acknowledge interrupts */
                writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
                                ccn->base + CCN_MN_ERRINT_STATUS);
                err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
                                dev_name(ccn->dev), ccn);
                if (err)
                        return err;

                ccn->irq = irq;
        }

        /* Build topology */
        err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
        if (err)
                return err;

        ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
                        GFP_KERNEL);
        ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->xp) * ccn->num_xps,
                        GFP_KERNEL);
        if (!ccn->node || !ccn->xp)
                return -ENOMEM;

        err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
        if (err)
                return err;

        return arm_ccn_pmu_init(ccn);
}

static int arm_ccn_remove(struct platform_device *pdev)
{
        struct arm_ccn *ccn = platform_get_drvdata(pdev);

        arm_ccn_pmu_cleanup(ccn);

        return 0;
}

static const struct of_device_id arm_ccn_match[] = {
        { .compatible = "arm,ccn-504", },
        {},
};

static struct platform_driver arm_ccn_driver = {
        .driver = {
                .name = "arm-ccn",
                .of_match_table = arm_ccn_match,
        },
        .probe = arm_ccn_probe,
        .remove = arm_ccn_remove,
};

static int __init arm_ccn_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
                arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;

        return platform_driver_register(&arm_ccn_driver);
}

static void __exit arm_ccn_exit(void)
{
        platform_driver_unregister(&arm_ccn_driver);
}

module_init(arm_ccn_init);
module_exit(arm_ccn_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_LICENSE("GPL");