exynos-ppmu.c

/*
 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
 *
 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>

#include "exynos-ppmu.h"

struct exynos_ppmu_data {
	void __iomem *base;
	struct clk *clk;
};

struct exynos_ppmu {
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	unsigned int num_events;

	struct device *dev;
	struct mutex lock;

	struct exynos_ppmu_data ppmu;
};

#define PPMU_EVENT(name)			\
	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
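
/*
 * Table of supported devfreq-event names. Each PPMU_EVENT() entry expands to
 * four name/id pairs, one per hardware performance counter (PMNCNT0..PMNCNT3).
 */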
struct __exynos_ppmu_events {
	char *name;
	int id;
} ppmu_events[] = {
	/* For Exynos3250, Exynos4 and Exynos5260 */
	PPMU_EVENT(g3d),
	PPMU_EVENT(fsys),

	/* For Exynos4 SoCs and Exynos3250 */
	PPMU_EVENT(dmc0),
	PPMU_EVENT(dmc1),
	PPMU_EVENT(cpu),
	PPMU_EVENT(rightbus),
	PPMU_EVENT(leftbus),
	PPMU_EVENT(lcd0),
	PPMU_EVENT(camif),

	/* Only for Exynos3250 and Exynos5260 */
	PPMU_EVENT(mfc),

	/* Only for Exynos4 SoCs */
	PPMU_EVENT(mfc-left),
	PPMU_EVENT(mfc-right),

	/* Only for Exynos5260 SoCs */
	PPMU_EVENT(drex0-s0),
	PPMU_EVENT(drex0-s1),
	PPMU_EVENT(drex1-s0),
	PPMU_EVENT(drex1-s1),
	PPMU_EVENT(eagle),
	PPMU_EVENT(kfc),
	PPMU_EVENT(isp),
	PPMU_EVENT(fimc),
	PPMU_EVENT(gscl),
	PPMU_EVENT(mscl),
	PPMU_EVENT(fimd0x),
	PPMU_EVENT(fimd1x),

	/* Only for Exynos5433 SoCs */
	PPMU_EVENT(d0-cpu),
	PPMU_EVENT(d0-general),
	PPMU_EVENT(d0-rt),
	PPMU_EVENT(d1-cpu),
	PPMU_EVENT(d1-general),
	PPMU_EVENT(d1-rt),

	{ /* sentinel */ },
};
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
		if (!strcmp(edev->desc->name, ppmu_events[i].name))
			return ppmu_events[i].id;

	return -EINVAL;
}

/*
 * The devfreq-event ops structure for PPMU v1.1
 */
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc;

	/* Disable all counters */
	__raw_writel(PPMU_CCNT_MASK |
		     PPMU_PMCNT0_MASK |
		     PPMU_PMCNT1_MASK |
		     PPMU_PMCNT2_MASK |
		     PPMU_PMCNT3_MASK,
		     info->ppmu.base + PPMU_CNTENC);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}

static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);

	/* Set the event of Read/Write data count */
	__raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
			info->ppmu.base + PPMU_BEVTxSEL(id));

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}

static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
				struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	/* Read cycle count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);

	/* Read performance count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		edata->load_count
			= __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		edata->load_count =
			((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
			| __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
		break;
	default:
		return -EINVAL;
	}

	/* Disable specific counter */
	cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);

	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);

	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_ops = {
	.disable = exynos_ppmu_disable,
	.set_event = exynos_ppmu_set_event,
	.get_event = exynos_ppmu_get_event,
};

/*
 * The devfreq-event ops structure for PPMU v2.0
 */
static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc, clear;

	/* Disable all counters */
	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);

	__raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);

	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	return 0;
}

static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	/* Enable all counters */
	cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);

	/* Set the event of Read/Write data count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		__raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
		break;
	case PPMU_PMNCNT3:
		__raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
		break;
	}

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK
			| PPMU_PMNC_CC_DIVIDER_MASK
			| PPMU_V2_PMNC_START_MODE_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	return 0;
}

static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
				    struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;
	u32 pmcnt_high, pmcnt_low;
	u64 load_count = 0;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	/* Read cycle count and performance count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);

	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
		pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
		load_count = ((u64)((pmcnt_high & 0xff)) << 32)
			   + (u64)pmcnt_low;
		break;
	}
	edata->load_count = load_count;

	/* Disable all counters */
	cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);

	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);

	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
	.disable = exynos_ppmu_v2_disable,
	.set_event = exynos_ppmu_v2_set_event,
	.get_event = exynos_ppmu_v2_get_event,
};

static const struct of_device_id exynos_ppmu_id_match[] = {
	{
		.compatible = "samsung,exynos-ppmu",
		.data = (void *)&exynos_ppmu_ops,
	}, {
		.compatible = "samsung,exynos-ppmu-v2",
		.data = (void *)&exynos_ppmu_v2_ops,
	},
	{ /* sentinel */ },
};
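
/* Pick the v1.1 or v2.0 event ops according to the node's compatible string. */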
static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
{
	const struct of_device_id *match;

	match = of_match_node(exynos_ppmu_id_match, np);
	return (struct devfreq_event_ops *)match->data;
}
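
/*
 * Walk the "events" child node and create a devfreq_event_desc for every
 * sub-node whose name is listed in ppmu_events[].
 */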
static int of_get_devfreq_events(struct device_node *np,
				 struct exynos_ppmu *info)
{
	struct devfreq_event_desc *desc;
	struct devfreq_event_ops *event_ops;
	struct device *dev = info->dev;
	struct device_node *events_np, *node;
	int i, j, count;

	events_np = of_get_child_by_name(np, "events");
	if (!events_np) {
		dev_err(dev,
			"failed to get child node of devfreq-event devices\n");
		return -EINVAL;
	}
	event_ops = exynos_bus_get_ops(np);

	count = of_get_child_count(events_np);
	desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	info->num_events = count;

	j = 0;
	for_each_child_of_node(events_np, node) {
		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
			if (!ppmu_events[i].name)
				continue;

			if (!of_node_cmp(node->name, ppmu_events[i].name))
				break;
		}

		if (i == ARRAY_SIZE(ppmu_events)) {
			dev_warn(dev,
				"don't know how to configure events : %s\n",
				node->name);
			continue;
		}

		desc[j].ops = event_ops;
		desc[j].driver_data = info;

		of_property_read_string(node, "event-name", &desc[j].name);

		j++;

		of_node_put(node);
	}
	info->desc = desc;

	of_node_put(events_np);

	return 0;
}
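
/*
 * Map the PPMU registers, get the (optional) "ppmu" clock and parse the
 * per-event child nodes from the devicetree.
 */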
static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
{
	struct device *dev = info->dev;
	struct device_node *np = dev->of_node;
	int ret = 0;

	if (!np) {
		dev_err(dev, "failed to find devicetree node\n");
		return -EINVAL;
	}

	/* Maps the memory mapped IO to control PPMU register */
	info->ppmu.base = of_iomap(np, 0);
	if (IS_ERR_OR_NULL(info->ppmu.base)) {
		dev_err(dev, "failed to map memory region\n");
		return -ENOMEM;
	}

	info->ppmu.clk = devm_clk_get(dev, "ppmu");
	if (IS_ERR(info->ppmu.clk)) {
		info->ppmu.clk = NULL;
		dev_warn(dev, "cannot get PPMU clock\n");
	}

	ret = of_get_devfreq_events(np, info);
	if (ret < 0) {
		dev_err(dev, "failed to parse exynos ppmu dt node\n");
		goto err;
	}

	return 0;

err:
	iounmap(info->ppmu.base);

	return ret;
}
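
/* Register one devfreq-event device per parsed event, then enable the clock. */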
static int exynos_ppmu_probe(struct platform_device *pdev)
{
	struct exynos_ppmu *info;
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	int i, ret = 0, size;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	mutex_init(&info->lock);
	info->dev = &pdev->dev;

	/* Parse dt data to get resource */
	ret = exynos_ppmu_parse_dt(info);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to parse devicetree for resource\n");
		return ret;
	}
	desc = info->desc;

	size = sizeof(struct devfreq_event_dev *) * info->num_events;
	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!info->edev) {
		dev_err(&pdev->dev,
			"failed to allocate memory devfreq-event devices\n");
		return -ENOMEM;
	}
	edev = info->edev;
	platform_set_drvdata(pdev, info);

	for (i = 0; i < info->num_events; i++) {
		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
		if (IS_ERR(edev[i])) {
			ret = PTR_ERR(edev[i]);
			dev_err(&pdev->dev,
				"failed to add devfreq-event device\n");
			goto err;
		}
	}

	clk_prepare_enable(info->ppmu.clk);

	return 0;
err:
	iounmap(info->ppmu.base);

	return ret;
}

static int exynos_ppmu_remove(struct platform_device *pdev)
{
	struct exynos_ppmu *info = platform_get_drvdata(pdev);

	clk_disable_unprepare(info->ppmu.clk);
	iounmap(info->ppmu.base);

	return 0;
}

static struct platform_driver exynos_ppmu_driver = {
	.probe	= exynos_ppmu_probe,
	.remove	= exynos_ppmu_remove,
	.driver = {
		.name	= "exynos-ppmu",
		.of_match_table = exynos_ppmu_id_match,
	},
};
module_platform_driver(exynos_ppmu_driver);

MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");