op_model_fsl_emb.c

/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;

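/*
 * Added note (not in the original file): each performance monitor counter
 * on these cores has a count register (PMCn) and a pair of local control
 * registers (PMLCAn/PMLCBn), all accessed through the mfpmr/mtpmr
 * performance monitor register instructions.  The helpers below simply
 * dispatch on the counter index to reach the right register.
 */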
static inline u32 get_pmlca(int ctr)
{
        u32 pmlca;

        switch (ctr) {
        case 0:
                pmlca = mfpmr(PMRN_PMLCA0);
                break;
        case 1:
                pmlca = mfpmr(PMRN_PMLCA1);
                break;
        case 2:
                pmlca = mfpmr(PMRN_PMLCA2);
                break;
        case 3:
                pmlca = mfpmr(PMRN_PMLCA3);
                break;
        case 4:
                pmlca = mfpmr(PMRN_PMLCA4);
                break;
        case 5:
                pmlca = mfpmr(PMRN_PMLCA5);
                break;
        default:
                panic("Bad ctr number\n");
        }

        return pmlca;
}

static inline void set_pmlca(int ctr, u32 pmlca)
{
        switch (ctr) {
        case 0:
                mtpmr(PMRN_PMLCA0, pmlca);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, pmlca);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, pmlca);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, pmlca);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, pmlca);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, pmlca);
                break;
        default:
                panic("Bad ctr number\n");
        }
}

static inline unsigned int ctr_read(unsigned int i)
{
        switch (i) {
        case 0:
                return mfpmr(PMRN_PMC0);
        case 1:
                return mfpmr(PMRN_PMC1);
        case 2:
                return mfpmr(PMRN_PMC2);
        case 3:
                return mfpmr(PMRN_PMC3);
        case 4:
                return mfpmr(PMRN_PMC4);
        case 5:
                return mfpmr(PMRN_PMC5);
        default:
                return 0;
        }
}

static inline void ctr_write(unsigned int i, unsigned int val)
{
        switch (i) {
        case 0:
                mtpmr(PMRN_PMC0, val);
                break;
        case 1:
                mtpmr(PMRN_PMC1, val);
                break;
        case 2:
                mtpmr(PMRN_PMC2, val);
                break;
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
        case 4:
                mtpmr(PMRN_PMC4, val);
                break;
        case 5:
                mtpmr(PMRN_PMC5, val);
                break;
        default:
                break;
        }
}

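/*
 * Added note (not in the original file) on the PMLCA freeze bits used
 * below, as described in the e500 core documentation: PMLCA_FC freezes
 * the counter unconditionally, PMLCA_FCS and PMLCA_FCU freeze it in
 * supervisor and user state respectively, and PMLCA_FCM1/PMLCA_FCM0
 * freeze it while the MSR[PMM] mark bit is 1 or 0.  Setting all of them,
 * as init_pmc_stop() does, leaves the counter completely stopped until
 * it is explicitly reconfigured.
 */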
static void init_pmc_stop(int ctr)
{
        u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
                     PMLCA_FCM1 | PMLCA_FCM0);
        u32 pmlcb = 0;

        switch (ctr) {
        case 0:
                mtpmr(PMRN_PMLCA0, pmlca);
                mtpmr(PMRN_PMLCB0, pmlcb);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, pmlca);
                mtpmr(PMRN_PMLCB1, pmlcb);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, pmlca);
                mtpmr(PMRN_PMLCB2, pmlcb);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, pmlca);
                mtpmr(PMRN_PMLCB3, pmlcb);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, pmlca);
                mtpmr(PMRN_PMLCB4, pmlcb);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, pmlca);
                mtpmr(PMRN_PMLCB5, pmlcb);
                break;
        default:
                panic("Bad ctr number!\n");
        }
}

static void set_pmc_event(int ctr, int event)
{
        u32 pmlca;

        pmlca = get_pmlca(ctr);

        pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
                ((event << PMLCA_EVENT_SHIFT) &
                 PMLCA_EVENT_MASK);

        set_pmlca(ctr, pmlca);
}

static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
        u32 pmlca;

        pmlca = get_pmlca(ctr);

        if (user)
                pmlca &= ~PMLCA_FCU;
        else
                pmlca |= PMLCA_FCU;

        if (kernel)
                pmlca &= ~PMLCA_FCS;
        else
                pmlca |= PMLCA_FCS;

        set_pmlca(ctr, pmlca);
}

static void set_pmc_marked(int ctr, int mark0, int mark1)
{
        u32 pmlca = get_pmlca(ctr);

        if (mark0)
                pmlca &= ~PMLCA_FCM0;
        else
                pmlca |= PMLCA_FCM0;

        if (mark1)
                pmlca &= ~PMLCA_FCM1;
        else
                pmlca |= PMLCA_FCM1;

        set_pmlca(ctr, pmlca);
}

static void pmc_start_ctr(int ctr, int enable)
{
        u32 pmlca = get_pmlca(ctr);

        pmlca &= ~PMLCA_FC;

        if (enable)
                pmlca |= PMLCA_CE;
        else
                pmlca &= ~PMLCA_CE;

        set_pmlca(ctr, pmlca);
}

static void pmc_start_ctrs(int enable)
{
        u32 pmgc0 = mfpmr(PMRN_PMGC0);

        pmgc0 &= ~PMGC0_FAC;
        pmgc0 |= PMGC0_FCECE;

        if (enable)
                pmgc0 |= PMGC0_PMIE;
        else
                pmgc0 &= ~PMGC0_PMIE;

        mtpmr(PMRN_PMGC0, pmgc0);
}

static void pmc_stop_ctrs(void)
{
        u32 pmgc0 = mfpmr(PMRN_PMGC0);

        pmgc0 |= PMGC0_FAC;
        pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

        mtpmr(PMRN_PMGC0, pmgc0);
}

static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
        int i;

        /* freeze all counters */
        pmc_stop_ctrs();

        for (i = 0; i < num_counters; i++) {
                init_pmc_stop(i);
                set_pmc_event(i, ctr[i].event);
                set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
        }

        return 0;
}

static int fsl_emb_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        int i;

        num_counters = num_ctrs;

        /* Our counters count up and we interrupt on overflow, so
         * "count" is the number of events that should occur before
         * the next interrupt.  Here we calculate the starting value
         * which will give us "count" events until overflow; the
         * events themselves are programmed on the enabled counters
         * in fsl_emb_cpu_setup(). */
        for (i = 0; i < num_counters; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;

        return 0;
}

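/*
 * Worked example added for illustration (the numbers are hypothetical,
 * not from the original source): with ctr[i].count = 100000,
 * reset_value[i] = 0x80000000 - 100000 = 0x7ffe7960.  The counter then
 * needs exactly 100000 events to reach 0x80000000; once bit 31 is set it
 * reads back as negative in fsl_emb_handle_interrupt() and a performance
 * monitor interrupt is taken.
 */
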
static int fsl_emb_start(struct op_counter_config *ctr)
{
        int i;

        mtmsr(mfmsr() | MSR_PMM);

        for (i = 0; i < num_counters; ++i) {
                if (ctr[i].enabled) {
                        ctr_write(i, reset_value[i]);
                        /* Set each enabled counter to only
                         * count when the Mark bit is *not* set */
                        set_pmc_marked(i, 1, 0);
                        pmc_start_ctr(i, 1);
                } else {
                        ctr_write(i, 0);
                        /* Set the ctr to be stopped */
                        pmc_start_ctr(i, 0);
                }
        }

        /* Clear the freeze bit, and enable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs(1);

        oprofile_running = 1;

        pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
                 mfpmr(PMRN_PMGC0));

        return 0;
}

static void fsl_emb_stop(void)
{
        /* freeze counters */
        pmc_stop_ctrs();

        oprofile_running = 0;

        pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
                 mfpmr(PMRN_PMGC0));

        mb();
}

static void fsl_emb_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;

        pc = regs->nip;
        is_kernel = is_kernel_addr(pc);

        for (i = 0; i < num_counters; ++i) {
                val = ctr_read(i);
                /* A negative value means the counter has crossed
                 * 0x80000000, i.e. it has overflowed. */
                if (val < 0) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                ctr_write(i, reset_value[i]);
                        } else {
                                ctr_write(i, 0);
                        }
                }
        }

        /* The freeze bit was set by the interrupt. */
        /* Clear the freeze bit, and reenable the interrupt.  The
         * counters won't actually start until the rfi clears the PMM
         * bit.  The PMM bit should not be set until after the interrupt
         * is cleared to avoid it getting lost in some hypervisor
         * environments.
         */
        mtmsr(mfmsr() | MSR_PMM);
        pmc_start_ctrs(1);
}

struct op_powerpc_model op_model_fsl_emb = {
        .reg_setup              = fsl_emb_reg_setup,
        .cpu_setup              = fsl_emb_cpu_setup,
        .start                  = fsl_emb_start,
        .stop                   = fsl_emb_stop,
        .handle_interrupt       = fsl_emb_handle_interrupt,
};
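
For orientation, below is a minimal, hypothetical sketch of the order in which a caller would be expected to drive these callbacks, using only the op_counter_config fields this file itself reads (enabled, event, count, user, kernel). The function fsl_emb_example_session() and its counter values are invented for illustration; in the kernel, the generic powerpc oprofile layer performs these steps.

/* Hypothetical illustration only -- not part of the original file. */
static __attribute__((unused)) int fsl_emb_example_session(void)
{
        struct op_counter_config ctrs[2] = {
                /* counter 0: event 1, sample every 100000 occurrences,
                 * in both user and kernel mode */
                { .enabled = 1, .event = 1, .count = 100000,
                  .user = 1, .kernel = 1 },
                /* counter 1: left disabled */
                { .enabled = 0 },
        };
        struct op_system_config sys;       /* unused by fsl_emb_reg_setup() */

        fsl_emb_reg_setup(ctrs, &sys, 2);  /* compute reset_value[] */
        fsl_emb_cpu_setup(ctrs);           /* program events and freeze bits */
        fsl_emb_start(ctrs);               /* load counters, unfreeze, enable PMI */

        /* ...performance monitor interrupts now invoke
         * fsl_emb_handle_interrupt() to record samples... */

        fsl_emb_stop();                    /* freeze all counters again */
        return 0;
}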