/* op_model_mipsxx.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2004, 05, 06 by Ralf Baechle
  7. * Copyright (C) 2005 by MIPS Technologies, Inc.
  8. */
  9. #include <linux/cpumask.h>
  10. #include <linux/oprofile.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/smp.h>
  13. #include <asm/irq_regs.h>
  14. #include <asm/time.h>
  15. #include "op_impl.h"
/* Bit fields of a CP0 performance counter control register (PerfCtl). */
#define M_PERFCTL_EXL (1UL << 0)		/* count while Status.EXL is set */
#define M_PERFCTL_KERNEL (1UL << 1)		/* count in kernel mode */
#define M_PERFCTL_SUPERVISOR (1UL << 2)		/* count in supervisor mode */
#define M_PERFCTL_USER (1UL << 3)		/* count in user mode */
#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)	/* interrupt on counter overflow */
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)	/* event selector field */
#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)	/* MT: restrict counting to one VPE */
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)	/* MT: thread filtering mode */
#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)		/* count events from all TCs */
#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)		/* count TCs of a single VPE */
#define M_TC_EN_TC M_PERFCTL_MT_EN(2)		/* count a single TC only */
#define M_PERFCTL_TCID(tcid) ((tcid) << 22)	/* MT: TC selector */
#define M_PERFCTL_WIDE (1UL << 30)		/* wide (64-bit) counter — unused here */
#define M_PERFCTL_MORE (1UL << 31)		/* another counter pair follows */
#define M_COUNTER_OVERFLOW (1UL << 31)		/* overflow flag, read from the counter */
/* Netlogic XLR specific, count events in all threads in a core */
#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
/* Previous perf_irq handler, saved in mipsxx_init() and restored on exit. */
static int (*save_perf_irq)(void);
/* Linux irq number for counter overflow, or -1 when none is available. */
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c) 0
#endif
#ifdef CONFIG_MIPS_MT_SMP
/* Non-zero when each TC has its own counters; probed in mipsxx_init(). */
static int cpu_has_mipsmt_pertccounters;
/* Extra control bits applied at start: count only TCs of this CPU's VPE. */
#define WHAT (M_TC_EN_VPE | \
	M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/* Selects which per-VPE register set to access; 0 when counters are per-TC. */
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
	0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE. There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment. The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

#define WHAT 0
#define vpe_id() 0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
  73. static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
  74. {
  75. return counters >> vpe_shift();
  76. }
  77. static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
  78. {
  79. return counters << vpe_shift();
  80. }
/*
 * Generate r_c0_<r><n>() / w_c0_<r><n>() accessors that read or write
 * hardware register <n> on VPE 0 but register <np> on VPE 1, so each VPE
 * sees its own half of the core's counter set. Any other vpe_id() is a
 * bug (the mapping below only covers two VPEs).
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}

/* Counter data registers: VPE 0 uses 0-3 directly, VPE 1 maps to 2,3,0,1. */
__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

/* Counter control registers, identical VPE mapping. */
__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
/* Forward declaration; the initializer is at the bottom of this file. */
struct op_mips_model op_model_mipsxx_ops;

/* Values computed by mipsxx_reg_setup() and programmed on every CPU. */
static struct mipsxx_register_config {
	unsigned int control[4];	/* PerfCtl word per counter */
	unsigned int counter[4];	/* initial PerfCnt value per counter */
} reg;
  127. /* Compute all of the registers in preparation for enabling profiling. */
  128. static void mipsxx_reg_setup(struct op_counter_config *ctr)
  129. {
  130. unsigned int counters = op_model_mipsxx_ops.num_counters;
  131. int i;
  132. /* Compute the performance counter control word. */
  133. for (i = 0; i < counters; i++) {
  134. reg.control[i] = 0;
  135. reg.counter[i] = 0;
  136. if (!ctr[i].enabled)
  137. continue;
  138. reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
  139. M_PERFCTL_INTERRUPT_ENABLE;
  140. if (ctr[i].kernel)
  141. reg.control[i] |= M_PERFCTL_KERNEL;
  142. if (ctr[i].user)
  143. reg.control[i] |= M_PERFCTL_USER;
  144. if (ctr[i].exl)
  145. reg.control[i] |= M_PERFCTL_EXL;
  146. if (boot_cpu_type() == CPU_XLR)
  147. reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
  148. reg.counter[i] = 0x80000000 - ctr[i].count;
  149. }
  150. }
/* Program all of the registers in preparation for enabling profiling. */
static void mipsxx_cpu_setup(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	/*
	 * Program counters N-1 down to 0: control cleared, counter
	 * preloaded. Each case deliberately falls through to the next.
	 */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(reg.counter[3]);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(reg.counter[2]);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(reg.counter[1]);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(reg.counter[0]);
	}
}
/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	/*
	 * Write the precomputed control words (plus the MT VPE filter bits
	 * in WHAT). Deliberate fallthrough from counter N-1 down to 0.
	 */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(WHAT | reg.control[3]);
		/* fall through */
	case 3:
		w_c0_perfctrl2(WHAT | reg.control[2]);
		/* fall through */
	case 2:
		w_c0_perfctrl1(WHAT | reg.control[1]);
		/* fall through */
	case 1:
		w_c0_perfctrl0(WHAT | reg.control[0]);
	}
}
/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	/* Clear every control register; deliberate fallthrough. */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
	}
}
/*
 * Overflow handler, installed as perf_irq and called from
 * mipsxx_perfcount_int(). Returns IRQ_HANDLED if at least one counter
 * had overflowed and a sample was recorded, IRQ_NONE otherwise.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/* On R2 cores Cause.PCI says whether this interrupt is ours at all. */
	if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
		return handled;

	/*
	 * Check counters from the highest implemented one downwards.
	 * HANDLE_COUNTER expands to a case label with no break, so each
	 * case deliberately falls through to the next lower counter.
	 * An overflowed counter is reloaded with its start value.
	 */
	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&		\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	return handled;
}
  232. #define M_CONFIG1_PC (1 << 4)
  233. static inline int __n_counters(void)
  234. {
  235. if (!(read_c0_config1() & M_CONFIG1_PC))
  236. return 0;
  237. if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
  238. return 1;
  239. if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
  240. return 2;
  241. if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
  242. return 3;
  243. return 4;
  244. }
  245. static inline int n_counters(void)
  246. {
  247. int counters;
  248. switch (current_cpu_type()) {
  249. case CPU_R10000:
  250. counters = 2;
  251. break;
  252. case CPU_R12000:
  253. case CPU_R14000:
  254. case CPU_R16000:
  255. counters = 4;
  256. break;
  257. default:
  258. counters = __n_counters();
  259. }
  260. return counters;
  261. }
/*
 * Clear control and counter registers on this CPU; the counter count is
 * smuggled through the void* cross-call argument.
 */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* Deliberate fallthrough from counter N-1 down to 0. */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
  280. static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
  281. {
  282. return mipsxx_perfcount_handler();
  283. }
/*
 * Probe the CPU, reset its counters, pick the cpu_type string reported
 * to oprofile, install the overflow handler and (when available) claim
 * the performance counter interrupt. Returns 0 or a negative errno.
 */
static int __init mipsxx_init(void)
{
	int counters;

	counters = n_counters();
	if (counters == 0) {
		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	/* Config7 bit 19: per-TC performance counters — NOTE(review): confirm against the core's PRA. */
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	op_model_mipsxx_ops.num_counters = counters;
	switch (current_cpu_type()) {
	case CPU_M14KC:
		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
		break;
	case CPU_M14KEC:
		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
		break;
	case CPU_20KC:
		op_model_mipsxx_ops.cpu_type = "mips/20K";
		break;
	case CPU_24K:
		op_model_mipsxx_ops.cpu_type = "mips/24K";
		break;
	case CPU_25KF:
		op_model_mipsxx_ops.cpu_type = "mips/25K";
		break;
	case CPU_1004K:
	case CPU_34K:
		op_model_mipsxx_ops.cpu_type = "mips/34K";
		break;
	case CPU_1074K:
	case CPU_74K:
		op_model_mipsxx_ops.cpu_type = "mips/74K";
		break;
	case CPU_INTERAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
		break;
	case CPU_PROAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
		break;
	case CPU_P5600:
		op_model_mipsxx_ops.cpu_type = "mips/P5600";
		break;
	case CPU_I6400:
		op_model_mipsxx_ops.cpu_type = "mips/I6400";
		break;
	case CPU_M5150:
		op_model_mipsxx_ops.cpu_type = "mips/M5150";
		break;
	case CPU_5KC:
		op_model_mipsxx_ops.cpu_type = "mips/5K";
		break;
	case CPU_R10000:
		/* Revision 0x20 is reported separately as v2.x. */
		if ((current_cpu_data.processor_id & 0xff) == 0x20)
			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
		else
			op_model_mipsxx_ops.cpu_type = "mips/r10000";
		break;
	case CPU_R12000:
	case CPU_R14000:
		op_model_mipsxx_ops.cpu_type = "mips/r12000";
		break;
	case CPU_R16000:
		op_model_mipsxx_ops.cpu_type = "mips/r16000";
		break;
	case CPU_SB1:
	case CPU_SB1A:
		op_model_mipsxx_ops.cpu_type = "mips/sb1";
		break;
	case CPU_LOONGSON1:
		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
		break;
	case CPU_XLR:
		op_model_mipsxx_ops.cpu_type = "mips/xlr";
		break;
	default:
		printk(KERN_ERR "Profiling unsupported for this CPU\n");
		return -ENODEV;
	}

	/* Hook the counter overflow path; the old handler is restored on exit. */
	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;

	/* Prefer the platform hook, then the CP0 irq; -1 means no irq at all. */
	if (get_c0_perfcount_int)
		perfcount_irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		perfcount_irq = -1;

	/*
	 * save_perf_irq doubles as the dev_id cookie required for a shared
	 * irq; mipsxx_exit() passes the same pointer to free_irq().
	 */
	if (perfcount_irq >= 0)
		return request_irq(perfcount_irq, mipsxx_perfcount_int,
				   IRQF_PERCPU | IRQF_NOBALANCING |
				   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				   IRQF_SHARED,
				   "Perfcounter", save_perf_irq);

	return 0;
}
/*
 * Tear down: release the irq, clear every hardware counter on all CPUs,
 * then restore the saved perf_irq handler.
 */
static void mipsxx_exit(void)
{
	int counters = op_model_mipsxx_ops.num_counters;

	/* Must match the dev_id passed to request_irq() in mipsxx_init(). */
	if (perfcount_irq >= 0)
		free_irq(perfcount_irq, save_perf_irq);

	/* reset_counters() wants the per-core total, not the per-VPE count. */
	counters = counters_per_cpu_to_total(counters);
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	perf_irq = save_perf_irq;
}
/* Model exported to the oprofile core; these hooks drive all profiling. */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};