/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;

        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}
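
/*
 * Per-function callback for the plain "function" tracer: record ip and
 * parent_ip into this trace_array's ring buffer, bailing out if the
 * instance is not enabled, if we are recursing, or if the per-CPU
 * buffer is marked disabled.
 */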
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
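
/*
 * function_enabled gates the callbacks above: keep it clear while the
 * ftrace_ops is being registered so that any hits arriving mid-way are
 * ignored, and only set it once registration has completed.
 */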
static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops->func = function_stack_trace_call;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops->func = function_trace_call;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name            = "function",
        .init            = function_trace_init,
        .reset           = function_trace_reset,
        .start           = function_trace_start,
        .flags           = &func_flags,
        .set_flag        = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
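
/*
 * The probes below back the traceon/traceoff/stacktrace/dump/cpudump
 * commands written to set_ftrace_filter, e.g.
 * "echo 'schedule:traceoff:5' > set_ftrace_filter".
 */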
static void update_traceon_count(void **data, bool on)
{
        long *count = (long *)data;
        long old_count = *count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before changing the counter by
         * one minus the old counter. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and would not do anything if the new
         * counter is seen.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        if (!old_count)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracing_is_on())
                return;

        if (on)
                tracing_on();
        else
                tracing_off();

        /* unlimited? */
        if (old_count == -1)
                return;

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;
        long old_count;
        long new_count;

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {
                if (!tracing_is_on())
                        return;

                old_count = *count;
                if (!old_count)
                        return;

                /* unlimited? */
                if (old_count == -1) {
                        trace_dump_stack(STACK_SKIP);
                        return;
                }

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_dump_stack(STACK_SKIP);
        } while (new_count != old_count);
}

static int update_count(void **data)
{
        unsigned long *count = (long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_puts(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func  = ftrace_traceon_count,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func  = ftrace_traceoff_count,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func  = ftrace_stacktrace_count,
        .print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func  = ftrace_dump_probe,
        .print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func  = ftrace_cpudump_probe,
        .print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func  = ftrace_traceon,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func  = ftrace_traceoff,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func  = ftrace_stacktrace,
        .print = ftrace_stacktrace_print,
};
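
/*
 * Common helper for the command callbacks below: handle "!func" to
 * unregister an existing probe, parse the optional numeric count that
 * follows the command, and register the probe with that count
 * (-1 means unlimited).
 */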
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}
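
/* Map the set_ftrace_filter command names to their callbacks. */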
static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name = "stacktrace",
        .func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name = "dump",
        .func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name = "cpudump",
        .func = ftrace_cpudump_callback,
};
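
/*
 * Register all of the commands above, unwinding the ones already
 * registered if a later registration fails.
 */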
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);