  1. /*
  2. * ftrace graph code
  3. *
  4. * Copyright (C) 2009-2010 Analog Devices Inc.
  5. * Licensed under the GPL-2 or later.
  6. */
  7. #include <linux/ftrace.h>
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/atomic.h>
  12. #include <asm/cacheflush.h>
  13. #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Two 32-bit MNOP instruction encodings — together they blot out the
 * 8-byte mcount call sequence when a call site is disabled.
 */
static const unsigned char mnop[] = {
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
};
  18. static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
  19. unsigned long dst)
  20. {
  21. uint32_t pcrel = (dst - src) >> 1;
  22. insn[0] = pcrel >> 16;
  23. insn[1] = 0xe3;
  24. insn[2] = pcrel;
  25. insn[3] = pcrel >> 8;
  26. }
  27. #define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
  28. static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
  29. unsigned long len)
  30. {
  31. int ret = probe_kernel_write((void *)ip, (void *)code, len);
  32. flush_icache_range(ip, ip + len);
  33. return ret;
  34. }
  35. int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  36. unsigned long addr)
  37. {
  38. /* Turn the mcount call site into two MNOPs as those are 32bit insns */
  39. return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
  40. }
  41. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  42. {
  43. /* Restore the mcount call site */
  44. unsigned char call[8];
  45. call[0] = 0x67; /* [--SP] = RETS; */
  46. call[1] = 0x01;
  47. bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
  48. call[6] = 0x27; /* RETS = [SP++]; */
  49. call[7] = 0x01;
  50. return ftrace_modify_code(rec->ip, call, sizeof(call));
  51. }
  52. int ftrace_update_ftrace_func(ftrace_func_t func)
  53. {
  54. unsigned char call[4];
  55. unsigned long ip = (unsigned long)&ftrace_call;
  56. bfin_make_pcrel24(call, ip, func);
  57. return ftrace_modify_code(ip, call, sizeof(call));
  58. }
/* No arch-specific setup is needed for dynamic ftrace on this port. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
  63. #endif
  64. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  65. # ifdef CONFIG_DYNAMIC_FTRACE
  66. extern void ftrace_graph_call(void);
  67. int ftrace_enable_ftrace_graph_caller(void)
  68. {
  69. unsigned long ip = (unsigned long)&ftrace_graph_call;
  70. uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
  71. jump_pcrel12 |= 0x2000;
  72. return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
  73. }
  74. int ftrace_disable_ftrace_graph_caller(void)
  75. {
  76. return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
  77. }
  78. # endif
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.  @parent points at the saved RETS slot,
 * @self_addr is the traced function's address.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* Do nothing while graph tracing is paused for this task. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* Save the real return address; -EBUSY means it could not be pushed. */
	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY)
		return;

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		/* Entry filter rejected us: pop the frame pushed above. */
		current->curr_ret_stack--;
		return;
	}

	/* all is well in the world ! hijack RETS ... */
	*parent = return_hooker;
}
  102. #endif