entry-ftrace.S

/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

/*
 * Gcc with -pg will put the following code in the beginning of each function:
 *      mov x0, x30
 *      bl _mcount
 *      [function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get lr(x30) of the instrumented function at any time by winding up the
 * call stack, as long as the kernel is compiled without -fomit-frame-pointer
 * (or with CONFIG_FRAME_POINTER, which is forced on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */
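
/*
 * Note that the "bl _mcount" above is typically emitted only after the
 * instrumented function has set up its own frame record, which is why
 * winding up the x29 chain from _mcount() lands on the layout shown above.
 */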

        .macro mcount_enter
        stp     x29, x30, [sp, #-16]!   // create _mcount()'s own frame record
        mov     x29, sp
        .endm

        .macro mcount_exit
        ldp     x29, x30, [sp], #16     // tear the frame down and return
        ret
        .endm

        .macro mcount_adjust_addr rd, rn
        sub     \rd, \rn, #AARCH64_INSN_SIZE    // step back to the bl itself
        .endm

        /* for instrumented function's parent */
        .macro mcount_get_parent_fp reg
        ldr     \reg, [x29]
        ldr     \reg, [\reg]
        .endm

        /* for instrumented function */
        .macro mcount_get_pc0 reg
        mcount_adjust_addr      \reg, x30
        .endm

        .macro mcount_get_pc reg
        ldr     \reg, [x29, #8]
        mcount_adjust_addr      \reg, \reg
        .endm

        .macro mcount_get_lr reg
        ldr     \reg, [x29]
        ldr     \reg, [\reg, #8]
        .endm

        .macro mcount_get_lr_addr reg
        ldr     \reg, [x29]
        add     \reg, \reg, #8
        .endm
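
/*
 * In rough C terms (just a sketch; offsets follow the stack layout above,
 * with fp being _mcount()'s x29), the accessor macros compute:
 *
 *      mcount_get_parent_fp:   *(unsigned long *)(*(unsigned long *)fp)
 *      mcount_get_pc0:         x30 - AARCH64_INSN_SIZE
 *      mcount_get_pc:          *(unsigned long *)(fp + 8) - AARCH64_INSN_SIZE
 *      mcount_get_lr:          *(unsigned long *)(*(unsigned long *)fp + 8)
 *      mcount_get_lr_addr:     *(unsigned long *)fp + 8
 *
 * i.e. they walk one or two frame records to recover the instrumented
 * function's pc, its caller's pc, and the slot holding the saved lr.
 */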

#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
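/*
 * Gathering the inline comments below into one place, the control flow is
 * roughly the following C (a sketch only; pc and lr stand for the values
 * read by mcount_get_pc and mcount_get_lr):
 *
 *      void _mcount(unsigned long return_address)
 *      {
 *              if (ftrace_trace_function != ftrace_stub) {
 *                      (*ftrace_trace_function)(pc, lr);
 *                      return;
 *              }
 *      #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 *              if (ftrace_graph_return != ftrace_stub ||
 *                  ftrace_graph_entry != ftrace_graph_entry_stub)
 *                      ftrace_graph_caller();
 *      #endif
 *      }
 */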
ENTRY(_mcount)
        mcount_enter

        adrp    x0, ftrace_trace_function
        ldr     x2, [x0, #:lo12:ftrace_trace_function]
        adr     x0, ftrace_stub
        cmp     x0, x2                  // if (ftrace_trace_function
        b.eq    skip_ftrace_call        //     != ftrace_stub) {

        mcount_get_pc   x0              //       function's pc
        mcount_get_lr   x1              //       function's lr (= parent's pc)
        blr     x2                      //   (*ftrace_trace_function)(pc, lr);

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
skip_ftrace_call:                       //   return;
        mcount_exit                     // }
#else
        mcount_exit                     //   return;
                                        // }
skip_ftrace_call:
        adrp    x1, ftrace_graph_return
        ldr     x2, [x1, #:lo12:ftrace_graph_return]
        cmp     x0, x2                  //   if ((ftrace_graph_return
        b.ne    ftrace_graph_caller     //        != ftrace_stub)
        adrp    x1, ftrace_graph_entry  //       || (ftrace_graph_entry
        adrp    x0, ftrace_graph_entry_stub //       != ftrace_graph_entry_stub))
        ldr     x2, [x1, #:lo12:ftrace_graph_entry]
        add     x0, x0, #:lo12:ftrace_graph_entry_stub
        cmp     x0, x2
        b.ne    ftrace_graph_caller     //     ftrace_graph_caller();
        mcount_exit
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
ENDPROC(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is used to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are replaced with NOPs at kernel start
 * up, and later on each NOP is turned into a branch to ftrace_caller()
 * when tracing is enabled, or back into a NOP when it is disabled, on a
 * per-function basis.
 */
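/*
 * Roughly speaking (the details live in arch/arm64/kernel/ftrace.c and the
 * generic ftrace core, not here): the build records every _mcount call
 * site, the core nops them all out early during boot, and when a function
 * is selected for tracing its NOP is rewritten into "bl ftrace_caller".
 * The _mcount() below only exists so that a call site that has not been
 * patched yet is a harmless no-op.
 */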
ENTRY(_mcount)
        ret
ENDPROC(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
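/*
 * In C terms ftrace_caller() amounts to roughly the following (a sketch
 * only; the actual call target is whatever the ftrace core patches into
 * the ftrace_call site below):
 *
 *      void ftrace_caller(unsigned long return_address)
 *      {
 *              tracer(instrumented_pc, parent_pc);     // patched-in "bl"
 *      #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 *              ftrace_graph_caller();  // reached via the patched-in "b"
 *      #endif
 *      }
 */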
ENTRY(ftrace_caller)
        mcount_enter

        mcount_get_pc0  x0              // function's pc
        mcount_get_lr   x1              // function's lr

        .global ftrace_call
ftrace_call:                            // tracer(pc, lr);
        nop                             // This will be replaced with "bl xxx"
                                        // where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .global ftrace_graph_call
ftrace_graph_call:                      // ftrace_graph_caller();
        nop                             // If enabled, this will be replaced
                                        // with "b ftrace_graph_caller"
#endif

        mcount_exit
ENDPROC(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */
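
/*
 * ftrace_stub is the "no tracer registered" default: function pointers such
 * as ftrace_trace_function and ftrace_graph_return point here when tracing
 * is off, which is what the comparisons in _mcount() above test for, and
 * calling it is a plain return.
 */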
ENTRY(ftrace_stub)
        ret
ENDPROC(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* save return value regs */
        .macro save_return_regs
        sub     sp, sp, #64
        stp     x0, x1, [sp]
        stp     x2, x3, [sp, #16]
        stp     x4, x5, [sp, #32]
        stp     x6, x7, [sp, #48]
        .endm

        /* restore return value regs */
        .macro restore_return_regs
        ldp     x0, x1, [sp]
        ldp     x2, x3, [sp, #16]
        ldp     x4, x5, [sp, #32]
        ldp     x6, x7, [sp, #48]
        add     sp, sp, #64
        .endm
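
/*
 * x0-x7 are saved and restored here because return_to_handler() runs on the
 * instrumented function's return path: under the AArch64 procedure call
 * standard those registers may carry the function's return value, and the
 * call into ftrace_return_to_handler() below could otherwise clobber them.
 */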

/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * This function, together with prepare_ftrace_return(), fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later on its exit.
 */
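/*
 * On the C side (see arch/arm64/kernel/ftrace.c) prepare_ftrace_return()
 * has roughly the form
 *
 *      void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 *                                 unsigned long frame_pointer);
 *
 * it records the original *parent on the per-task return stack and then
 * overwrites *parent, i.e. the saved-lr slot passed in x0 below, with the
 * address of return_to_handler(), so that the instrumented function
 * "returns" into the handler instead of its real caller.
 */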
ENTRY(ftrace_graph_caller)
        mcount_get_lr_addr      x0      // pointer to function's saved lr
        mcount_get_pc           x1      // function's pc
        mcount_get_parent_fp    x2      // parent's fp
        bl      prepare_ftrace_return   // prepare_ftrace_return(&lr, pc, fp)

        mcount_exit
ENDPROC(ftrace_graph_caller)

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller()
 * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
 */
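/*
 * The generic helper called below has roughly the form
 *
 *      unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 *
 * it pops the entry made by prepare_ftrace_return() off the per-task return
 * stack, runs the graph tracer's exit hook, and hands back the original
 * return address, which is then restored into x30 before the final ret.
 */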
ENTRY(return_to_handler)
        save_return_regs
        mov     x0, x29                 // parent's fp
        bl      ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
        mov     x30, x0                 // restore the original return address
        restore_return_regs
        ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */