/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched lazily with NOP till they are
 * enabled. All code mutation routines here take effect atomically.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/patch.h>
/*
 * In IA64, each function compiled with -pg begins with the two-bundle
 * mcount prologue below.  This template is what we expect to find at a
 * not-yet-converted call site; only the br.call displacement differs
 * per site (it is masked off before comparison, see
 * ftrace_make_nop_check()).
 */
static unsigned char __attribute__((aligned(8)))
ftrace_orig_code[MCOUNT_INSN_SIZE] = {
	0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
	0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
	0x05, 0x00, 0xc4, 0x00,             /* mov r42=b0 */
	0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
	0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
	0x08, 0x00, 0x00, 0x50              /* br.call.sptk.many b0 = _mcount;; */
};
/*
 * Overlay for the two bundles of ftrace_orig_code.  Only the fields of
 * the trailing br.call that vary per call site (the 21-bit signed
 * displacement: imm20 + sign) are named; everything else is dummy
 * padding so those bits land at the right positions.  Used by
 * ftrace_make_nop_check() to blank out the site-specific displacement
 * before comparing against the template.
 */
struct ftrace_orig_insn {
	u64 dummy1, dummy2, dummy3;	/* first bundle + start of second */
	u64 dummy4:64-41+13;		/* pad up to the br.call imm20 field */
	u64 imm20:20;			/* br.call displacement, low 20 bits */
	u64 dummy5:3;
	u64 sign:1;			/* br.call displacement sign bit */
	u64 dummy6:4;
};
/*
 * Replacement for a disabled mcount call site: same two-bundle shape
 * as the original prologue, but everything after "mov r3=ip" is NOPs,
 * so tracing costs almost nothing when off.  The [MLX] second bundle
 * is the one later rewritten into a brl by ftrace_call_replace().
 */
static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
	0x00, 0x00, 0x04, 0x00
};
  44. static unsigned char *ftrace_nop_replace(void)
  45. {
  46. return ftrace_nop_code;
  47. }
/*
 * mcount stub will be converted below for call.
 * Note: only the last instruction differs from ftrace_nop_code — the
 * trailing nop.x is replaced by a long-branch "brl" whose displacement
 * bits are all-ones here and get filled in per call site by
 * ftrace_call_replace().
 */
static unsigned char __attribute__((aligned(8)))
ftrace_call_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
	0xf8, 0xff, 0xff, 0xc8
};
/*
 * Overlay for the two bundles of ftrace_call_code.  Names the pieces
 * of the brl's scattered 60-bit IP-relative displacement (imm39 split
 * across a 64-bit boundary, plus imm20 and the sign/i bit); dummy
 * fields pad the named ones into position.  ftrace_call_replace()
 * writes the displacement through these fields, and
 * ftrace_make_nop_check() copies them from live text to mask them off
 * before comparison.
 */
struct ftrace_call_insn {
	u64 dummy1, dummy2;	/* first bundle */
	u64 dummy3:48;
	u64 imm39_l:16;		/* displacement bits, low part of imm39 */
	u64 imm39_h:23;		/* displacement bits, high part of imm39 */
	u64 dummy4:13;
	u64 imm20:20;		/* displacement bits, imm20 field */
	u64 dummy5:3;
	u64 i:1;		/* displacement sign bit */
	u64 dummy6:4;
};
  72. static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  73. {
  74. struct ftrace_call_insn *code = (void *)ftrace_call_code;
  75. unsigned long offset = addr - (ip + 0x10);
  76. code->imm39_l = offset >> 24;
  77. code->imm39_h = offset >> 40;
  78. code->imm20 = offset >> 4;
  79. code->i = offset >> 63;
  80. return ftrace_call_code;
  81. }
  82. static int
  83. ftrace_modify_code(unsigned long ip, unsigned char *old_code,
  84. unsigned char *new_code, int do_check)
  85. {
  86. unsigned char replaced[MCOUNT_INSN_SIZE];
  87. /*
  88. * Note: Due to modules and __init, code can
  89. * disappear and change, we need to protect against faulting
  90. * as well as code changing. We do this by using the
  91. * probe_kernel_* functions.
  92. *
  93. * No real locking needed, this code is run through
  94. * kstop_machine, or before SMP starts.
  95. */
  96. if (!do_check)
  97. goto skip_check;
  98. /* read the text we want to modify */
  99. if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
  100. return -EFAULT;
  101. /* Make sure it is what we expect it to be */
  102. if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
  103. return -EINVAL;
  104. skip_check:
  105. /* replace the text with the new text */
  106. if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
  107. return -EPERM;
  108. flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
  109. return 0;
  110. }
/*
 * Verify that the text at rec->ip is in a known-good state before it
 * is NOPed out.  Depending on whether the site was already converted
 * (FTRACE_FL_CONVERTED), it must match either the ftrace call
 * template or the original compiler-emitted mcount prologue.
 *
 * The per-site branch displacement bits differ at every call site, so
 * before the memcmp() they are copied from the live text into the
 * static template via the overlay structs — the comparison then only
 * checks the invariant instruction bits.  Note this mutates the
 * shared templates; safe here because conversion runs serialized
 * (kstop_machine / pre-SMP, see ftrace_modify_code()).
 *
 * Returns 0 if the text matches, -EFAULT if it cannot be read,
 * -EINVAL on mismatch.
 */
static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
	unsigned long ip = rec->ip;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (rec->flags & FTRACE_FL_CONVERTED) {
		struct ftrace_call_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_call_code;
		tmp_call = (void *)replaced;
		/* Mask off the site-specific brl displacement. */
		call_insn->imm39_l = tmp_call->imm39_l;
		call_insn->imm39_h = tmp_call->imm39_h;
		call_insn->imm20 = tmp_call->imm20;
		call_insn->i = tmp_call->i;
		if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	} else {
		struct ftrace_orig_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_orig_code;
		tmp_call = (void *)replaced;
		/* Mask off the site-specific br.call displacement. */
		call_insn->sign = tmp_call->sign;
		call_insn->imm20 = tmp_call->imm20;
		if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	}
}
  139. int ftrace_make_nop(struct module *mod,
  140. struct dyn_ftrace *rec, unsigned long addr)
  141. {
  142. int ret;
  143. char *new;
  144. ret = ftrace_make_nop_check(rec, addr);
  145. if (ret)
  146. return ret;
  147. new = ftrace_nop_replace();
  148. return ftrace_modify_code(rec->ip, NULL, new, 0);
  149. }
  150. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  151. {
  152. unsigned long ip = rec->ip;
  153. unsigned char *old, *new;
  154. old= ftrace_nop_replace();
  155. new = ftrace_call_replace(ip, addr);
  156. return ftrace_modify_code(ip, old, new, 1);
  157. }
/* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	/*
	 * On ia64 a function pointer is a descriptor (struct fnptr);
	 * ->ip is the actual entry address of the ftrace_call patch
	 * site.
	 */
	unsigned long addr = ((struct fnptr *)ftrace_call)->ip;

	if (func == ftrace_stub)
		return 0;
	ip = ((struct fnptr *)func)->ip;

	/*
	 * Patch the 64-bit target immediate inside the ftrace_call
	 * bundle (addr + 2 is the offset ia64_patch_imm64 expects for
	 * the movl slot) and flush the 16-byte bundle.
	 */
	ia64_patch_imm64(addr + 2, ip);

	flush_icache_range(addr, addr + 16);
	return 0;
}
/* run from kstop_machine */
/*
 * Arch hook for dynamic-ftrace initialization; ia64 needs no extra
 * setup, so simply report success.
 */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}