kprobes.c

/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * Based on arch/ppc64/kernel/kprobes.c
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include <asm/cacheflush.h>
#include <linux/kdebug.h>
#include <asm/ocd.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
static unsigned long kprobe_status;
static struct pt_regs jprobe_saved_regs;

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;

	if ((unsigned long)p->addr & 0x01) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	}

	/* XXX: Might be a good idea to check if p->addr is a valid
	 * kernel address as well... (a sketch follows this function) */

	if (!ret) {
		pr_debug("copy kprobe at %p\n", p->addr);
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
	}

	return ret;
}
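
/*
 * A minimal sketch of the address check mentioned in the XXX note above
 * (an assumption, not part of this port): the generic kprobes core uses
 * kernel_text_address() for this kind of validation, roughly
 *
 *	if (!ret && !kernel_text_address((unsigned long)p->addr))
 *		ret = -EINVAL;
 */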

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	pr_debug("arming kprobe at %p\n", p->addr);
	ocd_enable(NULL);
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	pr_debug("disarming kprobe at %p\n", p->addr);
	ocd_disable(NULL);
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
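
/*
 * Single-stepping over the probed instruction is done with the On-Chip
 * Debug (OCD) hardware rather than by instruction emulation:
 * prepare_singlestep() sets the single-step bit in the DC register and
 * temporarily restores the original instruction in place, while
 * resume_execution() clears the bit and re-inserts the breakpoint.
 */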
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("preparing to singlestep over %p (PC=%08lx)\n",
		 p->addr, regs->pc);

	BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D)));

	dc = ocd_read(DC);
	dc |= 1 << OCD_DC_SS_BIT;
	ocd_write(DC, dc);

	/*
	 * We must run the instruction from its original location
	 * since it may actually reference PC.
	 *
	 * TODO: Do the instruction replacement directly in icache.
	 */
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("resuming execution at PC=%08lx\n", regs->pc);

	dc = ocd_read(DC);
	dc &= ~(1 << OCD_DC_SS_BIT);
	ocd_write(DC, dc);

	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
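
/*
 * Breakpoint entry point, reached via kprobe_exceptions_notify() on a
 * DIE_BREAKPOINT event: look up the kprobe for the faulting PC, run its
 * pre_handler, and arrange for the original instruction to be
 * single-stepped; the matching DIE_SSTEP event is then handled by
 * post_kprobe_handler() below.
 */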
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *)regs->pc;
	int ret = 0;

	pr_debug("kprobe_handler: kprobe_running=%p\n",
		 kprobe_running());

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();

	/* Check that we're not recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				printk("FIXME: kprobe hit while single-stepping!\n");
				goto no_kprobe;
			}
			printk("FIXME: kprobe hit while handling another kprobe\n");
			goto no_kprobe;
		} else {
			p = kprobe_running();
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		goto no_kprobe;

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	pr_debug("post_kprobe_handler, cur=%p\n", cur);

	if (!cur)
		return 0;

	if (cur->post_handler) {
		kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	reset_current_kprobe();
	preempt_enable_no_resched();

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();

	pr_debug("kprobe_fault_handler: trapnr=%d\n", trapnr);

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs);
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	pr_debug("kprobe_exceptions_notify: val=%lu, data=%p\n",
		 val, data);

	switch (val) {
	case DIE_BREAKPOINT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/*
	 * TODO: We should probably save some of the stack here as
	 * well, since gcc may pass arguments on the stack for certain
	 * functions (lots of arguments, large aggregates, varargs)
	 * (a sketch follows this function).
	 */

	/* setup return addr to the jprobe handler routine */
	regs->pc = (unsigned long)jp->entry;
	return 1;
}
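
/*
 * A rough sketch of the stack save suggested by the TODO above (an
 * assumption modelled on how other architectures implement jprobes, not
 * code from this port): copy a small window below the current stack
 * pointer into a static buffer here, and copy it back from the break
 * handler, e.g.
 *
 *	memcpy(jprobes_stack, (void *)regs->sp, MIN_STACK_SIZE(regs->sp));
 *
 * where jprobes_stack and MIN_STACK_SIZE() would be new, hypothetical
 * helpers for this port.
 */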

void __kprobes jprobe_return(void)
{
	asm volatile("breakpoint" ::: "memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs... (a sketch follows this function)
	 */
	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
	return 1;
}
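
/*
 * A sketch of the check described in the FIXME above (an assumption
 * modelled on other ports, not code from this file): verify that the
 * trapping PC falls inside jprobe_return() before restoring the saved
 * registers, roughly
 *
 *	if (regs->pc < (unsigned long)jprobe_return ||
 *	    regs->pc >= (unsigned long)jprobe_return_end)
 *		return 0;
 *
 * where jprobe_return_end would be a hypothetical label marking the end
 * of jprobe_return().
 */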

int __init arch_init_kprobes(void)
{
	/* TODO: Register kretprobe trampoline (a sketch follows) */
	return 0;
}
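
/*
 * A sketch of the registration mentioned in the TODO above (an assumption
 * following the pattern used by other architectures, not code from this
 * port): a kretprobe trampoline is a small assembly stub with a static
 * kprobe registered on it at init time, e.g.
 *
 *	static struct kprobe trampoline_p = {
 *		.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
 *		.pre_handler = trampoline_probe_handler,
 *	};
 *
 *	return register_kprobe(&trampoline_p);
 *
 * where kretprobe_trampoline and trampoline_probe_handler would still
 * have to be written for this port.
 */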