kprobes.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523
  1. /*
  2. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #include <linux/types.h>
  9. #include <linux/kprobes.h>
  10. #include <linux/slab.h>
  11. #include <linux/module.h>
  12. #include <linux/kdebug.h>
  13. #include <linux/sched.h>
  14. #include <linux/uaccess.h>
  15. #include <asm/cacheflush.h>
  16. #include <asm/current.h>
  17. #include <asm/disasm.h>
/*
 * How much of the current kernel stack lies at/above @addr, capped at
 * MAX_STACK_SIZE — the amount saved/restored around a jprobe call.
 */
#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
		(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

/* Per-CPU kprobe state: the probe currently being handled + its control block */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
  22. int __kprobes arch_prepare_kprobe(struct kprobe *p)
  23. {
  24. /* Attempt to probe at unaligned address */
  25. if ((unsigned long)p->addr & 0x01)
  26. return -EINVAL;
  27. /* Address should not be in exception handling code */
  28. p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
  29. p->opcode = *p->addr;
  30. return 0;
  31. }
  32. void __kprobes arch_arm_kprobe(struct kprobe *p)
  33. {
  34. *p->addr = UNIMP_S_INSTRUCTION;
  35. flush_icache_range((unsigned long)p->addr,
  36. (unsigned long)p->addr + sizeof(kprobe_opcode_t));
  37. }
  38. void __kprobes arch_disarm_kprobe(struct kprobe *p)
  39. {
  40. *p->addr = p->opcode;
  41. flush_icache_range((unsigned long)p->addr,
  42. (unsigned long)p->addr + sizeof(kprobe_opcode_t));
  43. }
  44. void __kprobes arch_remove_kprobe(struct kprobe *p)
  45. {
  46. arch_disarm_kprobe(p);
  47. /* Can we remove the kprobe in the middle of kprobe handling? */
  48. if (p->ainsn.t1_addr) {
  49. *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
  50. flush_icache_range((unsigned long)p->ainsn.t1_addr,
  51. (unsigned long)p->ainsn.t1_addr +
  52. sizeof(kprobe_opcode_t));
  53. p->ainsn.t1_addr = NULL;
  54. }
  55. if (p->ainsn.t2_addr) {
  56. *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
  57. flush_icache_range((unsigned long)p->ainsn.t2_addr,
  58. (unsigned long)p->ainsn.t2_addr +
  59. sizeof(kprobe_opcode_t));
  60. p->ainsn.t2_addr = NULL;
  61. }
  62. }
  63. static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
  64. {
  65. kcb->prev_kprobe.kp = kprobe_running();
  66. kcb->prev_kprobe.status = kcb->kprobe_status;
  67. }
  68. static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
  69. {
  70. __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
  71. kcb->kprobe_status = kcb->prev_kprobe.status;
  72. }
/* Mark @p as the kprobe being handled on this CPU */
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
  77. static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
  78. struct pt_regs *regs)
  79. {
  80. /* Remove the trap instructions inserted for single step and
  81. * restore the original instructions
  82. */
  83. if (p->ainsn.t1_addr) {
  84. *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
  85. flush_icache_range((unsigned long)p->ainsn.t1_addr,
  86. (unsigned long)p->ainsn.t1_addr +
  87. sizeof(kprobe_opcode_t));
  88. p->ainsn.t1_addr = NULL;
  89. }
  90. if (p->ainsn.t2_addr) {
  91. *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
  92. flush_icache_range((unsigned long)p->ainsn.t2_addr,
  93. (unsigned long)p->ainsn.t2_addr +
  94. sizeof(kprobe_opcode_t));
  95. p->ainsn.t2_addr = NULL;
  96. }
  97. return;
  98. }
/*
 * Single-step the probed instruction "in place": put the original opcode
 * back at p->addr, then plant TRAP_S 2 at the next PC — and at the branch
 * target too, if the instruction is a branch — so control returns to us
 * (arc_post_kprobe_handler) right after it executes.
 */
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the opcode back to the kprobe location and execute the
	 * instruction. Because of this we will not be able to get into the
	 * same kprobe until this kprobe is done
	 */
	*(p->addr) = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction to
	 * single step. If it is a branch we insert the trap at possible branch
	 * targets
	 */
	bta = regs->bta;

	/* NOTE(review): 0x40 presumably is the STATUS32 delay-slot flag — confirm
	 * against the ARC PRM */
	if (regs->status32 & 0x40) {
		/* We are in a delay slot with the branch taken */

		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	/* Plant the step trap at the fall-through PC, saving what was there */
	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	/* ... and a second trap at the taken-branch target, if any */
	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}
/*
 * Entry point for the instruction-error trap raised by the planted
 * UNIMP_S breakpoint.  Returns 1 if the trap was consumed by kprobes,
 * 0 to let the normal exception path handle it.
 */
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	/* Stays disabled until the single-step completes (or we bail below) */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have reentered the kprobe_handler, since another kprobe
		 * was hit while within the handler, we save the original
		 * kprobes and single step on the instruction of the new probe
		 * without calling any user handlers to avoid recursive
		 * kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue with
		 * normal processing. If we have a pre-handler and it returned
		 * non-zero - which is expected from setjmp_pre_handler for
		 * jprobe, we return without single stepping and leave that to
		 * the break-handler which is invoked by a kprobe from
		 * jprobe_return
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		}

		return 1;
	} else if (kprobe_running()) {
		/* No probe at this address but one is active: give its
		 * break_handler (jprobe longjmp path) a chance to claim it */
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
			return 1;
		}
	}

	/* no_kprobe: not ours — undo the preempt_disable() and pass it on */
	preempt_enable_no_resched();
	return 0;
}
/*
 * Entry point for the TRAP_S hit right after the probed instruction was
 * single-stepped: remove the step traps, re-arm the probe and run the
 * post handler.  Returns 1 if consumed by kprobes, 0 otherwise.
 */
static int __kprobes arc_post_kprobe_handler(unsigned long addr,
					     struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * When we return from the trap instruction we go to the next
	 * instruction.  resume_execution() restored the original instruction
	 * at @addr, so return to that same address and execute it.
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* A reentrant hit restores the interrupted probe instead of clearing */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	/* Balances the preempt_disable() in arc_kprobe_handler() */
	preempt_enable_no_resched();
	return 1;
}
/*
 * Fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This is applicable for applications like user probes, where we have the
 * probe in user space and the handlers in the kernel
 *
 * Returns 1 if the fault was handled here, 0 to let the regular fault
 * path (e.g. do_page_fault) deal with it.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and allow the
		 * exception handler as if it is regular exception. In our
		 * case it doesn't matter because the system will be halted
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because the instructions in the pre/post handler
		 * caused the fault.
		 */

		/* We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}
	return 0;
}
  293. int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
  294. unsigned long val, void *data)
  295. {
  296. struct die_args *args = data;
  297. unsigned long addr = args->err;
  298. int ret = NOTIFY_DONE;
  299. switch (val) {
  300. case DIE_IERR:
  301. if (arc_kprobe_handler(addr, args->regs))
  302. return NOTIFY_STOP;
  303. break;
  304. case DIE_TRAP:
  305. if (arc_post_kprobe_handler(addr, args->regs))
  306. return NOTIFY_STOP;
  307. break;
  308. default:
  309. break;
  310. }
  311. return ret;
  312. }
  313. int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
  314. {
  315. struct jprobe *jp = container_of(p, struct jprobe, kp);
  316. struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
  317. unsigned long sp_addr = regs->sp;
  318. kcb->jprobe_saved_regs = *regs;
  319. memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
  320. regs->ret = (unsigned long)(jp->entry);
  321. return 1;
  322. }
  323. void __kprobes jprobe_return(void)
  324. {
  325. __asm__ __volatile__("unimp_s");
  326. return;
  327. }
  328. int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
  329. {
  330. struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
  331. unsigned long sp_addr;
  332. *regs = kcb->jprobe_saved_regs;
  333. sp_addr = regs->sp;
  334. memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
  335. preempt_enable_no_resched();
  336. return 1;
  337. }
/* Emits the kretprobe_trampoline symbol (a single nop) that return
 * addresses are redirected to; a kprobe on it fires on function return */
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global kretprobe_trampoline\n"
			     "kretprobe_trampoline:\n" "nop\n");
}
/* Save the real return address (blink) in @ri, then hijack blink so the
 * probed function "returns" into the kretprobe trampoline */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	/* Must save blink before overwriting it below */
	ri->ret_addr = (kprobe_opcode_t *) regs->blink;

	/* Replace the return addr with trampoline addr */
	regs->blink = (unsigned long)&kretprobe_trampoline;
}
/*
 * Pre-handler for the probe planted on kretprobe_trampoline: runs the
 * user's return handlers for this task, restores the real return address
 * into regs->ret and recycles the used instances.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more than one return
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	/* Resume at the real return address instead of the trampoline */
	regs->ret = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	/* Free the instances recycled above, outside the hash lock */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* By returning a non zero value, we are telling the kprobe handler
	 * that we don't want the post_handler to run
	 */
	return 1;
}
/* The kprobe placed on kretprobe_trampoline to intercept function returns */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
/* Arch-specific kprobes init: returns 0 on success, negative errno on failure */
int __init arch_init_kprobes(void)
{
	/* Registering the trampoline code for the kret probe */
	return register_kprobe(&trampoline_p);
}
  413. int __kprobes arch_trampoline_kprobe(struct kprobe *p)
  414. {
  415. if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
  416. return 1;
  417. return 0;
  418. }
/* Called from the trap path: route a TRAP_S at @address through the die
 * notifier chain so kprobe_exceptions_notify() can claim it */
void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
	notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}