/*
 * uprobes.c — user-space probes (uprobes) support for MIPS.
 */
  1. #include <linux/highmem.h>
  2. #include <linux/kdebug.h>
  3. #include <linux/types.h>
  4. #include <linux/notifier.h>
  5. #include <linux/sched.h>
  6. #include <linux/uprobes.h>
  7. #include <asm/branch.h>
  8. #include <asm/cpu-features.h>
  9. #include <asm/ptrace.h>
  10. #include <asm/inst.h>
/*
 * insn_has_delay_slot() - tell whether @insn has an architectural delay slot.
 *
 * Returns 1 when @insn is a branch or jump (so the following instruction
 * executes in its delay slot), 0 otherwise.  Callers use this to decide
 * whether the delay-slot instruction, rather than the branch itself, must
 * be single-stepped out of line.
 */
static inline int insn_has_delay_slot(const union mips_instruction insn)
{
	switch (insn.i_format.opcode) {
	/*
	 * jr and jalr are in r_format format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
		case jr_op:
			return 1;
		}
		break;
	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
		case bgez_op:
		case bgezl_op:
		case bltzal_op:
		case bltzall_op:
		case bgezal_op:
		case bgezall_op:
		case bposge32_op:
			return 1;
		}
		break;
	/*
	 * j/jal are unconditional j_format; the remaining branches here
	 * are conditional i_format encodings.
	 */
	case jal_op:
	case j_op:
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op: /* not really i_format */
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:
		return 1;
	/*
	 * And now the FPA/cp1 branch instructions.
	 */
	case cop1_op:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
	case ldc2_op: /* This is bbit032 on Octeon */
	case swc2_op: /* This is bbit1 on Octeon */
	case sdc2_op: /* This is bbit132 on Octeon */
#endif
		return 1;
	}

	return 0;
}
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @aup: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
	struct mm_struct *mm, unsigned long addr)
{
	union mips_instruction inst;

	/*
	 * For the time being this also blocks attempts to use uprobes with
	 * MIPS16 and microMIPS.
	 */
	if (addr & 0x03)
		return -EINVAL;

	inst.word = aup->insn[0];

	/*
	 * If the probed instruction has a delay slot, single-step insn[1]
	 * out of line instead of the branch itself (the branch is emulated
	 * in arch_uprobe_pre_xol()).  Presumably insn[1] holds the
	 * delay-slot instruction copied in by the generic layer — confirm
	 * against the arch_uprobe definition.
	 */
	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* NOP  */

	return 0;
}
  93. /**
  94. * is_trap_insn - check if the instruction is a trap variant
  95. * @insn: instruction to be checked.
  96. * Returns true if @insn is a trap variant.
  97. *
  98. * This definition overrides the weak definition in kernel/events/uprobes.c.
  99. * and is needed for the case where an architecture has multiple trap
  100. * instructions (like PowerPC or MIPS). We treat BREAK just like the more
  101. * modern conditional trap instructions.
  102. */
  103. bool is_trap_insn(uprobe_opcode_t *insn)
  104. {
  105. union mips_instruction inst;
  106. inst.word = *insn;
  107. switch (inst.i_format.opcode) {
  108. case spec_op:
  109. switch (inst.r_format.func) {
  110. case break_op:
  111. case teq_op:
  112. case tge_op:
  113. case tgeu_op:
  114. case tlt_op:
  115. case tltu_op:
  116. case tne_op:
  117. return 1;
  118. }
  119. break;
  120. case bcond_op: /* Yes, really ... */
  121. switch (inst.u_format.rt) {
  122. case teqi_op:
  123. case tgei_op:
  124. case tgeiu_op:
  125. case tlti_op:
  126. case tltiu_op:
  127. case tnei_op:
  128. return 1;
  129. }
  130. break;
  131. }
  132. return 0;
  133. }
  134. #define UPROBE_TRAP_NR ULONG_MAX
  135. /*
  136. * arch_uprobe_pre_xol - prepare to execute out of line.
  137. * @auprobe: the probepoint information.
  138. * @regs: reflects the saved user state of current task.
  139. */
  140. int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
  141. {
  142. struct uprobe_task *utask = current->utask;
  143. /*
  144. * Now find the EPC where to resume after the breakpoint has been
  145. * dealt with. This may require emulation of a branch.
  146. */
  147. aup->resume_epc = regs->cp0_epc + 4;
  148. if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
  149. unsigned long epc;
  150. epc = regs->cp0_epc;
  151. __compute_return_epc_for_insn(regs,
  152. (union mips_instruction) aup->insn[0]);
  153. aup->resume_epc = regs->cp0_epc;
  154. }
  155. utask->autask.saved_trap_nr = current->thread.trap_nr;
  156. current->thread.trap_nr = UPROBE_TRAP_NR;
  157. regs->cp0_epc = current->utask->xol_vaddr;
  158. return 0;
  159. }
  160. int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
  161. {
  162. struct uprobe_task *utask = current->utask;
  163. current->thread.trap_nr = utask->autask.saved_trap_nr;
  164. regs->cp0_epc = aup->resume_epc;
  165. return 0;
  166. }
  167. /*
  168. * If xol insn itself traps and generates a signal(Say,
  169. * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
  170. * instruction jumps back to its own address. It is assumed that anything
  171. * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
  172. *
  173. * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
  174. * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
  175. * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
  176. */
  177. bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
  178. {
  179. if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
  180. return true;
  181. return false;
  182. }
  183. int arch_uprobe_exception_notify(struct notifier_block *self,
  184. unsigned long val, void *data)
  185. {
  186. struct die_args *args = data;
  187. struct pt_regs *regs = args->regs;
  188. /* regs == NULL is a kernel bug */
  189. if (WARN_ON(!regs))
  190. return NOTIFY_DONE;
  191. /* We are only interested in userspace traps */
  192. if (!user_mode(regs))
  193. return NOTIFY_DONE;
  194. switch (val) {
  195. case DIE_BREAK:
  196. if (uprobe_pre_sstep_notifier(regs))
  197. return NOTIFY_STOP;
  198. break;
  199. case DIE_UPROBE_XOL:
  200. if (uprobe_post_sstep_notifier(regs))
  201. return NOTIFY_STOP;
  202. default:
  203. break;
  204. }
  205. return 0;
  206. }
  207. /*
  208. * This function gets called when XOL instruction either gets trapped or
  209. * the thread has a fatal signal. Reset the instruction pointer to its
  210. * probed address for the potential restart or for post mortem analysis.
  211. */
  212. void arch_uprobe_abort_xol(struct arch_uprobe *aup,
  213. struct pt_regs *regs)
  214. {
  215. struct uprobe_task *utask = current->utask;
  216. instruction_pointer_set(regs, utask->vaddr);
  217. }
  218. unsigned long arch_uretprobe_hijack_return_addr(
  219. unsigned long trampoline_vaddr, struct pt_regs *regs)
  220. {
  221. unsigned long ra;
  222. ra = regs->regs[31];
  223. /* Replace the return address with the trampoline address */
  224. regs->regs[31] = trampoline_vaddr;
  225. return ra;
  226. }
  227. /**
  228. * set_swbp - store breakpoint at a given address.
  229. * @auprobe: arch specific probepoint information.
  230. * @mm: the probed process address space.
  231. * @vaddr: the virtual address to insert the opcode.
  232. *
  233. * For mm @mm, store the breakpoint instruction at @vaddr.
  234. * Return 0 (success) or a negative errno.
  235. *
  236. * This version overrides the weak version in kernel/events/uprobes.c.
  237. * It is required to handle MIPS16 and microMIPS.
  238. */
  239. int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
  240. unsigned long vaddr)
  241. {
  242. return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
  243. }
  244. void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
  245. void *src, unsigned long len)
  246. {
  247. void *kaddr;
  248. /* Initialize the slot */
  249. kaddr = kmap_atomic(page);
  250. memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
  251. kunmap_atomic(kaddr);
  252. /*
  253. * The MIPS version of flush_icache_range will operate safely on
  254. * user space addresses and more importantly, it doesn't require a
  255. * VMA argument.
  256. */
  257. flush_icache_range(vaddr, vaddr + len);
  258. }
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 *
 * No offset correction is applied: the saved instruction pointer is
 * returned as-is (presumably EPC still points at the BREAK itself on
 * MIPS — confirm against the trap entry path).
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
  271. /*
  272. * See if the instruction can be emulated.
  273. * Returns true if instruction was emulated, false otherwise.
  274. *
  275. * For now we always emulate so this function just returns 0.
  276. */
  277. bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
  278. {
  279. return 0;
  280. }