debug-monitors.c

/*
 * ARMv8 single-step debug support and mdscr context switching.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>

/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
	return cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
					   ID_AA64DFR0_DEBUGVER_SHIFT);
}

/*
 * MDSCR access routines.
 */
static void mdscr_write(u32 mdscr)
{
	unsigned long flags;

	local_dbg_save(flags);
	asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
	local_dbg_restore(flags);
}

static u32 mdscr_read(void)
{
	u32 mdscr;

	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
	return mdscr;
}

/*
 * Allow root to disable self-hosted debug from userspace.
 * This is useful if you want to connect an external JTAG debugger.
 */
static bool debug_enabled = true;

static int create_debug_debugfs_entry(void)
{
	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
	return 0;
}
fs_initcall(create_debug_debugfs_entry);
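
/*
 * Usage note (assuming debugfs is mounted in its usual location): with
 * CONFIG_DEBUG_FS, root can toggle this at runtime via
 * /sys/kernel/debug/debug_enabled, e.g. "echo 0 > .../debug_enabled"
 * before attaching an external debugger; the "nodebugmon" early
 * parameter below does the same at boot.
 */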

static int __init early_debug_disable(char *buf)
{
	debug_enabled = false;
	return 0;
}

early_param("nodebugmon", early_debug_disable);

/*
 * Keep track of debug users on each core.
 * The ref counts are per-cpu, so plain (non-atomic) counters are
 * sufficient.
 */
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);

void enable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, enable = 0;

	WARN_ON(preemptible());

	if (this_cpu_inc_return(mde_ref_count) == 1)
		enable = DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_inc_return(kde_ref_count) == 1)
		enable |= DBG_MDSCR_KDE;

	if (enable && debug_enabled) {
		mdscr = mdscr_read();
		mdscr |= enable;
		mdscr_write(mdscr);
	}
}

void disable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, disable = 0;

	WARN_ON(preemptible());

	if (this_cpu_dec_return(mde_ref_count) == 0)
		disable = ~DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_dec_return(kde_ref_count) == 0)
		disable &= ~DBG_MDSCR_KDE;

	if (disable) {
		mdscr = mdscr_read();
		mdscr &= disable;
		mdscr_write(mdscr);
	}
}
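
/*
 * Usage sketch (hypothetical caller, not part of this file): debug
 * clients such as the hw_breakpoint code pair these per CPU with
 * preemption disabled, e.g.:
 *
 *	preempt_disable();
 *	enable_debug_monitors(DBG_ACTIVE_EL0);
 *	...				(program the breakpoint registers)
 *	disable_debug_monitors(DBG_ACTIVE_EL0);
 *	preempt_enable();
 */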

/*
 * OS lock clearing.
 */
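/*
 * While the OS lock is set, self-hosted use of the debug hardware is
 * locked out, so clear it on each CPU at boot and again whenever a
 * CPU comes online.
 */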
static void clear_os_lock(void *unused)
{
	asm volatile("msr oslar_el1, %0" : : "r" (0));
}

static int os_lock_notify(struct notifier_block *self,
			  unsigned long action, void *data)
{
	int cpu = (unsigned long)data;

	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		smp_call_function_single(cpu, clear_os_lock, NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block os_lock_nb = {
	.notifier_call = os_lock_notify,
};

static int debug_monitors_init(void)
{
	cpu_notifier_register_begin();

	/* Clear the OS lock. */
	on_each_cpu(clear_os_lock, NULL, 1);
	isb();

	/* Register hotplug handler. */
	__register_cpu_notifier(&os_lock_nb);

	cpu_notifier_register_done();
	return 0;
}
postcore_initcall(debug_monitors_init);

/*
 * Single step API and exception handling.
 */
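/*
 * SPSR.SS works together with MDSCR_EL1.SS to drive the hardware
 * single-step state machine: returning to a context with SPSR.SS set
 * lets one instruction execute before the step exception is taken
 * (active-not-pending), while returning with it clear takes the step
 * exception immediately.
 */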
static void set_regs_spsr_ss(struct pt_regs *regs)
{
	unsigned long spsr;

	spsr = regs->pstate;
	spsr &= ~DBG_SPSR_SS;
	spsr |= DBG_SPSR_SS;
	regs->pstate = spsr;
}

static void clear_regs_spsr_ss(struct pt_regs *regs)
{
	unsigned long spsr;

	spsr = regs->pstate;
	spsr &= ~DBG_SPSR_SS;
	regs->pstate = spsr;
}

/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
static DEFINE_SPINLOCK(step_hook_lock);

void register_step_hook(struct step_hook *hook)
{
	spin_lock(&step_hook_lock);
	list_add_rcu(&hook->node, &step_hook);
	spin_unlock(&step_hook_lock);
}

void unregister_step_hook(struct step_hook *hook)
{
	spin_lock(&step_hook_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&step_hook_lock);
	synchronize_rcu();
}
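
/*
 * Example client (hypothetical, for illustration only): a debugger
 * embeds a struct step_hook and registers it once at init time:
 *
 *	static int my_step_fn(struct pt_regs *regs, unsigned int esr)
 *	{
 *		... inspect/adjust regs ...
 *		return DBG_HOOK_HANDLED;
 *	}
 *
 *	static struct step_hook my_step_hook = { .fn = my_step_fn };
 *	register_step_hook(&my_step_hook);
 */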

/*
 * Call the registered single-step handlers.
 * There is no syndrome information to identify the right handler, so
 * call each registered handler in turn until one of them handles the
 * exception (i.e. returns DBG_HOOK_HANDLED).
 */
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
	struct step_hook *hook;
	int retval = DBG_HOOK_ERROR;

	rcu_read_lock();

	list_for_each_entry_rcu(hook, &step_hook, node) {
		retval = hook->fn(regs, esr);
		if (retval == DBG_HOOK_HANDLED)
			break;
	}

	rcu_read_unlock();

	return retval;
}

static int single_step_handler(unsigned long addr, unsigned int esr,
			       struct pt_regs *regs)
{
	siginfo_t info;

	/*
	 * If we are stepping a pending breakpoint, call the hw_breakpoint
	 * handler first.
	 */
	if (!reinstall_suspended_bps(regs))
		return 0;

	if (user_mode(regs)) {
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_HWBKPT;
		info.si_addr = (void __user *)instruction_pointer(regs);
		force_sig_info(SIGTRAP, &info, current);

		/*
		 * ptrace will disable single step unless explicitly
		 * asked to re-enable it. For other clients, it makes
		 * sense to leave it enabled (i.e. rewind the controls
		 * to the active-not-pending state).
		 */
		user_rewind_single_step(current);
	} else {
		if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
			return 0;

		pr_warning("Unexpected kernel single-step exception at EL1\n");
		/*
		 * Re-enable stepping since we know that we will be
		 * returning to regs.
		 */
		set_regs_spsr_ss(regs);
	}

	return 0;
}

/*
 * The breakpoint handler is re-entrant: another breakpoint can hit
 * while a breakpoint handler is running, especially with kprobes, so
 * the hook list is protected with RCU for readers and a spinlock for
 * writers rather than a single lock taken on both paths.
 */
static LIST_HEAD(break_hook);
static DEFINE_SPINLOCK(break_hook_lock);

void register_break_hook(struct break_hook *hook)
{
	spin_lock(&break_hook_lock);
	list_add_rcu(&hook->node, &break_hook);
	spin_unlock(&break_hook_lock);
}

void unregister_break_hook(struct break_hook *hook)
{
	spin_lock(&break_hook_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&break_hook_lock);
	synchronize_rcu();
}

static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
	struct break_hook *hook;
	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(hook, &break_hook, node)
		if ((esr & hook->esr_mask) == hook->esr_val)
			fn = hook->fn;
	rcu_read_unlock();

	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
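
/*
 * Example client (hypothetical, for illustration only): a BRK user
 * matches on the immediate encoded in the ESR, e.g.:
 *
 *	static struct break_hook my_break_hook = {
 *		.esr_mask	= 0xffff,
 *		.esr_val	= 0x123,	(any otherwise unused immediate)
 *		.fn		= my_brk_fn,
 *	};
 *	register_break_hook(&my_break_hook);
 */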

static int brk_handler(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
	siginfo_t info;

	if (user_mode(regs)) {
		info = (siginfo_t) {
			.si_signo = SIGTRAP,
			.si_errno = 0,
			.si_code = TRAP_BRKPT,
			.si_addr = (void __user *)instruction_pointer(regs),
		};
		force_sig_info(SIGTRAP, &info, current);
	} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
		pr_warning("Unexpected kernel BRK exception at EL1\n");
		return -EFAULT;
	}

	return 0;
}
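
/*
 * Compat (AArch32) tasks use fixed breakpoint instruction encodings
 * (ARM BKPT and its Thumb/Thumb-2 equivalents) rather than the
 * ESR-matched hooks above, so the instruction is read back from user
 * memory and decoded here.
 */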
int aarch32_break_handler(struct pt_regs *regs)
{
	siginfo_t info;
	u32 arm_instr;
	u16 thumb_instr;
	bool bp = false;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!compat_user_mode(regs))
		return -EFAULT;

	if (compat_thumb_mode(regs)) {
		/* get 16-bit Thumb instruction */
		get_user(thumb_instr, (u16 __user *)pc);
		thumb_instr = le16_to_cpu(thumb_instr);
		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
			/* get second half of 32-bit Thumb-2 instruction */
			get_user(thumb_instr, (u16 __user *)(pc + 2));
			thumb_instr = le16_to_cpu(thumb_instr);
			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
		} else {
			bp = thumb_instr == AARCH32_BREAK_THUMB;
		}
	} else {
		/* 32-bit ARM instruction */
		get_user(arm_instr, (u32 __user *)pc);
		arm_instr = le32_to_cpu(arm_instr);
		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
	}

	if (!bp)
		return -EFAULT;

	info = (siginfo_t) {
		.si_signo = SIGTRAP,
		.si_errno = 0,
		.si_code = TRAP_BRKPT,
		.si_addr = pc,
	};

	force_sig_info(SIGTRAP, &info, current);
	return 0;
}
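
/*
 * hook_debug_fault_code() wires these handlers into the table that
 * do_debug_exception() consults when a debug exception is taken,
 * keyed by the ESR debug event type.
 */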
static int __init debug_traps_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
			      TRAP_HWBKPT, "single-step handler");
	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
			      TRAP_BRKPT, "ptrace BRK handler");
	return 0;
}
arch_initcall(debug_traps_init);

/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
{
	/*
	 * If single step is active for this thread, then set SPSR.SS
	 * to 1 to avoid returning to the active-pending state.
	 */
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}

void user_fastforward_single_step(struct task_struct *task)
{
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		clear_regs_spsr_ss(task_pt_regs(task));
}

/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
	WARN_ON(!irqs_disabled());
	set_regs_spsr_ss(regs);
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
}

void kernel_disable_single_step(void)
{
	WARN_ON(!irqs_disabled());
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
}

int kernel_active_single_step(void)
{
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
}
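
/*
 * Usage sketch (hypothetical kernel debugger, for illustration only):
 * from an exception handler, with interrupts disabled:
 *
 *	register_step_hook(&my_step_hook);
 *	kernel_enable_single_step(regs);	(step the interrupted context)
 *	...
 *	(later, in my_step_fn:)
 *	kernel_disable_single_step();
 *	return DBG_HOOK_HANDLED;
 */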

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}

void user_disable_single_step(struct task_struct *task)
{
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}