/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/uaccess.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}
#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
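
/*
 * For illustration, READ_WB_REG_CASE(AARCH64_DBG_REG_BVR, 1,
 * AARCH64_DBG_REG_NAME_BVR, val) expands to:
 *
 *	case (AARCH64_DBG_REG_BVR + 1):
 *		AARCH64_DBG_READ(1, AARCH64_DBG_REG_NAME_BVR, val);
 *		break;
 *
 * The register index must be a compile-time constant in the underlying
 * mrs/msr instruction, hence one generated case per register rather
 * than a loop.
 */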
static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}
/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warning("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}
enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	/*
	 * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
	 * In this case, use the native interface, since we don't have
	 * the notion of a "compat CPU" and could end up relying on
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
	return tsk && is_compat_thread(task_thread_info(tsk));
}
/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}

	return -ENOSPC;
}
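
/*
 * Example usage, as in hw_breakpoint_control() below: claiming the first
 * free breakpoint slot on this CPU looks like
 *
 *	i = hw_breakpoint_slot_setup(this_cpu_ptr(bp_on_reg), core_num_brps,
 *				     bp, HW_BREAKPOINT_INSTALL);
 *
 * where a negative return value means no slot could be (un)claimed.
 */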
static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
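		/*
		 * Bit 0 of DBG{B,W}CR is the enable bit: keep it clear
		 * while this task's {break,watch}points are disabled
		 * (e.g. while single-stepping over one of them).
		 */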
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}
/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}
/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_bp(bp)) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 * that breakpoints should be sizeof(long). This
			 * is nonsense. For now, we fix up the parameter
			 * but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_bp(bp)) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
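			/* Fall through */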
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
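			/* Fall through */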
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
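		/*
		 * Illustration: a compat 2-byte watchpoint at 0x1002 has
		 * offset 2, so the address is rounded down to 0x1000 and
		 * the byte-address-select mask in ctrl.len (0b0011 for
		 * ARM_BREAKPOINT_LEN_2) is shifted up to 0b1100, selecting
		 * bytes 2-3 of the aligned word.
		 */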
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}
/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
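		/*
		 * ctrl.len is the byte-address-select mask, so a compat
		 * Thumb breakpoint whose BVR holds the word-aligned address
		 * only fires for the halfword it actually selects.
		 */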
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);

		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (untagged_addr(addr) & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
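
/*
 * Kernel-side stepping state lives in the per-CPU stepping_kernel_bp
 * variable: the handlers above move it from ARM_KERNEL_STEP_NONE to
 * ARM_KERNEL_STEP_ACTIVE when they enable single-step themselves, or to
 * ARM_KERNEL_STEP_SUSPEND when single-step was already active (e.g. for
 * a debugger); reinstall_suspended_bps() consumes this state on the
 * subsequent step exception.
 */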
/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}
/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);
	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}
/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;

	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for the debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier, some slots might be initialised; if so, they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}
static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action,
				      void *hcpu)
{
	int cpu = (long)hcpu;

	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif
/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}