kgdb.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819
  1. /*
  2. * This program is free software; you can redistribute it and/or modify it
  3. * under the terms of the GNU General Public License as published by the
  4. * Free Software Foundation; either version 2, or (at your option) any
  5. * later version.
  6. *
  7. * This program is distributed in the hope that it will be useful, but
  8. * WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10. * General Public License for more details.
  11. *
  12. */
  13. /*
  14. * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
  15. * Copyright (C) 2000-2001 VERITAS Software Corporation.
  16. * Copyright (C) 2002 Andi Kleen, SuSE Labs
  17. * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
  18. * Copyright (C) 2007 MontaVista Software, Inc.
  19. * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
  20. */
  21. /****************************************************************************
  22. * Contributor: Lake Stevens Instrument Division$
  23. * Written by: Glenn Engel $
  24. * Updated by: Amit Kale<akale@veritas.com>
  25. * Updated by: Tom Rini <trini@kernel.crashing.org>
  26. * Updated by: Jason Wessel <jason.wessel@windriver.com>
  27. * Modified for 386 by Jim Kingdon, Cygnus Support.
  28. * Original kgdb, compatibility with 2.1.xx kernel by
  29. * David Grothe <dave@gcom.com>
  30. * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
  31. * X86_64 changes from Andi Kleen's patch merged by Jim Houston
  32. */
  33. #include <linux/spinlock.h>
  34. #include <linux/kdebug.h>
  35. #include <linux/string.h>
  36. #include <linux/kernel.h>
  37. #include <linux/ptrace.h>
  38. #include <linux/sched.h>
  39. #include <linux/delay.h>
  40. #include <linux/kgdb.h>
  41. #include <linux/smp.h>
  42. #include <linux/nmi.h>
  43. #include <linux/hw_breakpoint.h>
  44. #include <linux/uaccess.h>
  45. #include <linux/memory.h>
  46. #include <asm/debugreg.h>
  47. #include <asm/apicdef.h>
  48. #include <asm/apic.h>
  49. #include <asm/nmi.h>
/*
 * Maps each GDB register number onto its slot in &struct pt_regs.
 * An offset of -1 means the value is not stored in pt_regs; such
 * registers are synthesized or ignored by dbg_get_reg()/dbg_set_reg().
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
#ifdef CONFIG_X86_32
	{ "ax", 4, offsetof(struct pt_regs, ax) },
	{ "cx", 4, offsetof(struct pt_regs, cx) },
	{ "dx", 4, offsetof(struct pt_regs, dx) },
	{ "bx", 4, offsetof(struct pt_regs, bx) },
	{ "sp", 4, offsetof(struct pt_regs, sp) },
	{ "bp", 4, offsetof(struct pt_regs, bp) },
	{ "si", 4, offsetof(struct pt_regs, si) },
	{ "di", 4, offsetof(struct pt_regs, di) },
	{ "ip", 4, offsetof(struct pt_regs, ip) },
	{ "flags", 4, offsetof(struct pt_regs, flags) },
	{ "cs", 4, offsetof(struct pt_regs, cs) },
	{ "ss", 4, offsetof(struct pt_regs, ss) },
	{ "ds", 4, offsetof(struct pt_regs, ds) },
	{ "es", 4, offsetof(struct pt_regs, es) },
#else
	{ "ax", 8, offsetof(struct pt_regs, ax) },
	{ "bx", 8, offsetof(struct pt_regs, bx) },
	{ "cx", 8, offsetof(struct pt_regs, cx) },
	{ "dx", 8, offsetof(struct pt_regs, dx) },
	{ "si", 8, offsetof(struct pt_regs, si) },
	{ "di", 8, offsetof(struct pt_regs, di) },
	{ "bp", 8, offsetof(struct pt_regs, bp) },
	{ "sp", 8, offsetof(struct pt_regs, sp) },
	{ "r8", 8, offsetof(struct pt_regs, r8) },
	{ "r9", 8, offsetof(struct pt_regs, r9) },
	{ "r10", 8, offsetof(struct pt_regs, r10) },
	{ "r11", 8, offsetof(struct pt_regs, r11) },
	{ "r12", 8, offsetof(struct pt_regs, r12) },
	{ "r13", 8, offsetof(struct pt_regs, r13) },
	{ "r14", 8, offsetof(struct pt_regs, r14) },
	{ "r15", 8, offsetof(struct pt_regs, r15) },
	{ "ip", 8, offsetof(struct pt_regs, ip) },
	/* GDB treats flags/segments as 32-bit even on x86-64. */
	{ "flags", 4, offsetof(struct pt_regs, flags) },
	{ "cs", 4, offsetof(struct pt_regs, cs) },
	{ "ss", 4, offsetof(struct pt_regs, ss) },
	{ "ds", 4, -1 },
	{ "es", 4, -1 },
#endif
	{ "fs", 4, -1 },
	{ "gs", 4, -1 },
};
  94. int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
  95. {
  96. if (
  97. #ifdef CONFIG_X86_32
  98. regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
  99. #endif
  100. regno == GDB_SP || regno == GDB_ORIG_AX)
  101. return 0;
  102. if (dbg_reg_def[regno].offset != -1)
  103. memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
  104. dbg_reg_def[regno].size);
  105. return 0;
  106. }
/*
 * dbg_get_reg - Read one GDB register out of @regs into @mem.
 * @regno:	GDB register number.
 * @mem:	Buffer to receive the value.
 * @regs:	Exception register frame to read from.
 *
 * Returns the register's name, or NULL if @regno is out of range.
 * GDB_ORIG_AX has no dbg_reg_def[] entry and is handled up front.
 * On 32-bit, ss/sp are synthesized for kernel-mode frames and fs/gs
 * are always reported as 0xFFFF.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno == GDB_ORIG_AX) {
		memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
		return "orig_ax";
	}
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);

#ifdef CONFIG_X86_32
	switch (regno) {
	case GDB_SS:
		/* Kernel-mode traps don't save ss; report __KERNEL_DS. */
		if (!user_mode(regs))
			*(unsigned long *)mem = __KERNEL_DS;
		break;
	case GDB_SP:
		/* Kernel-mode sp is derived from the trap frame itself. */
		if (!user_mode(regs))
			*(unsigned long *)mem = kernel_stack_pointer(regs);
		break;
	case GDB_GS:
	case GDB_FS:
		*(unsigned long *)mem = 0xFFFF;
		break;
	}
#endif
	return dbg_reg_def[regno].name;
}
/**
 * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @p: The &struct task_struct of the desired process.
 *
 * Convert the register values of the sleeping process in @p to
 * the format that GDB expects.
 * This function is called when kgdb does not have access to the
 * &struct pt_regs and therefore it should fill the gdb registers
 * @gdb_regs with what has been saved in &struct thread_struct
 * thread field during switch_to.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
	/* On 64-bit, segment/flag slots are stored as 32-bit values. */
	u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
	/* GPRs are not available for a sleeping task; report zeros. */
	gdb_regs[GDB_AX] = 0;
	gdb_regs[GDB_BX] = 0;
	gdb_regs[GDB_CX] = 0;
	gdb_regs[GDB_DX] = 0;
	gdb_regs[GDB_SI] = 0;
	gdb_regs[GDB_DI] = 0;
	/* First word at the saved stack pointer is used as the frame base. */
	gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
	gdb_regs[GDB_DS] = __KERNEL_DS;
	gdb_regs[GDB_ES] = __KERNEL_DS;
	gdb_regs[GDB_PS] = 0;
	gdb_regs[GDB_CS] = __KERNEL_CS;
	gdb_regs[GDB_PC] = p->thread.ip;
	gdb_regs[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_FS] = 0xFFFF;
	gdb_regs[GDB_GS] = 0xFFFF;
#else
	/*
	 * NOTE(review): flags are taken from thread.sp + 8, i.e. a fixed
	 * offset in the switch_to stack frame — confirm against the
	 * __switch_to frame layout if that ever changes.
	 */
	gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
	gdb_regs32[GDB_CS] = __KERNEL_CS;
	gdb_regs32[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_PC] = 0;
	gdb_regs[GDB_R8] = 0;
	gdb_regs[GDB_R9] = 0;
	gdb_regs[GDB_R10] = 0;
	gdb_regs[GDB_R11] = 0;
	gdb_regs[GDB_R12] = 0;
	gdb_regs[GDB_R13] = 0;
	gdb_regs[GDB_R14] = 0;
	gdb_regs[GDB_R15] = 0;
#endif
	gdb_regs[GDB_SP] = p->thread.sp;
}
/*
 * Per-slot bookkeeping for the HBP_NUM x86 hardware breakpoints.
 * @pev points at a per-cpu set of perf events backing the slot once
 * kgdb_arch_late() has allocated them; while dbg_is_early the debug
 * registers are programmed directly instead.
 */
static struct hw_breakpoint {
	unsigned		enabled;
	unsigned long		addr;
	int			len;
	int			type;
	struct perf_event	* __percpu *pev;
} breakinfo[HBP_NUM];

/* Software shadow of DR7 used while dbg_is_early (perf not yet up). */
static unsigned long early_dr7;
/*
 * kgdb_correct_hw_break - (Re)arm every enabled breakpoint on this CPU.
 *
 * While dbg_is_early the debug registers are written directly and the
 * enable bits are accumulated in early_dr7.  Afterwards, each slot's
 * per-cpu perf event is refreshed and installed via
 * arch_install_hw_breakpoint(), then the debug registers are restored.
 */
static void kgdb_correct_hw_break(void)
{
	int breakno;

	for (breakno = 0; breakno < HBP_NUM; breakno++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;
		int val;
		int cpu = raw_smp_processor_id();
		if (!breakinfo[breakno].enabled)
			continue;
		if (dbg_is_early) {
			/* Program DRn directly and mirror it in early_dr7. */
			set_debugreg(breakinfo[breakno].addr, breakno);
			early_dr7 |= encode_dr7(breakno,
						breakinfo[breakno].len,
						breakinfo[breakno].type);
			set_debugreg(early_dr7, 7);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
		info = counter_arch_bp(bp);
		/* Only re-install events that are currently disabled. */
		if (bp->attr.disabled != 1)
			continue;
		/* Sync the perf event and its arch state with the slot. */
		bp->attr.bp_addr = breakinfo[breakno].addr;
		bp->attr.bp_len = breakinfo[breakno].len;
		bp->attr.bp_type = breakinfo[breakno].type;
		info->address = breakinfo[breakno].addr;
		info->len = breakinfo[breakno].len;
		info->type = breakinfo[breakno].type;
		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
	}
	if (!dbg_is_early)
		hw_breakpoint_restore();
}
/*
 * hw_break_reserve_slot - Reserve breakpoint slot @breakno on all CPUs.
 *
 * Returns 0 on success, -1 on failure.  On failure the slots already
 * reserved are released again: @cnt records how many CPUs the first
 * loop reached, and the unwind loop stops one short of the CPU that
 * failed.  This relies on for_each_online_cpu() iterating in the same
 * order both times.  No-op while dbg_is_early (no perf events yet).
 */
static int hw_break_reserve_slot(int breakno)
{
	int cpu;
	int cnt = 0;
	struct perf_event **pevent;

	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		cnt++;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}

	return 0;

fail:
	/* Roll back the reservations made before the failing CPU. */
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}
	return -1;
}
  252. static int hw_break_release_slot(int breakno)
  253. {
  254. struct perf_event **pevent;
  255. int cpu;
  256. if (dbg_is_early)
  257. return 0;
  258. for_each_online_cpu(cpu) {
  259. pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
  260. if (dbg_release_bp_slot(*pevent))
  261. /*
  262. * The debugger is responsible for handing the retry on
  263. * remove failure.
  264. */
  265. return -1;
  266. }
  267. return 0;
  268. }
  269. static int
  270. kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  271. {
  272. int i;
  273. for (i = 0; i < HBP_NUM; i++)
  274. if (breakinfo[i].addr == addr && breakinfo[i].enabled)
  275. break;
  276. if (i == HBP_NUM)
  277. return -1;
  278. if (hw_break_release_slot(i)) {
  279. printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
  280. return -1;
  281. }
  282. breakinfo[i].enabled = 0;
  283. return 0;
  284. }
/*
 * kgdb_remove_all_hw_break - Tear down every enabled breakpoint slot.
 *
 * A slot whose perf event is still armed on this CPU is uninstalled
 * and marked disabled, but left enabled in breakinfo[] for this pass
 * (the continue).  Otherwise the slot's DR7 bits (early) or perf
 * slots are released and the slot is cleared.
 */
static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			/* Still armed here: uninstall but keep the slot. */
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}
		if (dbg_is_early)
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
		else if (hw_break_release_slot(i))
			printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
			       breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}
}
  308. static int
  309. kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  310. {
  311. int i;
  312. for (i = 0; i < HBP_NUM; i++)
  313. if (!breakinfo[i].enabled)
  314. break;
  315. if (i == HBP_NUM)
  316. return -1;
  317. switch (bptype) {
  318. case BP_HARDWARE_BREAKPOINT:
  319. len = 1;
  320. breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
  321. break;
  322. case BP_WRITE_WATCHPOINT:
  323. breakinfo[i].type = X86_BREAKPOINT_WRITE;
  324. break;
  325. case BP_ACCESS_WATCHPOINT:
  326. breakinfo[i].type = X86_BREAKPOINT_RW;
  327. break;
  328. default:
  329. return -1;
  330. }
  331. switch (len) {
  332. case 1:
  333. breakinfo[i].len = X86_BREAKPOINT_LEN_1;
  334. break;
  335. case 2:
  336. breakinfo[i].len = X86_BREAKPOINT_LEN_2;
  337. break;
  338. case 4:
  339. breakinfo[i].len = X86_BREAKPOINT_LEN_4;
  340. break;
  341. #ifdef CONFIG_X86_64
  342. case 8:
  343. breakinfo[i].len = X86_BREAKPOINT_LEN_8;
  344. break;
  345. #endif
  346. default:
  347. return -1;
  348. }
  349. breakinfo[i].addr = addr;
  350. if (hw_break_reserve_slot(i)) {
  351. breakinfo[i].addr = 0;
  352. return -1;
  353. }
  354. breakinfo[i].enabled = 1;
  355. return 0;
  356. }
/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling exception.
 */
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	/* Disable hardware debugging while we are in kgdb: */
	set_debugreg(0UL, 7);
	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		if (dbg_is_early) {
			/* Clear the slot's enable bits from the DR7 shadow. */
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled == 1)
			continue;
		/* Uninstall, but remember it so correct_hw_break re-arms. */
		arch_uninstall_hw_breakpoint(bp);
		bp->attr.disabled = 1;
	}
}
#ifdef CONFIG_SMP
/**
 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
 * @flags: Current IRQ state
 *
 * On SMP systems, we need to get the attention of the other CPUs
 * and get them be in a known state. This should do what is needed
 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
 * the NMI approach is not used for rounding up all the CPUs. For example,
 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
 * this case, we have to make sure that interrupts are enabled before
 * calling smp_call_function(). The argument to this function is
 * the flags that will be used when restoring the interrupts. There is
 * local_irq_save() call before kgdb_roundup_cpus().
 *
 * On non-SMP systems, this is not called.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	/* NMI every CPU but ourselves; they land in kgdb_nmi_handler(). */
	apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
/**
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @e_vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcomInBuffer: The buffer of the packet we have read.
 * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
 * @linux_regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ip = addr;
		/* fall through - 'c'/'s' share the resume path below */
	case 'D':
	case 'k':
		/* clear the trace bit */
		linux_regs->flags &= ~X86_EFLAGS_TF;
		atomic_set(&kgdb_cpu_doing_single_step, -1);
		/* set the trace bit if we're stepping */
		if (remcomInBuffer[0] == 's') {
			linux_regs->flags |= X86_EFLAGS_TF;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}
		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
/*
 * single_step_cont - Resume when a single step crosses into user space.
 *
 * Issues a synthetic 'c' (continue) packet so the process keeps
 * running, then clears the BS bit in dr6.  In this path args->err
 * carries a pointer to dr6 (see the existing comment below);
 * ERR_PTR() is only used to cast the long back to a pointer.
 */
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
	/*
	 * Single step exception from kernel space to user space so
	 * eat the exception and continue the process:
	 */
	printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
			"resuming...\n");
	kgdb_arch_handle_exception(args->trapnr, args->signr,
				   args->err, "c", "", regs);
	/*
	 * Reset the BS bit in dr6 (pointed by args->err) to
	 * denote completion of processing
	 */
	(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;

	return NOTIFY_STOP;
}
  472. static DECLARE_BITMAP(was_in_debug_nmi, NR_CPUS);
  473. static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
  474. {
  475. int cpu;
  476. switch (cmd) {
  477. case NMI_LOCAL:
  478. if (atomic_read(&kgdb_active) != -1) {
  479. /* KGDB CPU roundup */
  480. cpu = raw_smp_processor_id();
  481. kgdb_nmicallback(cpu, regs);
  482. set_bit(cpu, was_in_debug_nmi);
  483. touch_nmi_watchdog();
  484. return NMI_HANDLED;
  485. }
  486. break;
  487. case NMI_UNKNOWN:
  488. cpu = raw_smp_processor_id();
  489. if (__test_and_clear_bit(cpu, was_in_debug_nmi))
  490. return NMI_HANDLED;
  491. break;
  492. default:
  493. /* do nothing */
  494. break;
  495. }
  496. return NMI_DONE;
  497. }
/*
 * __kgdb_notify - Common die-notifier body; runs with IRQs disabled.
 *
 * Returns NOTIFY_STOP when kgdb consumed the event, NOTIFY_DONE when
 * the event should be passed on.  Note the DIE_DEBUG case falls
 * through into default when no kgdb single step is pending and the
 * thread is not single-stepping a syscall.
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	switch (cmd) {
	case DIE_DEBUG:
		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
			/* A kgdb single step is in flight on some CPU. */
			if (user_mode(regs))
				return single_step_cont(regs, args);
			break;
		} else if (test_thread_flag(TIF_SINGLESTEP))
			/* This means a user thread is single stepping
			 * a system call which should be ignored
			 */
			return NOTIFY_DONE;
		/* fall through */
	default:
		/* User-mode faults are never kgdb's business. */
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	/* Must touch watchdog before return to normal operation */
	touch_nmi_watchdog();
	return NOTIFY_STOP;
}
  523. int kgdb_ll_trap(int cmd, const char *str,
  524. struct pt_regs *regs, long err, int trap, int sig)
  525. {
  526. struct die_args args = {
  527. .regs = regs,
  528. .str = str,
  529. .err = err,
  530. .trapnr = trap,
  531. .signr = sig,
  532. };
  533. if (!kgdb_io_module_registered)
  534. return NOTIFY_DONE;
  535. return __kgdb_notify(&args, cmd);
  536. }
  537. static int
  538. kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
  539. {
  540. unsigned long flags;
  541. int ret;
  542. local_irq_save(flags);
  543. ret = __kgdb_notify(ptr, cmd);
  544. local_irq_restore(flags);
  545. return ret;
  546. }
  547. static struct notifier_block kgdb_notifier = {
  548. .notifier_call = kgdb_notify,
  549. };
  550. /**
  551. * kgdb_arch_init - Perform any architecture specific initalization.
  552. *
  553. * This function will handle the initalization of any architecture
  554. * specific callbacks.
  555. */
  556. int kgdb_arch_init(void)
  557. {
  558. int retval;
  559. retval = register_die_notifier(&kgdb_notifier);
  560. if (retval)
  561. goto out;
  562. retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
  563. 0, "kgdb");
  564. if (retval)
  565. goto out1;
  566. retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
  567. 0, "kgdb");
  568. if (retval)
  569. goto out2;
  570. return retval;
  571. out2:
  572. unregister_nmi_handler(NMI_LOCAL, "kgdb");
  573. out1:
  574. unregister_die_notifier(&kgdb_notifier);
  575. out:
  576. return retval;
  577. }
  578. static void kgdb_hw_overflow_handler(struct perf_event *event,
  579. struct perf_sample_data *data, struct pt_regs *regs)
  580. {
  581. struct task_struct *tsk = current;
  582. int i;
  583. for (i = 0; i < 4; i++)
  584. if (breakinfo[i].enabled)
  585. tsk->thread.debugreg6 |= (DR_TRAP0 << i);
  586. }
void kgdb_arch_late(void)
{
	int i, cpu;
	struct perf_event_attr attr;
	struct perf_event **pevent;
	/*
	 * Pre-allocate the hw breakpoint structures in the non-atomic
	 * portion of kgdb because this operation requires mutexes to
	 * complete.
	 */
	hw_breakpoint_init(&attr);
	/* Placeholder attributes; real ones are set when a bp is armed. */
	attr.bp_addr = (unsigned long)kgdb_arch_init;
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.disabled = 1;
	for (i = 0; i < HBP_NUM; i++) {
		if (breakinfo[i].pev)
			continue;
		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
		if (IS_ERR((void * __force)breakinfo[i].pev)) {
			/* Allocation failed: kgdb cannot run; tear down. */
			printk(KERN_ERR "kgdb: Could not allocate hw"
			       "breakpoints\nDisabling the kernel debugger\n");
			breakinfo[i].pev = NULL;
			kgdb_arch_exit();
			return;
		}
		for_each_online_cpu(cpu) {
			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
			pevent[0]->hw.sample_period = 1;
			pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
			/*
			 * NOTE(review): clearing ->destroy and releasing the
			 * bp slot appears to hand slot accounting over to
			 * kgdb's own dbg_reserve/release calls — confirm
			 * against the hw_breakpoint core before changing.
			 */
			if (pevent[0]->destroy != NULL) {
				pevent[0]->destroy = NULL;
				release_bp_slot(*pevent);
			}
		}
	}
}
  624. /**
  625. * kgdb_arch_exit - Perform any architecture specific uninitalization.
  626. *
  627. * This function will handle the uninitalization of any architecture
  628. * specific callbacks, for dynamic registration and unregistration.
  629. */
  630. void kgdb_arch_exit(void)
  631. {
  632. int i;
  633. for (i = 0; i < 4; i++) {
  634. if (breakinfo[i].pev) {
  635. unregister_wide_hw_breakpoint(breakinfo[i].pev);
  636. breakinfo[i].pev = NULL;
  637. }
  638. }
  639. unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
  640. unregister_nmi_handler(NMI_LOCAL, "kgdb");
  641. unregister_die_notifier(&kgdb_notifier);
  642. }
  643. /**
  644. *
  645. * kgdb_skipexception - Bail out of KGDB when we've been triggered.
  646. * @exception: Exception vector number
  647. * @regs: Current &struct pt_regs.
  648. *
  649. * On some architectures we need to skip a breakpoint exception when
  650. * it occurs after a breakpoint has been removed.
  651. *
  652. * Skip an int3 exception when it occurs after a breakpoint has been
  653. * removed. Backtrack eip by 1 since the int3 would have caused it to
  654. * increment by 1.
  655. */
  656. int kgdb_skipexception(int exception, struct pt_regs *regs)
  657. {
  658. if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
  659. regs->ip -= 1;
  660. return 1;
  661. }
  662. return 0;
  663. }
  664. unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
  665. {
  666. if (exception == 3)
  667. return instruction_pointer(regs) - 1;
  668. return instruction_pointer(regs);
  669. }
/* Set the resume address requested by the debugger. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}
/*
 * kgdb_arch_set_breakpoint - Plant a software breakpoint at @bpt.
 *
 * Saves the original instruction bytes, then writes the int3
 * breakpoint via probe_kernel_write().  With CONFIG_DEBUG_RODATA the
 * direct write can fail on read-only text, in which case text_poke()
 * is used instead and the result is read back and verified; such
 * breakpoints are tagged BP_POKE_BREAKPOINT so removal takes the
 * matching path.  Returns 0 on success or a negative error code.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
#ifdef CONFIG_DEBUG_RODATA
	char opc[BREAK_INSTR_SIZE];
#endif /* CONFIG_DEBUG_RODATA */

	bpt->type = BP_BREAKPOINT;
	/* Preserve the bytes we are about to overwrite. */
	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
	/* Direct write succeeded; no need for the text_poke() path. */
	if (!err)
		return err;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		return -EBUSY;
	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
		  BREAK_INSTR_SIZE);
	/* Read back and verify the breakpoint actually landed. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err)
		return err;
	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
		return -EINVAL;
	bpt->type = BP_POKE_BREAKPOINT;
#endif /* CONFIG_DEBUG_RODATA */
	return err;
}
/*
 * kgdb_arch_remove_breakpoint - Restore the original bytes at @bpt.
 *
 * Breakpoints planted via text_poke() (BP_POKE_BREAKPOINT) are
 * removed the same way, with a read-back verification; on any failure
 * or for ordinary breakpoints, fall back to probe_kernel_write().
 */
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
#ifdef CONFIG_DEBUG_RODATA
	int err;
	char opc[BREAK_INSTR_SIZE];

	if (bpt->type != BP_POKE_BREAKPOINT)
		goto knl_write;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		goto knl_write;
	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
	/* Verify the original bytes were actually restored. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
		goto knl_write;
	return err;

knl_write:
#endif /* CONFIG_DEBUG_RODATA */
	/* Fallback: plain kernel-memory write of the saved bytes. */
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
/*
 * x86 hooks handed to the generic kgdb core.  0xcc is the one-byte
 * int3 instruction used for software breakpoints.
 */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: */
	.gdb_bpt_instr		= { 0xcc },
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_debug,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};