/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */

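/*
 * The generic versions below save the original instruction in
 * bpt->saved_instr and patch in arch_kgdb_ops.gdb_bpt_instr; an
 * architecture typically overrides them only when patching kernel
 * text takes more than a plain memory write.
 */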
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;

	/* Validate setting the breakpoint and then removing it. If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache[i])
				continue;
			flush_cache_range(current->vmacache[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */

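/*
 * A software breakpoint moves through a small state machine:
 *
 *   BP_UNDEFINED -- dbg_set_sw_break() -------------------> BP_SET
 *   BP_SET ------- dbg_activate_sw_breakpoints() ---------> BP_ACTIVE
 *   BP_ACTIVE ---- dbg_deactivate_sw_breakpoints() -------> BP_SET
 *   BP_SET ------- dbg_remove_sw_break() ------------------> BP_REMOVED
 *   any state ---- dbg_remove_all_break() -----------------> BP_UNDEFINED
 *
 * Only BP_ACTIVE means the breakpoint instruction is currently
 * patched into kernel text.
 */
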
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			pr_info("BP install failed: %lx\n",
				kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
					(kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
					kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			pr_info("BP remove failed: %lx\n",
				kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
				(kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
				(kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			pr_err("breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module. Also if no
 * debugger is attached a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument is only to be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("waiting... or $3#33 for KDB\n");
#else
		pr_crit("Waiting for remote debugger\n");
#endif
	}
	return 1;
}

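/*
 * Detect recursive entry on the CPU that already owns kgdb_active.
 * This usually means a breakpoint was planted on code the debugger
 * itself needs; the offending breakpoint is removed, or failing that
 * all breakpoints are killed, so the kernel can make progress.
 */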
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

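/*
 * kgdb_cpu_enter() is the heart of the debug core. Every CPU that
 * takes a debug exception or is rounded up ends up here: exactly one
 * CPU acquires dbg_master_lock and becomes the master that talks to
 * kdb_stub()/gdb_serial_stub(), while the remaining CPUs spin as
 * slaves on dbg_slave_lock until the master lets them go.
 */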
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU loops here while it is a slave, or until it becomes the
	 * kgdb master CPU by acquiring the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping. To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);
	/*
	 * Avoid entering the debugger if we were triggered due to an oops
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic. We don't want to get stuck waiting for input
	 * on such systems, especially if it's "just" an oops.
	 */
	if (signo != SIGTRAP && panic_timeout)
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= raw_smp_processor_id();
	ks->ex_vector		= evector;
	ks->signo		= signo;
	ks->err_code		= ecode;
	ks->linux_regs		= regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);

	return ret;
}

/*
 * GDB places a breakpoint at this function to know dynamically loaded
 * objects. It is deliberately a separate, do-nothing function so that
 * GDB has a stable symbol to break on.
 */
static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};

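/*
 * Entry point for a CPU that was rounded up (typically via NMI or IPI)
 * while a debug session is starting: it joins as a slave only when a
 * master already holds dbg_master_lock. Returns 0 if the CPU was
 * parked in the debugger, 1 if there was nothing to do.
 */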
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

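/*
 * Lets a caller that has already gathered the other CPUs (for example
 * an out-of-band NMI handler) pull this CPU into the debugger as the
 * master. The normal roundup is skipped; instead kgdb_cpu_enter()
 * acknowledges by setting *send_ready to 1. Returns 0 on entry, 1 if
 * entry was not possible.
 */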
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
		   atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu			= cpu;
		ks->ex_vector		= trapnr;
		ks->signo		= SIGTRAP;
		ks->err_code		= err_code;
		ks->linux_regs		= regs;
		ks->send_ready		= send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
			       unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * and print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	/*
	 * Avoid entering the debugger if we were triggered due to a panic
	 * and panic_timeout indicates the system should automatically
	 * reboot on panic. We don't want to get stuck waiting for input
	 * in such a case.
	 */
	if (panic_timeout)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);

	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
	.notifier_call	= kgdb_panic_event,
	.priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach the debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
		/* Fall through */
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times when a tasklet needs to be used instead of a
 * compiled-in breakpoint so that the exception is raised outside of a
 * kgdb I/O module, as is the case with kgdboe, where calling a
 * breakpoint from within the I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

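/*
 * Schedule the breakpoint tasklet unless a break is already pending,
 * the debugger is currently active, or a breakpoint is being set.
 */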
void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

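/*
 * For reference, a minimal polled I/O driver would register itself
 * roughly as sketched below. This is illustrative only; my_uart_get()
 * and my_uart_put() are hypothetical helpers, not part of this file:
 *
 *	static struct kgdb_io my_dbg_io_ops = {
 *		.name		= "my_dbg_io",
 *		.read_char	= my_uart_get,
 *		.write_char	= my_uart_put,
 *	};
 *
 *	err = kgdb_register_io_module(&my_dbg_io_ops);
 *
 * and detach again with kgdb_unregister_io_module(&my_dbg_io_ops).
 */
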
/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		pr_err("Another I/O driver is already registered with KGDB\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Unregistered I/O driver %s, debugger disabled\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

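/*
 * Poll one character from the registered I/O driver. NO_POLL_CHAR is
 * mapped to -1; in kdb mode DEL (127) is translated to backspace (8),
 * everything else is passed through unchanged.
 */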
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception. It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

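/*
 * "kgdbwait" on the kernel command line requests a breakpoint as early
 * as possible: if an I/O driver is already registered the break is
 * taken here, otherwise kgdb_break_asap makes kgdb_register_io_module()
 * issue the initial breakpoint as soon as a driver registers.
 */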
static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);