This source file includes the following definitions:
- opt_kgdb_con
- opt_nokgdbroundup
- kgdb_arch_set_breakpoint
- kgdb_arch_remove_breakpoint
- kgdb_validate_break_address
- kgdb_arch_pc
- kgdb_arch_init
- kgdb_skipexception
- kgdb_call_nmi_hook
- kgdb_roundup_cpus
- kgdb_flush_swbreak_addr
- dbg_activate_sw_breakpoints
- dbg_set_sw_break
- dbg_deactivate_sw_breakpoints
- dbg_remove_sw_break
- kgdb_isremovedbreak
- dbg_remove_all_break
- kgdb_io_ready
- kgdb_reenter_check
- dbg_touch_watchdogs
- kgdb_cpu_enter
- kgdb_handle_exception
- module_event
- kgdb_nmicallback
- kgdb_nmicallin
- kgdb_console_write
- sysrq_handle_dbg
- kgdb_panic
- kgdb_arch_late
- dbg_late_init
- dbg_notify_reboot
- kgdb_register_callbacks
- kgdb_unregister_callbacks
- kgdb_tasklet_bpt
- kgdb_schedule_breakpoint
- kgdb_initial_breakpoint
- kgdb_register_io_module
- kgdb_unregister_io_module
- dbg_io_get_char
- kgdb_breakpoint
- opt_kgdb_wait
(lines 1-30 omitted)
31 #define pr_fmt(fmt) "KGDB: " fmt
32
33 #include <linux/pid_namespace.h>
34 #include <linux/clocksource.h>
35 #include <linux/serial_core.h>
36 #include <linux/interrupt.h>
37 #include <linux/spinlock.h>
38 #include <linux/console.h>
39 #include <linux/threads.h>
40 #include <linux/uaccess.h>
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/ptrace.h>
44 #include <linux/string.h>
45 #include <linux/delay.h>
46 #include <linux/sched.h>
47 #include <linux/sysrq.h>
48 #include <linux/reboot.h>
49 #include <linux/init.h>
50 #include <linux/kgdb.h>
51 #include <linux/kdb.h>
52 #include <linux/nmi.h>
53 #include <linux/pid.h>
54 #include <linux/smp.h>
55 #include <linux/mm.h>
56 #include <linux/vmacache.h>
57 #include <linux/rcupdate.h>
58 #include <linux/irq.h>
59
60 #include <asm/cacheflush.h>
61 #include <asm/byteorder.h>
62 #include <linux/atomic.h>
63
64 #include "debug_core.h"
65
66 static int kgdb_break_asap;
67
68 struct debuggerinfo_struct kgdb_info[NR_CPUS];
69
70
71
72
73 int kgdb_connected;
74 EXPORT_SYMBOL_GPL(kgdb_connected);
75
76
77 int kgdb_io_module_registered;
78
79
80 static int exception_level;
81
82 struct kgdb_io *dbg_io_ops;
83 static DEFINE_SPINLOCK(kgdb_registration_lock);
84
85
86 static int kgdbreboot;
87
88 static int kgdb_con_registered;
89
90 static int kgdb_use_con;
91
92 bool dbg_is_early = true;
93
94 int dbg_switch_cpu;
95
96
97 int dbg_kdb_mode = 1;
98
99 static int __init opt_kgdb_con(char *str)
100 {
101 kgdb_use_con = 1;
102 return 0;
103 }
104
105 early_param("kgdbcon", opt_kgdb_con);
106
107 module_param(kgdb_use_con, int, 0644);
108 module_param(kgdbreboot, int, 0644);
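/*
 * Usage sketch (sysfs, not part of this file): because both variables are
 * exposed as 0644 module parameters they can also be changed on a running
 * system.  The path below assumes this object is built in as "debug_core"
 * and is illustrative only:
 *
 *	echo 1 > /sys/module/debug_core/parameters/kgdbreboot
 */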
109
110
111
112
113
114 static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
115 [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
116 };
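/*
 * Descriptive note (added): breakpoint slot life cycle as used below.  A
 * slot starts BP_UNDEFINED; dbg_set_sw_break() marks it BP_SET,
 * dbg_activate_sw_breakpoints() patches the break instruction in and marks
 * it BP_ACTIVE, and dbg_deactivate_sw_breakpoints() restores the original
 * instruction and drops it back to BP_SET.  dbg_remove_sw_break() marks a
 * slot BP_REMOVED until dbg_remove_all_break() returns it to BP_UNDEFINED.
 */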
117
118
119
120
121 atomic_t kgdb_active = ATOMIC_INIT(-1);
122 EXPORT_SYMBOL_GPL(kgdb_active);
123 static DEFINE_RAW_SPINLOCK(dbg_master_lock);
124 static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
125
126
127
128
129
130 static atomic_t masters_in_kgdb;
131 static atomic_t slaves_in_kgdb;
132 static atomic_t kgdb_break_tasklet_var;
133 atomic_t kgdb_setting_breakpoint;
134
135 struct task_struct *kgdb_usethread;
136 struct task_struct *kgdb_contthread;
137
138 int kgdb_single_step;
139 static pid_t kgdb_sstep_pid;
140
141
142 atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
143
144
145
146
147
148
149
150
151 static int kgdb_do_roundup = 1;
152
153 static int __init opt_nokgdbroundup(char *str)
154 {
155 kgdb_do_roundup = 0;
156
157 return 0;
158 }
159
160 early_param("nokgdbroundup", opt_nokgdbroundup);
161
162
163
164
165
166
167
168
169
170 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
171 {
172 int err;
173
174 err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
175 BREAK_INSTR_SIZE);
176 if (err)
177 return err;
178 err = probe_kernel_write((char *)bpt->bpt_addr,
179 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
180 return err;
181 }
182
183 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
184 {
185 return probe_kernel_write((char *)bpt->bpt_addr,
186 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
187 }
188
189 int __weak kgdb_validate_break_address(unsigned long addr)
190 {
191 struct kgdb_bkpt tmp;
192 int err;
193
194
195
196
197
198 tmp.bpt_addr = addr;
199 err = kgdb_arch_set_breakpoint(&tmp);
200 if (err)
201 return err;
202 err = kgdb_arch_remove_breakpoint(&tmp);
203 if (err)
204 pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
205 addr);
206 return err;
207 }
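/*
 * Sketch (illustrative, not from this file): an architecture feeds the weak
 * helpers above through arch_kgdb_ops.  Only the gdb_bpt_instr field used
 * above is assumed; the byte value is the x86 int3 opcode as an example.
 *
 *	struct kgdb_arch arch_kgdb_ops = {
 *		.gdb_bpt_instr	= { 0xcc },	// arch breakpoint opcode
 *	};
 */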
208
209 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
210 {
211 return instruction_pointer(regs);
212 }
213
214 int __weak kgdb_arch_init(void)
215 {
216 return 0;
217 }
218
219 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
220 {
221 return 0;
222 }
223
224 #ifdef CONFIG_SMP
225
226
227
228
229
230 static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
231
232 void __weak kgdb_call_nmi_hook(void *ignored)
233 {
234
235
236
237
238
239
240
241
242 kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
243 }
244
245 void __weak kgdb_roundup_cpus(void)
246 {
247 call_single_data_t *csd;
248 int this_cpu = raw_smp_processor_id();
249 int cpu;
250 int ret;
251
252 for_each_online_cpu(cpu) {
253
254 if (cpu == this_cpu)
255 continue;
256
257 csd = &per_cpu(kgdb_roundup_csd, cpu);
258
259
260
261
262
263
264
265
266
267 if (kgdb_info[cpu].rounding_up)
268 continue;
269 kgdb_info[cpu].rounding_up = true;
270
271 csd->func = kgdb_call_nmi_hook;
272 ret = smp_call_function_single_async(cpu, csd);
273 if (ret)
274 kgdb_info[cpu].rounding_up = false;
275 }
276 }
277
278 #endif
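/*
 * Sketch (illustrative): an architecture that can raise NMIs may override
 * the weak kgdb_roundup_cpus() above so that CPUs spinning with interrupts
 * disabled still end up in kgdb_nmicallback().  The IPI helper named below
 * is hypothetical.
 *
 *	void kgdb_roundup_cpus(void)
 *	{
 *		arch_send_debug_ipi_to_others();	// hypothetical arch hook
 *	}
 */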
279
280
281
282
283
284 static void kgdb_flush_swbreak_addr(unsigned long addr)
285 {
286 if (!CACHE_FLUSH_IS_SAFE)
287 return;
288
289 if (current->mm) {
290 int i;
291
292 for (i = 0; i < VMACACHE_SIZE; i++) {
293 if (!current->vmacache.vmas[i])
294 continue;
295 flush_cache_range(current->vmacache.vmas[i],
296 addr, addr + BREAK_INSTR_SIZE);
297 }
298 }
299
300
301 flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
302 }
303
304
305
306
307 int dbg_activate_sw_breakpoints(void)
308 {
309 int error;
310 int ret = 0;
311 int i;
312
313 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
314 if (kgdb_break[i].state != BP_SET)
315 continue;
316
317 error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
318 if (error) {
319 ret = error;
320 pr_info("BP install failed: %lx\n",
321 kgdb_break[i].bpt_addr);
322 continue;
323 }
324
325 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
326 kgdb_break[i].state = BP_ACTIVE;
327 }
328 return ret;
329 }
330
331 int dbg_set_sw_break(unsigned long addr)
332 {
333 int err = kgdb_validate_break_address(addr);
334 int breakno = -1;
335 int i;
336
337 if (err)
338 return err;
339
340 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
341 if ((kgdb_break[i].state == BP_SET) &&
342 (kgdb_break[i].bpt_addr == addr))
343 return -EEXIST;
344 }
345 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
346 if (kgdb_break[i].state == BP_REMOVED &&
347 kgdb_break[i].bpt_addr == addr) {
348 breakno = i;
349 break;
350 }
351 }
352
353 if (breakno == -1) {
354 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
355 if (kgdb_break[i].state == BP_UNDEFINED) {
356 breakno = i;
357 break;
358 }
359 }
360 }
361
362 if (breakno == -1)
363 return -E2BIG;
364
365 kgdb_break[breakno].state = BP_SET;
366 kgdb_break[breakno].type = BP_BREAKPOINT;
367 kgdb_break[breakno].bpt_addr = addr;
368
369 return 0;
370 }
371
372 int dbg_deactivate_sw_breakpoints(void)
373 {
374 int error;
375 int ret = 0;
376 int i;
377
378 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
379 if (kgdb_break[i].state != BP_ACTIVE)
380 continue;
381 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
382 if (error) {
383 pr_info("BP remove failed: %lx\n",
384 kgdb_break[i].bpt_addr);
385 ret = error;
386 }
387
388 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
389 kgdb_break[i].state = BP_SET;
390 }
391 return ret;
392 }
393
394 int dbg_remove_sw_break(unsigned long addr)
395 {
396 int i;
397
398 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
399 if ((kgdb_break[i].state == BP_SET) &&
400 (kgdb_break[i].bpt_addr == addr)) {
401 kgdb_break[i].state = BP_REMOVED;
402 return 0;
403 }
404 }
405 return -ENOENT;
406 }
407
408 int kgdb_isremovedbreak(unsigned long addr)
409 {
410 int i;
411
412 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
413 if ((kgdb_break[i].state == BP_REMOVED) &&
414 (kgdb_break[i].bpt_addr == addr))
415 return 1;
416 }
417 return 0;
418 }
419
420 int dbg_remove_all_break(void)
421 {
422 int error;
423 int i;
424
425
426 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
427 if (kgdb_break[i].state != BP_ACTIVE)
428 goto setundefined;
429 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
430 if (error)
431 pr_err("breakpoint remove failed: %lx\n",
432 kgdb_break[i].bpt_addr);
433 setundefined:
434 kgdb_break[i].state = BP_UNDEFINED;
435 }
436
437
438 if (arch_kgdb_ops.remove_all_hw_break)
439 arch_kgdb_ops.remove_all_hw_break();
440
441 return 0;
442 }
443
444
445
446
447
448
449
450
451
452
453 static int kgdb_io_ready(int print_wait)
454 {
455 if (!dbg_io_ops)
456 return 0;
457 if (kgdb_connected)
458 return 1;
459 if (atomic_read(&kgdb_setting_breakpoint))
460 return 1;
461 if (print_wait) {
462 #ifdef CONFIG_KGDB_KDB
463 if (!dbg_kdb_mode)
464 pr_crit("waiting... or $3#33 for KDB\n");
465 #else
466 pr_crit("Waiting for remote debugger\n");
467 #endif
468 }
469 return 1;
470 }
471
472 static int kgdb_reenter_check(struct kgdb_state *ks)
473 {
474 unsigned long addr;
475
476 if (atomic_read(&kgdb_active) != raw_smp_processor_id())
477 return 0;
478
479
480 exception_level++;
481 addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
482 dbg_deactivate_sw_breakpoints();
483
484
485
486
487
488
489
490 if (dbg_remove_sw_break(addr) == 0) {
491 exception_level = 0;
492 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
493 dbg_activate_sw_breakpoints();
494 pr_crit("re-enter error: breakpoint removed %lx\n", addr);
495 WARN_ON_ONCE(1);
496
497 return 1;
498 }
499 dbg_remove_all_break();
500 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
501
502 if (exception_level > 1) {
503 dump_stack();
504 panic("Recursive entry to debugger");
505 }
506
507 pr_crit("re-enter exception: ALL breakpoints killed\n");
508 #ifdef CONFIG_KGDB_KDB
509
510 return 0;
511 #endif
512 dump_stack();
513 panic("Recursive entry to debugger");
514
515 return 1;
516 }
517
518 static void dbg_touch_watchdogs(void)
519 {
520 touch_softlockup_watchdog_sync();
521 clocksource_touch_watchdog();
522 rcu_cpu_stall_reset();
523 }
524
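/*
 * Descriptive note (added): kgdb_cpu_enter() is the single funnel for
 * entering the debugger.  The CPU that took the exception races for
 * dbg_master_lock and becomes the master that runs kdb_stub() or
 * gdb_serial_stub(), while rounded-up CPUs park as slaves on dbg_slave_lock
 * until the master releases them.
 */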
525 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
526 int exception_state)
527 {
528 unsigned long flags;
529 int sstep_tries = 100;
530 int error;
531 int cpu;
532 int trace_on = 0;
533 int online_cpus = num_online_cpus();
534 u64 time_left;
535
536 kgdb_info[ks->cpu].enter_kgdb++;
537 kgdb_info[ks->cpu].exception_state |= exception_state;
538
539 if (exception_state == DCPU_WANT_MASTER)
540 atomic_inc(&masters_in_kgdb);
541 else
542 atomic_inc(&slaves_in_kgdb);
543
544 if (arch_kgdb_ops.disable_hw_break)
545 arch_kgdb_ops.disable_hw_break(regs);
546
547 acquirelock:
548
549
550
551
552 local_irq_save(flags);
553
554 cpu = ks->cpu;
555 kgdb_info[cpu].debuggerinfo = regs;
556 kgdb_info[cpu].task = current;
557 kgdb_info[cpu].ret_state = 0;
558 kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
559
560
561 smp_mb();
562
563 if (exception_level == 1) {
564 if (raw_spin_trylock(&dbg_master_lock))
565 atomic_xchg(&kgdb_active, cpu);
566 goto cpu_master_loop;
567 }
568
569
570
571
572
573 while (1) {
574 cpu_loop:
575 if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
576 kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
577 goto cpu_master_loop;
578 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
579 if (raw_spin_trylock(&dbg_master_lock)) {
580 atomic_xchg(&kgdb_active, cpu);
581 break;
582 }
583 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
584 if (!raw_spin_is_locked(&dbg_slave_lock))
585 goto return_normal;
586 } else {
587 return_normal:
588
589
590
591 if (arch_kgdb_ops.correct_hw_break)
592 arch_kgdb_ops.correct_hw_break();
593 if (trace_on)
594 tracing_on();
595 kgdb_info[cpu].debuggerinfo = NULL;
596 kgdb_info[cpu].task = NULL;
597 kgdb_info[cpu].exception_state &=
598 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
599 kgdb_info[cpu].enter_kgdb--;
600 smp_mb__before_atomic();
601 atomic_dec(&slaves_in_kgdb);
602 dbg_touch_watchdogs();
603 local_irq_restore(flags);
604 return 0;
605 }
606 cpu_relax();
607 }
608
609
610
611
612
613
614
615 if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
616 (kgdb_info[cpu].task &&
617 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
618 atomic_set(&kgdb_active, -1);
619 raw_spin_unlock(&dbg_master_lock);
620 dbg_touch_watchdogs();
621 local_irq_restore(flags);
622
623 goto acquirelock;
624 }
625
626 if (!kgdb_io_ready(1)) {
627 kgdb_info[cpu].ret_state = 1;
628 goto kgdb_restore;
629 }
630
631
632
633
634 if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
635 goto kgdb_restore;
636
637
638 if (dbg_io_ops->pre_exception)
639 dbg_io_ops->pre_exception();
640
641
642
643
644
645 if (!kgdb_single_step)
646 raw_spin_lock(&dbg_slave_lock);
647
648 #ifdef CONFIG_SMP
649
650 if (ks->send_ready)
651 atomic_set(ks->send_ready, 1);
652
653
654 else if ((!kgdb_single_step) && kgdb_do_roundup)
655 kgdb_roundup_cpus();
656 #endif
657
658
659
660
661 time_left = MSEC_PER_SEC;
662 while (kgdb_do_roundup && --time_left &&
663 (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
664 online_cpus)
665 udelay(1000);
666 if (!time_left)
667 pr_crit("Timed out waiting for secondary CPUs.\n");
668
669
670
671
672
673 dbg_deactivate_sw_breakpoints();
674 kgdb_single_step = 0;
675 kgdb_contthread = current;
676 exception_level = 0;
677 trace_on = tracing_is_on();
678 if (trace_on)
679 tracing_off();
680
681 while (1) {
682 cpu_master_loop:
683 if (dbg_kdb_mode) {
684 kgdb_connected = 1;
685 error = kdb_stub(ks);
686 if (error == -1)
687 continue;
688 kgdb_connected = 0;
689 } else {
690 error = gdb_serial_stub(ks);
691 }
692
693 if (error == DBG_PASS_EVENT) {
694 dbg_kdb_mode = !dbg_kdb_mode;
695 } else if (error == DBG_SWITCH_CPU_EVENT) {
696 kgdb_info[dbg_switch_cpu].exception_state |=
697 DCPU_NEXT_MASTER;
698 goto cpu_loop;
699 } else {
700 kgdb_info[cpu].ret_state = error;
701 break;
702 }
703 }
704
705
706 if (dbg_io_ops->post_exception)
707 dbg_io_ops->post_exception();
708
709 if (!kgdb_single_step) {
710 raw_spin_unlock(&dbg_slave_lock);
711
712 while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
713 cpu_relax();
714 }
715
716 kgdb_restore:
717 if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
718 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
719 if (kgdb_info[sstep_cpu].task)
720 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
721 else
722 kgdb_sstep_pid = 0;
723 }
724 if (arch_kgdb_ops.correct_hw_break)
725 arch_kgdb_ops.correct_hw_break();
726 if (trace_on)
727 tracing_on();
728
729 kgdb_info[cpu].debuggerinfo = NULL;
730 kgdb_info[cpu].task = NULL;
731 kgdb_info[cpu].exception_state &=
732 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
733 kgdb_info[cpu].enter_kgdb--;
734 smp_mb__before_atomic();
735 atomic_dec(&masters_in_kgdb);
736
737 atomic_set(&kgdb_active, -1);
738 raw_spin_unlock(&dbg_master_lock);
739 dbg_touch_watchdogs();
740 local_irq_restore(flags);
741
742 return kgdb_info[cpu].ret_state;
743 }
744
745
746
747
748
749
750
751
752 int
753 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
754 {
755 struct kgdb_state kgdb_var;
756 struct kgdb_state *ks = &kgdb_var;
757 int ret = 0;
758
759 if (arch_kgdb_ops.enable_nmi)
760 arch_kgdb_ops.enable_nmi(0);
761
762
763
764
765
766
767 if (signo != SIGTRAP && panic_timeout)
768 return 1;
769
770 memset(ks, 0, sizeof(struct kgdb_state));
771 ks->cpu = raw_smp_processor_id();
772 ks->ex_vector = evector;
773 ks->signo = signo;
774 ks->err_code = ecode;
775 ks->linux_regs = regs;
776
777 if (kgdb_reenter_check(ks))
778 goto out;
779 if (kgdb_info[ks->cpu].enter_kgdb != 0)
780 goto out;
781
782 ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
783 out:
784 if (arch_kgdb_ops.enable_nmi)
785 arch_kgdb_ops.enable_nmi(1);
786 return ret;
787 }
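/*
 * Sketch (illustrative): architecture trap/notifier code funnels debug
 * exceptions into the function above along these lines; a non-zero return
 * tells the caller the event was not consumed by the debugger.
 *
 *	if (kgdb_handle_exception(trapnr, SIGTRAP, error_code, regs))
 *		return NOTIFY_DONE;	// hypothetical notifier context
 *	return NOTIFY_STOP;
 */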
788
789
790
791
792 static int module_event(struct notifier_block *self, unsigned long val,
793 void *data)
794 {
795 return 0;
796 }
797
798 static struct notifier_block dbg_module_load_nb = {
799 .notifier_call = module_event,
800 };
801
802 int kgdb_nmicallback(int cpu, void *regs)
803 {
804 #ifdef CONFIG_SMP
805 struct kgdb_state kgdb_var;
806 struct kgdb_state *ks = &kgdb_var;
807
808 kgdb_info[cpu].rounding_up = false;
809
810 memset(ks, 0, sizeof(struct kgdb_state));
811 ks->cpu = cpu;
812 ks->linux_regs = regs;
813
814 if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
815 raw_spin_is_locked(&dbg_master_lock)) {
816 kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
817 return 0;
818 }
819 #endif
820 return 1;
821 }
822
823 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
824 atomic_t *send_ready)
825 {
826 #ifdef CONFIG_SMP
827 if (!kgdb_io_ready(0) || !send_ready)
828 return 1;
829
830 if (kgdb_info[cpu].enter_kgdb == 0) {
831 struct kgdb_state kgdb_var;
832 struct kgdb_state *ks = &kgdb_var;
833
834 memset(ks, 0, sizeof(struct kgdb_state));
835 ks->cpu = cpu;
836 ks->ex_vector = trapnr;
837 ks->signo = SIGTRAP;
838 ks->err_code = err_code;
839 ks->linux_regs = regs;
840 ks->send_ready = send_ready;
841 kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
842 return 0;
843 }
844 #endif
845 return 1;
846 }
847
848 static void kgdb_console_write(struct console *co, const char *s,
849 unsigned count)
850 {
851 unsigned long flags;
852
853
854
855 if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
856 return;
857
858 local_irq_save(flags);
859 gdbstub_msg_write(s, count);
860 local_irq_restore(flags);
861 }
862
863 static struct console kgdbcons = {
864 .name = "kgdb",
865 .write = kgdb_console_write,
866 .flags = CON_PRINTBUFFER | CON_ENABLED,
867 .index = -1,
868 };
869
870 #ifdef CONFIG_MAGIC_SYSRQ
871 static void sysrq_handle_dbg(int key)
872 {
873 if (!dbg_io_ops) {
874 pr_crit("ERROR: No KGDB I/O module available\n");
875 return;
876 }
877 if (!kgdb_connected) {
878 #ifdef CONFIG_KGDB_KDB
879 if (!dbg_kdb_mode)
880 pr_crit("KGDB or $3#33 for KDB\n");
881 #else
882 pr_crit("Entering KGDB\n");
883 #endif
884 }
885
886 kgdb_breakpoint();
887 }
888
889 static struct sysrq_key_op sysrq_dbg_op = {
890 .handler = sysrq_handle_dbg,
891 .help_msg = "debug(g)",
892 .action_msg = "DEBUG",
893 };
894 #endif
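/*
 * Usage sketch (shell, not C): with CONFIG_MAGIC_SYSRQ the handler above
 * can be reached from a running system, e.g.
 *
 *	echo g > /proc/sysrq-trigger
 *
 * or via the SysRq-g key chord on a console.
 */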
895
896 void kgdb_panic(const char *msg)
897 {
898 if (!kgdb_io_module_registered)
899 return;
900
901
902
903
904
905
906 if (panic_timeout)
907 return;
908
909 if (dbg_kdb_mode)
910 kdb_printf("PANIC: %s\n", msg);
911
912 kgdb_breakpoint();
913 }
914
915 void __weak kgdb_arch_late(void)
916 {
917 }
918
919 void __init dbg_late_init(void)
920 {
921 dbg_is_early = false;
922 if (kgdb_io_module_registered)
923 kgdb_arch_late();
924 kdb_init(KDB_INIT_FULL);
925 }
926
927 static int
928 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
929 {
930
931
932
933
934
935
936 switch (kgdbreboot) {
937 case 1:
938 kgdb_breakpoint();
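/* fall through: like case -1, skip gdbstub_exit() below */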
939 case -1:
940 goto done;
941 }
942 if (!dbg_kdb_mode)
943 gdbstub_exit(code);
944 done:
945 return NOTIFY_DONE;
946 }
947
948 static struct notifier_block dbg_reboot_notifier = {
949 .notifier_call = dbg_notify_reboot,
950 .next = NULL,
951 .priority = INT_MAX,
952 };
953
954 static void kgdb_register_callbacks(void)
955 {
956 if (!kgdb_io_module_registered) {
957 kgdb_io_module_registered = 1;
958 kgdb_arch_init();
959 if (!dbg_is_early)
960 kgdb_arch_late();
961 register_module_notifier(&dbg_module_load_nb);
962 register_reboot_notifier(&dbg_reboot_notifier);
963 #ifdef CONFIG_MAGIC_SYSRQ
964 register_sysrq_key('g', &sysrq_dbg_op);
965 #endif
966 if (kgdb_use_con && !kgdb_con_registered) {
967 register_console(&kgdbcons);
968 kgdb_con_registered = 1;
969 }
970 }
971 }
972
973 static void kgdb_unregister_callbacks(void)
974 {
975
976
977
978
979
980 if (kgdb_io_module_registered) {
981 kgdb_io_module_registered = 0;
982 unregister_reboot_notifier(&dbg_reboot_notifier);
983 unregister_module_notifier(&dbg_module_load_nb);
984 kgdb_arch_exit();
985 #ifdef CONFIG_MAGIC_SYSRQ
986 unregister_sysrq_key('g', &sysrq_dbg_op);
987 #endif
988 if (kgdb_con_registered) {
989 unregister_console(&kgdbcons);
990 kgdb_con_registered = 0;
991 }
992 }
993 }
994
995
996
997
998
999
1000
1001 static void kgdb_tasklet_bpt(unsigned long ing)
1002 {
1003 kgdb_breakpoint();
1004 atomic_set(&kgdb_break_tasklet_var, 0);
1005 }
1006
1007 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
1008
1009 void kgdb_schedule_breakpoint(void)
1010 {
1011 if (atomic_read(&kgdb_break_tasklet_var) ||
1012 atomic_read(&kgdb_active) != -1 ||
1013 atomic_read(&kgdb_setting_breakpoint))
1014 return;
1015 atomic_inc(&kgdb_break_tasklet_var);
1016 tasklet_schedule(&kgdb_tasklet_breakpoint);
1017 }
1018 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
1019
1020 static void kgdb_initial_breakpoint(void)
1021 {
1022 kgdb_break_asap = 0;
1023
1024 pr_crit("Waiting for connection from remote gdb...\n");
1025 kgdb_breakpoint();
1026 }
1027
1028
1029
1030
1031
1032
1033
1034 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
1035 {
1036 int err;
1037
1038 spin_lock(&kgdb_registration_lock);
1039
1040 if (dbg_io_ops) {
1041 spin_unlock(&kgdb_registration_lock);
1042
1043 pr_err("Another I/O driver is already registered with KGDB\n");
1044 return -EBUSY;
1045 }
1046
1047 if (new_dbg_io_ops->init) {
1048 err = new_dbg_io_ops->init();
1049 if (err) {
1050 spin_unlock(&kgdb_registration_lock);
1051 return err;
1052 }
1053 }
1054
1055 dbg_io_ops = new_dbg_io_ops;
1056
1057 spin_unlock(&kgdb_registration_lock);
1058
1059 pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
1060
1061
1062 kgdb_register_callbacks();
1063
1064 if (kgdb_break_asap)
1065 kgdb_initial_breakpoint();
1066
1067 return 0;
1068 }
1069 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
1070
1071
1072
1073
1074
1075
1076
1077 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1078 {
1079 BUG_ON(kgdb_connected);
1080
1081
1082
1083
1084
1085 kgdb_unregister_callbacks();
1086
1087 spin_lock(&kgdb_registration_lock);
1088
1089 WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1090 dbg_io_ops = NULL;
1091
1092 spin_unlock(&kgdb_registration_lock);
1093
1094 pr_info("Unregistered I/O driver %s, debugger disabled\n",
1095 old_dbg_io_ops->name);
1096 }
1097 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
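/*
 * Sketch (illustrative, not part of this file): a minimal polled I/O driver
 * registering with the debug core.  Only .name, .read_char and .write_char
 * are shown; the UART poll helpers are hypothetical stand-ins.
 */

static int my_dbg_read_char(void)
{
	return my_uart_poll_rx();	/* hypothetical; NO_POLL_CHAR when empty */
}

static void my_dbg_write_char(u8 c)
{
	my_uart_poll_tx(c);		/* hypothetical */
}

static struct kgdb_io my_dbg_io_ops = {
	.name		= "my_dbg_io",
	.read_char	= my_dbg_read_char,
	.write_char	= my_dbg_write_char,
};

/* ...then, in the driver's init path: */
/*	err = kgdb_register_io_module(&my_dbg_io_ops); */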
1098
1099 int dbg_io_get_char(void)
1100 {
1101 int ret = dbg_io_ops->read_char();
1102 if (ret == NO_POLL_CHAR)
1103 return -1;
1104 if (!dbg_kdb_mode)
1105 return ret;
1106 if (ret == 127)
1107 return 8;
1108 return ret;
1109 }
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119 noinline void kgdb_breakpoint(void)
1120 {
1121 atomic_inc(&kgdb_setting_breakpoint);
1122 wmb();
1123 arch_kgdb_breakpoint();
1124 wmb();
1125 atomic_dec(&kgdb_setting_breakpoint);
1126 }
1127 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
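/*
 * Sketch (illustrative): any kernel code can drop into the attached
 * debugger at a point of interest by calling the exported helper:
 *
 *	if (suspicious_condition)	// hypothetical condition
 *		kgdb_breakpoint();
 */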
1128
1129 static int __init opt_kgdb_wait(char *str)
1130 {
1131 kgdb_break_asap = 1;
1132
1133 kdb_init(KDB_INIT_EARLY);
1134 if (kgdb_io_module_registered)
1135 kgdb_initial_breakpoint();
1136
1137 return 0;
1138 }
1139
1140 early_param("kgdbwait", opt_kgdb_wait);