This source file includes the following definitions:
- param_get_local64
- param_set_local64
- param_get_action
- param_set_action
- uv_nmi_action_is
- uv_nmi_setup_mmrs
- uv_nmi_test_mmr
- uv_local_mmr_clear_nmi
- uv_reassert_nmi
- uv_init_hubless_pch_io
- uv_nmi_setup_hubless_intr
- uv_init_hubless_pch_d0
- uv_nmi_test_hubless
- uv_test_nmi
- uv_set_in_nmi
- uv_check_nmi
- uv_clear_nmi
- uv_nmi_nr_cpus_ping
- uv_nmi_cleanup_mask
- uv_nmi_wait_cpus
- uv_nmi_wait
- uv_nmi_dump_cpu_ip_hdr
- uv_nmi_dump_cpu_ip
- uv_nmi_dump_state_cpu
- uv_nmi_trigger_dump
- uv_nmi_sync_exit
- uv_nmi_action_health
- uv_nmi_dump_state
- uv_nmi_touch_watchdogs
- uv_nmi_kdump
- uv_nmi_kdump
- uv_nmi_kdb_reason
- uv_nmi_kdb_reason
- uv_call_kgdb_kdb
- uv_call_kgdb_kdb
- uv_handle_nmi
- uv_handle_nmi_ping
- uv_register_nmi_notifier
- uv_nmi_init
- uv_nmi_setup_common
- uv_nmi_setup
- uv_nmi_setup_hubless
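
/*
 * UV NMI handler.
 *
 * Fields system-wide NMIs on SGI/HPE UV systems, delivered either through
 * the UV hub NMI MMR (hubbed systems) or through the PCH GPP_D_0 pin
 * (hubless systems), and carries out the action selected by the "action"
 * module parameter.
 */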
9 #include <linux/cpu.h>
10 #include <linux/delay.h>
11 #include <linux/kdb.h>
12 #include <linux/kexec.h>
13 #include <linux/kgdb.h>
14 #include <linux/moduleparam.h>
15 #include <linux/nmi.h>
16 #include <linux/sched.h>
17 #include <linux/sched/debug.h>
18 #include <linux/slab.h>
19 #include <linux/clocksource.h>
20
21 #include <asm/apic.h>
22 #include <asm/current.h>
23 #include <asm/kdebug.h>
24 #include <asm/local64.h>
25 #include <asm/nmi.h>
26 #include <asm/traps.h>
27 #include <asm/uv/uv.h>
28 #include <asm/uv/uv_hub.h>
29 #include <asm/uv/uv_mmrs.h>
30
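/*
 * Basic operation: every CPU that receives the NMI calls uv_handle_nmi().
 * The first CPU to see its hub's NMI source set (hub MMR or PCH status
 * bit) becomes the master (uv_nmi_cpu).  The master waits for all online
 * CPUs to enter the handler, sending an IPI(NMI) ping to any CPU that has
 * not arrived, then performs the selected action ("dump", "ips", "kdump",
 * "kdb", "kgdb" or "health") while the slave CPUs synchronize on
 * uv_nmi_slave_continue before everyone exits together.
 */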
53 static struct uv_hub_nmi_s **uv_hub_nmi_list;
54
55 DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
56
57
58 #define NMI_CONTROL_PORT 0x70
59 #define NMI_DUMMY_PORT 0x71
60 #define PAD_OWN_GPP_D_0 0x2c
61 #define GPI_NMI_STS_GPP_D_0 0x164
62 #define GPI_NMI_ENA_GPP_D_0 0x174
63 #define STS_GPP_D_0_MASK 0x1
64 #define PAD_CFG_DW0_GPP_D_0 0x4c0
65 #define GPIROUTNMI (1ul << 17)
66 #define PCH_PCR_GPIO_1_BASE 0xfdae0000ul
67 #define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
68
69 static u64 *pch_base;
70 static unsigned long nmi_mmr;
71 static unsigned long nmi_mmr_clear;
72 static unsigned long nmi_mmr_pending;
73
74 static atomic_t uv_in_nmi;
75 static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
76 static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
77 static atomic_t uv_nmi_slave_continue;
78 static cpumask_var_t uv_nmi_cpu_mask;
79
80
81 #define SLAVE_CLEAR 0
82 #define SLAVE_CONTINUE 1
83 #define SLAVE_EXIT 2
84
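/* Console loglevel used while the NMI handler dumps state */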
89 static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
90 module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
91
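/*
 * NMI statistics exposed as module parameters.  Reads report the current
 * count; writing any value resets the counter (see param_set_local64()).
 */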
96 static int param_get_local64(char *buffer, const struct kernel_param *kp)
97 {
98 return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
99 }
100
101 static int param_set_local64(const char *val, const struct kernel_param *kp)
102 {
103
104 local64_set((local64_t *)kp->arg, 0);
105 return 0;
106 }
107
108 static const struct kernel_param_ops param_ops_local64 = {
109 .get = param_get_local64,
110 .set = param_set_local64,
111 };
112 #define param_check_local64(name, p) __param_check(name, p, local64_t)
113
114 static local64_t uv_nmi_count;
115 module_param_named(nmi_count, uv_nmi_count, local64, 0644);
116
117 static local64_t uv_nmi_misses;
118 module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);
119
120 static local64_t uv_nmi_ping_count;
121 module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);
122
123 static local64_t uv_nmi_ping_misses;
124 module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
125
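/* Delay, retry and wait-count tuning for gathering CPUs (delays in microseconds) */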
129 static int uv_nmi_initial_delay = 100;
130 module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);
131
132 static int uv_nmi_slave_delay = 100;
133 module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);
134
135 static int uv_nmi_loop_delay = 100;
136 module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);
137
138 static int uv_nmi_trigger_delay = 10000;
139 module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);
140
141 static int uv_nmi_wait_count = 100;
142 module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
143
144 static int uv_nmi_retry_count = 500;
145 module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
146
147 static bool uv_pch_intr_enable = true;
148 static bool uv_pch_intr_now_enabled;
149 module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
150
151 static bool uv_pch_init_enable = true;
152 module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);
153
154 static int uv_nmi_debug;
155 module_param_named(debug, uv_nmi_debug, int, 0644);
156
157 #define nmi_debug(fmt, ...) \
158 do { \
159 if (uv_nmi_debug) \
160 pr_info(fmt, ##__VA_ARGS__); \
161 } while (0)
162
163
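/* NMI actions selectable through the "action" module parameter */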
164 #define ACTION_LEN 16
165 static struct nmi_action {
166 char *action;
167 char *desc;
168 } valid_acts[] = {
169 { "kdump", "do kernel crash dump" },
170 { "dump", "dump process stack for each cpu" },
171 { "ips", "dump Inst Ptr info for each cpu" },
172 { "kdb", "enter KDB (needs kgdboc= assignment)" },
173 { "kgdb", "enter KGDB (needs gdb target remote)" },
174 { "health", "check if CPUs respond to NMI" },
175 };
176 typedef char action_t[ACTION_LEN];
177 static action_t uv_nmi_action = { "dump" };
178
179 static int param_get_action(char *buffer, const struct kernel_param *kp)
180 {
181 return sprintf(buffer, "%s\n", uv_nmi_action);
182 }
183
184 static int param_set_action(const char *val, const struct kernel_param *kp)
185 {
186 int i;
187 int n = ARRAY_SIZE(valid_acts);
188 char arg[ACTION_LEN], *p;
189
190
191 strncpy(arg, val, ACTION_LEN - 1);
192 arg[ACTION_LEN - 1] = '\0';
193 p = strchr(arg, '\n');
194 if (p)
195 *p = '\0';
196
197 for (i = 0; i < n; i++)
198 if (!strcmp(arg, valid_acts[i].action))
199 break;
200
201 if (i < n) {
202 strcpy(uv_nmi_action, arg);
203 pr_info("UV: New NMI action:%s\n", uv_nmi_action);
204 return 0;
205 }
206
207 pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
208 for (i = 0; i < n; i++)
209 pr_err("UV: %-8s - %s\n",
210 valid_acts[i].action, valid_acts[i].desc);
211 return -EINVAL;
212 }
213
214 static const struct kernel_param_ops param_ops_action = {
215 .get = param_get_action,
216 .set = param_set_action,
217 };
218 #define param_check_action(name, p) __param_check(name, p, action_t)
219
220 module_param_named(action, uv_nmi_action, action, 0644);
221
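/*
 * Example (assuming the usual built-in module parameter path under
 * /sys/module/uv_nmi/parameters/):
 *
 *	echo kdb > /sys/module/uv_nmi/parameters/action
 *
 * selects the KDB action for the next NMI; unknown strings are rejected
 * by param_set_action() along with a list of the valid actions.
 */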
222 static inline bool uv_nmi_action_is(const char *action)
223 {
224 return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
225 }
226
227
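/* Select which hub NMI MMR set to use (the newer MMRX set if the hub supports it) */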
228 static void uv_nmi_setup_mmrs(void)
229 {
230 if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
231 uv_write_local_mmr(UVH_NMI_MMRX_REQ,
232 1UL << UVH_NMI_MMRX_REQ_SHIFT);
233 nmi_mmr = UVH_NMI_MMRX;
234 nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
235 nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
236 pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
237 } else {
238 nmi_mmr = UVH_NMI_MMR;
239 nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
240 nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
241 pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
242 }
243 }
244
245
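/* Read the hub NMI MMR and return whether the NMI-pending bit is set */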
246 static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
247 {
248 hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
249 atomic_inc(&hub_nmi->read_mmr_count);
250 return !!(hub_nmi->nmi_value & nmi_mmr_pending);
251 }
252
253 static inline void uv_local_mmr_clear_nmi(void)
254 {
255 uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
256 }
257
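/*
 * Hubless systems: clear/re-arm the NMI by pulsing the NMI-disable bit
 * (bit 7) of PCH I/O port 0x70, with dummy reads of port 0x71 in between.
 */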
261 static inline void uv_reassert_nmi(void)
262 {
263
264 outb(0x8f, NMI_CONTROL_PORT);
265 inb(NMI_DUMMY_PORT);
266 outb(0x0f, NMI_CONTROL_PORT);
267 inb(NMI_DUMMY_PORT);
268 }
269
270 static void uv_init_hubless_pch_io(int offset, int mask, int data)
271 {
272 int *addr = PCH_PCR_GPIO_ADDRESS(offset);
273 int readd = readl(addr);
274
275 if (mask) {
276 int writed = (readd & ~mask) | data;
277
278 nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
279 addr, readd, ~mask, data, writed);
280 writel(writed, addr);
281 } else if (readd & data) {
282 nmi_debug("UV:PCH: %p = %x\n", addr, data);
283 writel(data, addr);
284 }
285
286 (void)readl(addr);
287 }
288
289 static void uv_nmi_setup_hubless_intr(void)
290 {
291 uv_pch_intr_now_enabled = uv_pch_intr_enable;
292
293 uv_init_hubless_pch_io(
294 PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
295 uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
296
297 nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
298 uv_pch_intr_now_enabled ? "enabled" : "disabled");
299 }
300
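/* Register writes applied by uv_init_hubless_pch_d0() to configure GPP_D_0 on the PCH */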
301 static struct init_nmi {
302 unsigned int offset;
303 unsigned int mask;
304 unsigned int data;
305 } init_nmi[] = {
306 {
307 .offset = 0x84,
308 .mask = 0x1,
309 .data = 0x0,
310 },
311
312
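/* Clear status bits (write 1 to clear; includes GPI_NMI_STS_GPP_D_0 at 0x164) */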
313 {
314 .offset = 0x104,
315 .mask = 0x0,
316 .data = 0x1,
317 },
318 {
319 .offset = 0x124,
320 .mask = 0x0,
321 .data = 0x1,
322 },
323 {
324 .offset = 0x144,
325 .mask = 0x0,
326 .data = 0x1,
327 },
328 {
329 .offset = 0x164,
330 .mask = 0x0,
331 .data = 0x1,
332 },
333
334
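/* Clear the enable bit for these sources (includes GPI_NMI_ENA_GPP_D_0 at 0x174) */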
335 {
336 .offset = 0x114,
337 .mask = 0x1,
338 .data = 0x0,
339 },
340 {
341 .offset = 0x134,
342 .mask = 0x1,
343 .data = 0x0,
344 },
345 {
346 .offset = 0x154,
347 .mask = 0x1,
348 .data = 0x0,
349 },
350 {
351 .offset = 0x174,
352 .mask = 0x1,
353 .data = 0x0,
354 },
355
356
357 {
358 .offset = 0x4c0,
359 .mask = 0xffffffff,
360 .data = 0x82020100,
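/*
 * Pad configuration for GPP_D_0: among other fields, this value sets
 * GPIROUTNMI (bit 17) so the pad's GPI is routed to NMI.
 */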
394 },
395
396
397 {
398 .offset = 0x4c4,
399 .mask = 0x3c00,
400 .data = 0,
401 },
402 };
403
404 static void uv_init_hubless_pch_d0(void)
405 {
406 int i, read;
407
408 read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
409 if (read != 0) {
410 pr_info("UV: Hubless NMI already configured\n");
411 return;
412 }
413
414 nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
415 for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
416 uv_init_hubless_pch_io(init_nmi[i].offset,
417 init_nmi[i].mask,
418 init_nmi[i].data);
419 }
420 }
421
422 static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
423 {
424 int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
425 int status = *pstat;
426
427 hub_nmi->nmi_value = status;
428 atomic_inc(&hub_nmi->read_mmr_count);
429
430 if (!(status & STS_GPP_D_0_MASK))
431 return 0;
432
433 *pstat = STS_GPP_D_0_MASK;
434 (void)*pstat;
435
436 return 1;
437 }
438
439 static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
440 {
441 if (hub_nmi->hub_present)
442 return uv_nmi_test_mmr(hub_nmi);
443
444 if (hub_nmi->pch_owner)
445 return uv_nmi_test_hubless(hub_nmi);
446
447 return -1;
448 }
449
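/*
 * Mark this CPU as handling its hub's NMI.  The first CPU in on the hub
 * claims hub ownership; the first hub in system-wide also records the
 * master CPU in uv_nmi_cpu.
 */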
454 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
455 {
456 int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
457
458 if (first) {
459 atomic_set(&hub_nmi->cpu_owner, cpu);
460 if (atomic_add_unless(&uv_in_nmi, 1, 1))
461 atomic_set(&uv_nmi_cpu, cpu);
462
463 atomic_inc(&hub_nmi->nmi_count);
464 }
465 return first;
466 }
467
468
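/*
 * Return nonzero if a UV system NMI is in progress, either detected
 * locally on this hub or already claimed by another hub.
 */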
469 static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
470 {
471 int cpu = smp_processor_id();
472 int nmi = 0;
473 int nmi_detected = 0;
474
475 local64_inc(&uv_nmi_count);
476 this_cpu_inc(uv_cpu_nmi.queries);
477
478 do {
479 nmi = atomic_read(&hub_nmi->in_nmi);
480 if (nmi)
481 break;
482
483 if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
484 nmi_detected = uv_test_nmi(hub_nmi);
485
486
487 if (nmi_detected > 0) {
488 uv_set_in_nmi(cpu, hub_nmi);
489 nmi = 1;
490 break;
491 }
492
493
494 else if (nmi_detected < 0)
495 goto slave_wait;
496
497
498 raw_spin_unlock(&hub_nmi->nmi_lock);
499
500 } else {
501
502
503 slave_wait: cpu_relax();
504 udelay(uv_nmi_slave_delay);
505
506
507 nmi = atomic_read(&hub_nmi->in_nmi);
508 if (nmi)
509 break;
510 }
511
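/*
 * Our hub has not seen the NMI flag; check whether another hub has
 * already entered the system-wide NMI and join it if so.
 */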
516 if (!nmi) {
517 nmi = atomic_read(&uv_in_nmi);
518 if (nmi)
519 uv_set_in_nmi(cpu, hub_nmi);
520 }
521
522
523 if (nmi_detected < 0)
524 raw_spin_unlock(&hub_nmi->nmi_lock);
525
526 } while (0);
527
528 if (!nmi)
529 local64_inc(&uv_nmi_misses);
530
531 return nmi;
532 }
533
534
535 static inline void uv_clear_nmi(int cpu)
536 {
537 struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
538
539 if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
540 atomic_set(&hub_nmi->cpu_owner, -1);
541 atomic_set(&hub_nmi->in_nmi, 0);
542 if (hub_nmi->hub_present)
543 uv_local_mmr_clear_nmi();
544 else
545 uv_reassert_nmi();
546 raw_spin_unlock(&hub_nmi->nmi_lock);
547 }
548 }
549
550
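/* Ping non-responding CPUs with an IPI(NMI) to force them into the NMI handler */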
551 static void uv_nmi_nr_cpus_ping(void)
552 {
553 int cpu;
554
555 for_each_cpu(cpu, uv_nmi_cpu_mask)
556 uv_cpu_nmi_per(cpu).pinging = 1;
557
558 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
559 }
560
561
562 static void uv_nmi_cleanup_mask(void)
563 {
564 int cpu;
565
566 for_each_cpu(cpu, uv_nmi_cpu_mask) {
567 uv_cpu_nmi_per(cpu).pinging = 0;
568 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
569 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
570 }
571 }
572
573
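/*
 * Wait for CPUs to enter the NMI handler, clearing each arrival from
 * uv_nmi_cpu_mask; returns the number of CPUs still missing.
 */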
574 static int uv_nmi_wait_cpus(int first)
575 {
576 int i, j, k, n = num_online_cpus();
577 int last_k = 0, waiting = 0;
578 int cpu = smp_processor_id();
579
580 if (first) {
581 cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
582 k = 0;
583 } else {
584 k = n - cpumask_weight(uv_nmi_cpu_mask);
585 }
586
587
588 if (first && uv_pch_intr_now_enabled) {
589 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
590 return n - k - 1;
591 }
592
593 udelay(uv_nmi_initial_delay);
594 for (i = 0; i < uv_nmi_retry_count; i++) {
595 int loop_delay = uv_nmi_loop_delay;
596
597 for_each_cpu(j, uv_nmi_cpu_mask) {
598 if (uv_cpu_nmi_per(j).state) {
599 cpumask_clear_cpu(j, uv_nmi_cpu_mask);
600 if (++k >= n)
601 break;
602 }
603 }
604 if (k >= n) {
605 k = n;
606 break;
607 }
608 if (last_k != k) {
609 last_k = k;
610 waiting = 0;
611 } else if (++waiting > uv_nmi_wait_count)
612 break;
613
614
615 if (waiting && (n - k) == 1 &&
616 cpumask_test_cpu(0, uv_nmi_cpu_mask))
617 loop_delay *= 100;
618
619 udelay(loop_delay);
620 }
621 atomic_set(&uv_nmi_cpus_in_nmi, k);
622 return n - k;
623 }
624
625
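/*
 * Slave CPUs only mark themselves in; the master gathers all CPUs,
 * pinging any that have not arrived, and reports the final count.
 */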
626 static void uv_nmi_wait(int master)
627 {
628
629 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
630
631
632 if (!master)
633 return;
634
635 do {
636
637 if (!uv_nmi_wait_cpus(1))
638 break;
639
640
641 pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
642 cpumask_weight(uv_nmi_cpu_mask),
643 cpumask_pr_args(uv_nmi_cpu_mask));
644
645 uv_nmi_nr_cpus_ping();
646
647
648 if (!uv_nmi_wait_cpus(0))
649 break;
650
651 pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
652 cpumask_weight(uv_nmi_cpu_mask),
653 cpumask_pr_args(uv_nmi_cpu_mask));
654 } while (0);
655
656 pr_alert("UV: %d of %d CPUs in NMI\n",
657 atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
658 }
659
660
661 static void uv_nmi_dump_cpu_ip_hdr(void)
662 {
663 pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
664 "CPU", "PID", "COMMAND", "IP");
665 }
666
667
668 static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
669 {
670 pr_info("UV: %4d %6d %-32.32s %pS",
671 cpu, current->pid, current->comm, (void *)regs->ip);
672 }
673
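/*
 * Dump this CPU's state.  Action "ips" prints only the instruction
 * pointer line (idle, PID 0, tasks are skipped); action "dump" also
 * prints the full register/stack trace via show_regs().
 */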
681 static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
682 {
683 const char *dots = " ................................. ";
684
685 if (cpu == 0)
686 uv_nmi_dump_cpu_ip_hdr();
687
688 if (current->pid != 0 || !uv_nmi_action_is("ips"))
689 uv_nmi_dump_cpu_ip(cpu, regs);
690
691 if (uv_nmi_action_is("dump")) {
692 pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
693 show_regs(regs);
694 }
695
696 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
697 }
698
699
700 static void uv_nmi_trigger_dump(int cpu)
701 {
702 int retry = uv_nmi_trigger_delay;
703
704 if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
705 return;
706
707 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
708 do {
709 cpu_relax();
710 udelay(10);
711 if (uv_cpu_nmi_per(cpu).state
712 != UV_NMI_STATE_DUMP)
713 return;
714 } while (--retry > 0);
715
716 pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
717 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
718 }
719
720
721 static void uv_nmi_sync_exit(int master)
722 {
723 atomic_dec(&uv_nmi_cpus_in_nmi);
724 if (master) {
725 while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
726 cpu_relax();
727 atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
728 } else {
729 while (atomic_read(&uv_nmi_slave_continue))
730 cpu_relax();
731 }
732 }
733
734
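/* "health" action: report how many CPUs failed to respond to the NMI */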
735 static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
736 {
737 if (master) {
738 int in = atomic_read(&uv_nmi_cpus_in_nmi);
739 int out = num_online_cpus() - in;
740
741 pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
742 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
743 } else {
744 while (!atomic_read(&uv_nmi_slave_continue))
745 cpu_relax();
746 }
747 uv_nmi_sync_exit(master);
748 }
749
750
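/*
 * "dump"/"ips" action: the master walks the online CPUs, dumping its own
 * state directly and triggering each responding slave in turn.
 */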
751 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
752 {
753 if (master) {
754 int tcpu;
755 int ignored = 0;
756 int saved_console_loglevel = console_loglevel;
757
758 pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
759 uv_nmi_action_is("ips") ? "IPs" : "processes",
760 atomic_read(&uv_nmi_cpus_in_nmi), cpu);
761
762 console_loglevel = uv_nmi_loglevel;
763 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
764 for_each_online_cpu(tcpu) {
765 if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
766 ignored++;
767 else if (tcpu == cpu)
768 uv_nmi_dump_state_cpu(tcpu, regs);
769 else
770 uv_nmi_trigger_dump(tcpu);
771 }
772 if (ignored)
773 pr_alert("UV: %d CPUs ignored NMI\n", ignored);
774
775 console_loglevel = saved_console_loglevel;
776 pr_alert("UV: process trace complete\n");
777 } else {
778 while (!atomic_read(&uv_nmi_slave_continue))
779 cpu_relax();
780 while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
781 cpu_relax();
782 uv_nmi_dump_state_cpu(cpu, regs);
783 }
784 uv_nmi_sync_exit(master);
785 }
786
787 static void uv_nmi_touch_watchdogs(void)
788 {
789 touch_softlockup_watchdog_sync();
790 clocksource_touch_watchdog();
791 rcu_cpu_stall_reset();
792 touch_nmi_watchdog();
793 }
794
795 static atomic_t uv_nmi_kexec_failed;
796
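/*
 * "kdump" action: the master invokes crash_kexec(); if that unexpectedly
 * returns, the waiting slaves are released via uv_nmi_kexec_failed.
 */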
797 #if defined(CONFIG_KEXEC_CORE)
798 static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
799 {
800
801 if (master) {
802 pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
803 crash_kexec(regs);
804
805 pr_emerg("UV: crash_kexec unexpectedly returned, ");
806 atomic_set(&uv_nmi_kexec_failed, 1);
807 if (!kexec_crash_image) {
808 pr_cont("crash kernel not loaded\n");
809 return;
810 }
811 pr_cont("kexec busy, stalling cpus while waiting\n");
812 }
813
814
815 while (atomic_read(&uv_nmi_kexec_failed) == 0)
816 mdelay(10);
817 }
818
819 #else
820 static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
821 {
822 if (master)
823 pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
824 atomic_set(&uv_nmi_kexec_failed, 1);
825 }
826 #endif
827
828 #ifdef CONFIG_KGDB
829 #ifdef CONFIG_KGDB_KDB
830 static inline int uv_nmi_kdb_reason(void)
831 {
832 return KDB_REASON_SYSTEM_NMI;
833 }
834 #else
835 static inline int uv_nmi_kdb_reason(void)
836 {
837
838 if (uv_nmi_action_is("kgdb"))
839 return 0;
840
841 pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
842 return -1;
843 }
844 #endif
845
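/*
 * Enter the kernel debugger from the NMI.  The master CPU calls
 * kgdb_nmicallin() and signals the outcome through uv_nmi_slave_continue;
 * slave CPUs wait for that signal and, on SLAVE_CONTINUE, roll in via
 * kgdb_nmicallback().
 */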
853 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
854 {
855 if (master) {
856 int reason = uv_nmi_kdb_reason();
857 int ret;
858
859 if (reason < 0)
860 return;
861
862
863 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
864 &uv_nmi_slave_continue);
865 if (ret) {
866 pr_alert("KGDB returned error, is kgdboc set?\n");
867 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
868 }
869 } else {
870
871 int sig;
872
873 do {
874 cpu_relax();
875 sig = atomic_read(&uv_nmi_slave_continue);
876 } while (!sig);
877
878
879 if (sig == SLAVE_CONTINUE)
880 kgdb_nmicallback(cpu, regs);
881 }
882 uv_nmi_sync_exit(master);
883 }
884
885 #else
886 static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
887 {
888 pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
889 }
890 #endif
891
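/*
 * Main UV NMI handler, registered on the NMI_UNKNOWN chain and also
 * reached via the ping handler below.
 */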
895 static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
896 {
897 struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
898 int cpu = smp_processor_id();
899 int master = 0;
900 unsigned long flags;
901
902 local_irq_save(flags);
903
904
905 if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
906 local_irq_restore(flags);
907 return NMI_DONE;
908 }
909
910
911 master = (atomic_read(&uv_nmi_cpu) == cpu);
912
913
914 if (uv_nmi_action_is("kdump")) {
915 uv_nmi_kdump(cpu, master, regs);
916
917
918 if (master)
919 strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
920 }
921
922
923 uv_nmi_wait(master);
924
925
926 if (uv_nmi_action_is("health")) {
927 uv_nmi_action_health(cpu, regs, master);
928 } else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
929 uv_nmi_dump_state(cpu, regs, master);
930 } else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
931 uv_call_kgdb_kdb(cpu, regs, master);
932 } else {
933 if (master)
934 pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
935 uv_nmi_sync_exit(master);
936 }
937
938
939 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
940
941
942 uv_clear_nmi(cpu);
943
944
945 if (master) {
946 if (cpumask_weight(uv_nmi_cpu_mask))
947 uv_nmi_cleanup_mask();
948 atomic_set(&uv_nmi_cpus_in_nmi, -1);
949 atomic_set(&uv_nmi_cpu, -1);
950 atomic_set(&uv_in_nmi, 0);
951 atomic_set(&uv_nmi_kexec_failed, 0);
952 atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
953 }
954
955 uv_nmi_touch_watchdogs();
956 local_irq_restore(flags);
957
958 return NMI_HANDLED;
959 }
960
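/*
 * NMI handler on the NMI_LOCAL chain: catches the IPI(NMI) pings sent by
 * uv_nmi_nr_cpus_ping() to CPUs that did not enter the handler on their own.
 */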
964 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
965 {
966 int ret;
967
968 this_cpu_inc(uv_cpu_nmi.queries);
969 if (!this_cpu_read(uv_cpu_nmi.pinging)) {
970 local64_inc(&uv_nmi_ping_misses);
971 return NMI_DONE;
972 }
973
974 this_cpu_inc(uv_cpu_nmi.pings);
975 local64_inc(&uv_nmi_ping_count);
976 ret = uv_handle_nmi(reason, regs);
977 this_cpu_write(uv_cpu_nmi.pinging, 0);
978 return ret;
979 }
980
981 static void uv_register_nmi_notifier(void)
982 {
983 if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
984 pr_warn("UV: NMI handler failed to register\n");
985
986 if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
987 pr_warn("UV: PING NMI handler failed to register\n");
988 }
989
990 void uv_nmi_init(void)
991 {
992 unsigned int value;
993
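/* Unmask NMI delivery on this CPU's local APIC LVT1 entry */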
997 value = apic_read(APIC_LVT1) | APIC_DM_NMI;
998 value &= ~APIC_LVT_MASKED;
999 apic_write(APIC_LVT1, value);
1000 }
1001
1002
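/* Allocate the per-node hub NMI structures and point each present CPU at its node's entry */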
1003 static void __init uv_nmi_setup_common(bool hubbed)
1004 {
1005 int size = sizeof(void *) * (1 << NODES_SHIFT);
1006 int cpu;
1007
1008 uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
1009 nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
1010 BUG_ON(!uv_hub_nmi_list);
1011 size = sizeof(struct uv_hub_nmi_s);
1012 for_each_present_cpu(cpu) {
1013 int nid = cpu_to_node(cpu);
1014 if (uv_hub_nmi_list[nid] == NULL) {
1015 uv_hub_nmi_list[nid] = kzalloc_node(size,
1016 GFP_KERNEL, nid);
1017 BUG_ON(!uv_hub_nmi_list[nid]);
1018 raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
1019 atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
1020 uv_hub_nmi_list[nid]->hub_present = hubbed;
1021 uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
1022 }
1023 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
1024 }
1025 BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
1026 }
1027
1028
1029 void __init uv_nmi_setup(void)
1030 {
1031 uv_nmi_setup_mmrs();
1032 uv_nmi_setup_common(true);
1033 uv_register_nmi_notifier();
1034 pr_info("UV: Hub NMI enabled\n");
1035 }
1036
1037
1038 void __init uv_nmi_setup_hubless(void)
1039 {
1040 uv_nmi_setup_common(false);
1041 pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
1042 nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
1043 pch_base, PCH_PCR_GPIO_1_BASE);
1044 if (uv_pch_init_enable)
1045 uv_init_hubless_pch_d0();
1046 uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
1047 STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
1048 uv_nmi_setup_hubless_intr();
1049
1050 uv_reassert_nmi();
1051 uv_register_nmi_notifier();
1052 pr_info("UV: Hubless NMI enabled\n");
1053 }