Lines matching refs:cpu

197 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)  in uv_set_in_nmi()  argument
202 atomic_set(&hub_nmi->cpu_owner, cpu); in uv_set_in_nmi()
204 atomic_set(&uv_nmi_cpu, cpu); in uv_set_in_nmi()
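These three matches appear to come from the SGI/HPE UV NMI handler (arch/x86/platform/uv/uv_nmi.c): uv_set_in_nmi() records which CPU got into the NMI first, both per hub (hub_nmi->cpu_owner, line 202) and system-wide (uv_nmi_cpu, line 204). A minimal sketch of that claim logic, assuming the usual kernel includes and UV hub definitions, atomic_t fields in_nmi, cpu_owner and nmi_count on the hub structure, and a global atomic uv_in_nmi; not verbatim kernel code:

static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	/* Only the first CPU to bump in_nmi from 0 to 1 becomes the hub owner */
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);		/* line 202 */
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);		/* line 204 */
		atomic_inc(&hub_nmi->nmi_count);		/* assumed counter */
	}
	return first;
}

atomic_add_unless() is what makes "first CPU in" race-free: only one caller can move the counter from 0 to 1, so exactly one CPU per hub and one CPU per system ends up as owner.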
214 int cpu = smp_processor_id(); in uv_check_nmi() local
229 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
252 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
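uv_check_nmi() runs on the local CPU (line 214) and claims ownership through uv_set_in_nmi() on two paths (lines 229 and 252): once when this CPU sees the hub's NMI MMR set, and once when another hub has already declared a system-wide NMI. A condensed sketch; uv_nmi_test_mmr() and the hub's nmi_lock are assumptions about how the rest of the file is laid out:

static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();			/* line 214 */
	int nmi = atomic_read(&hub_nmi->in_nmi);

	if (!nmi && raw_spin_trylock(&hub_nmi->nmi_lock)) {
		if (uv_nmi_test_mmr(hub_nmi)) {		/* assumed: hub MMR flags an NMI */
			uv_set_in_nmi(cpu, hub_nmi);	/* line 229 */
			nmi = 1;
		} else {
			raw_spin_unlock(&hub_nmi->nmi_lock);
		}
	}

	/* Another hub may already have declared a system-wide NMI */
	if (!nmi && atomic_read(&uv_in_nmi)) {
		uv_set_in_nmi(cpu, hub_nmi);		/* line 252 */
		nmi = 1;
	}
	return nmi;
}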
264 static inline void uv_clear_nmi(int cpu) in uv_clear_nmi() argument
268 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { in uv_clear_nmi()
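The guard on line 268 means only the CPU recorded as hub owner clears the hub state back down. A sketch, with the MMR-clearing helper named as an assumption:

static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {	/* line 268 */
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		uv_local_mmr_clear_nmi();	/* assumed: clear the hub MMR NMI bit */
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}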
279 int cpu; in uv_nmi_nr_cpus_ping() local
281 for_each_cpu(cpu, uv_nmi_cpu_mask) in uv_nmi_nr_cpus_ping()
282 uv_cpu_nmi_per(cpu).pinging = 1; in uv_nmi_nr_cpus_ping()
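uv_nmi_nr_cpus_ping() marks the CPUs still missing from the NMI rendezvous as "pinging" (line 282) before poking them again. A sketch; the NMI IPI at the end is an assumption about what follows the loop:

static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;	/* line 282 */

	/* Assumed follow-up: re-send an NMI IPI to the CPUs still missing */
	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}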
290 int cpu; in uv_nmi_cleanup_mask() local
292 for_each_cpu(cpu, uv_nmi_cpu_mask) { in uv_nmi_cleanup_mask()
293 uv_cpu_nmi_per(cpu).pinging = 0; in uv_nmi_cleanup_mask()
294 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT; in uv_nmi_cleanup_mask()
295 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); in uv_nmi_cleanup_mask()
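Lines 292-295 already show essentially the whole body: CPUs that never answered either the NMI or the ping get their flags reset and are dropped from uv_nmi_cpu_mask. Reassembled only for readability:

static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging = 0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}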
386 static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs) in uv_nmi_dump_cpu_ip() argument
389 cpu, current->pid, current->comm); in uv_nmi_dump_cpu_ip()
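Line 389 is the argument list of a per-CPU summary printk: one row of CPU number, pid and comm, followed by the instruction pointer from the saved registers. A plausible shape; the exact format string and the symbolic-IP print are assumptions:

static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ",
		cpu, current->pid, current->comm);	/* line 389 */
	printk("%pS\n", (void *)regs->ip);		/* assumed: symbolic IP */
}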
395 static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) in uv_nmi_dump_state_cpu() argument
400 if (cpu == 0) in uv_nmi_dump_state_cpu()
404 uv_nmi_dump_cpu_ip(cpu, regs); in uv_nmi_dump_state_cpu()
408 "UV:%sNMI process trace for CPU %d\n", dots, cpu); in uv_nmi_dump_state_cpu()
415 static void uv_nmi_trigger_dump(int cpu) in uv_nmi_trigger_dump() argument
419 if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN) in uv_nmi_trigger_dump()
422 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP; in uv_nmi_trigger_dump()
426 if (uv_cpu_nmi_per(cpu).state in uv_nmi_trigger_dump()
431 pr_crit("UV: CPU %d stuck in process dump function\n", cpu); in uv_nmi_trigger_dump()
432 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; in uv_nmi_trigger_dump()
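The per-CPU state field doubles as a small handshake: the master flips a CPU from IN to DUMP (lines 419/422), waits for it to move on, and if it never does (lines 426-432) reports it stuck and forces DUMP_DONE. A sketch with an assumed retry budget:

static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;		/* assumed retry budget */

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;					/* line 419 */

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;	/* line 422 */
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_DUMP)
			return;				/* target picked it up */
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);	/* line 431 */
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;		/* line 432 */
}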
450 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) in uv_nmi_dump_state() argument
459 atomic_read(&uv_nmi_cpus_in_nmi), cpu); in uv_nmi_dump_state()
466 else if (tcpu == cpu) in uv_nmi_dump_state()
482 uv_nmi_dump_state_cpu(cpu, regs); in uv_nmi_dump_state()
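On the master, uv_nmi_dump_state() reports how many CPUs made it into the NMI (line 459), then walks every online CPU, dumping itself directly (tcpu == cpu, line 466) and triggering each slave in turn; a slave simply dumps its own state when told to (line 482). A condensed sketch; the announcement text, the slave wait loop and the final sync-exit are assumptions:

static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu, ignored = 0;

		pr_alert("UV: tracing processes for %d CPUs from CPU %d\n",
			 atomic_read(&uv_nmi_cpus_in_nmi), cpu);	/* line 459 */

		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;		/* never entered the NMI */
			else if (tcpu == cpu)		/* line 466 */
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);
	} else {
		/* Assumed: wait until the master flips our state to DUMP */
		while (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);	/* line 482 */
	}
	uv_nmi_sync_exit(master);	/* assumed: barrier before leaving */
}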
497 static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) in uv_nmi_kdump() argument
501 pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu); in uv_nmi_kdump()
523 static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) in uv_nmi_kdump() argument
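uv_nmi_kdump() shows up twice: the real version at line 497 (the master announces itself on line 501 and runs crash_kexec()) and an inline stub at line 523 for kernels built without kexec support. A sketch of both; the guarding config symbol and the failure/slave handling are assumptions:

#ifdef CONFIG_KEXEC_CORE			/* assumed config guard */
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	if (master) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);	/* line 501 */
		crash_kexec(regs);		/* normally does not return */
		pr_emerg("UV: crash_kexec unexpectedly returned\n");
		/* assumed: flag the failure so waiting slaves can continue */
	}
	/* assumed: slaves wait here for the crash kernel (or the failure flag) */
}
#else
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	if (master)
		pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
}
#endif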
555 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) in uv_call_kgdb_kdb() argument
565 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason, in uv_call_kgdb_kdb()
582 kgdb_nmicallback(cpu, regs); in uv_call_kgdb_kdb()
588 static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) in uv_call_kgdb_kdb() argument
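Same pattern for the debugger path: a real uv_call_kgdb_kdb() at line 555 and a stub at line 588 when KGDB is not configured. The master enters KGDB via kgdb_nmicallin() with X86_TRAP_NMI and a reason code (line 565); slaves join via kgdb_nmicallback() (line 582) once signalled. A sketch; uv_nmi_kdb_reason() and the uv_nmi_slave_continue atomic are assumptions:

#ifdef CONFIG_KGDB
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();	/* assumed helper */
		int ret;

		if (reason < 0)
			return;

		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				     &uv_nmi_slave_continue);	/* line 565 */
		if (ret)
			pr_alert("KGDB returned error, is kgdboc set?\n");
	} else {
		/* Assumed: wait for the master's go-ahead, then join as slave */
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		kgdb_nmicallback(cpu, regs);		/* line 582 */
	}
}
#else
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master)
		pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif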
600 int cpu = smp_processor_id(); in uv_handle_nmi() local
613 master = (atomic_read(&uv_nmi_cpu) == cpu); in uv_handle_nmi()
617 uv_nmi_kdump(cpu, master, regs); in uv_handle_nmi()
624 uv_nmi_dump_state(cpu, regs, master); in uv_handle_nmi()
628 uv_call_kgdb_kdb(cpu, regs, master); in uv_handle_nmi()
634 uv_clear_nmi(cpu); in uv_handle_nmi()
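The matches from uv_handle_nmi() outline the handler's whole life cycle: identify the local CPU (line 600), decide whether it is the master (the CPU stored by uv_set_in_nmi(), line 613), run the configured action (lines 617/624/628), and clear the hub NMI state on the way out (line 634). A condensed sketch; the action selector, the rendezvous helper and the master-side cleanup are assumptions:

static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();			/* line 600 */
	int master;

	if (!uv_check_nmi(hub_nmi))
		return NMI_DONE;			/* not a UV system NMI */

	master = (atomic_read(&uv_nmi_cpu) == cpu);	/* line 613 */

	if (uv_nmi_action_is("kdump"))
		uv_nmi_kdump(cpu, master, regs);	/* line 617 */

	uv_nmi_wait(master);	/* assumed: rendezvous of all CPUs */

	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
		uv_nmi_dump_state(cpu, regs, master);	/* line 624 */
	else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
		uv_call_kgdb_kdb(cpu, regs, master);	/* line 628 */

	uv_clear_nmi(cpu);				/* line 634 */

	if (master) {
		uv_nmi_cleanup_mask();	/* assumed: master resets global state */
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
	}
	return NMI_HANDLED;
}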
695 int cpu, nid; in uv_nmi_setup() local
703 for_each_present_cpu(cpu) { in uv_nmi_setup()
704 nid = cpu_to_node(cpu); in uv_nmi_setup()
712 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid]; in uv_nmi_setup()
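uv_nmi_setup() walks every present CPU (line 703), maps it to its node (line 704), allocates one hub NMI structure per node on first encounter, and points each CPU's per-CPU hub pointer at its node's structure (line 712). A sketch; the allocation of the uv_hub_nmi_list array itself and the lock/owner initialization are assumptions:

void uv_nmi_setup(void)
{
	int size = sizeof(struct uv_hub_nmi_s);
	int cpu, nid;

	/* assumed: uv_hub_nmi_list[] (one slot per node) is allocated earlier */
	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);				/* line 704 */
		if (!uv_hub_nmi_list[nid]) {
			uv_hub_nmi_list[nid] = kzalloc_node(size, GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&uv_hub_nmi_list[nid]->nmi_lock);
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];	/* line 712 */
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}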