This source file includes the following definitions:
- smp_stop_nmi_callback
- smp_reboot_interrupt
- register_stop_handler
- native_stop_other_cpus
- smp_reschedule_interrupt
- smp_call_function_interrupt
- smp_call_function_single_interrupt
- nonmi_ipi_setup

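/*
 * x86 SMP support: entry points for the cross-CPU interrupts (IPIs)
 * used to stop, reschedule, and run functions on other CPUs, plus the
 * native smp_ops table that wires them into the rest of the kernel.
 */
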
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
#include <asm/virtext.h>
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114

static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

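/*
 * NMI fallback used while stopping other CPUs: the initiating CPU is
 * registered for this handler too, so it merely acknowledges the NMI,
 * while every other CPU leaves VMX operation and parks itself in
 * stop_this_cpu().
 */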
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on the stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

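/*
 * Handler for the REBOOT_VECTOR IPI: acknowledge the interrupt, leave
 * VMX operation (a CPU still in VMX can block the INIT signal needed
 * for reboot), then park this CPU in stop_this_cpu().
 */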
asmlinkage __visible void smp_reboot_interrupt(void)
{
	ipi_entering_ack_irq();
	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);
	irq_exit();
}

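/* Install smp_stop_nmi_callback as the first handler on the local NMI line. */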
static int register_stop_handler(void)
{
	return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
				    NMI_FLAG_FIRST, "smp_stop");
}

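/*
 * Stop all other CPUs: first politely via the REBOOT_VECTOR IPI, so
 * they can drop spinlocks and finish critical sections, then, if some
 * are still online after about a second, forcefully via NMI.
 */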
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use a dedicated vector here because smp_call_function()
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * Start with the REBOOT_VECTOR irq. It acts as a sync point to
	 * allow critical regions of code on other cpus to release their
	 * spin locks and re-enable irqs. Jumping straight to an NMI
	 * might accidentally cause deadlocks with further shutdown/panic
	 * code. By syncing, we give the cpus up to one second to finish
	 * their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic_send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second for IPI completion. The
		 * wait request is not checked here because that would
		 * prevent an NMI shutdown attempt in case that not all
		 * CPUs reach shutdown state.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && timeout--)
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if (num_online_cpus() > 1) {
		/*
		 * If NMI IPI is enabled, try to register the stop handler
		 * and send the IPI. In any case try to wait for the other
		 * CPUs to stop.
		 */
		if (!smp_no_nmi_ipi && !register_stop_handler()) {
			/* Sync above data before sending IRQ */
			wmb();

			pr_emerg("Shutting down cpus with NMI\n");

			apic_send_IPI_allbutself(NMI_VECTOR);
		}

		/*
		 * Don't wait longer than 10 ms if the caller didn't
		 * request it. If wait is true, the machine hangs here if
		 * one or more CPUs do not reach shutdown state.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
	local_irq_restore(flags);
}

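/*
 * Reschedule callback: kicks the scheduler on this CPU. KVM also uses
 * this vector to force a CPU out of guest mode.
 */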
__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	kvm_set_cpu_l1tf_flush_l1d();

	if (trace_resched_ipi_enabled()) {
		/*
		 * The tracepoints need the irq_enter()/irq_exit()
		 * bracketing; scheduler_ipi() might call irq_enter() as
		 * well, but nested calls are fine.
		 */
		irq_enter();
		trace_reschedule_entry(RESCHEDULE_VECTOR);
		scheduler_ipi();
		trace_reschedule_exit(RESCHEDULE_VECTOR);
		irq_exit();
		return;
	}
	scheduler_ipi();
}

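/* Entry point for the CALL_FUNCTION_VECTOR IPI: run the queued cross-CPU function calls. */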
__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

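/*
 * Entry point for the CALL_FUNCTION_SINGLE_VECTOR IPI: run the single
 * function call queued for this CPU.
 */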
__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}

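/*
 * Booting with "nonmi_ipi" on the kernel command line skips the NMI
 * fallback in native_stop_other_cpus() and relies on the REBOOT_VECTOR
 * IPI alone.
 */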
static int __init nonmi_ipi_setup(char *str)
{
	smp_no_nmi_ipi = true;
	return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);

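/*
 * Default bare-metal smp_ops; paravirtualized guests (e.g. Xen) can
 * override entries in this table with hypervisor-specific hooks.
 */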
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,
#if defined(CONFIG_KEXEC_CORE)
	.crash_stop_other_cpus	= kdump_nmi_shootdown_cpus,
#endif
	.smp_send_reschedule	= native_smp_send_reschedule,

	.cpu_up			= native_cpu_up,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);