This source file includes the following definitions:
- idle_thread_get
- idle_thread_set_boot_cpu
- idle_init
- idle_threads_init
- smpboot_thread_fn
- __smpboot_create_thread
- smpboot_create_threads
- smpboot_unpark_thread
- smpboot_unpark_threads
- smpboot_park_thread
- smpboot_park_threads
- smpboot_destroy_threads
- smpboot_register_percpu_thread
- smpboot_unregister_percpu_thread
- cpu_report_state
- cpu_check_up_prepare
- cpu_set_state_online
- cpu_wait_death
- cpu_report_death
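
/*
 * Common SMP CPU bringup/teardown functions
 */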
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD

/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk)
                return ERR_PTR(-ENOMEM);
        init_idle(tsk, cpu);
        return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
        per_cpu(idle_threads, smp_processor_id()) = current;
}
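/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */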
static inline void idle_init(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk))
                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
                else
                        per_cpu(idle_threads, cpu) = tsk;
        }
}

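/**
 * idle_threads_init - Initialize idle threads for all cpus
 */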
void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu;

        boot_cpu = smp_processor_id();

        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        idle_init(cpu);
        }
}
#endif /* CONFIG_GENERIC_SMP_IDLE_THREAD */

#endif /* CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
        unsigned int                    cpu;
        unsigned int                    status;
        struct smp_hotplug_thread       *ht;
};

enum {
        HP_THREAD_NONE = 0,
        HP_THREAD_ACTIVE,
        HP_THREAD_PARKED,
};

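/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is stopped via kthread_stop().
 */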
static int smpboot_thread_fn(void *data)
{
        struct smpboot_thread_data *td = data;
        struct smp_hotplug_thread *ht = td->ht;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                preempt_disable();
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        /* cleanup must mirror setup */
                        if (ht->cleanup && td->status != HP_THREAD_NONE)
                                ht->cleanup(td->cpu, cpu_online(td->cpu));
                        kfree(td);
                        return 0;
                }

                if (kthread_should_park()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->park && td->status == HP_THREAD_ACTIVE) {
                                BUG_ON(td->cpu != smp_processor_id());
                                ht->park(td->cpu);
                                td->status = HP_THREAD_PARKED;
                        }
                        kthread_parkme();
                        /* We might have been woken for stop */
                        continue;
                }

                BUG_ON(td->cpu != smp_processor_id());

                /* Check for state change setup */
                switch (td->status) {
                case HP_THREAD_NONE:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->setup)
                                ht->setup(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;

                case HP_THREAD_PARKED:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->unpark)
                                ht->unpark(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;
                }

                if (!ht->thread_should_run(td->cpu)) {
                        preempt_enable_no_resched();
                        schedule();
                } else {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        ht->thread_fn(td->cpu);
                }
        }
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
        struct smpboot_thread_data *td;

        if (tsk)
                return 0;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
        if (!td)
                return -ENOMEM;
        td->cpu = cpu;
        td->ht = ht;

        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
                                    ht->thread_comm);
        if (IS_ERR(tsk)) {
                kfree(td);
                return PTR_ERR(tsk);
        }

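        /*
         * Park the thread so that it could start right on the CPU
         * when it is available.
         */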
        kthread_park(tsk);
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
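                /*
                 * Make sure that the task has actually scheduled out
                 * into park position, before calling the create
                 * callback. At least the migration thread callback
                 * requires that the task is off the runqueue.
                 */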
                if (!wait_task_inactive(tsk, TASK_PARKED))
                        WARN_ON(1);
                else
                        ht->create(cpu);
        }
        return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list) {
                ret = __smpboot_create_thread(cur, cpu);
                if (ret)
                        break;
        }
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (!ht->selfparking)
                kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
                smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
        return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (tsk && !ht->selfparking)
                kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry_reverse(cur, &hotplug_threads, list)
                smpboot_park_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
        return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
        unsigned int cpu;

        /* We need to destroy also the parked threads of offline cpus */
        for_each_possible_cpu(cpu) {
                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

                if (tsk) {
                        kthread_stop(tsk);
                        put_task_struct(tsk);
                        *per_cpu_ptr(ht->store, cpu) = NULL;
                }
        }
}
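/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */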
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        unsigned int cpu;
        int ret = 0;

        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
                        goto out;
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
out:
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
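
/*
 * Usage sketch (hypothetical client code, not part of this file): a
 * subsystem fills in a struct smp_hotplug_thread and registers it once;
 * the smpboot core then creates, parks and unparks one kthread per CPU
 * as CPUs come and go. The example_* names below are made up.
 *
 *	static DEFINE_PER_CPU(struct task_struct *, example_task);
 *
 *	static int example_should_run(unsigned int cpu)
 *	{
 *		return 0;	// report pending per-cpu work here
 *	}
 *
 *	static void example_fn(unsigned int cpu)
 *	{
 *		// process the per-cpu work
 *	}
 *
 *	static struct smp_hotplug_thread example_threads = {
 *		.store			= &example_task,
 *		.thread_should_run	= example_should_run,
 *		.thread_fn		= example_fn,
 *		.thread_comm		= "example/%u",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return smpboot_register_percpu_thread(&example_threads);
 *	}
 */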
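/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */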
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

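/*
 * Called to poll the specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */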
int cpu_report_state(int cpu)
{
        return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

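/*
 * If the CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success. Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out, and -EAGAIN if cpu_wait_death() timed
 * out and the CPU still hasn't gotten its final pass through
 * cpu_report_death().
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */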
int cpu_check_up_prepare(int cpu)
{
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;
        }

        switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

        case CPU_POST_DEAD:

                /* The CPU died properly, so just start it up again. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;

        case CPU_DEAD_FROZEN:

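                /*
                 * Timeout during CPU death: the outgoing CPU did
                 * complete its processing, but only after
                 * cpu_wait_death() had timed out and reported an
                 * error. Let the caller know; it may choose to
                 * proceed anyway, in which case the state is reset
                 * properly by cpu_set_state_online(), or it may retry
                 * later with a longer timeout.
                 */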
                return -EBUSY;

        case CPU_BROKEN:

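                /*
                 * The most likely reason we got here is a timeout
                 * during CPU death, with the outgoing CPU never
                 * completing its processing. This can happen on a
                 * virtualized system if the outgoing VCPU is preempted
                 * for longer than the timeout and the user then
                 * attempts to restart the guest CPU. Tell the caller
                 * to try again later.
                 */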
                return -EAGAIN;

        default:

                /* Should not happen.  Famous last words. */
                return -EIO;
        }
}

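/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */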
void cpu_set_state_online(int cpu)
{
        (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
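/*
 * Wait for the specified CPU to exit the idle loop and die.
 */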
bool cpu_wait_death(unsigned int cpu, int seconds)
{
        int jf_left = seconds * HZ;
        int oldstate;
        bool ret = true;
        int sleep_jf = 1;

        might_sleep();

        /* The outgoing CPU will normally get done quite quickly. */
        if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
                goto update_state;
        udelay(5);

        /* But if the outgoing CPU dawdles, wait increasingly long times. */
        while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
                schedule_timeout_uninterruptible(sleep_jf);
                jf_left -= sleep_jf;
                if (jf_left <= 0)
                        break;
                sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
        }
update_state:
        oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
        if (oldstate == CPU_DEAD) {
                /* Outgoing CPU died normally, update state. */
                smp_mb(); /* atomic_read() before update. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
        } else {
                /* Outgoing CPU still hasn't died, set state accordingly. */
                if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                   oldstate, CPU_BROKEN) != oldstate)
                        goto update_state;
                ret = false;
        }
        return ret;
}

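/*
 * Called by the outgoing CPU to report its successful death.  Use this
 * only for the case in which we can't use the normal idle loop.
 *
 * Returns true if the CPU died normally (CPU_DEAD), false if
 * cpu_wait_death() had already timed out and marked this CPU
 * CPU_BROKEN, in which case the state is set to CPU_DEAD_FROZEN.
 */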
bool cpu_report_death(void)
{
        int oldstate;
        int newstate;
        int cpu = smp_processor_id();

        do {
                oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
                if (oldstate != CPU_BROKEN)
                        newstate = CPU_DEAD;
                else
                        newstate = CPU_DEAD_FROZEN;
        } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                oldstate, newstate) != oldstate);
        return newstate == CPU_DEAD;
}
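
/*
 * Usage sketch (hypothetical arch code, not part of this file): the
 * dying CPU and the CPU controlling the offline operation pair up via
 * cpu_report_death() and cpu_wait_death(). The example_* names and the
 * 5-second timeout below are illustrative only.
 *
 *	// On the outgoing CPU, as the last step before halting:
 *	static void example_play_dead(void)
 *	{
 *		(void)cpu_report_death();
 *		for (;;)
 *			example_halt();	// arch-specific low-power halt
 *	}
 *
 *	// On the controlling CPU, e.g. in an __cpu_die() implementation:
 *	static void example_cpu_die(unsigned int cpu)
 *	{
 *		if (!cpu_wait_death(cpu, 5))
 *			pr_err("CPU %u did not die\n", cpu);
 *	}
 */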

#endif /* CONFIG_HOTPLUG_CPU */