This source file includes the following definitions:
- smpcfd_prepare_cpu
- smpcfd_dead_cpu
- smpcfd_dying_cpu
- call_function_init
- csd_lock_wait
- csd_lock
- csd_unlock
- generic_exec_single
- generic_smp_call_function_single_interrupt
- flush_smp_call_function_queue
- smp_call_function_single
- smp_call_function_single_async
- smp_call_function_any
- smp_call_function_many
- smp_call_function
- arch_disable_smp_support
- nosmp
- nrcpus
- maxcpus
- setup_nr_cpu_ids
- smp_init
- on_each_cpu
- on_each_cpu_mask
- on_each_cpu_cond_mask
- on_each_cpu_cond
- do_nothing
- kick_all_cpus_sync
- wake_up_all_idle_cpus
- smp_call_on_cpu_callback
- smp_call_on_cpu
1
2 /*
3  * Generic helpers for SMP inter-processor function calls (IPIs),
4  * e.g. smp_call_function() and smp_call_function_single().
5  */
6
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/irq_work.h>
11 #include <linux/rcupdate.h>
12 #include <linux/rculist.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/percpu.h>
16 #include <linux/init.h>
17 #include <linux/gfp.h>
18 #include <linux/smp.h>
19 #include <linux/cpu.h>
20 #include <linux/sched.h>
21 #include <linux/sched/idle.h>
22 #include <linux/hypervisor.h>
23
24 #include "smpboot.h"
25
26 enum {
27 CSD_FLAG_LOCK = 0x01,
28 CSD_FLAG_SYNCHRONOUS = 0x02,
29 };
30
31 struct call_function_data {
32 call_single_data_t __percpu *csd;
33 cpumask_var_t cpumask;
34 cpumask_var_t cpumask_ipi;
35 };
36
37 static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
38
39 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
40
41 static void flush_smp_call_function_queue(bool warn_cpu_offline);
42
43 int smpcfd_prepare_cpu(unsigned int cpu)
44 {
45 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
46
47 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
48 cpu_to_node(cpu)))
49 return -ENOMEM;
50 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
51 cpu_to_node(cpu))) {
52 free_cpumask_var(cfd->cpumask);
53 return -ENOMEM;
54 }
55 cfd->csd = alloc_percpu(call_single_data_t);
56 if (!cfd->csd) {
57 free_cpumask_var(cfd->cpumask);
58 free_cpumask_var(cfd->cpumask_ipi);
59 return -ENOMEM;
60 }
61
62 return 0;
63 }
64
65 int smpcfd_dead_cpu(unsigned int cpu)
66 {
67 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
68
69 free_cpumask_var(cfd->cpumask);
70 free_cpumask_var(cfd->cpumask_ipi);
71 free_percpu(cfd->csd);
72 return 0;
73 }
74
75 int smpcfd_dying_cpu(unsigned int cpu)
76 {
77 /*
78  * The IPIs for the smp-call-function callbacks queued by other
79  * CPUs might arrive late, either due to hardware latencies or
80  * because this CPU disabled interrupts (inside stop-machine)
81  * before the IPIs were sent. So flush out any pending callbacks
82  * explicitly (without waiting for the IPIs to arrive), to
83  * ensure that the outgoing CPU doesn't go offline with work
84  * still pending.
85  */
86 flush_smp_call_function_queue(false);
87 return 0;
88 }
89
90 void __init call_function_init(void)
91 {
92 int i;
93
94 for_each_possible_cpu(i)
95 init_llist_head(&per_cpu(call_single_queue, i));
96
97 smpcfd_prepare_cpu(smp_processor_id());
98 }
99
100 /*
101  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
102  *
103  * For non-synchronous ipi calls the csd can still be in use by the
104  * previous function call. For multi-cpu calls its even more interesting
105  * as we'll have to ensure no other cpu is observing our csd.
106  */
107 static __always_inline void csd_lock_wait(call_single_data_t *csd)
108 {
109 smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
110 }
111
112 static __always_inline void csd_lock(call_single_data_t *csd)
113 {
114 csd_lock_wait(csd);
115 csd->flags |= CSD_FLAG_LOCK;
116
117 /*
118  * prevent CPU from reordering the above assignment
119  * to ->flags with any subsequent assignments to other
120  * fields of the specified call_single_data_t structure:
121  */
122 smp_wmb();
123 }
124
125 static __always_inline void csd_unlock(call_single_data_t *csd)
126 {
127 WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
128
129 /*
130  * ensure we're all done before releasing data:
131  */
132 smp_store_release(&csd->flags, 0);
133 }
134
135 static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
136
137 /*
138  * Insert a previously allocated call_single_data_t element
139  * for execution on the given CPU. The csd must already be
140  * locked (CSD_FLAG_LOCK set) by the caller.
141  */
142 static int generic_exec_single(int cpu, call_single_data_t *csd,
143 smp_call_func_t func, void *info)
144 {
145 if (cpu == smp_processor_id()) {
146 unsigned long flags;
147
148 /*
149  * We can unlock early even for the synchronous on-stack case,
150  * because we're going to run @func on this very CPU right away.
151  */
152 csd_unlock(csd);
153 local_irq_save(flags);
154 func(info);
155 local_irq_restore(flags);
156 return 0;
157 }
158
159
160 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
161 csd_unlock(csd);
162 return -ENXIO;
163 }
164
165 csd->func = func;
166 csd->info = info;
167
168 /*
169  * The list addition must be visible to the target CPU by the time
170  * it receives the IPI: llist_add() is an atomic operation on the
171  * queue head and provides the required ordering.
172  *
173  * We only need to send an IPI when we add the first entry to an
174  * otherwise empty queue; if entries were already queued, the
175  * target CPU has already been kicked and will process this new
176  * entry when it flushes its call_single_queue.
177  */
178
179 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
180 arch_send_call_function_single_ipi(cpu);
181
182 return 0;
183 }
184
185
186 /**
187  * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
188  *
189  * Invoked by arch code to handle a call-function IPI.
190  */
191 void generic_smp_call_function_single_interrupt(void)
192 {
193 flush_smp_call_function_queue(true);
194 }
195
196 /**
197  * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
198  *
199  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
200  *                    offline CPU. Skip this check if set to 'false'.
201  *
202  * Flush any pending smp-call-function callbacks queued on this CPU. This is
203  * invoked by the generic IPI handler, as well as by a CPU about to go
204  * offline, to ensure that all pending IPI callbacks are run before it goes
205  * completely offline.
206  *
207  * Loop through the call_single_queue and run all the queued callbacks.
208  * Must be called with interrupts disabled.
209  */
210 static void flush_smp_call_function_queue(bool warn_cpu_offline)
211 {
212 struct llist_head *head;
213 struct llist_node *entry;
214 call_single_data_t *csd, *csd_next;
215 static bool warned;
216
217 lockdep_assert_irqs_disabled();
218
219 head = this_cpu_ptr(&call_single_queue);
220 entry = llist_del_all(head);
221 entry = llist_reverse_order(entry);
222
223 /* There shouldn't be any pending callbacks on an offline CPU. */
224 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
225 !warned && !llist_empty(head))) {
226 warned = true;
227 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
228
229 /*
230  * We don't have to use the _safe() variant here
231  * because we are not invoking the IPI handlers yet.
232  */
233 llist_for_each_entry(csd, entry, llist)
234 pr_warn("IPI callback %pS sent to offline CPU\n",
235 csd->func);
236 }
237
238 llist_for_each_entry_safe(csd, csd_next, entry, llist) {
239 smp_call_func_t func = csd->func;
240 void *info = csd->info;
241
242 /* Do we wait until *after* callback? */
243 if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
244 func(info);
245 csd_unlock(csd);
246 } else {
247 csd_unlock(csd);
248 func(info);
249 }
250 }
251
252
253 /*
254  * Handle irq works queued remotely by irq_work_queue_on();
255  * the IPI used to deliver that work can be the same one that
256  * brought us here, so run the irq_work queue as well.
257  */
258 irq_work_run();
259 }
260
261 /*
262  * smp_call_function_single - Run a function on a specific CPU
263  * @func: The function to run. This must be fast and non-blocking.
264  * @info: An arbitrary pointer to pass to the function.
265  * @wait: If true, wait until function has completed on other CPUs.
266  *
267  * Returns 0 on success, else a negative status code.
268  */
269 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
270 int wait)
271 {
272 call_single_data_t *csd;
273 call_single_data_t csd_stack = {
274 .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
275 };
276 int this_cpu;
277 int err;
278
279 /*
280  * prevent preemption and reschedule on another processor,
281  * as well as CPU removal
282  */
283 this_cpu = get_cpu();
284
285 /*
286  * Can deadlock when called with interrupts disabled.
287  * We allow cpu's that are not yet online though, as no one else can
288  * send smp call function interrupt to this cpu and as such deadlocks
289  * can't happen.
290  */
291 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
292 && !oops_in_progress);
293
294 /*
295  * When @wait we can deadlock when we interrupt between llist_add() and
296  * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
297  * csd_lock() on because the interrupt context uses the same csd
298  * storage.
299  */
300 WARN_ON_ONCE(!in_task());
301
302 csd = &csd_stack;
303 if (!wait) {
304 csd = this_cpu_ptr(&csd_data);
305 csd_lock(csd);
306 }
307
308 err = generic_exec_single(cpu, csd, func, info);
309
310 if (wait)
311 csd_lock_wait(csd);
312
313 put_cpu();
314
315 return err;
316 }
317 EXPORT_SYMBOL(smp_call_function_single);
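/*
 * Usage sketch (illustrative, not part of this file): run a callback on a
 * specific CPU and wait for its result. The helpers read_cpu_id() and
 * cpu_id_of() are hypothetical names used only for this example.
 */
static void read_cpu_id(void *info)
{
	int *id = info;

	/* Runs on the target CPU, with interrupts disabled. */
	*id = smp_processor_id();
}

static int cpu_id_of(int cpu)
{
	int id = -1;

	/* wait=1: only returns after read_cpu_id() has run on @cpu. */
	smp_call_function_single(cpu, read_cpu_id, &id, 1);
	return id;
}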
318
319 /**
320  * smp_call_function_single_async(): Run an asynchronous function on a
321  *                                   specific CPU.
322  * @cpu: The CPU to run on.
323  * @csd: Pre-allocated and setup data structure
324  *
325  * Like smp_call_function_single(), but the call is asynchronous and
326  * can thus be done from contexts with disabled interrupts.
327  *
328  * The caller passes his own pre-allocated data structure
329  * (ie: embedded in an object) and is responsible for synchronizing it
330  * such that the IPIs performed on the @csd are strictly serialized.
331  *
332  * NOTE: Be careful, there is unfortunately no current debugging facility to
333  * validate the correctness of this serialization.
334  */
335 int smp_call_function_single_async(int cpu, call_single_data_t *csd)
336 {
337 int err = 0;
338
339 preempt_disable();
340
341 /* We could deadlock if we have to wait here with interrupts disabled! */
342 if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
343 csd_lock_wait(csd);
344
345 csd->flags = CSD_FLAG_LOCK;
346 smp_wmb();
347
348 err = generic_exec_single(cpu, csd, csd->func, csd->info);
349 preempt_enable();
350
351 return err;
352 }
353 EXPORT_SYMBOL_GPL(smp_call_function_single_async);
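/*
 * Usage sketch (illustrative, not part of this file): fire-and-forget call
 * using a caller-owned csd, as smp_call_function_single_async() requires.
 * The per-CPU csd and the remote_tick() callback are hypothetical; the
 * caller is responsible for serializing reuse of the csd (see the NOTE in
 * the comment above).
 */
static void remote_tick(void *info)
{
	/* Runs on the target CPU from the call-function IPI. */
}

static DEFINE_PER_CPU(call_single_data_t, remote_tick_csd);

static void kick_remote_tick(int cpu)
{
	call_single_data_t *csd = &per_cpu(remote_tick_csd, cpu);

	csd->func = remote_tick;
	csd->info = NULL;
	/* Returns immediately; remote_tick() runs on @cpu some time later. */
	smp_call_function_single_async(cpu, csd);
}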
354
355 /*
356  * smp_call_function_any - Run a function on any of the given cpus
357  * @mask: The mask of cpus it can run on.
358  * @func: The function to run. This must be fast and non-blocking.
359  * @info: An arbitrary pointer to pass to the function.
360  * @wait: If true, wait until function has completed.
361  *
362  * Returns 0 on success, else a negative status code (if no cpus were online).
363  *
364  * Selection preference:
365  *      1) current cpu if in @mask
366  *      2) any cpu of current node if in @mask
367  *      3) any other online cpu in @mask
368  */
369 int smp_call_function_any(const struct cpumask *mask,
370 smp_call_func_t func, void *info, int wait)
371 {
372 unsigned int cpu;
373 const struct cpumask *nodemask;
374 int ret;
375
376 /* Try for same CPU (cheapest) */
377 cpu = get_cpu();
378 if (cpumask_test_cpu(cpu, mask))
379 goto call;
380
381 /* Try for same node. */
382 nodemask = cpumask_of_node(cpu_to_node(cpu));
383 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
384 cpu = cpumask_next_and(cpu, nodemask, mask)) {
385 if (cpu_online(cpu))
386 goto call;
387 }
388
389 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
390 cpu = cpumask_any_and(mask, cpu_online_mask);
391 call:
392 ret = smp_call_function_single(cpu, func, info, wait);
393 put_cpu();
394 return ret;
395 }
396 EXPORT_SYMBOL_GPL(smp_call_function_any);
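/*
 * Usage sketch (illustrative, not part of this file): run a short probe on
 * whichever CPU of @mask is cheapest to reach, letting the selection logic
 * above prefer the local CPU, then the local node. probe_fn() and
 * probe_any_of() are hypothetical names.
 */
static void probe_fn(void *info)
{
	int *ran_on = info;

	*ran_on = smp_processor_id();
}

static int probe_any_of(const struct cpumask *mask)
{
	int ran_on = -1;

	smp_call_function_any(mask, probe_fn, &ran_on, 1);
	return ran_on;
}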
397
398 /**
399  * smp_call_function_many(): Run a function on a set of other CPUs.
400  * @mask: The set of cpus to run on (only runs on online subset).
401  * @func: The function to run. This must be fast and non-blocking.
402  * @info: An arbitrary pointer to pass to the function.
403  * @wait: If true, wait (atomically) until function has completed
404  *        on other CPUs.
405  *
406  * If @wait is true, then returns once @func has returned.
407  *
408  * You must not call this function with disabled interrupts or from a
409  * hardware interrupt handler or from a bottom half handler. Preemption
410  * must be disabled when calling this function.
411  */
412 void smp_call_function_many(const struct cpumask *mask,
413 smp_call_func_t func, void *info, bool wait)
414 {
415 struct call_function_data *cfd;
416 int cpu, next_cpu, this_cpu = smp_processor_id();
417
418 /*
419  * Can deadlock when called with interrupts disabled.
420  * We allow cpu's that are not yet online though, as no one else can
421  * send smp call function interrupt to this cpu and as such deadlocks
422  * can't happen.
423  */
424 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
425 && !oops_in_progress && !early_boot_irqs_disabled);
426
427 /*
428  * When @wait we can deadlock when we interrupt between llist_add() and
429  * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
430  * csd_lock() on because the interrupt context uses the same csd
431  * storage.
432  */
433 WARN_ON_ONCE(!in_task());
434
435 /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
436 cpu = cpumask_first_and(mask, cpu_online_mask);
437 if (cpu == this_cpu)
438 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
439
440 /* No online cpus? We're done. */
441 if (cpu >= nr_cpu_ids)
442 return;
443
444 /* Do we have another CPU which isn't us? */
445 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
446 if (next_cpu == this_cpu)
447 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
448
449 /* Fastpath: do that cpu by itself. */
450 if (next_cpu >= nr_cpu_ids) {
451 smp_call_function_single(cpu, func, info, wait);
452 return;
453 }
454
455 cfd = this_cpu_ptr(&cfd_data);
456
457 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
458 __cpumask_clear_cpu(this_cpu, cfd->cpumask);
459
460 /* Some callers race with other cpus changing the passed mask */
461 if (unlikely(!cpumask_weight(cfd->cpumask)))
462 return;
463
464 cpumask_clear(cfd->cpumask_ipi);
465 for_each_cpu(cpu, cfd->cpumask) {
466 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
467
468 csd_lock(csd);
469 if (wait)
470 csd->flags |= CSD_FLAG_SYNCHRONOUS;
471 csd->func = func;
472 csd->info = info;
473 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
474 __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
475 }
476
477 /* Send a message to all CPUs in the map */
478 arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
479
480 if (wait) {
481 for_each_cpu(cpu, cfd->cpumask) {
482 call_single_data_t *csd;
483
484 csd = per_cpu_ptr(cfd->csd, cpu);
485 csd_lock_wait(csd);
486 }
487 }
488 }
489 EXPORT_SYMBOL(smp_call_function_many);
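/*
 * Usage sketch (illustrative, not part of this file): run a callback on all
 * *other* online CPUs in a caller-supplied mask and wait for completion.
 * Preemption must be disabled around smp_call_function_many(), which uses
 * this_cpu_ptr() and deliberately skips the local CPU. drain_remote_state()
 * and drain_state_on() are hypothetical names.
 */
static void drain_remote_state(void *info)
{
	/* Runs on each targeted remote CPU from the call-function IPI. */
}

static void drain_state_on(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, drain_remote_state, NULL, true);
	preempt_enable();
}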
490
491
492 /**
493  * smp_call_function(): Run a function on all other CPUs.
494  * @func: The function to run. This must be fast and non-blocking.
495  * @info: An arbitrary pointer to pass to the function.
496  * @wait: If true, wait (atomically) until function has completed
497  *        on other CPUs.
498  *
499  * If @wait is true, then returns once @func has returned; otherwise
500  * it returns just before the target cpu calls @func.
501  *
502  * You must not call this function with disabled interrupts or from a
503  * hardware interrupt handler or from a bottom half handler.
504  */
505
506 void smp_call_function(smp_call_func_t func, void *info, int wait)
507 {
508 preempt_disable();
509 smp_call_function_many(cpu_online_mask, func, info, wait);
510 preempt_enable();
511 }
512 EXPORT_SYMBOL(smp_call_function);
513
514 /* Setup configured maximum number of CPUs to activate */
515 unsigned int setup_max_cpus = NR_CPUS;
516 EXPORT_SYMBOL(setup_max_cpus);
517
518
519 /*
520  * Setup routine for controlling SMP activation
521  *
522  * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
523  * activation entirely (the MPS table probe still happens, though).
524  *
525  * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
526  * greater than 0, limits the maximum number of CPUs activated in
527  * SMP mode to <NUM>.
528  */
529
530 void __weak arch_disable_smp_support(void) { }
531
532 static int __init nosmp(char *str)
533 {
534 setup_max_cpus = 0;
535 arch_disable_smp_support();
536
537 return 0;
538 }
539
540 early_param("nosmp", nosmp);
541
542 /* this is hard limit */
543 static int __init nrcpus(char *str)
544 {
545 int nr_cpus;
546
547 get_option(&str, &nr_cpus);
548 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
549 nr_cpu_ids = nr_cpus;
550
551 return 0;
552 }
553
554 early_param("nr_cpus", nrcpus);
555
556 static int __init maxcpus(char *str)
557 {
558 get_option(&str, &setup_max_cpus);
559 if (setup_max_cpus == 0)
560 arch_disable_smp_support();
561
562 return 0;
563 }
564
565 early_param("maxcpus", maxcpus);
566
567 /* Setup number of possible processor ids */
568 unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
569 EXPORT_SYMBOL(nr_cpu_ids);
570
571 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
572 void __init setup_nr_cpu_ids(void)
573 {
574 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
575 }
576
577 /* Called by boot processor to activate the rest. */
578 void __init smp_init(void)
579 {
580 int num_nodes, num_cpus;
581 unsigned int cpu;
582
583 idle_threads_init();
584 cpuhp_threads_init();
585
586 pr_info("Bringing up secondary CPUs ...\n");
587
588
589 for_each_present_cpu(cpu) {
590 if (num_online_cpus() >= setup_max_cpus)
591 break;
592 if (!cpu_online(cpu))
593 cpu_up(cpu);
594 }
595
596 num_nodes = num_online_nodes();
597 num_cpus = num_online_cpus();
598 pr_info("Brought up %d node%s, %d CPU%s\n",
599 num_nodes, (num_nodes > 1 ? "s" : ""),
600 num_cpus, (num_cpus > 1 ? "s" : ""));
601
602 /* Any cleanup work */
603 smp_cpus_done(setup_max_cpus);
604 }
605
606 /*
607  * Call a function on all processors. May be used during early boot while
608  * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
609  * of local_irq_disable/enable().
610  */
611 void on_each_cpu(void (*func) (void *info), void *info, int wait)
612 {
613 unsigned long flags;
614
615 preempt_disable();
616 smp_call_function(func, info, wait);
617 local_irq_save(flags);
618 func(info);
619 local_irq_restore(flags);
620 preempt_enable();
621 }
622 EXPORT_SYMBOL(on_each_cpu);
623
624 /**
625  * on_each_cpu_mask(): Run a function on processors specified by
626  * cpumask, which may include the local processor.
627  * @mask: The set of cpus to run on (only runs on online subset).
628  * @func: The function to run. This must be fast and non-blocking.
629  * @info: An arbitrary pointer to pass to the function.
630  * @wait: If true, wait (atomically) until function has completed
631  *        on other CPUs.
632  *
633  * If @wait is true, then returns once @func has returned.
634  *
635  * You must not call this function with disabled interrupts or from a
636  * hardware interrupt handler or from a bottom half handler. The
637  * exception is that it may be used during early boot while
638  * early_boot_irqs_disabled is set.
639  */
640 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
641 void *info, bool wait)
642 {
643 int cpu = get_cpu();
644
645 smp_call_function_many(mask, func, info, wait);
646 if (cpumask_test_cpu(cpu, mask)) {
647 unsigned long flags;
648 local_irq_save(flags);
649 func(info);
650 local_irq_restore(flags);
651 }
652 put_cpu();
653 }
654 EXPORT_SYMBOL(on_each_cpu_mask);
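/*
 * Usage sketch (illustrative, not part of this file): invalidate a per-CPU
 * software cache on every online CPU of one NUMA node, including the local
 * CPU when it belongs to that node. invalidate_local_cache() and
 * invalidate_node_caches() are hypothetical names.
 */
static void invalidate_local_cache(void *info)
{
	/* Runs on each CPU in the mask with interrupts disabled. */
}

static void invalidate_node_caches(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), invalidate_local_cache,
			 NULL, true);
}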
655
656 /*
657  * on_each_cpu_cond_mask(): Call a function on each processor in @mask for
658  * which the supplied function cond_func returns true, optionally waiting
659  * for all the required CPUs to finish. This may include the local
660  * processor.
661  * @cond_func:  A callback function that is passed a cpu id and
662  *              the info parameter. The callback function should
663  *              return true if the call should occur on the given cpu.
664  * @func:       The function to run on all applicable CPUs.
665  *              This must be fast and non-blocking.
666  * @info:       An arbitrary pointer to pass to both functions.
667  * @wait:       If true, wait (atomically) until function has
668  *              completed on other CPUs.
669  * @gfp_flags:  GFP flags to use when allocating the cpumask
670  *              used internally by the function.
671  * @mask:       The mask of cpus it can run on.
672  *
673  * The function might sleep if the GFP flags indicate a non
674  * blocking allocation is allowed.
675  *
676  * Preemption is disabled to protect against CPUs going offline but not
677  * online. CPUs going online during the call will not be seen or sent an IPI.
678  *
679  * You must not call this function with disabled interrupts or
680  * from a hardware interrupt handler or from a bottom half handler.
681  */
682
683 void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
684 smp_call_func_t func, void *info, bool wait,
685 gfp_t gfp_flags, const struct cpumask *mask)
686 {
687 cpumask_var_t cpus;
688 int cpu, ret;
689
690 might_sleep_if(gfpflags_allow_blocking(gfp_flags));
691
692 if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
693 preempt_disable();
694 for_each_cpu(cpu, mask)
695 if (cond_func(cpu, info))
696 __cpumask_set_cpu(cpu, cpus);
697 on_each_cpu_mask(cpus, func, info, wait);
698 preempt_enable();
699 free_cpumask_var(cpus);
700 } else {
701 /*
702  * No free cpumask, bother. No matter, we'll
703  * just have to IPI them one by one.
704  */
705 preempt_disable();
706 for_each_cpu(cpu, mask)
707 if (cond_func(cpu, info)) {
708 ret = smp_call_function_single(cpu, func,
709 info, wait);
710 WARN_ON_ONCE(ret);
711 }
712 preempt_enable();
713 }
714 }
715 EXPORT_SYMBOL(on_each_cpu_cond_mask);
716
717 void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
718 smp_call_func_t func, void *info, bool wait,
719 gfp_t gfp_flags)
720 {
721 on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
722 cpu_online_mask);
723 }
724 EXPORT_SYMBOL(on_each_cpu_cond);
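/*
 * Usage sketch (illustrative, not part of this file): only interrupt CPUs
 * that actually have something to do, as decided by a predicate run on the
 * calling CPU. The per-CPU flag "pending_work" and both callbacks are
 * hypothetical names.
 */
static DEFINE_PER_CPU(bool, pending_work);

static bool cpu_has_pending_work(int cpu, void *info)
{
	return per_cpu(pending_work, cpu);
}

static void do_pending_work(void *info)
{
	this_cpu_write(pending_work, false);
}

static void flush_pending_work(void)
{
	on_each_cpu_cond(cpu_has_pending_work, do_pending_work, NULL,
			 true, GFP_KERNEL);
}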
725
726 static void do_nothing(void *unused)
727 {
728 }
729
730 /**
731  * kick_all_cpus_sync - Force all cpus out of idle
732  *
733  * Sends a dummy, synchronous IPI to every other online CPU and waits
734  * for it to complete. By the time this returns, every online CPU has
735  * taken an interrupt after the smp_mb() below, so an update made by
736  * the caller beforehand (for example to a function pointer used from
737  * the idle path) is visible everywhere and no CPU is still running
738  * the code it replaced.
739  */
740
741 void kick_all_cpus_sync(void)
742 {
743 /* Make sure the change is visible before we kick the cpus */
744 smp_mb();
745 smp_call_function(do_nothing, NULL, 1);
746 }
747 EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
748
749 /**
750  * wake_up_all_idle_cpus - break all cpus out of idle
751  *
752  * Wakes every other online CPU that is currently idle (including CPUs
753  * that idle by polling); CPUs that are not idle are left alone.
754  */
755 void wake_up_all_idle_cpus(void)
756 {
757 int cpu;
758
759 preempt_disable();
760 for_each_online_cpu(cpu) {
761 if (cpu == smp_processor_id())
762 continue;
763
764 wake_up_if_idle(cpu);
765 }
766 preempt_enable();
767 }
768 EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
769
770 /**
771  * smp_call_on_cpu - Call a function on a specific cpu
772  *
773  * Used to call a function on a specific cpu and wait for it to return.
774  * Optionally make sure the call is done on a specified physical cpu via vcpu
775  * pinning in order to support virtualized environments.
776  */
777 struct smp_call_on_cpu_struct {
778 struct work_struct work;
779 struct completion done;
780 int (*func)(void *);
781 void *data;
782 int ret;
783 int cpu;
784 };
785
786 static void smp_call_on_cpu_callback(struct work_struct *work)
787 {
788 struct smp_call_on_cpu_struct *sscs;
789
790 sscs = container_of(work, struct smp_call_on_cpu_struct, work);
791 if (sscs->cpu >= 0)
792 hypervisor_pin_vcpu(sscs->cpu);
793 sscs->ret = sscs->func(sscs->data);
794 if (sscs->cpu >= 0)
795 hypervisor_pin_vcpu(-1);
796
797 complete(&sscs->done);
798 }
799
800 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
801 {
802 struct smp_call_on_cpu_struct sscs = {
803 .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
804 .func = func,
805 .data = par,
806 .cpu = phys ? cpu : -1,
807 };
808
809 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
810
811 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
812 return -ENXIO;
813
814 queue_work_on(cpu, system_wq, &sscs.work);
815 wait_for_completion(&sscs.done);
816
817 return sscs.ret;
818 }
819 EXPORT_SYMBOL_GPL(smp_call_on_cpu);
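/*
 * Usage sketch (illustrative, not part of this file): unlike the IPI based
 * helpers above, smp_call_on_cpu() runs the callback from a workqueue
 * worker bound to the chosen CPU, so it may sleep and can return a value.
 * slow_probe() and slow_probe_on() are hypothetical names.
 */
static int slow_probe(void *arg)
{
	/* Process context on the requested CPU; sleeping is allowed here. */
	return 0;
}

static int slow_probe_on(unsigned int cpu)
{
	/* phys=false: no hypervisor vCPU pinning requested. */
	return smp_call_on_cpu(cpu, slow_probe, NULL, false);
}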