This source file includes the following definitions:
- smp_set_ops
- get_arch_pgd
- secondary_biglittle_prepare
- secondary_biglittle_init
- secondary_biglittle_prepare
- secondary_biglittle_init
- __cpu_up
- smp_init_cpus
- platform_can_secondary_boot
- platform_can_cpu_hotplug
- platform_cpu_kill
- platform_cpu_disable
- platform_can_hotplug_cpu
- __cpu_disable
- __cpu_die
- arch_cpu_idle_dead
- smp_store_cpu_info
- secondary_start_kernel
- smp_cpus_done
- smp_prepare_boot_cpu
- smp_prepare_cpus
- set_smp_cross_call
- smp_cross_call
- show_ipi_list
- smp_irq_stat_cpu
- arch_send_call_function_ipi_mask
- arch_send_wakeup_ipi_mask
- arch_send_call_function_single_ipi
- arch_irq_work_raise
- tick_broadcast
- ipi_cpu_stop
- register_ipi_completion
- ipi_complete
- do_IPI
- handle_IPI
- smp_send_reschedule
- smp_send_stop
- panic_smp_self_stop
- setup_profiling_timer
- cpufreq_callback
- register_cpufreq_notifier
- raise_nmi
- arch_trigger_cpumask_backtrace
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or traceable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
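
/*
 * Usage sketch (illustrative, not part of this file): platform code
 * usually supplies its operations via the machine descriptor rather
 * than calling smp_set_ops() directly. Assuming a hypothetical
 * platform "foo":
 *
 *	static const struct smp_operations foo_smp_ops __initconst = {
 *		.smp_prepare_cpus	= foo_smp_prepare_cpus,
 *		.smp_boot_secondary	= foo_boot_secondary,
 *	};
 *
 *	DT_MACHINE_START(FOO_DT, "Foo platform")
 *		.smp = smp_ops(foo_smp_ops),
 *	MACHINE_END
 *
 * setup_arch() then hands mdesc->smp to smp_set_ops() during boot.
 */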
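
/*
 * With CONFIG_ARM_LPAE the swapper page directory may sit above the
 * 32-bit physical range, so the secondary is handed a PFN; the
 * classic 2-level case passes the physical address directly. Both
 * forms are consumed by the secondary boot path in head.S.
 */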
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif
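
/*
 * On big.LITTLE with branch-predictor hardening, the two CPU types
 * may need different processor functions (including different Spectre
 * workarounds), so every secondary gets its own vtable: allocated
 * ahead of boot by secondary_biglittle_prepare() and filled in from
 * that CPU's own MIDR via lookup_processor(read_cpuid_id()).
 */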

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}
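
/*
 * A minimal smp_boot_secondary() sketch for a hypothetical platform
 * "foo" (the register and helper names are illustrative assumptions,
 * not from this file): publish the physical address of the secondary
 * entry point where the powered-down core will look for it, then
 * wake the core.
 *
 *	static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		writel(virt_to_phys(secondary_startup), foo_jump_reg(cpu));
 *		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 *		return 0;
 *	}
 *
 * The woken core runs head.S's secondary_startup, picks up
 * secondary_data (hence the sync_cache_w() above), and eventually
 * reaches secondary_start_kernel(), which completes cpu_running.
 */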

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
	remove_cpu_topology(cpu);
#endif

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying, but that means there is no
	 * synchronisation between the requesting CPU and the dying CPU
	 * actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}
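
/*
 * Note the handshake: cpu_wait_death() above pairs with the dying
 * CPU's cpu_report_death() in arch_cpu_idle_dead() below; only after
 * that report (and the cache flushes around it) is it safe for
 * platform_cpu_kill() to cut power or clocks.
 */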

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * any held out L1 cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	(void)cpu_report_death();

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 * Platforms are generally expected not to return from this call.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
	check_cpu_icache_size(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	secondary_biglittle_init();

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}
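
/*
 * On the arithmetic above: BogoMIPS = loops_per_jiffy * HZ / 500000,
 * so bogosum / (500000 / HZ) yields the integer part and
 * (bogosum / (5000 / HZ)) % 100 the two decimal places. For example,
 * with HZ=100 and bogosum=4980736 this prints "996.14 BogoMIPS".
 */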

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of
		 * CPUs actually populated at the present time. A platform
		 * should re-initialize it in its platform-specific
		 * smp_prepare_cpus() if present != possible (e.g. physical
		 * hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}
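
/*
 * The cross-call backend is registered exactly once, by the interrupt
 * controller driver; for example the GIC driver does (sketch):
 *
 *	set_smp_cross_call(gic_raise_softirq);
 *
 * after which smp_cross_call() below can trace the IPI and raise the
 * corresponding SGI on the target mask.
 */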

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1) {
		cpu_relax();
		wfe();
	}
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}
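
/*
 * A hedged usage sketch (modelled on the bL_switcher-style handshake;
 * nothing outside this file is implied): the waiting CPU registers a
 * completion against its own CPU number, then parks in
 * wait_for_completion(); another CPU raises the returned IPI at the
 * waiter, whose handle_IPI() -> ipi_complete() wakes it.
 *
 *	struct completion done;
 *	int ipinr;
 *
 *	init_completion(&done);
 *	ipinr = register_ipi_completion(&done, smp_processor_id());
 *	// hand 'ipinr' and our CPU number to the signalling CPU,
 *	// which raises the IPI via its irqchip's SGI interface
 *	wait_for_completion(&done);
 */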

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
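
/*
 * do_IPI() above is the legacy entry point reached from the assembly
 * IRQ vector; multi-irq-handler platforms (e.g. those using the GIC's
 * gic_handle_irq()) call handle_IPI() directly for SGI numbers 0-15.
 */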

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * In case panic() is called at the same time on CPU1 and CPU2, and
 * CPU1 calls panic_smp_self_stop() before crash_smp_send_stop(),
 * CPU1 can't receive the IPI IRQs from CPU2; CPU1 will always be
 * online and kdump fails. So split out panic_smp_self_stop() and add
 * set_cpu_online(smp_processor_id(), false).
 */
void panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
		 smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}
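
/*
 * On the PRECHANGE/POSTCHANGE asymmetry above: loops_per_jiffy is
 * raised *before* a frequency increase and lowered only *after* a
 * decrease, so during the transition window udelay() can only err on
 * the long side, never delay too little.
 */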

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

static void raise_nmi(cpumask_t *mask)
{
	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
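
/*
 * raise_nmi() calls __smp_cross_call() directly rather than
 * smp_cross_call(): IPI_CPU_BACKTRACE sits outside NR_IPI (see the
 * enum above), so it is neither counted in the IPI statistics nor
 * described by ipi_types[], and skipping the tracepoint also suits
 * the lockup/backtrace context it is raised from.
 */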

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}