This source file includes the following definitions:
- smp_store_cpu_info
- smp_setup_percpu_timer
- wait_boot_cpu_to_stop
- smp_callin
- wait_for_txrdy
- send_secondary_console_msg
- recv_secondary_console_msg
- secondary_cpu_start
- smp_boot_one_cpu
- setup_smp
- smp_prepare_cpus
- smp_prepare_boot_cpu
- __cpu_up
- smp_cpus_done
- setup_profiling_timer
- send_ipi_message
- handle_ipi
- smp_send_reschedule
- smp_send_stop
- arch_send_call_function_ipi_mask
- arch_send_call_function_single_ipi
- ipi_imb
- smp_imb
- ipi_flush_tlb_all
- flush_tlb_all
- ipi_flush_tlb_mm
- flush_tlb_mm
- ipi_flush_tlb_page
- flush_tlb_page
- flush_tlb_range
- ipi_flush_icache_page
- flush_icache_user_range
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

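/* Per-CPU state: delay-loop calibration, ASN bookkeeping, IPI counts.  */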
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

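/* A collection of single-bit IPI messages: one word per CPU, each on
   its own cache line so senders don't false-share.  */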
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

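/* Boot/secondary handshake: -1 while the console starts the secondary,
   0 once the boot CPU has acknowledged it, 1 once the secondary has
   calibrated and published its cpu_data.  */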
static int smp_secondary_alive = 0;

int smp_num_probed;
int smp_num_cpus = 1;
EXPORT_SYMBOL(smp_num_cpus);

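/* Called by both the boot CPU and the secondaries to move the freshly
   calibrated loops_per_jiffy into per-CPU storage and reset the CPU's
   ASN tracking.  */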
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

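/* Initialize the per-CPU profiling counters consumed by the local
   timer interrupt.  */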
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

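/* Spin until the boot CPU clears smp_secondary_alive, acknowledging that
   it has seen this secondary start; hang noisily after ten seconds.  */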
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

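/* Where secondaries begin a life of C.  Runs on the new CPU: set up
   traps, interrupts and the local clock, adopt init_mm, then enter the
   generic idle loop.  */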
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

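	/* Turn on machine checks.  */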
	wrmces(7);

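	/* Set trap vectors.  */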
	trap_init();

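	/* Set interrupt vector.  */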
	wrent(entInt, 0);

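	/* Get our local ticker going.  */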
	smp_setup_percpu_timer(cpuid);
	init_clockevent();

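	/* Call platform-specific callin, if present.  */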
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

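	/* All kernel threads share the same mm context.  */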
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

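	/* Inform the notifiers about the new cpu.  */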
	notify_cpu_starting(cpuid);

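	/* Interrupts must be enabled for the delay calibration below.  */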
	local_irq_enable();

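	/* Wait for the boot CPU's acknowledgement, with irqs enabled,
	   before running calibrate_delay.  */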
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);

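	/* Allow the master to continue only after loops_per_jiffy is visible.  */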
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

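/* Wait until hwrpb->txrdy is clear for the given CPU mask, i.e. the
   console has consumed the previous message.  Returns 0 on success,
   -1 after a ten-second timeout.  */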
static int
wait_for_txrdy(unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

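/* Send a message to a secondary's console.  "START" is a special one
   that the console takes as a command to begin execution at the HWRPB
   restart address.  */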
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

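/* A secondary console wants to send a message.  Receive and log it.  */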
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

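/* Convince the console to have a secondary CPU begin execution.  */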
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

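	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb, so reuse its stack for now.  */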
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

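	/* Setup HWRPB fields that SRM uses to start the secondary.  */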
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

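	/* Recalculate and update the HWRPB checksum.  */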
	hwrpb_update_checksum(hwrpb);

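	/* Send a "start" command to the specified processor.  The bit
	   fiddling below reads as: set Context Valid and Restart Capable,
	   clear Bootstrap In Progress (an interpretation of the SRM
	   per-CPU flags, not spelled out in the source).  */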
	cpu->flags |= 0x22;
	cpu->flags &= ~1;
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

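/* Bring one CPU online: start it via the console, then give it a
   second to announce itself through smp_secondary_alive.  */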
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

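	/* Signal the secondary to wait a moment.  */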
	smp_secondary_alive = -1;

	if (secondary_cpu_start(cpuid, idle))
		return -1;

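	/* Notify the secondary that it can run calibrate_delay.  */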
	mb();
	smp_secondary_alive = 0;

	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	return 0;
}

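/* Called from setup_arch.  Detect an SMP system and which processors
   the console reports as usable.  */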
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
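			/* Mask 0x1cc appears to select CPUs the console
			   reports as present and enabled with valid
			   PALcode (a reading of the HWRPB per-CPU flag
			   bits, not stated in the source).  */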
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

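/* Called by smp_init to prepare the secondaries.  */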
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

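	/* Nothing to do on a UP box, or when told not to.  */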
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void
smp_prepare_boot_cpu(void)
{
}

int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

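/* Report the combined BogoMIPS of all online processors.  */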
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

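/* Post the operation bit for each target CPU, then hit each one's
   interprocessor-interrupt register.  The first mb() orders prior
   stores before the flags; the second orders the flags before the
   IPIs themselves.  */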
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}

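/* Dispatch every pending IPI message for this CPU, re-reading the
   word until no new bits arrive.  */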
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_STOP:
				halt();
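				/* Fall through: halt() does not return.  */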

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom;

	cpumask_copy(&to_whom, cpu_possible_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpuid)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

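/* Flush the instruction cache on every CPU, including this one.  */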
void
smp_imb(void)
{
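	/* Must wait for the other processors to finish flushing their
	   icaches before continuing.  */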
	on_each_cpu(ipi_imb, NULL, 1);
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

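/* The IPI handlers below must not reload the current mm context while
   the local CPU holds its ASN lock; hence the asn_locked() checks.  */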
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

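/* Flush all translations for an address space.  When only the current
   CPU uses the mm, zap the other CPUs' context words instead of
   interrupting them: they'll allocate a fresh ASN on next use.  */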
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	smp_call_function(ipi_flush_tlb_mm, mm, 1);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

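/* Argument block carrying a (vma, mm, address) triple to the
   flush_tlb_page IPI handler.  */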
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

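/* Flush a single user page, with the same single-user shortcut as
   flush_tlb_mm.  */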
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	smp_call_function(ipi_flush_tlb_page, &data, 1);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

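/* On the Alpha we always flush the whole user TLB: there is no
   narrower per-range primitive.  */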
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

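/* Per-CPU I-cache flush: loading a new mm context allocates a fresh
   ASN, which leaves stale I-stream entries unreachable (inferred from
   the __load_new_mm_context call; not spelled out here).  */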
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

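/* Called after the kernel writes into an executable user page (e.g.
   ptrace); make the new instructions visible to every CPU running the
   mm.  Non-executable mappings need nothing.  */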
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	smp_call_function(ipi_flush_icache_page, mm, 1);

	preempt_enable();
}