/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * A logical CPU mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

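/*
 * Mark @cpu as set up and record it as a sibling of every already
 * set up CPU sharing its package and core (or just of itself when
 * the CPU has no hardware threads).
 */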
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

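/*
 * Record @cpu in the core map of every already set up CPU in the
 * same package, and vice versa.
 */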
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
static inline void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

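/* Install the platform's SMP operations; warn if a set was already registered. */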
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	cpumask_set_cpu(cpu, &cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * Interrupts will be enabled in ->smp_finish(); enabling them too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

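/* IPI handler: mark the calling CPU offline and spin with interrupts off. */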
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU. Be a bit slow here and
	 * set the bits for every online CPU so we don't miss
	 * any IPI whilst taking this VPE down.
	 */

	cpumask_copy(&cpu_foreign_map, cpu_online_mask);

	/* Make it visible to every other CPU */
	smp_mb();

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

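/* Ask every other CPU to run stop_this_cpu(); do not wait for completion. */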
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callin_map);
}

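/*
 * Boot a secondary CPU and busy-wait until it has announced itself
 * in cpu_callin_map, then synchronise the cycle counters with it.
 */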
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
		udelay(100);
		schedule();
	}

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

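/* Flush the entire TLB on every online CPU, including the caller. */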
void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

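/* Kernel mappings are shared by all CPUs, so flush the range everywhere. */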
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

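/* Flush a single translation on the local CPU and, via IPI, on all others. */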
void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
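/* For crash dumps: ask every other online CPU to run the dump callback. */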
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);

}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

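/*
 * Deliver a broadcast tick to each CPU in @mask via an async IPI.  The
 * per-cpu count ensures a CPU's csd is not queued again while a
 * previous broadcast to it is still in flight.
 */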
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

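/* IPI target: receive the broadcast tick and reset the per-cpu guard count. */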
static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

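/* Point each per-cpu broadcast csd at the callee, early at boot. */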
static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */