/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

/* State of each CPU. */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* The messaging code jumps to this pointer during boot-up */
unsigned long start_cpu_function_addr;

/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__this_cpu_write(cpu_state, CPU_ONLINE);

	init_messaging();
}

static void start_secondary(void);

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message. We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
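	/*
	 * Sanity check: cpu_count starts at 1 for the boot cpu and is
	 * bumped once for each possible secondary, so it must not
	 * exceed max_cpus (or 1 if max_cpus is zero).
	 */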
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb();  /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}

static __initdata struct cpumask init_affinity;

static __init int reset_init_affinity(void)
{
	long rc = sched_setaffinity(current->pid, &init_affinity);
	if (rc != 0)
		pr_warn("couldn't reset init affinity (%ld)\n", rc);
	return 0;
}
late_initcall(reset_init_affinity);

static struct cpumask cpu_started;

/*
 * Activate a secondary processor. Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void start_secondary(void)
{
	int cpuid;

	preempt_disable();

	cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__this_cpu_write(current_asid, min_asid);

	/* Set up this thread as another owner of the init_mm */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warn("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}

/*
 * Bring a secondary processor online.
 */
void online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	notify_cpu_starting(smp_processor_id());

	set_cpu_online(smp_processor_id(), 1);
	__this_cpu_write(cpu_state, CPU_ONLINE);

	/* Set up tile-specific state for this cpu. */
	setup_cpu(0);

	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	cpu_startup_entry(CPUHP_ONLINE);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	/* Wait 5s total for all CPUs to come online */
	static int timeout;
	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			pr_info("skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}

static void panic_start_cpu(void)
{
	panic("Received a MSG_START_CPU IPI after boot finished.");
}

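/*
 * Called by the generic SMP code once all the secondaries are up;
 * from here on a stray MSG_START_CPU IPI is treated as a bug.
 */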
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu, next, rc;

	/* Reset the response to a (now illegal) MSG_START_CPU IPI. */
	start_cpu_function_addr = (unsigned long) &panic_start_cpu;

	cpumask_copy(&init_affinity, cpu_online_mask);

	/*
	 * Pin ourselves to a single cpu in the initial affinity set
	 * so that kernel mappings for the rootfs are not in the dataplane,
	 * if set, and to avoid unnecessary migration during bringup.
	 * Use the last cpu just in case the whole chip has been
	 * isolated from the scheduler, to keep init away from likely
	 * more useful user code. This also ensures that work scheduled
	 * via schedule_delayed_work() in the init routines will land
	 * on this cpu.
	 */
	for (cpu = cpumask_first(&init_affinity);
	     (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
	     cpu = next)
		;
	rc = sched_setaffinity(current->pid, cpumask_of(cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}