root/arch/powerpc/platforms/85xx/smp.c


DEFINITIONS

This source file includes the following definitions:
  1. mpc85xx_give_timebase
  2. mpc85xx_take_timebase
  3. smp_85xx_mach_cpu_die
  4. qoriq_cpu_kill
  5. flush_spin_table
  6. read_spin_table_addr_l
  7. wake_hw_thread
  8. smp_85xx_start_cpu
  9. smp_85xx_kick_cpu
  10. mpc85xx_smp_kexec_cpu_down
  11. mpc85xx_smp_kexec_down
  12. mpc85xx_smp_kexec_cpu_down
  13. mpc85xx_smp_machine_kexec
  14. smp_85xx_setup_cpu
  15. mpc85xx_smp_init

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Author: Andy Fleming <afleming@freescale.com>
 *         Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/hotplug.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

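/*
 * ePAPR-style spin table.  The boot program parks each secondary core
 * spinning on its table entry with addr_l set to 1; the OS releases a
 * core by filling in pir and then writing the physical address of the
 * kernel entry point to addr_h/addr_l, at which point the core jumps
 * there (see smp_85xx_start_cpu() below).
 */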
struct epapr_spin_table {
        u32     addr_h;
        u32     addr_l;
        u32     r3_h;
        u32     r3_l;
        u32     reserved;
        u32     pir;
};

#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;

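/*
 * Timebase hand-off between the boot CPU and a CPU coming online:
 * the taker sets tb_req and spins; the giver then freezes the
 * timebase, snapshots it into 'timebase' and sets tb_valid; the
 * taker copies the snapshot into its own TB and clears tb_valid,
 * after which the giver unfreezes the timebase.
 */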
static void mpc85xx_give_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);
        hard_irq_disable();

        while (!tb_req)
                barrier();
        tb_req = 0;

        qoriq_pm_ops->freeze_time_base(true);
#ifdef CONFIG_PPC64
        /*
         * e5500/e6500 have a workaround for erratum A-006958 in place
         * that will reread the timebase until TBL is non-zero.
         * That would be a bad thing when the timebase is frozen.
         *
         * Thus, we read it manually, and instead of checking that
         * TBL is non-zero, we ensure that TB does not change.  We don't
         * do that for the main mftb implementation, because it requires
         * a scratch register.
         */
        {
                u64 prev;

                asm volatile("mfspr %0, %1" : "=r" (timebase) :
                             "i" (SPRN_TBRL));

                do {
                        prev = timebase;
                        asm volatile("mfspr %0, %1" : "=r" (timebase) :
                                     "i" (SPRN_TBRL));
                } while (prev != timebase);
        }
#else
        timebase = get_tb();
#endif
        mb();
        tb_valid = 1;

        while (tb_valid)
                barrier();

        qoriq_pm_ops->freeze_time_base(false);

        local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);
        hard_irq_disable();

        tb_req = 1;
        while (!tb_valid)
                barrier();

        set_tb(timebase >> 32, timebase & 0xffffffff);
        isync();
        tb_valid = 0;

        local_irq_restore(flags);
}

static void smp_85xx_mach_cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        hard_irq_disable();
        /* mask all irqs to prevent cpu wakeup */
        qoriq_pm_ops->irq_mask(cpu);

        idle_task_exit();

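        /*
         * Disable the decrementer/timers (TCR) and clear any pending
         * timer status (TSR bits are write-1-to-clear) so a stale
         * timer interrupt cannot wake this dead cpu.
         */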
        mtspr(SPRN_TCR, 0);
        mtspr(SPRN_TSR, mfspr(SPRN_TSR));

        generic_set_cpu_dead(cpu);

        cur_cpu_spec->cpu_down_flush();

        qoriq_pm_ops->cpu_die(cpu);

        while (1)
                ;
}

static void qoriq_cpu_kill(unsigned int cpu)
{
        int i;

        for (i = 0; i < 500; i++) {
                if (is_cpu_dead(cpu)) {
#ifdef CONFIG_PPC64
                        paca_ptrs[cpu]->cpu_start = 0;
#endif
                        return;
                }
                msleep(20);
        }
        pr_err("CPU%d didn't die...\n", cpu);
}
#endif

/*
 * To stay compatible with old boot programs that use a cache-inhibited
 * spin table, we need to flush the cache before accessing the spin
 * table to invalidate any stale data.  We also need to flush the cache
 * after writing to the spin table to push the data out.
 */
static inline void flush_spin_table(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
        return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

#ifdef CONFIG_PPC64
static void wake_hw_thread(void *info)
{
        void fsl_secondary_thread_init(void);
        unsigned long inia;
        int cpu = *(const int *)info;

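        /*
         * On 64-bit (ELFv1 ABI), the function symbol refers to a
         * function descriptor whose first word is the actual entry
         * address; dereference it to get the address the new thread
         * should start executing at.
         */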
        inia = *(unsigned long *)fsl_secondary_thread_init;
        book3e_start_thread(cpu_thread_in_core(cpu), inia);
}
#endif

static int smp_85xx_start_cpu(int cpu)
{
        int ret = 0;
        struct device_node *np;
        const u64 *cpu_rel_addr;
        unsigned long flags;
        int ioremappable;
        int hw_cpu = get_hard_smp_processor_id(cpu);
        struct epapr_spin_table __iomem *spin_table;

        np = of_get_cpu_node(cpu, NULL);
        cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
        if (!cpu_rel_addr) {
                pr_err("No cpu-release-addr for cpu %d\n", cpu);
                return -ENOENT;
        }

        /*
         * A secondary core could be in a spinloop in the bootpage
         * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
         * The bootpage and highmem can be accessed via ioremap(), but
         * we need to directly access the spinloop if it's in lowmem.
         */
        ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

        /* Map the spin table */
        if (ioremappable)
                spin_table = ioremap_coherent(*cpu_rel_addr,
                                              sizeof(struct epapr_spin_table));
        else
                spin_table = phys_to_virt(*cpu_rel_addr);

        local_irq_save(flags);
        hard_irq_disable();

        if (qoriq_pm_ops)
                qoriq_pm_ops->cpu_up_prepare(cpu);

        /* if cpu is not spinning, reset it */
        if (read_spin_table_addr_l(spin_table) != 1) {
                /*
                 * We don't set the BPTR register here since it already points
                 * to the boot page properly.
                 */
                mpic_reset_core(cpu);

                /*
                 * wait until core is ready...
                 * We need to invalidate the stale data, in case the boot
                 * loader uses a cache-inhibited spin table.
                 */
                if (!spin_event_timeout(
                                read_spin_table_addr_l(spin_table) == 1,
                                10000, 100)) {
                        pr_err("timeout waiting for cpu %d to reset\n",
                                hw_cpu);
                        ret = -EAGAIN;
                        goto err;
                }
        }

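        /*
         * Release the core: publish its hw cpu id in pir, then write
         * the physical entry point to the addr field.  On 64-bit this
         * is a single 64-bit store to addr_h/addr_l so the spinning
         * core cannot observe a half-updated address; on 32-bit only
         * addr_l needs to change.
         */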
        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
#ifdef CONFIG_PPC64
        out_be64((u64 *)(&spin_table->addr_h),
                __pa(ppc_function_entry(generic_secondary_smp_init)));
#else
        out_be32(&spin_table->addr_l, __pa(__early_start));
#endif
        flush_spin_table(spin_table);
err:
        local_irq_restore(flags);

        if (ioremappable)
                iounmap(spin_table);

        return ret;
}

static int smp_85xx_kick_cpu(int nr)
{
        int ret = 0;
#ifdef CONFIG_PPC64
        int primary = nr;
#endif

        WARN_ON(nr < 0 || nr >= num_possible_cpus());

        pr_debug("kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
        if (threads_per_core == 2) {
                if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
                        return -ENOENT;

                booting_thread_hwid = cpu_thread_in_core(nr);
                primary = cpu_first_thread_sibling(nr);

                if (qoriq_pm_ops)
                        qoriq_pm_ops->cpu_up_prepare(nr);

                /*
                 * If either thread in the core is online, use it to start
                 * the other.
                 */
                if (cpu_online(primary)) {
                        smp_call_function_single(primary,
                                        wake_hw_thread, &nr, 1);
                        goto done;
                } else if (cpu_online(primary + 1)) {
                        smp_call_function_single(primary + 1,
                                        wake_hw_thread, &nr, 1);
                        goto done;
                }

                /*
                 * If we get here, both threads in the core are offline.
                 * So start the primary thread, which will then start the
                 * thread specified in booting_thread_hwid, the one
                 * corresponding to nr.
                 */

        } else if (threads_per_core == 1) {
                /*
                 * If each core has only one thread, set booting_thread_hwid
                 * to an invalid value.
                 */
                booting_thread_hwid = INVALID_THREAD_HWID;

        } else if (threads_per_core > 2) {
                pr_err("More than 2 threads per core is not supported\n");
                return -EINVAL;
        }

        ret = smp_85xx_start_cpu(primary);
        if (ret)
                return ret;

done:
        paca_ptrs[nr]->cpu_start = 1;
        generic_set_cpu_up(nr);

        return ret;
#else
        ret = smp_85xx_start_cpu(nr);
        if (ret)
                return ret;

        generic_set_cpu_up(nr);

        return ret;
#endif
}

struct smp_ops_t smp_85xx_ops = {
        .cause_nmi_ipi = NULL,
        .kick_cpu = smp_85xx_kick_cpu,
        .cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = generic_cpu_disable,
        .cpu_die        = generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
        .give_timebase  = smp_generic_give_timebase,
        .take_timebase  = smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
        local_irq_disable();

        if (secondary) {
                cur_cpu_spec->cpu_down_flush();
                atomic_inc(&kexec_down_cpus);
                /* loop forever */
                while (1)
                        ;
        }
}

static void mpc85xx_smp_kexec_down(void *arg)
{
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 1);
}
#else
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
        int cpu = smp_processor_id();
        int sibling = cpu_last_thread_sibling(cpu);
        bool notified = false;
        int disable_cpu;
        int disable_threadbit = 0;
        long start = mftb();
        long now;

        local_irq_disable();
        hard_irq_disable();
        mpic_teardown_this_cpu(secondary);

        if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
                /*
                 * We enter the crash kernel on whatever cpu crashed,
                 * even if it's a secondary thread.  If that's the case,
                 * disable the corresponding primary thread.
                 */
                disable_threadbit = 1;
                disable_cpu = cpu_first_thread_sibling(cpu);
        } else if (sibling != crashing_cpu &&
                   cpu_thread_in_core(cpu) == 0 &&
                   cpu_thread_in_core(sibling) != 0) {
                disable_threadbit = 2;
                disable_cpu = sibling;
        }

        if (disable_threadbit) {
                while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) {
                        barrier();
                        now = mftb();
                        if (!notified && now - start > 1000000) {
                                pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
                                        __func__, smp_processor_id(),
                                        disable_cpu,
                                        paca_ptrs[disable_cpu]->kexec_state);
                                notified = true;
                        }
                }

                if (notified) {
                        pr_info("%s: cpu %d done waiting\n",
                                __func__, disable_cpu);
                }

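                /*
                 * Writing the thread's bit to TENC (Thread Enable
                 * Clear) disables that hardware thread; spin until
                 * TENSR (Thread Enable Status) shows it has actually
                 * stopped.
                 */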
                mtspr(SPRN_TENC, disable_threadbit);
                while (mfspr(SPRN_TENSR) & disable_threadbit)
                        cpu_relax();
        }
}
#endif

static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
#ifdef CONFIG_PPC32
        int timeout = INT_MAX;
        int i, num_cpus = num_present_cpus();

        if (image->type == KEXEC_TYPE_DEFAULT)
                smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

        while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
               (timeout > 0))
                timeout--;

        if (!timeout)
                printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

        for_each_online_cpu(i) {
                if (i == smp_processor_id())
                        continue;
                mpic_reset_core(i);
        }
#endif

        default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC_CORE */

static void smp_85xx_setup_cpu(int cpu_nr)
{
        mpic_setup_this_cpu();
}

void __init mpc85xx_smp_init(void)
{
        struct device_node *np;

        np = of_find_node_by_type(NULL, "open-pic");
        if (np) {
                smp_85xx_ops.probe = smp_mpic_probe;
                smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
                smp_85xx_ops.message_pass = smp_mpic_message_pass;
        } else {
                smp_85xx_ops.setup_cpu = NULL;
        }
        of_node_put(np);

        if (cpu_has_feature(CPU_FTR_DBELL)) {
                /*
                 * If left NULL, .message_pass defaults to
                 * smp_muxed_ipi_message_pass
                 */
                smp_85xx_ops.message_pass = NULL;
                smp_85xx_ops.cause_ipi = doorbell_global_ipi;
                smp_85xx_ops.probe = NULL;
        }

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_FSL_CORENET_RCPM
        fsl_rcpm_init();
#endif

#ifdef CONFIG_FSL_PMC
        mpc85xx_setup_pmc();
#endif
        if (qoriq_pm_ops) {
                smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
                smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
                ppc_md.cpu_die = smp_85xx_mach_cpu_die;
                smp_85xx_ops.cpu_die = qoriq_cpu_kill;
        }
#endif
        smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC_CORE
        ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
        ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}
