root/arch/ia64/kernel/smp.c


DEFINITIONS

This source file includes the following definitions.
  1. stop_this_cpu
  2. cpu_die
  3. handle_IPI
  4. send_IPI_single
  5. send_IPI_allbutself
  6. send_IPI_mask
  7. send_IPI_all
  8. send_IPI_self
  9. kdump_smp_send_stop
  10. kdump_smp_send_init
  11. smp_send_reschedule
  12. smp_send_local_flush_tlb
  13. smp_local_flush_tlb
  14. smp_flush_tlb_cpumask
  15. smp_flush_tlb_all
  16. smp_flush_tlb_mm
  17. arch_send_call_function_single_ipi
  18. arch_send_call_function_ipi_mask
  19. smp_send_stop
  20. setup_profiling_timer

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * SMP Support
   4  *
   5  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
   6  * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
   7  *
   8  * Lots of stuff stolen from arch/alpha/kernel/smp.c
   9  *
  10  * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
  11  * the existing code (on the lines of x86 port).
  12  * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
  13  * calibration on each CPU.
  14  * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
  15  * 00/03/31 Rohit Seth <rohit.seth@intel.com>   Fixes for Bootstrap Processor
  16  * & cpu_online_map now gets done here (instead of setup.c)
  17  * 99/10/05 davidm      Update to bring it in sync with new command-line processing
  18  *  scheme.
  19  * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
  20  *              smp_call_function_single to resend IPI on timeouts
  21  */
  22 #include <linux/module.h>
  23 #include <linux/kernel.h>
  24 #include <linux/sched.h>
  25 #include <linux/init.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/smp.h>
  28 #include <linux/kernel_stat.h>
  29 #include <linux/mm.h>
  30 #include <linux/cache.h>
  31 #include <linux/delay.h>
  32 #include <linux/efi.h>
  33 #include <linux/bitops.h>
  34 #include <linux/kexec.h>
  35 
  36 #include <linux/atomic.h>
  37 #include <asm/current.h>
  38 #include <asm/delay.h>
  39 #include <asm/io.h>
  40 #include <asm/irq.h>
  41 #include <asm/page.h>
  42 #include <asm/pgalloc.h>
  43 #include <asm/pgtable.h>
  44 #include <asm/processor.h>
  45 #include <asm/ptrace.h>
  46 #include <asm/sal.h>
  47 #include <asm/tlbflush.h>
  48 #include <asm/unistd.h>
  49 #include <asm/mca.h>
  50 
  51 /*
  52  * Note: alignment of 4 entries/cacheline was empirically determined
  53  * to be a good tradeoff between hot cachelines & spreading the array
  54  * across too many cachelines.
  55  */
  56 static struct local_tlb_flush_counts {
  57         unsigned int count;
  58 } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
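     /*
      * Note (editor's sketch of the arithmetic above): with 32-byte alignment
      * each entry occupies its own 32-byte chunk, which yields the "4
      * entries/cacheline" mentioned above, presumably against the 128-byte
      * L2/L3 line used on Itanium.
      */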
  59 
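     /*
      * Per-CPU scratch area used by smp_flush_tlb_cpumask(): the sender
      * snapshots each target's flush count here before sending IPIs and
      * then spins until the live counters above change.
      */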
  60 static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
  61                                      shadow_flush_counts);
  62 
  63 #define IPI_CALL_FUNC           0
  64 #define IPI_CPU_STOP            1
  65 #define IPI_CALL_FUNC_SINGLE    2
  66 #define IPI_KDUMP_CPU_STOP      3
  67 
  68 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
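     /*
      * Each bit in ipi_operation corresponds to one of the IPI_* operations
      * above: senders set the bit remotely with set_bit() and then raise
      * IA64_IPI_VECTOR, while handle_IPI() claims every pending bit at once
      * with xchg().
      */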
  69 static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);
  70 
  71 extern void cpu_halt (void);
  72 
  73 static void
  74 stop_this_cpu(void)
  75 {
  76         /*
  77          * Remove this CPU:
  78          */
  79         set_cpu_online(smp_processor_id(), false);
  80         max_xtp();
  81         local_irq_disable();
  82         cpu_halt();
  83 }
  84 
  85 void
  86 cpu_die(void)
  87 {
  88         max_xtp();
  89         local_irq_disable();
  90         cpu_halt();
  91         /* Should never be here */
  92         BUG();
  93         for (;;);
  94 }
  95 
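     /*
      * IPI entry point.  The pending-operation word is drained in a loop, so
      * operations posted by other CPUs while earlier ones are being serviced
      * are handled in this same interrupt rather than waiting for another IPI.
      */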
  96 irqreturn_t
  97 handle_IPI (int irq, void *dev_id)
  98 {
  99         int this_cpu = get_cpu();
 100         unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
 101         unsigned long ops;
 102 
 103         mb();   /* Order interrupt and bit testing. */
 104         while ((ops = xchg(pending_ipis, 0)) != 0) {
 105                 mb();   /* Order bit clearing and data access. */
 106                 do {
 107                         unsigned long which;
 108 
 109                         which = ffz(~ops);
 110                         ops &= ~(1 << which);
 111 
 112                         switch (which) {
 113                         case IPI_CPU_STOP:
 114                                 stop_this_cpu();
 115                                 break;
 116                         case IPI_CALL_FUNC:
 117                                 generic_smp_call_function_interrupt();
 118                                 break;
 119                         case IPI_CALL_FUNC_SINGLE:
 120                                 generic_smp_call_function_single_interrupt();
 121                                 break;
 122 #ifdef CONFIG_KEXEC
 123                         case IPI_KDUMP_CPU_STOP:
 124                                 unw_init_running(kdump_cpu_freeze, NULL);
 125                                 break;
 126 #endif
 127                         default:
 128                                 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
 129                                                 this_cpu, which);
 130                                 break;
 131                         }
 132                 } while (ops);
 133                 mb();   /* Order data access and bit testing. */
 134         }
 135         put_cpu();
 136         return IRQ_HANDLED;
 137 }
 138 
 139 
 140 
 141 /*
 142  * Called with preemption disabled.
 143  */
 144 static inline void
 145 send_IPI_single (int dest_cpu, int op)
 146 {
 147         set_bit(op, &per_cpu(ipi_operation, dest_cpu));
 148         ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
 149 }
 150 
 151 /*
 152  * Called with preemption disabled.
 153  */
 154 static inline void
 155 send_IPI_allbutself (int op)
 156 {
 157         unsigned int i;
 158 
 159         for_each_online_cpu(i) {
 160                 if (i != smp_processor_id())
 161                         send_IPI_single(i, op);
 162         }
 163 }
 164 
 165 /*
 166  * Called with preemption disabled.
 167  */
 168 static inline void
 169 send_IPI_mask(const struct cpumask *mask, int op)
 170 {
 171         unsigned int cpu;
 172 
 173         for_each_cpu(cpu, mask) {
 174                 send_IPI_single(cpu, op);
 175         }
 176 }
 177 
 178 /*
 179  * Called with preemption disabled.
 180  */
 181 static inline void
 182 send_IPI_all (int op)
 183 {
 184         int i;
 185 
 186         for_each_online_cpu(i) {
 187                 send_IPI_single(i, op);
 188         }
 189 }
 190 
 191 /*
 192  * Called with preemption disabled.
 193  */
 194 static inline void
 195 send_IPI_self (int op)
 196 {
 197         send_IPI_single(smp_processor_id(), op);
 198 }
 199 
 200 #ifdef CONFIG_KEXEC
 201 void
 202 kdump_smp_send_stop(void)
 203 {
 204         send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
 205 }
 206 
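     /*
      * Fallback for CPUs whose kdump_status[] slot is still zero, presumably
      * because they never reacted to IPI_KDUMP_CPU_STOP above: kick them with
      * an INIT, which gets through even when normal interrupts are disabled.
      */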
 207 void
 208 kdump_smp_send_init(void)
 209 {
 210         unsigned int cpu, self_cpu;
 211         self_cpu = smp_processor_id();
 212         for_each_online_cpu(cpu) {
 213                 if (cpu != self_cpu) {
 214                         if (kdump_status[cpu] == 0)
 215                                 ia64_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
 216                 }
 217         }
 218 }
 219 #endif
 220 /*
 221  * Called with preemption disabled.
 222  */
 223 void
 224 smp_send_reschedule (int cpu)
 225 {
 226         ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 227 }
 228 EXPORT_SYMBOL_GPL(smp_send_reschedule);
 229 
 230 /*
 231  * Called with preemption disabled.
 232  */
 233 static void
 234 smp_send_local_flush_tlb (int cpu)
 235 {
 236         ia64_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
 237 }
 238 
 239 void
 240 smp_local_flush_tlb(void)
 241 {
 242         /*
 243          * Use atomic ops. Otherwise, the load/increment/store sequence from
 244          * a "++" operation can have the line stolen between the load & store.
 245          * The overhead of the atomic op is negligible in this case & offers
 246          * significant benefit for the brief periods where lots of cpus
 247          * are simultaneously flushing TLBs.
 248          */
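             /*
              * smp_flush_tlb_cpumask() polls this counter to detect that the
              * remote CPU has picked up its flush IPI.
              */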
 249         ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
 250         local_flush_tlb_all();
 251 }
 252 
 253 #define FLUSH_DELAY     5 /* Usec backoff to eliminate excessive cacheline bouncing */
 254 
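     /*
      * Flush the local TLB on every CPU in @xcpumask.  The current flush
      * count of each target is recorded first; after the flush IPIs are
      * sent, we spin (with a udelay() backoff) until every live counter
      * differs from its recorded value, i.e. each target has taken its IPI
      * and is running its local flush.
      */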
 255 void
 256 smp_flush_tlb_cpumask(cpumask_t xcpumask)
 257 {
 258         unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
 259         cpumask_t cpumask = xcpumask;
 260         int mycpu, cpu, flush_mycpu = 0;
 261 
 262         preempt_disable();
 263         mycpu = smp_processor_id();
 264 
 265         for_each_cpu(cpu, &cpumask)
 266                 counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
 267 
 268         mb();
 269         for_each_cpu(cpu, &cpumask) {
 270                 if (cpu == mycpu)
 271                         flush_mycpu = 1;
 272                 else
 273                         smp_send_local_flush_tlb(cpu);
 274         }
 275 
 276         if (flush_mycpu)
 277                 smp_local_flush_tlb();
 278 
 279         for_each_cpu(cpu, &cpumask)
 280                 while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
 281                         udelay(FLUSH_DELAY);
 282 
 283         preempt_enable();
 284 }
 285 
 286 void
 287 smp_flush_tlb_all (void)
 288 {
 289         on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 290 }
 291 
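     /*
      * Flush mm's TLB entries on all CPUs that may hold them.  The common
      * single-user case (e.g. a single-threaded fork()) where mm is also the
      * current address space is handled purely locally; otherwise the flush
      * is pushed to the CPUs in mm_cpumask() (or to all CPUs if a temporary
      * cpumask cannot be allocated) and then performed here as well.
      */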
 292 void
 293 smp_flush_tlb_mm (struct mm_struct *mm)
 294 {
 295         cpumask_var_t cpus;
 296         preempt_disable();
 297         /* this happens for the common case of a single-threaded fork():  */
 298         if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
 299         {
 300                 local_finish_flush_tlb_mm(mm);
 301                 preempt_enable();
 302                 return;
 303         }
 304         if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
 305                 smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
 306                         mm, 1);
 307         } else {
 308                 cpumask_copy(cpus, mm_cpumask(mm));
 309                 smp_call_function_many(cpus,
 310                         (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 311                 free_cpumask_var(cpus);
 312         }
 313         local_irq_disable();
 314         local_finish_flush_tlb_mm(mm);
 315         local_irq_enable();
 316         preempt_enable();
 317 }
 318 
 319 void arch_send_call_function_single_ipi(int cpu)
 320 {
 321         send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 322 }
 323 
 324 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 325 {
 326         send_IPI_mask(mask, IPI_CALL_FUNC);
 327 }
 328 
 329 /*
 330  * this function calls the 'stop' function on all other CPUs in the system.
 331  */
 332 void
 333 smp_send_stop (void)
 334 {
 335         send_IPI_allbutself(IPI_CPU_STOP);
 336 }
 337 
 338 int
 339 setup_profiling_timer (unsigned int multiplier)
 340 {
 341         return -EINVAL;
 342 }
