kernel/smp.c

DEFINITIONS

This source file includes the following definitions.
  1. smpcfd_prepare_cpu
  2. smpcfd_dead_cpu
  3. smpcfd_dying_cpu
  4. call_function_init
  5. csd_lock_wait
  6. csd_lock
  7. csd_unlock
  8. generic_exec_single
  9. generic_smp_call_function_single_interrupt
  10. flush_smp_call_function_queue
  11. smp_call_function_single
  12. smp_call_function_single_async
  13. smp_call_function_any
  14. smp_call_function_many
  15. smp_call_function
  16. arch_disable_smp_support
  17. nosmp
  18. nrcpus
  19. maxcpus
  20. setup_nr_cpu_ids
  21. smp_init
  22. on_each_cpu
  23. on_each_cpu_mask
  24. on_each_cpu_cond_mask
  25. on_each_cpu_cond
  26. do_nothing
  27. kick_all_cpus_sync
  28. wake_up_all_idle_cpus
  29. smp_call_on_cpu_callback
  30. smp_call_on_cpu

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for SMP IPI calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
};

struct call_function_data {
        call_single_data_t      __percpu *csd;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous IPI calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU.
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition should be visible before the IPI is sent:
         * the handler locks the list to pull the entry off it, and the
         * normal cache coherency rules implied by spinlocks make the
         * addition visible to it.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        call_single_data_t *csd, *csd_next;
        static bool warned;

        lockdep_assert_irqs_disabled();

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * Smp functions above are typically synchronous so they
         * better run first since some other CPUs may be busy waiting
         * for them.
         */
        irq_work_run();
}

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock on
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);

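/*
 * Editor's usage sketch, not part of the original file: run a fast,
 * non-blocking callback on one CPU and wait for it to finish. The
 * demo_* names below are hypothetical.
 */
static void demo_report_cpu(void *info)
{
        /* Runs on the target CPU with interrupts disabled. */
        pr_info("callback ran on CPU %d\n", smp_processor_id());
}

static __maybe_unused int demo_call_single(int target_cpu)
{
        /* wait=1: only return once demo_report_cpu() has completed. */
        return smp_call_function_single(target_cpu, demo_report_cpu, NULL, 1);
}
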
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                               specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

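/*
 * Editor's sketch (hypothetical demo_* names): an asynchronous call using
 * a caller-owned csd embedded in an object. ->func and ->info must be set
 * before the call, and the csd must not be reused until the previous call
 * has finished (CSD_FLAG_LOCK cleared).
 */
struct demo_async_ctx {
        call_single_data_t      csd;
        unsigned long           stamp;
};

static void demo_async_fn(void *info)
{
        struct demo_async_ctx *ctx = info;

        ctx->stamp = jiffies;   /* runs on the target CPU, IRQs off */
}

static __maybe_unused int demo_async_kick(struct demo_async_ctx *ctx, int cpu)
{
        ctx->csd.func = demo_async_fn;
        ctx->csd.info = ctx;

        /* Does not wait; safe to call with interrupts disabled. */
        return smp_call_function_single_async(cpu, &ctx->csd);
}
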
/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

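/*
 * Editor's sketch (hypothetical names): run a callback on the cheapest
 * CPU of a mask, preferring the local CPU, then the local node.
 */
static void demo_any_fn(void *info)
{
        *(int *)info = smp_processor_id();
}

static __maybe_unused int demo_call_any(const struct cpumask *mask)
{
        int where = -1;
        int err;

        err = smp_call_function_any(mask, demo_any_fn, &where, 1);
        if (!err)
                pr_info("ran on CPU %d\n", where); /* stable: wait=1 */
        return err;
}
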
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock on
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);

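/*
 * Editor's sketch (hypothetical names): broadcast to a CPU set. The
 * caller must disable preemption, and the local CPU is never included.
 */
static void demo_many_fn(void *info)
{
        atomic_inc(info);       /* one hit per responding CPU */
}

static __maybe_unused int demo_call_many(const struct cpumask *mask)
{
        atomic_t hits = ATOMIC_INIT(0);

        preempt_disable();      /* required by smp_call_function_many() */
        smp_call_function_many(mask, demo_many_fn, &hits, true);
        preempt_enable();

        /* All callbacks have finished (wait=true), so 'hits' is stable. */
        return atomic_read(&hits);
}
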
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

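/*
 * Editor's sketch: smp_call_function() covers every *other* online CPU;
 * pair it with a local call (or use on_each_cpu() below) to cover all.
 */
static void demo_sync_fn(void *unused)
{
}

static __maybe_unused void demo_sync_all_others(void)
{
        /* Returns only after demo_sync_fn() has run on all other CPUs. */
        smp_call_function(demo_sync_fn, NULL, 1);
}
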
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
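
/*
 * Illustrative command lines (editor's examples, matching the handlers
 * below):
 *
 *      nosmp           boot with the boot CPU only
 *      maxcpus=4       activate at most four CPUs at boot
 *      nr_cpus=8       hard-cap nr_cpu_ids at eight possible CPUs
 */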

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;
        unsigned int cpu;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        num_nodes = num_online_nodes();
        num_cpus  = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus,  (num_cpus  > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;

        preempt_disable();
        smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);

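/*
 * Editor's sketch: unlike smp_call_function(), on_each_cpu() also runs
 * the callback on the local CPU. demo_* names are hypothetical.
 */
static void demo_each_fn(void *info)
{
        pr_info("CPU %d checking in\n", smp_processor_id());
}

static __maybe_unused void demo_each(void)
{
        on_each_cpu(demo_each_fn, NULL, 1);     /* wait for every CPU */
}
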
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                        void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

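/*
 * Editor's sketch (hypothetical names): restrict the callback to one
 * CPU's mask; the local CPU is handled directly when it is in @mask.
 */
static void demo_mask_fn(void *info)
{
        pr_info("mask callback on CPU %d\n", smp_processor_id());
}

static __maybe_unused void demo_each_mask(void)
{
        /* Run on CPU 0 only (if online), waiting for completion. */
        on_each_cpu_mask(cpumask_of(0), demo_mask_fn, NULL, true);
}
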
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 * @mask:       The set of cpus over which cond_func is evaluated.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
                        smp_call_func_t func, void *info, bool wait,
                        gfp_t gfp_flags, const struct cpumask *mask)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_cpu(cpu, mask)
                        if (cond_func(cpu, info))
                                __cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_cpu(cpu, mask)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                                info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                        smp_call_func_t func, void *info, bool wait,
                        gfp_t gfp_flags)
{
        on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
                                cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
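
/*
 * Editor's sketch (hypothetical names): IPI only the CPUs for which a
 * predicate holds; here, the even-numbered ones.
 */
static bool demo_cpu_is_even(int cpu, void *info)
{
        return !(cpu & 1);      /* called with preemption disabled */
}

static void demo_cond_fn(void *info)
{
        pr_info("even CPU %d\n", smp_processor_id());
}

static __maybe_unused void demo_cond(void)
{
        on_each_cpu_cond(demo_cpu_is_even, demo_cond_fn, NULL, true,
                         GFP_KERNEL);
}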

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of the pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via the pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

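/*
 * Editor's sketch of the pattern described above: publish a new handler
 * pointer, then make sure no CPU can still be executing through the old
 * one. demo_handler is hypothetical.
 */
static void (*demo_handler)(void);

static __maybe_unused void demo_swap_handler(void (*new_fn)(void))
{
        WRITE_ONCE(demo_handler, new_fn);
        kick_all_cpus_sync();   /* every CPU has left the old handler */
}
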
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus() tries to break all cpus out of idle, including
 * cpus that are idle-polling; cpus that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
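
/*
 * Editor's sketch (hypothetical names): unlike the IPI-based helpers
 * above, the callback runs in process context (a workqueue) on the
 * target CPU and may sleep.
 */
static int demo_read_on_cpu(void *data)
{
        return smp_processor_id();      /* may also sleep here */
}

static __maybe_unused int demo_call_on(unsigned int cpu)
{
        /* phys=false: no hypervisor vCPU pinning requested */
        return smp_call_on_cpu(cpu, demo_read_on_cpu, NULL, false);
}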
