root/kernel/rcu/tree.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. rcu_get_gp_kthreads_prio
  2. rcu_rnp_online_cpus
  3. rcu_gp_in_progress
  4. rcu_get_n_cbs_cpu
  5. rcu_softirq_qs
  6. rcu_dynticks_eqs_enter
  7. rcu_dynticks_eqs_exit
  8. rcu_dynticks_eqs_online
  9. rcu_dynticks_curr_cpu_in_eqs
  10. rcu_dynticks_snap
  11. rcu_dynticks_in_eqs
  12. rcu_dynticks_in_eqs_since
  13. rcu_eqs_special_set
  14. rcu_momentary_dyntick_idle
  15. rcu_is_cpu_rrupt_from_idle
  16. adjust_jiffies_till_sched_qs
  17. param_set_first_fqs_jiffies
  18. param_set_next_fqs_jiffies
  19. rcu_get_gp_seq
  20. rcu_exp_batches_completed
  21. rcu_get_root
  22. gp_state_getname
  23. rcutorture_get_gp_data
  24. rcu_eqs_enter
  25. rcu_idle_enter
  26. rcu_user_enter
  27. rcu_nmi_exit_common
  28. rcu_nmi_exit
  29. rcu_irq_exit
  30. rcu_irq_exit_irqson
  31. rcu_eqs_exit
  32. rcu_idle_exit
  33. rcu_user_exit
  34. rcu_nmi_enter_common
  35. rcu_nmi_enter
  36. rcu_irq_enter
  37. rcu_irq_enter_irqson
  38. rcu_is_watching
  39. rcu_request_urgent_qs_task
  40. rcu_lockdep_current_cpu_online
  41. rcu_gpnum_ovf
  42. dyntick_save_progress_counter
  43. rcu_implicit_dynticks_qs
  44. trace_rcu_this_gp
  45. rcu_start_this_gp
  46. rcu_future_gp_cleanup
  47. rcu_gp_kthread_wake
  48. rcu_accelerate_cbs
  49. rcu_accelerate_cbs_unlocked
  50. rcu_advance_cbs
  51. rcu_advance_cbs_nowake
  52. __note_gp_changes
  53. note_gp_changes
  54. rcu_gp_slow
  55. rcu_gp_init
  56. rcu_gp_fqs_check_wake
  57. rcu_gp_fqs
  58. rcu_gp_fqs_loop
  59. rcu_gp_cleanup
  60. rcu_gp_kthread
  61. rcu_report_qs_rsp
  62. rcu_report_qs_rnp
  63. rcu_report_unblock_qs_rnp
  64. rcu_report_qs_rdp
  65. rcu_check_quiescent_state
  66. rcutree_dying_cpu
  67. rcu_cleanup_dead_rnp
  68. rcutree_dead_cpu
  69. rcu_do_batch
  70. rcu_sched_clock_irq
  71. force_qs_rnp
  72. rcu_force_quiescent_state
  73. rcu_core
  74. rcu_core_si
  75. rcu_wake_cond
  76. invoke_rcu_core_kthread
  77. invoke_rcu_core
  78. rcu_cpu_kthread_park
  79. rcu_cpu_kthread_should_run
  80. rcu_cpu_kthread
  81. rcu_spawn_core_kthreads
  82. __call_rcu_core
  83. rcu_leak_callback
  84. __call_rcu
  85. call_rcu
  86. kfree_call_rcu
  87. rcu_blocking_is_gp
  88. synchronize_rcu
  89. get_state_synchronize_rcu
  90. cond_synchronize_rcu
  91. rcu_pending
  92. rcu_barrier_trace
  93. rcu_barrier_callback
  94. rcu_barrier_func
  95. rcu_barrier
  96. rcu_init_new_rnp
  97. rcu_boot_init_percpu_data
  98. rcutree_prepare_cpu
  99. rcutree_affinity_setting
  100. rcutree_online_cpu
  101. rcutree_offline_cpu
  102. rcu_cpu_starting
  103. rcu_report_dead
  104. rcutree_migrate_callbacks
  105. rcu_pm_notify
  106. rcu_spawn_gp_kthread
  107. rcu_scheduler_starting
  108. rcu_init_one
  109. rcu_init_geometry
  110. rcu_dump_rcu_node_tree
  111. rcu_init

   1 // SPDX-License-Identifier: GPL-2.0+
   2 /*
   3  * Read-Copy Update mechanism for mutual exclusion
   4  *
   5  * Copyright IBM Corporation, 2008
   6  *
   7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   8  *          Manfred Spraul <manfred@colorfullife.com>
   9  *          Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version
  10  *
  11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
  12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  13  *
  14  * For detailed explanation of Read-Copy Update mechanism see -
  15  *      Documentation/RCU
  16  */
  17 
  18 #define pr_fmt(fmt) "rcu: " fmt
  19 
  20 #include <linux/types.h>
  21 #include <linux/kernel.h>
  22 #include <linux/init.h>
  23 #include <linux/spinlock.h>
  24 #include <linux/smp.h>
  25 #include <linux/rcupdate_wait.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/sched.h>
  28 #include <linux/sched/debug.h>
  29 #include <linux/nmi.h>
  30 #include <linux/atomic.h>
  31 #include <linux/bitops.h>
  32 #include <linux/export.h>
  33 #include <linux/completion.h>
  34 #include <linux/moduleparam.h>
  35 #include <linux/percpu.h>
  36 #include <linux/notifier.h>
  37 #include <linux/cpu.h>
  38 #include <linux/mutex.h>
  39 #include <linux/time.h>
  40 #include <linux/kernel_stat.h>
  41 #include <linux/wait.h>
  42 #include <linux/kthread.h>
  43 #include <uapi/linux/sched/types.h>
  44 #include <linux/prefetch.h>
  45 #include <linux/delay.h>
  46 #include <linux/stop_machine.h>
  47 #include <linux/random.h>
  48 #include <linux/trace_events.h>
  49 #include <linux/suspend.h>
  50 #include <linux/ftrace.h>
  51 #include <linux/tick.h>
  52 #include <linux/sysrq.h>
  53 #include <linux/kprobes.h>
  54 #include <linux/gfp.h>
  55 #include <linux/oom.h>
  56 #include <linux/smpboot.h>
  57 #include <linux/jiffies.h>
  58 #include <linux/sched/isolation.h>
  59 #include <linux/sched/clock.h>
  60 #include "../time/tick-internal.h"
  61 
  62 #include "tree.h"
  63 #include "rcu.h"
  64 
  65 #ifdef MODULE_PARAM_PREFIX
  66 #undef MODULE_PARAM_PREFIX
  67 #endif
  68 #define MODULE_PARAM_PREFIX "rcutree."
  69 
  70 /* Data structures. */
  71 
  72 /*
  73  * Steal a bit from the bottom of ->dynticks for idle entry/exit
  74  * control.  Initially this is for TLB flushing.
  75  */
  76 #define RCU_DYNTICK_CTRL_MASK 0x1
  77 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
  78 #ifndef rcu_eqs_special_exit
  79 #define rcu_eqs_special_exit() do { } while (0)
  80 #endif
  81 
  82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
  83         .dynticks_nesting = 1,
  84         .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
  85         .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
  86 };
  87 struct rcu_state rcu_state = {
  88         .level = { &rcu_state.node[0] },
  89         .gp_state = RCU_GP_IDLE,
  90         .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
  91         .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
  92         .name = RCU_NAME,
  93         .abbr = RCU_ABBR,
  94         .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
  95         .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
  96         .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
  97 };
  98 
  99 /* Dump rcu_node combining tree at boot to verify correct setup. */
 100 static bool dump_tree;
 101 module_param(dump_tree, bool, 0444);
 102 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
 103 static bool use_softirq = 1;
 104 module_param(use_softirq, bool, 0444);
 105 /* Control rcu_node-tree auto-balancing at boot time. */
 106 static bool rcu_fanout_exact;
 107 module_param(rcu_fanout_exact, bool, 0444);
 108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
 109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
 110 module_param(rcu_fanout_leaf, int, 0444);
 111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 112 /* Number of rcu_nodes at specified level. */
 113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 115 
 116 /*
 117  * The rcu_scheduler_active variable is initialized to the value
 118  * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
 119  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 120  * RCU can assume that there is but one task, allowing RCU to (for example)
 121  * optimize synchronize_rcu() to a simple barrier().  When this variable
 122  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 123  * to detect real grace periods.  This variable is also used to suppress
 124  * boot-time false positives from lockdep-RCU error checking.  Finally, it
 125  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 126  * is fully initialized, including all of its kthreads having been spawned.
 127  */
 128 int rcu_scheduler_active __read_mostly;
 129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 130 
 131 /*
 132  * The rcu_scheduler_fully_active variable transitions from zero to one
 133  * during the early_initcall() processing, which is after the scheduler
 134  * is capable of creating new tasks.  So RCU processing (for example,
 135  * creating tasks for RCU priority boosting) must be delayed until after
 136  * rcu_scheduler_fully_active transitions from zero to one.  We also
 137  * currently delay invocation of any RCU callbacks until after this point.
 138  *
 139  * It might later prove better for people registering RCU callbacks during
 140  * early boot to take responsibility for these callbacks, but one step at
 141  * a time.
 142  */
 143 static int rcu_scheduler_fully_active __read_mostly;
 144 
 145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 146                               unsigned long gps, unsigned long flags);
 147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 150 static void invoke_rcu_core(void);
 151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
 152 static void sync_sched_exp_online_cleanup(int cpu);
 153 
 154 /* rcuc/rcub kthread realtime priority */
 155 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
 156 module_param(kthread_prio, int, 0444);
 157 
 158 /* Delay in jiffies for grace-period initialization delays, debug only. */
 159 
 160 static int gp_preinit_delay;
 161 module_param(gp_preinit_delay, int, 0444);
 162 static int gp_init_delay;
 163 module_param(gp_init_delay, int, 0444);
 164 static int gp_cleanup_delay;
 165 module_param(gp_cleanup_delay, int, 0444);
 166 
 167 /* Retrieve RCU kthreads priority for rcutorture */
 168 int rcu_get_gp_kthreads_prio(void)
 169 {
 170         return kthread_prio;
 171 }
 172 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
 173 
 174 /*
 175  * Number of grace periods between delays, normalized by the duration of
 176  * the delay.  The longer the delay, the more the grace periods between
 177  * each delay.  The reason for this normalization is that it means that,
 178  * for non-zero delays, the overall slowdown of grace periods is constant
 179  * regardless of the duration of the delay.  This arrangement balances
 180  * the need for long delays to increase some race probabilities with the
 181  * need for fast grace periods to increase other race probabilities.
 182  */
 183 #define PER_RCU_NODE_PERIOD 3   /* Number of grace periods between delays. */
 184 
 185 /*
 186  * Compute the mask of online CPUs for the specified rcu_node structure.
 187  * This will not be stable unless the rcu_node structure's ->lock is
 188  * held, but the bit corresponding to the current CPU will be stable
 189  * in most contexts.
 190  */
 191 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 192 {
 193         return READ_ONCE(rnp->qsmaskinitnext);
 194 }
 195 
 196 /*
 197  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 198  * permit this function to be invoked without holding the root rcu_node
 199  * structure's ->lock, but of course results can be subject to change.
 200  */
 201 static int rcu_gp_in_progress(void)
 202 {
 203         return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
 204 }
 205 
 206 /*
 207  * Return the number of callbacks queued on the specified CPU.
 208  * Handles both the nocbs and normal cases.
 209  */
 210 static long rcu_get_n_cbs_cpu(int cpu)
 211 {
 212         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 213 
 214         if (rcu_segcblist_is_enabled(&rdp->cblist))
 215                 return rcu_segcblist_n_cbs(&rdp->cblist);
 216         return 0;
 217 }
 218 
 219 void rcu_softirq_qs(void)
 220 {
 221         rcu_qs();
 222         rcu_preempt_deferred_qs(current);
 223 }
 224 
 225 /*
 226  * Record entry into an extended quiescent state.  This is only to be
 227  * called when not already in an extended quiescent state.
 228  */
 229 static void rcu_dynticks_eqs_enter(void)
 230 {
 231         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 232         int seq;
 233 
 234         /*
 235          * CPUs seeing atomic_add_return() must see prior RCU read-side
 236          * critical sections, and we also must force ordering with the
 237          * next idle sojourn.
 238          */
 239         seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 240         /* Better be in an extended quiescent state! */
 241         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 242                      (seq & RCU_DYNTICK_CTRL_CTR));
 243         /* Better not have special action (TLB flush) pending! */
 244         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 245                      (seq & RCU_DYNTICK_CTRL_MASK));
 246 }
 247 
 248 /*
 249  * Record exit from an extended quiescent state.  This is only to be
 250  * called from an extended quiescent state.
 251  */
 252 static void rcu_dynticks_eqs_exit(void)
 253 {
 254         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 255         int seq;
 256 
 257         /*
 258          * CPUs seeing atomic_add_return() must see prior idle sojourns,
 259          * and we also must force ordering with the next RCU read-side
 260          * critical section.
 261          */
 262         seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 263         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 264                      !(seq & RCU_DYNTICK_CTRL_CTR));
 265         if (seq & RCU_DYNTICK_CTRL_MASK) {
 266                 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
 267                 smp_mb__after_atomic(); /* _exit after clearing mask. */
 268                 /* Prefer duplicate flushes to losing a flush. */
 269                 rcu_eqs_special_exit();
 270         }
 271 }
 272 
 273 /*
 274  * Reset the current CPU's ->dynticks counter to indicate that the
 275  * newly onlined CPU is no longer in an extended quiescent state.
 276  * This will either leave the counter unchanged, or increment it
 277  * to the next non-quiescent value.
 278  *
 279  * The non-atomic test/increment sequence works because the upper bits
 280  * of the ->dynticks counter are manipulated only by the corresponding CPU,
 281  * or when the corresponding CPU is offline.
 282  */
 283 static void rcu_dynticks_eqs_online(void)
 284 {
 285         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 286 
 287         if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
 288                 return;
 289         atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 290 }
 291 
 292 /*
 293  * Is the current CPU in an extended quiescent state?
 294  *
 295  * No ordering, as we are sampling CPU-local information.
 296  */
 297 bool rcu_dynticks_curr_cpu_in_eqs(void)
 298 {
 299         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 300 
 301         return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 302 }
 303 
 304 /*
 305  * Snapshot the ->dynticks counter with full ordering so as to allow
 306  * stable comparison of this counter with past and future snapshots.
 307  */
 308 int rcu_dynticks_snap(struct rcu_data *rdp)
 309 {
 310         int snap = atomic_add_return(0, &rdp->dynticks);
 311 
 312         return snap & ~RCU_DYNTICK_CTRL_MASK;
 313 }
 314 
 315 /*
 316  * Return true if the snapshot returned from rcu_dynticks_snap()
 317  * indicates that RCU is in an extended quiescent state.
 318  */
 319 static bool rcu_dynticks_in_eqs(int snap)
 320 {
 321         return !(snap & RCU_DYNTICK_CTRL_CTR);
 322 }
 323 
 324 /*
 325  * Return true if the CPU corresponding to the specified rcu_data
 326  * structure has spent some time in an extended quiescent state since
 327  * rcu_dynticks_snap() returned the specified snapshot.
 328  */
 329 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
 330 {
 331         return snap != rcu_dynticks_snap(rdp);
 332 }
 333 
 334 /*
 335  * Set the special (bottom) bit of the specified CPU so that it
 336  * will take special action (such as flushing its TLB) on the
 337  * next exit from an extended quiescent state.  Returns true if
 338  * the bit was successfully set, or false if the CPU was not in
 339  * an extended quiescent state.
 340  */
 341 bool rcu_eqs_special_set(int cpu)
 342 {
 343         int old;
 344         int new;
 345         struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 346 
 347         do {
 348                 old = atomic_read(&rdp->dynticks);
 349                 if (old & RCU_DYNTICK_CTRL_CTR)
 350                         return false;
 351                 new = old | RCU_DYNTICK_CTRL_MASK;
 352         } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old);
 353         return true;
 354 }
 355 
 356 /*
 357  * Let the RCU core know that this CPU has gone through the scheduler,
 358  * which is a quiescent state.  This is called when the need for a
 359  * quiescent state is urgent, so we burn an atomic operation and full
 360  * memory barriers to let the RCU core know about it, regardless of what
 361  * this CPU might (or might not) do in the near future.
 362  *
 363  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 364  *
 365  * The caller must have disabled interrupts and must not be idle.
 366  */
 367 static void __maybe_unused rcu_momentary_dyntick_idle(void)
 368 {
 369         int special;
 370 
 371         raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
 372         special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
 373                                     &this_cpu_ptr(&rcu_data)->dynticks);
 374         /* It is illegal to call this from idle state. */
 375         WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 376         rcu_preempt_deferred_qs(current);
 377 }
 378 
 379 /**
 380  * rcu_is_cpu_rrupt_from_idle - see if interrupted from idle
 381  *
 382  * If the current CPU is idle and running at a first-level (not nested)
 383  * interrupt from idle, return true.  The caller must have at least
 384  * disabled preemption.
 385  */
 386 static int rcu_is_cpu_rrupt_from_idle(void)
 387 {
 388         /* Called only from within the scheduling-clock interrupt */
 389         lockdep_assert_in_irq();
 390 
 391         /* Check for counter underflows */
 392         RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
 393                          "RCU dynticks_nesting counter underflow!");
 394         RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
 395                          "RCU dynticks_nmi_nesting counter underflow/zero!");
 396 
 397         /* Are we at first interrupt nesting level? */
 398         if (__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 1)
 399                 return false;
 400 
 401         /* Does CPU appear to be idle from an RCU standpoint? */
 402         return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
 403 }
 404 
 405 #define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch ... */
 406 #define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
 407 static long blimit = DEFAULT_RCU_BLIMIT;
 408 #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
 409 static long qhimark = DEFAULT_RCU_QHIMARK;
 410 #define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
 411 static long qlowmark = DEFAULT_RCU_QLOMARK;
 412 
 413 module_param(blimit, long, 0444);
 414 module_param(qhimark, long, 0444);
 415 module_param(qlowmark, long, 0444);
 416 
 417 static ulong jiffies_till_first_fqs = ULONG_MAX;
 418 static ulong jiffies_till_next_fqs = ULONG_MAX;
 419 static bool rcu_kick_kthreads;
 420 static int rcu_divisor = 7;
 421 module_param(rcu_divisor, int, 0644);
 422 
 423 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
 424 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
 425 module_param(rcu_resched_ns, long, 0644);
 426 
 427 /*
 428  * How long the grace period must be before we start recruiting
 429  * quiescent-state help from rcu_note_context_switch().
 430  */
 431 static ulong jiffies_till_sched_qs = ULONG_MAX;
 432 module_param(jiffies_till_sched_qs, ulong, 0444);
 433 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
 434 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
 435 
 436 /*
 437  * Make sure that we give the grace-period kthread time to detect any
 438  * idle CPUs before taking active measures to force quiescent states.
 439  * However, don't go below 100 milliseconds, adjusted upwards for really
 440  * large systems.
 441  */
 442 static void adjust_jiffies_till_sched_qs(void)
 443 {
 444         unsigned long j;
 445 
 446         /* If jiffies_till_sched_qs was specified, respect the request. */
 447         if (jiffies_till_sched_qs != ULONG_MAX) {
 448                 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
 449                 return;
 450         }
 451         /* Otherwise, set to third fqs scan, but bound below on large system. */
 452         j = READ_ONCE(jiffies_till_first_fqs) +
 453                       2 * READ_ONCE(jiffies_till_next_fqs);
 454         if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
 455                 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
 456         pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
 457         WRITE_ONCE(jiffies_to_sched_qs, j);
 458 }
 459 
 460 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
 461 {
 462         ulong j;
 463         int ret = kstrtoul(val, 0, &j);
 464 
 465         if (!ret) {
 466                 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
 467                 adjust_jiffies_till_sched_qs();
 468         }
 469         return ret;
 470 }
 471 
 472 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
 473 {
 474         ulong j;
 475         int ret = kstrtoul(val, 0, &j);
 476 
 477         if (!ret) {
 478                 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
 479                 adjust_jiffies_till_sched_qs();
 480         }
 481         return ret;
 482 }
 483 
 484 static struct kernel_param_ops first_fqs_jiffies_ops = {
 485         .set = param_set_first_fqs_jiffies,
 486         .get = param_get_ulong,
 487 };
 488 
 489 static struct kernel_param_ops next_fqs_jiffies_ops = {
 490         .set = param_set_next_fqs_jiffies,
 491         .get = param_get_ulong,
 492 };
 493 
 494 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
 495 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 496 module_param(rcu_kick_kthreads, bool, 0644);
 497 
 498 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
 499 static int rcu_pending(void);
 500 
 501 /*
 502  * Return the number of RCU GPs completed thus far for debug & stats.
 503  */
 504 unsigned long rcu_get_gp_seq(void)
 505 {
 506         return READ_ONCE(rcu_state.gp_seq);
 507 }
 508 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 509 
 510 /*
 511  * Return the number of RCU expedited batches completed thus far for
 512  * debug & stats.  Odd numbers mean that a batch is in progress, even
 513  * numbers mean idle.  The value returned will thus be roughly double
 514  * the cumulative batches since boot.
 515  */
 516 unsigned long rcu_exp_batches_completed(void)
 517 {
 518         return rcu_state.expedited_sequence;
 519 }
 520 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
 521 
 522 /*
 523  * Return the root node of the rcu_state structure.
 524  */
 525 static struct rcu_node *rcu_get_root(void)
 526 {
 527         return &rcu_state.node[0];
 528 }
 529 
 530 /*
 531  * Convert a ->gp_state value to a character string.
 532  */
 533 static const char *gp_state_getname(short gs)
 534 {
 535         if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
 536                 return "???";
 537         return gp_state_names[gs];
 538 }
 539 
 540 /*
 541  * Send along grace-period-related data for rcutorture diagnostics.
 542  */
 543 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 544                             unsigned long *gp_seq)
 545 {
 546         switch (test_type) {
 547         case RCU_FLAVOR:
 548                 *flags = READ_ONCE(rcu_state.gp_flags);
 549                 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
 550                 break;
 551         default:
 552                 break;
 553         }
 554 }
 555 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 556 
 557 /*
 558  * Enter an RCU extended quiescent state, which can be either the
 559  * idle loop or adaptive-tickless usermode execution.
 560  *
 561  * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 562  * the possibility of usermode upcalls having messed up our count
 563  * of interrupt nesting level during the prior busy period.
 564  */
 565 static void rcu_eqs_enter(bool user)
 566 {
 567         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 568 
 569         WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
 570         WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
 571         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 572                      rdp->dynticks_nesting == 0);
 573         if (rdp->dynticks_nesting != 1) {
 574                 rdp->dynticks_nesting--;
 575                 return;
 576         }
 577 
 578         lockdep_assert_irqs_disabled();
 579         trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
 580         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 581         rdp = this_cpu_ptr(&rcu_data);
 582         do_nocb_deferred_wakeup(rdp);
 583         rcu_prepare_for_idle();
 584         rcu_preempt_deferred_qs(current);
 585         WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
 586         rcu_dynticks_eqs_enter();
 587         rcu_dynticks_task_enter();
 588 }
 589 
 590 /**
 591  * rcu_idle_enter - inform RCU that current CPU is entering idle
 592  *
 593  * Enter idle mode, in other words, -leave- the mode in which RCU
 594  * read-side critical sections can occur.  (Though RCU read-side
 595  * critical sections can occur in irq handlers in idle, a possibility
 596  * handled by irq_enter() and irq_exit().)
 597  *
 598  * If you add or remove a call to rcu_idle_enter(), be sure to test with
 599  * CONFIG_RCU_EQS_DEBUG=y.
 600  */
 601 void rcu_idle_enter(void)
 602 {
 603         lockdep_assert_irqs_disabled();
 604         rcu_eqs_enter(false);
 605 }
 606 
 607 #ifdef CONFIG_NO_HZ_FULL
 608 /**
 609  * rcu_user_enter - inform RCU that we are resuming userspace.
 610  *
 611  * Enter RCU idle mode right before resuming userspace.  No use of RCU
 612  * is permitted between this call and rcu_user_exit(). This way the
 613  * CPU doesn't need to maintain the tick for RCU maintenance purposes
 614  * when the CPU runs in userspace.
 615  *
 616  * If you add or remove a call to rcu_user_enter(), be sure to test with
 617  * CONFIG_RCU_EQS_DEBUG=y.
 618  */
 619 void rcu_user_enter(void)
 620 {
 621         lockdep_assert_irqs_disabled();
 622         rcu_eqs_enter(true);
 623 }
 624 #endif /* CONFIG_NO_HZ_FULL */
 625 
 626 /*
 627  * If we are returning from the outermost NMI handler that interrupted an
 628  * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 629  * to let the RCU grace-period handling know that the CPU is back to
 630  * being RCU-idle.
 631  *
 632  * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
 633  * with CONFIG_RCU_EQS_DEBUG=y.
 634  */
 635 static __always_inline void rcu_nmi_exit_common(bool irq)
 636 {
 637         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 638 
 639         /*
 640          * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 641          * (We are exiting an NMI handler, so RCU better be paying attention
 642          * to us!)
 643          */
 644         WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
 645         WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 646 
 647         /*
 648          * If the nesting level is not 1, the CPU wasn't RCU-idle, so
 649          * leave it in non-RCU-idle state.
 650          */
 651         if (rdp->dynticks_nmi_nesting != 1) {
 652                 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
 653                                   atomic_read(&rdp->dynticks));
 654                 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
 655                            rdp->dynticks_nmi_nesting - 2);
 656                 return;
 657         }
 658 
 659         /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
 660         trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
 661         WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 662 
 663         if (irq)
 664                 rcu_prepare_for_idle();
 665 
 666         rcu_dynticks_eqs_enter();
 667 
 668         if (irq)
 669                 rcu_dynticks_task_enter();
 670 }
 671 
 672 /**
 673  * rcu_nmi_exit - inform RCU of exit from NMI context
 674  *
 675  * If you add or remove a call to rcu_nmi_exit(), be sure to test
 676  * with CONFIG_RCU_EQS_DEBUG=y.
 677  */
 678 void rcu_nmi_exit(void)
 679 {
 680         rcu_nmi_exit_common(false);
 681 }
 682 
 683 /**
 684  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 685  *
 686  * Exit from an interrupt handler, which might possibly result in entering
 687  * idle mode, in other words, leaving the mode in which read-side critical
 688  * sections can occur.  The caller must have disabled interrupts.
 689  *
 690  * This code assumes that the idle loop never does anything that might
 691  * result in unbalanced calls to irq_enter() and irq_exit().  If your
 692  * architecture's idle loop violates this assumption, RCU will give you what
 693  * you deserve, good and hard.  But very infrequently and irreproducibly.
 694  *
 695  * Use things like work queues to work around this limitation.
 696  *
 697  * You have been warned.
 698  *
 699  * If you add or remove a call to rcu_irq_exit(), be sure to test with
 700  * CONFIG_RCU_EQS_DEBUG=y.
 701  */
 702 void rcu_irq_exit(void)
 703 {
 704         lockdep_assert_irqs_disabled();
 705         rcu_nmi_exit_common(true);
 706 }
 707 
 708 /*
 709  * Wrapper for rcu_irq_exit() where interrupts are enabled.
 710  *
 711  * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 712  * with CONFIG_RCU_EQS_DEBUG=y.
 713  */
 714 void rcu_irq_exit_irqson(void)
 715 {
 716         unsigned long flags;
 717 
 718         local_irq_save(flags);
 719         rcu_irq_exit();
 720         local_irq_restore(flags);
 721 }
 722 
 723 /*
 724  * Exit an RCU extended quiescent state, which can be either the
 725  * idle loop or adaptive-tickless usermode execution.
 726  *
 727  * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 728  * allow for the possibility of usermode upcalls messing up our count of
 729  * interrupt nesting level during the busy period that is just now starting.
 730  */
 731 static void rcu_eqs_exit(bool user)
 732 {
 733         struct rcu_data *rdp;
 734         long oldval;
 735 
 736         lockdep_assert_irqs_disabled();
 737         rdp = this_cpu_ptr(&rcu_data);
 738         oldval = rdp->dynticks_nesting;
 739         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
 740         if (oldval) {
 741                 rdp->dynticks_nesting++;
 742                 return;
 743         }
 744         rcu_dynticks_task_exit();
 745         rcu_dynticks_eqs_exit();
 746         rcu_cleanup_after_idle();
 747         trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
 748         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 749         WRITE_ONCE(rdp->dynticks_nesting, 1);
 750         WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
 751         WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 752 }
 753 
 754 /**
 755  * rcu_idle_exit - inform RCU that current CPU is leaving idle
 756  *
 757  * Exit idle mode, in other words, -enter- the mode in which RCU
 758  * read-side critical sections can occur.
 759  *
 760  * If you add or remove a call to rcu_idle_exit(), be sure to test with
 761  * CONFIG_RCU_EQS_DEBUG=y.
 762  */
 763 void rcu_idle_exit(void)
 764 {
 765         unsigned long flags;
 766 
 767         local_irq_save(flags);
 768         rcu_eqs_exit(false);
 769         local_irq_restore(flags);
 770 }
 771 
 772 #ifdef CONFIG_NO_HZ_FULL
 773 /**
 774  * rcu_user_exit - inform RCU that we are exiting userspace.
 775  *
 776  * Exit RCU idle mode while entering the kernel because it can
 777  * run a RCU read side critical section anytime.
 778  *
 779  * If you add or remove a call to rcu_user_exit(), be sure to test with
 780  * CONFIG_RCU_EQS_DEBUG=y.
 781  */
 782 void rcu_user_exit(void)
 783 {
 784         rcu_eqs_exit(1);
 785 }
 786 #endif /* CONFIG_NO_HZ_FULL */
 787 
 788 /**
 789  * rcu_nmi_enter_common - inform RCU of entry to NMI context
 790  * @irq: Is this call from rcu_irq_enter?
 791  *
 792  * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 793  * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 794  * that the CPU is active.  This implementation permits nested NMIs, as
 795  * long as the nesting level does not overflow an int.  (You will probably
 796  * run out of stack space first.)
 797  *
 798  * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
 799  * with CONFIG_RCU_EQS_DEBUG=y.
 800  */
 801 static __always_inline void rcu_nmi_enter_common(bool irq)
 802 {
 803         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 804         long incby = 2;
 805 
 806         /* Complain about underflow. */
 807         WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
 808 
 809         /*
 810          * If idle from RCU viewpoint, atomically increment ->dynticks
 811          * to mark non-idle and increment ->dynticks_nmi_nesting by one.
 812          * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
 813          * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
 814          * to be in the outermost NMI handler that interrupted an RCU-idle
 815          * period (observation due to Andy Lutomirski).
 816          */
 817         if (rcu_dynticks_curr_cpu_in_eqs()) {
 818 
 819                 if (irq)
 820                         rcu_dynticks_task_exit();
 821 
 822                 rcu_dynticks_eqs_exit();
 823 
 824                 if (irq)
 825                         rcu_cleanup_after_idle();
 826 
 827                 incby = 1;
 828         }
 829         trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
 830                           rdp->dynticks_nmi_nesting,
 831                           rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
 832         WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
 833                    rdp->dynticks_nmi_nesting + incby);
 834         barrier();
 835 }
 836 
 837 /**
 838  * rcu_nmi_enter - inform RCU of entry to NMI context
 839  */
 840 void rcu_nmi_enter(void)
 841 {
 842         rcu_nmi_enter_common(false);
 843 }
 844 NOKPROBE_SYMBOL(rcu_nmi_enter);
 845 
 846 /**
 847  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 848  *
 849  * Enter an interrupt handler, which might possibly result in exiting
 850  * idle mode, in other words, entering the mode in which read-side critical
 851  * sections can occur.  The caller must have disabled interrupts.
 852  *
 853  * Note that the Linux kernel is fully capable of entering an interrupt
 854  * handler that it never exits, for example when doing upcalls to user mode!
 855  * This code assumes that the idle loop never does upcalls to user mode.
 856  * If your architecture's idle loop does do upcalls to user mode (or does
 857  * anything else that results in unbalanced calls to the irq_enter() and
 858  * irq_exit() functions), RCU will give you what you deserve, good and hard.
 859  * But very infrequently and irreproducibly.
 860  *
 861  * Use things like work queues to work around this limitation.
 862  *
 863  * You have been warned.
 864  *
 865  * If you add or remove a call to rcu_irq_enter(), be sure to test with
 866  * CONFIG_RCU_EQS_DEBUG=y.
 867  */
 868 void rcu_irq_enter(void)
 869 {
 870         lockdep_assert_irqs_disabled();
 871         rcu_nmi_enter_common(true);
 872 }
 873 
 874 /*
 875  * Wrapper for rcu_irq_enter() where interrupts are enabled.
 876  *
 877  * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 878  * with CONFIG_RCU_EQS_DEBUG=y.
 879  */
 880 void rcu_irq_enter_irqson(void)
 881 {
 882         unsigned long flags;
 883 
 884         local_irq_save(flags);
 885         rcu_irq_enter();
 886         local_irq_restore(flags);
 887 }
 888 
 889 /**
 890  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 891  *
 892  * Return true if RCU is watching the running CPU, which means that this
 893  * CPU can safely enter RCU read-side critical sections.  In other words,
 894  * if the current CPU is not in its idle loop or is in an interrupt or
 895  * NMI handler, return true.
 896  */
 897 bool notrace rcu_is_watching(void)
 898 {
 899         bool ret;
 900 
 901         preempt_disable_notrace();
 902         ret = !rcu_dynticks_curr_cpu_in_eqs();
 903         preempt_enable_notrace();
 904         return ret;
 905 }
 906 EXPORT_SYMBOL_GPL(rcu_is_watching);
 907 
 908 /*
 909  * If a holdout task is actually running, request an urgent quiescent
 910  * state from its CPU.  This is unsynchronized, so migrations can cause
 911  * the request to go to the wrong CPU.  Which is OK, all that will happen
 912  * is that the CPU's next context switch will be a bit slower and next
 913  * time around this task will generate another request.
 914  */
 915 void rcu_request_urgent_qs_task(struct task_struct *t)
 916 {
 917         int cpu;
 918 
 919         barrier();
 920         cpu = task_cpu(t);
 921         if (!task_curr(t))
 922                 return; /* This task is not running on that CPU. */
 923         smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 924 }
 925 
 926 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 927 
 928 /*
 929  * Is the current CPU online as far as RCU is concerned?
 930  *
 931  * Disable preemption to avoid false positives that could otherwise
 932  * happen due to the current CPU number being sampled, this task being
 933  * preempted, its old CPU being taken offline, resuming on some other CPU,
 934  * then determining that its old CPU is now offline.
 935  *
 936  * Disable checking if in an NMI handler because we cannot safely
 937  * report errors from NMI handlers anyway.  In addition, it is OK to use
 938  * RCU on an offline processor during initial boot, hence the check for
 939  * rcu_scheduler_fully_active.
 940  */
 941 bool rcu_lockdep_current_cpu_online(void)
 942 {
 943         struct rcu_data *rdp;
 944         struct rcu_node *rnp;
 945         bool ret = false;
 946 
 947         if (in_nmi() || !rcu_scheduler_fully_active)
 948                 return true;
 949         preempt_disable();
 950         rdp = this_cpu_ptr(&rcu_data);
 951         rnp = rdp->mynode;
 952         if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
 953                 ret = true;
 954         preempt_enable();
 955         return ret;
 956 }
 957 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 958 
 959 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
 960 
 961 /*
 962  * We are reporting a quiescent state on behalf of some other CPU, so
 963  * it is our responsibility to check for and handle potential overflow
 964  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 965  * After all, the CPU might be in deep idle state, and thus executing no
 966  * code whatsoever.
 967  */
 968 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 969 {
 970         raw_lockdep_assert_held_rcu_node(rnp);
 971         if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
 972                          rnp->gp_seq))
 973                 WRITE_ONCE(rdp->gpwrap, true);
 974         if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
 975                 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 976 }
 977 
 978 /*
 979  * Snapshot the specified CPU's dynticks counter so that we can later
 980  * credit them with an implicit quiescent state.  Return 1 if this CPU
 981  * is in dynticks idle mode, which is an extended quiescent state.
 982  */
 983 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 984 {
 985         rdp->dynticks_snap = rcu_dynticks_snap(rdp);
 986         if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
 987                 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 988                 rcu_gpnum_ovf(rdp->mynode, rdp);
 989                 return 1;
 990         }
 991         return 0;
 992 }
 993 
 994 /*
 995  * Return true if the specified CPU has passed through a quiescent
 996  * state by virtue of being in or having passed through an dynticks
 997  * idle state since the last call to dyntick_save_progress_counter()
 998  * for this same CPU, or by virtue of having been offline.
 999  */
1000 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1001 {
1002         unsigned long jtsq;
1003         bool *rnhqp;
1004         bool *ruqp;
1005         struct rcu_node *rnp = rdp->mynode;
1006 
1007         /*
1008          * If the CPU passed through or entered a dynticks idle phase with
1009          * no active irq/NMI handlers, then we can safely pretend that the CPU
1010          * already acknowledged the request to pass through a quiescent
1011          * state.  Either way, that CPU cannot possibly be in an RCU
1012          * read-side critical section that started before the beginning
1013          * of the current RCU grace period.
1014          */
1015         if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1016                 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1017                 rcu_gpnum_ovf(rnp, rdp);
1018                 return 1;
1019         }
1020 
1021         /* If waiting too long on an offline CPU, complain. */
1022         if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
1023             time_after(jiffies, rcu_state.gp_start + HZ)) {
1024                 bool onl;
1025                 struct rcu_node *rnp1;
1026 
1027                 WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
1028                 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1029                         __func__, rnp->grplo, rnp->grphi, rnp->level,
1030                         (long)rnp->gp_seq, (long)rnp->completedqs);
1031                 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1032                         pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1033                                 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1034                 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1035                 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1036                         __func__, rdp->cpu, ".o"[onl],
1037                         (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1038                         (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1039                 return 1; /* Break things loose after complaining. */
1040         }
1041 
1042         /*
1043          * A CPU running for an extended time within the kernel can
1044          * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1045          * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1046          * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
1047          * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1048          * variable are safe because the assignments are repeated if this
1049          * CPU failed to pass through a quiescent state.  This code
1050          * also checks .jiffies_resched in case jiffies_to_sched_qs
1051          * is set way high.
1052          */
1053         jtsq = READ_ONCE(jiffies_to_sched_qs);
1054         ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1055         rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1056         if (!READ_ONCE(*rnhqp) &&
1057             (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1058              time_after(jiffies, rcu_state.jiffies_resched))) {
1059                 WRITE_ONCE(*rnhqp, true);
1060                 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1061                 smp_store_release(ruqp, true);
1062         } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1063                 WRITE_ONCE(*ruqp, true);
1064         }
1065 
1066         /*
1067          * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1068          * The above code handles this, but only for straight cond_resched().
1069          * And some in-kernel loops check need_resched() before calling
1070          * cond_resched(), which defeats the above code for CPUs that are
1071          * running in-kernel with scheduling-clock interrupts disabled.
1072          * So hit them over the head with the resched_cpu() hammer!
1073          */
1074         if (tick_nohz_full_cpu(rdp->cpu) &&
1075                    time_after(jiffies,
1076                               READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) {
1077                 resched_cpu(rdp->cpu);
1078                 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1079         }
1080 
1081         /*
1082          * If more than halfway to RCU CPU stall-warning time, invoke
1083          * resched_cpu() more frequently to try to loosen things up a bit.
1084          * Also check to see if the CPU is getting hammered with interrupts,
1085          * but only once per grace period, just to keep the IPIs down to
1086          * a dull roar.
1087          */
1088         if (time_after(jiffies, rcu_state.jiffies_resched)) {
1089                 if (time_after(jiffies,
1090                                READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1091                         resched_cpu(rdp->cpu);
1092                         WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1093                 }
1094                 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1095                     !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1096                     (rnp->ffmask & rdp->grpmask)) {
1097                         init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1098                         rdp->rcu_iw_pending = true;
1099                         rdp->rcu_iw_gp_seq = rnp->gp_seq;
1100                         irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1101                 }
1102         }
1103 
1104         return 0;
1105 }
1106 
1107 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1108 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1109                               unsigned long gp_seq_req, const char *s)
1110 {
1111         trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
1112                                       rnp->level, rnp->grplo, rnp->grphi, s);
1113 }
1114 
1115 /*
1116  * rcu_start_this_gp - Request the start of a particular grace period
1117  * @rnp_start: The leaf node of the CPU from which to start.
1118  * @rdp: The rcu_data corresponding to the CPU from which to start.
1119  * @gp_seq_req: The gp_seq of the grace period to start.
1120  *
1121  * Start the specified grace period, as needed to handle newly arrived
1122  * callbacks.  The required future grace periods are recorded in each
1123  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
1124  * is reason to awaken the grace-period kthread.
1125  *
1126  * The caller must hold the specified rcu_node structure's ->lock, which
1127  * is why the caller is responsible for waking the grace-period kthread.
1128  *
1129  * Returns true if the GP thread needs to be awakened else false.
1130  */
1131 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1132                               unsigned long gp_seq_req)
1133 {
1134         bool ret = false;
1135         struct rcu_node *rnp;
1136 
1137         /*
1138          * Use funnel locking to either acquire the root rcu_node
1139          * structure's lock or bail out if the need for this grace period
1140          * has already been recorded -- or if that grace period has in
1141          * fact already started.  If there is already a grace period in
1142          * progress in a non-leaf node, no recording is needed because the
1143          * end of the grace period will scan the leaf rcu_node structures.
1144          * Note that rnp_start->lock must not be released.
1145          */
1146         raw_lockdep_assert_held_rcu_node(rnp_start);
1147         trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1148         for (rnp = rnp_start; 1; rnp = rnp->parent) {
1149                 if (rnp != rnp_start)
1150                         raw_spin_lock_rcu_node(rnp);
1151                 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1152                     rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1153                     (rnp != rnp_start &&
1154                      rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1155                         trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1156                                           TPS("Prestarted"));
1157                         goto unlock_out;
1158                 }
1159                 rnp->gp_seq_needed = gp_seq_req;
1160                 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1161                         /*
1162                          * We just marked the leaf or internal node, and a
1163                          * grace period is in progress, which means that
1164                          * rcu_gp_cleanup() will see the marking.  Bail to
1165                          * reduce contention.
1166                          */
1167                         trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1168                                           TPS("Startedleaf"));
1169                         goto unlock_out;
1170                 }
1171                 if (rnp != rnp_start && rnp->parent != NULL)
1172                         raw_spin_unlock_rcu_node(rnp);
1173                 if (!rnp->parent)
1174                         break;  /* At root, and perhaps also leaf. */
1175         }
1176 
1177         /* If GP already in progress, just leave, otherwise start one. */
1178         if (rcu_gp_in_progress()) {
1179                 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1180                 goto unlock_out;
1181         }
1182         trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1183         WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1184         rcu_state.gp_req_activity = jiffies;
1185         if (!rcu_state.gp_kthread) {
1186                 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1187                 goto unlock_out;
1188         }
1189         trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq"));
1190         ret = true;  /* Caller must wake GP kthread. */
1191 unlock_out:
1192         /* Push furthest requested GP to leaf node and rcu_data structure. */
1193         if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1194                 rnp_start->gp_seq_needed = rnp->gp_seq_needed;
1195                 rdp->gp_seq_needed = rnp->gp_seq_needed;
1196         }
1197         if (rnp != rnp_start)
1198                 raw_spin_unlock_rcu_node(rnp);
1199         return ret;
1200 }
1201 
1202 /*
1203  * Clean up any old requests for the just-ended grace period.  Also return
1204  * whether any additional grace periods have been requested.
1205  */
1206 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1207 {
1208         bool needmore;
1209         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1210 
1211         needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1212         if (!needmore)
1213                 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1214         trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1215                           needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1216         return needmore;
1217 }
1218 
1219 /*
1220  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in
1221  * an interrupt or softirq handler), and don't bother awakening when there
1222  * is nothing for the grace-period kthread to do (as in several CPUs raced
1223  * to awaken, and we lost), and finally don't try to awaken a kthread that
1224  * has not yet been created.  If all those checks are passed, track some
1225  * debug information and awaken.
1226  *
1227  * So why do the self-wakeup when in an interrupt or softirq handler
1228  * in the grace-period kthread's context?  Because the kthread might have
1229  * been interrupted just as it was going to sleep, and just after the final
1230  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1231  * is required, and is therefore supplied.
1232  */
1233 static void rcu_gp_kthread_wake(void)
1234 {
1235         if ((current == rcu_state.gp_kthread &&
1236              !in_irq() && !in_serving_softirq()) ||
1237             !READ_ONCE(rcu_state.gp_flags) ||
1238             !rcu_state.gp_kthread)
1239                 return;
1240         WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1241         WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1242         swake_up_one(&rcu_state.gp_wq);
1243 }
1244 
1245 /*
1246  * If there is room, assign a ->gp_seq number to any callbacks on this
1247  * CPU that have not already been assigned.  Also accelerate any callbacks
1248  * that were previously assigned a ->gp_seq number that has since proven
1249  * to be too conservative, which can happen if callbacks get assigned a
1250  * ->gp_seq number while RCU is idle, but with reference to a non-root
1251  * rcu_node structure.  This function is idempotent, so it does not hurt
1252  * to call it repeatedly.  Returns an flag saying that we should awaken
1253  * the RCU grace-period kthread.
1254  *
1255  * The caller must hold rnp->lock with interrupts disabled.
1256  */
1257 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1258 {
1259         unsigned long gp_seq_req;
1260         bool ret = false;
1261 
1262         rcu_lockdep_assert_cblist_protected(rdp);
1263         raw_lockdep_assert_held_rcu_node(rnp);
1264 
1265         /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1266         if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1267                 return false;
1268 
1269         /*
1270          * Callbacks are often registered with incomplete grace-period
1271          * information.  Something about the fact that getting exact
1272          * information requires acquiring a global lock...  RCU therefore
1273          * makes a conservative estimate of the grace period number at which
1274          * a given callback will become ready to invoke.        The following
1275          * code checks this estimate and improves it when possible, thus
1276          * accelerating callback invocation to an earlier grace-period
1277          * number.
1278          */
1279         gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1280         if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1281                 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1282 
1283         /* Trace depending on how much we were able to accelerate. */
1284         if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1285                 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
1286         else
1287                 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
1288         return ret;
1289 }
1290 
1291 /*
1292  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1293  * rcu_node structure's ->lock be held.  It consults the cached value
1294  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1295  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1296  * while holding the leaf rcu_node structure's ->lock.
1297  */
1298 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1299                                         struct rcu_data *rdp)
1300 {
1301         unsigned long c;
1302         bool needwake;
1303 
1304         rcu_lockdep_assert_cblist_protected(rdp);
1305         c = rcu_seq_snap(&rcu_state.gp_seq);
1306         if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1307                 /* Old request still live, so mark recent callbacks. */
1308                 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1309                 return;
1310         }
1311         raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1312         needwake = rcu_accelerate_cbs(rnp, rdp);
1313         raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1314         if (needwake)
1315                 rcu_gp_kthread_wake();
1316 }
1317 
1318 /*
1319  * Move any callbacks whose grace period has completed to the
1320  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1321  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1322  * sublist.  This function is idempotent, so it does not hurt to
1323  * invoke it repeatedly.  As long as it is not invoked -too- often...
1324  * Returns true if the RCU grace-period kthread needs to be awakened.
1325  *
1326  * The caller must hold rnp->lock with interrupts disabled.
1327  */
1328 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1329 {
1330         rcu_lockdep_assert_cblist_protected(rdp);
1331         raw_lockdep_assert_held_rcu_node(rnp);
1332 
1333         /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1334         if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1335                 return false;
1336 
1337         /*
1338          * Find all callbacks whose ->gp_seq numbers indicate that they
1339          * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1340          */
1341         rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1342 
1343         /* Classify any remaining callbacks. */
1344         return rcu_accelerate_cbs(rnp, rdp);
1345 }
1346 
1347 /*
1348  * Move and classify callbacks, but only if doing so won't require
1349  * that the RCU grace-period kthread be awakened.
1350  */
1351 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1352                                                   struct rcu_data *rdp)
1353 {
1354         rcu_lockdep_assert_cblist_protected(rdp);
1355         if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1356             !raw_spin_trylock_rcu_node(rnp))
1357                 return;
1358         WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1359         raw_spin_unlock_rcu_node(rnp);
1360 }
1361 
1362 /*
1363  * Update CPU-local rcu_data state to record the beginnings and ends of
1364  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1365  * structure corresponding to the current CPU, and must have irqs disabled.
1366  * Returns true if the grace-period kthread needs to be awakened.
1367  */
1368 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1369 {
1370         bool ret = false;
1371         bool need_gp;
1372         const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1373                                rcu_segcblist_is_offloaded(&rdp->cblist);
1374 
1375         raw_lockdep_assert_held_rcu_node(rnp);
1376 
1377         if (rdp->gp_seq == rnp->gp_seq)
1378                 return false; /* Nothing to do. */
1379 
1380         /* Handle the ends of any preceding grace periods first. */
1381         if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1382             unlikely(READ_ONCE(rdp->gpwrap))) {
1383                 if (!offloaded)
1384                         ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1385                 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1386         } else {
1387                 if (!offloaded)
1388                         ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1389         }
1390 
1391         /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1392         if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1393             unlikely(READ_ONCE(rdp->gpwrap))) {
1394                 /*
1395                  * If the current grace period is waiting for this CPU,
1396                  * set up to detect a quiescent state, otherwise don't
1397                  * go looking for one.
1398                  */
1399                 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1400                 need_gp = !!(rnp->qsmask & rdp->grpmask);
1401                 rdp->cpu_no_qs.b.norm = need_gp;
1402                 rdp->core_needs_qs = need_gp;
1403                 zero_cpu_stall_ticks(rdp);
1404         }
1405         rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1406         if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1407                 rdp->gp_seq_needed = rnp->gp_seq_needed;
1408         WRITE_ONCE(rdp->gpwrap, false);
1409         rcu_gpnum_ovf(rnp, rdp);
1410         return ret;
1411 }
1412 
1413 static void note_gp_changes(struct rcu_data *rdp)
1414 {
1415         unsigned long flags;
1416         bool needwake;
1417         struct rcu_node *rnp;
1418 
1419         local_irq_save(flags);
1420         rnp = rdp->mynode;
1421         if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1422              !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1423             !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1424                 local_irq_restore(flags);
1425                 return;
1426         }
1427         needwake = __note_gp_changes(rnp, rdp);
1428         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1429         if (needwake)
1430                 rcu_gp_kthread_wake();
1431 }
1432 
1433 static void rcu_gp_slow(int delay)
1434 {
1435         if (delay > 0 &&
1436             !(rcu_seq_ctr(rcu_state.gp_seq) %
1437               (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1438                 schedule_timeout_uninterruptible(delay);
1439 }
1440 
1441 /*
1442  * Initialize a new grace period.  Return false if no grace period required.
1443  */
1444 static bool rcu_gp_init(void)
1445 {
1446         unsigned long flags;
1447         unsigned long oldmask;
1448         unsigned long mask;
1449         struct rcu_data *rdp;
1450         struct rcu_node *rnp = rcu_get_root();
1451 
1452         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1453         raw_spin_lock_irq_rcu_node(rnp);
1454         if (!READ_ONCE(rcu_state.gp_flags)) {
1455                 /* Spurious wakeup, tell caller to go back to sleep.  */
1456                 raw_spin_unlock_irq_rcu_node(rnp);
1457                 return false;
1458         }
1459         WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1460 
1461         if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1462                 /*
1463                  * Grace period already in progress, don't start another.
1464                  * Not supposed to be able to happen.
1465                  */
1466                 raw_spin_unlock_irq_rcu_node(rnp);
1467                 return false;
1468         }
1469 
1470         /* Advance to a new grace period and initialize state. */
1471         record_gp_stall_check_time();
1472         /* Record GP times before starting GP, hence rcu_seq_start(). */
1473         rcu_seq_start(&rcu_state.gp_seq);
1474         trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1475         raw_spin_unlock_irq_rcu_node(rnp);
1476 
1477         /*
1478          * Apply per-leaf buffered online and offline operations to the
1479          * rcu_node tree.  Note that this new grace period need not wait
1480          * for subsequent online CPUs, and that quiescent-state forcing
1481          * will handle subsequent offline CPUs.
1482          */
1483         rcu_state.gp_state = RCU_GP_ONOFF;
1484         rcu_for_each_leaf_node(rnp) {
1485                 raw_spin_lock(&rcu_state.ofl_lock);
1486                 raw_spin_lock_irq_rcu_node(rnp);
1487                 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1488                     !rnp->wait_blkd_tasks) {
1489                         /* Nothing to do on this leaf rcu_node structure. */
1490                         raw_spin_unlock_irq_rcu_node(rnp);
1491                         raw_spin_unlock(&rcu_state.ofl_lock);
1492                         continue;
1493                 }
1494 
1495                 /* Record old state, apply changes to ->qsmaskinit field. */
1496                 oldmask = rnp->qsmaskinit;
1497                 rnp->qsmaskinit = rnp->qsmaskinitnext;
1498 
1499                 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1500                 if (!oldmask != !rnp->qsmaskinit) {
1501                         if (!oldmask) { /* First online CPU for rcu_node. */
1502                                 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1503                                         rcu_init_new_rnp(rnp);
1504                         } else if (rcu_preempt_has_tasks(rnp)) {
1505                                 rnp->wait_blkd_tasks = true; /* blocked tasks */
1506                         } else { /* Last offline CPU and can propagate. */
1507                                 rcu_cleanup_dead_rnp(rnp);
1508                         }
1509                 }
1510 
1511                 /*
1512                  * If all waited-on tasks from prior grace period are
1513                  * done, and if all this rcu_node structure's CPUs are
1514                  * still offline, propagate up the rcu_node tree and
1515                  * clear ->wait_blkd_tasks.  Otherwise, if one of this
1516                  * rcu_node structure's CPUs has since come back online,
1517                  * simply clear ->wait_blkd_tasks.
1518                  */
1519                 if (rnp->wait_blkd_tasks &&
1520                     (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1521                         rnp->wait_blkd_tasks = false;
1522                         if (!rnp->qsmaskinit)
1523                                 rcu_cleanup_dead_rnp(rnp);
1524                 }
1525 
1526                 raw_spin_unlock_irq_rcu_node(rnp);
1527                 raw_spin_unlock(&rcu_state.ofl_lock);
1528         }
1529         rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1530 
1531         /*
1532          * Set the quiescent-state-needed bits in all the rcu_node
1533          * structures for all currently online CPUs in breadth-first
1534          * order, starting from the root rcu_node structure, relying on the
1535          * layout of the tree within the rcu_state.node[] array.  Note that
1536          * other CPUs will access only the leaves of the hierarchy, thus
1537          * seeing that no grace period is in progress, at least until the
1538          * corresponding leaf node has been initialized.
1539          *
1540          * The grace period cannot complete until the initialization
1541          * process finishes, because this kthread handles both.
1542          */
1543         rcu_state.gp_state = RCU_GP_INIT;
1544         rcu_for_each_node_breadth_first(rnp) {
1545                 rcu_gp_slow(gp_init_delay);
1546                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1547                 rdp = this_cpu_ptr(&rcu_data);
1548                 rcu_preempt_check_blocked_tasks(rnp);
1549                 rnp->qsmask = rnp->qsmaskinit;
1550                 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1551                 if (rnp == rdp->mynode)
1552                         (void)__note_gp_changes(rnp, rdp);
1553                 rcu_preempt_boost_start_gp(rnp);
1554                 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1555                                             rnp->level, rnp->grplo,
1556                                             rnp->grphi, rnp->qsmask);
1557                 /* Quiescent states for tasks on any now-offline CPUs. */
1558                 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1559                 rnp->rcu_gp_init_mask = mask;
1560                 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1561                         rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1562                 else
1563                         raw_spin_unlock_irq_rcu_node(rnp);
1564                 cond_resched_tasks_rcu_qs();
1565                 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1566         }
1567 
1568         return true;
1569 }
1570 
1571 /*
1572  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1573  * time.
1574  */
1575 static bool rcu_gp_fqs_check_wake(int *gfp)
1576 {
1577         struct rcu_node *rnp = rcu_get_root();
1578 
1579         /* Someone like call_rcu() requested a force-quiescent-state scan. */
1580         *gfp = READ_ONCE(rcu_state.gp_flags);
1581         if (*gfp & RCU_GP_FLAG_FQS)
1582                 return true;
1583 
1584         /* The current grace period has completed. */
1585         if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1586                 return true;
1587 
1588         return false;
1589 }
1590 
1591 /*
1592  * Do one round of quiescent-state forcing.
1593  */
1594 static void rcu_gp_fqs(bool first_time)
1595 {
1596         struct rcu_node *rnp = rcu_get_root();
1597 
1598         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1599         rcu_state.n_force_qs++;
1600         if (first_time) {
1601                 /* Collect dyntick-idle snapshots. */
1602                 force_qs_rnp(dyntick_save_progress_counter);
1603         } else {
1604                 /* Handle dyntick-idle and offline CPUs. */
1605                 force_qs_rnp(rcu_implicit_dynticks_qs);
1606         }
1607         /* Clear flag to prevent immediate re-entry. */
1608         if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1609                 raw_spin_lock_irq_rcu_node(rnp);
1610                 WRITE_ONCE(rcu_state.gp_flags,
1611                            READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1612                 raw_spin_unlock_irq_rcu_node(rnp);
1613         }
1614 }
1615 
1616 /*
1617  * Loop doing repeated quiescent-state forcing until the grace period ends.
1618  */
1619 static void rcu_gp_fqs_loop(void)
1620 {
1621         bool first_gp_fqs;
1622         int gf;
1623         unsigned long j;
1624         int ret;
1625         struct rcu_node *rnp = rcu_get_root();
1626 
1627         first_gp_fqs = true;
1628         j = READ_ONCE(jiffies_till_first_fqs);
1629         ret = 0;
1630         for (;;) {
1631                 if (!ret) {
1632                         rcu_state.jiffies_force_qs = jiffies + j;
1633                         WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1634                                    jiffies + (j ? 3 * j : 2));
1635                 }
1636                 trace_rcu_grace_period(rcu_state.name,
1637                                        READ_ONCE(rcu_state.gp_seq),
1638                                        TPS("fqswait"));
1639                 rcu_state.gp_state = RCU_GP_WAIT_FQS;
1640                 ret = swait_event_idle_timeout_exclusive(
1641                                 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1642                 rcu_state.gp_state = RCU_GP_DOING_FQS;
1643                 /* Locking provides needed memory barriers. */
1644                 /* If grace period done, leave loop. */
1645                 if (!READ_ONCE(rnp->qsmask) &&
1646                     !rcu_preempt_blocked_readers_cgp(rnp))
1647                         break;
1648                 /* If time for quiescent-state forcing, do it. */
1649                 if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
1650                     (gf & RCU_GP_FLAG_FQS)) {
1651                         trace_rcu_grace_period(rcu_state.name,
1652                                                READ_ONCE(rcu_state.gp_seq),
1653                                                TPS("fqsstart"));
1654                         rcu_gp_fqs(first_gp_fqs);
1655                         first_gp_fqs = false;
1656                         trace_rcu_grace_period(rcu_state.name,
1657                                                READ_ONCE(rcu_state.gp_seq),
1658                                                TPS("fqsend"));
1659                         cond_resched_tasks_rcu_qs();
1660                         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1661                         ret = 0; /* Force full wait till next FQS. */
1662                         j = READ_ONCE(jiffies_till_next_fqs);
1663                 } else {
1664                         /* Deal with stray signal. */
1665                         cond_resched_tasks_rcu_qs();
1666                         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1667                         WARN_ON(signal_pending(current));
1668                         trace_rcu_grace_period(rcu_state.name,
1669                                                READ_ONCE(rcu_state.gp_seq),
1670                                                TPS("fqswaitsig"));
1671                         ret = 1; /* Keep old FQS timing. */
1672                         j = jiffies;
1673                         if (time_after(jiffies, rcu_state.jiffies_force_qs))
1674                                 j = 1;
1675                         else
1676                                 j = rcu_state.jiffies_force_qs - j;
1677                 }
1678         }
1679 }
1680 
1681 /*
1682  * Clean up after the old grace period.
1683  */
1684 static void rcu_gp_cleanup(void)
1685 {
1686         unsigned long gp_duration;
1687         bool needgp = false;
1688         unsigned long new_gp_seq;
1689         bool offloaded;
1690         struct rcu_data *rdp;
1691         struct rcu_node *rnp = rcu_get_root();
1692         struct swait_queue_head *sq;
1693 
1694         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1695         raw_spin_lock_irq_rcu_node(rnp);
1696         rcu_state.gp_end = jiffies;
1697         gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1698         if (gp_duration > rcu_state.gp_max)
1699                 rcu_state.gp_max = gp_duration;
1700 
1701         /*
1702          * We know the grace period is complete, but to everyone else
1703          * it appears to still be ongoing.  But it is also the case
1704          * that to everyone else it looks like there is nothing that
1705          * they can do to advance the grace period.  It is therefore
1706          * safe for us to drop the lock in order to mark the grace
1707          * period as completed in all of the rcu_node structures.
1708          */
1709         raw_spin_unlock_irq_rcu_node(rnp);
1710 
1711         /*
1712          * Propagate new ->gp_seq value to rcu_node structures so that
1713          * other CPUs don't have to wait until the start of the next grace
1714          * period to process their callbacks.  This also avoids some nasty
1715          * RCU grace-period initialization races by forcing the end of
1716          * the current grace period to be completely recorded in all of
1717          * the rcu_node structures before the beginning of the next grace
1718          * period is recorded in any of the rcu_node structures.
1719          */
1720         new_gp_seq = rcu_state.gp_seq;
1721         rcu_seq_end(&new_gp_seq);
1722         rcu_for_each_node_breadth_first(rnp) {
1723                 raw_spin_lock_irq_rcu_node(rnp);
1724                 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1725                         dump_blkd_tasks(rnp, 10);
1726                 WARN_ON_ONCE(rnp->qsmask);
1727                 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1728                 rdp = this_cpu_ptr(&rcu_data);
1729                 if (rnp == rdp->mynode)
1730                         needgp = __note_gp_changes(rnp, rdp) || needgp;
1731                 /* smp_mb() provided by prior unlock-lock pair. */
1732                 needgp = rcu_future_gp_cleanup(rnp) || needgp;
1733                 sq = rcu_nocb_gp_get(rnp);
1734                 raw_spin_unlock_irq_rcu_node(rnp);
1735                 rcu_nocb_gp_cleanup(sq);
1736                 cond_resched_tasks_rcu_qs();
1737                 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1738                 rcu_gp_slow(gp_cleanup_delay);
1739         }
1740         rnp = rcu_get_root();
1741         raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1742 
1743         /* Declare grace period done, trace first to use old GP number. */
1744         trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1745         rcu_seq_end(&rcu_state.gp_seq);
1746         rcu_state.gp_state = RCU_GP_IDLE;
1747         /* Check for GP requests since above loop. */
1748         rdp = this_cpu_ptr(&rcu_data);
1749         if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1750                 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1751                                   TPS("CleanupMore"));
1752                 needgp = true;
1753         }
1754         /* Advance CBs to reduce false positives below. */
1755         offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1756                     rcu_segcblist_is_offloaded(&rdp->cblist);
1757         if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1758                 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1759                 rcu_state.gp_req_activity = jiffies;
1760                 trace_rcu_grace_period(rcu_state.name,
1761                                        READ_ONCE(rcu_state.gp_seq),
1762                                        TPS("newreq"));
1763         } else {
1764                 WRITE_ONCE(rcu_state.gp_flags,
1765                            rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1766         }
1767         raw_spin_unlock_irq_rcu_node(rnp);
1768 }
1769 
1770 /*
1771  * Body of kthread that handles grace periods.
1772  */
1773 static int __noreturn rcu_gp_kthread(void *unused)
1774 {
1775         rcu_bind_gp_kthread();
1776         for (;;) {
1777 
1778                 /* Handle grace-period start. */
1779                 for (;;) {
1780                         trace_rcu_grace_period(rcu_state.name,
1781                                                READ_ONCE(rcu_state.gp_seq),
1782                                                TPS("reqwait"));
1783                         rcu_state.gp_state = RCU_GP_WAIT_GPS;
1784                         swait_event_idle_exclusive(rcu_state.gp_wq,
1785                                          READ_ONCE(rcu_state.gp_flags) &
1786                                          RCU_GP_FLAG_INIT);
1787                         rcu_state.gp_state = RCU_GP_DONE_GPS;
1788                         /* Locking provides needed memory barrier. */
1789                         if (rcu_gp_init())
1790                                 break;
1791                         cond_resched_tasks_rcu_qs();
1792                         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1793                         WARN_ON(signal_pending(current));
1794                         trace_rcu_grace_period(rcu_state.name,
1795                                                READ_ONCE(rcu_state.gp_seq),
1796                                                TPS("reqwaitsig"));
1797                 }
1798 
1799                 /* Handle quiescent-state forcing. */
1800                 rcu_gp_fqs_loop();
1801 
1802                 /* Handle grace-period end. */
1803                 rcu_state.gp_state = RCU_GP_CLEANUP;
1804                 rcu_gp_cleanup();
1805                 rcu_state.gp_state = RCU_GP_CLEANED;
1806         }
1807 }
1808 
1809 /*
1810  * Report a full set of quiescent states to the rcu_state data structure.
1811  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1812  * another grace period is required.  Whether we wake the grace-period
1813  * kthread or it awakens itself for the next round of quiescent-state
1814  * forcing, that kthread will clean up after the just-completed grace
1815  * period.  Note that the caller must hold rnp->lock, which is released
1816  * before return.
1817  */
1818 static void rcu_report_qs_rsp(unsigned long flags)
1819         __releases(rcu_get_root()->lock)
1820 {
1821         raw_lockdep_assert_held_rcu_node(rcu_get_root());
1822         WARN_ON_ONCE(!rcu_gp_in_progress());
1823         WRITE_ONCE(rcu_state.gp_flags,
1824                    READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1825         raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1826         rcu_gp_kthread_wake();
1827 }
1828 
1829 /*
1830  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1831  * Allows quiescent states for a group of CPUs to be reported at one go
1832  * to the specified rcu_node structure, though all the CPUs in the group
1833  * must be represented by the same rcu_node structure (which need not be a
1834  * leaf rcu_node structure, though it often will be).  The gps parameter
1835  * is the grace-period snapshot, which means that the quiescent states
1836  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1837  * must be held upon entry, and it is released before return.
1838  *
1839  * As a special case, if mask is zero, the bit-already-cleared check is
1840  * disabled.  This allows propagating quiescent state due to resumed tasks
1841  * during grace-period initialization.
1842  */
1843 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1844                               unsigned long gps, unsigned long flags)
1845         __releases(rnp->lock)
1846 {
1847         unsigned long oldmask = 0;
1848         struct rcu_node *rnp_c;
1849 
1850         raw_lockdep_assert_held_rcu_node(rnp);
1851 
1852         /* Walk up the rcu_node hierarchy. */
1853         for (;;) {
1854                 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1855 
1856                         /*
1857                          * Our bit has already been cleared, or the
1858                          * relevant grace period is already over, so done.
1859                          */
1860                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1861                         return;
1862                 }
1863                 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1864                 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1865                              rcu_preempt_blocked_readers_cgp(rnp));
1866                 rnp->qsmask &= ~mask;
1867                 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1868                                                  mask, rnp->qsmask, rnp->level,
1869                                                  rnp->grplo, rnp->grphi,
1870                                                  !!rnp->gp_tasks);
1871                 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1872 
1873                         /* Other bits still set at this level, so done. */
1874                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1875                         return;
1876                 }
1877                 rnp->completedqs = rnp->gp_seq;
1878                 mask = rnp->grpmask;
1879                 if (rnp->parent == NULL) {
1880 
1881                         /* No more levels.  Exit loop holding root lock. */
1882 
1883                         break;
1884                 }
1885                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1886                 rnp_c = rnp;
1887                 rnp = rnp->parent;
1888                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1889                 oldmask = rnp_c->qsmask;
1890         }
1891 
1892         /*
1893          * Get here if we are the last CPU to pass through a quiescent
1894          * state for this grace period.  Invoke rcu_report_qs_rsp()
1895          * to clean up and start the next grace period if one is needed.
1896          */
1897         rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1898 }
1899 
1900 /*
1901  * Record a quiescent state for all tasks that were previously queued
1902  * on the specified rcu_node structure and that were blocking the current
1903  * RCU grace period.  The caller must hold the corresponding rnp->lock with
1904  * irqs disabled, and this lock is released upon return, but irqs remain
1905  * disabled.
1906  */
1907 static void __maybe_unused
1908 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1909         __releases(rnp->lock)
1910 {
1911         unsigned long gps;
1912         unsigned long mask;
1913         struct rcu_node *rnp_p;
1914 
1915         raw_lockdep_assert_held_rcu_node(rnp);
1916         if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) ||
1917             WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1918             rnp->qsmask != 0) {
1919                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1920                 return;  /* Still need more quiescent states! */
1921         }
1922 
1923         rnp->completedqs = rnp->gp_seq;
1924         rnp_p = rnp->parent;
1925         if (rnp_p == NULL) {
1926                 /*
1927                  * Only one rcu_node structure in the tree, so don't
1928                  * try to report up to its nonexistent parent!
1929                  */
1930                 rcu_report_qs_rsp(flags);
1931                 return;
1932         }
1933 
1934         /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1935         gps = rnp->gp_seq;
1936         mask = rnp->grpmask;
1937         raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
1938         raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
1939         rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1940 }
1941 
1942 /*
1943  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1944  * structure.  This must be called from the specified CPU.
1945  */
1946 static void
1947 rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
1948 {
1949         unsigned long flags;
1950         unsigned long mask;
1951         bool needwake = false;
1952         const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1953                                rcu_segcblist_is_offloaded(&rdp->cblist);
1954         struct rcu_node *rnp;
1955 
1956         rnp = rdp->mynode;
1957         raw_spin_lock_irqsave_rcu_node(rnp, flags);
1958         if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
1959             rdp->gpwrap) {
1960 
1961                 /*
1962                  * The grace period in which this quiescent state was
1963                  * recorded has ended, so don't report it upwards.
1964                  * We will instead need a new quiescent state that lies
1965                  * within the current grace period.
1966                  */
1967                 rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
1968                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1969                 return;
1970         }
1971         mask = rdp->grpmask;
1972         rdp->core_needs_qs = false;
1973         if ((rnp->qsmask & mask) == 0) {
1974                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1975         } else {
1976                 /*
1977                  * This GP can't end until cpu checks in, so all of our
1978                  * callbacks can be processed during the next GP.
1979                  */
1980                 if (!offloaded)
1981                         needwake = rcu_accelerate_cbs(rnp, rdp);
1982 
1983                 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1984                 /* ^^^ Released rnp->lock */
1985                 if (needwake)
1986                         rcu_gp_kthread_wake();
1987         }
1988 }
1989 
1990 /*
1991  * Check to see if there is a new grace period of which this CPU
1992  * is not yet aware, and if so, set up local rcu_data state for it.
1993  * Otherwise, see if this CPU has just passed through its first
1994  * quiescent state for this grace period, and record that fact if so.
1995  */
1996 static void
1997 rcu_check_quiescent_state(struct rcu_data *rdp)
1998 {
1999         /* Check for grace-period ends and beginnings. */
2000         note_gp_changes(rdp);
2001 
2002         /*
2003          * Does this CPU still need to do its part for current grace period?
2004          * If no, return and let the other CPUs do their part as well.
2005          */
2006         if (!rdp->core_needs_qs)
2007                 return;
2008 
2009         /*
2010          * Was there a quiescent state since the beginning of the grace
2011          * period? If no, then exit and wait for the next call.
2012          */
2013         if (rdp->cpu_no_qs.b.norm)
2014                 return;
2015 
2016         /*
2017          * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2018          * judge of that).
2019          */
2020         rcu_report_qs_rdp(rdp->cpu, rdp);
2021 }
2022 
2023 /*
2024  * Near the end of the offline process.  Trace the fact that this CPU
2025  * is going offline.
2026  */
2027 int rcutree_dying_cpu(unsigned int cpu)
2028 {
2029         bool blkd;
2030         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2031         struct rcu_node *rnp = rdp->mynode;
2032 
2033         if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2034                 return 0;
2035 
2036         blkd = !!(rnp->qsmask & rdp->grpmask);
2037         trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
2038                                blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2039         return 0;
2040 }
2041 
2042 /*
2043  * All CPUs for the specified rcu_node structure have gone offline,
2044  * and all tasks that were preempted within an RCU read-side critical
2045  * section while running on one of those CPUs have since exited their RCU
2046  * read-side critical section.  Some other CPU is reporting this fact with
2047  * the specified rcu_node structure's ->lock held and interrupts disabled.
2048  * This function therefore goes up the tree of rcu_node structures,
2049  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2050  * the leaf rcu_node structure's ->qsmaskinit field has already been
2051  * updated.
2052  *
2053  * This function does check that the specified rcu_node structure has
2054  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2055  * prematurely.  That said, invoking it after the fact will cost you
2056  * a needless lock acquisition.  So once it has done its work, don't
2057  * invoke it again.
2058  */
2059 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2060 {
2061         long mask;
2062         struct rcu_node *rnp = rnp_leaf;
2063 
2064         raw_lockdep_assert_held_rcu_node(rnp_leaf);
2065         if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2066             WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2067             WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2068                 return;
2069         for (;;) {
2070                 mask = rnp->grpmask;
2071                 rnp = rnp->parent;
2072                 if (!rnp)
2073                         break;
2074                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2075                 rnp->qsmaskinit &= ~mask;
2076                 /* Between grace periods, so better already be zero! */
2077                 WARN_ON_ONCE(rnp->qsmask);
2078                 if (rnp->qsmaskinit) {
2079                         raw_spin_unlock_rcu_node(rnp);
2080                         /* irqs remain disabled. */
2081                         return;
2082                 }
2083                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2084         }
2085 }
2086 
2087 /*
2088  * The CPU has been completely removed, and some other CPU is reporting
2089  * this fact from process context.  Do the remainder of the cleanup.
2090  * There can only be one CPU hotplug operation at a time, so no need for
2091  * explicit locking.
2092  */
2093 int rcutree_dead_cpu(unsigned int cpu)
2094 {
2095         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2096         struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2097 
2098         if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2099                 return 0;
2100 
2101         /* Adjust any no-longer-needed kthreads. */
2102         rcu_boost_kthread_setaffinity(rnp, -1);
2103         /* Do any needed no-CB deferred wakeups from this CPU. */
2104         do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2105         return 0;
2106 }
2107 
2108 /*
2109  * Invoke any RCU callbacks that have made it to the end of their grace
2110  * period.  Thottle as specified by rdp->blimit.
2111  */
2112 static void rcu_do_batch(struct rcu_data *rdp)
2113 {
2114         unsigned long flags;
2115         const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2116                                rcu_segcblist_is_offloaded(&rdp->cblist);
2117         struct rcu_head *rhp;
2118         struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2119         long bl, count;
2120         long pending, tlimit = 0;
2121 
2122         /* If no callbacks are ready, just return. */
2123         if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2124                 trace_rcu_batch_start(rcu_state.name,
2125                                       rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2126                                       rcu_segcblist_n_cbs(&rdp->cblist), 0);
2127                 trace_rcu_batch_end(rcu_state.name, 0,
2128                                     !rcu_segcblist_empty(&rdp->cblist),
2129                                     need_resched(), is_idle_task(current),
2130                                     rcu_is_callbacks_kthread());
2131                 return;
2132         }
2133 
2134         /*
2135          * Extract the list of ready callbacks, disabling to prevent
2136          * races with call_rcu() from interrupt handlers.  Leave the
2137          * callback counts, as rcu_barrier() needs to be conservative.
2138          */
2139         local_irq_save(flags);
2140         rcu_nocb_lock(rdp);
2141         WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2142         pending = rcu_segcblist_n_cbs(&rdp->cblist);
2143         bl = max(rdp->blimit, pending >> rcu_divisor);
2144         if (unlikely(bl > 100))
2145                 tlimit = local_clock() + rcu_resched_ns;
2146         trace_rcu_batch_start(rcu_state.name,
2147                               rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2148                               rcu_segcblist_n_cbs(&rdp->cblist), bl);
2149         rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2150         if (offloaded)
2151                 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2152         rcu_nocb_unlock_irqrestore(rdp, flags);
2153 
2154         /* Invoke callbacks. */
2155         rhp = rcu_cblist_dequeue(&rcl);
2156         for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2157                 debug_rcu_head_unqueue(rhp);
2158                 if (__rcu_reclaim(rcu_state.name, rhp))
2159                         rcu_cblist_dequeued_lazy(&rcl);
2160                 /*
2161                  * Stop only if limit reached and CPU has something to do.
2162                  * Note: The rcl structure counts down from zero.
2163                  */
2164                 if (-rcl.len >= bl && !offloaded &&
2165                     (need_resched() ||
2166                      (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2167                         break;
2168                 if (unlikely(tlimit)) {
2169                         /* only call local_clock() every 32 callbacks */
2170                         if (likely((-rcl.len & 31) || local_clock() < tlimit))
2171                                 continue;
2172                         /* Exceeded the time limit, so leave. */
2173                         break;
2174                 }
2175                 if (offloaded) {
2176                         WARN_ON_ONCE(in_serving_softirq());
2177                         local_bh_enable();
2178                         lockdep_assert_irqs_enabled();
2179                         cond_resched_tasks_rcu_qs();
2180                         lockdep_assert_irqs_enabled();
2181                         local_bh_disable();
2182                 }
2183         }
2184 
2185         local_irq_save(flags);
2186         rcu_nocb_lock(rdp);
2187         count = -rcl.len;
2188         trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2189                             is_idle_task(current), rcu_is_callbacks_kthread());
2190 
2191         /* Update counts and requeue any remaining callbacks. */
2192         rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2193         smp_mb(); /* List handling before counting for rcu_barrier(). */
2194         rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2195 
2196         /* Reinstate batch limit if we have worked down the excess. */
2197         count = rcu_segcblist_n_cbs(&rdp->cblist);
2198         if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2199                 rdp->blimit = blimit;
2200 
2201         /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2202         if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2203                 rdp->qlen_last_fqs_check = 0;
2204                 rdp->n_force_qs_snap = rcu_state.n_force_qs;
2205         } else if (count < rdp->qlen_last_fqs_check - qhimark)
2206                 rdp->qlen_last_fqs_check = count;
2207 
2208         /*
2209          * The following usually indicates a double call_rcu().  To track
2210          * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2211          */
2212         WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2213         WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2214                      count != 0 && rcu_segcblist_empty(&rdp->cblist));
2215 
2216         rcu_nocb_unlock_irqrestore(rdp, flags);
2217 
2218         /* Re-invoke RCU core processing if there are callbacks remaining. */
2219         if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2220                 invoke_rcu_core();
2221 }
2222 
2223 /*
2224  * This function is invoked from each scheduling-clock interrupt,
2225  * and checks to see if this CPU is in a non-context-switch quiescent
2226  * state, for example, user mode or idle loop.  It also schedules RCU
2227  * core processing.  If the current grace period has gone on too long,
2228  * it will ask the scheduler to manufacture a context switch for the sole
2229  * purpose of providing a providing the needed quiescent state.
2230  */
2231 void rcu_sched_clock_irq(int user)
2232 {
2233         trace_rcu_utilization(TPS("Start scheduler-tick"));
2234         raw_cpu_inc(rcu_data.ticks_this_gp);
2235         /* The load-acquire pairs with the store-release setting to true. */
2236         if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2237                 /* Idle and userspace execution already are quiescent states. */
2238                 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2239                         set_tsk_need_resched(current);
2240                         set_preempt_need_resched();
2241                 }
2242                 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2243         }
2244         rcu_flavor_sched_clock_irq(user);
2245         if (rcu_pending())
2246                 invoke_rcu_core();
2247 
2248         trace_rcu_utilization(TPS("End scheduler-tick"));
2249 }
2250 
2251 /*
2252  * Scan the leaf rcu_node structures.  For each structure on which all
2253  * CPUs have reported a quiescent state and on which there are tasks
2254  * blocking the current grace period, initiate RCU priority boosting.
2255  * Otherwise, invoke the specified function to check dyntick state for
2256  * each CPU that has not yet reported a quiescent state.
2257  */
2258 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2259 {
2260         int cpu;
2261         unsigned long flags;
2262         unsigned long mask;
2263         struct rcu_node *rnp;
2264 
2265         rcu_for_each_leaf_node(rnp) {
2266                 cond_resched_tasks_rcu_qs();
2267                 mask = 0;
2268                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2269                 if (rnp->qsmask == 0) {
2270                         if (!IS_ENABLED(CONFIG_PREEMPTION) ||
2271                             rcu_preempt_blocked_readers_cgp(rnp)) {
2272                                 /*
2273                                  * No point in scanning bits because they
2274                                  * are all zero.  But we might need to
2275                                  * priority-boost blocked readers.
2276                                  */
2277                                 rcu_initiate_boost(rnp, flags);
2278                                 /* rcu_initiate_boost() releases rnp->lock */
2279                                 continue;
2280                         }
2281                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2282                         continue;
2283                 }
2284                 for_each_leaf_node_possible_cpu(rnp, cpu) {
2285                         unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
2286                         if ((rnp->qsmask & bit) != 0) {
2287                                 if (f(per_cpu_ptr(&rcu_data, cpu)))
2288                                         mask |= bit;
2289                         }
2290                 }
2291                 if (mask != 0) {
2292                         /* Idle/offline CPUs, report (releases rnp->lock). */
2293                         rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2294                 } else {
2295                         /* Nothing to do here, so just drop the lock. */
2296                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2297                 }
2298         }
2299 }
2300 
2301 /*
2302  * Force quiescent states on reluctant CPUs, and also detect which
2303  * CPUs are in dyntick-idle mode.
2304  */
2305 void rcu_force_quiescent_state(void)
2306 {
2307         unsigned long flags;
2308         bool ret;
2309         struct rcu_node *rnp;
2310         struct rcu_node *rnp_old = NULL;
2311 
2312         /* Funnel through hierarchy to reduce memory contention. */
2313         rnp = __this_cpu_read(rcu_data.mynode);
2314         for (; rnp != NULL; rnp = rnp->parent) {
2315                 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2316                       !raw_spin_trylock(&rnp->fqslock);
2317                 if (rnp_old != NULL)
2318                         raw_spin_unlock(&rnp_old->fqslock);
2319                 if (ret)
2320                         return;
2321                 rnp_old = rnp;
2322         }
2323         /* rnp_old == rcu_get_root(), rnp == NULL. */
2324 
2325         /* Reached the root of the rcu_node tree, acquire lock. */
2326         raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2327         raw_spin_unlock(&rnp_old->fqslock);
2328         if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2329                 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2330                 return;  /* Someone beat us to it. */
2331         }
2332         WRITE_ONCE(rcu_state.gp_flags,
2333                    READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2334         raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2335         rcu_gp_kthread_wake();
2336 }
2337 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2338 
2339 /* Perform RCU core processing work for the current CPU.  */
2340 static __latent_entropy void rcu_core(void)
2341 {
2342         unsigned long flags;
2343         struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2344         struct rcu_node *rnp = rdp->mynode;
2345         const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2346                                rcu_segcblist_is_offloaded(&rdp->cblist);
2347 
2348         if (cpu_is_offline(smp_processor_id()))
2349                 return;
2350         trace_rcu_utilization(TPS("Start RCU core"));
2351         WARN_ON_ONCE(!rdp->beenonline);
2352 
2353         /* Report any deferred quiescent states if preemption enabled. */
2354         if (!(preempt_count() & PREEMPT_MASK)) {
2355                 rcu_preempt_deferred_qs(current);
2356         } else if (rcu_preempt_need_deferred_qs(current)) {
2357                 set_tsk_need_resched(current);
2358                 set_preempt_need_resched();
2359         }
2360 
2361         /* Update RCU state based on any recent quiescent states. */
2362         rcu_check_quiescent_state(rdp);
2363 
2364         /* No grace period and unregistered callbacks? */
2365         if (!rcu_gp_in_progress() &&
2366             rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2367                 local_irq_save(flags);
2368                 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2369                         rcu_accelerate_cbs_unlocked(rnp, rdp);
2370                 local_irq_restore(flags);
2371         }
2372 
2373         rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2374 
2375         /* If there are callbacks ready, invoke them. */
2376         if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2377             likely(READ_ONCE(rcu_scheduler_fully_active)))
2378                 rcu_do_batch(rdp);
2379 
2380         /* Do any needed deferred wakeups of rcuo kthreads. */
2381         do_nocb_deferred_wakeup(rdp);
2382         trace_rcu_utilization(TPS("End RCU core"));
2383 }
2384 
2385 static void rcu_core_si(struct softirq_action *h)
2386 {
2387         rcu_core();
2388 }
2389 
2390 static void rcu_wake_cond(struct task_struct *t, int status)
2391 {
2392         /*
2393          * If the thread is yielding, only wake it when this
2394          * is invoked from idle
2395          */
2396         if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2397                 wake_up_process(t);
2398 }
2399 
2400 static void invoke_rcu_core_kthread(void)
2401 {
2402         struct task_struct *t;
2403         unsigned long flags;
2404 
2405         local_irq_save(flags);
2406         __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2407         t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2408         if (t != NULL && t != current)
2409                 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2410         local_irq_restore(flags);
2411 }
2412 
2413 /*
2414  * Wake up this CPU's rcuc kthread to do RCU core processing.
2415  */
2416 static void invoke_rcu_core(void)
2417 {
2418         if (!cpu_online(smp_processor_id()))
2419                 return;
2420         if (use_softirq)
2421                 raise_softirq(RCU_SOFTIRQ);
2422         else
2423                 invoke_rcu_core_kthread();
2424 }
2425 
2426 static void rcu_cpu_kthread_park(unsigned int cpu)
2427 {
2428         per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2429 }
2430 
2431 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2432 {
2433         return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2434 }
2435 
2436 /*
2437  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2438  * the RCU softirq used in configurations of RCU that do not support RCU
2439  * priority boosting.
2440  */
2441 static void rcu_cpu_kthread(unsigned int cpu)
2442 {
2443         unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2444         char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2445         int spincnt;
2446 
2447         for (spincnt = 0; spincnt < 10; spincnt++) {
2448                 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
2449                 local_bh_disable();
2450                 *statusp = RCU_KTHREAD_RUNNING;
2451                 local_irq_disable();
2452                 work = *workp;
2453                 *workp = 0;
2454                 local_irq_enable();
2455                 if (work)
2456                         rcu_core();
2457                 local_bh_enable();
2458                 if (*workp == 0) {
2459                         trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2460                         *statusp = RCU_KTHREAD_WAITING;
2461                         return;
2462                 }
2463         }
2464         *statusp = RCU_KTHREAD_YIELDING;
2465         trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2466         schedule_timeout_interruptible(2);
2467         trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2468         *statusp = RCU_KTHREAD_WAITING;
2469 }
2470 
2471 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2472         .store                  = &rcu_data.rcu_cpu_kthread_task,
2473         .thread_should_run      = rcu_cpu_kthread_should_run,
2474         .thread_fn              = rcu_cpu_kthread,
2475         .thread_comm            = "rcuc/%u",
2476         .setup                  = rcu_cpu_kthread_setup,
2477         .park                   = rcu_cpu_kthread_park,
2478 };
2479 
2480 /*
2481  * Spawn per-CPU RCU core processing kthreads.
2482  */
2483 static int __init rcu_spawn_core_kthreads(void)
2484 {
2485         int cpu;
2486 
2487         for_each_possible_cpu(cpu)
2488                 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2489         if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2490                 return 0;
2491         WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2492                   "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2493         return 0;
2494 }
2495 early_initcall(rcu_spawn_core_kthreads);
2496 
2497 /*
2498  * Handle any core-RCU processing required by a call_rcu() invocation.
2499  */
2500 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2501                             unsigned long flags)
2502 {
2503         /*
2504          * If called from an extended quiescent state, invoke the RCU
2505          * core in order to force a re-evaluation of RCU's idleness.
2506          */
2507         if (!rcu_is_watching())
2508                 invoke_rcu_core();
2509 
2510         /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2511         if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2512                 return;
2513 
2514         /*
2515          * Force the grace period if too many callbacks or too long waiting.
2516          * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2517          * if some other CPU has recently done so.  Also, don't bother
2518          * invoking rcu_force_quiescent_state() if the newly enqueued callback
2519          * is the only one waiting for a grace period to complete.
2520          */
2521         if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2522                      rdp->qlen_last_fqs_check + qhimark)) {
2523 
2524                 /* Are we ignoring a completed grace period? */
2525                 note_gp_changes(rdp);
2526 
2527                 /* Start a new grace period if one not already started. */
2528                 if (!rcu_gp_in_progress()) {
2529                         rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2530                 } else {
2531                         /* Give the grace period a kick. */
2532                         rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2533                         if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2534                             rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2535                                 rcu_force_quiescent_state();
2536                         rdp->n_force_qs_snap = rcu_state.n_force_qs;
2537                         rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2538                 }
2539         }
2540 }
2541 
2542 /*
2543  * RCU callback function to leak a callback.
2544  */
2545 static void rcu_leak_callback(struct rcu_head *rhp)
2546 {
2547 }
2548 
2549 /*
2550  * Helper function for call_rcu() and friends.  The cpu argument will
2551  * normally be -1, indicating "currently running CPU".  It may specify
2552  * a CPU only if that CPU is a no-CBs CPU.  Currently, only rcu_barrier()
2553  * is expected to specify a CPU.
2554  */
2555 static void
2556 __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
2557 {
2558         unsigned long flags;
2559         struct rcu_data *rdp;
2560         bool was_alldone;
2561 
2562         /* Misaligned rcu_head! */
2563         WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2564 
2565         if (debug_rcu_head_queue(head)) {
2566                 /*
2567                  * Probable double call_rcu(), so leak the callback.
2568                  * Use rcu:rcu_callback trace event to find the previous
2569                  * time callback was passed to __call_rcu().
2570                  */
2571                 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2572                           head, head->func);
2573                 WRITE_ONCE(head->func, rcu_leak_callback);
2574                 return;
2575         }
2576         head->func = func;
2577         head->next = NULL;
2578         local_irq_save(flags);
2579         rdp = this_cpu_ptr(&rcu_data);
2580 
2581         /* Add the callback to our list. */
2582         if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2583                 // This can trigger due to call_rcu() from offline CPU:
2584                 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2585                 WARN_ON_ONCE(!rcu_is_watching());
2586                 // Very early boot, before rcu_init().  Initialize if needed
2587                 // and then drop through to queue the callback.
2588                 if (rcu_segcblist_empty(&rdp->cblist))
2589                         rcu_segcblist_init(&rdp->cblist);
2590         }
2591         if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2592                 return; // Enqueued onto ->nocb_bypass, so just leave.
2593         /* If we get here, rcu_nocb_try_bypass() acquired ->nocb_lock. */
2594         rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
2595         if (__is_kfree_rcu_offset((unsigned long)func))
2596                 trace_rcu_kfree_callback(rcu_state.name, head,
2597                                          (unsigned long)func,
2598                                          rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2599                                          rcu_segcblist_n_cbs(&rdp->cblist));
2600         else
2601                 trace_rcu_callback(rcu_state.name, head,
2602                                    rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2603                                    rcu_segcblist_n_cbs(&rdp->cblist));
2604 
2605         /* Go handle any RCU core processing required. */
2606         if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2607             unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
2608                 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2609         } else {
2610                 __call_rcu_core(rdp, head, flags);
2611                 local_irq_restore(flags);
2612         }
2613 }
2614 
2615 /**
2616  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2617  * @head: structure to be used for queueing the RCU updates.
2618  * @func: actual callback function to be invoked after the grace period
2619  *
2620  * The callback function will be invoked some time after a full grace
2621  * period elapses, in other words after all pre-existing RCU read-side
2622  * critical sections have completed.  However, the callback function
2623  * might well execute concurrently with RCU read-side critical sections
2624  * that started after call_rcu() was invoked.  RCU read-side critical
2625  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
2626  * may be nested.  In addition, regions of code across which interrupts,
2627  * preemption, or softirqs have been disabled also serve as RCU read-side
2628  * critical sections.  This includes hardware interrupt handlers, softirq
2629  * handlers, and NMI handlers.
2630  *
2631  * Note that all CPUs must agree that the grace period extended beyond
2632  * all pre-existing RCU read-side critical section.  On systems with more
2633  * than one CPU, this means that when "func()" is invoked, each CPU is
2634  * guaranteed to have executed a full memory barrier since the end of its
2635  * last RCU read-side critical section whose beginning preceded the call
2636  * to call_rcu().  It also means that each CPU executing an RCU read-side
2637  * critical section that continues beyond the start of "func()" must have
2638  * executed a memory barrier after the call_rcu() but before the beginning
2639  * of that RCU read-side critical section.  Note that these guarantees
2640  * include CPUs that are offline, idle, or executing in user mode, as
2641  * well as CPUs that are executing in the kernel.
2642  *
2643  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2644  * resulting RCU callback function "func()", then both CPU A and CPU B are
2645  * guaranteed to execute a full memory barrier during the time interval
2646  * between the call to call_rcu() and the invocation of "func()" -- even
2647  * if CPU A and CPU B are the same CPU (but again only if the system has
2648  * more than one CPU).
2649  */
2650 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2651 {
2652         __call_rcu(head, func, 0);
2653 }
2654 EXPORT_SYMBOL_GPL(call_rcu);
2655 
2656 /*
2657  * Queue an RCU callback for lazy invocation after a grace period.
2658  * This will likely be later named something like "call_rcu_lazy()",
2659  * but this change will require some way of tagging the lazy RCU
2660  * callbacks in the list of pending callbacks. Until then, this
2661  * function may only be called from __kfree_rcu().
2662  */
2663 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
2664 {
2665         __call_rcu(head, func, 1);
2666 }
2667 EXPORT_SYMBOL_GPL(kfree_call_rcu);
2668 
2669 /*
2670  * During early boot, any blocking grace-period wait automatically
2671  * implies a grace period.  Later on, this is never the case for PREEMPT.
2672  *
2673  * Howevr, because a context switch is a grace period for !PREEMPT, any
2674  * blocking grace-period wait automatically implies a grace period if
2675  * there is only one CPU online at any point time during execution of
2676  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
2677  * occasionally incorrectly indicate that there are multiple CPUs online
2678  * when there was in fact only one the whole time, as this just adds some
2679  * overhead: RCU still operates correctly.
2680  */
2681 static int rcu_blocking_is_gp(void)
2682 {
2683         int ret;
2684 
2685         if (IS_ENABLED(CONFIG_PREEMPTION))
2686                 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
2687         might_sleep();  /* Check for RCU read-side critical section. */
2688         preempt_disable();
2689         ret = num_online_cpus() <= 1;
2690         preempt_enable();
2691         return ret;
2692 }
2693 
2694 /**
2695  * synchronize_rcu - wait until a grace period has elapsed.
2696  *
2697  * Control will return to the caller some time after a full grace
2698  * period has elapsed, in other words after all currently executing RCU
2699  * read-side critical sections have completed.  Note, however, that
2700  * upon return from synchronize_rcu(), the caller might well be executing
2701  * concurrently with new RCU read-side critical sections that began while
2702  * synchronize_rcu() was waiting.  RCU read-side critical sections are
2703  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
2704  * In addition, regions of code across which interrupts, preemption, or
2705  * softirqs have been disabled also serve as RCU read-side critical
2706  * sections.  This includes hardware interrupt handlers, softirq handlers,
2707  * and NMI handlers.
2708  *
2709  * Note that this guarantee implies further memory-ordering guarantees.
2710  * On systems with more than one CPU, when synchronize_rcu() returns,
2711  * each CPU is guaranteed to have executed a full memory barrier since
2712  * the end of its last RCU read-side critical section whose beginning
2713  * preceded the call to synchronize_rcu().  In addition, each CPU having
2714  * an RCU read-side critical section that extends beyond the return from
2715  * synchronize_rcu() is guaranteed to have executed a full memory barrier
2716  * after the beginning of synchronize_rcu() and before the beginning of
2717  * that RCU read-side critical section.  Note that these guarantees include
2718  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2719  * that are executing in the kernel.
2720  *
2721  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
2722  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2723  * to have executed a full memory barrier during the execution of
2724  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
2725  * again only if the system has more than one CPU).
2726  */
2727 void synchronize_rcu(void)
2728 {
2729         RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
2730                          lock_is_held(&rcu_lock_map) ||
2731                          lock_is_held(&rcu_sched_lock_map),
2732                          "Illegal synchronize_rcu() in RCU read-side critical section");
2733         if (rcu_blocking_is_gp())
2734                 return;
2735         if (rcu_gp_is_expedited())
2736                 synchronize_rcu_expedited();
2737         else
2738                 wait_rcu_gp(call_rcu);
2739 }
2740 EXPORT_SYMBOL_GPL(synchronize_rcu);
2741 
2742 /**
2743  * get_state_synchronize_rcu - Snapshot current RCU state
2744  *
2745  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2746  * to determine whether or not a full grace period has elapsed in the
2747  * meantime.
2748  */
2749 unsigned long get_state_synchronize_rcu(void)
2750 {
2751         /*
2752          * Any prior manipulation of RCU-protected data must happen
2753          * before the load from ->gp_seq.
2754          */
2755         smp_mb();  /* ^^^ */
2756         return rcu_seq_snap(&rcu_state.gp_seq);
2757 }
2758 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2759 
2760 /**
2761  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
2762  *
2763  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
2764  *
2765  * If a full RCU grace period has elapsed since the earlier call to
2766  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
2767  * synchronize_rcu() to wait for a full grace period.
2768  *
2769  * Yes, this function does not take counter wrap into account.  But
2770  * counter wrap is harmless.  If the counter wraps, we have waited for
2771  * more than 2 billion grace periods (and way more on a 64-bit system!),
2772  * so waiting for one additional grace period should be just fine.
2773  */
2774 void cond_synchronize_rcu(unsigned long oldstate)
2775 {
2776         if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
2777                 synchronize_rcu();
2778         else
2779                 smp_mb(); /* Ensure GP ends before subsequent accesses. */
2780 }
2781 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
2782 
2783 /*
2784  * Check to see if there is any immediate RCU-related work to be done by
2785  * the current CPU, returning 1 if so and zero otherwise.  The checks are
2786  * in order of increasing expense: checks that can be carried out against
2787  * CPU-local state are performed first.  However, we must check for CPU
2788  * stalls first, else we might not get a chance.
2789  */
2790 static int rcu_pending(void)
2791 {
2792         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2793         struct rcu_node *rnp = rdp->mynode;
2794 
2795         /* Check for CPU stalls, if enabled. */
2796         check_cpu_stall(rdp);
2797 
2798         /* Does this CPU need a deferred NOCB wakeup? */
2799         if (rcu_nocb_need_deferred_wakeup(rdp))
2800                 return 1;
2801 
2802         /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
2803         if (rcu_nohz_full_cpu())
2804                 return 0;
2805 
2806         /* Is the RCU core waiting for a quiescent state from this CPU? */
2807         if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
2808                 return 1;
2809 
2810         /* Does this CPU have callbacks ready to invoke? */
2811         if (rcu_segcblist_ready_cbs(&rdp->cblist))
2812                 return 1;
2813 
2814         /* Has RCU gone idle with this CPU needing another grace period? */
2815         if (!rcu_gp_in_progress() &&
2816             rcu_segcblist_is_enabled(&rdp->cblist) &&
2817             (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
2818              !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
2819             !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2820                 return 1;
2821 
2822         /* Have RCU grace period completed or started?  */
2823         if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
2824             unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
2825                 return 1;
2826 
2827         /* nothing to do */
2828         return 0;
2829 }
2830 
2831 /*
2832  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
2833  * the compiler is expected to optimize this away.
2834  */
2835 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
2836 {
2837         trace_rcu_barrier(rcu_state.name, s, cpu,
2838                           atomic_read(&rcu_state.barrier_cpu_count), done);
2839 }
2840 
2841 /*
2842  * RCU callback function for rcu_barrier().  If we are last, wake
2843  * up the task executing rcu_barrier().
2844  */
2845 static void rcu_barrier_callback(struct rcu_head *rhp)
2846 {
2847         if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
2848                 rcu_barrier_trace(TPS("LastCB"), -1,
2849                                    rcu_state.barrier_sequence);
2850                 complete(&rcu_state.barrier_completion);
2851         } else {
2852                 rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
2853         }
2854 }
2855 
2856 /*
2857  * Called with preemption disabled, and from cross-cpu IRQ context.
2858  */
2859 static void rcu_barrier_func(void *unused)
2860 {
2861         struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2862 
2863         rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
2864         rdp->barrier_head.func = rcu_barrier_callback;
2865         debug_rcu_head_queue(&rdp->barrier_head);
2866         rcu_nocb_lock(rdp);
2867         WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
2868         if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
2869                 atomic_inc(&rcu_state.barrier_cpu_count);
2870         } else {
2871                 debug_rcu_head_unqueue(&rdp->barrier_head);
2872                 rcu_barrier_trace(TPS("IRQNQ"), -1,
2873                                    rcu_state.barrier_sequence);
2874         }
2875         rcu_nocb_unlock(rdp);
2876 }
2877 
2878 /**
2879  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
2880  *
2881  * Note that this primitive does not necessarily wait for an RCU grace period
2882  * to complete.  For example, if there are no RCU callbacks queued anywhere
2883  * in the system, then rcu_barrier() is within its rights to return
2884  * immediately, without waiting for anything, much less an RCU grace period.
2885  */
2886 void rcu_barrier(void)
2887 {
2888         int cpu;
2889         struct rcu_data *rdp;
2890         unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
2891 
2892         rcu_barrier_trace(TPS("Begin"), -1, s);
2893 
2894         /* Take mutex to serialize concurrent rcu_barrier() requests. */
2895         mutex_lock(&rcu_state.barrier_mutex);
2896 
2897         /* Did someone else do our work for us? */
2898         if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
2899                 rcu_barrier_trace(TPS("EarlyExit"), -1,
2900                                    rcu_state.barrier_sequence);
2901                 smp_mb(); /* caller's subsequent code after above check. */
2902                 mutex_unlock(&rcu_state.barrier_mutex);
2903                 return;
2904         }
2905 
2906         /* Mark the start of the barrier operation. */
2907         rcu_seq_start(&rcu_state.barrier_sequence);
2908         rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
2909 
2910         /*
2911          * Initialize the count to one rather than to zero in order to
2912          * avoid a too-soon return to zero in case of a short grace period
2913          * (or preemption of this task).  Exclude CPU-hotplug operations
2914          * to ensure that no offline CPU has callbacks queued.
2915          */
2916         init_completion(&rcu_state.barrier_completion);
2917         atomic_set(&rcu_state.barrier_cpu_count, 1);
2918         get_online_cpus();
2919 
2920         /*
2921          * Force each CPU with callbacks to register a new callback.
2922          * When that callback is invoked, we will know that all of the
2923          * corresponding CPU's preceding callbacks have been invoked.
2924          */
2925         for_each_possible_cpu(cpu) {
2926                 rdp = per_cpu_ptr(&rcu_data, cpu);
2927                 if (!cpu_online(cpu) &&
2928                     !rcu_segcblist_is_offloaded(&rdp->cblist))
2929                         continue;
2930                 if (rcu_segcblist_n_cbs(&rdp->cblist)) {
2931                         rcu_barrier_trace(TPS("OnlineQ"), cpu,
2932                                            rcu_state.barrier_sequence);
2933                         smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
2934                 } else {
2935                         rcu_barrier_trace(TPS("OnlineNQ"), cpu,
2936                                            rcu_state.barrier_sequence);
2937                 }
2938         }
2939         put_online_cpus();
2940 
2941         /*
2942          * Now that we have an rcu_barrier_callback() callback on each
2943          * CPU, and thus each counted, remove the initial count.
2944          */
2945         if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
2946                 complete(&rcu_state.barrier_completion);
2947 
2948         /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
2949         wait_for_completion(&rcu_state.barrier_completion);
2950 
2951         /* Mark the end of the barrier operation. */
2952         rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
2953         rcu_seq_end(&rcu_state.barrier_sequence);
2954 
2955         /* Other rcu_barrier() invocations can now safely proceed. */
2956         mutex_unlock(&rcu_state.barrier_mutex);
2957 }
2958 EXPORT_SYMBOL_GPL(rcu_barrier);
2959 
2960 /*
2961  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
2962  * first CPU in a given leaf rcu_node structure coming online.  The caller
2963  * must hold the corresponding leaf rcu_node ->lock with interrrupts
2964  * disabled.
2965  */
2966 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
2967 {
2968         long mask;
2969         long oldmask;
2970         struct rcu_node *rnp = rnp_leaf;
2971 
2972         raw_lockdep_assert_held_rcu_node(rnp_leaf);
2973         WARN_ON_ONCE(rnp->wait_blkd_tasks);
2974         for (;;) {
2975                 mask = rnp->grpmask;
2976                 rnp = rnp->parent;
2977                 if (rnp == NULL)
2978                         return;
2979                 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
2980                 oldmask = rnp->qsmaskinit;
2981                 rnp->qsmaskinit |= mask;
2982                 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
2983                 if (oldmask)
2984                         return;
2985         }
2986 }
2987 
2988 /*
2989  * Do boot-time initialization of a CPU's per-CPU RCU data.
2990  */
2991 static void __init
2992 rcu_boot_init_percpu_data(int cpu)
2993 {
2994         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2995 
2996         /* Set up local state, ensuring consistent view of global state. */
2997         rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
2998         WARN_ON_ONCE(rdp->dynticks_nesting != 1);
2999         WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
3000         rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3001         rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3002         rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3003         rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3004         rdp->cpu = cpu;
3005         rcu_boot_init_nocb_percpu_data(rdp);
3006 }
3007 
3008 /*
3009  * Invoked early in the CPU-online process, when pretty much all services
3010  * are available.  The incoming CPU is not present.
3011  *
3012  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
3013  * offline event can be happening at a given time.  Note also that we can
3014  * accept some slop in the rsp->gp_seq access due to the fact that this
3015  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3016  * And any offloaded callbacks are being numbered elsewhere.
3017  */
3018 int rcutree_prepare_cpu(unsigned int cpu)
3019 {
3020         unsigned long flags;
3021         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3022         struct rcu_node *rnp = rcu_get_root();
3023 
3024         /* Set up local state, ensuring consistent view of global state. */
3025         raw_spin_lock_irqsave_rcu_node(rnp, flags);
3026         rdp->qlen_last_fqs_check = 0;
3027         rdp->n_force_qs_snap = rcu_state.n_force_qs;
3028         rdp->blimit = blimit;
3029         if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
3030             !rcu_segcblist_is_offloaded(&rdp->cblist))
3031                 rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
3032         rdp->dynticks_nesting = 1;      /* CPU not up, no tearing. */
3033         rcu_dynticks_eqs_online();
3034         raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
3035 
3036         /*
3037          * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
3038          * propagation up the rcu_node tree will happen at the beginning
3039          * of the next grace period.
3040          */
3041         rnp = rdp->mynode;
3042         raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
3043         rdp->beenonline = true;  /* We have now been online. */
3044         rdp->gp_seq = rnp->gp_seq;
3045         rdp->gp_seq_needed = rnp->gp_seq;
3046         rdp->cpu_no_qs.b.norm = true;
3047         rdp->core_needs_qs = false;
3048         rdp->rcu_iw_pending = false;
3049         rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
3050         trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
3051         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3052         rcu_prepare_kthreads(cpu);
3053         rcu_spawn_cpu_nocb_kthread(cpu);
3054 
3055         return 0;
3056 }
3057 
3058 /*
3059  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
3060  */
3061 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3062 {
3063         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3064 
3065         rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3066 }
3067 
3068 /*
3069  * Near the end of the CPU-online process.  Pretty much all services
3070  * enabled, and the CPU is now very much alive.
3071  */
3072 int rcutree_online_cpu(unsigned int cpu)
3073 {
3074         unsigned long flags;
3075         struct rcu_data *rdp;
3076         struct rcu_node *rnp;
3077 
3078         rdp = per_cpu_ptr(&rcu_data, cpu);
3079         rnp = rdp->mynode;
3080         raw_spin_lock_irqsave_rcu_node(rnp, flags);
3081         rnp->ffmask |= rdp->grpmask;
3082         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3083         if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3084                 return 0; /* Too early in boot for scheduler work. */
3085         sync_sched_exp_online_cleanup(cpu);
3086         rcutree_affinity_setting(cpu, -1);
3087         return 0;
3088 }
3089 
3090 /*
3091  * Near the beginning of the process.  The CPU is still very much alive
3092  * with pretty much all services enabled.
3093  */
3094 int rcutree_offline_cpu(unsigned int cpu)
3095 {
3096         unsigned long flags;
3097         struct rcu_data *rdp;
3098         struct rcu_node *rnp;
3099 
3100         rdp = per_cpu_ptr(&rcu_data, cpu);
3101         rnp = rdp->mynode;
3102         raw_spin_lock_irqsave_rcu_node(rnp, flags);
3103         rnp->ffmask &= ~rdp->grpmask;
3104         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3105 
3106         rcutree_affinity_setting(cpu, cpu);
3107         return 0;
3108 }
3109 
3110 static DEFINE_PER_CPU(int, rcu_cpu_started);
3111 
3112 /*
3113  * Mark the specified CPU as being online so that subsequent grace periods
3114  * (both expedited and normal) will wait on it.  Note that this means that
3115  * incoming CPUs are not allowed to use RCU read-side critical sections
3116  * until this function is called.  Failing to observe this restriction
3117  * will result in lockdep splats.
3118  *
3119  * Note that this function is special in that it is invoked directly
3120  * from the incoming CPU rather than from the cpuhp_step mechanism.
3121  * This is because this function must be invoked at a precise location.
3122  */
3123 void rcu_cpu_starting(unsigned int cpu)
3124 {
3125         unsigned long flags;
3126         unsigned long mask;
3127         int nbits;
3128         unsigned long oldmask;
3129         struct rcu_data *rdp;
3130         struct rcu_node *rnp;
3131 
3132         if (per_cpu(rcu_cpu_started, cpu))
3133                 return;
3134 
3135         per_cpu(rcu_cpu_started, cpu) = 1;
3136 
3137         rdp = per_cpu_ptr(&rcu_data, cpu);
3138         rnp = rdp->mynode;
3139         mask = rdp->grpmask;
3140         raw_spin_lock_irqsave_rcu_node(rnp, flags);
3141         rnp->qsmaskinitnext |= mask;
3142         oldmask = rnp->expmaskinitnext;
3143         rnp->expmaskinitnext |= mask;
3144         oldmask ^= rnp->expmaskinitnext;
3145         nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
3146         /* Allow lockless access for expedited grace periods. */
3147         smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
3148         rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
3149         rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
3150         rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
3151         if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
3152                 /* Report QS -after- changing ->qsmaskinitnext! */
3153                 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
3154         } else {
3155                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3156         }
3157         smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
3158 }
3159 
3160 #ifdef CONFIG_HOTPLUG_CPU
3161 /*
3162  * The outgoing function has no further need of RCU, so remove it from
3163  * the rcu_node tree's ->qsmaskinitnext bit masks.
3164  *
3165  * Note that this function is special in that it is invoked directly
3166  * from the outgoing CPU rather than from the cpuhp_step mechanism.
3167  * This is because this function must be invoked at a precise location.
3168  */
3169 void rcu_report_dead(unsigned int cpu)
3170 {
3171         unsigned long flags;
3172         unsigned long mask;
3173         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3174         struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
3175 
3176         /* QS for any half-done expedited grace period. */
3177         preempt_disable();
3178         rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
3179         preempt_enable();
3180         rcu_preempt_deferred_qs(current);
3181 
3182         /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
3183         mask = rdp->grpmask;
3184         raw_spin_lock(&rcu_state.ofl_lock);
3185         raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
3186         rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
3187         rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
3188         if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
3189                 /* Report quiescent state -before- changing ->qsmaskinitnext! */
3190                 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
3191                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3192         }
3193         rnp->qsmaskinitnext &= ~mask;
3194         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3195         raw_spin_unlock(&rcu_state.ofl_lock);
3196 
3197         per_cpu(rcu_cpu_started, cpu) = 0;
3198 }
3199 
3200 /*
3201  * The outgoing CPU has just passed through the dying-idle state, and we
3202  * are being invoked from the CPU that was IPIed to continue the offline
3203  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
3204  */
3205 void rcutree_migrate_callbacks(int cpu)
3206 {
3207         unsigned long flags;
3208         struct rcu_data *my_rdp;
3209         struct rcu_node *my_rnp;
3210         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3211         bool needwake;
3212 
3213         if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
3214             rcu_segcblist_empty(&rdp->cblist))
3215                 return;  /* No callbacks to migrate. */
3216 
3217         local_irq_save(flags);
3218         my_rdp = this_cpu_ptr(&rcu_data);
3219         my_rnp = my_rdp->mynode;
3220         rcu_nocb_lock(my_rdp); /* irqs already disabled. */
3221         WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
3222         raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
3223         /* Leverage recent GPs and set GP for new callbacks. */
3224         needwake = rcu_advance_cbs(my_rnp, rdp) ||
3225                    rcu_advance_cbs(my_rnp, my_rdp);
3226         rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
3227         needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
3228         rcu_segcblist_disable(&rdp->cblist);
3229         WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
3230                      !rcu_segcblist_n_cbs(&my_rdp->cblist));
3231         if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
3232                 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
3233                 __call_rcu_nocb_wake(my_rdp, true, flags);
3234         } else {
3235                 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
3236                 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
3237         }
3238         if (needwake)
3239                 rcu_gp_kthread_wake();
3240         lockdep_assert_irqs_enabled();
3241         WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
3242                   !rcu_segcblist_empty(&rdp->cblist),
3243                   "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
3244                   cpu, rcu_segcblist_n_cbs(&rdp->cblist),
3245                   rcu_segcblist_first_cb(&rdp->cblist));
3246 }
3247 #endif
3248 
3249 /*
3250  * On non-huge systems, use expedited RCU grace periods to make suspend
3251  * and hibernation run faster.
3252  */
3253 static int rcu_pm_notify(struct notifier_block *self,
3254                          unsigned long action, void *hcpu)
3255 {
3256         switch (action) {
3257         case PM_HIBERNATION_PREPARE:
3258         case PM_SUSPEND_PREPARE:
3259                 rcu_expedite_gp();
3260                 break;
3261         case PM_POST_HIBERNATION:
3262         case PM_POST_SUSPEND:
3263                 rcu_unexpedite_gp();
3264                 break;
3265         default:
3266                 break;
3267         }
3268         return NOTIFY_OK;
3269 }
3270 
3271 /*
3272  * Spawn the kthreads that handle RCU's grace periods.
3273  */
3274 static int __init rcu_spawn_gp_kthread(void)
3275 {
3276         unsigned long flags;
3277         int kthread_prio_in = kthread_prio;
3278         struct rcu_node *rnp;
3279         struct sched_param sp;
3280         struct task_struct *t;
3281 
3282         /* Force priority into range. */
3283         if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
3284             && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
3285                 kthread_prio = 2;
3286         else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3287                 kthread_prio = 1;
3288         else if (kthread_prio < 0)
3289                 kthread_prio = 0;
3290         else if (kthread_prio > 99)
3291                 kthread_prio = 99;
3292 
3293         if (kthread_prio != kthread_prio_in)
3294                 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3295                          kthread_prio, kthread_prio_in);
3296 
3297         rcu_scheduler_fully_active = 1;
3298         t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
3299         if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
3300                 return 0;
3301         if (kthread_prio) {
3302                 sp.sched_priority = kthread_prio;
3303                 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3304         }
3305         rnp = rcu_get_root();
3306         raw_spin_lock_irqsave_rcu_node(rnp, flags);
3307         rcu_state.gp_kthread = t;
3308         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3309         wake_up_process(t);
3310         rcu_spawn_nocb_kthreads();
3311         rcu_spawn_boost_kthreads();
3312         return 0;
3313 }
3314 early_initcall(rcu_spawn_gp_kthread);
3315 
3316 /*
3317  * This function is invoked towards the end of the scheduler's
3318  * initialization process.  Before this is called, the idle task might
3319  * contain synchronous grace-period primitives (during which time, this idle
3320  * task is booting the system, and such primitives are no-ops).  After this
3321  * function is called, any synchronous grace-period primitives are run as
3322  * expedited, with the requesting task driving the grace period forward.
3323  * A later core_initcall() rcu_set_runtime_mode() will switch to full
3324  * runtime RCU functionality.
3325  */
3326 void rcu_scheduler_starting(void)
3327 {
3328         WARN_ON(num_online_cpus() != 1);
3329         WARN_ON(nr_context_switches() > 0);
3330         rcu_test_sync_prims();
3331         rcu_scheduler_active = RCU_SCHEDULER_INIT;
3332         rcu_test_sync_prims();
3333 }
3334 
3335 /*
3336  * Helper function for rcu_init() that initializes the rcu_state structure.
3337  */
3338 static void __init rcu_init_one(void)
3339 {
3340         static const char * const buf[] = RCU_NODE_NAME_INIT;
3341         static const char * const fqs[] = RCU_FQS_NAME_INIT;
3342         static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
3343         static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
3344 
3345         int levelspread[RCU_NUM_LVLS];          /* kids/node in each level. */
3346         int cpustride = 1;
3347         int i;
3348         int j;
3349         struct rcu_node *rnp;
3350 
3351         BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3352 
3353         /* Silence gcc 4.8 false positive about array index out of range. */
3354         if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
3355                 panic("rcu_init_one: rcu_num_lvls out of range");
3356 
3357         /* Initialize the level-tracking arrays. */
3358 
3359         for (i = 1; i < rcu_num_lvls; i++)
3360                 rcu_state.level[i] =
3361                         rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
3362         rcu_init_levelspread(levelspread, num_rcu_lvl);
3363 
3364         /* Initialize the elements themselves, starting from the leaves. */
3365 
3366         for (i = rcu_num_lvls - 1; i >= 0; i--) {
3367                 cpustride *= levelspread[i];
3368                 rnp = rcu_state.level[i];
3369                 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
3370                         raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
3371                         lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
3372                                                    &rcu_node_class[i], buf[i]);
3373                         raw_spin_lock_init(&rnp->fqslock);
3374                         lockdep_set_class_and_name(&rnp->fqslock,
3375                                                    &rcu_fqs_class[i], fqs[i]);
3376                         rnp->gp_seq = rcu_state.gp_seq;
3377                         rnp->gp_seq_needed = rcu_state.gp_seq;
3378                         rnp->completedqs = rcu_state.gp_seq;
3379                         rnp->qsmask = 0;
3380                         rnp->qsmaskinit = 0;
3381                         rnp->grplo = j * cpustride;
3382                         rnp->grphi = (j + 1) * cpustride - 1;
3383                         if (rnp->grphi >= nr_cpu_ids)
3384                                 rnp->grphi = nr_cpu_ids - 1;
3385                         if (i == 0) {
3386                                 rnp->grpnum = 0;
3387                                 rnp->grpmask = 0;
3388                                 rnp->parent = NULL;
3389                         } else {
3390                                 rnp->grpnum = j % levelspread[i - 1];
3391                                 rnp->grpmask = BIT(rnp->grpnum);
3392                                 rnp->parent = rcu_state.level[i - 1] +
3393                                               j / levelspread[i - 1];
3394                         }
3395                         rnp->level = i;
3396                         INIT_LIST_HEAD(&rnp->blkd_tasks);
3397                         rcu_init_one_nocb(rnp);
3398                         init_waitqueue_head(&rnp->exp_wq[0]);
3399                         init_waitqueue_head(&rnp->exp_wq[1]);
3400                         init_waitqueue_head(&rnp->exp_wq[2]);
3401                         init_waitqueue_head(&rnp->exp_wq[3]);
3402                         spin_lock_init(&rnp->exp_lock);
3403                 }
3404         }
3405 
3406         init_swait_queue_head(&rcu_state.gp_wq);
3407         init_swait_queue_head(&rcu_state.expedited_wq);
3408         rnp = rcu_first_leaf_node();
3409         for_each_possible_cpu(i) {
3410                 while (i > rnp->grphi)
3411                         rnp++;
3412                 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
3413                 rcu_boot_init_percpu_data(i);
3414         }
3415 }
3416 
3417 /*
3418  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3419  * replace the definitions in tree.h because those are needed to size
3420  * the ->node array in the rcu_state structure.
3421  */
3422 static void __init rcu_init_geometry(void)
3423 {
3424         ulong d;
3425         int i;
3426         int rcu_capacity[RCU_NUM_LVLS];
3427 
3428         /*
3429          * Initialize any unspecified boot parameters.
3430          * The default values of jiffies_till_first_fqs and
3431          * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3432          * value, which is a function of HZ, then adding one for each
3433          * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3434          */
3435         d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3436         if (jiffies_till_first_fqs == ULONG_MAX)
3437                 jiffies_till_first_fqs = d;
3438         if (jiffies_till_next_fqs == ULONG_MAX)
3439                 jiffies_till_next_fqs = d;
3440         adjust_jiffies_till_sched_qs();
3441 
3442         /* If the compile-time values are accurate, just leave. */
3443         if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
3444             nr_cpu_ids == NR_CPUS)
3445                 return;
3446         pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
3447                 rcu_fanout_leaf, nr_cpu_ids);
3448 
3449         /*
3450          * The boot-time rcu_fanout_leaf parameter must be at least two
3451          * and cannot exceed the number of bits in the rcu_node masks.
3452          * Complain and fall back to the compile-time values if this
3453          * limit is exceeded.
3454          */
3455         if (rcu_fanout_leaf < 2 ||
3456             rcu_fanout_leaf > sizeof(unsigned long) * 8) {
3457                 rcu_fanout_leaf = RCU_FANOUT_LEAF;
3458                 WARN_ON(1);
3459                 return;
3460         }
3461 
3462         /*
3463          * Compute number of nodes that can be handled an rcu_node tree
3464          * with the given number of levels.
3465          */
3466         rcu_capacity[0] = rcu_fanout_leaf;
3467         for (i = 1; i < RCU_NUM_LVLS; i++)
3468                 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
3469 
3470         /*
3471          * The tree must be able to accommodate the configured number of CPUs.
3472          * If this limit is exceeded, fall back to the compile-time values.
3473          */
3474         if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
3475                 rcu_fanout_leaf = RCU_FANOUT_LEAF;
3476                 WARN_ON(1);
3477                 return;
3478         }
3479 
3480         /* Calculate the number of levels in the tree. */
3481         for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
3482         }
3483         rcu_num_lvls = i + 1;
3484 
3485         /* Calculate the number of rcu_nodes at each level of the tree. */
3486         for (i = 0; i < rcu_num_lvls; i++) {
3487                 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
3488                 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
3489         }
3490 
3491         /* Calculate the total number of rcu_node structures. */
3492         rcu_num_nodes = 0;
3493         for (i = 0; i < rcu_num_lvls; i++)
3494                 rcu_num_nodes += num_rcu_lvl[i];
3495 }
3496 
3497 /*
3498  * Dump out the structure of the rcu_node combining tree associated
3499  * with the rcu_state structure.
3500  */
3501 static void __init rcu_dump_rcu_node_tree(void)
3502 {
3503         int level = 0;
3504         struct rcu_node *rnp;
3505 
3506         pr_info("rcu_node tree layout dump\n");
3507         pr_info(" ");
3508         rcu_for_each_node_breadth_first(rnp) {
3509                 if (rnp->level != level) {
3510                         pr_cont("\n");
3511                         pr_info(" ");
3512                         level = rnp->level;
3513                 }
3514                 pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
3515         }
3516         pr_cont("\n");
3517 }
3518 
3519 struct workqueue_struct *rcu_gp_wq;
3520 struct workqueue_struct *rcu_par_gp_wq;
3521 
3522 void __init rcu_init(void)
3523 {
3524         int cpu;
3525 
3526         rcu_early_boot_tests();
3527 
3528         rcu_bootup_announce();
3529         rcu_init_geometry();
3530         rcu_init_one();
3531         if (dump_tree)
3532                 rcu_dump_rcu_node_tree();
3533         if (use_softirq)
3534                 open_softirq(RCU_SOFTIRQ, rcu_core_si);
3535 
3536         /*
3537          * We don't need protection against CPU-hotplug here because
3538          * this is called early in boot, before either interrupts
3539          * or the scheduler are operational.
3540          */
3541         pm_notifier(rcu_pm_notify, 0);
3542         for_each_online_cpu(cpu) {
3543                 rcutree_prepare_cpu(cpu);
3544                 rcu_cpu_starting(cpu);
3545                 rcutree_online_cpu(cpu);
3546         }
3547 
3548         /* Create workqueue for expedited GPs and for Tree SRCU. */
3549         rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
3550         WARN_ON(!rcu_gp_wq);
3551         rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
3552         WARN_ON(!rcu_par_gp_wq);
3553         srcu_init();
3554 }
3555 
3556 #include "tree_stall.h"
3557 #include "tree_exp.h"
3558 #include "tree_plugin.h"

/* [<][>][^][v][top][bottom][index][help] */