root/kernel/rcu/tree_stall.h

DEFINITIONS

This source file includes the following definitions.
  1. rcu_jiffies_till_stall_check
  2. rcu_sysrq_start
  3. rcu_sysrq_end
  4. rcu_panic
  5. check_cpu_stall_init
  6. panic_on_rcu_stall
  7. rcu_cpu_stall_reset
  8. record_gp_stall_check_time
  9. zero_cpu_stall_ticks
  10. rcu_stall_kick_kthreads
  11. rcu_iw_handler
  12. rcu_print_detail_task_stall_rnp
  13. rcu_print_task_stall
  14. rcu_print_detail_task_stall_rnp
  15. rcu_print_task_stall
  16. rcu_dump_cpu_stacks
  17. print_cpu_stall_fast_no_hz
  18. print_cpu_stall_fast_no_hz
  19. print_cpu_stall_info
  20. rcu_check_gp_kthread_starvation
  21. print_other_cpu_stall
  22. print_cpu_stall
  23. check_cpu_stall
  24. show_rcu_gp_kthreads
  25. rcu_check_gp_start_stall
  26. rcu_fwd_progress_check
  27. sysrq_show_rcu
  28. rcu_sysrq_init

// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA          (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA          0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
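
/*
 * Editorial note (not in the original source): a quick worked example.
 * With the default CONFIG_RCU_CPU_STALL_TIMEOUT of 21 seconds and
 * HZ=1000, rcu_jiffies_till_stall_check() returns 21 * 1000 = 21000
 * jiffies, plus a further 5 * HZ = 5000 jiffies of slack on
 * CONFIG_PROVE_RCU kernels, whose debug checking slows everything down.
 * An out-of-range runtime setting such as
 *
 *	echo 500 > /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout
 *
 * is clamped back to 300 seconds the next time this function runs.
 */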

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}
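
/*
 * Editorial note (not in the original source): suppress level 2 means
 * "suppressed only for the duration of a sysrq dump", so rcu_sysrq_end()
 * can restore warnings without clobbering a user-requested suppression
 * (level 1, as set via the rcupdate.rcu_cpu_stall_suppress parameter or
 * by rcu_panic() below).  The callers are the sysrq machinery in
 * drivers/tty/sysrq.c, which brackets long key-handler printouts with
 * this pair of functions.
 */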

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
}
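
/*
 * Editorial usage note (not in the original source): the sysctl behind
 * sysctl_panic_on_rcu_stall is kernel.panic_on_rcu_stall, so a system
 * that should crash (and, say, trigger kdump) on the first stall can
 * be configured with:
 *
 *	echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 */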

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
        WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
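
/*
 * Editorial note (not in the original source): in the kernel's wrapping
 * jiffies arithmetic, ULONG_MAX / 2 is the largest offset that still
 * compares as "in the future", so this pushes ->jiffies_stall out as far
 * as possible.  A minimal sketch of the comparison involved, assuming
 * 32-bit unsigned long and HZ=1000:
 *
 *	unsigned long js = jiffies + ULONG_MAX / 2;	// 0x7fffffff ahead
 *	ULONG_CMP_GE(jiffies, js);			// false for ~24.8 days
 *
 * so check_cpu_stall() below keeps bailing out early and no warning is
 * printed for the current set of grace periods.
 */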

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
        unsigned long j = jiffies;
        unsigned long j1;

        rcu_state.gp_start = j;
        j1 = rcu_jiffies_till_stall_check();
        /* Record ->gp_start before ->jiffies_stall. */
        smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
        rcu_state.jiffies_resched = j + j1 / 2;
        rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
        rdp->ticks_this_gp = 0;
        rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
        unsigned long j;

        if (!rcu_kick_kthreads)
                return;
        j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
        if (time_after(jiffies, j) && rcu_state.gp_kthread &&
            (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
                WARN_ONCE(1, "Kicking %s grace-period kthread\n",
                          rcu_state.name);
                rcu_ftrace_dump(DUMP_ALL);
                wake_up_process(rcu_state.gp_kthread);
                WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
        }
}
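
/*
 * Editorial note (not in the original source): rcu_kick_kthreads is a
 * module parameter (rcupdate.rcu_kick_kthreads), intended as a debug
 * workaround for lost grace-period kthread wakeups; the WARN_ONCE() and
 * the ftrace dump make any use of it deliberately noisy.  Updating
 * ->jiffies_kick_kthreads to j + HZ rate-limits the kick to roughly
 * once per second.
 */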

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = container_of(iwp, struct rcu_data, rcu_iw);
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
                rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
}
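
/*
 * Editorial note (not in the original source): the posting side lives in
 * tree.c (rcu_implicit_dynticks_qs()), which queues ->rcu_iw on the
 * stalled CPU partway through the stall timeout.  If this handler runs,
 * ->rcu_iw_gp_seq catches up with the rcu_node's ->gp_seq and
 * print_cpu_stall_info() below prints "." for the irq-work flag; if it
 * stays queued, a digit or "!" appears instead, fingering a CPU that is
 * likely looping with interrupts disabled.
 */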

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPTION

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                /*
                 * We could be printing a lot while holding a spinlock.
                 * Avoid triggering hard lockup.
                 */
                touch_nmi_watchdog();
                sched_show_task(t);
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        pr_cont("\n");
        return ndetected;
}
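
/*
 * Editorial example (not in the original source): on a preemptible
 * kernel, the resulting console line might read
 *
 *	Tasks blocked on level-0 rcu_node (CPUs 0-3): P12 P57
 *
 * meaning that tasks with PIDs 12 and 57 were preempted inside RCU
 * read-side critical sections and are still blocking the current
 * grace period.
 */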

#else /* #ifdef CONFIG_PREEMPTION */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPTION */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                for_each_leaf_node_possible_cpu(rnp, cpu)
                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
                                if (!trigger_single_cpu_backtrace(cpu))
                                        dump_cpu_task(cpu);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

        sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
                ".l"[rdp->all_lazy],
                ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
                ".D"[!!rdp->tick_nohz_enabled_snap]);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        *cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
        unsigned long delta;
        char fast_no_hz[72];
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        char *ticks_title;
        unsigned long ticks_value;

        /*
         * We could be printing a lot while holding a spinlock.  Avoid
         * triggering hard lockup.
         */
        touch_nmi_watchdog();

        ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
        if (ticks_value) {
                ticks_title = "GPs behind";
        } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
        delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
        pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
               "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
               !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
                        rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                                "!."[!delta],
               ticks_value, ticks_title,
               rcu_dynticks_snap(rdp) & 0xfff,
               rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
               fast_no_hz);
}
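
/*
 * Editorial example (not in the original source): a line emitted by the
 * function above might look like
 *
 *	3-...!: (0 ticks this GP) idle=3ba/1/0x4000000000000000 softirq=2167/2167 fqs=1
 *
 * which decodes, per the flag strings above, as: CPU 3 is online ("O"
 * would mean offline), was present at grace-period start ("o" would
 * mean absent), is still present ("N" would mean absent), and its
 * rcu_iw irq_work has not executed during this grace period ("!"),
 * which suggests the CPU is running with interrupts disabled.  The
 * equal softirq=2167/2167 counts say no RCU softirq handlers have run
 * since the grace period began.
 */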

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
        struct task_struct *gpk = rcu_state.gp_kthread;
        unsigned long j;

        j = jiffies - READ_ONCE(rcu_state.gp_activity);
        if (j > 2 * HZ) {
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       READ_ONCE(rcu_state.gp_flags),
                       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
                       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
                if (gpk) {
                        pr_err("RCU grace-period kthread stack dump:\n");
                        sched_show_task(gpk);
                        wake_up_process(gpk);
                }
        }
}

static void print_other_cpu_stall(unsigned long gp_seq)
{
        int cpu;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp;
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
                        for_each_leaf_node_possible_cpu(rnp, cpu)
                                if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
                                        print_cpu_stall_info(cpu);
                                        ndetected++;
                                }
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks();

                /* Complain about tasks blocking the grace period. */
                rcu_for_each_leaf_node(rnp)
                        rcu_print_detail_task_stall_rnp(rnp);
        } else {
                if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
                        gpa = READ_ONCE(rcu_state.gp_activity);
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
                               READ_ONCE(jiffies_till_next_fqs),
                               rcu_get_root()->qsmask);
                        /* In this case, the current CPU might be at fault. */
                        sched_show_task(current);
                }
        }
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

        rcu_check_gp_kthread_starvation();

        panic_on_rcu_stall();

        rcu_force_quiescent_state();  /* Kick them all. */
}
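
/*
 * Editorial example (not in the original source): the report produced
 * above opens with lines such as
 *
 *	INFO: rcu_preempt detected stalls on CPUs/tasks:
 *	<per-CPU and per-task detail>
 *	(detected by 0, t=5252 jiffies, g=2021, q=39)
 *
 * where "detected by" names the reporting CPU, t counts jiffies since
 * the grace period started, g is the current gp_seq value, and q is
 * the total number of callbacks queued across all CPUs.  On !PREEMPT
 * kernels the flavor name is "rcu_sched" instead.
 */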

static void print_cpu_stall(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rcu_get_root();
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info(smp_processor_id());
        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - rcu_state.gp_start,
                (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

        rcu_check_gp_kthread_starvation();

        rcu_dump_cpu_stacks();

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        panic_on_rcu_stall();

        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
         *
         * A context switch would normally allow the RCU state machine to make
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
        unsigned long j;
        unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;

        if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
            !rcu_gp_in_progress())
                return;
        rcu_stall_kick_kthreads();
        j = jiffies;

        /*
         * Lots of memory barriers to reject false positives.
         *
         * The idea is to pick up rcu_state.gp_seq, then
         * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
         * another copy of rcu_state.gp_seq.  These values are updated in
         * the opposite order with memory barriers (or equivalent) during
         * grace-period initialization and cleanup.  Now, a false positive
         * can occur if we get a new value of rcu_state.gp_start and an old
         * value of rcu_state.jiffies_stall.  But given the memory barriers,
         * the only way that this can happen is if one grace period ends
         * and another starts between these two fetches.  This is detected
         * by comparing the second fetch of rcu_state.gp_seq with the
         * previous fetch from rcu_state.gp_seq.
         *
         * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
         * and rcu_state.gp_start suffice to forestall false positives.
         */
        gs1 = READ_ONCE(rcu_state.gp_seq);
        smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rcu_state.jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rcu_state.gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
        gs2 = READ_ONCE(rcu_state.gp_seq);
        if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall();
                if (rcu_cpu_stall_ftrace_dump)
                        rcu_ftrace_dump(DUMP_ALL);

        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2);
                if (rcu_cpu_stall_ftrace_dump)
                        rcu_ftrace_dump(DUMP_ALL);
        }
}
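
/*
 * Editorial sketch (not in the original source): the fetch/re-check
 * sequence above resembles a seqcount read side.  In schematic form,
 * for a writer that updates B and then A with barriers in between:
 *
 *	a1 = READ_ONCE(A);
 *	smp_rmb();
 *	b  = READ_ONCE(B);
 *	smp_rmb();
 *	a2 = READ_ONCE(A);
 *	if (a1 != a2)
 *		return;		// A changed underfoot, so b may be stale.
 *
 * RCU_STALL_RAT_DELAY (2 jiffies, defined in tree.h) then gives a
 * stalled CPU a brief head start to report on itself via
 * print_cpu_stall() before some other CPU rats on it via
 * print_other_cpu_stall().
 */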

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
        int cpu;
        unsigned long j;
        unsigned long ja;
        unsigned long jr;
        unsigned long jw;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        j = jiffies;
        ja = j - READ_ONCE(rcu_state.gp_activity);
        jr = j - READ_ONCE(rcu_state.gp_req_activity);
        jw = j - READ_ONCE(rcu_state.gp_wake_time);
        pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
                rcu_state.gp_state,
                rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
                ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
                (long)READ_ONCE(rcu_state.gp_seq),
                (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
                READ_ONCE(rcu_state.gp_flags));
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
                        rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
                        (long)rnp->gp_seq_needed);
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        rdp = per_cpu_ptr(&rcu_data, cpu);
                        if (rdp->gpwrap ||
                            ULONG_CMP_GE(rcu_state.gp_seq,
                                         rdp->gp_seq_needed))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
                                cpu, (long)rdp->gp_seq_needed);
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
        /* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
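
/*
 * Editorial note (not in the original source): show_rcu_gp_kthreads()
 * is exported for rcutorture's use and is also reachable at runtime
 * through the sysrq 'y' key registered at the bottom of this file.
 * The 0x1ffffL placeholder is printed as ->state when the grace-period
 * kthread has not yet been spawned.
 */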

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
                                     const unsigned long gpssdelay)
{
        unsigned long flags;
        unsigned long j;
        struct rcu_node *rnp_root = rcu_get_root();
        static atomic_t warned = ATOMIC_INIT(0);

        if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
                return;
        j = jiffies; /* Expensive access, and in common case don't get here. */
        if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned))
                return;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
            time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        /* Hold onto the leaf lock to make others see warned==1. */

        if (rnp_root != rnp)
                raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
            time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
            time_before(j, rcu_state.gp_activity + gpssdelay) ||
            atomic_xchg(&warned, 1)) {
                if (rnp_root != rnp)
                        /* irqs remain disabled. */
                        raw_spin_unlock_rcu_node(rnp_root);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WARN_ON(1);
        if (rnp_root != rnp)
                raw_spin_unlock_rcu_node(rnp_root);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
        unsigned long cbs;
        int cpu;
        unsigned long max_cbs = 0;
        int max_cpu = -1;
        struct rcu_data *rdp;

        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
                        __func__, jiffies - rcu_state.gp_start);
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
                        __func__, jiffies - rcu_state.gp_end);
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
                preempt_enable();
        }
        for_each_possible_cpu(cpu) {
                cbs = rcu_get_n_cbs_cpu(cpu);
                if (!cbs)
                        continue;
                if (max_cpu < 0)
                        pr_info("%s: callbacks", __func__);
                pr_cont(" %d: %lu", cpu, cbs);
                if (cbs <= max_cbs)
                        continue;
                max_cbs = cbs;
                max_cpu = cpu;
        }
        if (max_cpu >= 0)
                pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
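
/*
 * Editorial example (not in the original source): a typical OOM-time
 * report from the function above might read
 *
 *	rcu_fwd_progress_check: GP age 893 jiffies
 *	rcu_fwd_progress_check: callbacks 0: 7417 2: 12003
 *
 * listing only CPUs with nonzero callback counts.  Note that the
 * terminating newline is emitted only when at least one such CPU was
 * found (max_cpu >= 0).
 */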
 696 
 697 /* Commandeer a sysrq key to dump RCU's tree. */
 698 static bool sysrq_rcu;
 699 module_param(sysrq_rcu, bool, 0444);
 700 
 701 /* Dump grace-period-request information due to commandeered sysrq. */
 702 static void sysrq_show_rcu(int key)
 703 {
 704         show_rcu_gp_kthreads();
 705 }
 706 
 707 static struct sysrq_key_op sysrq_rcudump_op = {
 708         .handler = sysrq_show_rcu,
 709         .help_msg = "show-rcu(y)",
 710         .action_msg = "Show RCU tree",
 711         .enable_mask = SYSRQ_ENABLE_DUMP,
 712 };
 713 
 714 static int __init rcu_sysrq_init(void)
 715 {
 716         if (sysrq_rcu)
 717                 return register_sysrq_key('y', &sysrq_rcudump_op);
 718         return 0;
 719 }
 720 early_initcall(rcu_sysrq_init);
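
/*
 * Editorial usage note (not in the original source): because this file
 * is included from tree.c, the parameter shows up as rcutree.sysrq_rcu,
 * and its 0444 permissions mean it can only be set at boot.  Assuming a
 * kernel booted with "rcutree.sysrq_rcu=1", the tree can then be dumped
 * on demand:
 *
 *	# echo y > /proc/sysrq-trigger
 *
 * which invokes sysrq_show_rcu() and thus show_rcu_gp_kthreads().
 */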
