Lines Matching refs:rnp

160 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
226 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) in rcu_rnp_online_cpus() argument
228 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
408 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
596 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_future_needs_gp() local
597 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; in rcu_future_needs_gp()
598 int *fp = &rnp->need_future_gp[idx]; in rcu_future_needs_gp()
1033 struct rcu_node *rnp; in rcu_lockdep_current_cpu_online() local
1040 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1041 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) || in rcu_lockdep_current_cpu_online()
1214 struct rcu_node *rnp; in rcu_dump_cpu_stacks() local
1216 rcu_for_each_leaf_node(rsp, rnp) { in rcu_dump_cpu_stacks()
1217 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_dump_cpu_stacks()
1218 if (rnp->qsmask != 0) { in rcu_dump_cpu_stacks()
1219 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) in rcu_dump_cpu_stacks()
1220 if (rnp->qsmask & (1UL << cpu)) in rcu_dump_cpu_stacks()
1221 dump_cpu_task(rnp->grplo + cpu); in rcu_dump_cpu_stacks()
1223 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_dump_cpu_stacks()
1235 struct rcu_node *rnp = rcu_get_root(rsp); in print_other_cpu_stall() local
1240 raw_spin_lock_irqsave(&rnp->lock, flags); in print_other_cpu_stall()
1243 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_other_cpu_stall()
1248 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_other_cpu_stall()
1258 rcu_for_each_leaf_node(rsp, rnp) { in print_other_cpu_stall()
1259 raw_spin_lock_irqsave(&rnp->lock, flags); in print_other_cpu_stall()
1260 ndetected += rcu_print_task_stall(rnp); in print_other_cpu_stall()
1261 if (rnp->qsmask != 0) { in print_other_cpu_stall()
1262 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) in print_other_cpu_stall()
1263 if (rnp->qsmask & (1UL << cpu)) { in print_other_cpu_stall()
1265 rnp->grplo + cpu); in print_other_cpu_stall()
1269 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_other_cpu_stall()
1308 struct rcu_node *rnp = rcu_get_root(rsp); in print_cpu_stall() local
1330 raw_spin_lock_irqsave(&rnp->lock, flags); in print_cpu_stall()
1334 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_cpu_stall()
1353 struct rcu_node *rnp; in check_cpu_stall() local
1387 rnp = rdp->mynode; in check_cpu_stall()
1389 (READ_ONCE(rnp->qsmask) & rdp->grpmask)) { in check_cpu_stall()
1453 struct rcu_node *rnp) in rcu_cbs_completed() argument
1462 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed) in rcu_cbs_completed()
1463 return rnp->completed + 1; in rcu_cbs_completed()
1469 return rnp->completed + 2; in rcu_cbs_completed()
1476 static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_future_gp() argument
1479 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, in trace_rcu_future_gp()
1480 rnp->completed, c, rnp->level, in trace_rcu_future_gp()
1481 rnp->grplo, rnp->grphi, s); in trace_rcu_future_gp()
1493 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, in rcu_start_future_gp() argument
1505 c = rcu_cbs_completed(rdp->rsp, rnp); in rcu_start_future_gp()
1506 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf")); in rcu_start_future_gp()
1507 if (rnp->need_future_gp[c & 0x1]) { in rcu_start_future_gp()
1508 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf")); in rcu_start_future_gp()
1525 if (rnp->gpnum != rnp->completed || in rcu_start_future_gp()
1527 rnp->need_future_gp[c & 0x1]++; in rcu_start_future_gp()
1528 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf")); in rcu_start_future_gp()
1537 if (rnp != rnp_root) { in rcu_start_future_gp()
1558 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot")); in rcu_start_future_gp()
1567 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot")); in rcu_start_future_gp()
1569 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot")); in rcu_start_future_gp()
1573 if (rnp != rnp_root) in rcu_start_future_gp()
1587 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1589 int c = rnp->completed; in rcu_future_gp_cleanup()
1593 rcu_nocb_gp_cleanup(rsp, rnp); in rcu_future_gp_cleanup()
1594 rnp->need_future_gp[c & 0x1] = 0; in rcu_future_gp_cleanup()
1595 needmore = rnp->need_future_gp[(c + 1) & 0x1]; in rcu_future_gp_cleanup()
1596 trace_rcu_future_gp(rnp, rdp, c, in rcu_future_gp_cleanup()
1629 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_accelerate_cbs() argument
1654 c = rcu_cbs_completed(rsp, rnp); in rcu_accelerate_cbs()
1679 ret = rcu_start_future_gp(rnp, rdp, NULL); in rcu_accelerate_cbs()
1699 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_advance_cbs() argument
1713 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i])) in rcu_advance_cbs()
1730 return rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_advance_cbs()
1739 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, in __note_gp_changes() argument
1745 if (rdp->completed == rnp->completed && in __note_gp_changes()
1749 ret = rcu_accelerate_cbs(rsp, rnp, rdp); in __note_gp_changes()
1754 ret = rcu_advance_cbs(rsp, rnp, rdp); in __note_gp_changes()
1757 rdp->completed = rnp->completed; in __note_gp_changes()
1761 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1767 rdp->gpnum = rnp->gpnum; in __note_gp_changes()
1771 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1782 struct rcu_node *rnp; in note_gp_changes() local
1785 rnp = rdp->mynode; in note_gp_changes()
1786 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) && in note_gp_changes()
1787 rdp->completed == READ_ONCE(rnp->completed) && in note_gp_changes()
1789 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ in note_gp_changes()
1794 needwake = __note_gp_changes(rsp, rnp, rdp); in note_gp_changes()
1795 raw_spin_unlock_irqrestore(&rnp->lock, flags); in note_gp_changes()
1814 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_init() local
1817 raw_spin_lock_irq(&rnp->lock); in rcu_gp_init()
1821 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1831 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1840 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1848 rcu_for_each_leaf_node(rsp, rnp) { in rcu_gp_init()
1850 raw_spin_lock_irq(&rnp->lock); in rcu_gp_init()
1852 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1853 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1855 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1860 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1861 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1864 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1866 rcu_init_new_rnp(rnp); in rcu_gp_init()
1867 else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */ in rcu_gp_init()
1868 rnp->wait_blkd_tasks = true; in rcu_gp_init()
1870 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1882 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1883 (!rcu_preempt_has_tasks(rnp) || in rcu_gp_init()
1884 rnp->qsmaskinit)) { in rcu_gp_init()
1885 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1886 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1889 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1905 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_init()
1907 raw_spin_lock_irq(&rnp->lock); in rcu_gp_init()
1910 rcu_preempt_check_blocked_tasks(rnp); in rcu_gp_init()
1911 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1912 WRITE_ONCE(rnp->gpnum, rsp->gpnum); in rcu_gp_init()
1913 if (WARN_ON_ONCE(rnp->completed != rsp->completed)) in rcu_gp_init()
1914 WRITE_ONCE(rnp->completed, rsp->completed); in rcu_gp_init()
1915 if (rnp == rdp->mynode) in rcu_gp_init()
1916 (void)__note_gp_changes(rsp, rnp, rdp); in rcu_gp_init()
1917 rcu_preempt_boost_start_gp(rnp); in rcu_gp_init()
1918 trace_rcu_grace_period_init(rsp->name, rnp->gpnum, in rcu_gp_init()
1919 rnp->level, rnp->grplo, in rcu_gp_init()
1920 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1921 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1935 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs_check_wake() local
1943 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
1956 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs() local
1976 raw_spin_lock_irq(&rnp->lock); in rcu_gp_fqs()
1980 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_fqs()
1993 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_cleanup() local
1996 raw_spin_lock_irq(&rnp->lock); in rcu_gp_cleanup()
2010 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_cleanup()
2021 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_cleanup()
2022 raw_spin_lock_irq(&rnp->lock); in rcu_gp_cleanup()
2024 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); in rcu_gp_cleanup()
2025 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2026 WRITE_ONCE(rnp->completed, rsp->gpnum); in rcu_gp_cleanup()
2028 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2029 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
2031 nocb += rcu_future_gp_cleanup(rsp, rnp); in rcu_gp_cleanup()
2032 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_cleanup()
2037 rnp = rcu_get_root(rsp); in rcu_gp_cleanup()
2038 raw_spin_lock_irq(&rnp->lock); in rcu_gp_cleanup()
2040 rcu_nocb_gp_set(rnp, nocb); in rcu_gp_cleanup()
2048 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
2055 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_cleanup()
2068 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_kthread() local
2114 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_kthread()
2115 !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_kthread()
2168 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_start_gp_advanced() argument
2204 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_start_gp() local
2215 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret; in rcu_start_gp()
2216 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret; in rcu_start_gp()
2248 struct rcu_node *rnp, unsigned long gps, unsigned long flags) in rcu_report_qs_rnp() argument
2249 __releases(rnp->lock) in rcu_report_qs_rnp()
2256 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) { in rcu_report_qs_rnp()
2262 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rnp()
2266 rnp->qsmask &= ~mask; in rcu_report_qs_rnp()
2267 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum, in rcu_report_qs_rnp()
2268 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2269 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2270 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2271 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2274 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rnp()
2277 mask = rnp->grpmask; in rcu_report_qs_rnp()
2278 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2284 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rnp()
2285 rnp_c = rnp; in rcu_report_qs_rnp()
2286 rnp = rnp->parent; in rcu_report_qs_rnp()
2287 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_qs_rnp()
2308 struct rcu_node *rnp, unsigned long flags) in rcu_report_unblock_qs_rnp() argument
2309 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2316 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_unblock_qs_rnp()
2317 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_unblock_qs_rnp()
2321 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2332 gps = rnp->gpnum; in rcu_report_unblock_qs_rnp()
2333 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2334 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_report_unblock_qs_rnp()
2355 struct rcu_node *rnp; in rcu_report_qs_rdp() local
2357 rnp = rdp->mynode; in rcu_report_qs_rdp()
2358 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_qs_rdp()
2362 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum || in rcu_report_qs_rdp()
2373 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rdp()
2377 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2378 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rdp()
2386 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_report_qs_rdp()
2388 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); in rcu_report_qs_rdp()
2436 struct rcu_node *rnp, struct rcu_data *rdp) in rcu_send_cbs_to_orphanage() argument
2544 RCU_TRACE(struct rcu_node *rnp = rdp->mynode); in rcu_cleanup_dying_cpu()
2551 rnp->gpnum + 1 - !!(rnp->qsmask & mask), in rcu_cleanup_dying_cpu()
2575 struct rcu_node *rnp = rnp_leaf; in rcu_cleanup_dead_rnp() local
2578 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) in rcu_cleanup_dead_rnp()
2581 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
2582 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
2583 if (!rnp) in rcu_cleanup_dead_rnp()
2585 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_cleanup_dead_rnp()
2587 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
2588 rnp->qsmask &= ~mask; in rcu_cleanup_dead_rnp()
2589 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
2590 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2593 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2607 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dying_idle_cpu() local
2614 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_cleanup_dying_idle_cpu()
2616 rnp->qsmaskinitnext &= ~mask; in rcu_cleanup_dying_idle_cpu()
2617 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_cleanup_dying_idle_cpu()
2631 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dead_cpu() local
2637 rcu_boost_kthread_setaffinity(rnp, -1); in rcu_cleanup_dead_cpu()
2641 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); in rcu_cleanup_dead_cpu()
2807 struct rcu_node *rnp; in force_qs_rnp() local
2809 rcu_for_each_leaf_node(rsp, rnp) { in force_qs_rnp()
2812 raw_spin_lock_irqsave(&rnp->lock, flags); in force_qs_rnp()
2814 if (rnp->qsmask == 0) { in force_qs_rnp()
2817 rcu_preempt_blocked_readers_cgp(rnp)) { in force_qs_rnp()
2823 rcu_initiate_boost(rnp, flags); in force_qs_rnp()
2827 if (rnp->parent && in force_qs_rnp()
2828 (rnp->parent->qsmask & rnp->grpmask)) { in force_qs_rnp()
2834 rcu_report_unblock_qs_rnp(rsp, rnp, flags); in force_qs_rnp()
2839 cpu = rnp->grplo; in force_qs_rnp()
2841 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { in force_qs_rnp()
2842 if ((rnp->qsmask & bit) != 0) { in force_qs_rnp()
2849 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); in force_qs_rnp()
2852 raw_spin_unlock_irqrestore(&rnp->lock, flags); in force_qs_rnp()
2865 struct rcu_node *rnp; in force_quiescent_state() local
2869 rnp = __this_cpu_read(rsp->rda->mynode); in force_quiescent_state()
2870 for (; rnp != NULL; rnp = rnp->parent) { in force_quiescent_state()
2872 !raw_spin_trylock(&rnp->fqslock); in force_quiescent_state()
2879 rnp_old = rnp; in force_quiescent_state()
3416 struct rcu_node *rnp; in sync_exp_reset_tree_hotplug() local
3428 rcu_for_each_leaf_node(rsp, rnp) { in sync_exp_reset_tree_hotplug()
3429 raw_spin_lock_irqsave(&rnp->lock, flags); in sync_exp_reset_tree_hotplug()
3431 if (rnp->expmaskinit == rnp->expmaskinitnext) { in sync_exp_reset_tree_hotplug()
3432 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_exp_reset_tree_hotplug()
3437 oldmask = rnp->expmaskinit; in sync_exp_reset_tree_hotplug()
3438 rnp->expmaskinit = rnp->expmaskinitnext; in sync_exp_reset_tree_hotplug()
3439 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_exp_reset_tree_hotplug()
3446 mask = rnp->grpmask; in sync_exp_reset_tree_hotplug()
3447 rnp_up = rnp->parent; in sync_exp_reset_tree_hotplug()
3471 struct rcu_node *rnp; in sync_exp_reset_tree() local
3474 rcu_for_each_node_breadth_first(rsp, rnp) { in sync_exp_reset_tree()
3475 raw_spin_lock_irqsave(&rnp->lock, flags); in sync_exp_reset_tree()
3477 WARN_ON_ONCE(rnp->expmask); in sync_exp_reset_tree()
3478 rnp->expmask = rnp->expmaskinit; in sync_exp_reset_tree()
3479 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_exp_reset_tree()
3492 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) in sync_rcu_preempt_exp_done() argument
3494 return rnp->exp_tasks == NULL && in sync_rcu_preempt_exp_done()
3495 READ_ONCE(rnp->expmask) == 0; in sync_rcu_preempt_exp_done()
3509 static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, in __rcu_report_exp_rnp() argument
3511 __releases(rnp->lock) in __rcu_report_exp_rnp()
3516 if (!sync_rcu_preempt_exp_done(rnp)) { in __rcu_report_exp_rnp()
3517 if (!rnp->expmask) in __rcu_report_exp_rnp()
3518 rcu_initiate_boost(rnp, flags); in __rcu_report_exp_rnp()
3520 raw_spin_unlock_irqrestore(&rnp->lock, flags); in __rcu_report_exp_rnp()
3523 if (rnp->parent == NULL) { in __rcu_report_exp_rnp()
3524 raw_spin_unlock_irqrestore(&rnp->lock, flags); in __rcu_report_exp_rnp()
3531 mask = rnp->grpmask; in __rcu_report_exp_rnp()
3532 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ in __rcu_report_exp_rnp()
3533 rnp = rnp->parent; in __rcu_report_exp_rnp()
3534 raw_spin_lock(&rnp->lock); /* irqs already disabled */ in __rcu_report_exp_rnp()
3536 WARN_ON_ONCE(!(rnp->expmask & mask)); in __rcu_report_exp_rnp()
3537 rnp->expmask &= ~mask; in __rcu_report_exp_rnp()
3548 struct rcu_node *rnp, bool wake) in rcu_report_exp_rnp() argument
3552 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_exp_rnp()
3554 __rcu_report_exp_rnp(rsp, rnp, wake, flags); in rcu_report_exp_rnp()
3562 static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_report_exp_cpu_mult() argument
3567 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_exp_cpu_mult()
3569 if (!(rnp->expmask & mask)) { in rcu_report_exp_cpu_mult()
3570 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_exp_cpu_mult()
3573 rnp->expmask &= ~mask; in rcu_report_exp_cpu_mult()
3574 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */ in rcu_report_exp_cpu_mult()
3588 static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp, in sync_exp_work_done() argument
3593 if (rnp) in sync_exp_work_done()
3594 mutex_unlock(&rnp->exp_funnel_mutex); in sync_exp_work_done()
3666 struct rcu_node *rnp; in sync_sched_exp_handler() local
3670 rnp = rdp->mynode; in sync_sched_exp_handler()
3671 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in sync_sched_exp_handler()
3683 struct rcu_node *rnp; in sync_sched_exp_online_cleanup() local
3687 rnp = rdp->mynode; in sync_sched_exp_online_cleanup()
3688 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) in sync_sched_exp_online_cleanup()
3707 struct rcu_node *rnp; in sync_rcu_exp_select_cpus() local
3710 rcu_for_each_leaf_node(rsp, rnp) { in sync_rcu_exp_select_cpus()
3711 raw_spin_lock_irqsave(&rnp->lock, flags); in sync_rcu_exp_select_cpus()
3716 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in sync_rcu_exp_select_cpus()
3724 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test; in sync_rcu_exp_select_cpus()
3731 if (rcu_preempt_has_tasks(rnp)) in sync_rcu_exp_select_cpus()
3732 rnp->exp_tasks = rnp->blkd_tasks.next; in sync_rcu_exp_select_cpus()
3733 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_rcu_exp_select_cpus()
3737 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) { in sync_rcu_exp_select_cpus()
3746 raw_spin_lock_irqsave(&rnp->lock, flags); in sync_rcu_exp_select_cpus()
3748 (rnp->expmask & mask)) { in sync_rcu_exp_select_cpus()
3749 raw_spin_unlock_irqrestore(&rnp->lock, in sync_rcu_exp_select_cpus()
3753 (rnp->expmask & mask)) in sync_rcu_exp_select_cpus()
3755 raw_spin_lock_irqsave(&rnp->lock, in sync_rcu_exp_select_cpus()
3758 if (!(rnp->expmask & mask)) in sync_rcu_exp_select_cpus()
3760 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_rcu_exp_select_cpus()
3766 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false); in sync_rcu_exp_select_cpus()
3776 struct rcu_node *rnp; in synchronize_sched_expedited_wait() local
3798 rcu_for_each_leaf_node(rsp, rnp) { in synchronize_sched_expedited_wait()
3799 (void)rcu_print_task_exp_stall(rnp); in synchronize_sched_expedited_wait()
3801 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) { in synchronize_sched_expedited_wait()
3804 if (!(rnp->expmask & mask)) in synchronize_sched_expedited_wait()
3809 "o."[!!(rdp->grpmask & rnp->expmaskinit)], in synchronize_sched_expedited_wait()
3810 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]); in synchronize_sched_expedited_wait()
3816 rcu_for_each_leaf_node(rsp, rnp) { in synchronize_sched_expedited_wait()
3818 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) { in synchronize_sched_expedited_wait()
3819 if (!(rnp->expmask & mask)) in synchronize_sched_expedited_wait()
3847 struct rcu_node *rnp; in synchronize_sched_expedited() local
3853 rnp = exp_funnel_lock(rsp, s); in synchronize_sched_expedited()
3854 if (rnp == NULL) in synchronize_sched_expedited()
3862 mutex_unlock(&rnp->exp_funnel_mutex); in synchronize_sched_expedited()
3875 struct rcu_node *rnp = rdp->mynode; in __rcu_pending() local
3911 if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ in __rcu_pending()
3917 if (READ_ONCE(rnp->gpnum) != rdp->gpnum || in __rcu_pending()
4131 struct rcu_node *rnp = rnp_leaf; in rcu_init_new_rnp() local
4134 mask = rnp->grpmask; in rcu_init_new_rnp()
4135 rnp = rnp->parent; in rcu_init_new_rnp()
4136 if (rnp == NULL) in rcu_init_new_rnp()
4138 raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */ in rcu_init_new_rnp()
4139 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
4140 raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */ in rcu_init_new_rnp()
4152 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_boot_init_percpu_data() local
4155 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_boot_init_percpu_data()
4164 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_boot_init_percpu_data()
4179 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_init_percpu_data() local
4182 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_init_percpu_data()
4192 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_init_percpu_data()
4199 rnp = rdp->mynode; in rcu_init_percpu_data()
4201 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_init_percpu_data()
4203 rnp->qsmaskinitnext |= mask; in rcu_init_percpu_data()
4204 rnp->expmaskinitnext |= mask; in rcu_init_percpu_data()
4208 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */ in rcu_init_percpu_data()
4209 rdp->completed = rnp->completed; in rcu_init_percpu_data()
4214 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_init_percpu_data()
4233 struct rcu_node *rnp = rdp->mynode; in rcu_cpu_notify() local
4246 rcu_boost_kthread_setaffinity(rnp, -1); in rcu_cpu_notify()
4249 rcu_boost_kthread_setaffinity(rnp, cpu); in rcu_cpu_notify()
4309 struct rcu_node *rnp; in rcu_spawn_gp_kthread() local
4329 rnp = rcu_get_root(rsp); in rcu_spawn_gp_kthread()
4330 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_spawn_gp_kthread()
4337 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_spawn_gp_kthread()
4401 struct rcu_node *rnp; in rcu_init_one() local
4423 rnp = rsp->level[i]; in rcu_init_one()
4424 for (j = 0; j < levelcnt[i]; j++, rnp++) { in rcu_init_one()
4425 raw_spin_lock_init(&rnp->lock); in rcu_init_one()
4426 lockdep_set_class_and_name(&rnp->lock, in rcu_init_one()
4428 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
4429 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4431 rnp->gpnum = rsp->gpnum; in rcu_init_one()
4432 rnp->completed = rsp->completed; in rcu_init_one()
4433 rnp->qsmask = 0; in rcu_init_one()
4434 rnp->qsmaskinit = 0; in rcu_init_one()
4435 rnp->grplo = j * cpustride; in rcu_init_one()
4436 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4437 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4438 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4440 rnp->grpnum = 0; in rcu_init_one()
4441 rnp->grpmask = 0; in rcu_init_one()
4442 rnp->parent = NULL; in rcu_init_one()
4444 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4445 rnp->grpmask = 1UL << rnp->grpnum; in rcu_init_one()
4446 rnp->parent = rsp->level[i - 1] + in rcu_init_one()
4449 rnp->level = i; in rcu_init_one()
4450 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4451 rcu_init_one_nocb(rnp); in rcu_init_one()
4452 mutex_init(&rnp->exp_funnel_mutex); in rcu_init_one()
4453 lockdep_set_class_and_name(&rnp->exp_funnel_mutex, in rcu_init_one()
4460 rnp = rsp->level[rcu_num_lvls - 1]; in rcu_init_one()
4462 while (i > rnp->grphi) in rcu_init_one()
4463 rnp++; in rcu_init_one()
4464 per_cpu_ptr(rsp->rda, i)->mynode = rnp; in rcu_init_one()
4556 struct rcu_node *rnp; in rcu_dump_rcu_node_tree() local
4560 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_dump_rcu_node_tree()
4561 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4564 level = rnp->level; in rcu_dump_rcu_node_tree()
4566 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()
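The references above are all to struct rcu_node *rnp, the per-group node of the combining tree used by the kernel's tree-RCU implementation. A pattern that recurs through many of the listed lines (rcu_dump_cpu_stacks(), print_other_cpu_stall(), force_qs_rnp()) is: walk each leaf rcu_node, take its ->lock, and test per-CPU bits in ->qsmask against the CPU range ->grplo..->grphi. The sketch below is a minimal user-space toy model of that bookkeeping, not the kernel's code; the field names mirror the listing, but the helper functions and main() are invented for illustration.

/*
 * Toy model of the leaf-node bookkeeping referenced above: a leaf
 * covers CPUs grplo..grphi, and qsmask holds one bit per CPU in that
 * range (bit 0 == grplo).  Illustrative sketch only; not kernel code.
 */
#include <stdio.h>

struct toy_rcu_node {
	unsigned long	qsmask;		/* CPUs still needing a quiescent state */
	int		grplo;		/* lowest CPU covered by this node */
	int		grphi;		/* highest CPU covered by this node */
};

/* Mimics the loop in rcu_dump_cpu_stacks(): report CPUs whose bit is set. */
static void toy_dump_pending_cpus(const struct toy_rcu_node *rnp)
{
	int cpu;

	for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
		if (rnp->qsmask & (1UL << cpu))
			printf("CPU %d has not yet passed a quiescent state\n",
			       rnp->grplo + cpu);
}

/* Mimics rcu_report_qs_rnp() clearing a CPU's bit once it reports. */
static void toy_report_qs(struct toy_rcu_node *rnp, int cpu)
{
	unsigned long mask = 1UL << (cpu - rnp->grplo);

	rnp->qsmask &= ~mask;
}

int main(void)
{
	struct toy_rcu_node leaf = { .qsmask = 0xbUL, .grplo = 4, .grphi = 7 };

	toy_dump_pending_cpus(&leaf);	/* CPUs 4, 5, 7 pending */
	toy_report_qs(&leaf, 5);
	toy_dump_pending_cpus(&leaf);	/* CPUs 4, 7 pending */
	return 0;
}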
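A second pattern visible in the listing is upward propagation: each node's ->grpmask is its bit in the parent's masks, and functions such as rcu_report_qs_rnp(), rcu_init_new_rnp(), and rcu_cleanup_dead_rnp() walk rnp = rnp->parent, setting or clearing that bit level by level. The following sketch models only that walk-up, again as a user-space toy with invented helper names rather than the kernel's implementation.

/*
 * Toy model of the parent walk used by rcu_init_new_rnp() and
 * rcu_cleanup_dead_rnp(): a node's grpmask is its bit in the parent's
 * qsmaskinit.  Illustrative sketch only; not kernel code.
 */
#include <stdio.h>

struct toy_rcu_node {
	unsigned long		qsmaskinit;	/* online children/CPUs below this node */
	unsigned long		grpmask;	/* this node's bit in its parent */
	struct toy_rcu_node	*parent;	/* NULL at the root */
};

/* Mimics rcu_init_new_rnp(): mark this leaf's branch online all the way up. */
static void toy_init_new_rnp(struct toy_rcu_node *rnp_leaf)
{
	unsigned long mask;
	struct toy_rcu_node *rnp = rnp_leaf;

	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			return;
		rnp->qsmaskinit |= mask;
	}
}

/* Mimics rcu_cleanup_dead_rnp(): clear the branch, stopping where others remain. */
static void toy_cleanup_dead_rnp(struct toy_rcu_node *rnp_leaf)
{
	unsigned long mask;
	struct toy_rcu_node *rnp = rnp_leaf;

	if (rnp->qsmaskinit)		/* leaf still has online CPUs */
		return;
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			return;
		rnp->qsmaskinit &= ~mask;
		if (rnp->qsmaskinit)	/* other children still online */
			return;
	}
}

int main(void)
{
	struct toy_rcu_node root = { .parent = NULL };
	struct toy_rcu_node leaf0 = { .grpmask = 0x1, .parent = &root };
	struct toy_rcu_node leaf1 = { .grpmask = 0x2, .parent = &root };

	leaf0.qsmaskinit = 0x3;		/* leaf0 has two online CPUs */
	toy_init_new_rnp(&leaf0);
	toy_init_new_rnp(&leaf1);
	printf("root qsmaskinit after init: %#lx\n", root.qsmaskinit);    /* 0x3 */

	leaf1.qsmaskinit = 0;		/* last CPU under leaf1 went offline */
	toy_cleanup_dead_rnp(&leaf1);
	printf("root qsmaskinit after cleanup: %#lx\n", root.qsmaskinit); /* 0x1 */
	return 0;
}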