rnp               323 kernel/rcu/rcu.h #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
rnp               326 kernel/rcu/rcu.h #define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
rnp               333 kernel/rcu/rcu.h #define srcu_for_each_node_breadth_first(sp, rnp) \
rnp               334 kernel/rcu/rcu.h 	for ((rnp) = &(sp)->node[0]; \
rnp               335 kernel/rcu/rcu.h 	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
rnp               336 kernel/rcu/rcu.h #define rcu_for_each_node_breadth_first(rnp) \
rnp               337 kernel/rcu/rcu.h 	srcu_for_each_node_breadth_first(&rcu_state, rnp)
rnp               345 kernel/rcu/rcu.h #define rcu_for_each_leaf_node(rnp) \
rnp               346 kernel/rcu/rcu.h 	for ((rnp) = rcu_first_leaf_node(); \
rnp               347 kernel/rcu/rcu.h 	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
rnp               352 kernel/rcu/rcu.h #define for_each_leaf_node_possible_cpu(rnp, cpu) \
rnp               353 kernel/rcu/rcu.h 	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
rnp               354 kernel/rcu/rcu.h 	     (cpu) <= rnp->grphi; \
rnp               360 kernel/rcu/rcu.h #define rcu_find_next_bit(rnp, cpu, mask) \
rnp               361 kernel/rcu/rcu.h 	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
rnp               362 kernel/rcu/rcu.h #define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
rnp               363 kernel/rcu/rcu.h 	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
rnp               364 kernel/rcu/rcu.h 	     (cpu) <= rnp->grphi; \
rnp               365 kernel/rcu/rcu.h 	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
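
Annotation: the rcu.h macros above flatten the rcu_node tree into a single array laid out breadth-first, so the root is node[0], the leaves occupy the tail of the array, and both full and leaf-only traversals reduce to pointer loops. A minimal standalone sketch of that layout (invented NR_* constants and struct, not the kernel's types):

    #include <stdio.h>

    #define NR_CPUS   8
    #define NR_LEAVES 2                   /* four CPUs per leaf */
    #define NR_NODES  (1 + NR_LEAVES)     /* one root plus the leaves */

    struct node {
        int level;                        /* 0 = root, 1 = leaf here */
        int grplo, grphi;                 /* CPU range covered by this node */
    };

    static struct node tree[NR_NODES];

    #define first_leaf() (&tree[NR_NODES - NR_LEAVES])

    /* Mirrors rcu_for_each_node_breadth_first()/rcu_for_each_leaf_node(). */
    #define for_each_node_bf(np) \
        for ((np) = &tree[0]; (np) < &tree[NR_NODES]; (np)++)
    #define for_each_leaf(np) \
        for ((np) = first_leaf(); (np) < &tree[NR_NODES]; (np)++)

    int main(void)
    {
        struct node *np;
        int i, cpu;

        tree[0] = (struct node){ .level = 0, .grplo = 0,
                                 .grphi = NR_CPUS - 1 };
        for (i = 0; i < NR_LEAVES; i++)
            tree[1 + i] = (struct node){
                .level = 1,
                .grplo = i * (NR_CPUS / NR_LEAVES),
                .grphi = (i + 1) * (NR_CPUS / NR_LEAVES) - 1,
            };

        for_each_node_bf(np)
            printf("node level %d covers CPUs %d-%d\n",
                   np->level, np->grplo, np->grphi);

        /* Like for_each_leaf_node_possible_cpu(): leaf, then its CPUs. */
        for_each_leaf(np)
            for (cpu = np->grplo; cpu <= np->grphi; cpu++)
                printf("leaf %ld owns cpu %d\n",
                       (long)(np - first_leaf()), cpu);
        return 0;
    }
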
rnp               145 kernel/rcu/tree.c static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
rnp               149 kernel/rcu/tree.c static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
rnp               191 kernel/rcu/tree.c unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
rnp               193 kernel/rcu/tree.c 	return READ_ONCE(rnp->qsmaskinitnext);
rnp               944 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp               951 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp               952 kernel/rcu/tree.c 	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
rnp               968 kernel/rcu/tree.c static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
rnp               970 kernel/rcu/tree.c 	raw_lockdep_assert_held_rcu_node(rnp);
rnp               972 kernel/rcu/tree.c 			 rnp->gp_seq))
rnp               974 kernel/rcu/tree.c 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
rnp               975 kernel/rcu/tree.c 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
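
Annotation: rcu_gpnum_ovf() above guards against ->gp_seq lapping a long-idle CPU, and the comparison it leans on treats unsigned longs as points on a circle rather than a line. A quick demonstration of that wrap-tolerant test (ULONG_CMP_LT() reproduced from kernel/rcu/rcu.h as I understand it; the harness is invented):

    #include <limits.h>
    #include <stdio.h>

    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
        unsigned long gp = 10;

        /*
         * Plain "<" would call 10 smaller than ULONG_MAX - 5, but after
         * a wrap ULONG_MAX - 5 was reached first, so in modular terms it
         * is the value that is behind (less than) 10.
         */
        printf("%d\n", ULONG_CMP_LT(ULONG_MAX - 5, gp)); /* prints 1 */
        printf("%d\n", ULONG_CMP_LT(gp, ULONG_MAX - 5)); /* prints 0 */
        return 0;
    }
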
rnp              1005 kernel/rcu/tree.c 	struct rcu_node *rnp = rdp->mynode;
rnp              1017 kernel/rcu/tree.c 		rcu_gpnum_ovf(rnp, rdp);
rnp              1022 kernel/rcu/tree.c 	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
rnp              1029 kernel/rcu/tree.c 			__func__, rnp->grplo, rnp->grphi, rnp->level,
rnp              1030 kernel/rcu/tree.c 			(long)rnp->gp_seq, (long)rnp->completedqs);
rnp              1031 kernel/rcu/tree.c 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
rnp              1034 kernel/rcu/tree.c 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
rnp              1095 kernel/rcu/tree.c 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
rnp              1096 kernel/rcu/tree.c 		    (rnp->ffmask & rdp->grpmask)) {
rnp              1099 kernel/rcu/tree.c 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
rnp              1108 kernel/rcu/tree.c static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
rnp              1111 kernel/rcu/tree.c 	trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
rnp              1112 kernel/rcu/tree.c 				      rnp->level, rnp->grplo, rnp->grphi, s);
rnp              1135 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              1148 kernel/rcu/tree.c 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
rnp              1149 kernel/rcu/tree.c 		if (rnp != rnp_start)
rnp              1150 kernel/rcu/tree.c 			raw_spin_lock_rcu_node(rnp);
rnp              1151 kernel/rcu/tree.c 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
rnp              1152 kernel/rcu/tree.c 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
rnp              1153 kernel/rcu/tree.c 		    (rnp != rnp_start &&
rnp              1154 kernel/rcu/tree.c 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
rnp              1155 kernel/rcu/tree.c 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
rnp              1159 kernel/rcu/tree.c 		rnp->gp_seq_needed = gp_seq_req;
rnp              1160 kernel/rcu/tree.c 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
rnp              1171 kernel/rcu/tree.c 		if (rnp != rnp_start && rnp->parent != NULL)
rnp              1172 kernel/rcu/tree.c 			raw_spin_unlock_rcu_node(rnp);
rnp              1173 kernel/rcu/tree.c 		if (!rnp->parent)
rnp              1179 kernel/rcu/tree.c 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
rnp              1182 kernel/rcu/tree.c 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
rnp              1186 kernel/rcu/tree.c 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
rnp              1193 kernel/rcu/tree.c 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
rnp              1194 kernel/rcu/tree.c 		rnp_start->gp_seq_needed = rnp->gp_seq_needed;
rnp              1195 kernel/rcu/tree.c 		rdp->gp_seq_needed = rnp->gp_seq_needed;
rnp              1197 kernel/rcu/tree.c 	if (rnp != rnp_start)
rnp              1198 kernel/rcu/tree.c 		raw_spin_unlock_rcu_node(rnp);
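
Annotation: the rcu_start_this_gp() lines above implement funnel locking: walk from a leaf toward the root, record the requested grace-period number at each level, and stop as soon as some node has already recorded it, holding at most the starting node's lock plus the current node's lock. A pthread-based sketch of that shape (simplified: a plain >= instead of the wrap-safe comparison, and the start node's lock is taken here rather than by the caller):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
        pthread_mutex_t lock;
        struct node *parent;
        unsigned long seq_needed;     /* stands in for ->gp_seq_needed */
    };

    /* Record that 'start' needs sequence 's'; stop early if already there. */
    static void request_seq(struct node *start, unsigned long s)
    {
        struct node *np;

        pthread_mutex_lock(&start->lock);
        for (np = start; ; np = np->parent) {
            if (np != start)
                pthread_mutex_lock(&np->lock);
            if (np->seq_needed >= s)        /* someone beat us to it */
                break;
            np->seq_needed = s;
            if (np != start && np->parent)  /* drop intermediate locks */
                pthread_mutex_unlock(&np->lock);
            if (!np->parent)                /* reached the root */
                break;
        }
        if (np != start)
            pthread_mutex_unlock(&np->lock);
        pthread_mutex_unlock(&start->lock);
    }

    int main(void)
    {
        struct node root = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        struct node mid  = { PTHREAD_MUTEX_INITIALIZER, &root, 0 };
        struct node leaf = { PTHREAD_MUTEX_INITIALIZER, &mid, 0 };

        request_seq(&leaf, 4);   /* records 4 at leaf, mid, and root */
        request_seq(&leaf, 4);   /* stops at the leaf without climbing */
        printf("root needs %lu\n", root.seq_needed);
        return 0;
    }
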
rnp              1206 kernel/rcu/tree.c static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
rnp              1211 kernel/rcu/tree.c 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
rnp              1213 kernel/rcu/tree.c 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
rnp              1214 kernel/rcu/tree.c 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
rnp              1257 kernel/rcu/tree.c static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
rnp              1263 kernel/rcu/tree.c 	raw_lockdep_assert_held_rcu_node(rnp);
rnp              1281 kernel/rcu/tree.c 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
rnp              1298 kernel/rcu/tree.c static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
rnp              1311 kernel/rcu/tree.c 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rnp              1312 kernel/rcu/tree.c 	needwake = rcu_accelerate_cbs(rnp, rdp);
rnp              1313 kernel/rcu/tree.c 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
rnp              1328 kernel/rcu/tree.c static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
rnp              1331 kernel/rcu/tree.c 	raw_lockdep_assert_held_rcu_node(rnp);
rnp              1341 kernel/rcu/tree.c 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
rnp              1344 kernel/rcu/tree.c 	return rcu_accelerate_cbs(rnp, rdp);
rnp              1351 kernel/rcu/tree.c static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
rnp              1355 kernel/rcu/tree.c 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
rnp              1356 kernel/rcu/tree.c 	    !raw_spin_trylock_rcu_node(rnp))
rnp              1358 kernel/rcu/tree.c 	WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
rnp              1359 kernel/rcu/tree.c 	raw_spin_unlock_rcu_node(rnp);
rnp              1368 kernel/rcu/tree.c static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
rnp              1375 kernel/rcu/tree.c 	raw_lockdep_assert_held_rcu_node(rnp);
rnp              1377 kernel/rcu/tree.c 	if (rdp->gp_seq == rnp->gp_seq)
rnp              1381 kernel/rcu/tree.c 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
rnp              1384 kernel/rcu/tree.c 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
rnp              1388 kernel/rcu/tree.c 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
rnp              1392 kernel/rcu/tree.c 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
rnp              1399 kernel/rcu/tree.c 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
rnp              1400 kernel/rcu/tree.c 		need_gp = !!(rnp->qsmask & rdp->grpmask);
rnp              1405 kernel/rcu/tree.c 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
rnp              1406 kernel/rcu/tree.c 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
rnp              1407 kernel/rcu/tree.c 		rdp->gp_seq_needed = rnp->gp_seq_needed;
rnp              1409 kernel/rcu/tree.c 	rcu_gpnum_ovf(rnp, rdp);
rnp              1417 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              1420 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp              1421 kernel/rcu/tree.c 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
rnp              1423 kernel/rcu/tree.c 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
rnp              1427 kernel/rcu/tree.c 	needwake = __note_gp_changes(rnp, rdp);
rnp              1428 kernel/rcu/tree.c 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1450 kernel/rcu/tree.c 	struct rcu_node *rnp = rcu_get_root();
rnp              1453 kernel/rcu/tree.c 	raw_spin_lock_irq_rcu_node(rnp);
rnp              1456 kernel/rcu/tree.c 		raw_spin_unlock_irq_rcu_node(rnp);
rnp              1466 kernel/rcu/tree.c 		raw_spin_unlock_irq_rcu_node(rnp);
rnp              1475 kernel/rcu/tree.c 	raw_spin_unlock_irq_rcu_node(rnp);
rnp              1484 kernel/rcu/tree.c 	rcu_for_each_leaf_node(rnp) {
rnp              1486 kernel/rcu/tree.c 		raw_spin_lock_irq_rcu_node(rnp);
rnp              1487 kernel/rcu/tree.c 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
rnp              1488 kernel/rcu/tree.c 		    !rnp->wait_blkd_tasks) {
rnp              1490 kernel/rcu/tree.c 			raw_spin_unlock_irq_rcu_node(rnp);
rnp              1496 kernel/rcu/tree.c 		oldmask = rnp->qsmaskinit;
rnp              1497 kernel/rcu/tree.c 		rnp->qsmaskinit = rnp->qsmaskinitnext;
rnp              1500 kernel/rcu/tree.c 		if (!oldmask != !rnp->qsmaskinit) {
rnp              1502 kernel/rcu/tree.c 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
rnp              1503 kernel/rcu/tree.c 					rcu_init_new_rnp(rnp);
rnp              1504 kernel/rcu/tree.c 			} else if (rcu_preempt_has_tasks(rnp)) {
rnp              1505 kernel/rcu/tree.c 				rnp->wait_blkd_tasks = true; /* blocked tasks */
rnp              1507 kernel/rcu/tree.c 				rcu_cleanup_dead_rnp(rnp);
rnp              1519 kernel/rcu/tree.c 		if (rnp->wait_blkd_tasks &&
rnp              1520 kernel/rcu/tree.c 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
rnp              1521 kernel/rcu/tree.c 			rnp->wait_blkd_tasks = false;
rnp              1522 kernel/rcu/tree.c 			if (!rnp->qsmaskinit)
rnp              1523 kernel/rcu/tree.c 				rcu_cleanup_dead_rnp(rnp);
rnp              1526 kernel/rcu/tree.c 		raw_spin_unlock_irq_rcu_node(rnp);
rnp              1544 kernel/rcu/tree.c 	rcu_for_each_node_breadth_first(rnp) {
rnp              1546 kernel/rcu/tree.c 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              1548 kernel/rcu/tree.c 		rcu_preempt_check_blocked_tasks(rnp);
rnp              1549 kernel/rcu/tree.c 		rnp->qsmask = rnp->qsmaskinit;
rnp              1550 kernel/rcu/tree.c 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
rnp              1551 kernel/rcu/tree.c 		if (rnp == rdp->mynode)
rnp              1552 kernel/rcu/tree.c 			(void)__note_gp_changes(rnp, rdp);
rnp              1553 kernel/rcu/tree.c 		rcu_preempt_boost_start_gp(rnp);
rnp              1554 kernel/rcu/tree.c 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
rnp              1555 kernel/rcu/tree.c 					    rnp->level, rnp->grplo,
rnp              1556 kernel/rcu/tree.c 					    rnp->grphi, rnp->qsmask);
rnp              1558 kernel/rcu/tree.c 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
rnp              1559 kernel/rcu/tree.c 		rnp->rcu_gp_init_mask = mask;
rnp              1560 kernel/rcu/tree.c 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
rnp              1561 kernel/rcu/tree.c 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
rnp              1563 kernel/rcu/tree.c 			raw_spin_unlock_irq_rcu_node(rnp);
rnp              1577 kernel/rcu/tree.c 	struct rcu_node *rnp = rcu_get_root();
rnp              1585 kernel/rcu/tree.c 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
rnp              1596 kernel/rcu/tree.c 	struct rcu_node *rnp = rcu_get_root();
rnp              1609 kernel/rcu/tree.c 		raw_spin_lock_irq_rcu_node(rnp);
rnp              1612 kernel/rcu/tree.c 		raw_spin_unlock_irq_rcu_node(rnp);
rnp              1625 kernel/rcu/tree.c 	struct rcu_node *rnp = rcu_get_root();
rnp              1645 kernel/rcu/tree.c 		if (!READ_ONCE(rnp->qsmask) &&
rnp              1646 kernel/rcu/tree.c 		    !rcu_preempt_blocked_readers_cgp(rnp))
rnp              1691 kernel/rcu/tree.c 	struct rcu_node *rnp = rcu_get_root();
rnp              1695 kernel/rcu/tree.c 	raw_spin_lock_irq_rcu_node(rnp);
rnp              1709 kernel/rcu/tree.c 	raw_spin_unlock_irq_rcu_node(rnp);
rnp              1722 kernel/rcu/tree.c 	rcu_for_each_node_breadth_first(rnp) {
rnp              1723 kernel/rcu/tree.c 		raw_spin_lock_irq_rcu_node(rnp);
rnp              1724 kernel/rcu/tree.c 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
rnp              1725 kernel/rcu/tree.c 			dump_blkd_tasks(rnp, 10);
rnp              1726 kernel/rcu/tree.c 		WARN_ON_ONCE(rnp->qsmask);
rnp              1727 kernel/rcu/tree.c 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
rnp              1729 kernel/rcu/tree.c 		if (rnp == rdp->mynode)
rnp              1730 kernel/rcu/tree.c 			needgp = __note_gp_changes(rnp, rdp) || needgp;
rnp              1732 kernel/rcu/tree.c 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
rnp              1733 kernel/rcu/tree.c 		sq = rcu_nocb_gp_get(rnp);
rnp              1734 kernel/rcu/tree.c 		raw_spin_unlock_irq_rcu_node(rnp);
rnp              1740 kernel/rcu/tree.c 	rnp = rcu_get_root();
rnp              1741 kernel/rcu/tree.c 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
rnp              1749 kernel/rcu/tree.c 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
rnp              1750 kernel/rcu/tree.c 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
rnp              1757 kernel/rcu/tree.c 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
rnp              1767 kernel/rcu/tree.c 	raw_spin_unlock_irq_rcu_node(rnp);
rnp              1843 kernel/rcu/tree.c static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
rnp              1845 kernel/rcu/tree.c 	__releases(rnp->lock)
rnp              1850 kernel/rcu/tree.c 	raw_lockdep_assert_held_rcu_node(rnp);
rnp              1854 kernel/rcu/tree.c 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
rnp              1860 kernel/rcu/tree.c 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1864 kernel/rcu/tree.c 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
rnp              1865 kernel/rcu/tree.c 			     rcu_preempt_blocked_readers_cgp(rnp));
rnp              1866 kernel/rcu/tree.c 		rnp->qsmask &= ~mask;
rnp              1867 kernel/rcu/tree.c 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
rnp              1868 kernel/rcu/tree.c 						 mask, rnp->qsmask, rnp->level,
rnp              1869 kernel/rcu/tree.c 						 rnp->grplo, rnp->grphi,
rnp              1870 kernel/rcu/tree.c 						 !!rnp->gp_tasks);
rnp              1871 kernel/rcu/tree.c 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
rnp              1874 kernel/rcu/tree.c 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1877 kernel/rcu/tree.c 		rnp->completedqs = rnp->gp_seq;
rnp              1878 kernel/rcu/tree.c 		mask = rnp->grpmask;
rnp              1879 kernel/rcu/tree.c 		if (rnp->parent == NULL) {
rnp              1885 kernel/rcu/tree.c 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1886 kernel/rcu/tree.c 		rnp_c = rnp;
rnp              1887 kernel/rcu/tree.c 		rnp = rnp->parent;
rnp              1888 kernel/rcu/tree.c 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
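
Annotation: rcu_report_qs_rnp() above clears reporting bits in a leaf's ->qsmask and, each time a mask drains to zero, climbs one level using ->grpmask; the grace period can end only once the root's mask drains. A lock-free toy model of just that propagation (invented struct and masks, none of the kernel's preempted-task bookkeeping):

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        struct node *parent;
        unsigned long qsmask;     /* children/CPUs still to report */
        unsigned long grpmask;    /* our bit in parent->qsmask */
    };

    /* Returns true when this report finishes the (toy) grace period. */
    static bool report_qs(struct node *np, unsigned long mask)
    {
        for (;;) {
            np->qsmask &= ~mask;
            if (np->qsmask)       /* siblings still pending */
                return false;
            if (!np->parent)      /* root went idle: GP is done */
                return true;
            mask = np->grpmask;   /* propagate one level up */
            np = np->parent;
        }
    }

    int main(void)
    {
        struct node root  = { .qsmask = 0x3 };
        struct node leaf0 = { .parent = &root, .qsmask = 0x3, .grpmask = 0x1 };
        struct node leaf1 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x2 };

        printf("%d\n", report_qs(&leaf0, 0x1)); /* 0: leaf0 still waits */
        printf("%d\n", report_qs(&leaf0, 0x2)); /* 0: root still waits  */
        printf("%d\n", report_qs(&leaf1, 0x1)); /* 1: last report ends GP */
        return 0;
    }
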
rnp              1908 kernel/rcu/tree.c rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
rnp              1909 kernel/rcu/tree.c 	__releases(rnp->lock)
rnp              1915 kernel/rcu/tree.c 	raw_lockdep_assert_held_rcu_node(rnp);
rnp              1917 kernel/rcu/tree.c 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
rnp              1918 kernel/rcu/tree.c 	    rnp->qsmask != 0) {
rnp              1919 kernel/rcu/tree.c 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1923 kernel/rcu/tree.c 	rnp->completedqs = rnp->gp_seq;
rnp              1924 kernel/rcu/tree.c 	rnp_p = rnp->parent;
rnp              1935 kernel/rcu/tree.c 	gps = rnp->gp_seq;
rnp              1936 kernel/rcu/tree.c 	mask = rnp->grpmask;
rnp              1937 kernel/rcu/tree.c 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
rnp              1954 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              1956 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp              1957 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              1958 kernel/rcu/tree.c 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
rnp              1968 kernel/rcu/tree.c 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1973 kernel/rcu/tree.c 	if ((rnp->qsmask & mask) == 0) {
rnp              1974 kernel/rcu/tree.c 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1981 kernel/rcu/tree.c 			needwake = rcu_accelerate_cbs(rnp, rdp);
rnp              1983 kernel/rcu/tree.c 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
rnp              2031 kernel/rcu/tree.c 	struct rcu_node *rnp = rdp->mynode;
rnp              2036 kernel/rcu/tree.c 	blkd = !!(rnp->qsmask & rdp->grpmask);
rnp              2037 kernel/rcu/tree.c 	trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
rnp              2062 kernel/rcu/tree.c 	struct rcu_node *rnp = rnp_leaf;
rnp              2070 kernel/rcu/tree.c 		mask = rnp->grpmask;
rnp              2071 kernel/rcu/tree.c 		rnp = rnp->parent;
rnp              2072 kernel/rcu/tree.c 		if (!rnp)
rnp              2074 kernel/rcu/tree.c 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rnp              2075 kernel/rcu/tree.c 		rnp->qsmaskinit &= ~mask;
rnp              2077 kernel/rcu/tree.c 		WARN_ON_ONCE(rnp->qsmask);
rnp              2078 kernel/rcu/tree.c 		if (rnp->qsmaskinit) {
rnp              2079 kernel/rcu/tree.c 			raw_spin_unlock_rcu_node(rnp);
rnp              2083 kernel/rcu/tree.c 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
rnp              2096 kernel/rcu/tree.c 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
rnp              2102 kernel/rcu/tree.c 	rcu_boost_kthread_setaffinity(rnp, -1);
rnp              2263 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              2265 kernel/rcu/tree.c 	rcu_for_each_leaf_node(rnp) {
rnp              2268 kernel/rcu/tree.c 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              2269 kernel/rcu/tree.c 		if (rnp->qsmask == 0) {
rnp              2271 kernel/rcu/tree.c 			    rcu_preempt_blocked_readers_cgp(rnp)) {
rnp              2277 kernel/rcu/tree.c 				rcu_initiate_boost(rnp, flags);
rnp              2281 kernel/rcu/tree.c 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              2284 kernel/rcu/tree.c 		for_each_leaf_node_possible_cpu(rnp, cpu) {
rnp              2285 kernel/rcu/tree.c 			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
rnp              2286 kernel/rcu/tree.c 			if ((rnp->qsmask & bit) != 0) {
rnp              2293 kernel/rcu/tree.c 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
rnp              2296 kernel/rcu/tree.c 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              2309 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              2313 kernel/rcu/tree.c 	rnp = __this_cpu_read(rcu_data.mynode);
rnp              2314 kernel/rcu/tree.c 	for (; rnp != NULL; rnp = rnp->parent) {
rnp              2316 kernel/rcu/tree.c 		      !raw_spin_trylock(&rnp->fqslock);
rnp              2321 kernel/rcu/tree.c 		rnp_old = rnp;
rnp              2344 kernel/rcu/tree.c 	struct rcu_node *rnp = rdp->mynode;
rnp              2369 kernel/rcu/tree.c 			rcu_accelerate_cbs_unlocked(rnp, rdp);
rnp              2373 kernel/rcu/tree.c 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
rnp              2793 kernel/rcu/tree.c 	struct rcu_node *rnp = rdp->mynode;
rnp              2823 kernel/rcu/tree.c 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
rnp              2970 kernel/rcu/tree.c 	struct rcu_node *rnp = rnp_leaf;
rnp              2973 kernel/rcu/tree.c 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
rnp              2975 kernel/rcu/tree.c 		mask = rnp->grpmask;
rnp              2976 kernel/rcu/tree.c 		rnp = rnp->parent;
rnp              2977 kernel/rcu/tree.c 		if (rnp == NULL)
rnp              2979 kernel/rcu/tree.c 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
rnp              2980 kernel/rcu/tree.c 		oldmask = rnp->qsmaskinit;
rnp              2981 kernel/rcu/tree.c 		rnp->qsmaskinit |= mask;
rnp              2982 kernel/rcu/tree.c 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
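
Annotation: rcu_init_new_rnp() above and rcu_cleanup_dead_rnp() (tree.c lines 2062-2083 in this listing) are mirror images: the first propagates a leaf's first-online bit rootward until it finds an ancestor that was already reachable, the second clears last-offline bits until an ancestor still has other online children. A condensed sketch of the two stopping rules (toy types, no locking):

    #include <stdio.h>

    struct node {
        struct node *parent;
        unsigned long qsmaskinit;   /* bits for online children */
        unsigned long grpmask;      /* our bit in parent->qsmaskinit */
    };

    /* First CPU of a leaf comes online: set bits up, stop once already set. */
    static void init_new(struct node *np)
    {
        unsigned long mask = np->grpmask, oldmask;

        for (np = np->parent; np; np = np->parent) {
            oldmask = np->qsmaskinit;
            np->qsmaskinit |= mask;
            if (oldmask)            /* ancestor was already reachable */
                return;
            mask = np->grpmask;
        }
    }

    /* Last CPU goes offline: clear bits up, stop while siblings remain. */
    static void cleanup_dead(struct node *np)
    {
        unsigned long mask = np->grpmask;

        for (np = np->parent; np; np = np->parent) {
            np->qsmaskinit &= ~mask;
            if (np->qsmaskinit)     /* other children still online */
                return;
            mask = np->grpmask;
        }
    }

    int main(void)
    {
        struct node root = { .qsmaskinit = 0 };
        struct node leaf = { .parent = &root, .grpmask = 0x1 };

        init_new(&leaf);
        printf("%lx\n", root.qsmaskinit);  /* 1 */
        cleanup_dead(&leaf);
        printf("%lx\n", root.qsmaskinit);  /* 0 */
        return 0;
    }
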
rnp              3022 kernel/rcu/tree.c 	struct rcu_node *rnp = rcu_get_root();
rnp              3025 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              3034 kernel/rcu/tree.c 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
rnp              3041 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp              3042 kernel/rcu/tree.c 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
rnp              3044 kernel/rcu/tree.c 	rdp->gp_seq = rnp->gp_seq;
rnp              3045 kernel/rcu/tree.c 	rdp->gp_seq_needed = rnp->gp_seq;
rnp              3049 kernel/rcu/tree.c 	rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
rnp              3051 kernel/rcu/tree.c 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              3076 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              3079 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp              3080 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              3081 kernel/rcu/tree.c 	rnp->ffmask |= rdp->grpmask;
rnp              3082 kernel/rcu/tree.c 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              3098 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              3101 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp              3102 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              3103 kernel/rcu/tree.c 	rnp->ffmask &= ~rdp->grpmask;
rnp              3104 kernel/rcu/tree.c 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              3130 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              3138 kernel/rcu/tree.c 	rnp = rdp->mynode;
rnp              3140 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              3141 kernel/rcu/tree.c 	rnp->qsmaskinitnext |= mask;
rnp              3142 kernel/rcu/tree.c 	oldmask = rnp->expmaskinitnext;
rnp              3143 kernel/rcu/tree.c 	rnp->expmaskinitnext |= mask;
rnp              3144 kernel/rcu/tree.c 	oldmask ^= rnp->expmaskinitnext;
rnp              3148 kernel/rcu/tree.c 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
rnp              3151 kernel/rcu/tree.c 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
rnp              3153 kernel/rcu/tree.c 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
rnp              3155 kernel/rcu/tree.c 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              3174 kernel/rcu/tree.c 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
rnp              3185 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
rnp              3188 kernel/rcu/tree.c 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
rnp              3190 kernel/rcu/tree.c 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
rnp              3191 kernel/rcu/tree.c 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              3193 kernel/rcu/tree.c 	rnp->qsmaskinitnext &= ~mask;
rnp              3194 kernel/rcu/tree.c 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              3278 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              3305 kernel/rcu/tree.c 	rnp = rcu_get_root();
rnp              3306 kernel/rcu/tree.c 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              3308 kernel/rcu/tree.c 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              3349 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              3368 kernel/rcu/tree.c 		rnp = rcu_state.level[i];
rnp              3369 kernel/rcu/tree.c 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
rnp              3370 kernel/rcu/tree.c 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
rnp              3371 kernel/rcu/tree.c 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
rnp              3373 kernel/rcu/tree.c 			raw_spin_lock_init(&rnp->fqslock);
rnp              3374 kernel/rcu/tree.c 			lockdep_set_class_and_name(&rnp->fqslock,
rnp              3376 kernel/rcu/tree.c 			rnp->gp_seq = rcu_state.gp_seq;
rnp              3377 kernel/rcu/tree.c 			rnp->gp_seq_needed = rcu_state.gp_seq;
rnp              3378 kernel/rcu/tree.c 			rnp->completedqs = rcu_state.gp_seq;
rnp              3379 kernel/rcu/tree.c 			rnp->qsmask = 0;
rnp              3380 kernel/rcu/tree.c 			rnp->qsmaskinit = 0;
rnp              3381 kernel/rcu/tree.c 			rnp->grplo = j * cpustride;
rnp              3382 kernel/rcu/tree.c 			rnp->grphi = (j + 1) * cpustride - 1;
rnp              3383 kernel/rcu/tree.c 			if (rnp->grphi >= nr_cpu_ids)
rnp              3384 kernel/rcu/tree.c 				rnp->grphi = nr_cpu_ids - 1;
rnp              3386 kernel/rcu/tree.c 				rnp->grpnum = 0;
rnp              3387 kernel/rcu/tree.c 				rnp->grpmask = 0;
rnp              3388 kernel/rcu/tree.c 				rnp->parent = NULL;
rnp              3390 kernel/rcu/tree.c 				rnp->grpnum = j % levelspread[i - 1];
rnp              3391 kernel/rcu/tree.c 				rnp->grpmask = BIT(rnp->grpnum);
rnp              3392 kernel/rcu/tree.c 				rnp->parent = rcu_state.level[i - 1] +
rnp              3395 kernel/rcu/tree.c 			rnp->level = i;
rnp              3396 kernel/rcu/tree.c 			INIT_LIST_HEAD(&rnp->blkd_tasks);
rnp              3397 kernel/rcu/tree.c 			rcu_init_one_nocb(rnp);
rnp              3398 kernel/rcu/tree.c 			init_waitqueue_head(&rnp->exp_wq[0]);
rnp              3399 kernel/rcu/tree.c 			init_waitqueue_head(&rnp->exp_wq[1]);
rnp              3400 kernel/rcu/tree.c 			init_waitqueue_head(&rnp->exp_wq[2]);
rnp              3401 kernel/rcu/tree.c 			init_waitqueue_head(&rnp->exp_wq[3]);
rnp              3402 kernel/rcu/tree.c 			spin_lock_init(&rnp->exp_lock);
rnp              3408 kernel/rcu/tree.c 	rnp = rcu_first_leaf_node();
rnp              3410 kernel/rcu/tree.c 		while (i > rnp->grphi)
rnp              3411 kernel/rcu/tree.c 			rnp++;
rnp              3412 kernel/rcu/tree.c 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
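
Annotation: the two-line scan above assigns each CPU's ->mynode by walking the leaf array once and advancing whenever the CPU number passes the current leaf's ->grphi, which works because both CPU numbers and the leaf array are sorted. A tiny sketch (invented data):

    #include <stdio.h>

    struct leaf { int grplo, grphi; };

    int main(void)
    {
        /* Two leaves covering CPUs 0-3 and 4-7, as rcu_init_one() might. */
        struct leaf leaves[] = { { 0, 3 }, { 4, 7 } };
        struct leaf *lp = &leaves[0];
        int cpu;

        for (cpu = 0; cpu <= 7; cpu++) {
            while (cpu > lp->grphi)
                lp++;    /* same advance as in rcu_init_one() above */
            printf("cpu %d -> leaf %d-%d\n", cpu, lp->grplo, lp->grphi);
        }
        return 0;
    }
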
rnp              3504 kernel/rcu/tree.c 	struct rcu_node *rnp;
rnp              3508 kernel/rcu/tree.c 	rcu_for_each_node_breadth_first(rnp) {
rnp              3509 kernel/rcu/tree.c 		if (rnp->level != level) {
rnp              3512 kernel/rcu/tree.c 			level = rnp->level;
rnp              3514 kernel/rcu/tree.c 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
rnp               134 kernel/rcu/tree.h #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
rnp               410 kernel/rcu/tree.h static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
rnp               412 kernel/rcu/tree.h static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
rnp               414 kernel/rcu/tree.h static int rcu_print_task_exp_stall(struct rcu_node *rnp);
rnp               415 kernel/rcu/tree.h static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
rnp               418 kernel/rcu/tree.h static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
rnp               419 kernel/rcu/tree.h static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
rnp               420 kernel/rcu/tree.h static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
rnp               427 kernel/rcu/tree.h static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
rnp               431 kernel/rcu/tree.h static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
rnp               433 kernel/rcu/tree.h static void rcu_init_one_nocb(struct rcu_node *rnp);
rnp               473 kernel/rcu/tree.h static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
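
Annotation: leaf_node_cpu_bit() at tree.h line 134 above maps a CPU number to its bit within the owning leaf's masks by subtracting ->grplo, so every leaf's masks start at bit 0 no matter which CPUs the leaf covers. For example (macro copied from the listing; the harness is invented):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))
    #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))

    struct rcu_node_toy { int grplo, grphi; };

    int main(void)
    {
        struct rcu_node_toy leaf = { .grplo = 4, .grphi = 7 };

        /* CPU 6 lives in the leaf covering 4-7, so it gets bit 2 (0x4). */
        printf("0x%lx\n", leaf_node_cpu_bit(&leaf, 6));
        return 0;
    }
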
rnp                13 kernel/rcu/tree_exp.h static int rcu_print_task_exp_stall(struct rcu_node *rnp);
rnp                78 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp                90 kernel/rcu/tree_exp.h 	rcu_for_each_leaf_node(rnp) {
rnp                91 kernel/rcu/tree_exp.h 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp                92 kernel/rcu/tree_exp.h 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
rnp                93 kernel/rcu/tree_exp.h 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp                98 kernel/rcu/tree_exp.h 		oldmask = rnp->expmaskinit;
rnp                99 kernel/rcu/tree_exp.h 		rnp->expmaskinit = rnp->expmaskinitnext;
rnp               100 kernel/rcu/tree_exp.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               107 kernel/rcu/tree_exp.h 		mask = rnp->grpmask;
rnp               108 kernel/rcu/tree_exp.h 		rnp_up = rnp->parent;
rnp               131 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               134 kernel/rcu/tree_exp.h 	rcu_for_each_node_breadth_first(rnp) {
rnp               135 kernel/rcu/tree_exp.h 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               136 kernel/rcu/tree_exp.h 		WARN_ON_ONCE(rnp->expmask);
rnp               137 kernel/rcu/tree_exp.h 		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
rnp               138 kernel/rcu/tree_exp.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               151 kernel/rcu/tree_exp.h static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
rnp               153 kernel/rcu/tree_exp.h 	raw_lockdep_assert_held_rcu_node(rnp);
rnp               155 kernel/rcu/tree_exp.h 	return rnp->exp_tasks == NULL &&
rnp               156 kernel/rcu/tree_exp.h 	       READ_ONCE(rnp->expmask) == 0;
rnp               164 kernel/rcu/tree_exp.h static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
rnp               169 kernel/rcu/tree_exp.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               170 kernel/rcu/tree_exp.h 	ret = sync_rcu_preempt_exp_done(rnp);
rnp               171 kernel/rcu/tree_exp.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               187 kernel/rcu/tree_exp.h static void __rcu_report_exp_rnp(struct rcu_node *rnp,
rnp               189 kernel/rcu/tree_exp.h 	__releases(rnp->lock)
rnp               194 kernel/rcu/tree_exp.h 		if (!sync_rcu_preempt_exp_done(rnp)) {
rnp               195 kernel/rcu/tree_exp.h 			if (!rnp->expmask)
rnp               196 kernel/rcu/tree_exp.h 				rcu_initiate_boost(rnp, flags);
rnp               198 kernel/rcu/tree_exp.h 				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               201 kernel/rcu/tree_exp.h 		if (rnp->parent == NULL) {
rnp               202 kernel/rcu/tree_exp.h 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               209 kernel/rcu/tree_exp.h 		mask = rnp->grpmask;
rnp               210 kernel/rcu/tree_exp.h 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
rnp               211 kernel/rcu/tree_exp.h 		rnp = rnp->parent;
rnp               212 kernel/rcu/tree_exp.h 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
rnp               213 kernel/rcu/tree_exp.h 		WARN_ON_ONCE(!(rnp->expmask & mask));
rnp               214 kernel/rcu/tree_exp.h 		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
rnp               222 kernel/rcu/tree_exp.h static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
rnp               226 kernel/rcu/tree_exp.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               227 kernel/rcu/tree_exp.h 	__rcu_report_exp_rnp(rnp, wake, flags);
rnp               234 kernel/rcu/tree_exp.h static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
rnp               239 kernel/rcu/tree_exp.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               240 kernel/rcu/tree_exp.h 	if (!(rnp->expmask & mask)) {
rnp               241 kernel/rcu/tree_exp.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               244 kernel/rcu/tree_exp.h 	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
rnp               245 kernel/rcu/tree_exp.h 	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
rnp               278 kernel/rcu/tree_exp.h 	struct rcu_node *rnp = rdp->mynode;
rnp               282 kernel/rcu/tree_exp.h 	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
rnp               283 kernel/rcu/tree_exp.h 	    (rnp == rnp_root ||
rnp               295 kernel/rcu/tree_exp.h 	for (; rnp != NULL; rnp = rnp->parent) {
rnp               300 kernel/rcu/tree_exp.h 		spin_lock(&rnp->exp_lock);
rnp               301 kernel/rcu/tree_exp.h 		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
rnp               304 kernel/rcu/tree_exp.h 			spin_unlock(&rnp->exp_lock);
rnp               305 kernel/rcu/tree_exp.h 			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
rnp               306 kernel/rcu/tree_exp.h 						  rnp->grplo, rnp->grphi,
rnp               308 kernel/rcu/tree_exp.h 			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
rnp               312 kernel/rcu/tree_exp.h 		rnp->exp_seq_rq = s; /* Followers can wait on us. */
rnp               313 kernel/rcu/tree_exp.h 		spin_unlock(&rnp->exp_lock);
rnp               314 kernel/rcu/tree_exp.h 		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
rnp               315 kernel/rcu/tree_exp.h 					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
rnp               341 kernel/rcu/tree_exp.h 	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
rnp               343 kernel/rcu/tree_exp.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               347 kernel/rcu/tree_exp.h 	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
rnp               348 kernel/rcu/tree_exp.h 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
rnp               353 kernel/rcu/tree_exp.h 		    !(rnp->qsmaskinitnext & mask)) {
rnp               363 kernel/rcu/tree_exp.h 	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
rnp               370 kernel/rcu/tree_exp.h 	if (rcu_preempt_has_tasks(rnp))
rnp               371 kernel/rcu/tree_exp.h 		rnp->exp_tasks = rnp->blkd_tasks.next;
rnp               372 kernel/rcu/tree_exp.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               375 kernel/rcu/tree_exp.h 	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
rnp               376 kernel/rcu/tree_exp.h 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
rnp               395 kernel/rcu/tree_exp.h 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               396 kernel/rcu/tree_exp.h 		if ((rnp->qsmaskinitnext & mask) &&
rnp               397 kernel/rcu/tree_exp.h 		    (rnp->expmask & mask)) {
rnp               399 kernel/rcu/tree_exp.h 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               405 kernel/rcu/tree_exp.h 		if (!(rnp->expmask & mask))
rnp               407 kernel/rcu/tree_exp.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               412 kernel/rcu/tree_exp.h 		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
rnp               422 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               429 kernel/rcu/tree_exp.h 	rcu_for_each_leaf_node(rnp) {
rnp               430 kernel/rcu/tree_exp.h 		rnp->exp_need_flush = false;
rnp               431 kernel/rcu/tree_exp.h 		if (!READ_ONCE(rnp->expmask))
rnp               435 kernel/rcu/tree_exp.h 		    rcu_is_last_leaf_node(rnp)) {
rnp               437 kernel/rcu/tree_exp.h 			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
rnp               440 kernel/rcu/tree_exp.h 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
rnp               441 kernel/rcu/tree_exp.h 		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
rnp               443 kernel/rcu/tree_exp.h 		if (unlikely(cpu > rnp->grphi - rnp->grplo))
rnp               446 kernel/rcu/tree_exp.h 			cpu += rnp->grplo;
rnp               447 kernel/rcu/tree_exp.h 		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
rnp               448 kernel/rcu/tree_exp.h 		rnp->exp_need_flush = true;
rnp               452 kernel/rcu/tree_exp.h 	rcu_for_each_leaf_node(rnp)
rnp               453 kernel/rcu/tree_exp.h 		if (rnp->exp_need_flush)
rnp               454 kernel/rcu/tree_exp.h 			flush_work(&rnp->rew.rew_work);
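
Annotation: sync_rcu_exp_select_cpus() above fans the per-leaf CPU scan out to one work item per rcu_node, queues each near that leaf's CPUs, and then flushes only the items it actually queued, tracked by ->exp_need_flush. A pthread analog of that queue-then-flush shape (threads standing in for workqueue items; leaf 2 is skipped to model an all-idle leaf):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_LEAVES 4

    struct leaf {
        pthread_t worker;
        bool need_flush;     /* mirrors ->exp_need_flush */
        int id;
    };

    static void *scan_leaf(void *arg)
    {
        struct leaf *lp = arg;

        printf("scanning leaf %d\n", lp->id); /* per-leaf CPU scan here */
        return NULL;
    }

    int main(void)
    {
        struct leaf leaves[NR_LEAVES] = { { .id = 0 }, { .id = 1 },
                                          { .id = 2 }, { .id = 3 } };
        int i;

        /* "queue_work_on()": start one scan per leaf that needs it. */
        for (i = 0; i < NR_LEAVES; i++) {
            if (i == 2)      /* pretend leaf 2 has nothing to do */
                continue;
            pthread_create(&leaves[i].worker, NULL, scan_leaf, &leaves[i]);
            leaves[i].need_flush = true;
        }

        /* "flush_work()": wait only for the scans actually started. */
        for (i = 0; i < NR_LEAVES; i++)
            if (leaves[i].need_flush)
                pthread_join(leaves[i].worker, NULL);
        return 0;
    }
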
rnp               464 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               486 kernel/rcu/tree_exp.h 		rcu_for_each_leaf_node(rnp) {
rnp               487 kernel/rcu/tree_exp.h 			ndetected += rcu_print_task_exp_stall(rnp);
rnp               488 kernel/rcu/tree_exp.h 			for_each_leaf_node_possible_cpu(rnp, cpu) {
rnp               491 kernel/rcu/tree_exp.h 				mask = leaf_node_cpu_bit(rnp, cpu);
rnp               492 kernel/rcu/tree_exp.h 				if (!(READ_ONCE(rnp->expmask) & mask))
rnp               498 kernel/rcu/tree_exp.h 					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
rnp               499 kernel/rcu/tree_exp.h 					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
rnp               508 kernel/rcu/tree_exp.h 			rcu_for_each_node_breadth_first(rnp) {
rnp               509 kernel/rcu/tree_exp.h 				if (rnp == rnp_root)
rnp               511 kernel/rcu/tree_exp.h 				if (sync_rcu_preempt_exp_done_unlocked(rnp))
rnp               514 kernel/rcu/tree_exp.h 					rnp->level, rnp->grplo, rnp->grphi,
rnp               515 kernel/rcu/tree_exp.h 					READ_ONCE(rnp->expmask),
rnp               516 kernel/rcu/tree_exp.h 					".T"[!!rnp->exp_tasks]);
rnp               520 kernel/rcu/tree_exp.h 		rcu_for_each_leaf_node(rnp) {
rnp               521 kernel/rcu/tree_exp.h 			for_each_leaf_node_possible_cpu(rnp, cpu) {
rnp               522 kernel/rcu/tree_exp.h 				mask = leaf_node_cpu_bit(rnp, cpu);
rnp               523 kernel/rcu/tree_exp.h 				if (!(READ_ONCE(rnp->expmask) & mask))
rnp               540 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               551 kernel/rcu/tree_exp.h 	rcu_for_each_node_breadth_first(rnp) {
rnp               552 kernel/rcu/tree_exp.h 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
rnp               553 kernel/rcu/tree_exp.h 			spin_lock(&rnp->exp_lock);
rnp               555 kernel/rcu/tree_exp.h 			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
rnp               556 kernel/rcu/tree_exp.h 				rnp->exp_seq_rq = s;
rnp               557 kernel/rcu/tree_exp.h 			spin_unlock(&rnp->exp_lock);
rnp               560 kernel/rcu/tree_exp.h 		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
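
Annotation: both the funnel wait at tree_exp.h line 308 above and this wakeup loop hash the expedited sequence number into one of four wait queues with rcu_seq_ctr(s) & 0x3. The low two bits of the sequence carry grace-period state, so consecutive grace periods land on consecutive queues and no queue is reused until three further grace periods have elapsed. A quick demonstration (rcu_seq_ctr() reproduced from kernel/rcu/rcu.h as I understand it):

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT 2      /* low bits hold GP state */
    #define rcu_seq_ctr(s) ((s) >> RCU_SEQ_CTR_SHIFT)

    int main(void)
    {
        unsigned long s;

        /* Each completed GP advances the sequence by 4 (one full count). */
        for (s = 0; s < 6 * 4; s += 4)
            printf("seq %2lu -> exp_wq[%lu]\n", s, rcu_seq_ctr(s) & 0x3);
        return 0;
    }
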
rnp               603 kernel/rcu/tree_exp.h 	struct rcu_node *rnp = rdp->mynode;
rnp               636 kernel/rcu/tree_exp.h 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               637 kernel/rcu/tree_exp.h 		if (rnp->expmask & rdp->grpmask) {
rnp               641 kernel/rcu/tree_exp.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               681 kernel/rcu/tree_exp.h static int rcu_print_task_exp_stall(struct rcu_node *rnp)
rnp               686 kernel/rcu/tree_exp.h 	if (!rnp->exp_tasks)
rnp               688 kernel/rcu/tree_exp.h 	t = list_entry(rnp->exp_tasks->prev,
rnp               690 kernel/rcu/tree_exp.h 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
rnp               713 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               716 kernel/rcu/tree_exp.h 	rnp = rdp->mynode;
rnp               717 kernel/rcu/tree_exp.h 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
rnp               734 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               737 kernel/rcu/tree_exp.h 	rnp = rdp->mynode;
rnp               740 kernel/rcu/tree_exp.h 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
rnp               764 kernel/rcu/tree_exp.h static int rcu_print_task_exp_stall(struct rcu_node *rnp)
rnp               795 kernel/rcu/tree_exp.h 	struct rcu_node *rnp;
rnp               830 kernel/rcu/tree_exp.h 	rnp = rcu_get_root();
rnp               831 kernel/rcu/tree_exp.h 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
rnp                84 kernel/rcu/tree_plugin.h static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
rnp               130 kernel/rcu/tree_plugin.h static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
rnp               131 kernel/rcu/tree_plugin.h 	__releases(rnp->lock) /* But leaves rrupts disabled. */
rnp               133 kernel/rcu/tree_plugin.h 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
rnp               134 kernel/rcu/tree_plugin.h 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
rnp               135 kernel/rcu/tree_plugin.h 			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
rnp               136 kernel/rcu/tree_plugin.h 			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
rnp               139 kernel/rcu/tree_plugin.h 	raw_lockdep_assert_held_rcu_node(rnp);
rnp               140 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(rdp->mynode != rnp);
rnp               141 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
rnp               143 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
rnp               164 kernel/rcu/tree_plugin.h 		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
rnp               182 kernel/rcu/tree_plugin.h 		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
rnp               195 kernel/rcu/tree_plugin.h 		list_add(&t->rcu_node_entry, rnp->exp_tasks);
rnp               206 kernel/rcu/tree_plugin.h 		list_add(&t->rcu_node_entry, rnp->gp_tasks);
rnp               222 kernel/rcu/tree_plugin.h 	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
rnp               223 kernel/rcu/tree_plugin.h 		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
rnp               224 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
rnp               226 kernel/rcu/tree_plugin.h 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp               227 kernel/rcu/tree_plugin.h 		rnp->exp_tasks = &t->rcu_node_entry;
rnp               229 kernel/rcu/tree_plugin.h 		     !(rnp->qsmask & rdp->grpmask));
rnp               231 kernel/rcu/tree_plugin.h 		     !(rnp->expmask & rdp->grpmask));
rnp               232 kernel/rcu/tree_plugin.h 	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
rnp               289 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp;
rnp               298 kernel/rcu/tree_plugin.h 		rnp = rdp->mynode;
rnp               299 kernel/rcu/tree_plugin.h 		raw_spin_lock_rcu_node(rnp);
rnp               301 kernel/rcu/tree_plugin.h 		t->rcu_blocked_node = rnp;
rnp               308 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
rnp               312 kernel/rcu/tree_plugin.h 				       (rnp->qsmask & rdp->grpmask)
rnp               313 kernel/rcu/tree_plugin.h 				       ? rnp->gp_seq
rnp               314 kernel/rcu/tree_plugin.h 				       : rcu_seq_snap(&rnp->gp_seq));
rnp               315 kernel/rcu/tree_plugin.h 		rcu_preempt_ctxt_queue(rnp, rdp);
rnp               341 kernel/rcu/tree_plugin.h static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
rnp               343 kernel/rcu/tree_plugin.h 	return READ_ONCE(rnp->gp_tasks) != NULL;
rnp               400 kernel/rcu/tree_plugin.h 					     struct rcu_node *rnp)
rnp               405 kernel/rcu/tree_plugin.h 	if (np == &rnp->blkd_tasks)
rnp               414 kernel/rcu/tree_plugin.h static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
rnp               416 kernel/rcu/tree_plugin.h 	return !list_empty(&rnp->blkd_tasks);
rnp               433 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp;
rnp               481 kernel/rcu/tree_plugin.h 		rnp = t->rcu_blocked_node;
rnp               482 kernel/rcu/tree_plugin.h 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rnp               483 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
rnp               484 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
rnp               485 kernel/rcu/tree_plugin.h 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
rnp               486 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
rnp               487 kernel/rcu/tree_plugin.h 			     (!empty_norm || rnp->qsmask));
rnp               488 kernel/rcu/tree_plugin.h 		empty_exp = sync_rcu_preempt_exp_done(rnp);
rnp               490 kernel/rcu/tree_plugin.h 		np = rcu_next_node_entry(t, rnp);
rnp               494 kernel/rcu/tree_plugin.h 						rnp->gp_seq, t->pid);
rnp               495 kernel/rcu/tree_plugin.h 		if (&t->rcu_node_entry == rnp->gp_tasks)
rnp               496 kernel/rcu/tree_plugin.h 			WRITE_ONCE(rnp->gp_tasks, np);
rnp               497 kernel/rcu/tree_plugin.h 		if (&t->rcu_node_entry == rnp->exp_tasks)
rnp               498 kernel/rcu/tree_plugin.h 			rnp->exp_tasks = np;
rnp               501 kernel/rcu/tree_plugin.h 			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
rnp               502 kernel/rcu/tree_plugin.h 			if (&t->rcu_node_entry == rnp->boost_tasks)
rnp               503 kernel/rcu/tree_plugin.h 				rnp->boost_tasks = np;
rnp               512 kernel/rcu/tree_plugin.h 		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
rnp               513 kernel/rcu/tree_plugin.h 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
rnp               515 kernel/rcu/tree_plugin.h 							 rnp->gp_seq,
rnp               516 kernel/rcu/tree_plugin.h 							 0, rnp->qsmask,
rnp               517 kernel/rcu/tree_plugin.h 							 rnp->level,
rnp               518 kernel/rcu/tree_plugin.h 							 rnp->grplo,
rnp               519 kernel/rcu/tree_plugin.h 							 rnp->grphi,
rnp               520 kernel/rcu/tree_plugin.h 							 !!rnp->gp_tasks);
rnp               521 kernel/rcu/tree_plugin.h 			rcu_report_unblock_qs_rnp(rnp, flags);
rnp               523 kernel/rcu/tree_plugin.h 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               528 kernel/rcu/tree_plugin.h 			rt_mutex_futex_unlock(&rnp->boost_mtx);
rnp               535 kernel/rcu/tree_plugin.h 			rcu_report_exp_rnp(rnp, true);
rnp               611 kernel/rcu/tree_plugin.h 		struct rcu_node *rnp = rdp->mynode;
rnp               615 kernel/rcu/tree_plugin.h 		      (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
rnp               657 kernel/rcu/tree_plugin.h static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
rnp               662 kernel/rcu/tree_plugin.h 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
rnp               663 kernel/rcu/tree_plugin.h 		dump_blkd_tasks(rnp, 10);
rnp               664 kernel/rcu/tree_plugin.h 	if (rcu_preempt_has_tasks(rnp) &&
rnp               665 kernel/rcu/tree_plugin.h 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
rnp               666 kernel/rcu/tree_plugin.h 		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
rnp               667 kernel/rcu/tree_plugin.h 		t = container_of(rnp->gp_tasks, struct task_struct,
rnp               670 kernel/rcu/tree_plugin.h 						rnp->gp_seq, t->pid);
rnp               672 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(rnp->qsmask);
rnp               743 kernel/rcu/tree_plugin.h dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
rnp               752 kernel/rcu/tree_plugin.h 	raw_lockdep_assert_held_rcu_node(rnp);
rnp               754 kernel/rcu/tree_plugin.h 		__func__, rnp->grplo, rnp->grphi, rnp->level,
rnp               755 kernel/rcu/tree_plugin.h 		(long)rnp->gp_seq, (long)rnp->completedqs);
rnp               756 kernel/rcu/tree_plugin.h 	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
rnp               760 kernel/rcu/tree_plugin.h 		__func__, READ_ONCE(rnp->gp_tasks), rnp->boost_tasks,
rnp               761 kernel/rcu/tree_plugin.h 		rnp->exp_tasks);
rnp               764 kernel/rcu/tree_plugin.h 	list_for_each(lhp, &rnp->blkd_tasks) {
rnp               770 kernel/rcu/tree_plugin.h 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
rnp               772 kernel/rcu/tree_plugin.h 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
rnp               865 kernel/rcu/tree_plugin.h static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
rnp               873 kernel/rcu/tree_plugin.h static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
rnp               893 kernel/rcu/tree_plugin.h static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
rnp               895 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(rnp->qsmask);
rnp               934 kernel/rcu/tree_plugin.h dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
rnp               936 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
rnp               964 kernel/rcu/tree_plugin.h static int rcu_boost(struct rcu_node *rnp)
rnp               970 kernel/rcu/tree_plugin.h 	if (READ_ONCE(rnp->exp_tasks) == NULL &&
rnp               971 kernel/rcu/tree_plugin.h 	    READ_ONCE(rnp->boost_tasks) == NULL)
rnp               974 kernel/rcu/tree_plugin.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               980 kernel/rcu/tree_plugin.h 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
rnp               981 kernel/rcu/tree_plugin.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               991 kernel/rcu/tree_plugin.h 	if (rnp->exp_tasks != NULL)
rnp               992 kernel/rcu/tree_plugin.h 		tb = rnp->exp_tasks;
rnp               994 kernel/rcu/tree_plugin.h 		tb = rnp->boost_tasks;
rnp              1013 kernel/rcu/tree_plugin.h 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
rnp              1014 kernel/rcu/tree_plugin.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1016 kernel/rcu/tree_plugin.h 	rt_mutex_lock(&rnp->boost_mtx);
rnp              1017 kernel/rcu/tree_plugin.h 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
rnp              1019 kernel/rcu/tree_plugin.h 	return READ_ONCE(rnp->exp_tasks) != NULL ||
rnp              1020 kernel/rcu/tree_plugin.h 	       READ_ONCE(rnp->boost_tasks) != NULL;
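
Annotation: rcu_boost() above gives the preempted reader a proxy-locked rt_mutex and then blocks on it, letting priority inheritance lend the booster's priority to the reader until its outermost rcu_read_unlock(). POSIX exposes no proxy-lock primitive like rt_mutex_init_proxy_locked(), but the donation itself can be sketched with a PTHREAD_PRIO_INHERIT mutex; here the reader takes the lock itself and the handoff ordering is a crude sleep, so this is an illustration rather than a faithful model:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t boost_mtx;

    static void *reader(void *arg)
    {
        pthread_mutex_lock(&boost_mtx);   /* "enter" the read-side section */
        sleep(1);                         /* dawdle at low priority */
        pthread_mutex_unlock(&boost_mtx); /* "rcu_read_unlock()" */
        return NULL;
    }

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_t t;

        pthread_mutexattr_init(&attr);
        /* A waiter on this mutex donates its priority to the owner. */
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&boost_mtx, &attr);

        pthread_create(&t, NULL, reader, NULL);
        usleep(100 * 1000);               /* crude: let the reader lock first */

        pthread_mutex_lock(&boost_mtx);   /* block here, donating priority */
        printf("reader left its critical section\n");
        pthread_mutex_unlock(&boost_mtx);
        pthread_join(t, NULL);
        return 0;
    }
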
rnp              1028 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp = (struct rcu_node *)arg;
rnp              1034 kernel/rcu/tree_plugin.h 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
rnp              1036 kernel/rcu/tree_plugin.h 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
rnp              1038 kernel/rcu/tree_plugin.h 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
rnp              1039 kernel/rcu/tree_plugin.h 		more2boost = rcu_boost(rnp);
rnp              1045 kernel/rcu/tree_plugin.h 			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
rnp              1067 kernel/rcu/tree_plugin.h static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
rnp              1068 kernel/rcu/tree_plugin.h 	__releases(rnp->lock)
rnp              1070 kernel/rcu/tree_plugin.h 	raw_lockdep_assert_held_rcu_node(rnp);
rnp              1071 kernel/rcu/tree_plugin.h 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
rnp              1072 kernel/rcu/tree_plugin.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1075 kernel/rcu/tree_plugin.h 	if (rnp->exp_tasks != NULL ||
rnp              1076 kernel/rcu/tree_plugin.h 	    (rnp->gp_tasks != NULL &&
rnp              1077 kernel/rcu/tree_plugin.h 	     rnp->boost_tasks == NULL &&
rnp              1078 kernel/rcu/tree_plugin.h 	     rnp->qsmask == 0 &&
rnp              1079 kernel/rcu/tree_plugin.h 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
rnp              1080 kernel/rcu/tree_plugin.h 		if (rnp->exp_tasks == NULL)
rnp              1081 kernel/rcu/tree_plugin.h 			rnp->boost_tasks = rnp->gp_tasks;
rnp              1082 kernel/rcu/tree_plugin.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1083 kernel/rcu/tree_plugin.h 		rcu_wake_cond(rnp->boost_kthread_task,
rnp              1084 kernel/rcu/tree_plugin.h 			      rnp->boost_kthread_status);
rnp              1086 kernel/rcu/tree_plugin.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1104 kernel/rcu/tree_plugin.h static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
rnp              1106 kernel/rcu/tree_plugin.h 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
rnp              1114 kernel/rcu/tree_plugin.h static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
rnp              1116 kernel/rcu/tree_plugin.h 	int rnp_index = rnp - rcu_get_root();
rnp              1124 kernel/rcu/tree_plugin.h 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
rnp              1129 kernel/rcu/tree_plugin.h 	if (rnp->boost_kthread_task != NULL)
rnp              1132 kernel/rcu/tree_plugin.h 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
rnp              1137 kernel/rcu/tree_plugin.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp              1138 kernel/rcu/tree_plugin.h 	rnp->boost_kthread_task = t;
rnp              1139 kernel/rcu/tree_plugin.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1154 kernel/rcu/tree_plugin.h static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
rnp              1156 kernel/rcu/tree_plugin.h 	struct task_struct *t = rnp->boost_kthread_task;
rnp              1157 kernel/rcu/tree_plugin.h 	unsigned long mask = rcu_rnp_online_cpus(rnp);
rnp              1165 kernel/rcu/tree_plugin.h 	for_each_leaf_node_possible_cpu(rnp, cpu)
rnp              1166 kernel/rcu/tree_plugin.h 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
rnp              1180 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp;
rnp              1182 kernel/rcu/tree_plugin.h 	rcu_for_each_leaf_node(rnp)
rnp              1183 kernel/rcu/tree_plugin.h 		rcu_spawn_one_boost_kthread(rnp);
rnp              1189 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp = rdp->mynode;
rnp              1193 kernel/rcu/tree_plugin.h 		rcu_spawn_one_boost_kthread(rnp);
rnp              1198 kernel/rcu/tree_plugin.h static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
rnp              1199 kernel/rcu/tree_plugin.h 	__releases(rnp->lock)
rnp              1201 kernel/rcu/tree_plugin.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp              1209 kernel/rcu/tree_plugin.h static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
rnp              1213 kernel/rcu/tree_plugin.h static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
rnp              1303 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp;
rnp              1310 kernel/rcu/tree_plugin.h 	rnp = rdp->mynode;
rnp              1318 kernel/rcu/tree_plugin.h 				  rcu_seq_current(&rnp->gp_seq)) ||
rnp              1384 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp;
rnp              1421 kernel/rcu/tree_plugin.h 		rnp = rdp->mynode;
rnp              1422 kernel/rcu/tree_plugin.h 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rnp              1423 kernel/rcu/tree_plugin.h 		needwake = rcu_accelerate_cbs(rnp, rdp);
rnp              1424 kernel/rcu/tree_plugin.h 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
rnp              1619 kernel/rcu/tree_plugin.h static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
rnp              1621 kernel/rcu/tree_plugin.h 	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
rnp              1624 kernel/rcu/tree_plugin.h static void rcu_init_one_nocb(struct rcu_node *rnp)
rnp              1626 kernel/rcu/tree_plugin.h 	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
rnp              1627 kernel/rcu/tree_plugin.h 	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
rnp              1956 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp;
rnp              1983 kernel/rcu/tree_plugin.h 		rnp = rdp->mynode;
rnp              1994 kernel/rcu/tree_plugin.h 		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
rnp              1995 kernel/rcu/tree_plugin.h 			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
rnp              1996 kernel/rcu/tree_plugin.h 			needwake_gp = rcu_advance_cbs(rnp, rdp);
rnp              1997 kernel/rcu/tree_plugin.h 			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
rnp              2048 kernel/rcu/tree_plugin.h 		rnp = my_rdp->mynode;
rnp              2049 kernel/rcu/tree_plugin.h 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
rnp              2051 kernel/rcu/tree_plugin.h 			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
rnp              2052 kernel/rcu/tree_plugin.h 			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
rnp              2054 kernel/rcu/tree_plugin.h 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
rnp              2096 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp = rdp->mynode;
rnp              2107 kernel/rcu/tree_plugin.h 	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
rnp              2108 kernel/rcu/tree_plugin.h 	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
rnp              2110 kernel/rcu/tree_plugin.h 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
rnp              2393 kernel/rcu/tree_plugin.h 	struct rcu_node *rnp = rdp->mynode;
rnp              2404 kernel/rcu/tree_plugin.h 		".W"[swait_active(&rnp->nocb_gp_wq[0])],
rnp              2405 kernel/rcu/tree_plugin.h 		".W"[swait_active(&rnp->nocb_gp_wq[1])],
rnp              2409 kernel/rcu/tree_plugin.h 		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
rnp              2489 kernel/rcu/tree_plugin.h static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
rnp              2494 kernel/rcu/tree_plugin.h static void rcu_init_one_nocb(struct rcu_node *rnp)
rnp               150 kernel/rcu/tree_stall.h 	struct rcu_node *rnp;
rnp               153 kernel/rcu/tree_stall.h 	rnp = rdp->mynode;
rnp               154 kernel/rcu/tree_stall.h 	raw_spin_lock_rcu_node(rnp);
rnp               156 kernel/rcu/tree_stall.h 		rdp->rcu_iw_gp_seq = rnp->gp_seq;
rnp               159 kernel/rcu/tree_stall.h 	raw_spin_unlock_rcu_node(rnp);
rnp               172 kernel/rcu/tree_stall.h static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
rnp               177 kernel/rcu/tree_stall.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               178 kernel/rcu/tree_stall.h 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
rnp               179 kernel/rcu/tree_stall.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               182 kernel/rcu/tree_stall.h 	t = list_entry(rnp->gp_tasks->prev,
rnp               184 kernel/rcu/tree_stall.h 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
rnp               192 kernel/rcu/tree_stall.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               199 kernel/rcu/tree_stall.h static int rcu_print_task_stall(struct rcu_node *rnp)
rnp               204 kernel/rcu/tree_stall.h 	if (!rcu_preempt_blocked_readers_cgp(rnp))
rnp               207 kernel/rcu/tree_stall.h 	       rnp->level, rnp->grplo, rnp->grphi);
rnp               208 kernel/rcu/tree_stall.h 	t = list_entry(rnp->gp_tasks->prev,
rnp               210 kernel/rcu/tree_stall.h 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
rnp               224 kernel/rcu/tree_stall.h static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
rnp               232 kernel/rcu/tree_stall.h static int rcu_print_task_stall(struct rcu_node *rnp)
rnp               248 kernel/rcu/tree_stall.h 	struct rcu_node *rnp;
rnp               250 kernel/rcu/tree_stall.h 	rcu_for_each_leaf_node(rnp) {
rnp               251 kernel/rcu/tree_stall.h 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               252 kernel/rcu/tree_stall.h 		for_each_leaf_node_possible_cpu(rnp, cpu)
rnp               253 kernel/rcu/tree_stall.h 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
rnp               256 kernel/rcu/tree_stall.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               361 kernel/rcu/tree_stall.h 	struct rcu_node *rnp;
rnp               375 kernel/rcu/tree_stall.h 	rcu_for_each_leaf_node(rnp) {
rnp               376 kernel/rcu/tree_stall.h 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               377 kernel/rcu/tree_stall.h 		ndetected += rcu_print_task_stall(rnp);
rnp               378 kernel/rcu/tree_stall.h 		if (rnp->qsmask != 0) {
rnp               379 kernel/rcu/tree_stall.h 			for_each_leaf_node_possible_cpu(rnp, cpu)
rnp               380 kernel/rcu/tree_stall.h 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
rnp               385 kernel/rcu/tree_stall.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               397 kernel/rcu/tree_stall.h 		rcu_for_each_leaf_node(rnp)
rnp               398 kernel/rcu/tree_stall.h 			rcu_print_detail_task_stall_rnp(rnp);
rnp               430 kernel/rcu/tree_stall.h 	struct rcu_node *rnp = rcu_get_root();
rnp               457 kernel/rcu/tree_stall.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               462 kernel/rcu/tree_stall.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               485 kernel/rcu/tree_stall.h 	struct rcu_node *rnp;
rnp               522 kernel/rcu/tree_stall.h 	rnp = rdp->mynode;
rnp               525 kernel/rcu/tree_stall.h 	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
rnp               560 kernel/rcu/tree_stall.h 	struct rcu_node *rnp;
rnp               574 kernel/rcu/tree_stall.h 	rcu_for_each_node_breadth_first(rnp) {
rnp               575 kernel/rcu/tree_stall.h 		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
rnp               578 kernel/rcu/tree_stall.h 			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
rnp               579 kernel/rcu/tree_stall.h 			(long)rnp->gp_seq_needed);
rnp               580 kernel/rcu/tree_stall.h 		if (!rcu_is_leaf_node(rnp))
rnp               582 kernel/rcu/tree_stall.h 		for_each_leaf_node_possible_cpu(rnp, cpu) {
rnp               605 kernel/rcu/tree_stall.h static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
rnp               622 kernel/rcu/tree_stall.h 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp               629 kernel/rcu/tree_stall.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               634 kernel/rcu/tree_stall.h 	if (rnp_root != rnp)
rnp               642 kernel/rcu/tree_stall.h 		if (rnp_root != rnp)
rnp               645 kernel/rcu/tree_stall.h 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rnp               649 kernel/rcu/tree_stall.h 	if (rnp_root != rnp)
rnp               651 kernel/rcu/tree_stall.h 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);