Lines matching refs: rsp

(Cross-reference listing of every line that uses the struct rcu_state pointer "rsp". Judging by the function names and line numbers, the file is kernel/rcu/tree.c from a Linux 4.4-era tree. Each entry gives that file's own line number, the matched source text, and the enclosing function; a trailing "argument" or "local" marks lines where rsp is declared rather than merely used.)

162 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
163 static void rcu_report_exp_rdp(struct rcu_state *rsp,
236 static int rcu_gp_in_progress(struct rcu_state *rsp) in rcu_gp_in_progress() argument
238 return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum); in rcu_gp_in_progress()
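	(The two matches at 236 and 238 are this helper in its entirety; reassembled here with only the braces and a comment added.)

	static int rcu_gp_in_progress(struct rcu_state *rsp)
	{
		/* Nonzero while the last-started GP has not yet completed. */
		return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
	}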
310 struct rcu_state *rsp; in rcu_momentary_dyntick_idle() local
322 for_each_rcu_flavor(rsp) { in rcu_momentary_dyntick_idle()
323 rdp = raw_cpu_ptr(rsp->rda); in rcu_momentary_dyntick_idle()
324 if (!(resched_mask & rsp->flavor_mask)) in rcu_momentary_dyntick_idle()
408 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
410 static void force_qs_rnp(struct rcu_state *rsp,
411 int (*f)(struct rcu_data *rsp, bool *isidle,
414 static void force_quiescent_state(struct rcu_state *rsp);
503 struct rcu_state *rsp; in show_rcu_gp_kthreads() local
505 for_each_rcu_flavor(rsp) { in show_rcu_gp_kthreads()
507 rsp->name, rsp->gp_state, rsp->gp_kthread->state); in show_rcu_gp_kthreads()
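	(Sketch of the whole function around 503-507; the pr_info() format string contains no "rsp" and so is absent from the listing. It is filled in from a kernel of this vintage and should be read as an assumption.)

	void show_rcu_gp_kthreads(void)
	{
		struct rcu_state *rsp;

		for_each_rcu_flavor(rsp) {
			/* Format string assumed; only the argument line is matched above. */
			pr_info("%s: wait state: %d ->state: %#lx\n",
				rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		}
	}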
533 struct rcu_state *rsp = NULL; in rcutorture_get_gp_data() local
537 rsp = rcu_state_p; in rcutorture_get_gp_data()
540 rsp = &rcu_bh_state; in rcutorture_get_gp_data()
543 rsp = &rcu_sched_state; in rcutorture_get_gp_data()
548 if (rsp != NULL) { in rcutorture_get_gp_data()
549 *flags = READ_ONCE(rsp->gp_flags); in rcutorture_get_gp_data()
550 *gpnum = READ_ONCE(rsp->gpnum); in rcutorture_get_gp_data()
551 *completed = READ_ONCE(rsp->completed); in rcutorture_get_gp_data()
584 static struct rcu_node *rcu_get_root(struct rcu_state *rsp) in rcu_get_root() argument
586 return &rsp->node[0]; in rcu_get_root()
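	(Matches 584 and 586 are the complete accessor, braces added. The root rcu_node is by construction the first element of the ->node[] array.)

	static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
	{
		return &rsp->node[0];
	}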
594 static int rcu_future_needs_gp(struct rcu_state *rsp) in rcu_future_needs_gp() argument
596 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_future_needs_gp()
609 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) in cpu_needs_another_gp() argument
613 if (rcu_gp_in_progress(rsp)) in cpu_needs_another_gp()
615 if (rcu_future_needs_gp(rsp)) in cpu_needs_another_gp()
623 ULONG_CMP_LT(READ_ONCE(rsp->completed), in cpu_needs_another_gp()
638 struct rcu_state *rsp; in rcu_eqs_enter_common() local
654 for_each_rcu_flavor(rsp) { in rcu_eqs_enter_common()
655 rdp = this_cpu_ptr(rsp->rda); in rcu_eqs_enter_common()
1073 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
1108 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
1124 if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies)) in rcu_implicit_dynticks_qs()
1128 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl")); in rcu_implicit_dynticks_qs()
1156 rdp->rsp->gp_start + jiffies_till_sched_qs) || in rcu_implicit_dynticks_qs()
1157 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { in rcu_implicit_dynticks_qs()
1158 if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) { in rcu_implicit_dynticks_qs()
1163 READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask); in rcu_implicit_dynticks_qs()
1165 rdp->rsp->jiffies_resched += 5; /* Enable beating. */ in rcu_implicit_dynticks_qs()
1166 } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { in rcu_implicit_dynticks_qs()
1169 rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */ in rcu_implicit_dynticks_qs()
1176 static void record_gp_stall_check_time(struct rcu_state *rsp) in record_gp_stall_check_time() argument
1181 rsp->gp_start = j; in record_gp_stall_check_time()
1184 WRITE_ONCE(rsp->jiffies_stall, j + j1); in record_gp_stall_check_time()
1185 rsp->jiffies_resched = j + j1 / 2; in record_gp_stall_check_time()
1186 rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs); in record_gp_stall_check_time()
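	(The five matches at 1176-1186 reassemble into the function below; the unmatched local declarations, the write barrier, and the rcu_jiffies_till_stall_check() call are reconstructed from a same-era kernel and are assumptions.)

	static void record_gp_stall_check_time(struct rcu_state *rsp)
	{
		unsigned long j = jiffies;	/* assumed: no "rsp" on these lines */
		unsigned long j1;

		rsp->gp_start = j;
		smp_wmb(); /* Record start time before stall time (assumed). */
		j1 = rcu_jiffies_till_stall_check();
		WRITE_ONCE(rsp->jiffies_stall, j + j1);
		rsp->jiffies_resched = j + j1 / 2;	/* Resched kick at half the stall timeout. */
		rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
	}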
1192 static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp) in rcu_check_gp_kthread_starvation() argument
1198 gpa = READ_ONCE(rsp->gp_activity); in rcu_check_gp_kthread_starvation()
1201 rsp->name, j - gpa, in rcu_check_gp_kthread_starvation()
1202 rsp->gpnum, rsp->completed, in rcu_check_gp_kthread_starvation()
1203 rsp->gp_flags, rsp->gp_state, in rcu_check_gp_kthread_starvation()
1204 rsp->gp_kthread ? rsp->gp_kthread->state : 0); in rcu_check_gp_kthread_starvation()
1210 static void rcu_dump_cpu_stacks(struct rcu_state *rsp) in rcu_dump_cpu_stacks() argument
1216 rcu_for_each_leaf_node(rsp, rnp) { in rcu_dump_cpu_stacks()
1227 static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum) in print_other_cpu_stall() argument
1235 struct rcu_node *rnp = rcu_get_root(rsp); in print_other_cpu_stall()
1241 delta = jiffies - READ_ONCE(rsp->jiffies_stall); in print_other_cpu_stall()
1242 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { in print_other_cpu_stall()
1246 WRITE_ONCE(rsp->jiffies_stall, in print_other_cpu_stall()
1256 rsp->name); in print_other_cpu_stall()
1258 rcu_for_each_leaf_node(rsp, rnp) { in print_other_cpu_stall()
1264 print_cpu_stall_info(rsp, in print_other_cpu_stall()
1274 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; in print_other_cpu_stall()
1276 smp_processor_id(), (long)(jiffies - rsp->gp_start), in print_other_cpu_stall()
1277 (long)rsp->gpnum, (long)rsp->completed, totqlen); in print_other_cpu_stall()
1279 rcu_dump_cpu_stacks(rsp); in print_other_cpu_stall()
1281 if (READ_ONCE(rsp->gpnum) != gpnum || in print_other_cpu_stall()
1282 READ_ONCE(rsp->completed) == gpnum) { in print_other_cpu_stall()
1286 gpa = READ_ONCE(rsp->gp_activity); in print_other_cpu_stall()
1288 rsp->name, j - gpa, j, gpa, in print_other_cpu_stall()
1290 rcu_get_root(rsp)->qsmask); in print_other_cpu_stall()
1297 rcu_print_detail_task_stall(rsp); in print_other_cpu_stall()
1299 rcu_check_gp_kthread_starvation(rsp); in print_other_cpu_stall()
1301 force_quiescent_state(rsp); /* Kick them all. */ in print_other_cpu_stall()
1304 static void print_cpu_stall(struct rcu_state *rsp) in print_cpu_stall() argument
1308 struct rcu_node *rnp = rcu_get_root(rsp); in print_cpu_stall()
1316 pr_err("INFO: %s self-detected stall on CPU", rsp->name); in print_cpu_stall()
1318 print_cpu_stall_info(rsp, smp_processor_id()); in print_cpu_stall()
1321 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; in print_cpu_stall()
1323 jiffies - rsp->gp_start, in print_cpu_stall()
1324 (long)rsp->gpnum, (long)rsp->completed, totqlen); in print_cpu_stall()
1326 rcu_check_gp_kthread_starvation(rsp); in print_cpu_stall()
1328 rcu_dump_cpu_stacks(rsp); in print_cpu_stall()
1331 if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall))) in print_cpu_stall()
1332 WRITE_ONCE(rsp->jiffies_stall, in print_cpu_stall()
1346 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) in check_cpu_stall() argument
1355 if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp)) in check_cpu_stall()
1376 gpnum = READ_ONCE(rsp->gpnum); in check_cpu_stall()
1378 js = READ_ONCE(rsp->jiffies_stall); in check_cpu_stall()
1380 gps = READ_ONCE(rsp->gp_start); in check_cpu_stall()
1382 completed = READ_ONCE(rsp->completed); in check_cpu_stall()
1388 if (rcu_gp_in_progress(rsp) && in check_cpu_stall()
1392 print_cpu_stall(rsp); in check_cpu_stall()
1394 } else if (rcu_gp_in_progress(rsp) && in check_cpu_stall()
1398 print_other_cpu_stall(rsp, gpnum); in check_cpu_stall()
1413 struct rcu_state *rsp; in rcu_cpu_stall_reset() local
1415 for_each_rcu_flavor(rsp) in rcu_cpu_stall_reset()
1416 WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2); in rcu_cpu_stall_reset()
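	(Reassembled from matches 1413-1416; the signature is an assumption. Pushing ->jiffies_stall half of ULONG_MAX into the future effectively disables stall warnings until the next grace period rearms them.)

	void rcu_cpu_stall_reset(void)	/* assumed signature */
	{
		struct rcu_state *rsp;

		for_each_rcu_flavor(rsp)
			WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
	}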
1452 static unsigned long rcu_cbs_completed(struct rcu_state *rsp, in rcu_cbs_completed() argument
1462 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed) in rcu_cbs_completed()
1479 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, in trace_rcu_future_gp()
1499 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp); in rcu_start_future_gp()
1505 c = rcu_cbs_completed(rdp->rsp, rnp); in rcu_start_future_gp()
1548 c = rcu_cbs_completed(rdp->rsp, rnp_root); in rcu_start_future_gp()
1570 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp); in rcu_start_future_gp()
1587 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1591 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); in rcu_future_gp_cleanup()
1593 rcu_nocb_gp_cleanup(rsp, rnp); in rcu_future_gp_cleanup()
1608 static void rcu_gp_kthread_wake(struct rcu_state *rsp) in rcu_gp_kthread_wake() argument
1610 if (current == rsp->gp_kthread || in rcu_gp_kthread_wake()
1611 !READ_ONCE(rsp->gp_flags) || in rcu_gp_kthread_wake()
1612 !rsp->gp_kthread) in rcu_gp_kthread_wake()
1614 wake_up(&rsp->gp_wq); in rcu_gp_kthread_wake()
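	(Matches 1608-1614 cover everything except the early return, which contains no "rsp" and is restored here as an assumption. The current-task check keeps the GP kthread from pointlessly waking itself.)

	static void rcu_gp_kthread_wake(struct rcu_state *rsp)
	{
		if (current == rsp->gp_kthread ||
		    !READ_ONCE(rsp->gp_flags) ||
		    !rsp->gp_kthread)
			return;		/* assumed: line 1613 matches no "rsp" */
		wake_up(&rsp->gp_wq);
	}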
1629 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_accelerate_cbs() argument
1654 c = rcu_cbs_completed(rsp, rnp); in rcu_accelerate_cbs()
1683 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB")); in rcu_accelerate_cbs()
1685 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB")); in rcu_accelerate_cbs()
1699 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_advance_cbs() argument
1730 return rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_advance_cbs()
1739 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, in __note_gp_changes() argument
1749 ret = rcu_accelerate_cbs(rsp, rnp, rdp); in __note_gp_changes()
1754 ret = rcu_advance_cbs(rsp, rnp, rdp); in __note_gp_changes()
1758 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); in __note_gp_changes()
1768 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); in __note_gp_changes()
1778 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) in note_gp_changes() argument
1794 needwake = __note_gp_changes(rsp, rnp, rdp); in note_gp_changes()
1797 rcu_gp_kthread_wake(rsp); in note_gp_changes()
1800 static void rcu_gp_slow(struct rcu_state *rsp, int delay) in rcu_gp_slow() argument
1803 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) in rcu_gp_slow()
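	(Only 1800 and 1803 match; the rest of the condition and the sleep are reconstructed from a same-era kernel and are assumptions. This is the debug hook that artificially slows grace periods when the gp_*_delay parameters are set.)

	static void rcu_gp_slow(struct rcu_state *rsp, int delay)
	{
		if (delay > 0 &&	/* assumed */
		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
			schedule_timeout_uninterruptible(delay);	/* assumed */
	}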
1810 static int rcu_gp_init(struct rcu_state *rsp) in rcu_gp_init() argument
1814 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_init()
1816 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_init()
1819 if (!READ_ONCE(rsp->gp_flags)) { in rcu_gp_init()
1824 WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */ in rcu_gp_init()
1826 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { in rcu_gp_init()
1836 record_gp_stall_check_time(rsp); in rcu_gp_init()
1838 smp_store_release(&rsp->gpnum, rsp->gpnum + 1); in rcu_gp_init()
1839 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); in rcu_gp_init()
1848 rcu_for_each_leaf_node(rsp, rnp) { in rcu_gp_init()
1849 rcu_gp_slow(rsp, gp_preinit_delay); in rcu_gp_init()
1905 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_init()
1906 rcu_gp_slow(rsp, gp_init_delay); in rcu_gp_init()
1909 rdp = this_cpu_ptr(rsp->rda); in rcu_gp_init()
1912 WRITE_ONCE(rnp->gpnum, rsp->gpnum); in rcu_gp_init()
1913 if (WARN_ON_ONCE(rnp->completed != rsp->completed)) in rcu_gp_init()
1914 WRITE_ONCE(rnp->completed, rsp->completed); in rcu_gp_init()
1916 (void)__note_gp_changes(rsp, rnp, rdp); in rcu_gp_init()
1918 trace_rcu_grace_period_init(rsp->name, rnp->gpnum, in rcu_gp_init()
1923 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_init()
1933 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) in rcu_gp_fqs_check_wake() argument
1935 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs_check_wake()
1938 *gfp = READ_ONCE(rsp->gp_flags); in rcu_gp_fqs_check_wake()
1952 static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time) in rcu_gp_fqs() argument
1956 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs()
1958 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_fqs()
1959 rsp->n_force_qs++; in rcu_gp_fqs()
1962 if (is_sysidle_rcu_state(rsp)) { in rcu_gp_fqs()
1966 force_qs_rnp(rsp, dyntick_save_progress_counter, in rcu_gp_fqs()
1968 rcu_sysidle_report_gp(rsp, isidle, maxj); in rcu_gp_fqs()
1972 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj); in rcu_gp_fqs()
1975 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { in rcu_gp_fqs()
1978 WRITE_ONCE(rsp->gp_flags, in rcu_gp_fqs()
1979 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS); in rcu_gp_fqs()
1987 static void rcu_gp_cleanup(struct rcu_state *rsp) in rcu_gp_cleanup() argument
1993 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_cleanup()
1995 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_cleanup()
1998 gp_duration = jiffies - rsp->gp_start; in rcu_gp_cleanup()
1999 if (gp_duration > rsp->gp_max) in rcu_gp_cleanup()
2000 rsp->gp_max = gp_duration; in rcu_gp_cleanup()
2021 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_cleanup()
2026 WRITE_ONCE(rnp->completed, rsp->gpnum); in rcu_gp_cleanup()
2027 rdp = this_cpu_ptr(rsp->rda); in rcu_gp_cleanup()
2029 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
2031 nocb += rcu_future_gp_cleanup(rsp, rnp); in rcu_gp_cleanup()
2034 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_cleanup()
2035 rcu_gp_slow(rsp, gp_cleanup_delay); in rcu_gp_cleanup()
2037 rnp = rcu_get_root(rsp); in rcu_gp_cleanup()
2043 WRITE_ONCE(rsp->completed, rsp->gpnum); in rcu_gp_cleanup()
2044 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); in rcu_gp_cleanup()
2045 rsp->gp_state = RCU_GP_IDLE; in rcu_gp_cleanup()
2046 rdp = this_cpu_ptr(rsp->rda); in rcu_gp_cleanup()
2048 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
2049 if (needgp || cpu_needs_another_gp(rsp, rdp)) { in rcu_gp_cleanup()
2050 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); in rcu_gp_cleanup()
2051 trace_rcu_grace_period(rsp->name, in rcu_gp_cleanup()
2052 READ_ONCE(rsp->gpnum), in rcu_gp_cleanup()
2067 struct rcu_state *rsp = arg; in rcu_gp_kthread() local
2068 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_kthread()
2075 trace_rcu_grace_period(rsp->name, in rcu_gp_kthread()
2076 READ_ONCE(rsp->gpnum), in rcu_gp_kthread()
2078 rsp->gp_state = RCU_GP_WAIT_GPS; in rcu_gp_kthread()
2079 wait_event_interruptible(rsp->gp_wq, in rcu_gp_kthread()
2080 READ_ONCE(rsp->gp_flags) & in rcu_gp_kthread()
2082 rsp->gp_state = RCU_GP_DONE_GPS; in rcu_gp_kthread()
2084 if (rcu_gp_init(rsp)) in rcu_gp_kthread()
2087 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_kthread()
2089 trace_rcu_grace_period(rsp->name, in rcu_gp_kthread()
2090 READ_ONCE(rsp->gpnum), in rcu_gp_kthread()
2104 rsp->jiffies_force_qs = jiffies + j; in rcu_gp_kthread()
2105 trace_rcu_grace_period(rsp->name, in rcu_gp_kthread()
2106 READ_ONCE(rsp->gpnum), in rcu_gp_kthread()
2108 rsp->gp_state = RCU_GP_WAIT_FQS; in rcu_gp_kthread()
2109 ret = wait_event_interruptible_timeout(rsp->gp_wq, in rcu_gp_kthread()
2110 rcu_gp_fqs_check_wake(rsp, &gf), j); in rcu_gp_kthread()
2111 rsp->gp_state = RCU_GP_DOING_FQS; in rcu_gp_kthread()
2118 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || in rcu_gp_kthread()
2120 trace_rcu_grace_period(rsp->name, in rcu_gp_kthread()
2121 READ_ONCE(rsp->gpnum), in rcu_gp_kthread()
2123 rcu_gp_fqs(rsp, first_gp_fqs); in rcu_gp_kthread()
2125 trace_rcu_grace_period(rsp->name, in rcu_gp_kthread()
2126 READ_ONCE(rsp->gpnum), in rcu_gp_kthread()
2129 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_kthread()
2133 WRITE_ONCE(rsp->gp_activity, jiffies); in rcu_gp_kthread()
2135 trace_rcu_grace_period(rsp->name, in rcu_gp_kthread()
2136 READ_ONCE(rsp->gpnum), in rcu_gp_kthread()
2150 rsp->gp_state = RCU_GP_CLEANUP; in rcu_gp_kthread()
2151 rcu_gp_cleanup(rsp); in rcu_gp_kthread()
2152 rsp->gp_state = RCU_GP_CLEANED; in rcu_gp_kthread()
2168 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_start_gp_advanced() argument
2171 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) { in rcu_start_gp_advanced()
2180 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); in rcu_start_gp_advanced()
2181 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), in rcu_start_gp_advanced()
2201 static bool rcu_start_gp(struct rcu_state *rsp) in rcu_start_gp() argument
2203 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); in rcu_start_gp()
2204 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_start_gp()
2215 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret; in rcu_start_gp()
2216 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret; in rcu_start_gp()
2227 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) in rcu_report_qs_rsp() argument
2228 __releases(rcu_get_root(rsp)->lock) in rcu_report_qs_rsp()
2230 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); in rcu_report_qs_rsp()
2231 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); in rcu_report_qs_rsp()
2232 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); in rcu_report_qs_rsp()
2233 rcu_gp_kthread_wake(rsp); in rcu_report_qs_rsp()
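	(The six matches at 2227-2233 are the complete function; only the braces are added. Note the locking protocol encoded in __releases(): the caller enters holding the root rcu_node lock, and this function drops it before kicking the GP kthread.)

	static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
		__releases(rcu_get_root(rsp)->lock)
	{
		WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
		WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
		rcu_gp_kthread_wake(rsp);
	}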
2247 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, in rcu_report_qs_rnp() argument
2267 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum, in rcu_report_qs_rnp()
2297 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ in rcu_report_qs_rnp()
2307 static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp, in rcu_report_unblock_qs_rnp() argument
2315 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p || in rcu_report_unblock_qs_rnp()
2327 rcu_report_qs_rsp(rsp, flags); in rcu_report_unblock_qs_rnp()
2337 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags); in rcu_report_unblock_qs_rnp()
2350 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2386 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_report_qs_rdp()
2388 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); in rcu_report_qs_rdp()
2391 rcu_gp_kthread_wake(rsp); in rcu_report_qs_rdp()
2402 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2405 note_gp_changes(rsp, rdp); in rcu_check_quiescent_state()
2426 rcu_report_qs_rdp(rdp->cpu, rsp, rdp); in rcu_check_quiescent_state()
2435 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, in rcu_send_cbs_to_orphanage() argument
2448 rsp->qlen_lazy += rdp->qlen_lazy; in rcu_send_cbs_to_orphanage()
2449 rsp->qlen += rdp->qlen; in rcu_send_cbs_to_orphanage()
2465 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL]; in rcu_send_cbs_to_orphanage()
2466 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL]; in rcu_send_cbs_to_orphanage()
2476 *rsp->orphan_donetail = rdp->nxtlist; in rcu_send_cbs_to_orphanage()
2477 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL]; in rcu_send_cbs_to_orphanage()
2492 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags) in rcu_adopt_orphan_cbs() argument
2495 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); in rcu_adopt_orphan_cbs()
2499 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags)) in rcu_adopt_orphan_cbs()
2503 rdp->qlen_lazy += rsp->qlen_lazy; in rcu_adopt_orphan_cbs()
2504 rdp->qlen += rsp->qlen; in rcu_adopt_orphan_cbs()
2505 rdp->n_cbs_adopted += rsp->qlen; in rcu_adopt_orphan_cbs()
2506 if (rsp->qlen_lazy != rsp->qlen) in rcu_adopt_orphan_cbs()
2508 rsp->qlen_lazy = 0; in rcu_adopt_orphan_cbs()
2509 rsp->qlen = 0; in rcu_adopt_orphan_cbs()
2518 if (rsp->orphan_donelist != NULL) { in rcu_adopt_orphan_cbs()
2519 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL]; in rcu_adopt_orphan_cbs()
2520 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist; in rcu_adopt_orphan_cbs()
2523 rdp->nxttail[i] = rsp->orphan_donetail; in rcu_adopt_orphan_cbs()
2524 rsp->orphan_donelist = NULL; in rcu_adopt_orphan_cbs()
2525 rsp->orphan_donetail = &rsp->orphan_donelist; in rcu_adopt_orphan_cbs()
2529 if (rsp->orphan_nxtlist != NULL) { in rcu_adopt_orphan_cbs()
2530 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist; in rcu_adopt_orphan_cbs()
2531 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail; in rcu_adopt_orphan_cbs()
2532 rsp->orphan_nxtlist = NULL; in rcu_adopt_orphan_cbs()
2533 rsp->orphan_nxttail = &rsp->orphan_nxtlist; in rcu_adopt_orphan_cbs()
2540 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) in rcu_cleanup_dying_cpu() argument
2543 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda)); in rcu_cleanup_dying_cpu()
2550 trace_rcu_grace_period(rsp->name, in rcu_cleanup_dying_cpu()
2602 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) in rcu_cleanup_dying_idle_cpu() argument
2606 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dying_idle_cpu()
2627 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) in rcu_cleanup_dead_cpu() argument
2630 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dead_cpu()
2640 raw_spin_lock_irqsave(&rsp->orphan_lock, flags); in rcu_cleanup_dead_cpu()
2641 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); in rcu_cleanup_dead_cpu()
2642 rcu_adopt_orphan_cbs(rsp, flags); in rcu_cleanup_dead_cpu()
2643 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags); in rcu_cleanup_dead_cpu()
2654 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) in rcu_do_batch() argument
2663 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); in rcu_do_batch()
2664 trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist), in rcu_do_batch()
2677 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl); in rcu_do_batch()
2693 if (__rcu_reclaim(rsp->name, list)) in rcu_do_batch()
2704 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(), in rcu_do_batch()
2730 rdp->n_force_qs_snap = rsp->n_force_qs; in rcu_do_batch()
2798 static void force_qs_rnp(struct rcu_state *rsp, in force_qs_rnp() argument
2799 int (*f)(struct rcu_data *rsp, bool *isidle, in force_qs_rnp() argument
2809 rcu_for_each_leaf_node(rsp, rnp) { in force_qs_rnp()
2816 rsp != rcu_state_p || in force_qs_rnp()
2834 rcu_report_unblock_qs_rnp(rsp, rnp, flags); in force_qs_rnp()
2843 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) in force_qs_rnp()
2849 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); in force_qs_rnp()
2861 static void force_quiescent_state(struct rcu_state *rsp) in force_quiescent_state() argument
2869 rnp = __this_cpu_read(rsp->rda->mynode); in force_quiescent_state()
2871 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || in force_quiescent_state()
2876 rsp->n_force_qs_lh++; in force_quiescent_state()
2887 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { in force_quiescent_state()
2888 rsp->n_force_qs_lh++; in force_quiescent_state()
2892 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); in force_quiescent_state()
2894 rcu_gp_kthread_wake(rsp); in force_quiescent_state()
2903 __rcu_process_callbacks(struct rcu_state *rsp) in __rcu_process_callbacks() argument
2907 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); in __rcu_process_callbacks()
2912 rcu_check_quiescent_state(rsp, rdp); in __rcu_process_callbacks()
2916 if (cpu_needs_another_gp(rsp, rdp)) { in __rcu_process_callbacks()
2917 raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */ in __rcu_process_callbacks()
2918 needwake = rcu_start_gp(rsp); in __rcu_process_callbacks()
2919 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); in __rcu_process_callbacks()
2921 rcu_gp_kthread_wake(rsp); in __rcu_process_callbacks()
2928 invoke_rcu_callbacks(rsp, rdp); in __rcu_process_callbacks()
2939 struct rcu_state *rsp; in rcu_process_callbacks() local
2944 for_each_rcu_flavor(rsp) in rcu_process_callbacks()
2945 __rcu_process_callbacks(rsp); in rcu_process_callbacks()
2956 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) in invoke_rcu_callbacks() argument
2960 if (likely(!rsp->boost)) { in invoke_rcu_callbacks()
2961 rcu_do_batch(rsp, rdp); in invoke_rcu_callbacks()
2976 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, in __call_rcu_core() argument
3002 note_gp_changes(rsp, rdp); in __call_rcu_core()
3005 if (!rcu_gp_in_progress(rsp)) { in __call_rcu_core()
3006 struct rcu_node *rnp_root = rcu_get_root(rsp); in __call_rcu_core()
3010 needwake = rcu_start_gp(rsp); in __call_rcu_core()
3013 rcu_gp_kthread_wake(rsp); in __call_rcu_core()
3017 if (rsp->n_force_qs == rdp->n_force_qs_snap && in __call_rcu_core()
3019 force_quiescent_state(rsp); in __call_rcu_core()
3020 rdp->n_force_qs_snap = rsp->n_force_qs; in __call_rcu_core()
3041 struct rcu_state *rsp, int cpu, bool lazy) in __call_rcu() argument
3063 rdp = this_cpu_ptr(rsp->rda); in __call_rcu()
3070 rdp = per_cpu_ptr(rsp->rda, cpu); in __call_rcu()
3098 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, in __call_rcu()
3101 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen); in __call_rcu()
3104 __call_rcu_core(rsp, rdp, head, flags); in __call_rcu()
3384 static void rcu_exp_gp_seq_start(struct rcu_state *rsp) in rcu_exp_gp_seq_start() argument
3386 rcu_seq_start(&rsp->expedited_sequence); in rcu_exp_gp_seq_start()
3388 static void rcu_exp_gp_seq_end(struct rcu_state *rsp) in rcu_exp_gp_seq_end() argument
3390 rcu_seq_end(&rsp->expedited_sequence); in rcu_exp_gp_seq_end()
3393 static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp) in rcu_exp_gp_seq_snap() argument
3395 return rcu_seq_snap(&rsp->expedited_sequence); in rcu_exp_gp_seq_snap()
3397 static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s) in rcu_exp_gp_seq_done() argument
3399 return rcu_seq_done(&rsp->expedited_sequence, s); in rcu_exp_gp_seq_done()
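	(Matches 3384-3399 reassemble, braces added, into four thin wrappers; together they apply the generic rcu_seq counter protocol to ->expedited_sequence.)

	static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
	{
		rcu_seq_start(&rsp->expedited_sequence);
	}

	static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
	{
		rcu_seq_end(&rsp->expedited_sequence);
	}

	static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
	{
		return rcu_seq_snap(&rsp->expedited_sequence);
	}

	static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
	{
		return rcu_seq_done(&rsp->expedited_sequence, s);
	}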
3409 static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp) in sync_exp_reset_tree_hotplug() argument
3415 int ncpus = READ_ONCE(rsp->ncpus); in sync_exp_reset_tree_hotplug()
3420 if (likely(ncpus == rsp->ncpus_snap)) in sync_exp_reset_tree_hotplug()
3422 rsp->ncpus_snap = ncpus; in sync_exp_reset_tree_hotplug()
3428 rcu_for_each_leaf_node(rsp, rnp) { in sync_exp_reset_tree_hotplug()
3468 static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp) in sync_exp_reset_tree() argument
3473 sync_exp_reset_tree_hotplug(rsp); in sync_exp_reset_tree()
3474 rcu_for_each_node_breadth_first(rsp, rnp) { in sync_exp_reset_tree()
3509 static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, in __rcu_report_exp_rnp() argument
3527 wake_up(&rsp->expedited_wq); in __rcu_report_exp_rnp()
3547 static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp, in rcu_report_exp_rnp() argument
3554 __rcu_report_exp_rnp(rsp, rnp, wake, flags); in rcu_report_exp_rnp()
3562 static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_report_exp_cpu_mult() argument
3574 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */ in rcu_report_exp_cpu_mult()
3581 static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp, in rcu_report_exp_rdp() argument
3584 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake); in rcu_report_exp_rdp()
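	(Matches 3581-3584 are the whole wrapper, braces added: reporting an expedited QS for one rcu_data is just the multi-CPU report restricted to that CPU's own grpmask.)

	static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
				       bool wake)
	{
		rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
	}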
3588 static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp, in sync_exp_work_done() argument
3592 if (rcu_exp_gp_seq_done(rsp, s)) { in sync_exp_work_done()
3610 static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s) in exp_funnel_lock() argument
3622 rnp0 = rcu_get_root(rsp); in exp_funnel_lock()
3625 if (sync_exp_work_done(rsp, rnp0, NULL, in exp_funnel_lock()
3626 &rsp->expedited_workdone0, s)) in exp_funnel_lock()
3640 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); in exp_funnel_lock()
3641 if (sync_exp_work_done(rsp, NULL, NULL, &rsp->expedited_workdone1, s)) in exp_funnel_lock()
3646 if (sync_exp_work_done(rsp, rnp1, rdp, in exp_funnel_lock()
3647 &rsp->expedited_workdone2, s)) in exp_funnel_lock()
3656 if (sync_exp_work_done(rsp, rnp1, rdp, in exp_funnel_lock()
3657 &rsp->expedited_workdone3, s)) in exp_funnel_lock()
3667 struct rcu_state *rsp = data; in sync_sched_exp_handler() local
3669 rdp = this_cpu_ptr(rsp->rda); in sync_sched_exp_handler()
3684 struct rcu_state *rsp = &rcu_sched_state; in sync_sched_exp_online_cleanup() local
3686 rdp = per_cpu_ptr(rsp->rda, cpu); in sync_sched_exp_online_cleanup()
3690 ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0); in sync_sched_exp_online_cleanup()
3698 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, in sync_rcu_exp_select_cpus() argument
3709 sync_exp_reset_tree(rsp); in sync_rcu_exp_select_cpus()
3710 rcu_for_each_leaf_node(rsp, rnp) { in sync_rcu_exp_select_cpus()
3717 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in sync_rcu_exp_select_cpus()
3741 ret = smp_call_function_single(cpu, func, rsp, 0); in sync_rcu_exp_select_cpus()
3766 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false); in sync_rcu_exp_select_cpus()
3770 static void synchronize_sched_expedited_wait(struct rcu_state *rsp) in synchronize_sched_expedited_wait() argument
3777 struct rcu_node *rnp_root = rcu_get_root(rsp); in synchronize_sched_expedited_wait()
3785 rsp->expedited_wq, in synchronize_sched_expedited_wait()
3792 wait_event(rsp->expedited_wq, in synchronize_sched_expedited_wait()
3797 rsp->name); in synchronize_sched_expedited_wait()
3798 rcu_for_each_leaf_node(rsp, rnp) { in synchronize_sched_expedited_wait()
3806 rdp = per_cpu_ptr(rsp->rda, cpu); in synchronize_sched_expedited_wait()
3815 jiffies - jiffies_start, rsp->expedited_sequence); in synchronize_sched_expedited_wait()
3816 rcu_for_each_leaf_node(rsp, rnp) { in synchronize_sched_expedited_wait()
3848 struct rcu_state *rsp = &rcu_sched_state; in synchronize_sched_expedited() local
3851 s = rcu_exp_gp_seq_snap(rsp); in synchronize_sched_expedited()
3853 rnp = exp_funnel_lock(rsp, s); in synchronize_sched_expedited()
3857 rcu_exp_gp_seq_start(rsp); in synchronize_sched_expedited()
3858 sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler); in synchronize_sched_expedited()
3859 synchronize_sched_expedited_wait(rsp); in synchronize_sched_expedited()
3861 rcu_exp_gp_seq_end(rsp); in synchronize_sched_expedited()
3873 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) in __rcu_pending() argument
3880 check_cpu_stall(rsp, rdp); in __rcu_pending()
3883 if (rcu_nohz_full_cpu(rsp)) in __rcu_pending()
3905 if (cpu_needs_another_gp(rsp, rdp)) { in __rcu_pending()
3941 struct rcu_state *rsp; in rcu_pending() local
3943 for_each_rcu_flavor(rsp) in rcu_pending()
3944 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda))) in rcu_pending()
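	(Reassembled from matches 3941-3944; the signature and both return statements contain no "rsp" and are assumptions. Any flavor reporting pending work makes the whole query true.)

	static int rcu_pending(void)	/* assumed signature */
	{
		struct rcu_state *rsp;

		for_each_rcu_flavor(rsp)
			if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
				return 1;	/* assumed */
		return 0;			/* assumed */
	}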
3959 struct rcu_state *rsp; in rcu_cpu_has_callbacks() local
3961 for_each_rcu_flavor(rsp) { in rcu_cpu_has_callbacks()
3962 rdp = this_cpu_ptr(rsp->rda); in rcu_cpu_has_callbacks()
3980 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s, in _rcu_barrier_trace() argument
3983 trace_rcu_barrier(rsp->name, s, cpu, in _rcu_barrier_trace()
3984 atomic_read(&rsp->barrier_cpu_count), done); in _rcu_barrier_trace()
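	(Matches 3980-3984 give everything but the second signature line, which carries no "rsp" and is assumed from the way the parameters are used; braces added. All _rcu_barrier() tracing funnels through this one helper.)

	static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
				       int cpu, unsigned long done)	/* 2nd line assumed */
	{
		trace_rcu_barrier(rsp->name, s, cpu,
				  atomic_read(&rsp->barrier_cpu_count), done);
	}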
3994 struct rcu_state *rsp = rdp->rsp; in rcu_barrier_callback() local
3996 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { in rcu_barrier_callback()
3997 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence); in rcu_barrier_callback()
3998 complete(&rsp->barrier_completion); in rcu_barrier_callback()
4000 _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence); in rcu_barrier_callback()
4009 struct rcu_state *rsp = type; in rcu_barrier_func() local
4010 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); in rcu_barrier_func()
4012 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence); in rcu_barrier_func()
4013 atomic_inc(&rsp->barrier_cpu_count); in rcu_barrier_func()
4014 rsp->call(&rdp->barrier_head, rcu_barrier_callback); in rcu_barrier_func()
4021 static void _rcu_barrier(struct rcu_state *rsp) in _rcu_barrier() argument
4025 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence); in _rcu_barrier()
4027 _rcu_barrier_trace(rsp, "Begin", -1, s); in _rcu_barrier()
4030 mutex_lock(&rsp->barrier_mutex); in _rcu_barrier()
4033 if (rcu_seq_done(&rsp->barrier_sequence, s)) { in _rcu_barrier()
4034 _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence); in _rcu_barrier()
4036 mutex_unlock(&rsp->barrier_mutex); in _rcu_barrier()
4041 rcu_seq_start(&rsp->barrier_sequence); in _rcu_barrier()
4042 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence); in _rcu_barrier()
4050 init_completion(&rsp->barrier_completion); in _rcu_barrier()
4051 atomic_set(&rsp->barrier_cpu_count, 1); in _rcu_barrier()
4062 rdp = per_cpu_ptr(rsp->rda, cpu); in _rcu_barrier()
4064 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { in _rcu_barrier()
4065 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu, in _rcu_barrier()
4066 rsp->barrier_sequence); in _rcu_barrier()
4068 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, in _rcu_barrier()
4069 rsp->barrier_sequence); in _rcu_barrier()
4071 atomic_inc(&rsp->barrier_cpu_count); in _rcu_barrier()
4073 rcu_barrier_callback, rsp, cpu, 0); in _rcu_barrier()
4076 _rcu_barrier_trace(rsp, "OnlineQ", cpu, in _rcu_barrier()
4077 rsp->barrier_sequence); in _rcu_barrier()
4078 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); in _rcu_barrier()
4080 _rcu_barrier_trace(rsp, "OnlineNQ", cpu, in _rcu_barrier()
4081 rsp->barrier_sequence); in _rcu_barrier()
4090 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) in _rcu_barrier()
4091 complete(&rsp->barrier_completion); in _rcu_barrier()
4094 wait_for_completion(&rsp->barrier_completion); in _rcu_barrier()
4097 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence); in _rcu_barrier()
4098 rcu_seq_end(&rsp->barrier_sequence); in _rcu_barrier()
4101 mutex_unlock(&rsp->barrier_mutex); in _rcu_barrier()
4148 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) in rcu_boot_init_percpu_data() argument
4151 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_boot_init_percpu_data()
4152 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_boot_init_percpu_data()
4161 rdp->rsp = rsp; in rcu_boot_init_percpu_data()
4174 rcu_init_percpu_data(int cpu, struct rcu_state *rsp) in rcu_init_percpu_data() argument
4178 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_init_percpu_data()
4179 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_init_percpu_data()
4184 rdp->n_force_qs_snap = rsp->n_force_qs; in rcu_init_percpu_data()
4206 WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1); in rcu_init_percpu_data()
4213 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); in rcu_init_percpu_data()
4219 struct rcu_state *rsp; in rcu_prepare_cpu() local
4221 for_each_rcu_flavor(rsp) in rcu_prepare_cpu()
4222 rcu_init_percpu_data(cpu, rsp); in rcu_prepare_cpu()
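	(Matches 4219-4222 plus an assumed signature: the CPU-prepare notifier hook simply initializes the incoming CPU's per-flavor data.)

	static void rcu_prepare_cpu(int cpu)	/* assumed signature */
	{
		struct rcu_state *rsp;

		for_each_rcu_flavor(rsp)
			rcu_init_percpu_data(cpu, rsp);
	}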
4234 struct rcu_state *rsp; in rcu_cpu_notify() local
4253 for_each_rcu_flavor(rsp) in rcu_cpu_notify()
4254 rcu_cleanup_dying_cpu(rsp); in rcu_cpu_notify()
4263 for_each_rcu_flavor(rsp) { in rcu_cpu_notify()
4264 rcu_cleanup_dying_idle_cpu(cpu, rsp); in rcu_cpu_notify()
4271 for_each_rcu_flavor(rsp) { in rcu_cpu_notify()
4272 rcu_cleanup_dead_cpu(cpu, rsp); in rcu_cpu_notify()
4273 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); in rcu_cpu_notify()
4310 struct rcu_state *rsp; in rcu_spawn_gp_kthread() local
4326 for_each_rcu_flavor(rsp) { in rcu_spawn_gp_kthread()
4327 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name); in rcu_spawn_gp_kthread()
4329 rnp = rcu_get_root(rsp); in rcu_spawn_gp_kthread()
4331 rsp->gp_kthread = t; in rcu_spawn_gp_kthread()
4388 static void __init rcu_init_one(struct rcu_state *rsp, in rcu_init_one() argument
4414 rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1]; in rcu_init_one()
4416 rsp->flavor_mask = fl_mask; in rcu_init_one()
4423 rnp = rsp->level[i]; in rcu_init_one()
4431 rnp->gpnum = rsp->gpnum; in rcu_init_one()
4432 rnp->completed = rsp->completed; in rcu_init_one()
4446 rnp->parent = rsp->level[i - 1] + in rcu_init_one()
4458 init_waitqueue_head(&rsp->gp_wq); in rcu_init_one()
4459 init_waitqueue_head(&rsp->expedited_wq); in rcu_init_one()
4460 rnp = rsp->level[rcu_num_lvls - 1]; in rcu_init_one()
4464 per_cpu_ptr(rsp->rda, i)->mynode = rnp; in rcu_init_one()
4465 rcu_boot_init_percpu_data(i, rsp); in rcu_init_one()
4467 list_add(&rsp->flavors, &rcu_struct_flavors); in rcu_init_one()
4553 static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp) in rcu_dump_rcu_node_tree() argument
4560 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_dump_rcu_node_tree()