Lines matching refs:rnp. Each entry shows the source line number, the matching line, and the enclosing function ("argument" or "local" notes how rnp is bound there); rnp is the per-node struct rcu_node pointer used throughout the RCU core.
104 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
150 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp, in rcu_preempt_ctxt_queue() argument
151 unsigned long flags) __releases(rnp->lock) in rcu_preempt_ctxt_queue()
153 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
154 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
155 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
156 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
177 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
195 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
208 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
219 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
235 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) in rcu_preempt_ctxt_queue()
236 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_ctxt_queue()
237 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
238 rnp->exp_tasks = &t->rcu_node_entry; in rcu_preempt_ctxt_queue()
239 raw_spin_unlock(&rnp->lock); in rcu_preempt_ctxt_queue()
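
The rcu_preempt_ctxt_queue() fragments above fold four independent conditions into a single blkd_state value, which then decides where on rnp->blkd_tasks the preempted task is queued (head, tail, or just behind an existing exp_tasks/gp_tasks marker). A minimal userspace sketch of the encoding follows; the bit values and the sample are illustrative, not the kernel's actual constants or placement table:

#include <stdio.h>

/* Illustrative bit assignments; the kernel defines its own RCU_* bits. */
#define RCU_GP_TASKS	0x8	/* rnp->gp_tasks already non-NULL */
#define RCU_EXP_TASKS	0x4	/* rnp->exp_tasks already non-NULL */
#define RCU_GP_BLKD	0x2	/* this CPU still blocks the normal GP */
#define RCU_EXP_BLKD	0x1	/* this CPU still blocks the expedited GP */

static int blkd_state(int gp_tasks, int exp_tasks, int gp_blkd, int exp_blkd)
{
	return (gp_tasks ? RCU_GP_TASKS : 0) +
	       (exp_tasks ? RCU_EXP_TASKS : 0) +
	       (gp_blkd ? RCU_GP_BLKD : 0) +
	       (exp_blkd ? RCU_EXP_BLKD : 0);
}

int main(void)
{
	/* A reader blocking only the expedited GP, with gp_tasks already set. */
	printf("blkd_state = %#x\n", blkd_state(1, 0, 0, 1));	/* 0x9 */
	return 0;
}
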
296 struct rcu_node *rnp; in rcu_preempt_note_context_switch() local
303 rnp = rdp->mynode; in rcu_preempt_note_context_switch()
304 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_preempt_note_context_switch()
307 t->rcu_blocked_node = rnp; in rcu_preempt_note_context_switch()
314 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); in rcu_preempt_note_context_switch()
318 (rnp->qsmask & rdp->grpmask) in rcu_preempt_note_context_switch()
319 ? rnp->gpnum in rcu_preempt_note_context_switch()
320 : rnp->gpnum + 1); in rcu_preempt_note_context_switch()
321 rcu_preempt_ctxt_queue(rnp, rdp, flags); in rcu_preempt_note_context_switch()
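
Lines 318-320 decide which grace period the outgoing reader is recorded as blocking: the current one if this CPU's bit is still set in rnp->qsmask, otherwise the next. A hedged sketch of just that decision, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long gpnum = 7;	/* current grace-period number */
	unsigned long qsmask = 0x4;	/* CPUs still owing a quiescent state */
	unsigned long grpmask = 0x4;	/* this CPU's bit within the node */

	/* Same ternary as rcu_preempt_note_context_switch()'s trace call. */
	unsigned long blocked_gp = (qsmask & grpmask) ? gpnum : gpnum + 1;
	printf("reader blocks grace period %lu\n", blocked_gp);
	return 0;
}
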
349 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
351 return rnp->gp_tasks != NULL; in rcu_preempt_blocked_readers_cgp()
359 struct rcu_node *rnp) in rcu_next_node_entry() argument
364 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
373 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
375 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
392 struct rcu_node *rnp; in rcu_read_unlock_special() local
459 rnp = t->rcu_blocked_node; in rcu_read_unlock_special()
460 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_read_unlock_special()
462 if (rnp == t->rcu_blocked_node) in rcu_read_unlock_special()
465 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_read_unlock_special()
467 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); in rcu_read_unlock_special()
468 empty_exp = sync_rcu_preempt_exp_done(rnp); in rcu_read_unlock_special()
470 np = rcu_next_node_entry(t, rnp); in rcu_read_unlock_special()
474 rnp->gpnum, t->pid); in rcu_read_unlock_special()
475 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_read_unlock_special()
476 rnp->gp_tasks = np; in rcu_read_unlock_special()
477 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_read_unlock_special()
478 rnp->exp_tasks = np; in rcu_read_unlock_special()
480 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_read_unlock_special()
481 rnp->boost_tasks = np; in rcu_read_unlock_special()
483 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; in rcu_read_unlock_special()
492 empty_exp_now = sync_rcu_preempt_exp_done(rnp); in rcu_read_unlock_special()
493 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_read_unlock_special()
495 rnp->gpnum, in rcu_read_unlock_special()
496 0, rnp->qsmask, in rcu_read_unlock_special()
497 rnp->level, in rcu_read_unlock_special()
498 rnp->grplo, in rcu_read_unlock_special()
499 rnp->grphi, in rcu_read_unlock_special()
500 !!rnp->gp_tasks); in rcu_read_unlock_special()
501 rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags); in rcu_read_unlock_special()
503 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_read_unlock_special()
508 rt_mutex_unlock(&rnp->boost_mtx); in rcu_read_unlock_special()
515 rcu_report_exp_rnp(rcu_state_p, rnp, true); in rcu_read_unlock_special()
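
rcu_read_unlock_special() unlinks the task from rnp->blkd_tasks, but gp_tasks, exp_tasks, and (with boosting) boost_tasks are markers into that same list, so any marker pointing at the departing entry must first be advanced to its successor np (NULL once the list head is reached, as rcu_next_node_entry() shows). A self-contained sketch of the unlink-and-advance pattern, with a toy doubly linked list standing in for list_head:

#include <stddef.h>
#include <stdio.h>

struct node { struct node *prev, *next; int pid; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Analogue of rcu_next_node_entry(): next entry, or NULL at the head. */
static struct node *next_or_null(struct node *n, struct node *head)
{
	return n->next == head ? NULL : n->next;
}

int main(void)
{
	struct node head, a = { .pid = 1 }, b = { .pid = 2 };
	struct node *gp_tasks, *np;

	list_init(&head);
	/* Hand-built list: head <-> a <-> b <-> head */
	a.prev = &head; a.next = &b;
	b.prev = &a;    b.next = &head;
	head.next = &a; head.prev = &b;

	gp_tasks = &a;			/* first reader blocking this GP */
	np = next_or_null(&a, &head);
	list_del(&a);			/* reader 'a' exits its critical section */
	if (gp_tasks == &a)		/* advance the marker past the removed node */
		gp_tasks = np;
	printf("gp_tasks now at pid %d\n", gp_tasks ? gp_tasks->pid : -1);
	return 0;
}
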
525 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) in rcu_print_detail_task_stall_rnp() argument
530 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_print_detail_task_stall_rnp()
531 if (!rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_print_detail_task_stall_rnp()
532 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_print_detail_task_stall_rnp()
535 t = list_entry(rnp->gp_tasks->prev, in rcu_print_detail_task_stall_rnp()
537 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) in rcu_print_detail_task_stall_rnp()
539 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_print_detail_task_stall_rnp()
548 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_print_detail_task_stall() local
550 rcu_print_detail_task_stall_rnp(rnp); in rcu_print_detail_task_stall()
551 rcu_for_each_leaf_node(rsp, rnp) in rcu_print_detail_task_stall()
552 rcu_print_detail_task_stall_rnp(rnp); in rcu_print_detail_task_stall()
555 static void rcu_print_task_stall_begin(struct rcu_node *rnp) in rcu_print_task_stall_begin() argument
558 rnp->level, rnp->grplo, rnp->grphi); in rcu_print_task_stall_begin()
570 static int rcu_print_task_stall(struct rcu_node *rnp) in rcu_print_task_stall() argument
575 if (!rcu_preempt_blocked_readers_cgp(rnp)) in rcu_print_task_stall()
577 rcu_print_task_stall_begin(rnp); in rcu_print_task_stall()
578 t = list_entry(rnp->gp_tasks->prev, in rcu_print_task_stall()
580 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_stall()
593 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
598 if (!rnp->exp_tasks) in rcu_print_task_exp_stall()
600 t = list_entry(rnp->exp_tasks->prev, in rcu_print_task_exp_stall()
602 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_exp_stall()
619 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
621 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); in rcu_preempt_check_blocked_tasks()
622 if (rcu_preempt_has_tasks(rnp)) in rcu_preempt_check_blocked_tasks()
623 rnp->gp_tasks = rnp->blkd_tasks.next; in rcu_preempt_check_blocked_tasks()
624 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
746 struct rcu_node *rnp; in synchronize_rcu_expedited() local
763 rnp = rcu_get_root(rsp); in synchronize_rcu_expedited()
838 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
846 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
863 static int rcu_print_task_stall(struct rcu_node *rnp) in rcu_print_task_stall() argument
873 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
883 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
885 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
939 static void rcu_initiate_boost_trace(struct rcu_node *rnp) in rcu_initiate_boost_trace() argument
941 if (!rcu_preempt_has_tasks(rnp)) in rcu_initiate_boost_trace()
942 rnp->n_balk_blkd_tasks++; in rcu_initiate_boost_trace()
943 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) in rcu_initiate_boost_trace()
944 rnp->n_balk_exp_gp_tasks++; in rcu_initiate_boost_trace()
945 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) in rcu_initiate_boost_trace()
946 rnp->n_balk_boost_tasks++; in rcu_initiate_boost_trace()
947 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) in rcu_initiate_boost_trace()
948 rnp->n_balk_notblocked++; in rcu_initiate_boost_trace()
949 else if (rnp->gp_tasks != NULL && in rcu_initiate_boost_trace()
950 ULONG_CMP_LT(jiffies, rnp->boost_time)) in rcu_initiate_boost_trace()
951 rnp->n_balk_notyet++; in rcu_initiate_boost_trace()
953 rnp->n_balk_nos++; in rcu_initiate_boost_trace()
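
The boost_time test on line 950 relies on the kernel's wraparound-safe jiffies comparison. ULONG_CMP_LT()/ULONG_CMP_GE() subtract modulo the word size, so they stay correct across counter wrap as long as the two values lie within half the counter range of each other. The macro definitions below follow the kernel's; the scenario is invented:

#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long jiffies = ULONG_MAX - 3;	/* "now", about to wrap */
	unsigned long boost_time = 5;		/* deadline just past the wrap */

	/* A plain '<' would report the deadline as long past; the modular
	 * comparison correctly reports that it has not yet been reached. */
	printf("too early to boost: %d\n", ULONG_CMP_LT(jiffies, boost_time));
	return 0;
}
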
958 static void rcu_initiate_boost_trace(struct rcu_node *rnp) in rcu_initiate_boost_trace() argument
982 static int rcu_boost(struct rcu_node *rnp) in rcu_boost() argument
988 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
989 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
992 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_boost()
999 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1000 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_boost()
1010 if (rnp->exp_tasks != NULL) { in rcu_boost()
1011 tb = rnp->exp_tasks; in rcu_boost()
1012 rnp->n_exp_boosts++; in rcu_boost()
1014 tb = rnp->boost_tasks; in rcu_boost()
1015 rnp->n_normal_boosts++; in rcu_boost()
1017 rnp->n_tasks_boosted++; in rcu_boost()
1036 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); in rcu_boost()
1037 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_boost()
1039 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1040 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1042 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1043 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
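
Two things stand out in the rcu_boost() fragments: readers blocking an expedited grace period are boosted in preference to those blocking a normal one, and the return value tells rcu_boost_kthread() whether another pass is needed. The boosting itself proxy-locks rnp->boost_mtx to the chosen reader and then blocks on it, lending the kthread's priority until the reader's outermost rcu_read_unlock(); that rt_mutex machinery is only described here. A sketch of the selection and loop contract, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the two candidate lists in struct rcu_node. */
struct lists { void *exp_tasks, *boost_tasks; };

/* Prefer expedited blockers, as rcu_boost() does; *more mirrors the
 * function's "keep looping?" return value. */
static const char *pick_boost_list(const struct lists *l, bool *more)
{
	*more = l->exp_tasks || l->boost_tasks;
	if (l->exp_tasks)
		return "exp_tasks";
	return l->boost_tasks ? "boost_tasks" : "none";
}

int main(void)
{
	struct lists l = { .exp_tasks = "a reader" };
	bool more;

	printf("boost from %s, more=%d\n", pick_boost_list(&l, &more), more);
	return 0;
}
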
1051 struct rcu_node *rnp = (struct rcu_node *)arg; in rcu_boost_kthread() local
1057 rnp->boost_kthread_status = RCU_KTHREAD_WAITING; in rcu_boost_kthread()
1059 rcu_wait(rnp->boost_tasks || rnp->exp_tasks); in rcu_boost_kthread()
1061 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; in rcu_boost_kthread()
1062 more2boost = rcu_boost(rnp); in rcu_boost_kthread()
1068 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; in rcu_boost_kthread()
1090 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1091 __releases(rnp->lock) in rcu_initiate_boost()
1095 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { in rcu_initiate_boost()
1096 rnp->n_balk_exp_gp_tasks++; in rcu_initiate_boost()
1097 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1100 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1101 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1102 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1103 rnp->qsmask == 0 && in rcu_initiate_boost()
1104 ULONG_CMP_GE(jiffies, rnp->boost_time))) { in rcu_initiate_boost()
1105 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1106 rnp->boost_tasks = rnp->gp_tasks; in rcu_initiate_boost()
1107 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1108 t = rnp->boost_kthread_task; in rcu_initiate_boost()
1110 rcu_wake_cond(t, rnp->boost_kthread_status); in rcu_initiate_boost()
1112 rcu_initiate_boost_trace(rnp); in rcu_initiate_boost()
1113 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
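
Spelled out, the gate in rcu_initiate_boost() wakes the boost kthread either because an expedited grace period has blocked readers, or because a normal one does and (a) no boosting is already in flight, (b) every CPU below this rcu_node has reported its quiescent state, and (c) the grace period has aged past rnp->boost_time. A hedged restatement as a pure predicate, with the relevant fields flattened into an illustrative struct:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

/* Illustrative flattening of the fields rcu_initiate_boost() consults. */
struct rnp_view {
	bool exp_tasks, gp_tasks, boost_tasks;
	unsigned long qsmask, boost_time, jiffies;
};

static bool should_initiate_boost(const struct rnp_view *r)
{
	if (r->exp_tasks)		/* expedited GP blocked: boost now */
		return true;
	return r->gp_tasks &&		/* readers block the normal GP */
	       !r->boost_tasks &&	/* no boosting already running */
	       r->qsmask == 0 &&	/* only those readers hold it up */
	       ULONG_CMP_GE(r->jiffies, r->boost_time);	/* GP old enough */
}

int main(void)
{
	struct rnp_view r = { .gp_tasks = true, .boost_time = 100, .jiffies = 150 };

	printf("initiate boost: %d\n", should_initiate_boost(&r));
	return 0;
}
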
1148 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1150 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
1159 struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1161 int rnp_index = rnp - &rsp->node[0]; in rcu_spawn_one_boost_kthread()
1169 if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) in rcu_spawn_one_boost_kthread()
1173 if (rnp->boost_kthread_task != NULL) in rcu_spawn_one_boost_kthread()
1175 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1179 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_spawn_one_boost_kthread()
1181 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1182 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_spawn_one_boost_kthread()
1259 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1261 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1262 unsigned long mask = rcu_rnp_online_cpus(rnp); in rcu_boost_kthread_setaffinity()
1270 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) in rcu_boost_kthread_setaffinity()
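
The loop on line 1270 expands the node's packed online mask into per-CPU affinity: bit 0 of mask corresponds to CPU grplo, and the outgoing CPU is skipped so the boost kthread drifts off it before the CPU goes offline. A userspace sketch with a plain word standing in for cpumask_t:

#include <stdio.h>

int main(void)
{
	int grplo = 8, grphi = 15;	/* CPUs covered by this rcu_node */
	int outgoingcpu = 10;		/* CPU being hot-unplugged */
	unsigned long mask = 0xb5;	/* packed online mask, bit 0 = grplo */
	unsigned long cm = 0;		/* affinity mask under construction */
	int cpu;

	for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cm |= 1UL << cpu;	/* cpumask_set_cpu() analogue */
	printf("affinity mask: %#lx\n", cm);
	return 0;
}
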
1293 struct rcu_node *rnp; in rcu_spawn_boost_kthreads() local
1299 rcu_for_each_leaf_node(rcu_state_p, rnp) in rcu_spawn_boost_kthreads()
1300 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); in rcu_spawn_boost_kthreads()
1306 struct rcu_node *rnp = rdp->mynode; in rcu_prepare_kthreads() local
1310 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); in rcu_prepare_kthreads()
1315 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1316 __releases(rnp->lock) in rcu_initiate_boost()
1318 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1331 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1335 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1434 struct rcu_node *rnp; in rcu_try_advance_all_cbs() local
1444 rnp = rdp->mynode; in rcu_try_advance_all_cbs()
1451 if ((rdp->completed != rnp->completed || in rcu_try_advance_all_cbs()
1523 struct rcu_node *rnp; in rcu_prepare_for_idle() local
1569 rnp = rdp->mynode; in rcu_prepare_for_idle()
1570 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_prepare_for_idle()
1572 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_prepare_for_idle()
1573 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_prepare_for_idle()
1825 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_nocb_gp_cleanup() argument
1827 wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); in rcu_nocb_gp_cleanup()
1838 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) in rcu_nocb_gp_set() argument
1840 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; in rcu_nocb_gp_set()
1843 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument
1845 init_waitqueue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
1846 init_waitqueue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
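
The no-CBs fragments all index two-element arrays by grace-period parity: waiters for grace period c sleep in slot (c & 0x1), and requests for the next grace period are counted in slot ((completed + 1) & 0x1). Two slots suffice because only the current and the next grace period can ever be pending at once, so the parity index cannot collide. A toy counter stands in for the wait queue here:

#include <stdio.h>

int main(void)
{
	unsigned long completed = 41;	/* grace periods finished so far */
	int waiters[2] = { 0, 0 };

	waiters[(completed + 1) & 0x1]++;	/* queue a waiter for GP 42 */
	completed++;				/* GP 42 completes */
	printf("wake slot %lu: %d waiter(s)\n",
	       completed & 0x1, waiters[completed & 0x1]);
	return 0;
}
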
2069 struct rcu_node *rnp = rdp->mynode; in rcu_nocb_wait_gp() local
2071 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_nocb_wait_gp()
2073 needwake = rcu_start_future_gp(rnp, rdp, &c); in rcu_nocb_wait_gp()
2074 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_nocb_wait_gp()
2082 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); in rcu_nocb_wait_gp()
2085 rnp->nocb_gp_wq[c & 0x1], in rcu_nocb_wait_gp()
2086 (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); in rcu_nocb_wait_gp()
2090 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); in rcu_nocb_wait_gp()
2092 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); in rcu_nocb_wait_gp()
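
rcu_nocb_wait_gp() then sleeps on the parity-selected queue until the completed counter reaches the grace period it was promised, compared with the same wraparound-safe macro as above. A pthread-based sketch of that wait/wake handshake; the condition-variable analogue is an assumption of this sketch, while the kernel uses wait_event_interruptible() and, in rcu_nocb_gp_cleanup(), wake_up_all():

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned long completed;		/* READ_ONCE(rnp->completed) stand-in */

/* wait_event analogue: sleep until grace period c has completed. */
static void wait_for_gp(unsigned long c)
{
	pthread_mutex_lock(&lock);
	while (!ULONG_CMP_GE(completed, c))
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

/* rcu_nocb_gp_cleanup() analogue: advance the counter, wake all waiters. */
static void *gp_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	completed++;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gp_thread, NULL);
	wait_for_gp(1);			/* blocks until the GP "completes" */
	pthread_join(t, NULL);
	puts("grace period 1 complete");
	return 0;
}
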
2517 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_nocb_gp_cleanup() argument
2521 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) in rcu_nocb_gp_set() argument
2525 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument