Lines matching refs: t

Symbol cross-reference for the task pointer t (a struct task_struct *) across the preemptible-RCU code. Each hit shows the file line number, the matching source line, the enclosing function, and whether t is a local variable or a function argument there.

157 struct task_struct *t = current; in rcu_preempt_ctxt_queue() local
177 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
195 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
208 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
219 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
236 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_ctxt_queue()
238 rnp->exp_tasks = &t->rcu_node_entry; in rcu_preempt_ctxt_queue()
248 t->rcu_read_unlock_special.b.exp_need_qs) { in rcu_preempt_ctxt_queue()
249 t->rcu_read_unlock_special.b.exp_need_qs = false; in rcu_preempt_ctxt_queue()
252 WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs); in rcu_preempt_ctxt_queue()
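
rcu_preempt_ctxt_queue() places a newly preempted task on the leaf rcu_node's ->blkd_tasks list so that list position encodes which grace periods the task blocks: entries from ->gp_tasks to the tail block the current normal GP, and entries from ->exp_tasks to the tail block the current expedited GP. A paraphrased sketch of the queuing decision follows; the condition names are placeholders for the ->blkd_state tests in the real function, and the fragment is not compilable on its own:

        struct task_struct *t = current;

        if (blocks_neither_gp) {                  /* placeholder condition */
                /* Queue ahead of ->gp_tasks and ->exp_tasks. */
                list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
        } else if (blocks_both_gps) {             /* placeholder condition */
                /* Queue behind both list pointers. */
                list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
        } else if (blocks_expedited_gp_only) {    /* placeholder condition */
                list_add(&t->rcu_node_entry, rnp->exp_tasks);
        } else {                                  /* blocks the normal GP only */
                list_add(&t->rcu_node_entry, rnp->gp_tasks);
        }
        /* First blocker for a GP?  Point that GP's list pointer at t. */
        if (!rnp->gp_tasks && now_blocks_normal_gp)     /* placeholder */
                rnp->gp_tasks = &t->rcu_node_entry;
        if (!rnp->exp_tasks && now_blocks_expedited_gp) /* placeholder */
                rnp->exp_tasks = &t->rcu_node_entry;

The hits at 248-252 show the expedited-QS handoff: once the task is queued as blocking the expedited GP, its exp_need_qs flag is cleared and the quiescent state is reported on its behalf.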
293 struct task_struct *t = current; in rcu_preempt_note_context_switch() local
298 if (t->rcu_read_lock_nesting > 0 && in rcu_preempt_note_context_switch()
299 !t->rcu_read_unlock_special.b.blocked) { in rcu_preempt_note_context_switch()
306 t->rcu_read_unlock_special.b.blocked = true; in rcu_preempt_note_context_switch()
307 t->rcu_blocked_node = rnp; in rcu_preempt_note_context_switch()
315 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_preempt_note_context_switch()
317 t->pid, in rcu_preempt_note_context_switch()
322 } else if (t->rcu_read_lock_nesting < 0 && in rcu_preempt_note_context_switch()
323 t->rcu_read_unlock_special.s) { in rcu_preempt_note_context_switch()
329 rcu_read_unlock_special(t); in rcu_preempt_note_context_switch()
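
rcu_preempt_note_context_switch() is the context-switch hook. The first time a task is preempted inside an RCU read-side critical section it is marked blocked and queued on its leaf rcu_node; a task preempted while already running the outermost rcu_read_unlock() cleanup (negative nesting) instead has the deferred work finished on the spot. A sketch assembled from the hits above, with locking and tracing elided:

        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
                /* First preemption in this critical section: mark and queue. */
                t->rcu_read_unlock_special.b.blocked = true;
                t->rcu_blocked_node = rnp;    /* remember the leaf rcu_node */
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                /* ... queue t via rcu_preempt_ctxt_queue() ... */
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special.s) {
                /* Preempted during rcu_read_unlock() cleanup: finish it now. */
                rcu_read_unlock_special(t);
        }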
358 static struct list_head *rcu_next_node_entry(struct task_struct *t, in rcu_next_node_entry() argument
363 np = t->rcu_node_entry.next; in rcu_next_node_entry()
383 void rcu_read_unlock_special(struct task_struct *t) in rcu_read_unlock_special() argument
406 special = t->rcu_read_unlock_special; in rcu_read_unlock_special()
409 t->rcu_read_unlock_special.b.need_qs = false; in rcu_read_unlock_special()
410 if (!t->rcu_read_unlock_special.s) { in rcu_read_unlock_special()
425 t->rcu_read_unlock_special.b.exp_need_qs = false; in rcu_read_unlock_special()
428 if (!t->rcu_read_unlock_special.s) { in rcu_read_unlock_special()
439 t->rcu_read_unlock_special.s, in rcu_read_unlock_special()
440 t->rcu_read_unlock_special.b.blocked, in rcu_read_unlock_special()
441 t->rcu_read_unlock_special.b.exp_need_qs, in rcu_read_unlock_special()
442 t->rcu_read_unlock_special.b.need_qs); in rcu_read_unlock_special()
449 t->rcu_read_unlock_special.b.blocked = false; in rcu_read_unlock_special()
459 rnp = t->rcu_blocked_node; in rcu_read_unlock_special()
462 if (rnp == t->rcu_blocked_node) in rcu_read_unlock_special()
470 np = rcu_next_node_entry(t, rnp); in rcu_read_unlock_special()
471 list_del_init(&t->rcu_node_entry); in rcu_read_unlock_special()
472 t->rcu_blocked_node = NULL; in rcu_read_unlock_special()
474 rnp->gpnum, t->pid); in rcu_read_unlock_special()
475 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_read_unlock_special()
477 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_read_unlock_special()
480 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_read_unlock_special()
483 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; in rcu_read_unlock_special()
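
rcu_read_unlock_special() performs the deferred end-of-critical-section work: it snapshots and clears the special flags, reporting normal and expedited quiescent states as needed, and for a task that had blocked it unlinks the task and advances any rcu_node list pointers that referenced it. The loop around hits 459-462 re-reads ->rcu_blocked_node after acquiring the node lock so the lock held is guaranteed to belong to the right node. A paraphrased sketch of the dequeue, locking elided:

        struct list_head *np;
        bool drop_boost_mutex;

        np = rcu_next_node_entry(t, rnp);  /* successor entry, or NULL at tail */
        list_del_init(&t->rcu_node_entry);
        t->rcu_blocked_node = NULL;
        /* Advance any list pointers that referenced the departing entry. */
        if (&t->rcu_node_entry == rnp->gp_tasks)
                rnp->gp_tasks = np;
        if (&t->rcu_node_entry == rnp->exp_tasks)
                rnp->exp_tasks = np;
        if (&t->rcu_node_entry == rnp->boost_tasks)
                rnp->boost_tasks = np;
        /* If t was being priority-boosted, it must drop the boost mutex. */
        drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;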
528 struct task_struct *t; in rcu_print_detail_task_stall_rnp() local
535 t = list_entry(rnp->gp_tasks->prev, in rcu_print_detail_task_stall_rnp()
537 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) in rcu_print_detail_task_stall_rnp()
538 sched_show_task(t); in rcu_print_detail_task_stall_rnp()
572 struct task_struct *t; in rcu_print_task_stall() local
578 t = list_entry(rnp->gp_tasks->prev, in rcu_print_task_stall()
580 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_stall()
581 pr_cont(" P%d", t->pid); in rcu_print_task_stall()
595 struct task_struct *t; in rcu_print_task_exp_stall() local
600 t = list_entry(rnp->exp_tasks->prev, in rcu_print_task_exp_stall()
602 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_exp_stall()
603 pr_cont(" P%d", t->pid); in rcu_print_task_exp_stall()
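
All three stall-print helpers share one iteration idiom: the tasks blocking a given grace period are exactly those from ->gp_tasks (or ->exp_tasks) through the tail of ->blkd_tasks, so the cursor is seeded at the entry before the list pointer and list_for_each_entry_continue() then starts the walk at the pointer itself:

        /* Seed at gp_tasks->prev so the _continue iterator's first entry
         * is the one ->gp_tasks points at; the walk runs to the tail. */
        t = list_entry(rnp->gp_tasks->prev, struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                pr_cont(" P%d", t->pid);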
636 struct task_struct *t = current; in rcu_preempt_check_callbacks() local
638 if (t->rcu_read_lock_nesting == 0) { in rcu_preempt_check_callbacks()
642 if (t->rcu_read_lock_nesting > 0 && in rcu_preempt_check_callbacks()
645 t->rcu_read_unlock_special.b.need_qs = true; in rcu_preempt_check_callbacks()
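
rcu_preempt_check_callbacks() is the scheduler-tick check. Outside any critical section a quiescent state can be reported immediately; inside one (and, in the full code, only when this CPU actually owes a quiescent state), need_qs defers the report to the eventual rcu_read_unlock(). Sketch, with the elided condition marked:

        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs();    /* not in a critical section: QS now */
                return;
        }
        if (t->rcu_read_lock_nesting > 0 /* && this CPU owes a QS (elided) */)
                t->rcu_read_unlock_special.b.need_qs = true;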
706 struct task_struct *t = current; in sync_rcu_exp_handler() local
714 if (t->rcu_read_lock_nesting > 0 && in sync_rcu_exp_handler()
715 !t->rcu_read_unlock_special.b.blocked) { in sync_rcu_exp_handler()
716 t->rcu_read_unlock_special.b.exp_need_qs = true; in sync_rcu_exp_handler()
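
sync_rcu_exp_handler() is the IPI handler backing expedited grace periods. When the interrupted task sits inside a read-side critical section but has not blocked, the handler cannot report a quiescent state itself, so it sets exp_need_qs and lets that task's rcu_read_unlock() report it. Sketch of that branch:

        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
                /* Defer the expedited QS to this task's rcu_read_unlock(). */
                t->rcu_read_unlock_special.b.exp_need_qs = true;
                return;
        }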
802 struct task_struct *t = current; in exit_rcu() local
806 t->rcu_read_lock_nesting = 1; in exit_rcu()
808 t->rcu_read_unlock_special.b.blocked = true; in exit_rcu()
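
exit_rcu() covers a task exiting while still inside an RCU read-side critical section: it forces the bookkeeping into the "outermost level, blocked" state and then runs the normal unlock path so the task gets dequeued properly. The function is short enough to give as a close paraphrase:

        void exit_rcu(void)
        {
                struct task_struct *t = current;

                if (likely(list_empty(&current->rcu_node_entry)))
                        return;               /* never queued: nothing to do */
                t->rcu_read_lock_nesting = 1; /* force outermost level */
                barrier();
                t->rcu_read_unlock_special.b.blocked = true;
                __rcu_read_unlock();          /* runs the full cleanup path */
        }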
964 static void rcu_wake_cond(struct task_struct *t, int status) in rcu_wake_cond() argument
971 wake_up_process(t); in rcu_wake_cond()
985 struct task_struct *t; in rcu_boost() local
1035 t = container_of(tb, struct task_struct, rcu_node_entry); in rcu_boost()
1036 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); in rcu_boost()
1093 struct task_struct *t; in rcu_initiate_boost() local
1108 t = rnp->boost_kthread_task; in rcu_initiate_boost()
1109 if (t) in rcu_initiate_boost()
1110 rcu_wake_cond(t, rnp->boost_kthread_status); in rcu_initiate_boost()
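
The boosting path: rcu_boost() takes tb, the list_head of the first task needing a boost, recovers the task_struct with container_of(), and makes that task the proxy owner of the per-node boost_mtx. When the boost kthread then blocks on the mutex, priority inheritance raises the blocker's priority until its rcu_read_unlock() releases the mutex. rcu_initiate_boost() merely wakes the per-node boost kthread through rcu_wake_cond(). Paraphrased sketch of the handoff:

        /* tb points at the rcu_node_entry of the first task to boost. */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); /* t "owns" the mutex */
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&rnp->boost_mtx);   /* PI boosts t while we wait */
        rt_mutex_unlock(&rnp->boost_mtx); /* t dropped it at rcu_read_unlock() */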
1164 struct task_struct *t; in rcu_spawn_one_boost_kthread() local
1175 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1177 if (IS_ERR(t)) in rcu_spawn_one_boost_kthread()
1178 return PTR_ERR(t); in rcu_spawn_one_boost_kthread()
1181 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1184 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); in rcu_spawn_one_boost_kthread()
1185 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ in rcu_spawn_one_boost_kthread()
1261 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity() local
1266 if (!t) in rcu_boost_kthread_setaffinity()
1275 set_cpus_allowed_ptr(t, cm); in rcu_boost_kthread_setaffinity()
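
Boost-kthread lifecycle: rcu_spawn_one_boost_kthread() creates the per-node kthread, publishes it in ->boost_kthread_task, switches it to SCHED_FIFO, and wakes it; rcu_boost_kthread_setaffinity() later re-pins it, silently doing nothing if no kthread was ever created. Sketch; treat the "rcub/%d" name and the kthread_prio priority knob as assumptions from my reading of the source:

        struct sched_param sp;
        struct task_struct *t;

        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);  /* assumed thread name */
        if (IS_ERR(t))
                return PTR_ERR(t);                 /* e.g. -ENOMEM */
        rnp->boost_kthread_task = t;               /* published under rnp->lock */
        sp.sched_priority = kthread_prio;          /* assumed RT priority knob */
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t);  /* get to TASK_INTERRUPTIBLE quickly */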
1937 struct task_struct *t; in __call_rcu_nocb_enqueue() local
1948 t = READ_ONCE(rdp->nocb_kthread); in __call_rcu_nocb_enqueue()
1949 if (rcu_nocb_poll || !t) { in __call_rcu_nocb_enqueue()
2383 struct task_struct *t; in rcu_spawn_one_nocb_kthread() local
2413 t = kthread_run(rcu_nocb_kthread, rdp_spawn, in rcu_spawn_one_nocb_kthread()
2415 BUG_ON(IS_ERR(t)); in rcu_spawn_one_nocb_kthread()
2416 WRITE_ONCE(rdp_spawn->nocb_kthread, t); in rcu_spawn_one_nocb_kthread()
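
The no-CBs hits pair up across publication and use: rcu_spawn_one_nocb_kthread() publishes the kthread pointer with WRITE_ONCE(), and the enqueue fast path reads it back with READ_ONCE(), skipping the wakeup when polling is enabled or the kthread has not been spawned yet. Sketch of both sides; the "rcuo%c/%d" thread name is an assumption from my reading of the source:

        /* Enqueue side: */
        t = READ_ONCE(rdp->nocb_kthread);
        if (rcu_nocb_poll || !t) {
                /* Polling mode, or no kthread yet: leave the callback queued. */
                return;
        }
        /* ... otherwise wake t to process the newly enqueued callback ... */

        /* Spawn side: */
        t = kthread_run(rcu_nocb_kthread, rdp_spawn,
                        "rcuo%c/%d", rsp->abbr, cpu);  /* assumed thread name */
        BUG_ON(IS_ERR(t));
        WRITE_ONCE(rdp_spawn->nocb_kthread, t);        /* publish the pointer */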