next_task          24 arch/arc/kernel/ctx_sw.c __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
next_task          28 arch/arc/kernel/ctx_sw.c 	unsigned int next = (unsigned int)next_task;
next_task         197 arch/ia64/include/asm/mmu_context.h #define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
next_task          29 arch/ia64/include/asm/switch_to.h extern struct task_struct *ia64_switch_to (void *next_task);
next_task         223 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq_task *next_task;
next_task         227 drivers/mailbox/mtk-cmdq-mailbox.c 	next_task = list_first_entry_or_null(&thread->task_busy_list,
next_task         229 drivers/mailbox/mtk-cmdq-mailbox.c 	if (next_task)
next_task         230 drivers/mailbox/mtk-cmdq-mailbox.c 		writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
next_task         335 include/linux/fsl/bestcomm/bestcomm_priv.h bcom_set_task_auto_start(int task, int next_task)
next_task         338 include/linux/fsl/bestcomm/bestcomm_priv.h 	out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task);
next_task         566 include/linux/sched/signal.h 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
next_task         575 include/linux/sched/signal.h 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
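The two signal.h hits are the loop bodies of the for_each_process() and do_each_thread() iterators, which walk the circular task list headed by init_task by repeatedly applying next_task(). Below is a minimal userspace sketch of that circular-list walk; every demo_* name is a hypothetical stand-in for task_struct, init_task and next_task(), not the kernel's definitions.

#include <stdio.h>

struct demo_task {
	const char *comm;
	struct demo_task *tasks_next;	/* models p->tasks.next */
};

/* models next_task(p), which resolves p->tasks.next back to a task */
static struct demo_task *demo_next_task(struct demo_task *p)
{
	return p->tasks_next;
}

int main(void)
{
	struct demo_task init = { "swapper", NULL };
	struct demo_task a = { "kthreadd", NULL }, b = { "bash", NULL };
	struct demo_task *p;

	/* circular list headed by the init task, as with init_task.tasks */
	init.tasks_next = &a;
	a.tasks_next = &b;
	b.tasks_next = &init;

	/* same loop shape as for_each_process(p) at signal.h:566 */
	for (p = &init; (p = demo_next_task(p)) != &init; )
		printf("%s\n", p->comm);

	return 0;
}

The loop starts at the list head, advances before each iteration, and stops when it comes back around to the head, so the head itself (init_task) is never visited.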
next_task        2048 kernel/sched/deadline.c 	struct task_struct *next_task;
next_task        2055 kernel/sched/deadline.c 	next_task = pick_next_pushable_dl_task(rq);
next_task        2056 kernel/sched/deadline.c 	if (!next_task)
next_task        2060 kernel/sched/deadline.c 	if (WARN_ON(next_task == rq->curr))
next_task        2069 kernel/sched/deadline.c 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
next_task        2076 kernel/sched/deadline.c 	get_task_struct(next_task);
next_task        2079 kernel/sched/deadline.c 	later_rq = find_lock_later_rq(next_task, rq);
next_task        2089 kernel/sched/deadline.c 		if (task == next_task) {
next_task        2101 kernel/sched/deadline.c 		put_task_struct(next_task);
next_task        2102 kernel/sched/deadline.c 		next_task = task;
next_task        2106 kernel/sched/deadline.c 	deactivate_task(rq, next_task, 0);
next_task        2107 kernel/sched/deadline.c 	set_task_cpu(next_task, later_rq->cpu);
next_task        2114 kernel/sched/deadline.c 	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
next_task        2122 kernel/sched/deadline.c 	put_task_struct(next_task);
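The deadline.c hits at 2048-2122 are push_dl_task(): pick the head of the pushable list, bail out if it is the running task, prefer a local reschedule when its deadline is earlier than rq->curr's (line 2069), pin it with get_task_struct() (line 2076) because find_lock_later_rq() drops the run-queue lock, re-check the candidate after relocking and retry if it changed (lines 2089-2102), then deactivate it, set_task_cpu() to the target, activate it there, and drop the reference. The following is a compressed userspace model of only that pin/relock/revalidate/retry skeleton, not of the locking or scheduling itself; every demo_* name is a hypothetical stand-in rather than a kernel API.

#include <stdio.h>
#include <stddef.h>

struct demo_task { int refs; int id; };

struct demo_rq {
	struct demo_task *pushable;	/* head of the pushable list (modeled) */
	struct demo_task *queued;	/* task currently enqueued here */
};

static void get_task(struct demo_task *t) { t->refs++; }
static void put_task(struct demo_task *t) { t->refs--; }

/* Models find_lock_later_rq(): it drops the source queue's lock, so by the
 * time it returns the candidate may have moved; a NULL return forces the
 * caller to re-validate.  Failure is simulated via 'ok'. */
static struct demo_rq *find_later_rq_model(struct demo_rq *later, int ok)
{
	return ok ? later : NULL;
}

static void push_task_model(struct demo_rq *rq, struct demo_rq *later, int lock_ok)
{
	struct demo_task *next_task, *task;

	next_task = rq->pushable;		/* pick_next_pushable_dl_task() */
	if (!next_task)
		return;
retry:
	get_task(next_task);			/* pin it: the lock will be dropped */

	if (!find_later_rq_model(later, lock_ok)) {
		task = rq->pushable;		/* re-read the head after relocking */
		if (task == next_task || !task)
			goto out;		/* unchanged (or gone): give up */
		put_task(next_task);		/* head changed: retry with new one */
		next_task = task;
		goto retry;
	}

	rq->queued = NULL;			/* deactivate_task(rq, next_task, 0) */
	later->queued = next_task;		/* set_task_cpu() + activate_task() */
out:
	put_task(next_task);
}

int main(void)
{
	struct demo_task t = { .refs = 1, .id = 42 };
	struct demo_rq src = { .pushable = &t, .queued = &t };
	struct demo_rq dst = { .pushable = NULL, .queued = NULL };

	push_task_model(&src, &dst, 1);
	printf("task %d moved: %s, refs back to %d\n",
	       t.id, dst.queued == &t ? "yes" : "no", t.refs);
	return 0;
}

The reference taken before the lock dance is what keeps next_task valid while no run-queue lock protects it, and the re-read after relocking is what makes the retry loop safe against a concurrent migration.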
next_task        1794 kernel/sched/rt.c 	struct task_struct *next_task;
next_task        1801 kernel/sched/rt.c 	next_task = pick_next_pushable_task(rq);
next_task        1802 kernel/sched/rt.c 	if (!next_task)
next_task        1806 kernel/sched/rt.c 	if (WARN_ON(next_task == rq->curr))
next_task        1814 kernel/sched/rt.c 	if (unlikely(next_task->prio < rq->curr->prio)) {
next_task        1820 kernel/sched/rt.c 	get_task_struct(next_task);
next_task        1823 kernel/sched/rt.c 	lowest_rq = find_lock_lowest_rq(next_task, rq);
next_task        1835 kernel/sched/rt.c 		if (task == next_task) {
next_task        1852 kernel/sched/rt.c 		put_task_struct(next_task);
next_task        1853 kernel/sched/rt.c 		next_task = task;
next_task        1857 kernel/sched/rt.c 	deactivate_task(rq, next_task, 0);
next_task        1858 kernel/sched/rt.c 	set_task_cpu(next_task, lowest_rq->cpu);
next_task        1859 kernel/sched/rt.c 	activate_task(lowest_rq, next_task, 0);
next_task        1867 kernel/sched/rt.c 	put_task_struct(next_task);
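The rt.c hits at 1794-1867 are push_rt_task() and follow the same skeleton: the structural differences visible here are the preemption check, which compares priorities (line 1814) instead of deadlines, and the target selection, which uses find_lock_lowest_rq() (line 1823) instead of find_lock_later_rq(). The get_task_struct()/put_task_struct() bracketing and the retry-on-changed-candidate check (lines 1835, 1852-1853) have the same shape, so the sketch above applies to both paths.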