Lines Matching refs:sched_class

350 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
809 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
816 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
905 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
915 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
982 const struct sched_class *prev_class, in check_class_changed()
985 if (prev_class != p->sched_class) { in check_class_changed()
989 p->sched_class->switched_to(rq, p); in check_class_changed()
991 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
996 const struct sched_class *class; in check_preempt_curr()
998 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
999 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1002 if (class == rq->curr->sched_class) in check_preempt_curr()
1004 if (class == p->sched_class) { in check_preempt_curr()
1049 if (p->sched_class->migrate_task_rq) in set_task_cpu()
1050 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
1382 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); in select_task_rq()
1469 if (p->sched_class->task_woken) in ttwu_do_wakeup()
1470 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
1693 if (p->sched_class->task_waking) in try_to_wake_up()
1694 p->sched_class->task_waking(p); in try_to_wake_up()
1935 p->sched_class = &rt_sched_class; in sched_fork()
1937 p->sched_class = &fair_sched_class; in sched_fork()
1940 if (p->sched_class->task_fork) in sched_fork()
1941 p->sched_class->task_fork(p); in sched_fork()
2100 if (p->sched_class->task_woken) in wake_up_new_task()
2101 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2237 if (prev->sched_class->task_dead) in finish_task_switch()
2238 prev->sched_class->task_dead(prev); in finish_task_switch()
2261 if (rq->curr->sched_class->post_schedule) in post_schedule()
2262 rq->curr->sched_class->post_schedule(rq); in post_schedule()
2425 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); in sched_exec()
2483 p->sched_class->update_curr(rq); in task_sched_runtime()
2505 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
2666 const struct sched_class *class = &fair_sched_class; in pick_next_task()
2673 if (likely(prev->sched_class == class && in pick_next_task()
3002 const struct sched_class *prev_class; in rt_mutex_setprio()
3028 prev_class = p->sched_class; in rt_mutex_setprio()
3054 p->sched_class = &dl_sched_class; in rt_mutex_setprio()
3060 p->sched_class = &rt_sched_class; in rt_mutex_setprio()
3066 p->sched_class = &fair_sched_class; in rt_mutex_setprio()
3072 p->sched_class->set_curr_task(rq); in rt_mutex_setprio()
3324 p->sched_class = &dl_sched_class; in __setscheduler()
3326 p->sched_class = &rt_sched_class; in __setscheduler()
3328 p->sched_class = &fair_sched_class; in __setscheduler()
3423 const struct sched_class *prev_class; in __sched_setscheduler()
3623 prev_class = p->sched_class; in __sched_setscheduler()
3627 p->sched_class->set_curr_task(rq); in __sched_setscheduler()
4217 current->sched_class->yield_task(rq); in SYSCALL_DEFINE0()
4356 if (!curr->sched_class->yield_to_task) in yield_to()
4359 if (curr->sched_class != p->sched_class) in yield_to()
4365 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
4502 if (p->sched_class->get_rr_interval) in SYSCALL_DEFINE2()
4503 time_slice = p->sched_class->get_rr_interval(rq, p); in SYSCALL_DEFINE2()
4592 idle->sched_class = &idle_sched_class; in init_idle_bootup_task()
4642 idle->sched_class = &idle_sched_class; in init_idle()
4758 if (p->sched_class->set_cpus_allowed) in do_set_cpus_allowed()
4759 p->sched_class->set_cpus_allowed(p, new_mask); in do_set_cpus_allowed()
4914 p->sched_class->set_curr_task(rq); in sched_setnuma()
4983 static const struct sched_class fake_sched_class = {
4992 .sched_class = &fake_sched_class,
5037 next->sched_class->put_prev_task(rq, next); in migrate_tasks()
5234 const struct sched_class *class; in set_rq_online()
5249 const struct sched_class *class; in set_rq_offline()
7256 current->sched_class = &fair_sched_class; in sched_init()
7349 const struct sched_class *prev_class = p->sched_class; in normalize_task()
7568 if (tsk->sched_class->task_move_group) in sched_move_task()
7569 tsk->sched_class->task_move_group(tsk, queued); in sched_move_task()
7575 tsk->sched_class->set_curr_task(rq); in sched_move_task()
8021 if (task->sched_class != &fair_sched_class) in cpu_cgroup_can_attach()
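All of the call sites above follow one dispatch pattern: each task carries a pointer to a const table of function pointers (its sched_class), core code invokes mandatory hooks directly (enqueue_task, task_tick, select_task_rq), checks optional hooks for NULL before calling them (task_fork, task_woken, migrate_task_rq, ...), and switching a task's policy is just repointing p->sched_class at a different table (dl, rt, fair, idle, stop). The following is a minimal, self-contained sketch of that pattern only, not the kernel's real definitions: sched_class_sketch, fair_like_class, rt_like_class and the simplified struct task / struct rq are made-up stand-ins, and the three wrapper functions merely mirror the call shapes of enqueue_task(), scheduler_tick(), and sched_fork() seen in the listing.

/*
 * Illustrative sketch of the sched_class dispatch pattern.
 * None of these names are kernel symbols.
 */
#include <stdio.h>

struct rq;     /* stands in for the per-CPU runqueue */
struct task;   /* stands in for task_struct          */

struct sched_class_sketch {
    /* mandatory hooks: every class provides them */
    void (*enqueue_task)(struct rq *rq, struct task *p, int flags);
    void (*task_tick)(struct rq *rq, struct task *p, int queued);
    /* optional hook: call sites must check for NULL first */
    void (*task_fork)(struct task *p);
};

struct task {
    const struct sched_class_sketch *sched_class;
    int prio;
};

struct rq {
    struct task *curr;
};

/* a "fair"-like class that implements every hook */
static void fair_enqueue(struct rq *rq, struct task *p, int flags)
{ (void)rq; (void)flags; printf("fair: enqueue prio %d\n", p->prio); }
static void fair_tick(struct rq *rq, struct task *p, int queued)
{ (void)rq; (void)queued; printf("fair: tick prio %d\n", p->prio); }
static void fair_fork(struct task *p)
{ printf("fair: fork prio %d\n", p->prio); }

static const struct sched_class_sketch fair_like_class = {
    .enqueue_task = fair_enqueue,
    .task_tick    = fair_tick,
    .task_fork    = fair_fork,
};

/* an "rt"-like class that leaves the optional hook unset (NULL) */
static void rt_enqueue(struct rq *rq, struct task *p, int flags)
{ (void)rq; (void)flags; printf("rt: enqueue prio %d\n", p->prio); }
static void rt_tick(struct rq *rq, struct task *p, int queued)
{ (void)rq; (void)queued; printf("rt: tick prio %d\n", p->prio); }

static const struct sched_class_sketch rt_like_class = {
    .enqueue_task = rt_enqueue,
    .task_tick    = rt_tick,
    /* .task_fork intentionally left NULL */
};

/* mirrors the call shape of enqueue_task() at line 809 above */
static void enqueue_task(struct rq *rq, struct task *p, int flags)
{
    p->sched_class->enqueue_task(rq, p, flags);
}

/* mirrors the call shape of scheduler_tick() at line 2505 above */
static void scheduler_tick(struct rq *rq)
{
    rq->curr->sched_class->task_tick(rq, rq->curr, 0);
}

/* mirrors the NULL-checked optional hook in sched_fork() at lines 1940-1941 */
static void sched_fork(struct task *p)
{
    if (p->sched_class->task_fork)
        p->sched_class->task_fork(p);
}

int main(void)
{
    struct task fair_task = { .sched_class = &fair_like_class, .prio = 120 };
    struct task rt_task   = { .sched_class = &rt_like_class,   .prio = 10  };
    struct rq rq          = { .curr = &fair_task };

    enqueue_task(&rq, &fair_task, 0);
    enqueue_task(&rq, &rt_task, 0);
    scheduler_tick(&rq);
    sched_fork(&fair_task);  /* optional hook present: called */
    sched_fork(&rt_task);    /* optional hook NULL: skipped   */
    return 0;
}

The same NULL-check idiom explains why some hooks in the listing are guarded (task_woken, post_schedule, set_cpus_allowed, get_rr_interval, task_move_group) while others are called unconditionally: only hooks that every class implements may be dispatched without a check.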