Searched refs:sched_class (Results 1 - 8 of 8) sorted by relevance

/linux-4.1.27/kernel/sched/
idle_task.c
85 const struct sched_class idle_sched_class = {
stop_task.c
112 const struct sched_class stop_sched_class = {
core.c
350 rq->curr->sched_class->task_tick(rq, rq->curr, 1); hrtick()
809 p->sched_class->enqueue_task(rq, p, flags); enqueue_task()
816 p->sched_class->dequeue_task(rq, p, flags); dequeue_task()
905 stop->sched_class = &stop_sched_class; sched_set_stop_task()
915 old_stop->sched_class = &rt_sched_class; sched_set_stop_task()
979 * Can drop rq->lock because sched_class::switched_from() methods drop it.
982 const struct sched_class *prev_class, check_class_changed()
985 if (prev_class != p->sched_class) { check_class_changed()
989 p->sched_class->switched_to(rq, p); check_class_changed()
991 p->sched_class->prio_changed(rq, p, oldprio); check_class_changed()
996 const struct sched_class *class; check_preempt_curr()
998 if (p->sched_class == rq->curr->sched_class) { check_preempt_curr()
999 rq->curr->sched_class->check_preempt_curr(rq, p, flags); check_preempt_curr()
1002 if (class == rq->curr->sched_class) for_each_class()
1004 if (class == p->sched_class) { for_each_class()
1049 if (p->sched_class->migrate_task_rq) set_task_cpu()
1050 p->sched_class->migrate_task_rq(p, new_cpu); set_task_cpu()
1382 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); select_task_rq()
1469 if (p->sched_class->task_woken) ttwu_do_wakeup()
1470 p->sched_class->task_woken(rq, p); ttwu_do_wakeup()
1693 if (p->sched_class->task_waking) try_to_wake_up()
1694 p->sched_class->task_waking(p); try_to_wake_up()
1935 p->sched_class = &rt_sched_class; sched_fork()
1937 p->sched_class = &fair_sched_class; sched_fork()
1940 if (p->sched_class->task_fork) sched_fork()
1941 p->sched_class->task_fork(p); sched_fork()
2100 if (p->sched_class->task_woken) wake_up_new_task()
2101 p->sched_class->task_woken(rq, p); wake_up_new_task()
2237 if (prev->sched_class->task_dead)
2238 prev->sched_class->task_dead(prev);
2261 if (rq->curr->sched_class->post_schedule) post_schedule()
2262 rq->curr->sched_class->post_schedule(rq); post_schedule()
2425 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); sched_exec()
2483 p->sched_class->update_curr(rq); task_sched_runtime()
2505 curr->sched_class->task_tick(rq, curr, 0); scheduler_tick()
2666 const struct sched_class *class = &fair_sched_class; pick_next_task()
2673 if (likely(prev->sched_class == class && pick_next_task()
3002 const struct sched_class *prev_class; rt_mutex_setprio()
3028 prev_class = p->sched_class; rt_mutex_setprio()
3054 p->sched_class = &dl_sched_class; rt_mutex_setprio()
3060 p->sched_class = &rt_sched_class; rt_mutex_setprio()
3066 p->sched_class = &fair_sched_class; rt_mutex_setprio()
3072 p->sched_class->set_curr_task(rq); rt_mutex_setprio()
3324 p->sched_class = &dl_sched_class; __setscheduler()
3326 p->sched_class = &rt_sched_class; __setscheduler()
3328 p->sched_class = &fair_sched_class; __setscheduler()
3423 const struct sched_class *prev_class; __sched_setscheduler()
3623 prev_class = p->sched_class; __sched_setscheduler()
3627 p->sched_class->set_curr_task(rq); __sched_setscheduler()
4217 current->sched_class->yield_task(rq); SYSCALL_DEFINE0()
4356 if (!curr->sched_class->yield_to_task) yield_to()
4359 if (curr->sched_class != p->sched_class) yield_to()
4365 yielded = curr->sched_class->yield_to_task(rq, p, preempt); yield_to()
4502 if (p->sched_class->get_rr_interval) SYSCALL_DEFINE2()
4503 time_slice = p->sched_class->get_rr_interval(rq, p); SYSCALL_DEFINE2()
4592 idle->sched_class = &idle_sched_class; init_idle_bootup_task()
4642 idle->sched_class = &idle_sched_class; init_idle()
4758 if (p->sched_class->set_cpus_allowed) do_set_cpus_allowed()
4759 p->sched_class->set_cpus_allowed(p, new_mask); do_set_cpus_allowed()
4914 p->sched_class->set_curr_task(rq); sched_setnuma()
4983 static const struct sched_class fake_sched_class = {
4992 .sched_class = &fake_sched_class,
5037 next->sched_class->put_prev_task(rq, next); migrate_tasks()
5234 const struct sched_class *class; set_rq_online()
5249 const struct sched_class *class; set_rq_offline()
7256 current->sched_class = &fair_sched_class;
7349 const struct sched_class *prev_class = p->sched_class; normalize_task()
7568 if (tsk->sched_class->task_move_group) sched_move_task()
7569 tsk->sched_class->task_move_group(tsk, queued); sched_move_task()
7575 tsk->sched_class->set_curr_task(rq); sched_move_task()
8021 if (task->sched_class != &fair_sched_class) cgroup_taskset_for_each()
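
Every core.c hit above follows the same pattern: the core scheduler never tests a scheduling policy directly, it dispatches through the current task's sched_class ops table. The pick_next_task() hits at 2666 and 2673 are the clearest example. Below is a condensed sketch of that dispatch (not the verbatim 4.1.27 function; the real one also handles the RETRY_TASK return value):

    static struct task_struct *
    pick_next_task(struct rq *rq, struct task_struct *prev)
    {
            const struct sched_class *class = &fair_sched_class;
            struct task_struct *p;

            /*
             * Fast path: if every runnable task on this rq is in the
             * fair class, ask it directly instead of walking the chain.
             */
            if (likely(prev->sched_class == class &&
                       rq->nr_running == rq->cfs.h_nr_running)) {
                    p = fair_sched_class.pick_next_task(rq, prev);
                    if (p)
                            return p;
            }

            /* Slow path: walk the classes highest-priority first. */
            for_each_class(class) {
                    p = class->pick_next_task(rq, prev);
                    if (p)
                            return p;
            }

            BUG(); /* the idle class always has a task to return */
    }
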
sched.h
1153 #define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
1163 struct sched_class { struct
1164 const struct sched_class *next;
1227 prev->sched_class->put_prev_task(rq, prev); put_prev_task()
1234 extern const struct sched_class stop_sched_class;
1235 extern const struct sched_class dl_sched_class;
1236 extern const struct sched_class rt_sched_class;
1237 extern const struct sched_class fair_sched_class;
1238 extern const struct sched_class idle_sched_class;
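
sched.h:1163 is the ops table these hits call through. An abridged view, trimmed to the hooks actually exercised above (the real 4.1.27 struct has more members and CONFIG_SMP-only sections), followed by the macros that give for_each_class() its fixed priority order stop -> dl -> rt -> fair -> idle:

    struct sched_class {
            const struct sched_class *next;  /* next lower-priority class */

            void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
            void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
            void (*yield_task)(struct rq *rq);

            void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

            struct task_struct *(*pick_next_task)(struct rq *rq,
                                                  struct task_struct *prev);
            void (*put_prev_task)(struct rq *rq, struct task_struct *p);

            /* SMP-only in the real header: */
            int  (*select_task_rq)(struct task_struct *p, int task_cpu,
                                   int sd_flag, int flags);
            void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
            void (*task_waking)(struct task_struct *task);
            void (*task_woken)(struct rq *this_rq, struct task_struct *task);

            void (*set_curr_task)(struct rq *rq);
            void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
            void (*task_fork)(struct task_struct *p);
            void (*task_dead)(struct task_struct *p);

            void (*switched_from)(struct rq *this_rq, struct task_struct *task);
            void (*switched_to)(struct rq *this_rq, struct task_struct *task);
            void (*prio_changed)(struct rq *this_rq, struct task_struct *task,
                                 int oldprio);

            void (*update_curr)(struct rq *rq);
    };

    /* Walk starts at the highest class; ->next links the chain. */
    #define sched_class_highest (&stop_sched_class)
    #define for_each_class(class) \
            for (class = sched_class_highest; class; class = class->next)
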
rt.c
942 if (curr->sched_class != &rt_sched_class) update_curr_rt()
1483 if (prev->sched_class == &rt_sched_class) pick_next_task_rt()
2300 const struct sched_class rt_sched_class = {
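
The rt.c hit at 942 shows the membership idiom used throughout these files: each class is a unique const object, so "is this an RT task?" is a pointer comparison, with no type tag needed. A minimal sketch of the guard at the top of update_curr_rt() (the accounting body is elided):

    static void update_curr_rt(struct rq *rq)
    {
            struct task_struct *curr = rq->curr;

            /*
             * curr may have changed class since it last ran (priority
             * boosting, sched_setscheduler()); only charge RT runtime
             * to tasks still in the RT class.
             */
            if (curr->sched_class != &rt_sched_class)
                    return;

            /* ... update exec runtime, enforce RT bandwidth ... */
    }
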
deadline.c
1117 if (prev->sched_class == &dl_sched_class) pick_next_task_dl()
1779 const struct sched_class dl_sched_class = {
fair.c
240 const struct sched_class fair_sched_class;
4199 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) hrtick_update()
5109 if (prev->sched_class != &fair_sched_class) pick_next_task_fair()
5448 if (p->sched_class != &fair_sched_class) task_hot()
6858 * because of other sched_class or IRQs if more capacity stays need_active_balance()
7868 * Upon rescheduling, sched_class::put_prev_task() will place task_fork_fair()
8225 const struct sched_class fair_sched_class = {
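
fair.c:8225 (forward-declared at 240) shows how a concrete policy plugs into the table: a const object with designated initializers naming its hook implementations, and .next pointing at the next class down. Abridged from the 4.1.27 initializer (SMP-only hooks omitted; consult the file for the full list):

    const struct sched_class fair_sched_class = {
            .next                   = &idle_sched_class,
            .enqueue_task           = enqueue_task_fair,
            .dequeue_task           = dequeue_task_fair,
            .yield_task             = yield_task_fair,
            .yield_to_task          = yield_to_task_fair,

            .check_preempt_curr     = check_preempt_wakeup,

            .pick_next_task         = pick_next_task_fair,
            .put_prev_task          = put_prev_task_fair,

            .set_curr_task          = set_curr_task_fair,
            .task_tick              = task_tick_fair,
            .task_fork              = task_fork_fair,

            .prio_changed           = prio_changed_fair,
            .switched_from          = switched_from_fair,
            .switched_to            = switched_to_fair,

            .get_rr_interval        = get_rr_interval_fair,
            .update_curr            = update_curr_fair,
    };
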
/linux-4.1.27/include/linux/
sched.h
1313 const struct sched_class *sched_class; member in struct task_struct
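
This include/linux hit is the consumer side: every task carries a class pointer, and the core.c assignments at 3324-3328 show how it is rederived whenever the scheduling parameters change. A minimal sketch of that mapping inside __setscheduler(), assuming the dl_prio()/rt_prio() helpers from the same header:

    /* Pick the class from the task's effective priority. */
    if (dl_prio(p->prio))
            p->sched_class = &dl_sched_class;
    else if (rt_prio(p->prio))
            p->sched_class = &rt_sched_class;
    else
            p->sched_class = &fair_sched_class;
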

Completed in 263 milliseconds