sched_class 3059 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct sched_class *e;
sched_class 47 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e;
sched_class 108 drivers/net/ethernet/chelsio/cxgb4/sched.c static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
sched_class 113 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e, *end;
sched_class 114 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *found = NULL;
sched_class 146 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e;
sched_class 188 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e;
sched_class 228 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e,
sched_class 354 drivers/net/ethernet/chelsio/cxgb4/sched.c static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
sched_class 358 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e, *end;
sched_class 359 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *found = NULL;
sched_class 400 drivers/net/ethernet/chelsio/cxgb4/sched.c static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
sched_class 403 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e;
sched_class 454 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
sched_class 470 drivers/net/ethernet/chelsio/cxgb4/sched.c static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
sched_class 487 drivers/net/ethernet/chelsio/cxgb4/sched.c memset(&s->tab[i], 0, sizeof(struct sched_class));
sched_class 509 drivers/net/ethernet/chelsio/cxgb4/sched.c struct sched_class *e;
sched_class 77 drivers/net/ethernet/chelsio/cxgb4/sched.h struct sched_class tab[0];
sched_class 103 drivers/net/ethernet/chelsio/cxgb4/sched.h struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
sched_class 675 include/linux/sched.h const struct sched_class *sched_class;
sched_class 246 kernel/sched/core.c rq->curr->sched_class->task_tick(rq, rq->curr, 1);
sched_class 766 kernel/sched/core.c if (update_load && p->sched_class == &fair_sched_class) {
sched_class 1024 kernel/sched/core.c if (unlikely(!p->sched_class->uclamp_enabled))
sched_class 1039 kernel/sched/core.c if (unlikely(!p->sched_class->uclamp_enabled))
sched_class 1296 kernel/sched/core.c p->sched_class->enqueue_task(rq, p, flags);
sched_class 1310 kernel/sched/core.c p->sched_class->dequeue_task(rq, p, flags);
sched_class 1400 kernel/sched/core.c const struct sched_class *prev_class,
sched_class 1403 kernel/sched/core.c if (prev_class != p->sched_class) {
sched_class 1407 kernel/sched/core.c p->sched_class->switched_to(rq, p);
sched_class 1409 kernel/sched/core.c p->sched_class->prio_changed(rq, p, oldprio);
sched_class 1414 kernel/sched/core.c const struct sched_class *class;
sched_class 1416 kernel/sched/core.c if (p->sched_class == rq->curr->sched_class) {
sched_class 1417 kernel/sched/core.c rq->curr->sched_class->check_preempt_curr(rq, p, flags);
sched_class 1420 kernel/sched/core.c if (class == rq->curr->sched_class)
sched_class 1422 kernel/sched/core.c if (class == p->sched_class) {
sched_class 1607 kernel/sched/core.c p->sched_class->set_cpus_allowed(p, new_mask);
sched_class 1718 kernel/sched/core.c p->sched_class == &fair_sched_class &&
sched_class 1744 kernel/sched/core.c if (p->sched_class->migrate_task_rq)
sched_class 1745 kernel/sched/core.c p->sched_class->migrate_task_rq(p, new_cpu);
sched_class 2104 kernel/sched/core.c cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
sched_class 2146 kernel/sched/core.c stop->sched_class = &stop_sched_class;
sched_class 2156 kernel/sched/core.c old_stop->sched_class = &rt_sched_class;
sched_class 2220 kernel/sched/core.c if (p->sched_class->task_woken) {
sched_class 2226 kernel/sched/core.c p->sched_class->task_woken(rq, p);
sched_class 2878 kernel/sched/core.c p->sched_class = &rt_sched_class;
sched_class 2880 kernel/sched/core.c p->sched_class = &fair_sched_class;
sched_class 2897 kernel/sched/core.c if (p->sched_class->task_fork)
sched_class 2898 kernel/sched/core.c p->sched_class->task_fork(p);
sched_class 2966 kernel/sched/core.c if (p->sched_class->task_woken) {
sched_class 2972 kernel/sched/core.c p->sched_class->task_woken(rq, p);
sched_class 3241 kernel/sched/core.c if (prev->sched_class->task_dead)
sched_class 3242 kernel/sched/core.c prev->sched_class->task_dead(prev);
sched_class 3497 kernel/sched/core.c dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
sched_class 3573 kernel/sched/core.c p->sched_class->update_curr(rq);
sched_class 3597 kernel/sched/core.c curr->sched_class->task_tick(rq, curr, 0);
sched_class 3684 kernel/sched/core.c curr->sched_class->task_tick(rq, curr, 0);
sched_class 3906 kernel/sched/core.c const struct sched_class *class;
sched_class 3915 kernel/sched/core.c if (likely((prev->sched_class == &idle_sched_class ||
sched_class 3916 kernel/sched/core.c prev->sched_class == &fair_sched_class) &&
sched_class 3940 kernel/sched/core.c for_class_range(class, prev->sched_class, &idle_sched_class) {
sched_class 4374 kernel/sched/core.c const struct sched_class *prev_class;
sched_class 4431 kernel/sched/core.c prev_class = p->sched_class;
sched_class 4455 kernel/sched/core.c p->sched_class = &dl_sched_class;
sched_class 4461 kernel/sched/core.c p->sched_class = &rt_sched_class;
sched_class 4467 kernel/sched/core.c p->sched_class = &fair_sched_class;
sched_class 4726 kernel/sched/core.c p->sched_class = &dl_sched_class;
sched_class 4728 kernel/sched/core.c p->sched_class = &rt_sched_class;
sched_class 4730 kernel/sched/core.c p->sched_class = &fair_sched_class;
sched_class 4757 kernel/sched/core.c const struct sched_class *prev_class;
sched_class 4974 kernel/sched/core.c prev_class = p->sched_class;
sched_class 5588 kernel/sched/core.c current->sched_class->yield_task(rq);
sched_class 5719 kernel/sched/core.c if (!curr->sched_class->yield_to_task)
sched_class 5722 kernel/sched/core.c if (curr->sched_class != p->sched_class)
sched_class 5728 kernel/sched/core.c yielded = curr->sched_class->yield_to_task(rq, p, preempt);
sched_class 5869 kernel/sched/core.c if (p->sched_class->get_rr_interval)
sched_class 5870 kernel/sched/core.c time_slice = p->sched_class->get_rr_interval(rq, p);
sched_class 6070 kernel/sched/core.c idle->sched_class = &idle_sched_class;
sched_class 6207 kernel/sched/core.c const struct sched_class *class;
sched_class 6213 kernel/sched/core.c next->sched_class->put_prev_task(rq, next);
sched_class 6307 kernel/sched/core.c const struct sched_class *class;
sched_class 6322 kernel/sched/core.c const struct sched_class *class;
sched_class 7019 kernel/sched/core.c if (tsk->sched_class->task_change_group)
sched_class 7020 kernel/sched/core.c tsk->sched_class->task_change_group(tsk, type);
sched_class 1759 kernel/sched/deadline.c if (rq->curr->sched_class != &dl_sched_class)
sched_class 2434 kernel/sched/deadline.c const struct sched_class dl_sched_class = {
sched_class 244 kernel/sched/fair.c const struct sched_class fair_sched_class;
sched_class 800 kernel/sched/fair.c if (p->sched_class != &fair_sched_class) {
sched_class 5165 kernel/sched/fair.c if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
sched_class 6788 kernel/sched/fair.c if (!prev || prev->sched_class != &fair_sched_class)
sched_class 7158 kernel/sched/fair.c if (p->sched_class != &fair_sched_class)
sched_class 7548 kernel/sched/fair.c const struct sched_class *curr_class;
sched_class 7556 kernel/sched/fair.c curr_class = rq->curr->sched_class;
sched_class 10465 kernel/sched/fair.c const struct sched_class fair_sched_class = {
sched_class 455 kernel/sched/idle.c const struct sched_class idle_sched_class = {
sched_class 962 kernel/sched/rt.c if (curr->sched_class != &rt_sched_class)
sched_class 1533 kernel/sched/rt.c if (rq->curr->sched_class != &rt_sched_class)
sched_class 2360 kernel/sched/rt.c const struct sched_class rt_sched_class = {
sched_class 1709 kernel/sched/sched.h const struct sched_class *next;
sched_class 1783 kernel/sched/sched.h prev->sched_class->put_prev_task(rq, prev);
sched_class 1789 kernel/sched/sched.h next->sched_class->set_next_task(rq, next, false);
sched_class 1804 kernel/sched/sched.h extern const struct sched_class stop_sched_class;
sched_class 1805 kernel/sched/sched.h extern const struct sched_class dl_sched_class;
sched_class 1806 kernel/sched/sched.h extern const struct sched_class rt_sched_class;
sched_class 1807 kernel/sched/sched.h extern const struct sched_class fair_sched_class;
sched_class 1808 kernel/sched/sched.h extern const struct sched_class idle_sched_class;
sched_class 121 kernel/sched/stop_task.c const struct sched_class stop_sched_class = {
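Note that the drivers/net/ethernet/chelsio/cxgb4/ hits refer to a driver-local struct sched_class used for hardware traffic-scheduling classes, unrelated to the CPU scheduler. The kernel/sched/ and include/linux/sched.h hits all go through one pattern: each task carries a const pointer to a per-policy ops table (fair_sched_class, rt_sched_class, dl_sched_class, idle_sched_class, stop_sched_class), and core.c dispatches through it, e.g. rq->curr->sched_class->task_tick(rq, rq->curr, 0). The following is a minimal, self-contained C sketch of that dispatch pattern only; the struct members and types are simplified stand-ins showing a few of the hooks visible in the call sites above, not the real definitions from include/linux/sched.h and kernel/sched/sched.h.

/*
 * Toy illustration of the sched_class ops-table dispatch seen in the
 * kernel/sched/ entries above. Types are simplified stand-ins.
 */
#include <stdio.h>

struct rq;
struct task_struct;

/* A few of the hooks visible in the call sites above (simplified). */
struct sched_class {
	void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
};

struct task_struct {
	const char *comm;
	const struct sched_class *sched_class;	/* which policy handles this task */
};

struct rq {
	struct task_struct *curr;		/* task currently running on this CPU */
};

static void fair_task_tick(struct rq *rq, struct task_struct *p, int queued)
{
	(void)rq;
	printf("fair tick for %s (queued=%d)\n", p->comm, queued);
}

static void rt_task_tick(struct rq *rq, struct task_struct *p, int queued)
{
	(void)rq;
	printf("rt tick for %s (queued=%d)\n", p->comm, queued);
}

/* One const ops table per policy, as in fair.c, rt.c, deadline.c, idle.c. */
static const struct sched_class fair_sched_class = { .task_tick = fair_task_tick };
static const struct sched_class rt_sched_class   = { .task_tick = rt_task_tick };

int main(void)
{
	struct task_struct worker = { .comm = "worker", .sched_class = &fair_sched_class };
	struct task_struct irqjob = { .comm = "irqjob", .sched_class = &rt_sched_class };
	struct rq rq = { .curr = &worker };

	/* The core.c idiom: no switch on policy, just an indirect call
	 * through the current task's class pointer. */
	rq.curr->sched_class->task_tick(&rq, rq.curr, 0);

	rq.curr = &irqjob;
	rq.curr->sched_class->task_tick(&rq, rq.curr, 0);

	return 0;
}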