cfs_rq_of    700  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of    783  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of    870  kernel/sched/fair.c  update_curr(cfs_rq_of(&rq->curr->se));
cfs_rq_of   2911  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   3100  kernel/sched/fair.c  reweight_entity(cfs_rq_of(se), se, shares, runnable);
cfs_rq_of   3397  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   3651  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   3664  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   4024  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   4035  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   4046  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   4516  kernel/sched/fair.c  struct cfs_rq *qcfs_rq = cfs_rq_of(se);
cfs_rq_of   4589  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   4608  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   5138  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   5168  kernel/sched/fair.c  if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
cfs_rq_of   5232  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   5246  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   5296  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   5323  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   5349  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   6548  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of   6667  kernel/sched/fair.c  cfs_rq_of(se)->last = se;
cfs_rq_of   6679  kernel/sched/fair.c  cfs_rq_of(se)->next = se;
cfs_rq_of   6686  kernel/sched/fair.c  cfs_rq_of(se)->skip = se;
cfs_rq_of   6709  kernel/sched/fair.c  if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
cfs_rq_of   6743  kernel/sched/fair.c  update_curr(cfs_rq_of(se));
cfs_rq_of   6849  kernel/sched/fair.c  put_prev_entity(cfs_rq_of(pse), pse);
cfs_rq_of   6853  kernel/sched/fair.c  set_next_entity(cfs_rq_of(se), se);
cfs_rq_of   6928  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   6974  kernel/sched/fair.c  if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
cfs_rq_of   7168  kernel/sched/fair.c  (&p->se == cfs_rq_of(&p->se)->next ||
cfs_rq_of   7169  kernel/sched/fair.c  &p->se == cfs_rq_of(&p->se)->last))
cfs_rq_of   7610  kernel/sched/fair.c  update_load_avg(cfs_rq_of(se), se, 0);
cfs_rq_of   7644  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of   9985  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of  10095  kernel/sched/fair.c  cfs_rq = cfs_rq_of(se);
cfs_rq_of  10109  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of  10120  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of  10140  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of  10157  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of  10207  kernel/sched/fair.c  struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq_of  10421  kernel/sched/fair.c  update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
cfs_rq_of  10457  kernel/sched/fair.c  rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
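Every reference above maps a scheduling entity (struct sched_entity *se) to the CFS runqueue it is, or is about to be, queued on; the common pattern "cfs_rq = cfs_rq_of(se);" inside for_each_sched_entity() loops walks this mapping up the group-scheduling hierarchy. As a rough orientation only, the helper itself is defined near the top of kernel/sched/fair.c along the lines of the sketch below; the exact form varies by kernel version and config, and it depends on kernel-internal types and helpers (struct sched_entity, struct cfs_rq, task_of(), task_rq()), so it is not a standalone program.

    /*
     * Sketch (not verbatim) of cfs_rq_of() as found in kernel/sched/fair.c.
     */
    #ifdef CONFIG_FAIR_GROUP_SCHED
    /* With group scheduling, the entity carries a pointer to its cfs_rq. */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            return se->cfs_rq;
    }
    #else
    /* Without group scheduling, every entity is a task on its CPU's rq->cfs. */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            struct task_struct *p = task_of(se);
            struct rq *rq = task_rq(p);

            return &rq->cfs;
    }
    #endif

Because the accessor is a trivial inline pointer lookup, call sites such as update_curr(cfs_rq_of(se)) or set_next_entity(cfs_rq_of(se), se) above use it freely rather than caching the result, except where a loop variable like cfs_rq or qcfs_rq is reused.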