gcfs_rq 3081 kernel/sched/fair.c struct cfs_rq *gcfs_rq = group_cfs_rq(se);
gcfs_rq 3084 kernel/sched/fair.c if (!gcfs_rq)
gcfs_rq 3087 kernel/sched/fair.c if (throttled_hierarchy(gcfs_rq))
gcfs_rq 3091 kernel/sched/fair.c runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
gcfs_rq 3096 kernel/sched/fair.c shares = calc_group_shares(gcfs_rq);
gcfs_rq 3097 kernel/sched/fair.c runnable = calc_group_runnable(gcfs_rq, shares);
gcfs_rq 3284 kernel/sched/fair.c update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
gcfs_rq 3286 kernel/sched/fair.c long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
gcfs_rq 3301 kernel/sched/fair.c se->avg.util_avg = gcfs_rq->avg.util_avg;
gcfs_rq 3310 kernel/sched/fair.c update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
gcfs_rq 3312 kernel/sched/fair.c long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
gcfs_rq 3320 kernel/sched/fair.c gcfs_rq->prop_runnable_sum = 0;
gcfs_rq 3334 kernel/sched/fair.c if (scale_load_down(gcfs_rq->load.weight)) {
gcfs_rq 3335 kernel/sched/fair.c load_sum = div_s64(gcfs_rq->avg.load_sum,
gcfs_rq 3336 kernel/sched/fair.c scale_load_down(gcfs_rq->load.weight));
gcfs_rq 3386 kernel/sched/fair.c struct cfs_rq *cfs_rq, *gcfs_rq;
gcfs_rq 3391 kernel/sched/fair.c gcfs_rq = group_cfs_rq(se);
gcfs_rq 3392 kernel/sched/fair.c if (!gcfs_rq->propagate)
gcfs_rq 3395 kernel/sched/fair.c gcfs_rq->propagate = 0;
gcfs_rq 3399 kernel/sched/fair.c add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
gcfs_rq 3401 kernel/sched/fair.c update_tg_cfs_util(cfs_rq, se, gcfs_rq);
gcfs_rq 3402 kernel/sched/fair.c update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
gcfs_rq 3416 kernel/sched/fair.c struct cfs_rq *gcfs_rq = group_cfs_rq(se);
gcfs_rq 3429 kernel/sched/fair.c if (gcfs_rq->propagate)