Lines matching refs: tg (task_group references in kernel/sched/fair.c)
298 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
299 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { in list_add_leaf_cfs_rq()
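
The two hits at 298-299 are the parent-ordering check in list_add_leaf_cfs_rq(). A rough sketch of the surrounding logic; the head/tail insertion details changed across kernel releases, so the list_add_rcu()/list_add_tail_rcu() placement below is indicative only:

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * A child cfs_rq must appear before its parent on
		 * rq->leaf_cfs_rq_list so blocked-load updates can walk
		 * the hierarchy bottom-up.  If the parent is already on
		 * the list, insert ahead of it; otherwise append at the
		 * tail so the parent lands after us when it is enqueued.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list)
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				     &rq_of(cfs_rq)->leaf_cfs_rq_list);
		else
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
					  &rq_of(cfs_rq)->leaf_cfs_rq_list);

		cfs_rq->on_list = 1;
	}
}
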
1115 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
2321 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) in calc_tg_weight() argument
2330 tg_weight = atomic_long_read(&tg->load_avg); in calc_tg_weight()
2337 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares() argument
2341 tg_weight = calc_tg_weight(tg, cfs_rq); in calc_cfs_shares()
2344 shares = (tg->shares * load); in calc_cfs_shares()
2350 if (shares > tg->shares) in calc_cfs_shares()
2351 shares = tg->shares; in calc_cfs_shares()
2356 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares() argument
2358 return tg->shares; in calc_cfs_shares()
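
The hits at 2321-2358 are the per-CPU group shares calculation: tg->load_avg approximates the group's total weight across CPUs, and each CPU's cfs_rq gets a slice of tg->shares proportional to its local load. A sketch assembled from these fragments (modeled on fair.c of this era; the MIN_SHARES clamp is filled in from context):

static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/* Start from the group-wide load average ... */
	tg_weight = atomic_long_read(&tg->load_avg);
	/* ... but substitute this CPU's current weight for its stale contribution. */
	tg_weight -= cfs_rq->tg_load_contrib;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	tg_weight = calc_tg_weight(tg, cfs_rq);
	load = cfs_rq->load.weight;

	/* This CPU's slice: tg->shares * (local load / total group load). */
	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}

Without CONFIG_FAIR_GROUP_SCHED's SMP path (the hit at 2356-2358), the function degenerates to returning tg->shares unchanged.
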
2381 struct task_group *tg; in update_cfs_shares() local
2385 tg = cfs_rq->tg; in update_cfs_shares()
2386 se = tg->se[cpu_of(rq_of(cfs_rq))]; in update_cfs_shares()
2390 if (likely(se->load.weight == tg->shares)) in update_cfs_shares()
2393 shares = calc_cfs_shares(cfs_rq, tg); in update_cfs_shares()
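
The 2381-2393 hits come from update_cfs_shares(), which recomputes the group entity's weight on this CPU and applies it. A sketch of how the listed lines fit together; the throttled_hierarchy() early-out and the reweight_entity() call are reconstructed from the usual shape of this function:

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se || throttled_hierarchy(cfs_rq))
		return;
#ifndef CONFIG_SMP
	/* Without SMP every CPU already carries the full group weight. */
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	/* Install the new weight on the group's sched_entity. */
	reweight_entity(cfs_rq_of(se), se, shares);
}
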
2625 struct task_group *tg = cfs_rq->tg; in __update_cfs_rq_tg_load_contrib() local
2635 atomic_long_add(tg_contrib, &tg->load_avg); in __update_cfs_rq_tg_load_contrib()
2647 struct task_group *tg = cfs_rq->tg; in __update_tg_runnable_avg() local
2656 atomic_add(contrib, &tg->runnable_avg); in __update_tg_runnable_avg()
2664 struct task_group *tg = cfs_rq->tg; in __update_group_entity_contrib() local
2669 contrib = cfs_rq->tg_load_contrib * tg->shares; in __update_group_entity_contrib()
2671 atomic_long_read(&tg->load_avg) + 1); in __update_group_entity_contrib()
2696 runnable_avg = atomic_read(&tg->runnable_avg); in __update_group_entity_contrib()
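
The 2625-2696 hits are the per-cfs_rq contributions to the group-wide atomics tg->load_avg and tg->runnable_avg, and the derivation of a group entity's load_avg_contrib from them. A condensed sketch; the update thresholds and the runnable-average scaling are from memory of this code and may differ in detail:

static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
						   int force_update)
{
	struct task_group *tg = cfs_rq->tg;
	long tg_contrib;

	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
	tg_contrib -= cfs_rq->tg_load_contrib;

	/* Only fold deltas into the shared atomic when they are significant. */
	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
		atomic_long_add(tg_contrib, &tg->load_avg);
		cfs_rq->tg_load_contrib += tg_contrib;
	}
}

static inline void __update_group_entity_contrib(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = group_cfs_rq(se);
	struct task_group *tg = cfs_rq->tg;
	u64 contrib;
	int runnable_avg;

	/* The entity's share of tg->shares, weighted by this cfs_rq's load. */
	contrib = cfs_rq->tg_load_contrib * tg->shares;
	se->avg.load_avg_contrib = div_u64(contrib,
				atomic_long_read(&tg->load_avg) + 1);

	/* Scale down further if the group as a whole is not fully runnable. */
	runnable_avg = atomic_read(&tg->runnable_avg);
	if (runnable_avg < NICE_0_LOAD) {
		se->avg.load_avg_contrib *= runnable_avg;
		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
	}
}
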
3451 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
3453 return &tg->cfs_bandwidth; in tg_cfs_bandwidth()
3468 struct task_group *tg = cfs_rq->tg; in assign_cfs_rq_runtime() local
3469 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); in assign_cfs_rq_runtime()
3517 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in expire_cfs_rq_runtime()
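
From 3451 onward the hits are CFS bandwidth control. tg_cfs_bandwidth() simply returns the group's cfs_bandwidth pool, and assign_cfs_rq_runtime() pulls a slice of quota from it into the local cfs_rq. A simplified sketch; the period-timer activation and the expiry bookkeeping used by expire_cfs_rq_runtime() are abbreviated:

static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
	return &tg->cfs_bandwidth;
}

/* Returns 0 if no runtime could be assigned (the cfs_rq must throttle). */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct task_group *tg = cfs_rq->tg;
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
	u64 amount = 0, min_amount;

	/* runtime_remaining is <= 0 here, so this is the slice we still need. */
	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota == RUNTIME_INF) {
		amount = min_amount;
	} else if (cfs_b->runtime > 0) {
		/* Take what we need, bounded by the remaining global runtime. */
		amount = min(cfs_b->runtime, min_amount);
		cfs_b->runtime -= amount;
	}
	raw_spin_unlock(&cfs_b->lock);

	cfs_rq->runtime_remaining += amount;

	return cfs_rq->runtime_remaining > 0;
}
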
3588 static inline int throttled_lb_pair(struct task_group *tg, in throttled_lb_pair() argument
3593 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
3594 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
3601 static int tg_unthrottle_up(struct task_group *tg, void *data) in tg_unthrottle_up() argument
3604 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
3618 static int tg_throttle_down(struct task_group *tg, void *data) in tg_throttle_down() argument
3621 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
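
The 3588-3621 hits are helpers that consult tg->cfs_rq[cpu] for throttling state: throttled_lb_pair() keeps the load balancer away from throttled hierarchies, while tg_throttle_down()/tg_unthrottle_up() are the per-group callbacks used when walking the group tree. A sketch with the clock bookkeeping reduced to comments:

static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	/* Refuse to migrate between the CPUs if either side is throttled. */
	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}

static int tg_throttle_down(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	/* First level of throttling for this cfs_rq: freeze its task clock. */
	if (!cfs_rq->throttle_count)
		cfs_rq->throttled_clock_task = rq_clock_task(rq);
	cfs_rq->throttle_count++;

	return 0;
}

static int tg_unthrottle_up(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	cfs_rq->throttle_count--;
	if (!cfs_rq->throttle_count) {
		/* Account the time spent throttled so cfs_rq_clock_task() skips it. */
		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
						     cfs_rq->throttled_clock_task;
	}

	return 0;
}
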
3634 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
3638 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
3642 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
3679 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
3684 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3696 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
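
The 3634-3696 hits show that throttle_cfs_rq() and unthrottle_cfs_rq() both start from cfs_rq->tg to find the bandwidth pool and the per-CPU group entity, then use walk_tg_tree_from() to run the callbacks above over the whole subtree. An outline with the dequeue/enqueue loops elided:

static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;

	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	/* Freeze task clocks in every descendant cfs_rq on this CPU. */
	rcu_read_lock();
	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
	rcu_read_unlock();

	/* ... dequeue 'se' and its ancestors, mark cfs_rq->throttled,
	 *     and put the cfs_rq on cfs_b->throttled_cfs_rq ... */
}

static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;

	se = cfs_rq->tg->se[cpu_of(rq)];

	/* ... clear cfs_rq->throttled and account throttled time in cfs_b ... */

	/* Unfreeze the subtree bottom-up, then re-enqueue 'se' upward. */
	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
}
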
3888 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
4089 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; in update_runtime_enabled()
4142 static inline int throttled_lb_pair(struct task_group *tg, in throttled_lb_pair() argument
4154 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
4478 static long effective_load(struct task_group *tg, int cpu, long wl, long wg) in effective_load() argument
4480 struct sched_entity *se = tg->se[cpu]; in effective_load()
4482 if (!tg->parent) /* the trivial, non-cgroup case */ in effective_load()
4488 tg = se->my_q->tg; in effective_load()
4493 W = wg + calc_tg_weight(tg, se->my_q); in effective_load()
4504 wl = (w * (long)tg->shares) / W; in effective_load()
4506 wl = tg->shares; in effective_load()
4535 static long effective_load(struct task_group *tg, int cpu, long wl, long wg) in effective_load() argument
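
The 4478-4535 hits are effective_load(), which estimates how the root-level load changes if wl units of weight are added to tg on a given CPU by re-running the calc_cfs_shares() arithmetic at each level of the hierarchy (the 4535 variant is the trivial non-cgroup stub). A sketch of the listed lines in context; the MIN_SHARES clamp and the subtraction of the old contribution are reconstructed from the usual form of this function:

static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent) /* the trivial, non-cgroup case */
		return wl;

	for_each_sched_entity(se) {
		long w, W;

		tg = se->my_q->tg;

		/* W = total group weight on this CPU plus the incoming wg. */
		W = wg + calc_tg_weight(tg, se->my_q);

		/* w = this cfs_rq's weight plus the incoming wl. */
		w = se->my_q->load.weight + wl;

		/* New share for this level, same formula as calc_cfs_shares(). */
		if (W > 0 && w < W)
			wl = (w * (long)tg->shares) / W;
		else
			wl = tg->shares;

		if (wl < MIN_SHARES)
			wl = MIN_SHARES;

		/* Convert to a delta against the entity's current contribution. */
		wl -= se->avg.load_avg_contrib;

		/* Only the bottom level gains weight; parents just redistribute. */
		wg = 0;
	}

	return wl;
}
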
4569 struct task_group *tg; in wake_affine() local
4592 tg = task_group(current); in wake_affine()
4595 this_load += effective_load(tg, this_cpu, -weight, -weight); in wake_affine()
4596 load += effective_load(tg, prev_cpu, 0, -weight); in wake_affine()
4599 tg = task_group(p); in wake_affine()
4619 effective_load(tg, this_cpu, weight, weight); in wake_affine()
4621 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); in wake_affine()
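
The 4569-4621 hits are wake_affine() using effective_load() to compare this_cpu and prev_cpu under the hypothetical migration of the wakee. A trimmed sketch limited to the calls that appear above; variable setup and the capacity/imbalance scaling are abbreviated:

	/* wake_affine(), abbreviated to the effective_load() calls. */
	if (sync) {
		/* Synchronous wakeup: assume the waker's load goes away. */
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	/* Compare scaled loads with the wakee's weight added to this_cpu. */
	this_eff_load *= this_load +
		effective_load(tg, this_cpu, weight, weight);

	prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);

	/* Affine wakeup is allowed when this_cpu does not end up heavier. */
	balanced = this_eff_load <= prev_eff_load;
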
5816 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu) in __update_blocked_averages_cpu() argument
5818 struct sched_entity *se = tg->se[cpu]; in __update_blocked_averages_cpu()
5819 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; in __update_blocked_averages_cpu()
5864 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu); in update_blocked_averages()
5878 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
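
The 5816-5878 hits are the blocked-load update path: update_blocked_averages() walks every leaf cfs_rq and calls __update_blocked_averages_cpu() on its task_group, and update_cfs_rq_h_load() likewise climbs via cfs_rq->tg->se[cpu]. A sketch of __update_blocked_averages_cpu():

static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
{
	struct sched_entity *se = tg->se[cpu];
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];

	/* Throttled hierarchies do not contribute load. */
	if (throttled_hierarchy(cfs_rq))
		return;

	update_cfs_rq_blocked_load(cfs_rq, 1);

	if (se) {
		update_entity_load_avg(se, 1);
		/* Drop the cfs_rq from the leaf list once it has fully decayed. */
		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
			list_del_leaf_cfs_rq(cfs_rq);
	} else {
		/* The root group has no entity; update the rq-wide average. */
		struct rq *rq = rq_of(cfs_rq);
		update_rq_runnable_avg(rq, rq->nr_running);
	}
}
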
8049 void free_fair_sched_group(struct task_group *tg) in free_fair_sched_group() argument
8053 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); in free_fair_sched_group()
8056 if (tg->cfs_rq) in free_fair_sched_group()
8057 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
8058 if (tg->se) in free_fair_sched_group()
8059 kfree(tg->se[i]); in free_fair_sched_group()
8062 kfree(tg->cfs_rq); in free_fair_sched_group()
8063 kfree(tg->se); in free_fair_sched_group()
8066 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
8072 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8073 if (!tg->cfs_rq) in alloc_fair_sched_group()
8075 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8076 if (!tg->se) in alloc_fair_sched_group()
8079 tg->shares = NICE_0_LOAD; in alloc_fair_sched_group()
8081 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); in alloc_fair_sched_group()
8095 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
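
The 8049-8095 hits are group setup and teardown: alloc_fair_sched_group() allocates per-CPU arrays of cfs_rq and sched_entity pointers plus one cfs_rq/se pair per possible CPU, and free_fair_sched_group() undoes it. A sketch with the error handling simplified:

void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	/* Pointer arrays indexed by CPU. */
	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq || !se) {
			/* Error handling simplified relative to the real code. */
			kfree(cfs_rq);
			kfree(se);
			goto err;
		}

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err:
	return 0;
}
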
8106 void unregister_fair_sched_group(struct task_group *tg, int cpu) in unregister_fair_sched_group() argument
8115 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
8119 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
8123 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
8129 cfs_rq->tg = tg; in init_tg_cfs_entry()
8133 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
8134 tg->se[cpu] = se; in init_tg_cfs_entry()
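
The 8106-8134 hits: unregister_fair_sched_group() removes the per-CPU cfs_rq from the leaf list under rq->lock, and init_tg_cfs_entry() wires the cfs_rq and its group entity into the tg arrays. A sketch of init_tg_cfs_entry(); the se->depth handling is from memory of kernels that track entity depth:

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
		       struct sched_entity *se, int cpu,
		       struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* The root task group has no entity of its own. */
	if (!se)
		return;

	if (!parent) {
		se->cfs_rq = &rq->cfs;
		se->depth = 0;
	} else {
		se->cfs_rq = parent->my_q;
		se->depth = parent->depth + 1;
	}

	se->my_q = cfs_rq;
	/* Group entities always start with a non-zero weight. */
	update_load_set(&se->load, NICE_0_LOAD);
	se->parent = parent;
}
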
8156 int sched_group_set_shares(struct task_group *tg, unsigned long shares) in sched_group_set_shares() argument
8164 if (!tg->se[0]) in sched_group_set_shares()
8170 if (tg->shares == shares) in sched_group_set_shares()
8173 tg->shares = shares; in sched_group_set_shares()
8178 se = tg->se[i]; in sched_group_set_shares()
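
The 8156-8178 hits are sched_group_set_shares(), the cgroup-facing knob. It rejects the root group (which has no se), stores the clamped value in tg->shares, and then walks every CPU's entity chain so the new weight propagates via update_cfs_shares(). A sketch with the locking kept minimal:

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/* The root cgroup's weight cannot be changed. */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		raw_spin_lock_irqsave(&rq->lock, flags);
		update_rq_clock(rq);
		/* Recompute shares up the hierarchy on this CPU. */
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
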
8195 void free_fair_sched_group(struct task_group *tg) { } in free_fair_sched_group() argument
8197 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
8202 void unregister_fair_sched_group(struct task_group *tg, int cpu) { } in unregister_fair_sched_group() argument