Lines matching refs:tg in kernel/sched/fair.c
295 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { in list_add_leaf_cfs_rq()
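
The test on lines 295-296 chases two levels of pointers: from this CPU's queue to its task group, up to the parent group, and back down to the parent's queue on the same CPU, asking whether that queue is already linked on the per-rq leaf list; the surrounding code uses the answer to keep children ahead of their parents on that list. A minimal standalone model of just the pointer chain, with stand-in struct definitions rather than the real kernel ones:

    #include <stdbool.h>

    /* Stand-ins for the kernel structures: only the fields the test
     * on lines 295-296 actually touches. */
    struct cfs_rq;
    struct task_group {
            struct task_group *parent;
            struct cfs_rq **cfs_rq;         /* one queue per CPU */
    };
    struct cfs_rq {
            struct task_group *tg;
            int on_list;
    };

    /* The condition itself: the group has a parent, and the parent's
     * queue on this CPU is already on the leaf list. */
    static bool parent_on_leaf_list(struct cfs_rq *cfs_rq, int cpu)
    {
            return cfs_rq->tg->parent &&
                   cfs_rq->tg->parent->cfs_rq[cpu]->on_list;
    }
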
1121 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
2362 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) in calc_tg_weight() argument
2371 tg_weight = atomic_long_read(&tg->load_avg); in calc_tg_weight()
2378 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares() argument
2382 tg_weight = calc_tg_weight(tg, cfs_rq); in calc_cfs_shares()
2385 shares = (tg->shares * load); in calc_cfs_shares()
2391 if (shares > tg->shares) in calc_cfs_shares()
2392 shares = tg->shares; in calc_cfs_shares()
2397 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) in calc_cfs_shares() argument
2399 return tg->shares; in calc_cfs_shares()
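
calc_cfs_shares() (lines 2378-2392) sizes a group's scheduling entity on one CPU: the group's total tg->shares is scaled by this CPU's fraction of the group-wide weight reported by calc_tg_weight() (line 2382), clamped below by MIN_SHARES and above by tg->shares so one CPU can never claim more than the whole group (line 2391). The variant on line 2399 is the build without per-CPU scaling and simply hands back tg->shares. The arithmetic in isolation, as a runnable user-space sketch (not kernel code; names mirror the kernel's):

    #include <stdio.h>

    #define MIN_SHARES 2L

    /* shares = tg_shares * cpu_load / tg_weight, clamped to
     * [MIN_SHARES, tg_shares]; mirrors lines 2385-2392. */
    static long model_calc_cfs_shares(long tg_shares, long cpu_load,
                                      long tg_weight)
    {
            long shares = tg_shares * cpu_load;

            if (tg_weight)
                    shares /= tg_weight;
            if (shares < MIN_SHARES)
                    shares = MIN_SHARES;
            if (shares > tg_shares)         /* line 2391 */
                    shares = tg_shares;
            return shares;
    }

    int main(void)
    {
            /* A 1024-share group carrying 1/4 of its load on this CPU
             * gets a 256-weight entity here. */
            printf("%ld\n", model_calc_cfs_shares(1024, 512, 2048)); /* 256 */
            return 0;
    }
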
2422 struct task_group *tg; in update_cfs_shares() local
2426 tg = cfs_rq->tg; in update_cfs_shares()
2427 se = tg->se[cpu_of(rq_of(cfs_rq))]; in update_cfs_shares()
2431 if (likely(se->load.weight == tg->shares)) in update_cfs_shares()
2434 shares = calc_cfs_shares(cfs_rq, tg); in update_cfs_shares()
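
update_cfs_shares() (lines 2422-2434) is the consumer of that calculation: it resolves the group entity for this queue's CPU (line 2427), bails out when the entity already carries the full tg->shares and a recomputation could not change anything (line 2431), and otherwise reweights the entity to the calc_cfs_shares() result. Condensed into a standalone sketch (the kernel body also checks for a missing entity and a throttled hierarchy, omitted here):

    /* Stand-in types: only the fields the flow needs. */
    struct model_se { long weight; };
    struct model_tg { long shares; };

    static void model_update_cfs_shares(struct model_tg *tg,
                                        struct model_se *se,
                                        long new_shares)
    {
            if (se->weight == tg->shares)   /* line 2431: already full */
                    return;
            se->weight = new_shares;        /* reweight_entity() in fair.c */
    }
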
2674 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
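
Line 2674 is the write side of the group-wide load average: each CPU adds only the delta between its new and previously recorded contribution into the shared counter, so tg->load_avg remains a lock-free sum of per-CPU averages, and is exactly what calc_tg_weight() reads back on line 2371. (The kernel additionally skips very small deltas to limit cross-CPU cacheline traffic.) The same idea with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* tg->load_avg as a lock-free running sum: each CPU publishes only
     * the change in its own contribution (model of line 2674). */
    static atomic_long tg_load_avg;

    static void model_update_tg_load_avg(long *my_last_contrib,
                                         long new_contrib)
    {
            long delta = new_contrib - *my_last_contrib;

            if (delta) {
                    atomic_fetch_add(&tg_load_avg, delta);
                    *my_last_contrib = new_contrib;
            }
    }

    int main(void)
    {
            long cpu0 = 0, cpu1 = 0;

            model_update_tg_load_avg(&cpu0, 300);
            model_update_tg_load_avg(&cpu1, 200);
            model_update_tg_load_avg(&cpu0, 250);   /* publishes -50 */
            printf("%ld\n", atomic_load(&tg_load_avg));     /* 450 */
            return 0;
    }
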
3384 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
3386 return &tg->cfs_bandwidth; in tg_cfs_bandwidth()
3401 struct task_group *tg = cfs_rq->tg; in assign_cfs_rq_runtime() local
3402 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); in assign_cfs_rq_runtime()
3441 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in expire_cfs_rq_runtime()
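
The bandwidth lines above all funnel through tg_cfs_bandwidth() (lines 3384-3386), which maps a group to its single shared cfs_bandwidth pool. assign_cfs_rq_runtime() (lines 3401-3402) pulls a slice of runtime from that pool into the CPU-local queue; expire_cfs_rq_runtime() (line 3441) invalidates leftover local runtime when the global period rolls over. The pool-to-local transfer reduced to its arithmetic, as a sketch (RUNTIME_INF and the 5 ms default slice follow the kernel; the struct is a stand-in):

    #include <stdint.h>
    #include <stdio.h>

    #define RUNTIME_INF     ((uint64_t)~0ULL)

    struct cfs_bandwidth_model {
            uint64_t quota;     /* per-period allowance; RUNTIME_INF = unlimited */
            uint64_t runtime;   /* remaining in the current period */
    };

    /* Move up to one slice from the group pool to a CPU's local runtime
     * and return what was granted; 0 means the pool is dry and the
     * queue is headed for throttle_cfs_rq(). */
    static uint64_t model_assign_runtime(struct cfs_bandwidth_model *cfs_b,
                                         uint64_t slice_ns)
    {
            uint64_t amount;

            if (cfs_b->quota == RUNTIME_INF)
                    return slice_ns;        /* unconstrained group */

            amount = cfs_b->runtime < slice_ns ? cfs_b->runtime : slice_ns;
            cfs_b->runtime -= amount;
            return amount;
    }

    int main(void)
    {
            struct cfs_bandwidth_model b = { 20000000, 7000000 };

            printf("%llu\n", (unsigned long long)model_assign_runtime(&b, 5000000)); /* 5000000 */
            printf("%llu\n", (unsigned long long)model_assign_runtime(&b, 5000000)); /* 2000000 */
            return 0;
    }
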
3512 static inline int throttled_lb_pair(struct task_group *tg, in throttled_lb_pair() argument
3517 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
3518 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
3525 static int tg_unthrottle_up(struct task_group *tg, void *data) in tg_unthrottle_up() argument
3528 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
3542 static int tg_throttle_down(struct task_group *tg, void *data) in tg_throttle_down() argument
3545 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
3558 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
3563 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
3567 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
3612 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
3617 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3629 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
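
Both throttle_cfs_rq() (line 3567) and unthrottle_cfs_rq() (line 3629) hand walk_tg_tree_from() a pair of callbacks, with tg_nop filling the unused slot: tg_throttle_down (line 3542) runs as the "down" visitor, so each group's cfs_rq on this CPU learns it sits under a throttled ancestor before its children do, and tg_unthrottle_up (line 3525) runs as the "up" visitor so the unwind happens child-first. throttled_lb_pair() (lines 3512-3518) then reads the per-CPU state those walks leave behind to veto migrating a group's tasks onto or off a throttled queue. A recursive sketch of the visit order (the kernel's walker is iterative, but the ordering is the same):

    struct tg_node {
            struct tg_node *child;          /* first child */
            struct tg_node *sibling;        /* next sibling */
    };

    typedef int (*tg_visit)(struct tg_node *tg, void *data);

    static int model_tg_nop(struct tg_node *tg, void *data)
    {
            (void)tg; (void)data;
            return 0;
    }

    /* "down" fires on the way in (parent before children), "up" on the
     * way out (children before parent). */
    static void model_walk_tg_tree_from(struct tg_node *from, tg_visit down,
                                        tg_visit up, void *data)
    {
            struct tg_node *child;

            down(from, data);
            for (child = from->child; child; child = child->sibling)
                    model_walk_tg_tree_from(child, down, up, data);
            up(from, data);
    }
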
3814 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
4002 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; in update_runtime_enabled()
4055 static inline int throttled_lb_pair(struct task_group *tg, in throttled_lb_pair() argument
4067 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
4572 static long effective_load(struct task_group *tg, int cpu, long wl, long wg) in effective_load() argument
4574 struct sched_entity *se = tg->se[cpu]; in effective_load()
4576 if (!tg->parent) /* the trivial, non-cgroup case */ in effective_load()
4582 tg = se->my_q->tg; in effective_load()
4587 W = wg + calc_tg_weight(tg, se->my_q); in effective_load()
4598 wl = (w * (long)tg->shares) / W; in effective_load()
4600 wl = tg->shares; in effective_load()
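
effective_load() (lines 4572-4600) answers a what-if question: if wl units of task weight were added on the given CPU, how much would the load visible at the root of the group hierarchy change? With no cgroup nesting the answer is trivially wl itself (line 4576). Otherwise, at each level the group entity's weight is recomputed as this CPU's share w of the total group weight W (from calc_tg_weight(), line 4587) applied to tg->shares (line 4598), capped at tg->shares when the ratio would exceed one (line 4600), and the difference against the entity's old weight is carried upward as the next level's wl. One level of that recurrence as a runnable model (not kernel code):

    #include <stdio.h>

    /* One step of the effective_load() recurrence: new entity weight is
     * w/W of tg_shares (capped), and the level contributes the change
     * against the entity's old weight se_weight. */
    static long model_level_effective_load(long tg_shares, long w, long W,
                                           long se_weight)
    {
            long wl;

            if (W > 0 && w < W)
                    wl = (w * tg_shares) / W;       /* line 4598 */
            else
                    wl = tg_shares;                 /* line 4600 */

            return wl - se_weight;
    }

    int main(void)
    {
            /* A 1024-share group whose CPU-local part grows from
             * 512/2048 to 1024/2560 of the group weight: the entity
             * goes from 256 to 409, so this level passes 153 upward. */
            printf("%ld\n", model_level_effective_load(1024, 1024, 2560, 256));
            return 0;
    }
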
4629 static long effective_load(struct task_group *tg, int cpu, long wl, long wg) in effective_load() argument
4666 struct task_group *tg; in wake_affine() local
4682 tg = task_group(current); in wake_affine()
4685 this_load += effective_load(tg, this_cpu, -weight, -weight); in wake_affine()
4686 load += effective_load(tg, prev_cpu, 0, -weight); in wake_affine()
4689 tg = task_group(p); in wake_affine()
4709 effective_load(tg, this_cpu, weight, weight); in wake_affine()
4711 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); in wake_affine()
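
wake_affine() (lines 4666-4711) uses effective_load() to compare both CPUs as if the wakeup had already happened: the waker's group (task_group(current), line 4682) is debited on a sync wakeup (lines 4685-4686), then the wakee's group (task_group(p), line 4689) is charged on this_cpu and accounted against prev_cpu (lines 4709-4711). The final decision cross-multiplies each side's projected load by the other side's capacity, so load/capacity ratios are compared without division. A standalone model of that comparison, with the effective_load() terms already folded into the load arguments (the bias factor follows the kernel's shape; inputs are illustrative):

    /* "Balanced" when this_cpu's projected load-per-capacity, with a
     * bias derived from the domain's imbalance_pct, does not exceed
     * prev_cpu's: computed as cross-products to stay in integers. */
    static int model_affine_balanced(unsigned long this_load,
                                     unsigned long this_capacity,
                                     unsigned long prev_load,
                                     unsigned long prev_capacity,
                                     unsigned long imbalance_pct) /* e.g. 117 */
    {
            unsigned long this_eff = 100UL * prev_capacity * this_load;
            unsigned long prev_eff = (100UL + (imbalance_pct - 100) / 2) *
                                     this_capacity * prev_load;

            return this_eff <= prev_eff;
    }
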
5928 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
8092 void free_fair_sched_group(struct task_group *tg) in free_fair_sched_group() argument
8096 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); in free_fair_sched_group()
8099 if (tg->cfs_rq) in free_fair_sched_group()
8100 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
8101 if (tg->se) { in free_fair_sched_group()
8102 if (tg->se[i]) in free_fair_sched_group()
8103 remove_entity_load_avg(tg->se[i]); in free_fair_sched_group()
8104 kfree(tg->se[i]); in free_fair_sched_group()
8108 kfree(tg->cfs_rq); in free_fair_sched_group()
8109 kfree(tg->se); in free_fair_sched_group()
8112 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
8118 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8119 if (!tg->cfs_rq) in alloc_fair_sched_group()
8121 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8122 if (!tg->se) in alloc_fair_sched_group()
8125 tg->shares = NICE_0_LOAD; in alloc_fair_sched_group()
8127 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); in alloc_fair_sched_group()
8141 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
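
alloc_fair_sched_group() (lines 8112-8141) builds the per-CPU machinery: first the two nr_cpu_ids-sized pointer arrays (lines 8118-8122), then one cfs_rq and one sched_entity per CPU, each wired up under the parent's entity via init_tg_cfs_entry() (line 8141); on any failure it unwinds, and free_fair_sched_group() (lines 8092-8109) releases the same objects per CPU before freeing the arrays. The shape of the pair with calloc standing in for kzalloc (a sketch; 1024 stands in for NICE_0_LOAD at default load resolution, line 8125):

    #include <stdlib.h>

    struct cfs_rq_m { int dummy; };
    struct se_m { int dummy; };
    struct tg_alloc_m {
            struct cfs_rq_m **cfs_rq;
            struct se_m **se;
            unsigned long shares;
    };

    /* Mirror of free_fair_sched_group(): per-CPU objects first, then
     * the pointer arrays; free(NULL) is harmless, as kfree(NULL) is. */
    static void model_free_group(struct tg_alloc_m *tg, int nr_cpu_ids)
    {
            int i;

            for (i = 0; i < nr_cpu_ids; i++) {
                    if (tg->cfs_rq)
                            free(tg->cfs_rq[i]);
                    if (tg->se)
                            free(tg->se[i]);
            }
            free(tg->cfs_rq);
            free(tg->se);
    }

    static int model_alloc_group(struct tg_alloc_m *tg, int nr_cpu_ids)
    {
            int i;

            tg->cfs_rq = calloc(nr_cpu_ids, sizeof(*tg->cfs_rq));
            tg->se = calloc(nr_cpu_ids, sizeof(*tg->se));
            if (!tg->cfs_rq || !tg->se)
                    goto err;

            tg->shares = 1024;      /* stand-in for NICE_0_LOAD */

            for (i = 0; i < nr_cpu_ids; i++) {
                    tg->cfs_rq[i] = calloc(1, sizeof(*tg->cfs_rq[i]));
                    tg->se[i] = calloc(1, sizeof(*tg->se[i]));
                    if (!tg->cfs_rq[i] || !tg->se[i])
                            goto err;
            }
            return 0;
    err:
            model_free_group(tg, nr_cpu_ids);
            return -1;
    }
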
8153 void unregister_fair_sched_group(struct task_group *tg, int cpu) in unregister_fair_sched_group() argument
8162 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
8166 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
8170 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
8176 cfs_rq->tg = tg; in init_tg_cfs_entry()
8180 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
8181 tg->se[cpu] = se; in init_tg_cfs_entry()
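
init_tg_cfs_entry() (lines 8170-8181) creates the cross-links everything else in this listing relies on: the queue points back at its group (cfs_rq->tg, read on lines 2426, 3401, and 5928) and the group indexes its per-CPU queue and entity (tg->cfs_rq[cpu] and tg->se[cpu], read on lines 3517 and 2427). The three stores in isolation, over stand-in types:

    struct tg_link;
    struct cfs_rq_link { struct tg_link *tg; };
    struct se_link { int dummy; };
    struct tg_link {
            struct cfs_rq_link **cfs_rq;
            struct se_link **se;
    };

    /* Group and queue reference each other, so code holding either can
     * reach the other in O(1) (lines 8176-8181). */
    static void model_init_tg_cfs_entry(struct tg_link *tg,
                                        struct cfs_rq_link *cfs_rq,
                                        struct se_link *se, int cpu)
    {
            cfs_rq->tg = tg;
            tg->cfs_rq[cpu] = cfs_rq;
            tg->se[cpu] = se;
    }
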
8203 int sched_group_set_shares(struct task_group *tg, unsigned long shares) in sched_group_set_shares() argument
8211 if (!tg->se[0]) in sched_group_set_shares()
8217 if (tg->shares == shares) in sched_group_set_shares()
8220 tg->shares = shares; in sched_group_set_shares()
8225 se = tg->se[i]; in sched_group_set_shares()
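
sched_group_set_shares() (lines 8203-8225) is the path behind the cgroup cpu.shares knob: it rejects the root group, which owns no entities (line 8211), returns early when the value is unchanged (line 8217), stores the new total (line 8220), and then visits every CPU's group entity (line 8225) so the per-CPU weights are recomputed against the new total. The kernel clamps the incoming value first; a sketch of that guard, assuming the usual MIN_SHARES/MAX_SHARES bounds of 2 and 1 << 18:

    #define MIN_SHARES_M    2UL
    #define MAX_SHARES_M    (1UL << 18)

    /* Clamp a user-supplied cpu.shares value into the supported range
     * (model of the guard at the top of sched_group_set_shares()). */
    static unsigned long model_clamp_shares(unsigned long shares)
    {
            if (shares < MIN_SHARES_M)
                    return MIN_SHARES_M;
            if (shares > MAX_SHARES_M)
                    return MAX_SHARES_M;
            return shares;
    }
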
8242 void free_fair_sched_group(struct task_group *tg) { } in free_fair_sched_group() argument
8244 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
8249 void unregister_fair_sched_group(struct task_group *tg, int cpu) { } in unregister_fair_sched_group() argument