Lines Matching refs:tg
781 int tg_nop(struct task_group *tg, void *data) in tg_nop() argument
7461 static void free_sched_group(struct task_group *tg) in free_sched_group() argument
7463 free_fair_sched_group(tg); in free_sched_group()
7464 free_rt_sched_group(tg); in free_sched_group()
7465 autogroup_free(tg); in free_sched_group()
7466 kfree(tg); in free_sched_group()
7472 struct task_group *tg; in sched_create_group() local
7474 tg = kzalloc(sizeof(*tg), GFP_KERNEL); in sched_create_group()
7475 if (!tg) in sched_create_group()
7478 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
7481 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
7484 return tg; in sched_create_group()
7487 free_sched_group(tg); in sched_create_group()
7491 void sched_online_group(struct task_group *tg, struct task_group *parent) in sched_online_group() argument
7496 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
7500 tg->parent = parent; in sched_online_group()
7501 INIT_LIST_HEAD(&tg->children); in sched_online_group()
7502 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
7514 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
7517 call_rcu(&tg->rcu, free_sched_group_rcu); in sched_destroy_group()
7520 void sched_offline_group(struct task_group *tg) in sched_offline_group() argument
7527 unregister_fair_sched_group(tg, i); in sched_offline_group()
7530 list_del_rcu(&tg->list); in sched_offline_group()
7531 list_del_rcu(&tg->siblings); in sched_offline_group()
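
Taken together, lines 7461-7531 trace the task_group lifecycle: sched_create_group() allocates the group and its fair/RT sub-structures, sched_online_group() links it into task_groups and its parent's children list under RCU, and sched_offline_group()/sched_destroy_group() undo that, with the final kfree() deferred through call_rcu(). A minimal sketch of a caller driving that lifecycle follows; the calling context and error handling are assumptions, only the function names and signatures come from this listing.

        /* Hypothetical caller showing how the lifecycle functions compose;
         * sched_create_group() returns an ERR_PTR() on failure, as the
         * IS_ERR() check in cpu_cgroup_css_alloc() (line 7975) confirms. */
        static int example_group_lifecycle(struct task_group *parent)
        {
                struct task_group *tg;

                tg = sched_create_group(parent);    /* kzalloc + fair/rt alloc */
                if (IS_ERR(tg))
                        return PTR_ERR(tg);

                sched_online_group(tg, parent);     /* list_add_rcu into the hierarchy */

                /* ... group is live, tasks may be attached ... */

                sched_offline_group(tg);            /* unregister + list_del_rcu */
                sched_destroy_group(tg);            /* kfree deferred via call_rcu */
                return 0;
        }
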
7542 struct task_group *tg; in sched_move_task() local
7562 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), in sched_move_task()
7564 tg = autogroup_task_group(tsk, tg); in sched_move_task()
7565 tsk->sched_task_group = tg; in sched_move_task()
7590 static inline int tg_has_rt_tasks(struct task_group *tg) in tg_has_rt_tasks() argument
7597 if (task_group_is_autogroup(tg)) in tg_has_rt_tasks()
7601 if (rt_task(p) && task_group(p) == tg) in tg_has_rt_tasks()
7609 struct task_group *tg; member
7614 static int tg_rt_schedulable(struct task_group *tg, void *data) in tg_rt_schedulable() argument
7621 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
7622 runtime = tg->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
7624 if (tg == d->tg) { in tg_rt_schedulable()
7638 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) in tg_rt_schedulable()
7652 list_for_each_entry_rcu(child, &tg->children, siblings) { in tg_rt_schedulable()
7656 if (child == d->tg) { in tg_rt_schedulable()
7670 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) in __rt_schedulable() argument
7675 .tg = tg, in __rt_schedulable()
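
__rt_schedulable() bundles the proposed period and runtime into the rt_schedulable_data struct whose .tg member appears at lines 7609/7675, then has tg_rt_schedulable() visit every group in the tree; tg_nop() (line 781) has exactly the visitor signature such a walk needs. A sketch of that pattern, assuming the mainline walk_tg_tree() helper and the rt_period/rt_runtime field names, neither of which is shown in this listing:

        /* Sketch of the tree walk behind __rt_schedulable(). The "down"
         * visitor tg_rt_schedulable() checks each group; tg_nop() is the
         * do-nothing "up" visitor. walk_tg_tree() and the two extra fields
         * are assumptions based on the mainline scheduler. */
        struct rt_schedulable_data data = {
                .tg         = tg,
                .rt_period  = period,
                .rt_runtime = runtime,
        };

        rcu_read_lock();
        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
        rcu_read_unlock();
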
7687 static int tg_set_rt_bandwidth(struct task_group *tg, in tg_set_rt_bandwidth() argument
7696 if (tg == &root_task_group && rt_runtime == 0) in tg_set_rt_bandwidth()
7705 err = __rt_schedulable(tg, rt_period, rt_runtime); in tg_set_rt_bandwidth()
7709 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
7710 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); in tg_set_rt_bandwidth()
7711 tg->rt_bandwidth.rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
7714 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth()
7720 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
7728 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) in sched_group_set_rt_runtime() argument
7732 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_set_rt_runtime()
7737 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_runtime()
7740 static long sched_group_rt_runtime(struct task_group *tg) in sched_group_rt_runtime() argument
7744 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) in sched_group_rt_runtime()
7747 rt_runtime_us = tg->rt_bandwidth.rt_runtime; in sched_group_rt_runtime()
7752 static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) in sched_group_set_rt_period() argument
7757 rt_runtime = tg->rt_bandwidth.rt_runtime; in sched_group_set_rt_period()
7759 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_period()
7762 static long sched_group_rt_period(struct task_group *tg) in sched_group_rt_period() argument
7766 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_rt_period()
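
The sched_group_*_rt_* helpers translate between the microsecond values exposed to user space and the nanosecond values kept in tg->rt_bandwidth, as the ktime_to_ns() calls at lines 7732 and 7766 show. A hedged illustration of the setter side; the function name is hypothetical, and NSEC_PER_USEC plus the -1-means-unlimited convention are assumptions mirroring the RUNTIME_INF check at line 7744:

        /* Hypothetical helper mirroring sched_group_set_rt_runtime():
         * convert a microsecond value from the cgroup interface into the
         * nanoseconds expected by tg_set_rt_bandwidth(). */
        static int example_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
        {
                u64 rt_period, rt_runtime;

                rt_period  = ktime_to_ns(tg->rt_bandwidth.rt_period);
                rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
                if (rt_runtime_us < 0)
                        rt_runtime = RUNTIME_INF;   /* writing -1 lifts the cap */

                return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
        }
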
7786 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) in sched_rt_can_attach() argument
7789 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) in sched_rt_can_attach()
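
sched_rt_can_attach() is the gatekeeper paired with these knobs: it refuses a real-time task that is about to enter a group whose rt_runtime is zero. A minimal caller sketch; the surrounding cgroup-attach context and the sched_move_task() signature are assumptions, only the check itself comes from line 7789:

        /* Hypothetical attach path: refuse the move unless the target
         * group actually owns some RT bandwidth. */
        if (!sched_rt_can_attach(tg, tsk))
                return -EINVAL;
        sched_move_task(tsk);
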
7967 struct task_group *tg; in cpu_cgroup_css_alloc() local
7974 tg = sched_create_group(parent); in cpu_cgroup_css_alloc()
7975 if (IS_ERR(tg)) in cpu_cgroup_css_alloc()
7978 return &tg->css; in cpu_cgroup_css_alloc()
7983 struct task_group *tg = css_tg(css); in cpu_cgroup_css_online() local
7987 sched_online_group(tg, parent); in cpu_cgroup_css_online()
7993 struct task_group *tg = css_tg(css); in cpu_cgroup_css_free() local
7995 sched_destroy_group(tg); in cpu_cgroup_css_free()
8000 struct task_group *tg = css_tg(css); in cpu_cgroup_css_offline() local
8002 sched_offline_group(tg); in cpu_cgroup_css_offline()
8062 struct task_group *tg = css_tg(css); in cpu_shares_read_u64() local
8064 return (u64) scale_load_down(tg->shares); in cpu_shares_read_u64()
8073 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8075 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) in tg_set_cfs_bandwidth() argument
8078 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
8080 if (tg == &root_task_group) in tg_set_cfs_bandwidth()
8105 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
8130 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
8150 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) in tg_set_cfs_quota() argument
8154 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
8160 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_quota()
8163 long tg_get_cfs_quota(struct task_group *tg) in tg_get_cfs_quota() argument
8167 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
8170 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
8176 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) in tg_set_cfs_period() argument
8181 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
8183 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_period()
8186 long tg_get_cfs_period(struct task_group *tg) in tg_get_cfs_period() argument
8190 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
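
The CFS knobs mirror the RT ones: tg_set_cfs_quota() and tg_set_cfs_period() convert microseconds to nanoseconds before calling tg_set_cfs_bandwidth(), and the RUNTIME_INF check at line 8167 suggests tg_get_cfs_quota() reports an unlimited group as -1. A usage sketch with illustrative numbers; the -1-to-unlimited write convention is an assumption from the mainline cgroup cpu.cfs_*_us interface, and return values should be checked in real code:

        /* Illustrative only: cap a group at 50ms of CPU time per 100ms
         * period, then lift the cap again. */
        tg_set_cfs_period(tg, 100000);          /* period = 100000us = 100ms */
        tg_set_cfs_quota(tg, 50000);            /* quota  =  50000us =  50ms */

        if (tg_get_cfs_quota(tg) == -1)         /* -1 <=> quota == RUNTIME_INF */
                pr_debug("group is unthrottled\n");

        tg_set_cfs_quota(tg, -1);               /* back to unlimited */
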
8221 struct task_group *tg; member
8229 static u64 normalize_cfs_quota(struct task_group *tg, in normalize_cfs_quota() argument
8234 if (tg == d->tg) { in normalize_cfs_quota()
8238 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
8239 quota = tg_get_cfs_quota(tg); in normalize_cfs_quota()
8249 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) in tg_cfs_schedulable_down() argument
8252 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
8255 if (!tg->parent) { in tg_cfs_schedulable_down()
8258 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
8260 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
8277 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
8281 .tg = tg, in __cfs_schedulable()
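
__cfs_schedulable() repeats the visitor pattern used on the RT side: normalize_cfs_quota() scales each group's quota against its period so tg_cfs_schedulable_down() can compare groups that use different periods, and the walk fails if a child claims more bandwidth than its parent grants. A sketch of the per-level check; the hierarchical_quota field and the inherit-when-unlimited behaviour are assumptions from the mainline code, the rest follows lines 8249-8260:

        /* Sketch of the invariant enforced on the way down the tree. */
        if (!tg->parent) {
                quota = RUNTIME_INF;                    /* root is never limited */
        } else {
                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

                quota = normalize_cfs_quota(tg, d);     /* scale to a common base */
                parent_quota = parent_b->hierarchical_quota;

                /* a child may not be promised more than its parent */
                if (quota == RUNTIME_INF)
                        quota = parent_quota;           /* inherit the parent's limit */
                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
                        return -EINVAL;
        }
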
8300 struct task_group *tg = css_tg(seq_css(sf)); in cpu_stats_show() local
8301 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_stats_show()