Lines matching refs: tg (identifier cross-reference; hits are in kernel/sched/core.c, each annotated with its source line number and enclosing function)
806 int tg_nop(struct task_group *tg, void *data) in tg_nop() argument
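tg_nop() is the scheduler's no-op tree-walk callback: walk_tg_tree() takes a "down" visitor and an "up" visitor, and callers that only care about one direction pass tg_nop() for the other (see __rt_schedulable() and __cfs_schedulable() further down). Its body is simply:

    int tg_nop(struct task_group *tg, void *data)
    {
        return 0;
    }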
7697 static void sched_free_group(struct task_group *tg) in sched_free_group() argument
7699 free_fair_sched_group(tg); in sched_free_group()
7700 free_rt_sched_group(tg); in sched_free_group()
7701 autogroup_free(tg); in sched_free_group()
7702 kfree(tg); in sched_free_group()
7708 struct task_group *tg; in sched_create_group() local
7710 tg = kzalloc(sizeof(*tg), GFP_KERNEL); in sched_create_group()
7711 if (!tg) in sched_create_group()
7714 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
7717 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
7720 return tg; in sched_create_group()
7723 sched_free_group(tg); in sched_create_group()
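The listing elides sched_create_group()'s error paths; a plausible reconstruction, assuming conventional goto-based unwinding through sched_free_group() (the err label itself never references tg, so it cannot appear in the hits above):

    struct task_group *sched_create_group(struct task_group *parent)
    {
        struct task_group *tg;

        tg = kzalloc(sizeof(*tg), GFP_KERNEL);
        if (!tg)
            return ERR_PTR(-ENOMEM);

        if (!alloc_fair_sched_group(tg, parent))
            goto err;            /* assumed label; not visible in the listing */

        if (!alloc_rt_sched_group(tg, parent))
            goto err;

        return tg;

    err:
        sched_free_group(tg);    /* line 7723: unwind the partial allocation */
        return ERR_PTR(-ENOMEM);
    }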
7727 void sched_online_group(struct task_group *tg, struct task_group *parent) in sched_online_group() argument
7732 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
7736 tg->parent = parent; in sched_online_group()
7737 INIT_LIST_HEAD(&tg->children); in sched_online_group()
7738 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
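The gaps around lines 7732 and 7736 presumably hold the task_group_lock critical section that serializes updates to the global task_groups list; a sketch assuming that locking:

    void sched_online_group(struct task_group *tg, struct task_group *parent)
    {
        unsigned long flags;

        spin_lock_irqsave(&task_group_lock, flags);
        list_add_rcu(&tg->list, &task_groups);

        WARN_ON(!parent);    /* the root group should already exist */

        tg->parent = parent;
        INIT_LIST_HEAD(&tg->children);
        list_add_rcu(&tg->siblings, &parent->children);
        spin_unlock_irqrestore(&task_group_lock, flags);
    }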
7749 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
7752 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_destroy_group()
7755 void sched_offline_group(struct task_group *tg) in sched_offline_group() argument
7762 unregister_fair_sched_group(tg, i); in sched_offline_group()
7765 list_del_rcu(&tg->list); in sched_offline_group()
7766 list_del_rcu(&tg->siblings); in sched_offline_group()
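sched_destroy_group() cannot kfree() the group immediately: lockless readers may still be walking task_groups under rcu_read_lock(), so sched_offline_group() unlinks the group (after the per-CPU unregister_fair_sched_group() calls at line 7762) and the actual free is deferred through the rcu_head embedded in struct task_group. The RCU callback never names tg, which is why it is absent from this listing; it is just:

    static void sched_free_group_rcu(struct rcu_head *rhp)
    {
        /* Map the embedded rcu_head back to its containing task_group. */
        sched_free_group(container_of(rhp, struct task_group, rcu));
    }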
7777 struct task_group *tg; in sched_move_task() local
7797 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), in sched_move_task()
7799 tg = autogroup_task_group(tsk, tg); in sched_move_task()
7800 tsk->sched_task_group = tg; in sched_move_task()
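Line 7797 is truncated by the listing; the container_of() expression almost certainly continues on the following source line, resolving the task's cpu-cgroup css back to its task_group before the autogroup override is applied:

    tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                      struct task_group, css);
    tg = autogroup_task_group(tsk, tg);    /* autogroup may substitute its own group */
    tsk->sched_task_group = tg;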
7825 static inline int tg_has_rt_tasks(struct task_group *tg) in tg_has_rt_tasks() argument
7832 if (task_group_is_autogroup(tg)) in tg_has_rt_tasks()
7836 if (rt_task(p) && task_group(p) == tg) in tg_has_rt_tasks()
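tg_has_rt_tasks() answers whether any realtime task currently lives in the group; line 7836 sits inside a scan over every thread in the system, roughly as follows (the for_each_process_thread() iteration is an assumption inferred from the gap between lines 7832 and 7836):

    static inline int tg_has_rt_tasks(struct task_group *tg)
    {
        struct task_struct *g, *p;

        /* Autogroups never contain RT tasks; skip the scan entirely. */
        if (task_group_is_autogroup(tg))
            return 0;

        for_each_process_thread(g, p) {
            if (rt_task(p) && task_group(p) == tg)
                return 1;
        }

        return 0;
    }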
7844 struct task_group *tg; member
7849 static int tg_rt_schedulable(struct task_group *tg, void *data) in tg_rt_schedulable() argument
7856 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
7857 runtime = tg->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
7859 if (tg == d->tg) { in tg_rt_schedulable()
7873 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) in tg_rt_schedulable()
7887 list_for_each_entry_rcu(child, &tg->children, siblings) { in tg_rt_schedulable()
7891 if (child == d->tg) { in tg_rt_schedulable()
7905 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) in __rt_schedulable() argument
7910 .tg = tg, in __rt_schedulable()
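__rt_schedulable() packages the proposed bandwidth into the rt_schedulable_data struct (whose tg member is line 7844) and walks the whole group tree, running tg_rt_schedulable() on the way down and the no-op tg_nop() on the way up. A sketch of the likely shape:

    static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
    {
        int ret;
        struct rt_schedulable_data data = {
            .tg = tg,
            .rt_period = period,
            .rt_runtime = runtime,
        };

        rcu_read_lock();
        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
        rcu_read_unlock();

        return ret;
    }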
7922 static int tg_set_rt_bandwidth(struct task_group *tg, in tg_set_rt_bandwidth() argument
7931 if (tg == &root_task_group && rt_runtime == 0) in tg_set_rt_bandwidth()
7940 err = __rt_schedulable(tg, rt_period, rt_runtime); in tg_set_rt_bandwidth()
7944 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
7945 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); in tg_set_rt_bandwidth()
7946 tg->rt_bandwidth.rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
7949 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth()
7955 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
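Once the admission check passes, tg_set_rt_bandwidth() propagates the new runtime to every per-CPU runqueue; line 7949 sits inside a loop that presumably looks like:

    for_each_possible_cpu(i) {
        struct rt_rq *rt_rq = tg->rt_rq[i];

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_runtime;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }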
7963 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) in sched_group_set_rt_runtime() argument
7967 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_set_rt_runtime()
7972 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_runtime()
7975 static long sched_group_rt_runtime(struct task_group *tg) in sched_group_rt_runtime() argument
7979 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) in sched_group_rt_runtime()
7982 rt_runtime_us = tg->rt_bandwidth.rt_runtime; in sched_group_rt_runtime()
7987 static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) in sched_group_set_rt_period() argument
7992 rt_runtime = tg->rt_bandwidth.rt_runtime; in sched_group_set_rt_period()
7994 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_period()
7997 static long sched_group_rt_period(struct task_group *tg) in sched_group_rt_period() argument
8001 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_rt_period()
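These four accessors exist mainly to translate units: userspace reads and writes cpu.rt_runtime_us and cpu.rt_period_us in microseconds, while rt_bandwidth stores nanoseconds (the period as a ktime). A sketch of one setter/getter pair, assuming the usual NSEC_PER_USEC conversion and -1 mapping to RUNTIME_INF:

    static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
    {
        u64 rt_runtime, rt_period;

        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
        if (rt_runtime_us < 0)
            rt_runtime = RUNTIME_INF;    /* -1 disables the runtime limit */

        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
    }

    static long sched_group_rt_runtime(struct task_group *tg)
    {
        u64 rt_runtime_us;

        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
            return -1;

        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
        do_div(rt_runtime_us, NSEC_PER_USEC);
        return rt_runtime_us;
    }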
8021 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) in sched_rt_can_attach() argument
8024 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) in sched_rt_can_attach()
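The attach check is the converse of the admission test above: a realtime task may not enter a group whose RT runtime is zero, since it would never be allowed to run there. The full function is presumably just:

    static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
    {
        /* Don't accept RT tasks when there is no way for them to run. */
        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
            return 0;

        return 1;
    }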
8202 struct task_group *tg; in cpu_cgroup_css_alloc() local
8209 tg = sched_create_group(parent); in cpu_cgroup_css_alloc()
8210 if (IS_ERR(tg)) in cpu_cgroup_css_alloc()
8213 sched_online_group(tg, parent); in cpu_cgroup_css_alloc()
8215 return &tg->css; in cpu_cgroup_css_alloc()
8220 struct task_group *tg = css_tg(css); in cpu_cgroup_css_released() local
8222 sched_offline_group(tg); in cpu_cgroup_css_released()
8227 struct task_group *tg = css_tg(css); in cpu_cgroup_css_free() local
8232 sched_free_group(tg); in cpu_cgroup_css_free()
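These three css callbacks map the cgroup lifecycle onto the group lifecycle above: css_alloc creates and onlines the group, css_released unhooks it from the hierarchy, and css_free releases it. Note that cpu_cgroup_css_free() calls sched_free_group() directly rather than sched_destroy_group(): cgroup core already guarantees an RCU grace period between css_released and css_free, so a second call_rcu() would be redundant. A sketch of the wiring, assuming the standard cgroup_subsys layout (the real structure sets further callbacks such as attach and fork):

    struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc    = cpu_cgroup_css_alloc,    /* sched_create_group() + sched_online_group() */
        .css_released = cpu_cgroup_css_released, /* sched_offline_group() */
        .css_free     = cpu_cgroup_css_free,     /* sched_free_group(); grace period already done */
    };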
8277 struct task_group *tg = css_tg(css); in cpu_shares_read_u64() local
8279 return (u64) scale_load_down(tg->shares); in cpu_shares_read_u64()
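scale_load_down() strips the extra fixed-point resolution that 64-bit kernels fold into load weights, so userspace sees plain share units through cpu.shares. The write-side counterpart presumably applies the inverse scaling before handing the value to the fair-class code:

    static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
                                    struct cftype *cftype, u64 shareval)
    {
        return sched_group_set_shares(css_tg(css), scale_load(shareval));
    }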
8288 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8290 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) in tg_set_cfs_bandwidth() argument
8293 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
8295 if (tg == &root_task_group) in tg_set_cfs_bandwidth()
8320 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
8343 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
8363 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) in tg_set_cfs_quota() argument
8367 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
8373 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_quota()
8376 long tg_get_cfs_quota(struct task_group *tg) in tg_get_cfs_quota() argument
8380 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
8383 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
8389 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) in tg_set_cfs_period() argument
8394 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
8396 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_period()
8399 long tg_get_cfs_period(struct task_group *tg) in tg_get_cfs_period() argument
8403 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
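As with the RT knobs, the CFS interface converts between microseconds at the cgroup boundary (cpu.cfs_quota_us, cpu.cfs_period_us) and nanoseconds internally, with a negative quota meaning unlimited. The elided body of tg_set_cfs_quota() is plausibly:

    int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
    {
        u64 quota, period;

        period = ktime_to_ns(tg->cfs_bandwidth.period);
        if (cfs_quota_us < 0)
            quota = RUNTIME_INF;         /* -1 removes the quota */
        else
            quota = (u64)cfs_quota_us * NSEC_PER_USEC;

        return tg_set_cfs_bandwidth(tg, period, quota);
    }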
8434 struct task_group *tg; member
8442 static u64 normalize_cfs_quota(struct task_group *tg, in normalize_cfs_quota() argument
8447 if (tg == d->tg) { in normalize_cfs_quota()
8451 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
8452 quota = tg_get_cfs_quota(tg); in normalize_cfs_quota()
8462 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) in tg_cfs_schedulable_down() argument
8465 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
8468 if (!tg->parent) { in tg_cfs_schedulable_down()
8471 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
8473 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
8490 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
8494 .tg = tg, in __cfs_schedulable()
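__cfs_schedulable() mirrors __rt_schedulable(): it fills the cfs_schedulable_data struct (whose tg member is line 8434) and walks the tree with tg_cfs_schedulable_down() as the "down" visitor, checking at each level that the children's normalized quotas fit within the parent's. A sketch of the likely shape (the real function also normalizes period and quota into common units first, since normalize_cfs_quota() compares against d->period):

    static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
    {
        int ret;
        struct cfs_schedulable_data data = {
            .tg = tg,
            .period = period,
            .quota = quota,
        };

        rcu_read_lock();
        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
        rcu_read_unlock();

        return ret;
    }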
8513 struct task_group *tg = css_tg(seq_css(sf)); in cpu_stats_show() local
8514 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_stats_show()
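cpu_stats_show() backs the cpu.stat cgroup file; it presumably just prints the three throttling counters kept in cfs_bandwidth:

    static int cpu_stats_show(struct seq_file *sf, void *v)
    {
        struct task_group *tg = css_tg(seq_css(sf));
        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

        seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
        seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
        seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

        return 0;
    }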