task_group         62 include/linux/sched.h struct task_group;
task_group        679 include/linux/sched.h 	struct task_group		*sched_task_group;
task_group          7 include/linux/sched/autogroup.h struct task_group;
task_group         29 include/linux/sched/autogroup.h extern struct task_group root_task_group;
task_group         20 kernel/sched/autogroup.c void autogroup_free(struct task_group *tg)
task_group         66 kernel/sched/autogroup.c 	struct task_group *tg;
task_group        107 kernel/sched/autogroup.c bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
task_group        262 kernel/sched/autogroup.c int autogroup_path(struct task_group *tg, char *buf, int buflen)
task_group         11 kernel/sched/autogroup.h 	struct task_group	*tg;
task_group         18 kernel/sched/autogroup.h extern void autogroup_free(struct task_group *tg);
task_group         20 kernel/sched/autogroup.h static inline bool task_group_is_autogroup(struct task_group *tg)
task_group         25 kernel/sched/autogroup.h extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
task_group         27 kernel/sched/autogroup.h static inline struct task_group *
task_group         28 kernel/sched/autogroup.h autogroup_task_group(struct task_struct *p, struct task_group *tg)
task_group         38 kernel/sched/autogroup.h extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
task_group         43 kernel/sched/autogroup.h static inline void autogroup_free(struct task_group *tg) { }
task_group         44 kernel/sched/autogroup.h static inline bool task_group_is_autogroup(struct task_group *tg)
task_group         49 kernel/sched/autogroup.h static inline struct task_group *
task_group         50 kernel/sched/autogroup.h autogroup_task_group(struct task_struct *p, struct task_group *tg)
task_group         55 kernel/sched/autogroup.h static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
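The kernel/sched/autogroup.h hits above pair each real helper with a no-op stub used when CONFIG_SCHED_AUTOGROUP is compiled out. As a rough user-space model of the redirect that autogroup_task_group() performs (not the kernel code itself; the toy_* types and names below are invented for illustration), a task that would otherwise be accounted to root_task_group is moved under its session's autogroup, while a task already placed in a real cgroup keeps its group:

/* Toy user-space model of the autogroup redirect; every type here is an
 * invented stand-in, not the kernel's struct task_group / task_struct. */
#include <stdbool.h>
#include <stdio.h>

struct toy_task_group { const char *name; };
struct toy_autogroup  { struct toy_task_group tg; };
struct toy_task {
	struct toy_task_group *cgroup_tg;   /* group assigned via cgroups    */
	struct toy_autogroup  *autogroup;   /* per-session autogroup, if any */
};

static struct toy_task_group toy_root_task_group = { "/" };

/* Mirrors the idea behind task_wants_autogroup(): only tasks that would
 * land in the root group get redirected. */
static bool toy_task_wants_autogroup(struct toy_task *p, struct toy_task_group *tg)
{
	return tg == &toy_root_task_group && p->autogroup != NULL;
}

static struct toy_task_group *
toy_autogroup_task_group(struct toy_task *p, struct toy_task_group *tg)
{
	if (toy_task_wants_autogroup(p, tg))
		return &p->autogroup->tg;
	return tg;              /* already in a real cgroup: leave it alone */
}

int main(void)
{
	struct toy_task_group sysgrp  = { "system.slice" };
	struct toy_autogroup  session = { { "autogroup-42" } };
	struct toy_task shell  = { &toy_root_task_group, &session };
	struct toy_task daemon = { &sysgrp, NULL };

	printf("shell  -> %s\n", toy_autogroup_task_group(&shell,  shell.cgroup_tg)->name);
	printf("daemon -> %s\n", toy_autogroup_task_group(&daemon, daemon.cgroup_tg)->name);
	return 0;
}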
task_group        710 kernel/sched/core.c int walk_tg_tree_from(struct task_group *from,
task_group        713 kernel/sched/core.c 	struct task_group *parent, *child;
task_group        741 kernel/sched/core.c int tg_nop(struct task_group *tg, void *data)
task_group        887 kernel/sched/core.c 	if (task_group_is_autogroup(task_group(p)))
task_group        889 kernel/sched/core.c 	if (task_group(p) == &root_task_group)
task_group        892 kernel/sched/core.c 	uc_max = task_group(p)->uclamp[clamp_id];
task_group       1098 kernel/sched/core.c 	struct task_group *tg = &root_task_group;
task_group       4907 kernel/sched/core.c 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
task_group       4908 kernel/sched/core.c 				!task_group_is_autogroup(task_group(p))) {
task_group       6546 kernel/sched/core.c struct task_group root_task_group;
task_group       6611 kernel/sched/core.c 	task_group_cache = KMEM_CACHE(task_group, 0);
task_group       6914 kernel/sched/core.c static inline void alloc_uclamp_sched_group(struct task_group *tg,
task_group       6915 kernel/sched/core.c 					    struct task_group *parent)
task_group       6928 kernel/sched/core.c static void sched_free_group(struct task_group *tg)
task_group       6937 kernel/sched/core.c struct task_group *sched_create_group(struct task_group *parent)
task_group       6939 kernel/sched/core.c 	struct task_group *tg;
task_group       6960 kernel/sched/core.c void sched_online_group(struct task_group *tg, struct task_group *parent)
task_group       6982 kernel/sched/core.c 	sched_free_group(container_of(rhp, struct task_group, rcu));
task_group       6985 kernel/sched/core.c void sched_destroy_group(struct task_group *tg)
task_group       6991 kernel/sched/core.c void sched_offline_group(struct task_group *tg)
task_group       7006 kernel/sched/core.c 	struct task_group *tg;
task_group       7014 kernel/sched/core.c 			  struct task_group, css);
task_group       7068 kernel/sched/core.c static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
task_group       7070 kernel/sched/core.c 	return css ? container_of(css, struct task_group, css) : NULL;
task_group       7076 kernel/sched/core.c 	struct task_group *parent = css_tg(parent_css);
task_group       7077 kernel/sched/core.c 	struct task_group *tg;
task_group       7094 kernel/sched/core.c 	struct task_group *tg = css_tg(css);
task_group       7095 kernel/sched/core.c 	struct task_group *parent = css_tg(css->parent);
task_group       7110 kernel/sched/core.c 	struct task_group *tg = css_tg(css);
task_group       7117 kernel/sched/core.c 	struct task_group *tg = css_tg(css);
task_group       7276 kernel/sched/core.c 	struct task_group *tg;
task_group       7321 kernel/sched/core.c 	struct task_group *tg;
task_group       7366 kernel/sched/core.c 	struct task_group *tg = css_tg(css);
task_group       7377 kernel/sched/core.c static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
task_group       7379 kernel/sched/core.c static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
task_group       7455 kernel/sched/core.c static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
task_group       7470 kernel/sched/core.c static long tg_get_cfs_quota(struct task_group *tg)
task_group       7483 kernel/sched/core.c static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
task_group       7496 kernel/sched/core.c static long tg_get_cfs_period(struct task_group *tg)
task_group       7531 kernel/sched/core.c 	struct task_group *tg;
task_group       7539 kernel/sched/core.c static u64 normalize_cfs_quota(struct task_group *tg,
task_group       7559 kernel/sched/core.c static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
task_group       7592 kernel/sched/core.c static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
task_group       7615 kernel/sched/core.c 	struct task_group *tg = css_tg(seq_css(sf));
task_group       7721 kernel/sched/core.c 		struct task_group *tg = css_tg(css);
task_group       7742 kernel/sched/core.c 	struct task_group *tg = css_tg(css);
task_group       7836 kernel/sched/core.c 	struct task_group *tg = css_tg(seq_css(sf));
task_group       7845 kernel/sched/core.c 	struct task_group *tg = css_tg(of_css(of));
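The cpu_max_show()/cpu_max_write() hits at the end of the core.c block are the cgroup v2 front end for tg_set_cfs_bandwidth(). A minimal user-space sketch of driving that path, assuming cgroup v2 is mounted at /sys/fs/cgroup and a group named "demo" already exists (error handling kept deliberately thin):

/* Minimal sketch: set a CFS bandwidth limit through the cgroup v2 "cpu.max"
 * file, which ends up in tg_set_cfs_bandwidth() in kernel/sched/core.c.
 * Assumes cgroup v2 at /sys/fs/cgroup and an existing group "demo". */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/demo/cpu.max";
	FILE *f = fopen(path, "w");
	int ret;

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* quota=50000us per period=100000us, i.e. at most half a CPU */
	ret = fprintf(f, "50000 100000\n");
	if (fclose(f) != 0 || ret < 0) {
		perror("write cpu.max");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}

Writing "max 100000" removes the quota again; on cgroup v1 the same limit goes through cpu.cfs_quota_us and cpu.cfs_period_us, which map to the tg_set_cfs_quota() and tg_set_cfs_period() entries listed above.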
task_group        375 kernel/sched/debug.c static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
task_group        422 kernel/sched/debug.c static char *task_group_path(struct task_group *tg)
task_group        456 kernel/sched/debug.c 	SEQ_printf(m, " %s", task_group_path(task_group(p)));
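task_group_path() in the debug.c block resolves a task_group back to its cgroup path for the scheduler debug output. The same association is visible from user space by reading /proc/<pid>/cgroup; a small convenience sketch (nothing scheduler-specific is assumed beyond that procfs file):

/* Print the cgroup membership of the current task; the "cpu" controller
 * line (cgroup v1) or the single "0::" line (cgroup v2) names the group
 * that task_group() resolves to in the kernel. */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/cgroup", "r");

	if (!f) {
		perror("/proc/self/cgroup");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}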
task_group       2997 kernel/sched/fair.c 	struct task_group *tg = cfs_rq->tg;
task_group       4376 kernel/sched/fair.c static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
task_group       4384 kernel/sched/fair.c 	struct task_group *tg = cfs_rq->tg;
task_group       4453 kernel/sched/fair.c static inline int throttled_lb_pair(struct task_group *tg,
task_group       4465 kernel/sched/fair.c static int tg_unthrottle_up(struct task_group *tg, void *data)
task_group       4483 kernel/sched/fair.c static int tg_throttle_down(struct task_group *tg, void *data)
task_group       4881 kernel/sched/fair.c static void sync_throttle(struct task_group *tg, int cpu)
task_group       5038 kernel/sched/fair.c 	struct task_group *tg;
task_group       5057 kernel/sched/fair.c 	struct task_group *tg;
task_group       5095 kernel/sched/fair.c static inline void sync_throttle(struct task_group *tg, int cpu) {}
task_group       5108 kernel/sched/fair.c static inline int throttled_lb_pair(struct task_group *tg,
task_group       5120 kernel/sched/fair.c static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
task_group       7259 kernel/sched/fair.c 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
task_group       10261 kernel/sched/fair.c void free_fair_sched_group(struct task_group *tg)
task_group       10278 kernel/sched/fair.c int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
task_group       10319 kernel/sched/fair.c void online_fair_sched_group(struct task_group *tg)
task_group       10337 kernel/sched/fair.c void unregister_fair_sched_group(struct task_group *tg)
task_group       10362 kernel/sched/fair.c void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
task_group       10395 kernel/sched/fair.c int sched_group_set_shares(struct task_group *tg, unsigned long shares)
task_group       10433 kernel/sched/fair.c void free_fair_sched_group(struct task_group *tg) { }
task_group       10435 kernel/sched/fair.c int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
task_group       10440 kernel/sched/fair.c void online_fair_sched_group(struct task_group *tg) { }
task_group       10442 kernel/sched/fair.c void unregister_fair_sched_group(struct task_group *tg) { }
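sched_group_set_shares() at fair.c:10395 is what the cgroup cpu controller calls when a group's relative weight changes. A minimal user-space sketch of exercising it, assuming cgroup v2 at /sys/fs/cgroup and an existing "demo" group (on cgroup v1 the equivalent knob is cpu.shares, default 1024):

/* Minimal sketch: raise a group's CPU weight through cgroup v2 "cpu.weight",
 * which the kernel converts and passes to sched_group_set_shares() for the
 * group's per-CPU sched entities. Assumes /sys/fs/cgroup/demo exists. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/demo/cpu.weight";
	FILE *f = fopen(path, "w");
	int ret;

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	ret = fprintf(f, "200\n");      /* default is 100; 200 = double weight */
	if (fclose(f) != 0 || ret < 0) {
		perror("write cpu.weight");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}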
task_group        138 kernel/sched/rt.c void free_rt_sched_group(struct task_group *tg)
task_group        156 kernel/sched/rt.c void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
task_group        183 kernel/sched/rt.c int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
task_group        251 kernel/sched/rt.c void free_rt_sched_group(struct task_group *tg) { }
task_group        253 kernel/sched/rt.c int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
task_group        455 kernel/sched/rt.c typedef struct task_group *rt_rq_iter_t;
task_group        457 kernel/sched/rt.c static inline struct task_group *next_task_group(struct task_group *tg)
task_group        461 kernel/sched/rt.c 			typeof(struct task_group), list);
task_group        555 kernel/sched/rt.c 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
task_group       2403 kernel/sched/rt.c static inline int tg_has_rt_tasks(struct task_group *tg)
task_group       2414 kernel/sched/rt.c 		if (rt_task(p) && task_group(p) == tg)
task_group       2422 kernel/sched/rt.c 	struct task_group *tg;
task_group       2427 kernel/sched/rt.c static int tg_rt_schedulable(struct task_group *tg, void *data)
task_group       2430 kernel/sched/rt.c 	struct task_group *child;
task_group       2483 kernel/sched/rt.c static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
task_group       2500 kernel/sched/rt.c static int tg_set_rt_bandwidth(struct task_group *tg,
task_group       2541 kernel/sched/rt.c int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
task_group       2555 kernel/sched/rt.c long sched_group_rt_runtime(struct task_group *tg)
task_group       2567 kernel/sched/rt.c int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
task_group       2580 kernel/sched/rt.c long sched_group_rt_period(struct task_group *tg)
task_group       2602 kernel/sched/rt.c int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
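tg_rt_schedulable() in the rt.c block walks the group hierarchy (using the walk_tg_tree_from()/tg_nop machinery listed under core.c) and rejects any setting where the children's runtime/period ratios add up to more than the parent's. A self-contained toy model of that admission check follows; the toy_* names are invented, the fixed-point handling is simplified, and this is not the kernel's code:

/* Toy model of the RT group bandwidth admission check performed by
 * tg_rt_schedulable(): a parent's runtime/period ratio must cover the sum
 * of its children's ratios. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_group {
	const char *name;
	uint64_t runtime_us;            /* allowed RT runtime per period */
	uint64_t period_us;
	struct toy_group *children[4];  /* NULL-terminated */
};

/* Scale a runtime/period pair to a fixed-point ratio, as to_ratio() does. */
static uint64_t toy_ratio(uint64_t runtime_us, uint64_t period_us)
{
	return (runtime_us << 20) / period_us;
}

static bool toy_rt_schedulable(struct toy_group *tg)
{
	uint64_t sum = 0;

	for (int i = 0; tg->children[i]; i++) {
		sum += toy_ratio(tg->children[i]->runtime_us,
				 tg->children[i]->period_us);
		if (!toy_rt_schedulable(tg->children[i]))
			return false;
	}
	return sum <= toy_ratio(tg->runtime_us, tg->period_us);
}

int main(void)
{
	struct toy_group a    = { "a",    300000, 1000000, { NULL } };
	struct toy_group b    = { "b",    800000, 1000000, { NULL } };
	struct toy_group root = { "root", 950000, 1000000, { &a, &b, NULL } };

	/* 0.3 + 0.8 > 0.95, so this configuration is rejected. */
	printf("schedulable: %s\n", toy_rt_schedulable(&root) ? "yes" : "no");
	return 0;
}

The corresponding user-visible knobs are cpu.rt_runtime_us and cpu.rt_period_us (cgroup v1, CONFIG_RT_GROUP_SCHED), backed by sched_group_set_rt_runtime() and sched_group_set_rt_period() above; sched_rt_can_attach() refuses to attach an RT task to a group whose runtime is zero.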
task_group        391 kernel/sched/sched.h 	struct task_group	*parent;
task_group        427 kernel/sched/sched.h typedef int (*tg_visitor)(struct task_group *, void *);
task_group        429 kernel/sched/sched.h extern int walk_tg_tree_from(struct task_group *from,
task_group        443 kernel/sched/sched.h extern int tg_nop(struct task_group *tg, void *data);
task_group        445 kernel/sched/sched.h extern void free_fair_sched_group(struct task_group *tg);
task_group        446 kernel/sched/sched.h extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
task_group        447 kernel/sched/sched.h extern void online_fair_sched_group(struct task_group *tg);
task_group        448 kernel/sched/sched.h extern void unregister_fair_sched_group(struct task_group *tg);
task_group        449 kernel/sched/sched.h extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
task_group        458 kernel/sched/sched.h extern void free_rt_sched_group(struct task_group *tg);
task_group        459 kernel/sched/sched.h extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
task_group        460 kernel/sched/sched.h extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
task_group        463 kernel/sched/sched.h extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
task_group        464 kernel/sched/sched.h extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
task_group        465 kernel/sched/sched.h extern long sched_group_rt_runtime(struct task_group *tg);
task_group        466 kernel/sched/sched.h extern long sched_group_rt_period(struct task_group *tg);
task_group        467 kernel/sched/sched.h extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
task_group        469 kernel/sched/sched.h extern struct task_group *sched_create_group(struct task_group *parent);
task_group        470 kernel/sched/sched.h extern void sched_online_group(struct task_group *tg,
task_group        471 kernel/sched/sched.h 			       struct task_group *parent);
task_group        472 kernel/sched/sched.h extern void sched_destroy_group(struct task_group *tg);
task_group        473 kernel/sched/sched.h extern void sched_offline_group(struct task_group *tg);
task_group        478 kernel/sched/sched.h extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
task_group        570 kernel/sched/sched.h 	struct task_group	*tg;	/* group that "owns" this runqueue */
task_group        628 kernel/sched/sched.h 	struct task_group	*tg;
task_group       1496 kernel/sched/sched.h static inline struct task_group *task_group(struct task_struct *p)
task_group       1505 kernel/sched/sched.h 	struct task_group *tg = task_group(p);
task_group       1523 kernel/sched/sched.h static inline struct task_group *task_group(struct task_struct *p)
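The sched.h block declares the generic tree-walk interface: tg_visitor callbacks handed to walk_tg_tree_from(), with tg_nop() as the do-nothing visitor. Below is a compact user-space model of the down-then-up visitation order; it is recursive for brevity, whereas the kernel version at core.c:710 walks the children lists iteratively under RCU, and all toy_* names are invented:

/* Toy model of walk_tg_tree_from(): call "down" on each group before its
 * children and "up" after them, stopping early if a visitor returns non-zero. */
#include <stdio.h>

struct toy_tg {
	const char *name;
	struct toy_tg *children[4];     /* NULL-terminated */
};

typedef int (*toy_visitor)(struct toy_tg *tg, void *data);

static int toy_tg_nop(struct toy_tg *tg, void *data)
{
	(void)tg;
	(void)data;
	return 0;
}

static int toy_walk_tg_tree_from(struct toy_tg *from, toy_visitor down,
				 toy_visitor up, void *data)
{
	int ret = down(from, data);

	if (ret)
		return ret;
	for (int i = 0; from->children[i]; i++) {
		ret = toy_walk_tg_tree_from(from->children[i], down, up, data);
		if (ret)
			return ret;
	}
	return up(from, data);
}

static int toy_print(struct toy_tg *tg, void *data)
{
	printf("%s%s\n", (const char *)data, tg->name);
	return 0;
}

int main(void)
{
	struct toy_tg leaf1 = { "user.slice",   { NULL } };
	struct toy_tg leaf2 = { "system.slice", { NULL } };
	struct toy_tg root  = { "root", { &leaf1, &leaf2, NULL } };

	/* Visit every group top-down and run the no-op visitor on the way up,
	 * the same down/up shape the throttling helpers in fair.c rely on. */
	return toy_walk_tg_tree_from(&root, toy_print, toy_tg_nop, (void *)"down: ");
}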