sched_group        63 include/linux/sched/topology.h struct sched_group;
sched_group        75 include/linux/sched/topology.h 	struct sched_group *groups;	/* the balancing groups of the domain */
sched_group       174 include/linux/sched/topology.h 	struct sched_group *__percpu *sg;
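
The three topology.h hits above already outline the core shape: struct sched_domain carries a groups pointer (line 75), and the groups themselves form a circular singly linked list (see the next field at kernel/sched/sched.h:1411 further down). A minimal userspace sketch of that shape, using simplified stand-in types rather than the kernel definitions:

#include <stdio.h>

struct sg_sketch {
	struct sg_sketch *next;	/* circular list, as the kernel comment demands */
	int first_cpu;
};

struct sd_sketch {
	struct sg_sketch *groups;	/* the balancing groups of the domain */
};

/* The scheduler walks sd->groups with this do/while shape, stopping when
 * the ring wraps back to the starting group. */
static void walk_groups(struct sd_sketch *sd)
{
	struct sg_sketch *sg = sd->groups;

	do {
		printf("group starting at CPU %d\n", sg->first_cpu);
		sg = sg->next;
	} while (sg != sd->groups);
}

int main(void)
{
	struct sg_sketch g1 = { .first_cpu = 2 };
	struct sg_sketch g0 = { .next = &g1, .first_cpu = 0 };
	struct sd_sketch sd = { .groups = &g0 };

	g1.next = &g0;	/* close the ring */
	walk_groups(&sd);
	return 0;
}
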
sched_group      5579 kernel/sched/fair.c static struct sched_group *
sched_group      5583 kernel/sched/fair.c 	struct sched_group *idlest = NULL, *group = sd->groups;
sched_group      5584 kernel/sched/fair.c 	struct sched_group *most_spare_sg = NULL;
sched_group      5714 kernel/sched/fair.c find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
sched_group      5788 kernel/sched/fair.c 		struct sched_group *group;
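
The fair.c hits at 5579-5788 belong to the slow wake-up path: find_idlest_group() walks sd->groups looking for the least loaded group (the most_spare_sg variable at 5584 shows spare capacity is weighed too), and find_idlest_group_cpu() then picks a CPU inside the chosen group. A toy two-level search with the same structure; plain integer loads stand in for the kernel's load and spare-capacity metrics:

#include <stdio.h>

#define NR_CPUS 4

static int cpu_load[NR_CPUS] = { 7, 3, 5, 1 };

struct group_sketch {
	int cpus[2];	/* CPUs spanned by this group */
	int nr;
};

static int group_load(const struct group_sketch *g)
{
	int i, sum = 0;

	for (i = 0; i < g->nr; i++)
		sum += cpu_load[g->cpus[i]];
	return sum;
}

static int find_idlest_cpu_sketch(const struct group_sketch *g)
{
	int i, best = g->cpus[0];

	for (i = 1; i < g->nr; i++)
		if (cpu_load[g->cpus[i]] < cpu_load[best])
			best = g->cpus[i];
	return best;
}

int main(void)
{
	struct group_sketch groups[2] = {
		{ .cpus = { 0, 1 }, .nr = 2 },
		{ .cpus = { 2, 3 }, .nr = 2 },
	};
	const struct group_sketch *idlest = &groups[0];
	int i;

	for (i = 1; i < 2; i++)		/* level 1: idlest group */
		if (group_load(&groups[i]) < group_load(idlest))
			idlest = &groups[i];

	/* level 2: idlest CPU within that group */
	printf("idlest CPU: %d\n", find_idlest_cpu_sketch(idlest));
	return 0;
}
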
sched_group      7738 kernel/sched/fair.c 	struct sched_group *busiest;	/* Busiest group in this sd */
sched_group      7739 kernel/sched/fair.c 	struct sched_group *local;	/* Local group in this sd */
sched_group      7797 kernel/sched/fair.c 	struct sched_group *sdg = sd->groups;
sched_group      7813 kernel/sched/fair.c 	struct sched_group *group, *sdg = sd->groups;
sched_group      7936 kernel/sched/fair.c static inline int sg_imbalanced(struct sched_group *group)
sched_group      7992 kernel/sched/fair.c group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
sched_group      8002 kernel/sched/fair.c group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
sched_group      8008 kernel/sched/fair.c group_type group_classify(struct sched_group *group,
sched_group      8053 kernel/sched/fair.c 				      struct sched_group *group,
sched_group      8123 kernel/sched/fair.c 				   struct sched_group *sg,
sched_group      8233 kernel/sched/fair.c 	struct sched_group *sg = env->sd->groups;
sched_group      8532 kernel/sched/fair.c static struct sched_group *find_busiest_group(struct lb_env *env)
sched_group      8637 kernel/sched/fair.c 				     struct sched_group *group)
sched_group      8789 kernel/sched/fair.c 	struct sched_group *sg = env->sd->groups;
sched_group      8835 kernel/sched/fair.c 	struct sched_group *group;
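
Lines 7738-8835 are the periodic load-balancing side: per-group statistics are gathered for each sched_group, group_classify() maps them to a group_type, and find_busiest_group() compares the local group against the others. A rough sketch of the classification step; the enum ordering mirrors the kernel's of this era (higher means busier and wins when picking the busiest group), but the overload test here is deliberately crude and invented for the demo:

#include <stdio.h>
#include <stdbool.h>

enum group_type {		/* same ordering idea as the kernel enum */
	group_other,
	group_misfit_task,
	group_imbalanced,
	group_overloaded,
};

struct sg_stats_sketch {
	unsigned long group_capacity;
	unsigned long group_util;
	bool imbalanced;	/* stands in for sg_imbalanced() */
	bool has_misfit;
};

static enum group_type classify(const struct sg_stats_sketch *s)
{
	if (s->group_util > s->group_capacity)	/* crude overload test */
		return group_overloaded;
	if (s->imbalanced)
		return group_imbalanced;
	if (s->has_misfit)
		return group_misfit_task;
	return group_other;
}

int main(void)
{
	struct sg_stats_sketch s = { .group_capacity = 1024,
				     .group_util = 1400 };

	printf("type = %d\n", classify(&s));	/* prints 3: group_overloaded */
	return 0;
}
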
sched_group      1411 kernel/sched/sched.h 	struct sched_group	*next;			/* Must be a circular list */
sched_group      1428 kernel/sched/sched.h static inline struct cpumask *sched_group_span(struct sched_group *sg)
sched_group      1436 kernel/sched/sched.h static inline struct cpumask *group_balance_mask(struct sched_group *sg)
sched_group      1445 kernel/sched/sched.h static inline unsigned int group_first_cpu(struct sched_group *group)
sched_group      1450 kernel/sched/sched.h extern int group_balance_cpu(struct sched_group *sg);
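
The sched.h hits show the accessors: sched_group_span() returns the cpumask stored at the tail of struct sched_group, group_balance_mask() returns the balance mask kept with the group's capacity data, and group_first_cpu() is just the first set bit of the span. A stand-in sketch with an unsigned long in place of struct cpumask:

#include <stdio.h>

struct sg_sketch {
	unsigned long span;	/* stand-in for the trailing cpumask[] */
};

static unsigned long *group_span(struct sg_sketch *sg)
{
	return &sg->span;
}

/* Mirrors group_first_cpu(): first set bit of the span. Returns -1 for an
 * empty span (the kernel's cpumask_first() returns nr_cpu_ids instead). */
static int first_cpu(struct sg_sketch *sg)
{
	return __builtin_ffsl((long)*group_span(sg)) - 1;
}

int main(void)
{
	struct sg_sketch sg = { .span = 0xcUL };	/* CPUs 2 and 3 */

	printf("first cpu: %d\n", first_cpu(&sg));	/* prints 2 */
	return 0;
}
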
sched_group        31 kernel/sched/topology.c 	struct sched_group *group = sd->groups;
sched_group       558 kernel/sched/topology.c static void free_sched_groups(struct sched_group *sg, int free_sgc)
sched_group       560 kernel/sched/topology.c 	struct sched_group *tmp, *first;
sched_group       728 kernel/sched/topology.c int group_balance_cpu(struct sched_group *sg)
sched_group       840 kernel/sched/topology.c build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
sched_group       876 kernel/sched/topology.c static struct sched_group *
sched_group       879 kernel/sched/topology.c 	struct sched_group *sg;
sched_group       882 kernel/sched/topology.c 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
sched_group       899 kernel/sched/topology.c 				     struct sched_group *sg)
sched_group       929 kernel/sched/topology.c 	struct sched_group *first = NULL, *last = NULL, *sg;
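
The kzalloc_node() call at topology.c:882 shows why sched_group_span() works at all: the group and its cpumask are a single allocation, with the mask living in memory just past the struct. A userspace equivalent of that flexible-array pattern, with a fixed mask size standing in for cpumask_size():

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for cpumask_size(): in the kernel the mask size is fixed at
 * boot, not at compile time, hence the runtime variable. */
static size_t mask_size = sizeof(unsigned long);

struct sg_sketch {
	struct sg_sketch *next;
	unsigned long cpumask[];	/* flexible array member at the tail */
};

/* One allocation covers the struct and its trailing mask, mirroring
 * kzalloc_node(sizeof(struct sched_group) + cpumask_size(), ...). */
static struct sg_sketch *alloc_group(void)
{
	return calloc(1, sizeof(struct sg_sketch) + mask_size);
}

int main(void)
{
	struct sg_sketch *sg = alloc_group();

	if (!sg)
		return 1;
	sg->cpumask[0] = 0x3;	/* span CPUs 0-1 */
	printf("span bits: %#lx\n", sg->cpumask[0]);
	free(sg);
	return 0;
}
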
sched_group      1057 kernel/sched/topology.c static struct sched_group *get_group(int cpu, struct sd_data *sdd)
sched_group      1061 kernel/sched/topology.c 	struct sched_group *sg;
sched_group      1104 kernel/sched/topology.c 	struct sched_group *first = NULL, *last = NULL;
sched_group      1116 kernel/sched/topology.c 		struct sched_group *sg;
sched_group      1149 kernel/sched/topology.c 	struct sched_group *sg = sd->groups;
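
The group-building code around topology.c:1104 (the first/last pointer pair) stitches the groups into the ring that the "Must be a circular list" comment in sched.h insists on: append each new group after last, then close the loop. The same construction in miniature:

#include <stdio.h>
#include <stdlib.h>

struct sg_sketch {
	struct sg_sketch *next;
	int id;
};

int main(void)
{
	struct sg_sketch *first = NULL, *last = NULL, *sg;
	int i;

	/* Append three groups, tracking first/last as the kernel code does. */
	for (i = 0; i < 3; i++) {
		sg = calloc(1, sizeof(*sg));
		if (!sg)
			return 1;
		sg->id = i;
		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;	/* must be a circular list */

	sg = first;
	do {
		printf("group %d\n", sg->id);
		sg = sg->next;
	} while (sg != first);
	/* (the ring is leaked on exit; fine for a demo) */
	return 0;
}
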
sched_group      1766 kernel/sched/topology.c 		sdd->sg = alloc_percpu(struct sched_group *);
sched_group      1777 kernel/sched/topology.c 			struct sched_group *sg;
sched_group      1794 kernel/sched/topology.c 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
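
Finally, the allocation code at topology.c:1766-1794 pre-allocates one candidate group per CPU per topology level and parks the pointers in a per-CPU variable; get_group() later hands them out instead of allocating during domain construction. Userspace has no alloc_percpu(), so the closest analogue is a plain per-CPU array of pointers:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct sg_sketch {
	unsigned long cpumask[1];
};

/* Stand-in for sdd->sg = alloc_percpu(struct sched_group *): one group
 * pointer slot per CPU, filled up front so group construction later never
 * has to allocate. */
static struct sg_sketch *percpu_sg[NR_CPUS];

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		percpu_sg[cpu] = calloc(1, sizeof(struct sg_sketch));
		if (!percpu_sg[cpu])
			return 1;
	}
	printf("pre-allocated %d candidate groups\n", NR_CPUS);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		free(percpu_sg[cpu]);
	return 0;
}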