sched_group_span 5600 kernel/sched/fair.c 		if (!cpumask_intersects(sched_group_span(group),
sched_group_span 5605 kernel/sched/fair.c 					       sched_group_span(group));
sched_group_span 5615 kernel/sched/fair.c 		for_each_cpu(i, sched_group_span(group)) {
sched_group_span 5725 kernel/sched/fair.c 		return cpumask_first(sched_group_span(group));
sched_group_span 5728 kernel/sched/fair.c 	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
sched_group_span 7836 kernel/sched/fair.c 		for_each_cpu(cpu, sched_group_span(sdg)) {
sched_group_span 8061 kernel/sched/fair.c 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
sched_group_span 8248 kernel/sched/fair.c 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
sched_group_span 8643 kernel/sched/fair.c 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
sched_group_span 8844 kernel/sched/fair.c 		.dst_grpmask    = sched_group_span(sd->groups),
sched_group_span 1447 kernel/sched/sched.h 	return cpumask_first(sched_group_span(group));
sched_group_span   50 kernel/sched/topology.c 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
sched_group_span   62 kernel/sched/topology.c 		if (!cpumask_weight(sched_group_span(group))) {
sched_group_span   69 kernel/sched/topology.c 		    cpumask_intersects(groupmask, sched_group_span(group))) {
sched_group_span   75 kernel/sched/topology.c 		cpumask_or(groupmask, groupmask, sched_group_span(group));
sched_group_span   79 kernel/sched/topology.c 				cpumask_pr_args(sched_group_span(group)));
sched_group_span   82 kernel/sched/topology.c 		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
sched_group_span   92 kernel/sched/topology.c 				   sched_group_span(group))) {
sched_group_span  842 kernel/sched/topology.c 	const struct cpumask *sg_span = sched_group_span(sg);
sched_group_span  888 kernel/sched/topology.c 	sg_span = sched_group_span(sg);
sched_group_span  907 kernel/sched/topology.c 	cpu = cpumask_first_and(sched_group_span(sg), mask);
sched_group_span  920 kernel/sched/topology.c 	sg_span = sched_group_span(sg);
sched_group_span  963 kernel/sched/topology.c 		sg_span = sched_group_span(sg);
sched_group_span 1080 kernel/sched/topology.c 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
sched_group_span 1081 kernel/sched/topology.c 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
sched_group_span 1083 kernel/sched/topology.c 		cpumask_set_cpu(cpu, sched_group_span(sg));
sched_group_span 1087 kernel/sched/topology.c 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
sched_group_span 1123 kernel/sched/topology.c 		cpumask_or(covered, covered, sched_group_span(sg));
sched_group_span 1156 kernel/sched/topology.c 		sg->group_weight = cpumask_weight(sched_group_span(sg));
sched_group_span 1161 kernel/sched/topology.c 		for_each_cpu(cpu, sched_group_span(sg)) {
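
The call sites above all follow one pattern: fetch a group's CPU mask with sched_group_span() and then iterate, test, or copy it. The sketch below illustrates that pattern. The helper body (returning the cpumask embedded in struct sched_group via to_cpumask()) is an assumption reproduced from memory of typical kernel trees rather than from this listing, and example_first_allowed_cpu() is a hypothetical name used only for illustration, not a kernel function.

	/* Assumed definition: sched_group_span() conventionally exposes the
	 * cpumask stored at the tail of struct sched_group. */
	static inline struct cpumask *sched_group_span(struct sched_group *sg)
	{
		return to_cpumask(sg->cpumask);
	}

	/* Hypothetical caller mirroring the pattern at fair.c:5725-5728:
	 * prefer the first CPU that is both in the group and allowed for
	 * the task, falling back to the group's first CPU. */
	static int example_first_allowed_cpu(struct sched_group *group,
					     struct task_struct *p)
	{
		int i;

		for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr)
			return i;

		return cpumask_first(sched_group_span(group));
	}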