imbalance        5590 kernel/sched/fair.c 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
imbalance        5638 kernel/sched/fair.c 			if (min_runnable_load > (runnable_load + imbalance)) {
imbalance        5646 kernel/sched/fair.c 			} else if ((runnable_load < (min_runnable_load + imbalance)) &&
imbalance        5697 kernel/sched/fair.c 	    min_runnable_load + imbalance >= this_runnable_load)
imbalance        5700 kernel/sched/fair.c 	if (min_runnable_load > (this_runnable_load + imbalance))
imbalance        5703 kernel/sched/fair.c 	if ((this_runnable_load < (min_runnable_load + imbalance)) &&
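
The hits at fair.c:5590-5703 are all inside find_idlest_group(). Here imbalance is not a measured quantity but a hysteresis margin derived from sd->imbalance_pct: a remote group must beat the local group's runnable load by this slack before the task is placed remotely, which keeps marginal differences from causing migrations. A minimal userspace sketch of that comparison follows; NICE_0_LOAD and the imbalance_pct value are assumed constants, and scale_load_down() is reduced to an identity (in the kernel it may shift by SCHED_FIXEDPOINT_SHIFT):

/*
 * Minimal sketch of the hysteresis check in find_idlest_group()
 * (fair.c:5590-5703).  Constants and helpers are simplified
 * stand-ins, not the kernel definitions.
 */
#include <stdio.h>

#define NICE_0_LOAD	1024UL	/* assumed: weight of one nice-0 task */
#define IMBALANCE_PCT	125	/* assumed: a typical sd->imbalance_pct */

/* Identity here; the kernel may shift by SCHED_FIXEDPOINT_SHIFT. */
static unsigned long scale_load_down(unsigned long w) { return w; }

/*
 * Mirrors fair.c:5697: the remote group only wins if it beats the
 * local runnable load by more than the margin.
 */
static int remote_wins(unsigned long min_runnable_load,
		       unsigned long this_runnable_load)
{
	unsigned long imbalance =
		scale_load_down(NICE_0_LOAD) * (IMBALANCE_PCT - 100) / 100;

	return min_runnable_load + imbalance < this_runnable_load;
}

int main(void)
{
	/* margin = 1024 * 25 / 100 = 256 with the values above */
	printf("2000 vs 2300 local -> %d\n", remote_wins(2000, 2300));
	printf("2000 vs 2200 local -> %d\n", remote_wins(2000, 2200));
	return 0;
}
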
imbalance        7134 kernel/sched/fair.c 	long			imbalance;
imbalance        7382 kernel/sched/fair.c 	if (env->imbalance <= 0)
imbalance        7415 kernel/sched/fair.c 		if ((load / 2) > env->imbalance)
imbalance        7422 kernel/sched/fair.c 		env->imbalance -= load;
imbalance        7438 kernel/sched/fair.c 		if (env->imbalance <= 0)
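
The hits at fair.c:7134-7438 are struct lb_env and detach_tasks(). env->imbalance acts as a pull budget: each task detached from the busiest runqueue subtracts its load, the loop stops once the budget is spent, and a task whose half-load already exceeds what is left is skipped rather than overshooting. A compilable model of that loop, with tasks reduced to bare load weights:

/*
 * Sketch of the detach_tasks() budget loop (fair.c:7382-7438).
 * Plain C model: tasks are just load weights, not task_structs.
 */
#include <stdio.h>

/*
 * Detach tasks until their combined load covers 'imbalance'.
 * Returns the number of tasks pulled.  The half-load check mirrors
 * fair.c:7415: don't pull a task that would overshoot the remaining
 * budget by more than half its own weight.
 */
static int detach_tasks(const long *load, int nr, long imbalance)
{
	int detached = 0;

	for (int i = 0; i < nr; i++) {
		if (imbalance <= 0)		/* fair.c:7382/7438 */
			break;
		if (load[i] / 2 > imbalance)	/* fair.c:7415: skip */
			continue;
		imbalance -= load[i];		/* fair.c:7422 */
		detached++;
	}
	return detached;
}

int main(void)
{
	long load[] = { 300, 900, 200, 150 };

	/* pulls the 300 and 200 weights, skips the oversized 900 */
	printf("detached %d tasks\n", detach_tasks(load, 4, 500));
	return 0;
}
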
imbalance        7938 kernel/sched/fair.c 	return group->sgc->imbalance;
imbalance        8363 kernel/sched/fair.c 	env->imbalance = sds->busiest_stat.group_load;
imbalance        8397 kernel/sched/fair.c 		env->imbalance = busiest->load_per_task;
imbalance        8435 kernel/sched/fair.c 		env->imbalance = busiest->load_per_task;
imbalance        8470 kernel/sched/fair.c 		env->imbalance = 0;
imbalance        8498 kernel/sched/fair.c 	env->imbalance = min(
imbalance        8505 kernel/sched/fair.c 		env->imbalance = max_t(long, env->imbalance,
imbalance        8515 kernel/sched/fair.c 	if (env->imbalance < busiest->load_per_task)
imbalance        8626 kernel/sched/fair.c 	return env->imbalance ? sds.busiest : NULL;
imbalance        8629 kernel/sched/fair.c 	env->imbalance = 0;
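
Lines 8363-8629 cover calculate_imbalance(), fix_small_imbalance(), and the tail of find_busiest_group(). The computed imbalance is capped twice: by how far the busiest group sits above the domain average, and by how much room the local group has below it. Results smaller than one average task are reconsidered by fix_small_imbalance() (8397, 8435, 8515), and an imbalance of zero makes find_busiest_group() return NULL (8626-8629) so no balancing happens. The sketch below keeps only the min-of-two-deltas core; the kernel additionally scales both terms by group capacity over SCHED_CAPACITY_SCALE, which is omitted here:

/*
 * Reduced model of calculate_imbalance() (around fair.c:8498-8515).
 * Capacity scaling and the fix_small_imbalance() fallback paths are
 * left out; only the min-of-two-deltas idea remains.
 */
#include <stdio.h>

static long calculate_imbalance(long busiest_avg_load,
				long local_avg_load,
				long sds_avg_load,
				long busiest_load_per_task)
{
	/* Pull no more than busiest's excess over the domain mean, and
	 * no more than local's deficit below it (fair.c:8498). */
	long excess = busiest_avg_load - sds_avg_load;
	long room   = sds_avg_load - local_avg_load;
	long imbalance = excess < room ? excess : room;

	/* fair.c:8515: moving less than one average task means the
	 * kernel reconsiders via fix_small_imbalance(); just report. */
	if (imbalance < busiest_load_per_task)
		printf("small imbalance: %ld < %ld\n",
		       imbalance, busiest_load_per_task);
	return imbalance;
}

int main(void)
{
	/* excess = 500, room = 300 -> imbalance = 300 */
	printf("imbalance = %ld\n",
	       calculate_imbalance(1500, 700, 1000, 400));
	return 0;
}
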
imbalance        8705 kernel/sched/fair.c 		if (rq->nr_running == 1 && load > env->imbalance &&
imbalance        8876 kernel/sched/fair.c 	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
imbalance        8943 kernel/sched/fair.c 		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
imbalance        8965 kernel/sched/fair.c 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
imbalance        8967 kernel/sched/fair.c 			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
imbalance        9066 kernel/sched/fair.c 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
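
The remaining fair.c hits form the group_imbalance handshake in load_balance(). When task affinity pins work to the source CPUs and env.imbalance is left unconsumed (8943-8967), the parent domain's sgc->imbalance is set, so that a later balance pass at the parent level, reading the flag back through sg_imbalanced() at 7938, classifies the group as imbalanced regardless of its raw load figures; the flag is cleared again around 9066 once balancing can make progress. A self-contained model of that set, read, clear cycle, using a stand-in struct rather than the kernel's sched_group_capacity:

/*
 * Model of the group_imbalance handshake: set on a pinned, failed
 * balance (fair.c:8965-8967), read by sg_imbalanced() (fair.c:7938),
 * cleared when balance succeeds (fair.c:9066).  'struct sgc' is a
 * stand-in, not the kernel type.
 */
#include <stdio.h>

struct sgc { int imbalance; };

/* fair.c:8967: leftover imbalance plus pinned tasks taints the group. */
static void balance_failed_pinned(struct sgc *parent_sgc, long imbalance)
{
	if (imbalance > 0)
		parent_sgc->imbalance = 1;
}

/* fair.c:7938: the classifier treats a tainted group as imbalanced
 * even when its load numbers look fine. */
static int sg_imbalanced(const struct sgc *sgc)
{
	return sgc->imbalance;
}

int main(void)
{
	struct sgc group = { 0 };

	balance_failed_pinned(&group, 120);
	printf("after failed pull: %d\n", sg_imbalanced(&group));

	group.imbalance = 0;	/* fair.c:9066: cleared on progress */
	printf("after clear:       %d\n", sg_imbalanced(&group));
	return 0;
}
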
imbalance        1401 kernel/sched/sched.h 	int			imbalance;		/* XXX unrelated to capacity but shared group state */
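
The lone sched.h hit shows where the flag lives. struct sched_group_capacity is shared by every CPU that iterates the group, which is precisely what lets a failed balance attempt on one CPU influence decisions on the others, and is also what the XXX comment is apologizing for: the field has nothing to do with capacity, it simply needed group-shared storage. A rough userspace stand-in; the neighboring fields are approximate for kernels of this vintage and may differ by version:

/*
 * Approximate shape of struct sched_group_capacity (sched.h:1401).
 * Fields other than 'imbalance' are recalled from memory for ~v4.13
 * and are illustrative only.
 */
#include <stdio.h>

struct sched_group_capacity_model {
	unsigned long	capacity;	/* group CPU capacity */
	unsigned long	min_capacity;	/* min per-CPU capacity in group */
	int		imbalance;	/* XXX unrelated to capacity but shared group state */
};

int main(void)
{
	struct sched_group_capacity_model sgc = { 1024, 1024, 0 };

	sgc.imbalance = 1;	/* one CPU's balance failure ... */
	/* ... is visible to every CPU sharing this sgc. */
	printf("shared imbalance flag: %d\n", sgc.imbalance);
	return 0;
}
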