busiest          7738 kernel/sched/fair.c 	struct sched_group *busiest;	/* Busiest group in this sd */
busiest          7758 kernel/sched/fair.c 		.busiest = NULL,
busiest          8126 kernel/sched/fair.c 	struct sg_lb_stats *busiest = &sds->busiest_stat;
busiest          8139 kernel/sched/fair.c 	if (sgs->group_type > busiest->group_type)
busiest          8142 kernel/sched/fair.c 	if (sgs->group_type < busiest->group_type)
busiest          8145 kernel/sched/fair.c 	if (sgs->avg_load <= busiest->avg_load)
busiest          8165 kernel/sched/fair.c 	    sgs->group_misfit_task_load < busiest->group_misfit_task_load)
busiest          8183 kernel/sched/fair.c 		if (!sds->busiest)
busiest          8187 kernel/sched/fair.c 		if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
busiest          8281 kernel/sched/fair.c 			sds->busiest = sg;
busiest          8356 kernel/sched/fair.c 	if (!sds->busiest)
busiest          8359 kernel/sched/fair.c 	busiest_cpu = sds->busiest->asym_prefer_cpu;
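
The update_sd_pick_busiest() fragments above decide whether a newly scanned sched_group should replace sds->busiest: a worse group_type always wins, avg_load only breaks ties within the same type, and ASYM_PACKING priority acts as a final tie-breaker. Below is a minimal userspace sketch of just that ordering, with the asym handling left out; the enum values mirror fair.c loosely and every number is invented.

#include <stdbool.h>
#include <stdio.h>

enum group_type { group_other, group_misfit_task, group_imbalanced, group_overloaded };

struct sg_stats {
	enum group_type group_type;
	unsigned long avg_load;
};

/* Return true if "candidate" should replace the current busiest: a worse
 * group_type always wins, and only on a tie does higher avg_load decide. */
static bool pick_busiest(const struct sg_stats *candidate,
			 const struct sg_stats *busiest)
{
	if (candidate->group_type > busiest->group_type)
		return true;
	if (candidate->group_type < busiest->group_type)
		return false;
	return candidate->avg_load > busiest->avg_load;
}

int main(void)
{
	struct sg_stats busiest = { group_other, 900 };
	struct sg_stats overloaded = { group_overloaded, 400 };

	/* An overloaded group beats a lightly classified one even though
	 * its avg_load is lower. */
	printf("replace busiest: %s\n",
	       pick_busiest(&overloaded, &busiest) ? "yes" : "no");
	return 0;
}
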
busiest          8381 kernel/sched/fair.c 	struct sg_lb_stats *local, *busiest;
busiest          8384 kernel/sched/fair.c 	busiest = &sds->busiest_stat;
busiest          8388 kernel/sched/fair.c 	else if (busiest->load_per_task > local->load_per_task)
busiest          8392 kernel/sched/fair.c 		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
busiest          8393 kernel/sched/fair.c 		busiest->group_capacity;
busiest          8395 kernel/sched/fair.c 	if (busiest->avg_load + scaled_busy_load_per_task >=
busiest          8397 kernel/sched/fair.c 		env->imbalance = busiest->load_per_task;
busiest          8407 kernel/sched/fair.c 	capa_now += busiest->group_capacity *
busiest          8408 kernel/sched/fair.c 			min(busiest->load_per_task, busiest->avg_load);
busiest          8414 kernel/sched/fair.c 	if (busiest->avg_load > scaled_busy_load_per_task) {
busiest          8415 kernel/sched/fair.c 		capa_move += busiest->group_capacity *
busiest          8416 kernel/sched/fair.c 			    min(busiest->load_per_task,
busiest          8417 kernel/sched/fair.c 				busiest->avg_load - scaled_busy_load_per_task);
busiest          8421 kernel/sched/fair.c 	if (busiest->avg_load * busiest->group_capacity <
busiest          8422 kernel/sched/fair.c 	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
busiest          8423 kernel/sched/fair.c 		tmp = (busiest->avg_load * busiest->group_capacity) /
busiest          8426 kernel/sched/fair.c 		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
busiest          8435 kernel/sched/fair.c 		env->imbalance = busiest->load_per_task;
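
fix_small_imbalance(), quoted in part above, works in the fixed-point capacity units used throughout fair.c: a load is multiplied by SCHED_CAPACITY_SCALE and divided by the group's capacity, so differently sized groups compare on one scale. A standalone arithmetic sketch of just that scaling step, with invented numbers:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	unsigned long load_per_task = 300;   /* per-task load, hypothetical      */
	unsigned long group_capacity = 2048; /* two full-capacity CPUs (2*1024)  */

	/* Express the per-task load relative to the group's capacity so that
	 * groups with different capacities can be compared directly. */
	unsigned long scaled = load_per_task * SCHED_CAPACITY_SCALE / group_capacity;

	printf("scaled busy load per task: %lu\n", scaled); /* prints 150 */
	return 0;
}
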
busiest          8447 kernel/sched/fair.c 	struct sg_lb_stats *local, *busiest;
busiest          8450 kernel/sched/fair.c 	busiest = &sds->busiest_stat;
busiest          8452 kernel/sched/fair.c 	if (busiest->group_type == group_imbalanced) {
busiest          8457 kernel/sched/fair.c 		busiest->load_per_task =
busiest          8458 kernel/sched/fair.c 			min(busiest->load_per_task, sds->avg_load);
busiest          8467 kernel/sched/fair.c 	if (busiest->group_type != group_misfit_task &&
busiest          8468 kernel/sched/fair.c 	    (busiest->avg_load <= sds->avg_load ||
busiest          8477 kernel/sched/fair.c 	if (busiest->group_type == group_overloaded &&
busiest          8479 kernel/sched/fair.c 		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
busiest          8480 kernel/sched/fair.c 		if (load_above_capacity > busiest->group_capacity) {
busiest          8481 kernel/sched/fair.c 			load_above_capacity -= busiest->group_capacity;
busiest          8483 kernel/sched/fair.c 			load_above_capacity /= busiest->group_capacity;
busiest          8495 kernel/sched/fair.c 	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
busiest          8499 kernel/sched/fair.c 		max_pull * busiest->group_capacity,
busiest          8504 kernel/sched/fair.c 	if (busiest->group_type == group_misfit_task) {
busiest          8506 kernel/sched/fair.c 				       busiest->group_misfit_task_load);
busiest          8515 kernel/sched/fair.c 	if (env->imbalance < busiest->load_per_task)
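
calculate_imbalance(), whose core lines appear above, caps the amount of load to move twice: never pull more than the busiest group's excess over the domain average (max_pull), and never push the local group above that same average. A worked userspace version of that min()-of-two-limits computation; all inputs below are hypothetical.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long busiest_avg_load = 1200, local_avg_load = 400, sd_avg_load = 800;
	unsigned long busiest_capacity = 1024, local_capacity = 1024;
	unsigned long load_above_capacity = ~0UL; /* "unlimited" unless overloaded */

	/* Don't pull more than the busiest group's excess over the domain
	 * average, and don't push the local group above that average either. */
	unsigned long max_pull = min_ul(busiest_avg_load - sd_avg_load,
					load_above_capacity);
	unsigned long imbalance = min_ul(max_pull * busiest_capacity,
					 (sd_avg_load - local_avg_load) * local_capacity)
				  / SCHED_CAPACITY_SCALE;

	printf("imbalance = %lu\n", imbalance); /* 400 with these numbers */
	return 0;
}
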
busiest          8534 kernel/sched/fair.c 	struct sg_lb_stats *local, *busiest;
busiest          8553 kernel/sched/fair.c 	busiest = &sds.busiest_stat;
busiest          8557 kernel/sched/fair.c 		return sds.busiest;
busiest          8560 kernel/sched/fair.c 	if (!sds.busiest || busiest->sum_nr_running == 0)
busiest          8572 kernel/sched/fair.c 	if (busiest->group_type == group_imbalanced)
busiest          8580 kernel/sched/fair.c 	    busiest->group_no_capacity)
busiest          8584 kernel/sched/fair.c 	if (busiest->group_type == group_misfit_task)
busiest          8591 kernel/sched/fair.c 	if (local->avg_load >= busiest->avg_load)
busiest          8609 kernel/sched/fair.c 		if ((busiest->group_type != group_overloaded) &&
busiest          8610 kernel/sched/fair.c 				(local->idle_cpus <= (busiest->idle_cpus + 1)))
busiest          8617 kernel/sched/fair.c 		if (100 * busiest->avg_load <=
busiest          8624 kernel/sched/fair.c 	env->src_grp_type = busiest->group_type;
busiest          8626 kernel/sched/fair.c 	return env->imbalance ? sds.busiest : NULL;
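
find_busiest_group(), the source of the fragments above, bails out of balancing when the busiest group is not sufficiently more loaded than the local one, using the domain's imbalance_pct as a percentage threshold. A small sketch of that gate; the 117% figure below is only an example value, not something read from the listing.

#include <stdbool.h>
#include <stdio.h>

/* Only balance if the busiest group carries more than imbalance_pct/100
 * times the local group's load; otherwise the move isn't worth the
 * migration cost. */
static bool worth_balancing(unsigned long busiest_avg_load,
			    unsigned long local_avg_load,
			    unsigned int imbalance_pct)
{
	return 100 * busiest_avg_load > imbalance_pct * local_avg_load;
}

int main(void)
{
	printf("110 vs 100: %s\n", worth_balancing(110, 100, 117) ? "balance" : "skip");
	printf("130 vs 100: %s\n", worth_balancing(130, 100, 117) ? "balance" : "skip");
	return 0;
}
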
busiest          8639 kernel/sched/fair.c 	struct rq *busiest = NULL, *rq;
busiest          8679 kernel/sched/fair.c 				busiest = rq;
busiest          8723 kernel/sched/fair.c 			busiest = rq;
busiest          8727 kernel/sched/fair.c 	return busiest;
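
find_busiest_queue(), of which only the skeleton is quoted above, walks the CPUs of the chosen group and keeps the runqueue whose load is highest relative to its capacity. The full comparison is not in the listing, so the following is a simplified reconstruction under that assumption, comparing cross-multiplied load/capacity ratios to avoid division; the per-CPU data is invented.

#include <stdio.h>

struct fake_rq {
	int cpu;
	unsigned long load;	/* weighted load on this runqueue              */
	unsigned long capacity;	/* CPU capacity in SCHED_CAPACITY_SCALE units  */
};

int main(void)
{
	struct fake_rq rqs[] = {
		{ 0, 600, 1024 },
		{ 1, 500,  512 },	/* smaller CPU, proportionally busier */
		{ 2, 300, 1024 },
	};
	struct fake_rq *busiest = NULL;
	unsigned long busiest_load = 0, busiest_capacity = 1;

	for (unsigned int i = 0; i < sizeof(rqs) / sizeof(rqs[0]); i++) {
		/* load/capacity > busiest_load/busiest_capacity, without division */
		if (rqs[i].load * busiest_capacity > busiest_load * rqs[i].capacity) {
			busiest = &rqs[i];
			busiest_load = rqs[i].load;
			busiest_capacity = rqs[i].capacity;
		}
	}

	printf("busiest cpu: %d\n", busiest ? busiest->cpu : -1); /* cpu 1 */
	return 0;
}
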
busiest          8836 kernel/sched/fair.c 	struct rq *busiest;
busiest          8868 kernel/sched/fair.c 	busiest = find_busiest_queue(&env, group);
busiest          8869 kernel/sched/fair.c 	if (!busiest) {
busiest          8874 kernel/sched/fair.c 	BUG_ON(busiest == env.dst_rq);
busiest          8878 kernel/sched/fair.c 	env.src_cpu = busiest->cpu;
busiest          8879 kernel/sched/fair.c 	env.src_rq = busiest;
busiest          8882 kernel/sched/fair.c 	if (busiest->nr_running > 1) {
busiest          8890 kernel/sched/fair.c 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
busiest          8893 kernel/sched/fair.c 		rq_lock_irqsave(busiest, &rf);
busiest          8894 kernel/sched/fair.c 		update_rq_clock(busiest);
busiest          8910 kernel/sched/fair.c 		rq_unlock(busiest, &rf);
busiest          8973 kernel/sched/fair.c 			__cpumask_clear_cpu(cpu_of(busiest), cpus);
busiest          9005 kernel/sched/fair.c 			raw_spin_lock_irqsave(&busiest->lock, flags);
busiest          9012 kernel/sched/fair.c 			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
busiest          9013 kernel/sched/fair.c 				raw_spin_unlock_irqrestore(&busiest->lock,
busiest          9024 kernel/sched/fair.c 			if (!busiest->active_balance) {
busiest          9025 kernel/sched/fair.c 				busiest->active_balance = 1;
busiest          9026 kernel/sched/fair.c 				busiest->push_cpu = this_cpu;
busiest          9029 kernel/sched/fair.c 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
busiest          9032 kernel/sched/fair.c 				stop_one_cpu_nowait(cpu_of(busiest),
busiest          9033 kernel/sched/fair.c 					active_load_balance_cpu_stop, busiest,
busiest          9034 kernel/sched/fair.c 					&busiest->active_balance_work);
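
The tail of the load_balance() listing is the active-balance path: the busiest runqueue's lock is taken, an active_balance flag is claimed at most once, and only after the lock is dropped is the CPU stopper kicked via stop_one_cpu_nowait(). Below is a pthread stand-in for that claim-under-lock-then-hand-off pattern; the worker thread merely plays the role of active_load_balance_cpu_stop() and every value is illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t busiest_lock = PTHREAD_MUTEX_INITIALIZER;
static int active_balance;	/* 0: no push in flight, 1: claimed          */
static int push_cpu = -1;	/* destination CPU recorded by the claimer   */

static void *active_balance_worker(void *arg)
{
	(void)arg;
	printf("pushing one task toward CPU %d\n", push_cpu);

	/* The real stopper clears rq->active_balance when it is done. */
	pthread_mutex_lock(&busiest_lock);
	active_balance = 0;
	pthread_mutex_unlock(&busiest_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	int claimed = 0;

	pthread_mutex_lock(&busiest_lock);
	if (!active_balance) {		/* only one active balance at a time */
		active_balance = 1;
		push_cpu = 3;		/* hypothetical destination CPU */
		claimed = 1;
	}
	pthread_mutex_unlock(&busiest_lock);

	/* Start the worker only after dropping the lock, mirroring how
	 * load_balance() calls stop_one_cpu_nowait() after the unlock. */
	if (claimed) {
		pthread_create(&worker, NULL, active_balance_worker, NULL);
		pthread_join(worker, NULL);
	}
	return 0;
}
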
busiest          2007 kernel/sched/sched.h static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
busiest          2009 kernel/sched/sched.h 	__acquires(busiest->lock)
busiest          2013 kernel/sched/sched.h 	double_rq_lock(this_rq, busiest);
busiest          2026 kernel/sched/sched.h static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
busiest          2028 kernel/sched/sched.h 	__acquires(busiest->lock)
busiest          2033 kernel/sched/sched.h 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
busiest          2034 kernel/sched/sched.h 		if (busiest < this_rq) {
busiest          2036 kernel/sched/sched.h 			raw_spin_lock(&busiest->lock);
busiest          2041 kernel/sched/sched.h 			raw_spin_lock_nested(&busiest->lock,
busiest          2052 kernel/sched/sched.h static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
busiest          2060 kernel/sched/sched.h 	return _double_lock_balance(this_rq, busiest);
busiest          2063 kernel/sched/sched.h static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
busiest          2064 kernel/sched/sched.h 	__releases(busiest->lock)
busiest          2066 kernel/sched/sched.h 	raw_spin_unlock(&busiest->lock);
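
The sched.h half of the listing is the lock-ordering helper used when balancing needs both runqueue locks. _double_lock_balance() trylocks the busiest lock and, if that fails while the locks would otherwise be taken in conflicting orders, drops this_rq's lock and re-acquires both lowest-address-first. A userspace pthread sketch of the ordering rule only; mutexes stand in for raw spinlocks, and the lockdep nesting annotation and the "lock was dropped" return value are deliberately omitted.

#include <pthread.h>
#include <stdio.h>

/* Take both locks without ABBA deadlock: trylock opportunistically, and on
 * failure fall back to a fixed (address) order, releasing and re-acquiring
 * this_lock if needed. Assumes this_lock is already held on entry, as in
 * _double_lock_balance(). */
static void double_lock_balance(pthread_mutex_t *this_lock,
				pthread_mutex_t *busiest_lock)
{
	if (pthread_mutex_trylock(busiest_lock) == 0)
		return;			/* fast path: no contention */

	if (busiest_lock < this_lock) {
		/* Wrong order: back out and take the lower address first. */
		pthread_mutex_unlock(this_lock);
		pthread_mutex_lock(busiest_lock);
		pthread_mutex_lock(this_lock);
	} else {
		pthread_mutex_lock(busiest_lock);
	}
}

static pthread_mutex_t rq_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq_b = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&rq_a);		/* "this_rq" lock held on entry   */
	double_lock_balance(&rq_a, &rq_b);
	printf("both runqueue locks held\n");
	pthread_mutex_unlock(&rq_b);		/* double_unlock_balance(): drop  */
	pthread_mutex_unlock(&rq_a);		/* busiest first, then this_rq    */
	return 0;
}
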