avg_load           36 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	unsigned int avg_load;
avg_load          140 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
avg_load          141 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	data->avg_load /= data->p_smooth + 1;
avg_load          143 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 		   utilization, data->avg_load);
avg_load          147 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
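
The gk20a PMU hits above smooth the measured GPU utilization before a DVFS target state is chosen: the running average is weighted by p_smooth, one fresh utilization sample is folded in, and the sum is divided by p_smooth + 1. A minimal standalone sketch of that filter, using illustrative stand-in names rather than the nouveau ones:

struct dvfs_smoother {                  /* illustrative stand-in, not the nouveau struct */
        unsigned int p_smooth;          /* weight given to the previous average */
        unsigned int avg_load;          /* smoothed utilization */
};

/* avg_load <- (p_smooth * avg_load + utilization) / (p_smooth + 1), i.e. a
 * moving average that gives each new sample a weight of 1/(p_smooth + 1). */
static void smooth_load(struct dvfs_smoother *data, unsigned int utilization)
{
        data->avg_load = (data->p_smooth * data->avg_load) + utilization;
        data->avg_load /= data->p_smooth + 1;
}
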
avg_load         5594 kernel/sched/fair.c 		unsigned long load, avg_load, runnable_load;
avg_load         5611 kernel/sched/fair.c 		avg_load = 0;
avg_load         5619 kernel/sched/fair.c 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
avg_load         5628 kernel/sched/fair.c 		avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
avg_load         5635 kernel/sched/fair.c 			this_avg_load = avg_load;
avg_load         5644 kernel/sched/fair.c 				min_avg_load = avg_load;
avg_load         5647 kernel/sched/fair.c 				   (100*min_avg_load > imbalance_scale*avg_load)) {
avg_load         5652 kernel/sched/fair.c 				min_avg_load = avg_load;
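
The scheduler hits above sum the per-CPU cfs_rq load averages across a group and scale the total by SCHED_CAPACITY_SCALE; the divisor is cut off at that hit, so the sketch below assumes it is the group's capacity, matching the sgs->avg_load computation shown later in this listing. The this_avg_load / min_avg_load bookkeeping then keeps the group with the smallest such average, subject to the 100*min_avg_load > imbalance_scale*avg_load margin.

#define SCHED_CAPACITY_SCALE 1024UL     /* kernel value, repeated so the sketch stands alone */

/* Capacity-scaled average load of a group: sum the per-CPU load averages and
 * normalize by the group's compute capacity so unequal groups compare fairly.
 * cpu_load[] stands in for cfs_rq_load_avg() of each CPU in the group. */
static unsigned long group_avg_load(const unsigned long *cpu_load, int nr_cpus,
                                    unsigned long group_capacity)
{
        unsigned long avg_load = 0;
        int i;

        for (i = 0; i < nr_cpus; i++)
                avg_load += cpu_load[i];

        return (avg_load * SCHED_CAPACITY_SCALE) / group_capacity;
}
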
avg_load         7716 kernel/sched/fair.c 	unsigned long avg_load; /*Avg load across the CPUs of the group */
avg_load         7743 kernel/sched/fair.c 	unsigned long avg_load;	/* Average load across all groups in sd */
avg_load         7764 kernel/sched/fair.c 			.avg_load = 0UL,
avg_load         8097 kernel/sched/fair.c 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
avg_load         8145 kernel/sched/fair.c 	if (sgs->avg_load <= busiest->avg_load)
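
The struct fields and the two hits just above belong together: each group's statistics carry a capacity-scaled average load (group_load * SCHED_CAPACITY_SCALE / group_capacity), and the busiest-group selection bails out early when a candidate's avg_load does not exceed the current busiest. A condensed sketch with illustrative stand-in names for the fair.c internals:

#define SCHED_CAPACITY_SCALE 1024UL

struct group_stats {                    /* stand-in for the per-group statistics */
        unsigned long group_load;       /* summed load of the group */
        unsigned long group_capacity;   /* compute capacity of the group */
        unsigned long avg_load;         /* capacity-scaled average, as in the hit above */
};

static void update_group_stats(struct group_stats *sgs)
{
        sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
}

/* Returns 1 when sgs should replace the current busiest candidate, mirroring
 * the early "sgs->avg_load <= busiest->avg_load" bail-out in the hit above. */
static int group_is_busier(const struct group_stats *sgs,
                           const struct group_stats *busiest)
{
        if (sgs->avg_load <= busiest->avg_load)
                return 0;
        return 1;
}
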
avg_load         8395 kernel/sched/fair.c 	if (busiest->avg_load + scaled_busy_load_per_task >=
avg_load         8396 kernel/sched/fair.c 	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
avg_load         8408 kernel/sched/fair.c 			min(busiest->load_per_task, busiest->avg_load);
avg_load         8410 kernel/sched/fair.c 			min(local->load_per_task, local->avg_load);
avg_load         8414 kernel/sched/fair.c 	if (busiest->avg_load > scaled_busy_load_per_task) {
avg_load         8417 kernel/sched/fair.c 				busiest->avg_load - scaled_busy_load_per_task);
avg_load         8421 kernel/sched/fair.c 	if (busiest->avg_load * busiest->group_capacity <
avg_load         8423 kernel/sched/fair.c 		tmp = (busiest->avg_load * busiest->group_capacity) /
avg_load         8430 kernel/sched/fair.c 		    min(local->load_per_task, local->avg_load + tmp);
avg_load         8458 kernel/sched/fair.c 			min(busiest->load_per_task, sds->avg_load);
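
The fix-small-imbalance hits above handle the case where the imbalance is on the order of a single task: capacities "now" and "after a hypothetical move" are built from min(load_per_task, avg_load) terms, but the entry check alone is easy to state in isolation. A hedged sketch of that check only; what the caller does when it holds is not visible in these hits:

/* True when the busiest group, even after granting the local group a margin
 * of imbn tasks of capacity-scaled per-task load, would still be at least as
 * loaded; in that case moving a single task is treated as sufficient. */
static int one_task_is_enough(unsigned long busiest_avg_load,
                              unsigned long local_avg_load,
                              unsigned long scaled_busy_load_per_task,
                              unsigned int imbn)
{
        return busiest_avg_load + scaled_busy_load_per_task >=
               local_avg_load + scaled_busy_load_per_task * imbn;
}
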
avg_load         8468 kernel/sched/fair.c 	    (busiest->avg_load <= sds->avg_load ||
avg_load         8469 kernel/sched/fair.c 	     local->avg_load >= sds->avg_load)) {
avg_load         8495 kernel/sched/fair.c 	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
avg_load         8500 kernel/sched/fair.c 		(sds->avg_load - local->avg_load) * local->group_capacity
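
The calculate_imbalance hits bound how much load may be moved: nothing is pulled when the busiest group is already at or below the domain average (or the local group already at or above it), and otherwise the pull is capped both by how far the busiest group sits above the domain average (max_pull, itself capped by load_above_capacity) and by how far the local group sits below it. The pairing with the group capacities and the final scale-down are not visible in these hits and are assumptions in the sketch below:

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

/* Amount of load to move from the busiest group to the local one. */
static unsigned long sketch_imbalance(unsigned long busiest_avg, unsigned long busiest_capacity,
                                      unsigned long local_avg, unsigned long local_capacity,
                                      unsigned long sd_avg, unsigned long load_above_capacity)
{
        unsigned long max_pull;

        /* Balanced enough: the busiest group is not above the domain average,
         * or the local group is not below it (the kernel falls back to the
         * small-imbalance handling here; this sketch simply moves nothing). */
        if (busiest_avg <= sd_avg || local_avg >= sd_avg)
                return 0;

        /* Don't pull the busiest group below the domain average... */
        max_pull = min_ul(busiest_avg - sd_avg, load_above_capacity);

        /* ...nor push the local group above it; weighting both sides by the
         * group capacities is an assumed pairing (see the lead-in). */
        return min_ul(max_pull * busiest_capacity,
                      (sd_avg - local_avg) * local_capacity) / SCHED_CAPACITY_SCALE;
}
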
avg_load         8564 kernel/sched/fair.c 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
avg_load         8591 kernel/sched/fair.c 	if (local->avg_load >= busiest->avg_load)
avg_load         8598 kernel/sched/fair.c 	if (local->avg_load >= sds.avg_load)
avg_load         8617 kernel/sched/fair.c 		if (100 * busiest->avg_load <=
avg_load         8618 kernel/sched/fair.c 				env->sd->imbalance_pct * local->avg_load)
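
Finally, the find-busiest-group hits compute the domain-wide average (SCHED_CAPACITY_SCALE * total_load divided by what is presumably the total capacity; the divisor is cut off at that hit) and then take several "already balanced" exits: the local group is at least as loaded as the busiest, the local group is at or above the domain average, or the busiest is within the imbalance_pct margin of the local group. With a typical imbalance_pct of 125, a local avg_load of 800 means the busiest group must exceed 1000 before balancing is attempted. A condensed sketch of those exits:

/* Condensed "is there anything worth balancing?" decision built from the
 * exits visible above; the kernel gates the imbalance_pct margin on the
 * balancing CPU's idle state, which is omitted here. */
static int should_balance(unsigned long local_avg, unsigned long busiest_avg,
                          unsigned long sd_avg, unsigned int imbalance_pct)
{
        /* The local group is already at least as loaded as the busiest. */
        if (local_avg >= busiest_avg)
                return 0;

        /* The local group is already at or above the domain-wide average. */
        if (local_avg >= sd_avg)
                return 0;

        /* The busiest group must beat the local one by the imbalance_pct
         * margin: with imbalance_pct == 125, a local avg_load of 800 needs a
         * busiest avg_load above 1000 before balancing is attempted. */
        if (100 * busiest_avg <= imbalance_pct * local_avg)
                return 0;

        return 1;
}
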