Searched refs: SCHED_CAPACITY_SCALE (results 1 - 5 of 5), sorted by relevance

/linux-4.1.27/arch/arm/kernel/topology.c
    65   * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
    69   * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
   144   * SCHED_CAPACITY_SCALE, which is the default value, but with the
   310  set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);  [for_each_possible_cpu()]
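The topology.c hits describe how the ARM code normalizes per-CPU capacity: a uniform system falls back to the default SCHED_CAPACITY_SCALE, while an asymmetric one keeps cpu_scale inside 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2. Below is a minimal user-space sketch of that normalization idea only; raw_capacity[] and NR_CPUS_DEMO are made-up sample values, and the mid-point reference is an approximation of what topology.c does, not the kernel's exact formula.

    /* Illustration only, not kernel code: normalize raw per-CPU capacities
     * against a mid-range reference so a mid-range CPU lands on
     * SCHED_CAPACITY_SCALE and the others stay below 3*SCHED_CAPACITY_SCALE/2. */
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT    10
    #define SCHED_CAPACITY_SCALE    (1L << SCHED_CAPACITY_SHIFT)   /* 1024 */
    #define NR_CPUS_DEMO            4

    int main(void)
    {
        /* e.g. DT efficiency * clock-frequency per CPU (big.LITTLE-like) */
        unsigned long raw_capacity[NR_CPUS_DEMO] = { 2048, 2048, 3072, 3072 };
        unsigned long min = -1UL, max = 0, mid;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
            if (raw_capacity[cpu] < min)
                min = raw_capacity[cpu];
            if (raw_capacity[cpu] > max)
                max = raw_capacity[cpu];
        }
        /* reference point: a mid-range CPU maps to SCHED_CAPACITY_SCALE */
        mid = (min + max) / 2;

        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
            printf("cpu%d: cpu_scale = %lu\n", cpu,
                   raw_capacity[cpu] * SCHED_CAPACITY_SCALE / mid);
        return 0;
    }

With the sample values above the little CPUs come out near 819 and the big ones near 1228, both within the bound quoted in the line-65 comment.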
/linux-4.1.27/kernel/sched/fair.c
  1161  smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
  1165  DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
  4680  avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
  6025  return SCHED_CAPACITY_SCALE;  [default_scale_cpu_capacity()]
  6054  if (likely(used < SCHED_CAPACITY_SCALE))  [scale_rt_capacity()]
  6055  return SCHED_CAPACITY_SCALE - used;  [scale_rt_capacity()]
  6062  unsigned long capacity = SCHED_CAPACITY_SCALE;  [update_cpu_capacity()]
  6298  sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
  6506  SCHED_CAPACITY_SCALE);  [check_asym_packing()]
  6535  (busiest->load_per_task * SCHED_CAPACITY_SCALE) /  [fix_small_imbalance()]
  6554  capa_now /= SCHED_CAPACITY_SCALE;  [fix_small_imbalance()]
  6565  busiest->load_per_task * SCHED_CAPACITY_SCALE) {  [fix_small_imbalance()]
  6569  tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /  [fix_small_imbalance()]
  6574  capa_move /= SCHED_CAPACITY_SCALE;  [fix_small_imbalance()]
  6642  ) / SCHED_CAPACITY_SCALE;  [calculate_imbalance()]
  6697  sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)  [find_busiest_group()]
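Most of the fair.c hits are instances of one fixed-point idiom: multiply a load by SCHED_CAPACITY_SCALE before dividing by a capacity so that groups with different compute capacity compare on a common scale, or subtract already-consumed capacity from SCHED_CAPACITY_SCALE as scale_rt_capacity() does. A small self-contained sketch of that pattern follows; group_load, group_capacity and rt_used are invented sample numbers, not values taken from the kernel.

    /* Illustration of the SCHED_CAPACITY_SCALE fixed-point pattern. */
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT    10
    #define SCHED_CAPACITY_SCALE    (1L << SCHED_CAPACITY_SHIFT)   /* 1024 */

    int main(void)
    {
        unsigned long group_load = 1800;        /* summed weighted load        */
        unsigned long group_capacity = 2048;    /* e.g. two unthrottled CPUs   */
        unsigned long rt_used = 256;            /* capacity consumed by RT/IRQ */
        unsigned long avg_load, cfs_capacity;

        /* like sgs->avg_load: load per SCHED_CAPACITY_SCALE of capacity */
        avg_load = (group_load * SCHED_CAPACITY_SCALE) / group_capacity;

        /* like scale_rt_capacity(): what remains for CFS, never zero */
        cfs_capacity = rt_used < SCHED_CAPACITY_SCALE ?
                       SCHED_CAPACITY_SCALE - rt_used : 1;

        printf("avg_load=%lu cfs_capacity=%lu\n", avg_load, cfs_capacity);
        return 0;
    }

With these numbers avg_load comes out as 900 and cfs_capacity as 768, i.e. 1800 units of load spread over two CPUs, and three quarters of one CPU left for CFS.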
/linux-4.1.27/kernel/sched/sched.h
  1396  return SCHED_CAPACITY_SCALE;  [arch_scale_freq_capacity()]
/linux-4.1.27/kernel/sched/core.c
  5461  if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {  [sched_domain_debug_one()]
  5923  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);  [for_each_cpu()]
  7216  rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;  [for_each_possible_cpu()]
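The core.c hits show the defaults before any scaling is applied: each runqueue starts with cpu_capacity = cpu_capacity_orig = SCHED_CAPACITY_SCALE, and a freshly built sched group spanning N CPUs starts with N * SCHED_CAPACITY_SCALE capacity (for example, a 4-CPU group starts at 4 * 1024 = 4096). The per-CPU scaling from topology.c and the RT-pressure adjustment in fair.c then pull these figures down from that baseline.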
/linux-4.1.27/include/linux/sched.h
   901  #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)  [macro]
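SCHED_CAPACITY_SHIFT is 10 in this kernel, so SCHED_CAPACITY_SCALE expands to 1L << 10 = 1024: all of the capacity and load figures in the hits above are expressed in units of 1/1024 of a fully available CPU running at its highest capacity.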
