Searched refs:SCHED_CAPACITY_SCALE (Results 1 – 5 of 5) sorted by relevance
310 set_capacity_scale(cpu, SCHED_CAPACITY_SCALE); in init_cpu_topology()
1167 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity); in update_numa_stats()
1171 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE)); in update_numa_stats()
4770 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity; in find_idlest_group()
6099 if (likely(used < SCHED_CAPACITY_SCALE)) in scale_rt_capacity()
6100 return SCHED_CAPACITY_SCALE - used; in scale_rt_capacity()
6336 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; in update_sg_lb_stats()
6544 SCHED_CAPACITY_SCALE); in check_asym_packing()
6573 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
6592 capa_now /= SCHED_CAPACITY_SCALE; in fix_small_imbalance()
6603 busiest->load_per_task * SCHED_CAPACITY_SCALE) { in fix_small_imbalance()
[all …]
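The hits above all use SCHED_CAPACITY_SCALE as a fixed-point unit: a group's load is multiplied by the scale and divided by its capacity so that groups of unequal capacity compare on equal footing, and a CPU's remaining CFS capacity is the scale minus whatever higher-priority pressure consumed. A minimal standalone sketch of that arithmetic, assuming the kernel's SCHED_CAPACITY_SHIFT of 10 (the helper names and main() are illustrative, not kernel code):

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)	/* 1024 */

	/* Capacity-normalized load, as in the update_sg_lb_stats() and
	 * find_idlest_group() hits: a half-capacity group running the same
	 * raw load reports twice the value, so the balancer sees it as busier. */
	static long group_avg_load(long group_load, long group_capacity)
	{
		return (group_load * SCHED_CAPACITY_SCALE) / group_capacity;
	}

	/* Capacity left for CFS after RT/IRQ pressure, mirroring the
	 * scale_rt_capacity() hits at 6099-6100 (the kernel falls back
	 * to 1, never 0, when pressure exceeds the scale). */
	static long cfs_capacity(long used)
	{
		if (used < SCHED_CAPACITY_SCALE)
			return SCHED_CAPACITY_SCALE - used;
		return 1;
	}

	int main(void)
	{
		/* Load 512 on a half-capacity (512) group normalizes to 1024. */
		printf("%ld\n", group_avg_load(512, SCHED_CAPACITY_SCALE / 2));
		/* 256/1024 of the CPU eaten by RT work leaves 768 for CFS. */
		printf("%ld\n", cfs_capacity(256));
		return 0;
	}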
1403 return SCHED_CAPACITY_SCALE; in arch_scale_freq_capacity()
1414 return SCHED_CAPACITY_SCALE; in arch_scale_cpu_capacity()
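These two hits are the fallback hooks: when an architecture supplies no frequency- or CPU-invariance data, every CPU is assumed to run at full capacity. A sketch of that default shape, with struct sched_domain left opaque and the parameter list assumed from kernels of this vintage:

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

	struct sched_domain;	/* fully defined in the kernel; opaque here */

	/* No frequency-scaling information: report full capacity. */
	static inline unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
	{
		return SCHED_CAPACITY_SCALE;
	}

	/* No per-CPU capacity information: all CPUs are equally capable. */
	static inline unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
	{
		return SCHED_CAPACITY_SCALE;
	}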
5718 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { in sched_domain_debug_one()
6180 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in build_overlap_sched_groups()
7474 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
930 #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) macro
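The definition at 930 makes the scale a power of two, so the multiplications and divisions in the hits above can be strength-reduced to shifts. With SCHED_CAPACITY_SHIFT of 10 (its value in kernels of this era; assumed here, since the search did not match that macro) it expands to 1024:

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10	/* assumed; not part of the search results */
	#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

	int main(void)
	{
		printf("SCHED_CAPACITY_SCALE = %ld\n", SCHED_CAPACITY_SCALE);	/* 1024 */
		return 0;
	}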