Lines Matching refs:load

598 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
599 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
631 struct load_weight *load; in sched_slice() local
635 load = &cfs_rq->load; in sched_slice()
638 lw = cfs_rq->load; in sched_slice()
640 update_load_add(&lw, se->load.weight); in sched_slice()
641 load = &lw; in sched_slice()
643 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
683 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
1126 unsigned long load; member
1149 ns->load += weighted_cpuload(cpu); in update_numa_stats()
1235 orig_src_load = env->src_stats.load; in load_too_imbalanced()
1236 orig_dst_load = env->dst_stats.load; in load_too_imbalanced()
1261 long load; in task_numa_compare() local
1349 load = task_h_load(env->p); in task_numa_compare()
1350 dst_load = env->dst_stats.load + load; in task_numa_compare()
1351 src_load = env->src_stats.load - load; in task_numa_compare()
1371 load = task_h_load(cur); in task_numa_compare()
1372 dst_load -= load; in task_numa_compare()
1373 src_load += load; in task_numa_compare()
1424 if (src->load * dst->compute_capacity * env->imbalance_pct > in numa_has_capacity()
1426 dst->load * src->compute_capacity * 100) in numa_has_capacity()
1712 delta = p->se.avg.load_sum / p->se.load.weight; in numa_get_avg_runtime()
2333 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2335 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2350 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2352 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
2373 tg_weight += cfs_rq->load.weight; in calc_tg_weight()
2380 long tg_weight, load, shares; in calc_cfs_shares() local
2383 load = cfs_rq->load.weight; in calc_cfs_shares()
2385 shares = (tg->shares * load); in calc_cfs_shares()
2412 update_load_set(&se->load, weight); in reweight_entity()
2431 if (likely(se->load.weight == tg->shares)) in update_cfs_shares()
2705 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq); in update_cfs_rq_load_avg()
2727 se->on_rq * scale_load_down(se->load.weight), in update_load_avg()
2764 &se->avg, se->on_rq * scale_load_down(se->load.weight), in detach_entity_load_avg()
2784 se->on_rq * scale_load_down(se->load.weight), in enqueue_entity_load_avg()
3188 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
3581 if (qcfs_rq->load.weight) in throttle_cfs_rq()
3631 if (!cfs_rq->load.weight) in unthrottle_cfs_rq()
4204 if (cfs_rq->load.weight) { in dequeue_task_fair()
4286 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) in decay_load_missed() argument
4291 return load; in decay_load_missed()
4297 return load >> missed_updates; in decay_load_missed()
4301 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; in decay_load_missed()
4306 return load; in decay_load_missed()
4372 unsigned long load = weighted_cpuload(cpu_of(this_rq)); in update_idle_cpu_load() local
4378 if (load || curr_jiffies == this_rq->last_load_update_tick) in update_idle_cpu_load()
4384 __update_cpu_load(this_rq, load, pending_updates); in update_idle_cpu_load()
4418 unsigned long load = weighted_cpuload(cpu_of(this_rq)); in update_cpu_load_active() local
4423 __update_cpu_load(this_rq, load, 1); in update_cpu_load_active()
4663 s64 this_load, load; in wake_affine() local
4673 load = source_load(prev_cpu, idx); in wake_affine()
4686 load += effective_load(tg, prev_cpu, 0, -weight); in wake_affine()
4711 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); in wake_affine()
4744 unsigned long load, avg_load; in find_idlest_group() local
4762 load = source_load(i, load_idx); in find_idlest_group()
4764 load = target_load(i, load_idx); in find_idlest_group()
4766 avg_load += load; in find_idlest_group()
4791 unsigned long load, min_load = ULONG_MAX; in find_idlest_cpu() local
4823 load = weighted_cpuload(i); in find_idlest_cpu()
4824 if (load < min_load || (load == min_load && i == this_cpu)) { in find_idlest_cpu()
4825 min_load = load; in find_idlest_cpu()
5771 unsigned long load; in detach_tasks() local
5804 load = task_h_load(p); in detach_tasks()
5806 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
5809 if ((load / 2) > env->imbalance) in detach_tasks()
5816 env->imbalance -= load; in detach_tasks()
5930 unsigned long load; in update_cfs_rq_h_load() local
5949 load = cfs_rq->h_load; in update_cfs_rq_h_load()
5950 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
5953 cfs_rq->h_load = load; in update_cfs_rq_h_load()
6304 unsigned long load; in update_sg_lb_stats() local
6314 load = target_load(i, load_idx); in update_sg_lb_stats()
6316 load = source_load(i, load_idx); in update_sg_lb_stats()
6318 sgs->group_load += load; in update_sg_lb_stats()
8197 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
8263 if (rq->cfs.load.weight) in get_rr_interval_fair()
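
Most of the matches above revolve around the same few pieces of CFS load-weight arithmetic: per-entity weights accumulated into cfs_rq->load through update_load_add()/update_load_sub() (lines 2333-2352), and runtime scaled against those weights through __calc_delta() (lines 598-599 and 643). Below is a minimal, self-contained sketch of that arithmetic, not the kernel implementation: the names mirror kernel/sched/fair.c, but __calc_delta() here uses a plain 64-bit division instead of the kernel's cached fixed-point inverse weight, and NICE_0_LOAD is hard-coded to 1024 purely for illustration.

/*
 * Simplified sketch of the load-weight arithmetic referenced above.
 * Names follow kernel/sched/fair.c, bodies are reduced for clarity.
 */
#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD 1024UL  /* illustration only; the real value depends on load resolution */

struct load_weight {
	unsigned long weight;
	uint32_t inv_weight;    /* unused here; the kernel caches a fixed-point inverse */
};

/* cfs_rq->load accumulates the weights of queued entities
 * (compare account_entity_enqueue()/account_entity_dequeue() above). */
static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

/* Stand-in for __calc_delta(): delta scaled by weight / lw->weight. */
static uint64_t __calc_delta(uint64_t delta, unsigned long weight,
			     struct load_weight *lw)
{
	return delta * weight / lw->weight;
}

/* Mirrors calc_delta_fair() at lines 598-599: vruntime advances more
 * slowly for heavier (higher-weight) entities. */
static uint64_t calc_delta_fair(uint64_t delta, struct load_weight *se_load)
{
	if (se_load->weight != NICE_0_LOAD)
		delta = __calc_delta(delta, NICE_0_LOAD, se_load);
	return delta;
}

int main(void)
{
	struct load_weight cfs_rq_load = { 0, 0 };
	struct load_weight heavy = { 2 * NICE_0_LOAD, 0 };  /* twice the nice-0 weight */

	update_load_add(&cfs_rq_load, NICE_0_LOAD);
	update_load_add(&cfs_rq_load, heavy.weight);

	/* 6 ms of actual runtime charged to the heavy entity. */
	uint64_t delta_exec = 6000000;
	printf("runqueue weight: %lu\n", cfs_rq_load.weight);
	printf("vruntime delta:  %llu ns (actual runtime %llu ns)\n",
	       (unsigned long long)calc_delta_fair(delta_exec, &heavy),
	       (unsigned long long)delta_exec);

	update_load_sub(&cfs_rq_load, heavy.weight);
	return 0;
}

Compiled stand-alone, this prints a vruntime delta of half the actual runtime for an entity carrying twice the nice-0 weight, which is the proportional-fairness property the calc_delta_fair() and sched_slice() lines above implement.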