Lines matching refs: load (kernel/sched/fair.c; numbers are source line numbers)

603 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
604 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
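
Context for lines 603-604: calc_delta_fair() converts a wall-clock runtime delta into weighted (virtual) time, and the common nice-0 case skips the scaling entirely. A minimal sketch of the surrounding function, reconstructed from kernel/sched/fair.c of roughly the v3.14-v4.x era (illustrative, not verbatim):

    static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
    {
            /* nice-0 tasks advance vruntime at wall-clock rate */
            if (unlikely(se->load.weight != NICE_0_LOAD))
                    delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

            return delta;
    }
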
641 struct load_weight *load; in sched_slice() local
645 load = &cfs_rq->load; in sched_slice()
648 lw = cfs_rq->load; in sched_slice()
650 update_load_add(&lw, se->load.weight); in sched_slice()
651 load = &lw; in sched_slice()
653 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
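
Lines 641-653 are the core of sched_slice(): the entity's slice is its weight's proportion of the scheduling period, applied at every level of the group hierarchy. If the entity is not yet on the queue, its weight is first added to a local copy lw so the slice is sized as if it were enqueued. A sketch of the whole loop, reconstructed under the same version assumption:

    static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
    {
            u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

            for_each_sched_entity(se) {
                    struct load_weight *load;
                    struct load_weight lw;

                    cfs_rq = cfs_rq_of(se);
                    load = &cfs_rq->load;

                    if (unlikely(!se->on_rq)) {
                            lw = cfs_rq->load;

                            /* size the slice as if se were already enqueued */
                            update_load_add(&lw, se->load.weight);
                            load = &lw;
                    }
                    slice = __calc_delta(slice, se->load.weight, load);
            }
            return slice;
    }
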
1120 unsigned long load; member
1143 ns->load += weighted_cpuload(cpu); in update_numa_stats()
1234 orig_src_load = env->src_stats.load; in load_too_imbalanced()
1266 long load; in task_numa_compare() local
1354 load = task_h_load(env->p); in task_numa_compare()
1355 dst_load = env->dst_stats.load + load; in task_numa_compare()
1356 src_load = env->src_stats.load - load; in task_numa_compare()
1376 load = task_h_load(cur); in task_numa_compare()
1377 dst_load -= load; in task_numa_compare()
1378 src_load += load; in task_numa_compare()
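
Lines 1354-1378 are task_numa_compare()'s what-if arithmetic: the hierarchical load of the migrating task p is added to the destination node's stats and subtracted from the source's, and if a task cur would be swapped back the adjustment runs the other way. A condensed sketch (reconstructed; locking and the surrounding placement policy omitted):

    load = task_h_load(env->p);
    dst_load = env->dst_stats.load + load;
    src_load = env->src_stats.load - load;

    if (cur) {
            /* in a task swap, cur moves in the opposite direction */
            load = task_h_load(cur);
            dst_load -= load;
            src_load += load;
    }

    if (load_too_imbalanced(src_load, dst_load, env))
            goto unlock;
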
2292 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2294 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2309 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2311 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
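
Lines 2292-2311 form a bookkeeping pair: enqueue adds the entity's weight to its cfs_rq and, for a top-level entity, to the runqueue as a whole; dequeue mirrors both with update_load_sub(). Sketch of the enqueue side (reconstructed; SMP/NUMA list handling elided):

    static void
    account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
    {
            update_load_add(&cfs_rq->load, se->load.weight);
            if (!parent_entity(se))
                    /* top of the hierarchy: charge the rq-wide load too */
                    update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
            cfs_rq->nr_running++;
    }
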
2332 tg_weight += cfs_rq->load.weight; in calc_tg_weight()
2339 long tg_weight, load, shares; in calc_cfs_shares() local
2342 load = cfs_rq->load.weight; in calc_cfs_shares()
2344 shares = (tg->shares * load); in calc_cfs_shares()
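
calc_cfs_shares() (lines 2339-2344) hands this CPU's cfs_rq its proportional slice of the task group's shares, shares = tg->shares * load / tg_weight, where tg_weight (line 2332) is the group-wide weight with this cfs_rq's stale averaged contribution replaced by its current load.weight. Reconstructed sketch including the clamping that follows:

    static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
    {
            long tg_weight, load, shares;

            tg_weight = calc_tg_weight(tg, cfs_rq);
            load = cfs_rq->load.weight;

            shares = (tg->shares * load);
            if (tg_weight)
                    shares /= tg_weight;

            if (shares < MIN_SHARES)
                    shares = MIN_SHARES;
            if (shares > tg->shares)
                    shares = tg->shares;

            return shares;
    }
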
2371 update_load_set(&se->load, weight); in reweight_entity()
2390 if (likely(se->load.weight == tg->shares)) in update_cfs_shares()
2723 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight); in __update_task_entity_contrib()
3254 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
3656 if (qcfs_rq->load.weight) in throttle_cfs_rq()
3698 if (!cfs_rq->load.weight) in unthrottle_cfs_rq()
4292 if (cfs_rq->load.weight) { in dequeue_task_fair()
4498 w = se->my_q->load.weight + wl; in effective_load()
4519 wl -= se->load.weight; in effective_load()
4566 s64 this_load, load; in wake_affine() local
4583 load = source_load(prev_cpu, idx); in wake_affine()
4593 weight = current->se.load.weight; in wake_affine()
4596 load += effective_load(tg, prev_cpu, 0, -weight); in wake_affine()
4600 weight = p->se.load.weight; in wake_affine()
4621 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); in wake_affine()
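
In wake_affine() (lines 4566-4621), effective_load() estimates what a hierarchy's load would become if a given weight were added to or removed from one CPU, and the decision compares imbalance- and capacity-scaled effective loads of the waking CPU against the task's previous CPU. A condensed sketch of the final comparison (reconstructed; the scaling factors changed across versions, so treat the exact arithmetic as an assumption):

    this_eff_load = 100;
    this_eff_load *= capacity_of(prev_cpu);

    prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
    prev_eff_load *= capacity_of(this_cpu);

    if (this_load > 0) {
            this_eff_load *= this_load +
                    effective_load(tg, this_cpu, weight, weight);

            prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
    }

    balanced = this_eff_load <= prev_eff_load;
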
4654 unsigned long load, avg_load; in find_idlest_group() local
4672 load = source_load(i, load_idx); in find_idlest_group()
4674 load = target_load(i, load_idx); in find_idlest_group()
4676 avg_load += load; in find_idlest_group()
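
Note the bias at lines 4672-4674: the local group is read with source_load() (the lower, conservative estimate) while other groups are read with target_load() (the higher one), which makes staying local look cheaper. The sum is then scaled by group capacity; a sketch of the tally (reconstructed, field names as in the ~v3.19 source):

    /* Tally up the load of all CPUs in the group */
    avg_load = 0;
    for_each_cpu(i, sched_group_cpus(group)) {
            /* Bias balancing toward cpus of our domain */
            if (local_group)
                    load = source_load(i, load_idx);
            else
                    load = target_load(i, load_idx);

            avg_load += load;
    }

    /* Adjust by relative CPU capacity of the group */
    avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
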
4701 unsigned long load, min_load = ULONG_MAX; in find_idlest_cpu() local
4733 load = weighted_cpuload(i); in find_idlest_cpu()
4734 if (load < min_load || (load == min_load && i == this_cpu)) { in find_idlest_cpu()
4735 min_load = load; in find_idlest_cpu()
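
find_idlest_cpu() (lines 4701-4735) then scans the chosen group for the least-loaded allowed CPU, breaking ties in favor of this_cpu so an equally good choice avoids a migration. Simplified sketch (reconstructed; later kernels also prefer the CPU in the shallowest idle state here):

    static int
    find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
    {
            unsigned long load, min_load = ULONG_MAX;
            int idlest = -1;
            int i;

            /* Traverse only the allowed CPUs */
            for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
                    load = weighted_cpuload(i);

                    if (load < min_load || (load == min_load && i == this_cpu)) {
                            min_load = load;
                            idlest = i;
                    }
            }

            return idlest;
    }
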
5695 unsigned long load; in detach_tasks() local
5721 load = task_h_load(p); in detach_tasks()
5723 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
5726 if ((load / 2) > env->imbalance) in detach_tasks()
5733 env->imbalance -= load; in detach_tasks()
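
Lines 5721-5733 are detach_tasks()'s per-task filter: with the LB_MIN feature enabled, loads below 16 are not worth migrating until balancing has already failed, and a task whose half-load exceeds the remaining imbalance is skipped so a single heavy task cannot overshoot the target. Condensed sketch of the loop body (reconstructed):

    load = task_h_load(p);

    /* too light to repay the migration cost */
    if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
            goto next;

    /* moving it would overshoot the remaining imbalance */
    if ((load / 2) > env->imbalance)
            goto next;

    detach_task(p, env);
    list_add(&p->se.group_node, &env->tasks);

    detached++;
    env->imbalance -= load;
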
5880 unsigned long load; in update_cfs_rq_h_load() local
5899 load = cfs_rq->h_load; in update_cfs_rq_h_load()
5900 load = div64_ul(load * se->avg.load_avg_contrib, in update_cfs_rq_h_load()
5903 cfs_rq->h_load = load; in update_cfs_rq_h_load()
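
update_cfs_rq_h_load() (lines 5880-5903) flattens the group hierarchy into a per-cfs_rq "hierarchical load": walking down from the root, each level's h_load is scaled by the entity's share of its queue's runnable load, with the +1 guarding against a zero divisor. Sketch of the propagation loop (reconstructed; field names from the pre-v4.3 load-tracking code):

    while ((se = cfs_rq->h_load_next) != NULL) {
            load = cfs_rq->h_load;
            /* scale by this entity's share of its queue's runnable load */
            load = div64_ul(load * se->avg.load_avg_contrib,
                            cfs_rq->runnable_load_avg + 1);
            cfs_rq = group_cfs_rq(se);
            cfs_rq->h_load = load;
            cfs_rq->last_h_load_update = now;
    }
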
6266 unsigned long load; in update_sg_lb_stats() local
6276 load = target_load(i, load_idx); in update_sg_lb_stats()
6278 load = source_load(i, load_idx); in update_sg_lb_stats()
6280 sgs->group_load += load; in update_sg_lb_stats()
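
The bias at lines 6276-6278 is the mirror image of find_idlest_group(): here the local (pulling) group uses target_load(), the higher estimate, and remote groups use source_load(), the lower one, so the balancer errs on the side of not pulling. Sketch of the accumulation (reconstructed; other sgs fields elided):

    for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
            /* Bias balancing toward cpus of our domain */
            if (local_group)
                    load = target_load(i, load_idx);
            else
                    load = source_load(i, load_idx);

            sgs->group_load += load;
    }
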
8150 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
8216 if (rq->cfs.load.weight) in get_rr_interval_fair()