Lines matching refs: env  (all matches are in kernel/sched/fair.c)
Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks matches where env is a function parameter or a local variable.

1191 static void task_numa_assign(struct task_numa_env *env,  in task_numa_assign()  argument
1194 if (env->best_task) in task_numa_assign()
1195 put_task_struct(env->best_task); in task_numa_assign()
1199 env->best_task = p; in task_numa_assign()
1200 env->best_imp = imp; in task_numa_assign()
1201 env->best_cpu = env->dst_cpu; in task_numa_assign()
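
For orientation, a sketch of the task_numa_env fields implied by the accesses in this listing (a reconstruction, not the authoritative definition; exact layout and any members not referenced here depend on the kernel version):

    struct task_numa_env {
        struct task_struct *p;          /* task being placed */

        int src_cpu, src_nid;           /* where the task runs now */
        int dst_cpu, dst_nid;           /* candidate destination */

        struct numa_stats src_stats;    /* load/capacity of the source node */
        struct numa_stats dst_stats;    /* load/capacity of the destination node */

        int imbalance_pct;              /* allowed imbalance, in percent */
        int dist;                       /* node_distance(src_nid, dst_nid) */

        struct task_struct *best_task;  /* best swap candidate found so far */
        long best_imp;                  /* improvement score of that candidate */
        int best_cpu;                   /* CPU chosen for the move or swap */
    };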
1205 struct task_numa_env *env) in load_too_imbalanced() argument
1218 src_capacity = env->src_stats.compute_capacity; in load_too_imbalanced()
1219 dst_capacity = env->dst_stats.compute_capacity; in load_too_imbalanced()
1227 src_load * dst_capacity * env->imbalance_pct; in load_too_imbalanced()
1235 orig_src_load = env->src_stats.load; in load_too_imbalanced()
1236 orig_dst_load = env->dst_stats.load; in load_too_imbalanced()
1242 orig_src_load * dst_capacity * env->imbalance_pct; in load_too_imbalanced()
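
Matched lines 1227 and 1242 are the two halves of one test: the proposed placement is rejected only if its capacity-weighted imbalance exceeds the tolerance and is worse than the current imbalance. A simplified sketch, assuming the loads have already been ordered so that dst_load >= src_load:

    /* Is the proposed imbalance above the allowed threshold? */
    imb = dst_load * src_capacity * 100 -
          src_load * dst_capacity * env->imbalance_pct;
    if (imb <= 0)
        return false;               /* within tolerance */

    /* Above the threshold: refuse only if it is worse than today. */
    old_imb = orig_dst_load * src_capacity * 100 -
              orig_src_load * dst_capacity * env->imbalance_pct;

    return imb > old_imb;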
1254 static void task_numa_compare(struct task_numa_env *env, in task_numa_compare() argument
1257 struct rq *src_rq = cpu_rq(env->src_cpu); in task_numa_compare()
1258 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1262 long imp = env->p->numa_group ? groupimp : taskimp; in task_numa_compare()
1264 int dist = env->dist; in task_numa_compare()
1285 if (cur == env->p) in task_numa_compare()
1297 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur))) in task_numa_compare()
1304 if (cur->numa_group == env->p->numa_group) { in task_numa_compare()
1305 imp = taskimp + task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1306 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1320 imp += group_weight(cur, env->src_nid, dist) - in task_numa_compare()
1321 group_weight(cur, env->dst_nid, dist); in task_numa_compare()
1323 imp += task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1324 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1328 if (imp <= env->best_imp && moveimp <= env->best_imp) in task_numa_compare()
1333 if (env->src_stats.nr_running <= env->src_stats.task_capacity && in task_numa_compare()
1334 !env->dst_stats.has_free_capacity) in task_numa_compare()
1341 if (imp > env->best_imp && src_rq->nr_running == 1 && in task_numa_compare()
1349 load = task_h_load(env->p); in task_numa_compare()
1350 dst_load = env->dst_stats.load + load; in task_numa_compare()
1351 src_load = env->src_stats.load - load; in task_numa_compare()
1353 if (moveimp > imp && moveimp > env->best_imp) { in task_numa_compare()
1360 if (!load_too_imbalanced(src_load, dst_load, env)) { in task_numa_compare()
1367 if (imp <= env->best_imp) in task_numa_compare()
1376 if (load_too_imbalanced(src_load, dst_load, env)) in task_numa_compare()
1384 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu); in task_numa_compare()
1387 task_numa_assign(env, cur, imp); in task_numa_compare()
1392 static void task_numa_find_cpu(struct task_numa_env *env, in task_numa_find_cpu() argument
1397 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { in task_numa_find_cpu()
1399 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p))) in task_numa_find_cpu()
1402 env->dst_cpu = cpu; in task_numa_find_cpu()
1403 task_numa_compare(env, taskimp, groupimp); in task_numa_find_cpu()
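
Lines 1397-1403 are essentially the whole body of task_numa_find_cpu(); reconstructed here for readability (a sketch, exact details depend on the kernel version):

    static void task_numa_find_cpu(struct task_numa_env *env,
                                   long taskimp, long groupimp)
    {
        int cpu;

        for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
            /* Skip CPUs the task is not allowed to run on. */
            if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
                continue;

            env->dst_cpu = cpu;
            task_numa_compare(env, taskimp, groupimp);
        }
    }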
1408 static bool numa_has_capacity(struct task_numa_env *env) in numa_has_capacity() argument
1410 struct numa_stats *src = &env->src_stats; in numa_has_capacity()
1411 struct numa_stats *dst = &env->dst_stats; in numa_has_capacity()
1424 if (src->load * dst->compute_capacity * env->imbalance_pct > in numa_has_capacity()
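
Line 1424 shows only the left-hand side of the comparison; the full check, sketched from kernels of this era, weighs the capacity-corrected load of the two nodes against the imbalance threshold:

    /*
     *     src->load                 dst->load
     * ---------------------  vs  ---------------------
     * src->compute_capacity      dst->compute_capacity
     */
    if (src->load * dst->compute_capacity * env->imbalance_pct >
        dst->load * src->compute_capacity * 100)
        return true;

    return false;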
1434 struct task_numa_env env = { in task_numa_migrate() local
1460 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
1462 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
1476 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
1477 dist = env.dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
1478 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
1479 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
1480 update_numa_stats(&env.src_stats, env.src_nid); in task_numa_migrate()
1481 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
1482 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
1483 update_numa_stats(&env.dst_stats, env.dst_nid); in task_numa_migrate()
1486 if (numa_has_capacity(&env)) in task_numa_migrate()
1487 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
1496 if (env.best_cpu == -1 || (p->numa_group && in task_numa_migrate()
1499 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
1502 dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
1504 dist != env.dist) { in task_numa_migrate()
1505 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
1506 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
1515 env.dist = dist; in task_numa_migrate()
1516 env.dst_nid = nid; in task_numa_migrate()
1517 update_numa_stats(&env.dst_stats, env.dst_nid); in task_numa_migrate()
1518 if (numa_has_capacity(&env)) in task_numa_migrate()
1519 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
1532 if (env.best_cpu == -1) in task_numa_migrate()
1533 nid = env.src_nid; in task_numa_migrate()
1535 nid = env.dst_nid; in task_numa_migrate()
1538 sched_setnuma(p, env.dst_nid); in task_numa_migrate()
1542 if (env.best_cpu == -1) in task_numa_migrate()
1551 if (env.best_task == NULL) { in task_numa_migrate()
1552 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
1554 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); in task_numa_migrate()
1558 ret = migrate_swap(p, env.best_task); in task_numa_migrate()
1560 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); in task_numa_migrate()
1561 put_task_struct(env.best_task); in task_numa_migrate()
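
Lines 1551-1561 are the tail of task_numa_migrate(): with no swap partner the task simply migrates to best_cpu, otherwise it swaps places with best_task. A condensed sketch:

    if (env.best_task == NULL) {
        /* Destination CPU has room: just move the task. */
        ret = migrate_task_to(p, env.best_cpu);
        if (ret != 0)
            trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
        return ret;
    }

    /* Otherwise trade places with the task on the chosen CPU. */
    ret = migrate_swap(p, env.best_task);
    if (ret != 0)
        trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
    put_task_struct(env.best_task);
    return ret;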
5551 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
5555 lockdep_assert_held(&env->src_rq->lock); in task_hot()
5566 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && in task_hot()
5576 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
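
From task_hot() onward the matches refer to the load-balancing lb_env rather than task_numa_env. A sketch of its fields as implied by the accesses in the remainder of this listing (members not referenced here are omitted; layout varies by kernel version):

    struct lb_env {
        struct sched_domain   *sd;          /* domain being balanced */

        struct rq             *src_rq;      /* busiest runqueue */
        int                    src_cpu;

        int                    dst_cpu;     /* CPU pulling the load */
        struct rq             *dst_rq;
        struct cpumask        *dst_grpmask; /* CPUs of the local group */
        int                    new_dst_cpu; /* fallback when dst_cpu is pinned out */

        enum cpu_idle_type     idle;        /* CPU_IDLE / CPU_NEWLY_IDLE / CPU_NOT_IDLE */
        long                   imbalance;   /* load still to be moved */
        struct cpumask        *cpus;        /* CPUs eligible in this pass */

        unsigned int           flags;       /* LBF_* state bits */
        unsigned int           loop, loop_break, loop_max;

        enum fbq_type          fbq_type;    /* NUMA-aware busiest-queue filter */
        struct list_head       tasks;       /* detached tasks awaiting attach */
    };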
5587 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
5596 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
5599 src_nid = cpu_to_node(env->src_cpu); in migrate_degrades_locality()
5600 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
5607 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
5630 struct lb_env *env) in migrate_degrades_locality() argument
5640 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
5644 lockdep_assert_held(&env->src_rq->lock); in can_migrate_task()
5653 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
5656 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { in can_migrate_task()
5661 env->flags |= LBF_SOME_PINNED; in can_migrate_task()
5671 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED)) in can_migrate_task()
5675 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { in can_migrate_task()
5677 env->flags |= LBF_DST_PINNED; in can_migrate_task()
5678 env->new_dst_cpu = cpu; in can_migrate_task()
5687 env->flags &= ~LBF_ALL_PINNED; in can_migrate_task()
5689 if (task_running(env->src_rq, p)) { in can_migrate_task()
5700 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
5702 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
5705 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
5707 schedstat_inc(env->sd, lb_hot_gained[env->idle]); in can_migrate_task()
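
Lines 5700-5707 decide whether cache or NUMA hotness blocks the move; simplified from the surrounding can_migrate_task() (statistics bookkeeping trimmed):

    tsk_cache_hot = migrate_degrades_locality(p, env);
    if (tsk_cache_hot == -1)
        tsk_cache_hot = task_hot(p, env);   /* fall back to plain cache hotness */

    /* Migrate if locality improves, the task is cold, or balancing keeps failing. */
    if (tsk_cache_hot <= 0 ||
        env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
        if (tsk_cache_hot == 1)
            schedstat_inc(env->sd, lb_hot_gained[env->idle]);
        return 1;
    }

    return 0;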
5720 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
5722 lockdep_assert_held(&env->src_rq->lock); in detach_task()
5724 deactivate_task(env->src_rq, p, 0); in detach_task()
5726 set_task_cpu(p, env->dst_cpu); in detach_task()
5735 static struct task_struct *detach_one_task(struct lb_env *env) in detach_one_task() argument
5739 lockdep_assert_held(&env->src_rq->lock); in detach_one_task()
5741 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
5742 if (!can_migrate_task(p, env)) in detach_one_task()
5745 detach_task(p, env); in detach_one_task()
5753 schedstat_inc(env->sd, lb_gained[env->idle]); in detach_one_task()
5767 static int detach_tasks(struct lb_env *env) in detach_tasks() argument
5769 struct list_head *tasks = &env->src_rq->cfs_tasks; in detach_tasks()
5774 lockdep_assert_held(&env->src_rq->lock); in detach_tasks()
5776 if (env->imbalance <= 0) in detach_tasks()
5784 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) in detach_tasks()
5789 env->loop++; in detach_tasks()
5791 if (env->loop > env->loop_max) in detach_tasks()
5795 if (env->loop > env->loop_break) { in detach_tasks()
5796 env->loop_break += sched_nr_migrate_break; in detach_tasks()
5797 env->flags |= LBF_NEED_BREAK; in detach_tasks()
5801 if (!can_migrate_task(p, env)) in detach_tasks()
5806 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
5809 if ((load / 2) > env->imbalance) in detach_tasks()
5812 detach_task(p, env); in detach_tasks()
5813 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
5816 env->imbalance -= load; in detach_tasks()
5824 if (env->idle == CPU_NEWLY_IDLE) in detach_tasks()
5832 if (env->imbalance <= 0) in detach_tasks()
5845 schedstat_add(env->sd, lb_gained[env->idle], detached); in detach_tasks()
5878 static void attach_tasks(struct lb_env *env) in attach_tasks() argument
5880 struct list_head *tasks = &env->tasks; in attach_tasks()
5883 raw_spin_lock(&env->dst_rq->lock); in attach_tasks()
5889 attach_task(env->dst_rq, p); in attach_tasks()
5892 raw_spin_unlock(&env->dst_rq->lock); in attach_tasks()
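
The detach/attach split above keeps the two runqueue locks separate: detach_tasks() runs with only the source (busiest) rq lock held and collects tasks on env->tasks; attach_tasks() later takes only the destination rq lock to re-activate them. The calling pattern in load_balance(), roughly:

    /* caller holds the busiest (env.src_rq) rq lock */
    cur_ld_moved = detach_tasks(&env);      /* tasks moved onto env.tasks */
    raw_spin_unlock(&env.src_rq->lock);

    if (cur_ld_moved) {
        attach_tasks(&env);                 /* takes and drops env.dst_rq->lock */
        ld_moved += cur_ld_moved;
    }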
6244 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) in group_has_capacity() argument
6250 (sgs->group_util * env->sd->imbalance_pct)) in group_has_capacity()
6265 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) in group_is_overloaded() argument
6271 (sgs->group_util * env->sd->imbalance_pct)) in group_is_overloaded()
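
Lines 6250 and 6271 are the same capacity-versus-utilization comparison applied in opposite directions. Schematically (guard conditions as in kernels of this era; exact form may differ):

    /* group_has_capacity(): fewer tasks than CPUs, or utilization below scaled capacity */
    if (sgs->sum_nr_running < sgs->group_weight)
        return true;
    if (sgs->group_capacity * 100 >
        sgs->group_util * env->sd->imbalance_pct)
        return true;

    /* group_is_overloaded(): more tasks than CPUs and utilization above scaled capacity */
    if (sgs->sum_nr_running <= sgs->group_weight)
        return false;
    if (sgs->group_capacity * 100 <
        sgs->group_util * env->sd->imbalance_pct)
        return true;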
6299 static inline void update_sg_lb_stats(struct lb_env *env, in update_sg_lb_stats() argument
6309 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { in update_sg_lb_stats()
6343 sgs->group_no_capacity = group_is_overloaded(env, sgs); in update_sg_lb_stats()
6360 static bool update_sd_pick_busiest(struct lb_env *env, in update_sd_pick_busiest() argument
6377 if (!(env->sd->flags & SD_ASYM_PACKING)) in update_sd_pick_busiest()
6385 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) { in update_sd_pick_busiest()
6431 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
6433 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
6434 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
6442 load_idx = get_sd_load_idx(env->sd, env->idle); in update_sd_lb_stats()
6448 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg)); in update_sd_lb_stats()
6453 if (env->idle != CPU_NEWLY_IDLE || in update_sd_lb_stats()
6455 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
6458 update_sg_lb_stats(env, sg, load_idx, local_group, sgs, in update_sd_lb_stats()
6475 group_has_capacity(env, &sds->local_stat) && in update_sd_lb_stats()
6481 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
6492 } while (sg != env->sd->groups); in update_sd_lb_stats()
6494 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
6495 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
6497 if (!env->sd->parent) { in update_sd_lb_stats()
6499 if (env->dst_rq->rd->overload != overload) in update_sd_lb_stats()
6500 env->dst_rq->rd->overload = overload; in update_sd_lb_stats()
6528 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) in check_asym_packing() argument
6532 if (!(env->sd->flags & SD_ASYM_PACKING)) in check_asym_packing()
6539 if (env->dst_cpu > busiest_cpu) in check_asym_packing()
6542 env->imbalance = DIV_ROUND_CLOSEST( in check_asym_packing()
6557 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in fix_small_imbalance() argument
6568 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); in fix_small_imbalance()
6578 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
6616 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
6625 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
6649 env->imbalance = 0; in calculate_imbalance()
6650 return fix_small_imbalance(env, sds); in calculate_imbalance()
6677 env->imbalance = min( in calculate_imbalance()
6688 if (env->imbalance < busiest->load_per_task) in calculate_imbalance()
6689 return fix_small_imbalance(env, sds); in calculate_imbalance()
6711 static struct sched_group *find_busiest_group(struct lb_env *env) in find_busiest_group() argument
6722 update_sd_lb_stats(env, &sds); in find_busiest_group()
6727 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) && in find_busiest_group()
6728 check_asym_packing(env, &sds)) in find_busiest_group()
6747 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) && in find_busiest_group()
6765 if (env->idle == CPU_IDLE) { in find_busiest_group()
6782 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
6788 calculate_imbalance(env, &sds); in find_busiest_group()
6792 env->imbalance = 0; in find_busiest_group()
6799 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue() argument
6806 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { in find_busiest_queue()
6832 if (rt > env->fbq_type) in find_busiest_queue()
6844 if (rq->nr_running == 1 && wl > env->imbalance && in find_busiest_queue()
6845 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
6878 static int need_active_balance(struct lb_env *env) in need_active_balance() argument
6880 struct sched_domain *sd = env->sd; in need_active_balance()
6882 if (env->idle == CPU_NEWLY_IDLE) { in need_active_balance()
6889 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu) in need_active_balance()
6899 if ((env->idle != CPU_NOT_IDLE) && in need_active_balance()
6900 (env->src_rq->cfs.h_nr_running == 1)) { in need_active_balance()
6901 if ((check_cpu_capacity(env->src_rq, sd)) && in need_active_balance()
6902 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
6911 static int should_we_balance(struct lb_env *env) in should_we_balance() argument
6913 struct sched_group *sg = env->sd->groups; in should_we_balance()
6921 if (env->idle == CPU_NEWLY_IDLE) in should_we_balance()
6927 for_each_cpu_and(cpu, sg_cpus, env->cpus) { in should_we_balance()
6942 return balance_cpu == env->dst_cpu; in should_we_balance()
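
Lines 6913-6942 enforce the rule that only one CPU of the local group actually runs a given balance pass. A reconstruction (a sketch; helper names as in kernels of this era):

    static int should_we_balance(struct lb_env *env)
    {
        struct sched_group *sg = env->sd->groups;
        struct cpumask *sg_cpus, *sg_mask;
        int cpu, balance_cpu = -1;

        /* A newly idle CPU is always allowed to pull. */
        if (env->idle == CPU_NEWLY_IDLE)
            return 1;

        sg_cpus = sched_group_cpus(sg);
        sg_mask = sched_group_mask(sg);

        /* Prefer the first idle CPU of the local group... */
        for_each_cpu_and(cpu, sg_cpus, env->cpus) {
            if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
                continue;
            balance_cpu = cpu;
            break;
        }

        /* ...falling back to the group's designated balance CPU. */
        if (balance_cpu == -1)
            balance_cpu = group_balance_cpu(sg);

        /* Only that CPU proceeds with this balance attempt. */
        return balance_cpu == env->dst_cpu;
    }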
6960 struct lb_env env = { in load_balance() local
6969 .tasks = LIST_HEAD_INIT(env.tasks), in load_balance()
6977 env.dst_grpmask = NULL; in load_balance()
6984 if (!should_we_balance(&env)) { in load_balance()
6989 group = find_busiest_group(&env); in load_balance()
6995 busiest = find_busiest_queue(&env, group); in load_balance()
7001 BUG_ON(busiest == env.dst_rq); in load_balance()
7003 schedstat_add(sd, lb_imbalance[idle], env.imbalance); in load_balance()
7005 env.src_cpu = busiest->cpu; in load_balance()
7006 env.src_rq = busiest; in load_balance()
7016 env.flags |= LBF_ALL_PINNED; in load_balance()
7017 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
7026 cur_ld_moved = detach_tasks(&env); in load_balance()
7039 attach_tasks(&env); in load_balance()
7045 if (env.flags & LBF_NEED_BREAK) { in load_balance()
7046 env.flags &= ~LBF_NEED_BREAK; in load_balance()
7069 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { in load_balance()
7072 cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
7074 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
7075 env.dst_cpu = env.new_dst_cpu; in load_balance()
7076 env.flags &= ~LBF_DST_PINNED; in load_balance()
7077 env.loop = 0; in load_balance()
7078 env.loop_break = sched_nr_migrate_break; in load_balance()
7093 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) in load_balance()
7098 if (unlikely(env.flags & LBF_ALL_PINNED)) { in load_balance()
7101 env.loop = 0; in load_balance()
7102 env.loop_break = sched_nr_migrate_break; in load_balance()
7120 if (need_active_balance(&env)) { in load_balance()
7131 env.flags |= LBF_ALL_PINNED; in load_balance()
7202 if (((env.flags & LBF_ALL_PINNED) && in load_balance()
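
For context, the lb_env initializer at line 6960 typically looks like this in kernels of this vintage (a sketch; the exact field set varies by version):

    struct lb_env env = {
        .sd           = sd,
        .dst_cpu      = this_cpu,
        .dst_rq       = this_rq,
        .dst_grpmask  = sched_group_cpus(sd->groups),
        .idle         = idle,
        .loop_break   = sched_nr_migrate_break,
        .cpus         = cpus,
        .fbq_type     = all,
        .tasks        = LIST_HEAD_INIT(env.tasks),
    };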
7383 struct lb_env env = { in active_load_balance_cpu_stop() local
7394 p = detach_one_task(&env); in active_load_balance_cpu_stop()
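
The lb_env built at line 7383 for the active-balance stopper is much smaller, since exactly one task is pulled (a sketch; error handling and statistics omitted):

    struct lb_env env = {
        .sd       = sd,
        .dst_cpu  = target_cpu,
        .dst_rq   = target_rq,
        .src_cpu  = busiest_rq->cpu,
        .src_rq   = busiest_rq,
        .idle     = CPU_IDLE,
    };

    p = detach_one_task(&env);      /* pull a single CFS task off the busiest rq */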