Lines matching references to "env" in kernel/sched/fair.c (each entry shows the kernel source line number, the matching code, the enclosing function, and whether "env" is an argument or a local there).

1185 static void task_numa_assign(struct task_numa_env *env,  in task_numa_assign()  argument
1188 if (env->best_task) in task_numa_assign()
1189 put_task_struct(env->best_task); in task_numa_assign()
1193 env->best_task = p; in task_numa_assign()
1194 env->best_imp = imp; in task_numa_assign()
1195 env->best_cpu = env->dst_cpu; in task_numa_assign()
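
The task_numa_assign() lines above record the best swap candidate found so far in the search environment, dropping the reference held on the previous best. A minimal standalone sketch of that bookkeeping, using a simplified struct in place of the kernel's task_numa_env and a plain counter in place of put_task_struct() (all names here are stand-ins, not the real definitions):

/* Simplified stand-ins for the kernel types. */
struct task { int refcount; };
struct numa_env {
        struct task *best_task;  /* best swap candidate seen so far */
        long         best_imp;   /* its improvement score           */
        int          best_cpu;   /* CPU we would migrate to         */
        int          dst_cpu;    /* CPU currently being evaluated   */
};

/* Record a new best candidate; the caller is assumed to already hold a
 * reference on p, mirroring the put_task_struct() pattern above. */
static void numa_assign(struct numa_env *env, struct task *p, long imp)
{
        if (env->best_task)
                env->best_task->refcount--;   /* put_task_struct() analogue */

        env->best_task = p;
        env->best_imp  = imp;
        env->best_cpu  = env->dst_cpu;
}
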
1199 struct task_numa_env *env) in load_too_imbalanced() argument
1214 src_capacity = env->src_stats.compute_capacity; in load_too_imbalanced()
1215 dst_capacity = env->dst_stats.compute_capacity; in load_too_imbalanced()
1225 load_b * dst_capacity * env->imbalance_pct; in load_too_imbalanced()
1234 orig_src_load = env->src_stats.load; in load_too_imbalanced()
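
load_too_imbalanced() decides whether moving the proposed load would leave the two nodes more imbalanced than env->imbalance_pct allows, comparing loads scaled by each node's compute capacity. A hedged standalone sketch of the core comparison (it deliberately omits the original-load retry path hinted at by line 1234, and the parameter layout is an assumption):

/* Nonzero when dst would end up carrying more than imbalance_pct
 * percent of src's capacity-normalized load.  Cross-multiplied so both
 * sides stay in integer "load * capacity" units. */
static int load_too_imbalanced(long src_load, long dst_load,
                               long src_capacity, long dst_capacity,
                               int imbalance_pct)
{
        long imb = dst_load * src_capacity * 100 -
                   src_load * dst_capacity * imbalance_pct;

        return imb > 0;
}
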
1259 static void task_numa_compare(struct task_numa_env *env, in task_numa_compare() argument
1262 struct rq *src_rq = cpu_rq(env->src_cpu); in task_numa_compare()
1263 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1267 long imp = env->p->numa_group ? groupimp : taskimp; in task_numa_compare()
1269 int dist = env->dist; in task_numa_compare()
1290 if (cur == env->p) in task_numa_compare()
1302 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur))) in task_numa_compare()
1309 if (cur->numa_group == env->p->numa_group) { in task_numa_compare()
1310 imp = taskimp + task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1311 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1325 imp += group_weight(cur, env->src_nid, dist) - in task_numa_compare()
1326 group_weight(cur, env->dst_nid, dist); in task_numa_compare()
1328 imp += task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1329 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1333 if (imp <= env->best_imp && moveimp <= env->best_imp) in task_numa_compare()
1338 if (env->src_stats.nr_running <= env->src_stats.task_capacity && in task_numa_compare()
1339 !env->dst_stats.has_free_capacity) in task_numa_compare()
1346 if (imp > env->best_imp && src_rq->nr_running == 1 && in task_numa_compare()
1354 load = task_h_load(env->p); in task_numa_compare()
1355 dst_load = env->dst_stats.load + load; in task_numa_compare()
1356 src_load = env->src_stats.load - load; in task_numa_compare()
1358 if (moveimp > imp && moveimp > env->best_imp) { in task_numa_compare()
1365 if (!load_too_imbalanced(src_load, dst_load, env)) { in task_numa_compare()
1372 if (imp <= env->best_imp) in task_numa_compare()
1381 if (load_too_imbalanced(src_load, dst_load, env)) in task_numa_compare()
1389 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu); in task_numa_compare()
1392 task_numa_assign(env, cur, imp); in task_numa_compare()
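
The heart of task_numa_compare() at lines 1310-1329 is a symmetric score: the candidate swap is worth taking when the improvement for env->p on the destination node plus the improvement for the current occupant ("cur") on the source node beats the best score seen so far. A tiny sketch of that arithmetic, with the fault-derived weights passed in as plain numbers (hypothetical helper, not the kernel function):

/* Net benefit of swapping p (gains taskimp by moving to dst) with cur
 * (gains by moving back to src).  Positive means the pair ends up with
 * better NUMA placement overall. */
static long swap_improvement(long taskimp, long cur_weight_on_src,
                             long cur_weight_on_dst)
{
        return taskimp + (cur_weight_on_src - cur_weight_on_dst);
}
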
1397 static void task_numa_find_cpu(struct task_numa_env *env, in task_numa_find_cpu() argument
1402 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { in task_numa_find_cpu()
1404 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p))) in task_numa_find_cpu()
1407 env->dst_cpu = cpu; in task_numa_find_cpu()
1408 task_numa_compare(env, taskimp, groupimp); in task_numa_find_cpu()
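
task_numa_find_cpu() simply walks every CPU on the destination node, skips the ones the task may not run on, and evaluates each remaining CPU as a target. A standalone model of that loop, reusing the simplified struct numa_env from the task_numa_assign() sketch and plain arrays in place of cpumask_of_node()/tsk_cpus_allowed() (all assumptions):

static void numa_find_cpu(struct numa_env *env, const int *node_cpus,
                          int nr_cpus, const unsigned char *allowed,
                          long taskimp, long groupimp,
                          void (*compare)(struct numa_env *, long, long))
{
        for (int i = 0; i < nr_cpus; i++) {
                int cpu = node_cpus[i];

                if (!allowed[cpu])        /* tsk_cpus_allowed() analogue */
                        continue;

                env->dst_cpu = cpu;       /* evaluate this CPU as the target */
                compare(env, taskimp, groupimp);
        }
}
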
1414 struct task_numa_env env = { in task_numa_migrate() local
1440 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
1442 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
1456 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
1457 dist = env.dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
1458 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
1459 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
1460 update_numa_stats(&env.src_stats, env.src_nid); in task_numa_migrate()
1461 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
1462 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
1463 update_numa_stats(&env.dst_stats, env.dst_nid); in task_numa_migrate()
1466 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
1475 if (env.best_cpu == -1 || (p->numa_group && in task_numa_migrate()
1478 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
1481 dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
1483 dist != env.dist) { in task_numa_migrate()
1484 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
1485 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
1494 env.dist = dist; in task_numa_migrate()
1495 env.dst_nid = nid; in task_numa_migrate()
1496 update_numa_stats(&env.dst_stats, env.dst_nid); in task_numa_migrate()
1497 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
1510 if (env.best_cpu == -1) in task_numa_migrate()
1511 nid = env.src_nid; in task_numa_migrate()
1513 nid = env.dst_nid; in task_numa_migrate()
1516 sched_setnuma(p, env.dst_nid); in task_numa_migrate()
1520 if (env.best_cpu == -1) in task_numa_migrate()
1529 if (env.best_task == NULL) { in task_numa_migrate()
1530 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
1532 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); in task_numa_migrate()
1536 ret = migrate_swap(p, env.best_task); in task_numa_migrate()
1538 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); in task_numa_migrate()
1539 put_task_struct(env.best_task); in task_numa_migrate()
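
The tail of task_numa_migrate() (lines 1510-1539) turns the search result into an action: if no better CPU was found it bails out, if the best CPU was idle it migrates directly, and if the best CPU had a swap partner it swaps the two tasks. A loose, hedged wrapper showing only that decision, with callbacks standing in for migrate_task_to() and migrate_swap() (everything here is a stand-in):

#include <stddef.h>

struct numa_result {
        int   best_cpu;    /* -1 when no suitable CPU was found       */
        void *best_task;   /* non-NULL when a swap partner was chosen */
};

static int act_on_result(const struct numa_result *r,
                         int (*migrate_to)(int cpu),
                         int (*do_swap)(void *other))
{
        if (r->best_cpu == -1)
                return -1;                        /* stay where we are       */
        if (r->best_task == NULL)
                return migrate_to(r->best_cpu);   /* idle target: plain move */
        return do_swap(r->best_task);             /* busy target: swap tasks */
}
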
5442 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
5446 lockdep_assert_held(&env->src_rq->lock); in task_hot()
5457 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && in task_hot()
5467 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
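
task_hot() treats a task as cache hot when it has run very recently on its current CPU: the delta between the runqueue clock and the task's last exec_start is compared against the migration-cost threshold. A minimal standalone version of that check (migration_cost_ns stands in for sysctl_sched_migration_cost):

static int task_is_hot(unsigned long long now_ns,
                       unsigned long long exec_start_ns,
                       unsigned long long migration_cost_ns)
{
        unsigned long long delta = now_ns - exec_start_ns;

        return delta < migration_cost_ns;   /* ran too recently to move cheaply */
}
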
5474 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env) in migrate_improves_locality() argument
5480 !(env->sd->flags & SD_NUMA)) { in migrate_improves_locality()
5484 src_nid = cpu_to_node(env->src_cpu); in migrate_improves_locality()
5485 dst_nid = cpu_to_node(env->dst_cpu); in migrate_improves_locality()
5510 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
5518 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
5521 src_nid = cpu_to_node(env->src_cpu); in migrate_degrades_locality()
5522 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
5548 struct lb_env *env) in migrate_improves_locality() argument
5554 struct lb_env *env) in migrate_degrades_locality() argument
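
migrate_improves_locality() and migrate_degrades_locality() are mirror images built on the same comparison of NUMA fault weights on the source and destination nodes (the two stubs at lines 5548 and 5554 are the !CONFIG_NUMA_BALANCING fallbacks). A simplified standalone model of that comparison, with a per-node fault-weight array as an assumed input:

#include <stdbool.h>

static bool improves_locality(const long *fault_weight, int src_nid, int dst_nid)
{
        return fault_weight[dst_nid] > fault_weight[src_nid];
}

static bool degrades_locality(const long *fault_weight, int src_nid, int dst_nid)
{
        return fault_weight[dst_nid] < fault_weight[src_nid];
}
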
5564 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
5568 lockdep_assert_held(&env->src_rq->lock); in can_migrate_task()
5577 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
5580 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { in can_migrate_task()
5585 env->flags |= LBF_SOME_PINNED; in can_migrate_task()
5595 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED)) in can_migrate_task()
5599 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { in can_migrate_task()
5601 env->flags |= LBF_DST_PINNED; in can_migrate_task()
5602 env->new_dst_cpu = cpu; in can_migrate_task()
5611 env->flags &= ~LBF_ALL_PINNED; in can_migrate_task()
5613 if (task_running(env->src_rq, p)) { in can_migrate_task()
5624 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
5626 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
5628 if (migrate_improves_locality(p, env) || !tsk_cache_hot || in can_migrate_task()
5629 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
5631 schedstat_inc(env->sd, lb_hot_gained[env->idle]); in can_migrate_task()
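
can_migrate_task() is a chain of vetoes: a task throttled or not allowed on the destination CPU is rejected outright, a task currently running on the source runqueue cannot be detached, and a cache-hot task is only moved once the domain has failed to balance more than cache_nice_tries times (or the move improves NUMA locality). A standalone model of that ordering, with all inputs reduced to booleans and counters (assumptions, not the kernel signature):

#include <stdbool.h>

static bool can_migrate(bool allowed_on_dst, bool running_on_src,
                        bool cache_hot, bool numa_improves,
                        unsigned int nr_balance_failed,
                        unsigned int cache_nice_tries)
{
        if (!allowed_on_dst)
                return false;         /* affinity veto (LBF_SOME_PINNED path) */
        if (running_on_src)
                return false;         /* cannot detach the running task       */
        if (numa_improves || !cache_hot)
                return true;
        /* hot task: only force it off after repeated balance failures */
        return nr_balance_failed > cache_nice_tries;
}
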
5644 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
5646 lockdep_assert_held(&env->src_rq->lock); in detach_task()
5648 deactivate_task(env->src_rq, p, 0); in detach_task()
5650 set_task_cpu(p, env->dst_cpu); in detach_task()
5659 static struct task_struct *detach_one_task(struct lb_env *env) in detach_one_task() argument
5663 lockdep_assert_held(&env->src_rq->lock); in detach_one_task()
5665 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
5666 if (!can_migrate_task(p, env)) in detach_one_task()
5669 detach_task(p, env); in detach_one_task()
5677 schedstat_inc(env->sd, lb_gained[env->idle]); in detach_one_task()
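
detach_task() deactivates a task on the source runqueue and retargets it to the destination CPU; detach_one_task() walks src_rq->cfs_tasks and applies that to the first task that passes can_migrate_task(). A standalone model over a plain array of candidates (the struct and its fields are stand-ins):

#include <stddef.h>

struct candidate { int id; int migratable; int detached; int cpu; };

static struct candidate *detach_one(struct candidate *tasks, int n, int dst_cpu)
{
        for (int i = 0; i < n; i++) {
                if (!tasks[i].migratable)   /* can_migrate_task() analogue */
                        continue;
                tasks[i].detached = 1;      /* deactivate_task() analogue  */
                tasks[i].cpu = dst_cpu;     /* set_task_cpu() analogue     */
                return &tasks[i];
        }
        return NULL;                        /* nothing suitable on this rq */
}
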
5691 static int detach_tasks(struct lb_env *env) in detach_tasks() argument
5693 struct list_head *tasks = &env->src_rq->cfs_tasks; in detach_tasks()
5698 lockdep_assert_held(&env->src_rq->lock); in detach_tasks()
5700 if (env->imbalance <= 0) in detach_tasks()
5706 env->loop++; in detach_tasks()
5708 if (env->loop > env->loop_max) in detach_tasks()
5712 if (env->loop > env->loop_break) { in detach_tasks()
5713 env->loop_break += sched_nr_migrate_break; in detach_tasks()
5714 env->flags |= LBF_NEED_BREAK; in detach_tasks()
5718 if (!can_migrate_task(p, env)) in detach_tasks()
5723 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
5726 if ((load / 2) > env->imbalance) in detach_tasks()
5729 detach_task(p, env); in detach_tasks()
5730 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
5733 env->imbalance -= load; in detach_tasks()
5741 if (env->idle == CPU_NEWLY_IDLE) in detach_tasks()
5749 if (env->imbalance <= 0) in detach_tasks()
5762 schedstat_add(env->sd, lb_gained[env->idle], detached); in detach_tasks()
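
detach_tasks() keeps pulling tasks until the computed imbalance is covered, bailing out when the loop budget (loop_max/loop_break) is spent, skipping tasks whose load would overshoot the remaining imbalance (load/2 > imbalance) and, with LB_MIN, tasks with negligible load. A simplified loop showing only the imbalance accounting (loads[] and the flattened break handling are assumptions):

static int detach_up_to_imbalance(const long *loads, int n, long imbalance,
                                  int loop_max)
{
        int detached = 0;

        for (int i = 0; i < n && imbalance > 0; i++) {
                if (i >= loop_max)            /* env->loop > env->loop_max */
                        break;
                if (loads[i] / 2 > imbalance) /* would overshoot the goal  */
                        continue;

                imbalance -= loads[i];        /* env->imbalance -= load    */
                detached++;
        }
        return detached;
}
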
5795 static void attach_tasks(struct lb_env *env) in attach_tasks() argument
5797 struct list_head *tasks = &env->tasks; in attach_tasks()
5800 raw_spin_lock(&env->dst_rq->lock); in attach_tasks()
5806 attach_task(env->dst_rq, p); in attach_tasks()
5809 raw_spin_unlock(&env->dst_rq->lock); in attach_tasks()
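
attach_tasks() is the other half of the detach/attach pair: every task parked on env->tasks is re-added to the destination runqueue while holding its lock. A sketch with a pthread mutex in place of dst_rq->lock and a callback in place of attach_task() (purely illustrative):

#include <pthread.h>

static void attach_all(pthread_mutex_t *dst_lock, void **tasks, int n,
                       void (*attach)(void *task))
{
        pthread_mutex_lock(dst_lock);       /* raw_spin_lock(&dst_rq->lock) */
        for (int i = 0; i < n; i++)
                attach(tasks[i]);           /* attach_task(env->dst_rq, p)  */
        pthread_mutex_unlock(dst_lock);
}
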
6206 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) in group_has_capacity() argument
6212 (sgs->group_usage * env->sd->imbalance_pct)) in group_has_capacity()
6227 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) in group_is_overloaded() argument
6233 (sgs->group_usage * env->sd->imbalance_pct)) in group_is_overloaded()
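
group_has_capacity() and group_is_overloaded() at lines 6206-6233 are near mirror images of the same test: a group still has spare capacity while usage * imbalance_pct stays below capacity * 100, and is overloaded once it exceeds it. A standalone sketch of just that comparison (the kernel versions also weigh sum_nr_running against the group weight first, which is omitted here):

#include <stdbool.h>

static bool has_capacity(unsigned long usage, unsigned long capacity,
                         unsigned int imbalance_pct)
{
        return capacity * 100 > usage * imbalance_pct;
}

static bool is_overloaded(unsigned long usage, unsigned long capacity,
                          unsigned int imbalance_pct)
{
        return capacity * 100 < usage * imbalance_pct;
}
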
6239 static enum group_type group_classify(struct lb_env *env, in group_classify() argument
6261 static inline void update_sg_lb_stats(struct lb_env *env, in update_sg_lb_stats() argument
6271 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { in update_sg_lb_stats()
6305 sgs->group_no_capacity = group_is_overloaded(env, sgs); in update_sg_lb_stats()
6306 sgs->group_type = group_classify(env, group, sgs); in update_sg_lb_stats()
6322 static bool update_sd_pick_busiest(struct lb_env *env, in update_sd_pick_busiest() argument
6339 if (!(env->sd->flags & SD_ASYM_PACKING)) in update_sd_pick_busiest()
6347 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) { in update_sd_pick_busiest()
6393 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
6395 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
6396 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
6404 load_idx = get_sd_load_idx(env->sd, env->idle); in update_sd_lb_stats()
6410 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg)); in update_sd_lb_stats()
6415 if (env->idle != CPU_NEWLY_IDLE || in update_sd_lb_stats()
6417 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
6420 update_sg_lb_stats(env, sg, load_idx, local_group, sgs, in update_sd_lb_stats()
6437 group_has_capacity(env, &sds->local_stat) && in update_sd_lb_stats()
6443 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
6454 } while (sg != env->sd->groups); in update_sd_lb_stats()
6456 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
6457 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
6459 if (!env->sd->parent) { in update_sd_lb_stats()
6461 if (env->dst_rq->rd->overload != overload) in update_sd_lb_stats()
6462 env->dst_rq->rd->overload = overload; in update_sd_lb_stats()
6490 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) in check_asym_packing() argument
6494 if (!(env->sd->flags & SD_ASYM_PACKING)) in check_asym_packing()
6501 if (env->dst_cpu > busiest_cpu) in check_asym_packing()
6504 env->imbalance = DIV_ROUND_CLOSEST( in check_asym_packing()
6519 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in fix_small_imbalance() argument
6530 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); in fix_small_imbalance()
6540 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
6578 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
6587 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
6611 env->imbalance = 0; in calculate_imbalance()
6612 return fix_small_imbalance(env, sds); in calculate_imbalance()
6639 env->imbalance = min( in calculate_imbalance()
6650 if (env->imbalance < busiest->load_per_task) in calculate_imbalance()
6651 return fix_small_imbalance(env, sds); in calculate_imbalance()
6673 static struct sched_group *find_busiest_group(struct lb_env *env) in find_busiest_group() argument
6684 update_sd_lb_stats(env, &sds); in find_busiest_group()
6689 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) && in find_busiest_group()
6690 check_asym_packing(env, &sds)) in find_busiest_group()
6709 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) && in find_busiest_group()
6727 if (env->idle == CPU_IDLE) { in find_busiest_group()
6744 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
6750 calculate_imbalance(env, &sds); in find_busiest_group()
6754 env->imbalance = 0; in find_busiest_group()
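
The last filter in find_busiest_group() visible above (line 6744) only declares a group busiest when its average load exceeds the local group's by more than the domain's imbalance_pct; otherwise the imbalance is zeroed and no balancing happens. A one-function sketch of that threshold test:

#include <stdbool.h>

static bool busier_than_local(unsigned long busiest_avg_load,
                              unsigned long local_avg_load,
                              unsigned int imbalance_pct)
{
        return busiest_avg_load * 100 > local_avg_load * imbalance_pct;
}
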
6761 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue() argument
6768 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { in find_busiest_queue()
6794 if (rt > env->fbq_type) in find_busiest_queue()
6806 if (rq->nr_running == 1 && wl > env->imbalance && in find_busiest_queue()
6807 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
6840 static int need_active_balance(struct lb_env *env) in need_active_balance() argument
6842 struct sched_domain *sd = env->sd; in need_active_balance()
6844 if (env->idle == CPU_NEWLY_IDLE) { in need_active_balance()
6851 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu) in need_active_balance()
6861 if ((env->idle != CPU_NOT_IDLE) && in need_active_balance()
6862 (env->src_rq->cfs.h_nr_running == 1)) { in need_active_balance()
6863 if ((check_cpu_capacity(env->src_rq, sd)) && in need_active_balance()
6864 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
6873 static int should_we_balance(struct lb_env *env) in should_we_balance() argument
6875 struct sched_group *sg = env->sd->groups; in should_we_balance()
6883 if (env->idle == CPU_NEWLY_IDLE) in should_we_balance()
6889 for_each_cpu_and(cpu, sg_cpus, env->cpus) { in should_we_balance()
6904 return balance_cpu == env->dst_cpu; in should_we_balance()
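
should_we_balance() elects a single CPU per group to run the balancer: the first idle CPU in the group if there is one, otherwise the group's designated first CPU; every other CPU returns false and skips the balance attempt (newly-idle CPUs are always allowed, line 6883). A standalone model over plain arrays:

static int should_balance(const int *group_cpus, const int *cpu_is_idle,
                          int n, int this_cpu)
{
        int balance_cpu = -1;

        for (int i = 0; i < n; i++) {
                if (cpu_is_idle[group_cpus[i]]) {
                        balance_cpu = group_cpus[i];
                        break;
                }
        }
        if (balance_cpu == -1 && n > 0)
                balance_cpu = group_cpus[0];   /* group_balance_cpu() analogue */

        return balance_cpu == this_cpu;        /* compare with env->dst_cpu    */
}
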
6922 struct lb_env env = { in load_balance() local
6931 .tasks = LIST_HEAD_INIT(env.tasks), in load_balance()
6939 env.dst_grpmask = NULL; in load_balance()
6946 if (!should_we_balance(&env)) { in load_balance()
6951 group = find_busiest_group(&env); in load_balance()
6957 busiest = find_busiest_queue(&env, group); in load_balance()
6963 BUG_ON(busiest == env.dst_rq); in load_balance()
6965 schedstat_add(sd, lb_imbalance[idle], env.imbalance); in load_balance()
6967 env.src_cpu = busiest->cpu; in load_balance()
6968 env.src_rq = busiest; in load_balance()
6978 env.flags |= LBF_ALL_PINNED; in load_balance()
6979 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
6988 cur_ld_moved = detach_tasks(&env); in load_balance()
7001 attach_tasks(&env); in load_balance()
7007 if (env.flags & LBF_NEED_BREAK) { in load_balance()
7008 env.flags &= ~LBF_NEED_BREAK; in load_balance()
7031 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { in load_balance()
7034 cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
7036 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
7037 env.dst_cpu = env.new_dst_cpu; in load_balance()
7038 env.flags &= ~LBF_DST_PINNED; in load_balance()
7039 env.loop = 0; in load_balance()
7040 env.loop_break = sched_nr_migrate_break; in load_balance()
7055 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) in load_balance()
7060 if (unlikely(env.flags & LBF_ALL_PINNED)) { in load_balance()
7063 env.loop = 0; in load_balance()
7064 env.loop_break = sched_nr_migrate_break; in load_balance()
7082 if (need_active_balance(&env)) { in load_balance()
7093 env.flags |= LBF_ALL_PINNED; in load_balance()
7164 if (((env.flags & LBF_ALL_PINNED) && in load_balance()
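
Taken together, the load_balance() lines above follow one shape: check that this CPU should balance, find the busiest group and queue, detach tasks up to the imbalance and attach them locally, retry with a different destination when everything useful is pinned, and finally fall back to an active balance. A loose callback-based sketch of that control flow only; it is not the kernel's function and glosses over the LBF_* retry flags:

struct lb_ops {
        int  (*should_we_balance)(void);
        long (*find_imbalance)(void);        /* find_busiest_group/queue    */
        int  (*pull_tasks)(long imbalance);  /* detach_tasks + attach_tasks */
        void (*active_balance)(void);        /* need_active_balance() path  */
};

static int balance_once(const struct lb_ops *ops)
{
        long imbalance;

        if (!ops->should_we_balance())
                return 0;

        imbalance = ops->find_imbalance();
        if (imbalance <= 0)
                return 0;

        if (ops->pull_tasks(imbalance) == 0)
                ops->active_balance();

        return 1;
}
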
7348 struct lb_env env = { in active_load_balance_cpu_stop() local
7359 p = detach_one_task(&env); in active_load_balance_cpu_stop()