Lines matching refs:sd in kernel/sched/fair.c

1426 struct sched_domain *sd; in task_numa_migrate() local
1440 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
1441 if (sd) in task_numa_migrate()
1442 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
1451 if (unlikely(!sd)) { in task_numa_migrate()
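The references at 1440-1442 show task_numa_migrate() borrowing half of the NUMA domain's imbalance margin. A minimal user-space sketch of that arithmetic; the struct and the sample value are illustrative stand-ins, not the kernel's definitions:

        #include <stdio.h>

        /* Illustrative stand-in for the sched_domain field read above. */
        struct sd_stub { int imbalance_pct; };

        /* Halve the margin above 100%, as at line 1442. */
        static int numa_env_imbalance_pct(const struct sd_stub *sd)
        {
                return 100 + (sd->imbalance_pct - 100) / 2;
        }

        int main(void)
        {
                struct sd_stub sd = { .imbalance_pct = 125 };   /* common default */

                /* 125 -> 112: NUMA migration uses a 12% margin instead of 25%. */
                printf("env.imbalance_pct = %d\n", numa_env_imbalance_pct(&sd));
                return 0;
        }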
4564 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) in wake_affine() argument
4580 idx = sd->wake_idx; in wake_affine()
4614 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; in wake_affine()
4631 schedstat_inc(sd, ttwu_move_affine); in wake_affine()
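Line 4614 builds the same halved margin for the affine-wakeup decision: the previous CPU's load is weighted up before being compared against the waking CPU's load, and only a clear win bumps the ttwu_move_affine statistic at 4631. A reduced sketch of that comparison, ignoring the capacity and task-group terms the real code also folds in:

        #include <stdbool.h>

        struct sd_stub { int imbalance_pct; };

        /*
         * Simplified shape of the wake_affine() test: the previous CPU's side
         * is inflated by half the imbalance margin, so the wakeup is pulled
         * to the waker only when the waker's CPU is not clearly busier.
         */
        static bool affine_balanced(const struct sd_stub *sd,
                                    unsigned long this_load,
                                    unsigned long prev_load)
        {
                unsigned long this_eff = 100;
                unsigned long prev_eff = 100 + (sd->imbalance_pct - 100) / 2;

                return this_eff * this_load <= prev_eff * prev_load;
        }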
4642 find_idlest_group(struct sched_domain *sd, struct task_struct *p, in find_idlest_group() argument
4645 struct sched_group *idlest = NULL, *group = sd->groups; in find_idlest_group()
4647 int load_idx = sd->forkexec_idx; in find_idlest_group()
4648 int imbalance = 100 + (sd->imbalance_pct-100)/2; in find_idlest_group()
4651 load_idx = sd->wake_idx; in find_idlest_group()
4688 } while (group = group->next, group != sd->groups); in find_idlest_group()
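Lines 4645-4688 scan the domain's groups for the least-loaded one, and line 4648 derives the margin that decides whether leaving the local group is worthwhile. A condensed sketch where an array of average loads stands in for the sched_group list and index 0 plays the role of the local group:

        #include <stddef.h>

        struct sd_stub { int imbalance_pct; };

        /*
         * Return the index of the least-loaded group, or -1 (stay local)
         * unless the local group is busier than the idlest one by the
         * margin computed at line 4648.
         */
        static int pick_idlest(const struct sd_stub *sd,
                               const unsigned long *avg_load, size_t n)
        {
                unsigned long imbalance = 100 + (sd->imbalance_pct - 100) / 2;
                unsigned long min_load = avg_load[0];
                size_t idlest = 0;

                for (size_t i = 1; i < n; i++) {
                        if (avg_load[i] < min_load) {
                                min_load = avg_load[i];
                                idlest = i;
                        }
                }

                if (idlest == 0 || avg_load[0] * 100 < min_load * imbalance)
                        return -1;      /* gap too small: keep the local group */
                return (int)idlest;
        }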
4749 struct sched_domain *sd; in select_idle_sibling() local
4765 sd = rcu_dereference(per_cpu(sd_llc, target)); in select_idle_sibling()
4766 for_each_lower_domain(sd) { in select_idle_sibling()
4767 sg = sd->groups; in select_idle_sibling()
4783 } while (sg != sd->groups); in select_idle_sibling()
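The loop at 4765-4783 walks the LLC domain's groups looking for one whose CPUs are all idle and returns its first CPU. A reduced sketch over plain arrays, ignoring the special-casing of the target CPU; idle[] is a hypothetical per-CPU idle flag and one "group" is a contiguous slice of size grp:

        #include <stdbool.h>
        #include <stddef.h>

        /* Scan fixed-size groups the way the 4766-4783 loop scans sched_groups. */
        static int find_all_idle_group(const bool *idle, size_t ncpus, size_t grp)
        {
                for (size_t base = 0; base + grp <= ncpus; base += grp) {
                        bool all_idle = true;

                        for (size_t i = base; i < base + grp; i++) {
                                if (!idle[i]) {
                                        all_idle = false;
                                        break;
                                }
                        }
                        if (all_idle)
                                return (int)base;   /* first CPU of the idle group */
                }
                return -1;                          /* fall back to the target CPU */
        }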
4831 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; in select_task_rq_fair() local
4856 sd = tmp; in select_task_rq_fair()
4867 while (sd) { in select_task_rq_fair()
4871 if (!(sd->flags & sd_flag)) { in select_task_rq_fair()
4872 sd = sd->child; in select_task_rq_fair()
4876 group = find_idlest_group(sd, p, cpu, sd_flag); in select_task_rq_fair()
4878 sd = sd->child; in select_task_rq_fair()
4885 sd = sd->child; in select_task_rq_fair()
4891 weight = sd->span_weight; in select_task_rq_fair()
4892 sd = NULL; in select_task_rq_fair()
4897 sd = tmp; in select_task_rq_fair()
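Lines 4856-4897 show the descent through the hierarchy: the walk starts at the highest domain carrying the requested balance flag, runs find_idlest_group()/find_idlest_cpu() at each level, and drops to a child domain until none is left. A schematic of just the descent bookkeeping, over a hypothetical child-linked domain list with an invented flag value:

        struct dom {
                struct dom *child;
                int flags;
        };

        #define SD_BALANCE_FORK 0x1     /* illustrative bit, not the kernel's value */

        /*
         * Shape of the 4867-4897 loop: skip levels without the requested flag,
         * otherwise do one selection pass (elided here) and descend.
         */
        static int walk_levels(struct dom *sd, int sd_flag)
        {
                int passes = 0;

                while (sd) {
                        if (!(sd->flags & sd_flag)) {
                                sd = sd->child;
                                continue;
                        }
                        passes++;       /* a find_idlest_*() pass happens here */
                        sd = sd->child;
                }
                return passes;
        }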
5414 struct sched_domain *sd; member
5480 !(env->sd->flags & SD_NUMA)) { in migrate_improves_locality()
5518 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
5629 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
5631 schedstat_inc(env->sd, lb_hot_gained[env->idle]); in can_migrate_task()
5677 schedstat_inc(env->sd, lb_gained[env->idle]); in detach_one_task()
5723 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
5762 schedstat_add(env->sd, lb_gained[env->idle], detached); in detach_tasks()
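Two decision rules are visible here: line 5629 lets a cache-hot task migrate anyway once the domain has failed to balance more than cache_nice_tries times, and line 5723 (with the LB_MIN feature) skips tasks whose load is below 16 until the first failure. A compact sketch of both predicates; field names mirror the listing, the struct is a stand-in:

        #include <stdbool.h>

        struct sd_stub {
                unsigned int nr_balance_failed;
                unsigned int cache_nice_tries;
        };

        /* Line 5629: override cache-hotness after repeated balance failures. */
        static bool may_migrate_hot_task(const struct sd_stub *sd, bool task_hot)
        {
                if (!task_hot)
                        return true;
                return sd->nr_balance_failed > sd->cache_nice_tries;
        }

        /* Line 5723: with LB_MIN, ignore tiny tasks until balancing has failed once. */
        static bool skip_tiny_task(const struct sd_stub *sd, unsigned long load,
                                   bool lb_min_enabled)
        {
                return lb_min_enabled && load < 16 && !sd->nr_balance_failed;
        }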
5999 static inline int get_sd_load_idx(struct sched_domain *sd, in get_sd_load_idx() argument
6006 load_idx = sd->busy_idx; in get_sd_load_idx()
6010 load_idx = sd->newidle_idx; in get_sd_load_idx()
6013 load_idx = sd->idle_idx; in get_sd_load_idx()
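Lines 6006-6013 select which per-CPU load index to use from the CPU's idle state. A direct sketch with stand-in enum and field names:

        /* Stand-ins for the idle states and index fields read above. */
        enum idle_type { TYPE_BUSY, TYPE_NEWLY_IDLE, TYPE_IDLE };

        struct sd_stub { int busy_idx, newidle_idx, idle_idx; };

        /* Same shape as get_sd_load_idx(): one load index per idle state. */
        static int sd_load_idx(const struct sd_stub *sd, enum idle_type t)
        {
                switch (t) {
                case TYPE_BUSY:
                        return sd->busy_idx;
                case TYPE_NEWLY_IDLE:
                        return sd->newidle_idx;
                default:
                        return sd->idle_idx;
                }
        }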
6020 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu) in default_scale_cpu_capacity() argument
6022 if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1)) in default_scale_cpu_capacity()
6023 return sd->smt_gain / sd->span_weight; in default_scale_cpu_capacity()
6028 unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) in arch_scale_cpu_capacity() argument
6030 return default_scale_cpu_capacity(sd, cpu); in arch_scale_cpu_capacity()
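Lines 6022-6023 reduce per-CPU capacity on SMT domains by splitting smt_gain across the hardware threads sharing a core. A worked sketch; the flag bit is illustrative, and the smt_gain value of 1178 (roughly 1024 * 1.15, the default in kernels of this era) should be treated as an assumption:

        #include <stdio.h>

        struct sd_stub {
                unsigned int flags;
                unsigned long smt_gain;
                unsigned int span_weight;
        };

        #define SD_SHARE_CPUCAPACITY    0x1     /* illustrative bit */
        #define SCHED_CAPACITY_SCALE    1024

        /* Mirror of lines 6022-6023: split smt_gain across SMT siblings. */
        static unsigned long scale_capacity(const struct sd_stub *sd)
        {
                if ((sd->flags & SD_SHARE_CPUCAPACITY) && sd->span_weight > 1)
                        return sd->smt_gain / sd->span_weight;
                return SCHED_CAPACITY_SCALE;
        }

        int main(void)
        {
                /* Two hardware threads sharing a core, smt_gain assumed 1178. */
                struct sd_stub sd = { SD_SHARE_CPUCAPACITY, 1178, 2 };

                printf("per-thread capacity = %lu\n", scale_capacity(&sd));  /* 589 */
                return 0;
        }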
6060 static void update_cpu_capacity(struct sched_domain *sd, int cpu) in update_cpu_capacity() argument
6063 struct sched_group *sdg = sd->groups; in update_cpu_capacity()
6066 capacity *= arch_scale_cpu_capacity(sd, cpu); in update_cpu_capacity()
6068 capacity *= default_scale_cpu_capacity(sd, cpu); in update_cpu_capacity()
6084 void update_group_capacity(struct sched_domain *sd, int cpu) in update_group_capacity() argument
6086 struct sched_domain *child = sd->child; in update_group_capacity()
6087 struct sched_group *group, *sdg = sd->groups; in update_group_capacity()
6091 interval = msecs_to_jiffies(sd->balance_interval); in update_group_capacity()
6096 update_cpu_capacity(sd, cpu); in update_group_capacity()
6123 if (unlikely(!rq->sd)) { in update_group_capacity()
6128 sgc = rq->sd->groups->sgc; in update_group_capacity()
6153 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity() argument
6155 return ((rq->cpu_capacity * sd->imbalance_pct) < in check_cpu_capacity()
6212 (sgs->group_usage * env->sd->imbalance_pct)) in group_has_capacity()
6233 (sgs->group_usage * env->sd->imbalance_pct)) in group_is_overloaded()
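Lines 6155, 6212 and 6233 all use the same pattern: compare one capacity figure against another (or against usage) scaled by imbalance_pct. A worked sketch of the check_cpu_capacity() form at 6155; the struct fields shadow the rq/sd fields named there and the sample numbers are invented:

        #include <stdbool.h>
        #include <stdio.h>

        struct rq_stub { unsigned long cpu_capacity, cpu_capacity_orig; };
        struct sd_stub { int imbalance_pct; };

        /*
         * Line 6155: the CPU counts as capacity-pressured when its remaining
         * capacity, even inflated by imbalance_pct, still falls short of its
         * original capacity.
         */
        static bool cpu_capacity_reduced(const struct rq_stub *rq,
                                         const struct sd_stub *sd)
        {
                return rq->cpu_capacity * sd->imbalance_pct <
                       rq->cpu_capacity_orig * 100;
        }

        int main(void)
        {
                struct rq_stub rq = { .cpu_capacity = 800, .cpu_capacity_orig = 1024 };
                struct sd_stub sd = { .imbalance_pct = 117 };

                /* 800 * 117 = 93600 < 1024 * 100 = 102400 -> pressured */
                printf("reduced: %d\n", cpu_capacity_reduced(&rq, &sd));
                return 0;
        }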
6339 if (!(env->sd->flags & SD_ASYM_PACKING)) in update_sd_pick_busiest()
6395 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
6396 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
6404 load_idx = get_sd_load_idx(env->sd, env->idle); in update_sd_lb_stats()
6417 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
6454 } while (sg != env->sd->groups); in update_sd_lb_stats()
6456 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
6459 if (!env->sd->parent) { in update_sd_lb_stats()
6494 if (!(env->sd->flags & SD_ASYM_PACKING)) in check_asym_packing()
6744 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
6807 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
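Line 6744 carries the core busiest-versus-local test: balancing only proceeds when the busiest group's average load beats the local group's scaled by imbalance_pct. A sketch with invented sample loads:

        #include <stdbool.h>
        #include <stdio.h>

        struct sd_stub { int imbalance_pct; };

        /* Line 6744 reduced: is the busiest group enough busier than us to act? */
        static bool worth_balancing(const struct sd_stub *sd,
                                    unsigned long busiest_avg_load,
                                    unsigned long local_avg_load)
        {
                return busiest_avg_load * 100 >
                       local_avg_load * sd->imbalance_pct;
        }

        int main(void)
        {
                struct sd_stub sd = { .imbalance_pct = 125 };

                /* 1200*100 > 1000*125 is false: a 20% gap is below the 25% margin. */
                printf("%d\n", worth_balancing(&sd, 1200, 1000));
                /* 1300*100 > 1000*125 is true: now the gap justifies migration. */
                printf("%d\n", worth_balancing(&sd, 1300, 1000));
                return 0;
        }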
6842 struct sched_domain *sd = env->sd; in need_active_balance() local
6851 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu) in need_active_balance()
6863 if ((check_cpu_capacity(env->src_rq, sd)) && in need_active_balance()
6864 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
6868 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); in need_active_balance()
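Lines 6851-6868 list the triggers for active balancing: asymmetric packing toward a lower-numbered CPU, a source CPU whose capacity is reduced relative to the destination (6863-6864), and repeated failures beyond cache_nice_tries + 2. A sketch of the last two predicates over stand-in fields:

        #include <stdbool.h>

        struct sd_stub {
                int imbalance_pct;
                unsigned int nr_balance_failed;
                unsigned int cache_nice_tries;
        };

        /*
         * Lines 6863-6864 reduced: push the running task away when the source
         * CPU's capacity, even scaled up by imbalance_pct, is still below the
         * destination CPU's capacity.
         */
        static bool capacity_trigger(const struct sd_stub *sd,
                                     unsigned long src_capacity,
                                     unsigned long dst_capacity)
        {
                return src_capacity * sd->imbalance_pct < dst_capacity * 100;
        }

        /* Line 6868: last-resort trigger after enough failed passive attempts. */
        static bool failure_trigger(const struct sd_stub *sd)
        {
                return sd->nr_balance_failed > sd->cache_nice_tries + 2;
        }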
6875 struct sched_group *sg = env->sd->groups; in should_we_balance()
6912 struct sched_domain *sd, enum cpu_idle_type idle, in load_balance() argument
6916 struct sched_domain *sd_parent = sd->parent; in load_balance()
6923 .sd = sd, in load_balance()
6926 .dst_grpmask = sched_group_cpus(sd->groups), in load_balance()
6943 schedstat_inc(sd, lb_count[idle]); in load_balance()
6953 schedstat_inc(sd, lb_nobusyg[idle]); in load_balance()
6959 schedstat_inc(sd, lb_nobusyq[idle]); in load_balance()
6965 schedstat_add(sd, lb_imbalance[idle], env.imbalance); in load_balance()
7072 schedstat_inc(sd, lb_failed[idle]); in load_balance()
7080 sd->nr_balance_failed++; in load_balance()
7119 sd->nr_balance_failed = sd->cache_nice_tries+1; in load_balance()
7122 sd->nr_balance_failed = 0; in load_balance()
7126 sd->balance_interval = sd->min_interval; in load_balance()
7134 if (sd->balance_interval < sd->max_interval) in load_balance()
7135 sd->balance_interval *= 2; in load_balance()
7158 schedstat_inc(sd, lb_balanced[idle]); in load_balance()
7160 sd->nr_balance_failed = 0; in load_balance()
7165 sd->balance_interval < MAX_PINNED_INTERVAL) || in load_balance()
7166 (sd->balance_interval < sd->max_interval)) in load_balance()
7167 sd->balance_interval *= 2; in load_balance()
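Lines 7080-7167 show the failure and back-off bookkeeping around load_balance(): nr_balance_failed counts failed passes (7080), is pinned at cache_nice_tries + 1 once an active push is kicked (7119), and resets on success (7122); the balance interval snaps back to min_interval on success (7126) and otherwise doubles up to max_interval (7134-7135, 7165-7167). A sketch of that bookkeeping, assuming the interval fields are in milliseconds as in the kernel:

        struct sd_stub {
                unsigned long balance_interval;
                unsigned long min_interval;
                unsigned long max_interval;
        };

        /* Lines 7122-7126: success resets the failure count and the interval. */
        static void on_balance_success(struct sd_stub *sd, unsigned int *nr_failed)
        {
                *nr_failed = 0;
                sd->balance_interval = sd->min_interval;
        }

        /* Lines 7134-7135 / 7165-7167: back off by doubling, capped at max_interval. */
        static void on_balance_backoff(struct sd_stub *sd)
        {
                if (sd->balance_interval < sd->max_interval)
                        sd->balance_interval *= 2;
        }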
7175 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) in get_sd_balance_interval() argument
7177 unsigned long interval = sd->balance_interval; in get_sd_balance_interval()
7180 interval *= sd->busy_factor; in get_sd_balance_interval()
7190 update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance) in update_next_balance() argument
7194 interval = get_sd_balance_interval(sd, cpu_busy); in update_next_balance()
7195 next = sd->last_balance + interval; in update_next_balance()
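Lines 7177-7195 compute when the next balance is due: the base interval is stretched by busy_factor on a busy CPU, and the next balance point is last_balance + interval. A sketch in plain milliseconds; the kernel also converts to jiffies and clamps the result, which is omitted here:

        struct sd_stub {
                unsigned long balance_interval;         /* ms */
                unsigned int busy_factor;
                unsigned long last_balance;             /* ms, for the sketch */
        };

        /* Lines 7177-7180: a busy CPU spends less time in the balancer. */
        static unsigned long balance_interval_ms(const struct sd_stub *sd,
                                                 int cpu_busy)
        {
                unsigned long interval = sd->balance_interval;

                if (cpu_busy)
                        interval *= sd->busy_factor;
                return interval;
        }

        /* Line 7195: the next balance point is simply last_balance + interval. */
        static unsigned long next_balance_ms(const struct sd_stub *sd, int cpu_busy)
        {
                return sd->last_balance + balance_interval_ms(sd, cpu_busy);
        }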
7209 struct sched_domain *sd; in idle_balance() local
7224 sd = rcu_dereference_check_sched_domain(this_rq->sd); in idle_balance()
7225 if (sd) in idle_balance()
7226 update_next_balance(sd, 0, &next_balance); in idle_balance()
7239 for_each_domain(this_cpu, sd) { in idle_balance()
7243 if (!(sd->flags & SD_LOAD_BALANCE)) in idle_balance()
7246 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { in idle_balance()
7247 update_next_balance(sd, 0, &next_balance); in idle_balance()
7251 if (sd->flags & SD_BALANCE_NEWIDLE) { in idle_balance()
7255 sd, CPU_NEWLY_IDLE, in idle_balance()
7259 if (domain_cost > sd->max_newidle_lb_cost) in idle_balance()
7260 sd->max_newidle_lb_cost = domain_cost; in idle_balance()
7265 update_next_balance(sd, 0, &next_balance); in idle_balance()
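Lines 7246-7260 gate newly-idle balancing on cost: a domain is skipped once the expected idle time (avg_idle) can no longer pay for the cost accumulated so far plus that domain's worst observed newidle balance cost, and max_newidle_lb_cost is raised whenever a pass turns out more expensive. A sketch of that accounting, with times in nanoseconds as in the kernel's fields:

        struct sd_stub { unsigned long long max_newidle_lb_cost; };

        /* Line 7246: skip the domain if the expected idle time cannot cover it. */
        static int too_expensive(unsigned long long avg_idle,
                                 unsigned long long curr_cost,
                                 const struct sd_stub *sd)
        {
                return avg_idle < curr_cost + sd->max_newidle_lb_cost;
        }

        /* Lines 7259-7260: remember the most expensive pass and add it to the total. */
        static void account_pass(struct sd_stub *sd, unsigned long long domain_cost,
                                 unsigned long long *curr_cost)
        {
                if (domain_cost > sd->max_newidle_lb_cost)
                        sd->max_newidle_lb_cost = domain_cost;
                *curr_cost += domain_cost;
        }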
7318 struct sched_domain *sd; in active_load_balance_cpu_stop() local
7341 for_each_domain(target_cpu, sd) { in active_load_balance_cpu_stop()
7342 if ((sd->flags & SD_LOAD_BALANCE) && in active_load_balance_cpu_stop()
7343 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) in active_load_balance_cpu_stop()
7347 if (likely(sd)) { in active_load_balance_cpu_stop()
7349 .sd = sd, in active_load_balance_cpu_stop()
7357 schedstat_inc(sd, alb_count); in active_load_balance_cpu_stop()
7361 schedstat_inc(sd, alb_pushed); in active_load_balance_cpu_stop()
7363 schedstat_inc(sd, alb_failed); in active_load_balance_cpu_stop()
7380 return unlikely(!rcu_dereference_sched(rq->sd)); in on_null_domain()
7450 struct sched_domain *sd; in set_cpu_sd_state_busy() local
7454 sd = rcu_dereference(per_cpu(sd_busy, cpu)); in set_cpu_sd_state_busy()
7456 if (!sd || !sd->nohz_idle) in set_cpu_sd_state_busy()
7458 sd->nohz_idle = 0; in set_cpu_sd_state_busy()
7460 atomic_inc(&sd->groups->sgc->nr_busy_cpus); in set_cpu_sd_state_busy()
7467 struct sched_domain *sd; in set_cpu_sd_state_idle() local
7471 sd = rcu_dereference(per_cpu(sd_busy, cpu)); in set_cpu_sd_state_idle()
7473 if (!sd || sd->nohz_idle) in set_cpu_sd_state_idle()
7475 sd->nohz_idle = 1; in set_cpu_sd_state_idle()
7477 atomic_dec(&sd->groups->sgc->nr_busy_cpus); in set_cpu_sd_state_idle()
7543 struct sched_domain *sd; in rebalance_domains() local
7553 for_each_domain(cpu, sd) { in rebalance_domains()
7558 if (time_after(jiffies, sd->next_decay_max_lb_cost)) { in rebalance_domains()
7559 sd->max_newidle_lb_cost = in rebalance_domains()
7560 (sd->max_newidle_lb_cost * 253) / 256; in rebalance_domains()
7561 sd->next_decay_max_lb_cost = jiffies + HZ; in rebalance_domains()
7564 max_cost += sd->max_newidle_lb_cost; in rebalance_domains()
7566 if (!(sd->flags & SD_LOAD_BALANCE)) in rebalance_domains()
7580 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); in rebalance_domains()
7582 need_serialize = sd->flags & SD_SERIALIZE; in rebalance_domains()
7588 if (time_after_eq(jiffies, sd->last_balance + interval)) { in rebalance_domains()
7589 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
7597 sd->last_balance = jiffies; in rebalance_domains()
7598 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); in rebalance_domains()
7603 if (time_after(next_balance, sd->last_balance + interval)) { in rebalance_domains()
7604 next_balance = sd->last_balance + interval; in rebalance_domains()
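Lines 7558-7561 decay each domain's max_newidle_lb_cost by 253/256 once per second (HZ jiffies) so stale worst-case costs fade out; 7588-7604 then run load_balance() when last_balance + interval has passed and track the earliest next_balance. A worked look at the decay rate with an invented starting cost:

        #include <stdio.h>

        /* Lines 7559-7560: multiplicative decay by 253/256, applied once per second. */
        static unsigned long long decay_cost(unsigned long long cost)
        {
                return cost * 253 / 256;
        }

        int main(void)
        {
                unsigned long long cost = 1000000;      /* 1 ms, an invented sample */

                /* About 1.2% is shed per second; roughly 59 s to halve the estimate. */
                for (int s = 1; s <= 60; s++) {
                        cost = decay_cost(cost);
                        if (s % 20 == 0)
                                printf("after %2d s: %llu ns\n", s, cost);
                }
                return 0;
        }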
7690 struct sched_domain *sd; in nohz_kick_needed() local
7719 sd = rcu_dereference(per_cpu(sd_busy, cpu)); in nohz_kick_needed()
7720 if (sd) { in nohz_kick_needed()
7721 sgc = sd->groups->sgc; in nohz_kick_needed()
7731 sd = rcu_dereference(rq->sd); in nohz_kick_needed()
7732 if (sd) { in nohz_kick_needed()
7734 check_cpu_capacity(rq, sd)) { in nohz_kick_needed()
7740 sd = rcu_dereference(per_cpu(sd_asym, cpu)); in nohz_kick_needed()
7741 if (sd && (cpumask_first_and(nohz.idle_cpus_mask, in nohz_kick_needed()
7742 sched_domain_span(sd)) < cpu)) { in nohz_kick_needed()
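Lines 7719-7742 show three of the nohz kick conditions: more than one busy CPU in the sd_busy group (via the nr_busy_cpus counter that set_cpu_sd_state_busy()/idle() maintain at 7460 and 7477), a capacity-pressured CPU (7731-7734), and an ASYM_PACKING domain containing an idle CPU numbered below this one (7740-7742). A sketch of the first and last checks with stand-in types; idle_mask is a hypothetical bitmask of idle CPUs, valid for CPU numbers below the word size:

        #include <stdbool.h>

        /* Stand-in for the sched_group_capacity field read at line 7721. */
        struct busy_group { int nr_busy_cpus; };

        /* Lines 7719-7721: kick nohz balancing when the LLC has more than one busy CPU. */
        static bool kick_for_busy_group(const struct busy_group *sgc)
        {
                return sgc->nr_busy_cpus > 1;
        }

        /*
         * Lines 7740-7742 reduced: with asymmetric packing, kick when some idle
         * CPU with a lower number than this one exists in the domain, so work
         * can be packed toward it.
         */
        static bool kick_for_asym_packing(unsigned long idle_mask, int this_cpu)
        {
                unsigned long lower = idle_mask & ((1UL << this_cpu) - 1);

                return lower != 0;
        }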