Lines Matching refs:sd

600 struct sched_domain *sd; in get_nohz_timer_target() local
606 for_each_domain(cpu, sd) { in get_nohz_timer_target()
607 for_each_cpu(i, sched_domain_span(sd)) { in get_nohz_timer_target()
1421 struct sched_domain *sd; in ttwu_stat() local
1425 for_each_domain(this_cpu, sd) { in ttwu_stat()
1426 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { in ttwu_stat()
1427 schedstat_inc(sd, ttwu_wake_remote); in ttwu_stat()
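
Both hits above, get_nohz_timer_target() and ttwu_stat(), walk the per-CPU domain hierarchy from the base level toward the root via the ->parent link (that is what for_each_domain() expands to, under RCU protection) and test each level's CPU span. A minimal userspace sketch of that traversal pattern, with a made-up toy_domain type standing in for struct sched_domain:

#include <stdio.h>

/* Hypothetical stand-in for struct sched_domain: each level spans a
 * superset of the CPUs spanned by its child, linked via ->parent. */
struct toy_domain {
	const char *name;
	unsigned long span;		/* bitmask of CPUs in this domain */
	struct toy_domain *parent;	/* next wider level, NULL at the root */
};

/* Walk from the base domain of @cpu toward the root, like for_each_domain(),
 * and report the first level whose span also contains @target. */
static void find_sharing_level(struct toy_domain *base, int cpu, int target)
{
	struct toy_domain *sd;

	for (sd = base; sd; sd = sd->parent) {
		if (sd->span & (1UL << target)) {
			printf("cpu%d and cpu%d first share the %s domain\n",
			       cpu, target, sd->name);
			return;
		}
	}
	printf("cpu%d and cpu%d share no domain\n", cpu, target);
}

int main(void)
{
	/* CPU0's hierarchy: SMT {0,1} -> MC {0-3} -> NUMA {0-7} */
	struct toy_domain numa = { "NUMA", 0xff, NULL };
	struct toy_domain mc   = { "MC",   0x0f, &numa };
	struct toy_domain smt  = { "SMT",  0x03, &mc };

	find_sharing_level(&smt, 0, 1);	/* hits the SMT level */
	find_sharing_level(&smt, 0, 5);	/* only the NUMA level spans cpu5 */
	return 0;
}
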
5123 sd_alloc_ctl_domain_table(struct sched_domain *sd) in sd_alloc_ctl_domain_table() argument
5130 set_table_entry(&table[0], "min_interval", &sd->min_interval, in sd_alloc_ctl_domain_table()
5132 set_table_entry(&table[1], "max_interval", &sd->max_interval, in sd_alloc_ctl_domain_table()
5134 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, in sd_alloc_ctl_domain_table()
5136 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, in sd_alloc_ctl_domain_table()
5138 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, in sd_alloc_ctl_domain_table()
5140 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, in sd_alloc_ctl_domain_table()
5142 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, in sd_alloc_ctl_domain_table()
5144 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, in sd_alloc_ctl_domain_table()
5146 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, in sd_alloc_ctl_domain_table()
5149 &sd->cache_nice_tries, in sd_alloc_ctl_domain_table()
5151 set_table_entry(&table[10], "flags", &sd->flags, in sd_alloc_ctl_domain_table()
5154 &sd->max_newidle_lb_cost, in sd_alloc_ctl_domain_table()
5156 set_table_entry(&table[12], "name", sd->name, in sd_alloc_ctl_domain_table()
5166 struct sched_domain *sd; in sd_alloc_ctl_cpu_table() local
5170 for_each_domain(cpu, sd) in sd_alloc_ctl_cpu_table()
5177 for_each_domain(cpu, sd) { in sd_alloc_ctl_cpu_table()
5181 entry->child = sd_alloc_ctl_domain_table(sd); in sd_alloc_ctl_cpu_table()
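
sd_alloc_ctl_domain_table() and sd_alloc_ctl_cpu_table() build the sysctl tables that expose the per-domain tunables listed above; on kernels that register them (CONFIG_SCHED_DEBUG plus sysctl support) they show up as files under /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/. A small illustrative reader, assuming that path layout:

#include <stdio.h>

/* Print one per-domain scheduler tunable.  The cpu<N>/domain<M>/<field>
 * layout mirrors the tables built by sd_alloc_ctl_cpu_table() and
 * sd_alloc_ctl_domain_table(); the files only exist on suitably
 * configured kernels. */
static void print_sd_tunable(int cpu, int domain, const char *field)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/kernel/sched_domain/cpu%d/domain%d/%s",
		 cpu, domain, field);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);
	fclose(f);
}

int main(void)
{
	print_sd_tunable(0, 0, "name");
	print_sd_tunable(0, 0, "min_interval");
	print_sd_tunable(0, 0, "imbalance_pct");
	return 0;
}
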
5407 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
5410 struct sched_group *group = sd->groups; in sched_domain_debug_one()
5416 if (!(sd->flags & SD_LOAD_BALANCE)) { in sched_domain_debug_one()
5418 if (sd->parent) in sched_domain_debug_one()
5425 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
5427 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
5450 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
5467 } while (group != sd->groups); in sched_domain_debug_one()
5470 if (!cpumask_equal(sched_domain_span(sd), groupmask)) in sched_domain_debug_one()
5473 if (sd->parent && in sched_domain_debug_one()
5474 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
5480 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
5487 if (!sd) { in sched_domain_debug()
5495 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
5498 sd = sd->parent; in sched_domain_debug()
5499 if (!sd) in sched_domain_debug()
5504 # define sched_domain_debug(sd, cpu) do { } while (0) argument
5511 static int sd_degenerate(struct sched_domain *sd) in sd_degenerate() argument
5513 if (cpumask_weight(sched_domain_span(sd)) == 1) in sd_degenerate()
5517 if (sd->flags & (SD_LOAD_BALANCE | in sd_degenerate()
5524 if (sd->groups != sd->groups->next) in sd_degenerate()
5529 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
5536 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) in sd_parent_degenerate() argument
5538 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
5543 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) in sd_parent_degenerate()
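
sd_degenerate() and sd_parent_degenerate() decide, when a hierarchy is attached, whether a level can be dropped: a domain is useless if it spans a single CPU or does no balancing or wakeup work, and a parent is redundant if it spans the same CPUs as its child without contributing anything new. The sketch below is a simplified userspace restatement of those checks; the toy flag bits and the flag comparison are cruder than the kernel's SD_* masking:

#include <stdbool.h>
#include <stdio.h>

#define TOY_LOAD_BALANCE	0x1	/* invented stand-ins, not SD_* values */
#define TOY_WAKE_AFFINE		0x2

struct toy_domain {
	unsigned long span;	/* CPU bitmask */
	unsigned int flags;
	int nr_groups;
};

static bool toy_degenerate(const struct toy_domain *sd)
{
	if (__builtin_popcountl(sd->span) == 1)
		return true;				/* spans a single CPU */
	if ((sd->flags & TOY_LOAD_BALANCE) && sd->nr_groups > 1)
		return false;				/* still balances something */
	if (sd->flags & TOY_WAKE_AFFINE)
		return false;				/* still used for wakeups */
	return true;
}

static bool toy_parent_degenerate(const struct toy_domain *sd,
				  const struct toy_domain *parent)
{
	if (toy_degenerate(parent))
		return true;
	if (sd->span != parent->span)
		return false;				/* parent adds CPUs */
	return (parent->flags & ~sd->flags) == 0;	/* parent adds no flags */
}

int main(void)
{
	struct toy_domain smt = { 0x3, TOY_LOAD_BALANCE | TOY_WAKE_AFFINE, 2 };
	struct toy_domain mc  = { 0x3, TOY_LOAD_BALANCE | TOY_WAKE_AFFINE, 1 };

	printf("parent redundant: %d\n", toy_parent_degenerate(&smt, &mc));
	return 0;
}
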
5698 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); in free_sched_domain() local
5704 if (sd->flags & SD_OVERLAP) { in free_sched_domain()
5705 free_sched_groups(sd->groups, 1); in free_sched_domain()
5706 } else if (atomic_dec_and_test(&sd->groups->ref)) { in free_sched_domain()
5707 kfree(sd->groups->sgc); in free_sched_domain()
5708 kfree(sd->groups); in free_sched_domain()
5710 kfree(sd); in free_sched_domain()
5713 static void destroy_sched_domain(struct sched_domain *sd, int cpu) in destroy_sched_domain() argument
5715 call_rcu(&sd->rcu, free_sched_domain); in destroy_sched_domain()
5718 static void destroy_sched_domains(struct sched_domain *sd, int cpu) in destroy_sched_domains() argument
5720 for (; sd; sd = sd->parent) in destroy_sched_domains()
5721 destroy_sched_domain(sd, cpu); in destroy_sched_domains()
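
free_sched_domain() hints at the ownership rules: non-overlapping sched_groups are shared by the domains of every CPU they span, so a domain being torn down only drops a reference, and the group (with its sgc) is freed by whoever drops the last one; destroy_sched_domain() additionally defers the whole thing through call_rcu() so lockless for_each_domain() walkers never see freed memory. A toy refcount sketch of that rule, without the RCU deferral and with hypothetical toy_* types:

#include <stdio.h>
#include <stdlib.h>

/* A group shared by several domains, freed only when the last domain
 * referencing it goes away.  The kernel does the same test with
 * atomic_dec_and_test(&sd->groups->ref). */
struct toy_group {
	int ref;
};

struct toy_domain {
	struct toy_group *groups;
};

static void toy_free_domain(struct toy_domain *sd)
{
	if (--sd->groups->ref == 0) {
		printf("last user gone, freeing group\n");
		free(sd->groups);
	}
	free(sd);
}

int main(void)
{
	struct toy_group *sg = calloc(1, sizeof(*sg));
	struct toy_domain *a = calloc(1, sizeof(*a));
	struct toy_domain *b = calloc(1, sizeof(*b));

	sg->ref = 2;		/* shared by two CPUs' domains */
	a->groups = sg;
	b->groups = sg;

	toy_free_domain(a);	/* group survives */
	toy_free_domain(b);	/* group freed here */
	return 0;
}
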
5742 struct sched_domain *sd; in update_top_cache_domain() local
5747 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); in update_top_cache_domain()
5748 if (sd) { in update_top_cache_domain()
5749 id = cpumask_first(sched_domain_span(sd)); in update_top_cache_domain()
5750 size = cpumask_weight(sched_domain_span(sd)); in update_top_cache_domain()
5751 busy_sd = sd->parent; /* sd_busy */ in update_top_cache_domain()
5755 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
5759 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
5760 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
5762 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
5763 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); in update_top_cache_domain()
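
update_top_cache_domain() finds the levels of interest with highest_flag_domain() and lowest_flag_domain() and caches the results (sd_llc plus the LLC id and size, sd_numa, sd_asym) in per-CPU pointers via rcu_assign_pointer(), so hot paths do not have to re-walk the hierarchy. The flag lookups themselves are plain bottom-up scans of the ->parent chain; a userspace sketch of the pattern follows (toy_domain and the TOY_* bits are invented, and, like the kernel helper, the "highest" walk assumes the flag is set on a contiguous run of levels starting at the bottom):

#include <stdio.h>

struct toy_domain {
	const char *name;
	unsigned int flags;
	struct toy_domain *parent;
};

#define TOY_SHARE_PKG	0x1	/* stand-in for SD_SHARE_PKG_RESOURCES */
#define TOY_NUMA	0x2	/* stand-in for SD_NUMA */

/* Widest level still carrying @flag; stops at the first level without it. */
static struct toy_domain *highest_flag(struct toy_domain *base, unsigned int flag)
{
	struct toy_domain *sd, *hit = NULL;

	for (sd = base; sd; sd = sd->parent) {
		if (!(sd->flags & flag))
			break;
		hit = sd;
	}
	return hit;
}

/* Narrowest level carrying @flag (NULL if no level has it). */
static struct toy_domain *lowest_flag(struct toy_domain *base, unsigned int flag)
{
	struct toy_domain *sd;

	for (sd = base; sd; sd = sd->parent)
		if (sd->flags & flag)
			return sd;
	return NULL;
}

int main(void)
{
	struct toy_domain numa = { "NUMA", TOY_NUMA, NULL };
	struct toy_domain mc   = { "MC",   TOY_SHARE_PKG, &numa };
	struct toy_domain smt  = { "SMT",  TOY_SHARE_PKG, &mc };

	printf("LLC level:  %s\n", highest_flag(&smt, TOY_SHARE_PKG)->name);	/* MC */
	printf("NUMA level: %s\n", lowest_flag(&smt, TOY_NUMA)->name);		/* NUMA */
	return 0;
}
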
5771 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
5777 for (tmp = sd; tmp; ) { in cpu_attach_domain()
5798 if (sd && sd_degenerate(sd)) { in cpu_attach_domain()
5799 tmp = sd; in cpu_attach_domain()
5800 sd = sd->parent; in cpu_attach_domain()
5802 if (sd) in cpu_attach_domain()
5803 sd->child = NULL; in cpu_attach_domain()
5806 sched_domain_debug(sd, cpu); in cpu_attach_domain()
5809 tmp = rq->sd; in cpu_attach_domain()
5810 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
5827 struct sched_domain ** __percpu sd; member
5851 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) in build_group_mask() argument
5853 const struct cpumask *span = sched_domain_span(sd); in build_group_mask()
5854 struct sd_data *sdd = sd->private; in build_group_mask()
5859 sibling = *per_cpu_ptr(sdd->sd, i); in build_group_mask()
5877 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
5880 const struct cpumask *span = sched_domain_span(sd); in build_overlap_sched_groups()
5882 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
5894 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
5916 build_group_mask(sd, sg); in build_overlap_sched_groups()
5941 sd->groups = groups; in build_overlap_sched_groups()
5953 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group() local
5954 struct sched_domain *child = sd->child; in get_group()
5976 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
5979 struct sd_data *sdd = sd->private; in build_sched_groups()
5980 const struct cpumask *span = sched_domain_span(sd); in build_sched_groups()
5984 get_group(cpu, sdd, &sd->groups); in build_sched_groups()
5985 atomic_inc(&sd->groups->ref); in build_sched_groups()
6034 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
6036 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
6043 } while (sg != sd->groups); in init_sched_groups_capacity()
6048 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
6069 static void set_domain_attribute(struct sched_domain *sd, in set_domain_attribute() argument
6081 if (request < sd->level) { in set_domain_attribute()
6083 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
6086 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
6101 free_percpu(d->sd); /* fall through */ in __free_domain_allocs()
6116 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
6117 if (!d->sd) in __visit_domain_allocation_hell()
6130 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
6132 struct sd_data *sdd = sd->private; in claim_allocations()
6134 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
6135 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
6174 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); in sd_init() local
6192 *sd = (struct sched_domain){ in sd_init()
6233 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
6234 sd->flags |= SD_PREFER_SIBLING; in sd_init()
6235 sd->imbalance_pct = 110; in sd_init()
6236 sd->smt_gain = 1178; /* ~15% */ in sd_init()
6238 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
6239 sd->imbalance_pct = 117; in sd_init()
6240 sd->cache_nice_tries = 1; in sd_init()
6241 sd->busy_idx = 2; in sd_init()
6244 } else if (sd->flags & SD_NUMA) { in sd_init()
6245 sd->cache_nice_tries = 2; in sd_init()
6246 sd->busy_idx = 3; in sd_init()
6247 sd->idle_idx = 2; in sd_init()
6249 sd->flags |= SD_SERIALIZE; in sd_init()
6251 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
6258 sd->flags |= SD_PREFER_SIBLING; in sd_init()
6259 sd->cache_nice_tries = 1; in sd_init()
6260 sd->busy_idx = 2; in sd_init()
6261 sd->idle_idx = 1; in sd_init()
6264 sd->private = &tl->data; in sd_init()
6266 return sd; in sd_init()
6595 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
6596 if (!sdd->sd) in __sdt_alloc()
6608 struct sched_domain *sd; in __sdt_alloc() local
6612 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), in __sdt_alloc()
6614 if (!sd) in __sdt_alloc()
6617 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
6649 struct sched_domain *sd; in __sdt_free() local
6651 if (sdd->sd) { in __sdt_free()
6652 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
6653 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
6654 free_sched_groups(sd->groups, 0); in __sdt_free()
6655 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
6663 free_percpu(sdd->sd); in __sdt_free()
6664 sdd->sd = NULL; in __sdt_free()
6676 struct sched_domain *sd = sd_init(tl, cpu); in build_sched_domain() local
6677 if (!sd) in build_sched_domain()
6680 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in build_sched_domain()
6682 sd->level = child->level + 1; in build_sched_domain()
6683 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
6684 child->parent = sd; in build_sched_domain()
6685 sd->child = child; in build_sched_domain()
6688 sched_domain_span(sd))) { in build_sched_domain()
6692 child->name, sd->name); in build_sched_domain()
6695 cpumask_or(sched_domain_span(sd), in build_sched_domain()
6696 sched_domain_span(sd), in build_sched_domain()
6701 set_domain_attribute(sd, attr); in build_sched_domain()
6703 return sd; in build_sched_domain()
6714 struct sched_domain *sd; in build_sched_domains() local
6726 sd = NULL; in build_sched_domains()
6728 sd = build_sched_domain(tl, cpu_map, attr, sd, i); in build_sched_domains()
6730 *per_cpu_ptr(d.sd, i) = sd; in build_sched_domains()
6732 sd->flags |= SD_OVERLAP; in build_sched_domains()
6733 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
6740 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
6741 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
6742 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
6743 if (build_overlap_sched_groups(sd, i)) in build_sched_domains()
6746 if (build_sched_groups(sd, i)) in build_sched_domains()
6757 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
6758 claim_allocations(i, sd); in build_sched_domains()
6759 init_sched_groups_capacity(i, sd); in build_sched_domains()
6766 sd = *per_cpu_ptr(d.sd, i); in build_sched_domains()
6767 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
7214 rq->sd = NULL; in sched_init()
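
Taken together, the build_sched_domain() and build_sched_domains() hits above describe the construction loop: for every CPU in the map, visit the topology levels bottom-up, create one domain per level, link it to the previous level through child/parent, then build groups, initialize capacities and attach the finished hierarchy with cpu_attach_domain(). A toy model of just the per-CPU build-and-link step, with an invented two-level topology:

#include <stdio.h>
#include <stdlib.h>

struct toy_domain {
	const char *name;
	unsigned long span;
	struct toy_domain *child, *parent;
};

struct toy_level {
	const char *name;
	unsigned long (*mask)(int cpu);
};

/* Invented topology: SMT pairs inside a single four-CPU package. */
static unsigned long smt_mask(int cpu) { return 3UL << (cpu & ~1); }
static unsigned long pkg_mask(int cpu) { (void)cpu; return 0xfUL; }

static const struct toy_level levels[] = {
	{ "SMT", smt_mask },
	{ "MC",  pkg_mask },
};

/* Bottom-up per-CPU build, linking child/parent like build_sched_domain(). */
static struct toy_domain *build_for_cpu(int cpu)
{
	struct toy_domain *child = NULL, *sd = NULL;
	size_t i;

	for (i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		sd = calloc(1, sizeof(*sd));
		sd->name = levels[i].name;
		sd->span = levels[i].mask(cpu);
		sd->child = child;
		if (child)
			child->parent = sd;
		child = sd;
	}
	return sd;	/* top level: what cpu_attach_domain() would publish */
}

int main(void)
{
	struct toy_domain *top = build_for_cpu(2), *sd;

	for (sd = top; sd; sd = sd->child)
		printf("%-3s span=%#lx\n", sd->name, sd->span);

	while (top) {
		struct toy_domain *next = top->child;
		free(top);
		top = next;
	}
	return 0;
}
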