Lines Matching refs:sd

622 struct sched_domain *sd; in get_nohz_timer_target() local
628 for_each_domain(cpu, sd) { in get_nohz_timer_target()
629 for_each_cpu(i, sched_domain_span(sd)) { in get_nohz_timer_target()
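
The three lines above show the canonical walk over a CPU's domain hierarchy: climb from the base level through ->parent and scan the CPUs each level spans. A minimal userspace sketch of that walk, with a made-up struct and idle table standing in for the kernel's RCU-protected per-CPU data:

    /*
     * Simplified model of the walk shown above: start at a CPU's lowest
     * domain and climb through ->parent, scanning the CPUs each level
     * covers.  The struct and tables are illustrative stand-ins, not the
     * kernel's definitions.
     */
    #include <stdio.h>

    #define NR_CPUS 4

    struct domain {
        struct domain *parent;
        unsigned long span;              /* bitmask of CPUs this level covers */
    };

    static int cpu_idle[NR_CPUS] = { 1, 0, 1, 1 };   /* hypothetical idle state */
    static struct domain *cpu_base_domain[NR_CPUS];

    /* Pick a busy CPU sharing a domain with @cpu, falling back to @cpu. */
    static int pick_timer_target(int cpu)
    {
        struct domain *sd;

        for (sd = cpu_base_domain[cpu]; sd; sd = sd->parent) {   /* ~for_each_domain() */
            for (int i = 0; i < NR_CPUS; i++) {                  /* ~for_each_cpu(span) */
                if ((sd->span & (1UL << i)) && !cpu_idle[i])
                    return i;
            }
        }
        return cpu;
    }

    int main(void)
    {
        struct domain core = { .parent = NULL, .span = 0x3 };    /* CPUs 0-1 */
        struct domain pkg  = { .parent = NULL, .span = 0xf };    /* CPUs 0-3 */
        core.parent = &pkg;
        cpu_base_domain[0] = &core;

        printf("timer target for cpu 0: %d\n", pick_timer_target(0));
        return 0;
    }
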
1676 struct sched_domain *sd; in ttwu_stat() local
1680 for_each_domain(this_cpu, sd) { in ttwu_stat()
1681 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { in ttwu_stat()
1682 schedstat_inc(sd, ttwu_wake_remote); in ttwu_stat()
5377 sd_alloc_ctl_domain_table(struct sched_domain *sd) in sd_alloc_ctl_domain_table() argument
5384 set_table_entry(&table[0], "min_interval", &sd->min_interval, in sd_alloc_ctl_domain_table()
5386 set_table_entry(&table[1], "max_interval", &sd->max_interval, in sd_alloc_ctl_domain_table()
5388 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, in sd_alloc_ctl_domain_table()
5390 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, in sd_alloc_ctl_domain_table()
5392 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, in sd_alloc_ctl_domain_table()
5394 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, in sd_alloc_ctl_domain_table()
5396 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, in sd_alloc_ctl_domain_table()
5398 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, in sd_alloc_ctl_domain_table()
5400 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, in sd_alloc_ctl_domain_table()
5403 &sd->cache_nice_tries, in sd_alloc_ctl_domain_table()
5405 set_table_entry(&table[10], "flags", &sd->flags, in sd_alloc_ctl_domain_table()
5408 &sd->max_newidle_lb_cost, in sd_alloc_ctl_domain_table()
5410 set_table_entry(&table[12], "name", sd->name, in sd_alloc_ctl_domain_table()
5420 struct sched_domain *sd; in sd_alloc_ctl_cpu_table() local
5424 for_each_domain(cpu, sd) in sd_alloc_ctl_cpu_table()
5431 for_each_domain(cpu, sd) { in sd_alloc_ctl_cpu_table()
5435 entry->child = sd_alloc_ctl_domain_table(sd); in sd_alloc_ctl_cpu_table()
5664 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
5667 struct sched_group *group = sd->groups; in sched_domain_debug_one()
5673 if (!(sd->flags & SD_LOAD_BALANCE)) { in sched_domain_debug_one()
5675 if (sd->parent) in sched_domain_debug_one()
5682 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
5684 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
5707 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
5724 } while (group != sd->groups); in sched_domain_debug_one()
5727 if (!cpumask_equal(sched_domain_span(sd), groupmask)) in sched_domain_debug_one()
5730 if (sd->parent && in sched_domain_debug_one()
5731 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
5737 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
5744 if (!sd) { in sched_domain_debug()
5752 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
5755 sd = sd->parent; in sched_domain_debug()
5756 if (!sd) in sched_domain_debug()
5761 # define sched_domain_debug(sd, cpu) do { } while (0) argument
5768 static int sd_degenerate(struct sched_domain *sd) in sd_degenerate() argument
5770 if (cpumask_weight(sched_domain_span(sd)) == 1) in sd_degenerate()
5774 if (sd->flags & (SD_LOAD_BALANCE | in sd_degenerate()
5781 if (sd->groups != sd->groups->next) in sd_degenerate()
5786 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
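
The sd_degenerate() references above test whether a level is worth keeping at all. A simplified standalone model of that test, with hypothetical flag constants and a group count in place of the real SD_* flags and group list:

    /*
     * A level is useless if it spans one CPU, or if its balancing flags
     * need more than one group but it only has one and it carries no flag
     * (like wake-affinity) that is meaningful without groups.  Flag values
     * and the group count are illustrative, not the kernel's.
     */
    #include <stdio.h>

    #define MY_SD_LOAD_BALANCE  0x01   /* hypothetical stand-ins for SD_* flags */
    #define MY_SD_WAKE_AFFINE   0x02

    struct domain {
        int span_weight;   /* number of CPUs covered */
        int nr_groups;     /* stands in for sd->groups != sd->groups->next */
        unsigned int flags;
    };

    static int domain_degenerate(const struct domain *d)
    {
        if (d->span_weight == 1)
            return 1;
        if ((d->flags & MY_SD_LOAD_BALANCE) && d->nr_groups > 1)
            return 0;                       /* balancing flags have work to do */
        if (d->flags & MY_SD_WAKE_AFFINE)
            return 0;                       /* useful even with a single group */
        return 1;
    }

    int main(void)
    {
        struct domain single = { .span_weight = 1, .nr_groups = 1, .flags = MY_SD_LOAD_BALANCE };
        struct domain real   = { .span_weight = 4, .nr_groups = 2, .flags = MY_SD_LOAD_BALANCE };
        printf("single-cpu level degenerate: %d\n", domain_degenerate(&single));
        printf("4-cpu, 2-group level degenerate: %d\n", domain_degenerate(&real));
        return 0;
    }
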
5793 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) in sd_parent_degenerate() argument
5795 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
5800 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) in sd_parent_degenerate()
5955 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); in free_sched_domain() local
5961 if (sd->flags & SD_OVERLAP) { in free_sched_domain()
5962 free_sched_groups(sd->groups, 1); in free_sched_domain()
5963 } else if (atomic_dec_and_test(&sd->groups->ref)) { in free_sched_domain()
5964 kfree(sd->groups->sgc); in free_sched_domain()
5965 kfree(sd->groups); in free_sched_domain()
5967 kfree(sd); in free_sched_domain()
5970 static void destroy_sched_domain(struct sched_domain *sd, int cpu) in destroy_sched_domain() argument
5972 call_rcu(&sd->rcu, free_sched_domain); in destroy_sched_domain()
5975 static void destroy_sched_domains(struct sched_domain *sd, int cpu) in destroy_sched_domains() argument
5977 for (; sd; sd = sd->parent) in destroy_sched_domains()
5978 destroy_sched_domain(sd, cpu); in destroy_sched_domains()
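
destroy_sched_domains() above simply walks the ->parent chain and hands each level to destroy_sched_domain(), which in the kernel defers the actual free through call_rcu() so lockless readers can finish. A sketch of the same walk that frees eagerly instead:

    #include <stdlib.h>
    #include <stdio.h>

    struct domain {
        struct domain *parent;
        const char *name;
    };

    static void destroy_domain(struct domain *d)
    {
        /* kernel: call_rcu(&sd->rcu, free_sched_domain); here: free now */
        printf("freeing level %s\n", d->name);
        free(d);
    }

    static void destroy_domains(struct domain *d)
    {
        while (d) {
            struct domain *parent = d->parent;   /* grab before freeing */
            destroy_domain(d);
            d = parent;
        }
    }

    int main(void)
    {
        struct domain *pkg  = malloc(sizeof(*pkg));
        struct domain *core = malloc(sizeof(*core));
        *pkg  = (struct domain){ .parent = NULL, .name = "package" };
        *core = (struct domain){ .parent = pkg,  .name = "core" };
        destroy_domains(core);
        return 0;
    }
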
5999 struct sched_domain *sd; in update_top_cache_domain() local
6004 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); in update_top_cache_domain()
6005 if (sd) { in update_top_cache_domain()
6006 id = cpumask_first(sched_domain_span(sd)); in update_top_cache_domain()
6007 size = cpumask_weight(sched_domain_span(sd)); in update_top_cache_domain()
6008 busy_sd = sd->parent; /* sd_busy */ in update_top_cache_domain()
6012 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
6016 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
6017 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
6019 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
6020 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); in update_top_cache_domain()
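
update_top_cache_domain() above caches a few frequently needed levels per CPU, most importantly the highest level that still shares the last-level cache (SD_SHARE_PKG_RESOURCES). A small model of that lookup, with a made-up flag and a plain pointer in place of the kernel's RCU-published per-CPU variables:

    #include <stdio.h>

    #define MY_SHARE_PKG 0x1   /* stand-in for SD_SHARE_PKG_RESOURCES */

    struct domain {
        struct domain *parent;
        unsigned int flags;
        const char *name;
    };

    static struct domain *cached_llc;   /* stands in for per_cpu(sd_llc, cpu) */

    /* Highest level whose flags still contain @flag, like highest_flag_domain(). */
    static struct domain *highest_flag_domain(struct domain *sd, unsigned int flag)
    {
        struct domain *hsd = NULL;

        for (; sd; sd = sd->parent) {
            if (!(sd->flags & flag))
                break;
            hsd = sd;
        }
        return hsd;
    }

    int main(void)
    {
        struct domain numa = { .parent = NULL,  .flags = 0,            .name = "NUMA" };
        struct domain pkg  = { .parent = &numa, .flags = MY_SHARE_PKG, .name = "package" };
        struct domain smt  = { .parent = &pkg,  .flags = MY_SHARE_PKG, .name = "SMT" };

        /* kernel: rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); */
        cached_llc = highest_flag_domain(&smt, MY_SHARE_PKG);
        printf("llc domain: %s\n", cached_llc ? cached_llc->name : "none");
        return 0;
    }
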
6028 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
6034 for (tmp = sd; tmp; ) { in cpu_attach_domain()
6055 if (sd && sd_degenerate(sd)) { in cpu_attach_domain()
6056 tmp = sd; in cpu_attach_domain()
6057 sd = sd->parent; in cpu_attach_domain()
6059 if (sd) in cpu_attach_domain()
6060 sd->child = NULL; in cpu_attach_domain()
6063 sched_domain_debug(sd, cpu); in cpu_attach_domain()
6066 tmp = rq->sd; in cpu_attach_domain()
6067 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
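
cpu_attach_domain() above prunes the freshly built chain before publishing it into rq->sd. A sketch of that pruning with a deliberately simplified degenerate test; the real code also transfers flags down and frees every dropped level via RCU:

    #include <stdio.h>

    struct domain {
        struct domain *parent;
        struct domain *child;
        int span_weight;
        const char *name;
    };

    static int domain_degenerate(const struct domain *d)
    {
        return d->span_weight == 1;        /* simplified: single-CPU level */
    }

    static struct domain *prune_domains(struct domain *sd)
    {
        struct domain *tmp = sd;

        /* Splice out parents that cover no more CPUs than their child. */
        while (tmp && tmp->parent) {
            struct domain *parent = tmp->parent;

            if (parent->span_weight == tmp->span_weight) {
                tmp->parent = parent->parent;
                if (parent->parent)
                    parent->parent->child = tmp;
                /* the kernel frees @parent here, deferred via RCU */
            } else {
                tmp = tmp->parent;
            }
        }

        /* If the lowest level is itself useless, start at its parent. */
        if (sd && domain_degenerate(sd)) {
            sd = sd->parent;
            if (sd)
                sd->child = NULL;
        }
        return sd;
    }

    int main(void)
    {
        struct domain pkg = { .span_weight = 4, .name = "package" };
        struct domain smt = { .parent = &pkg, .span_weight = 1, .name = "SMT" };
        pkg.child = &smt;

        struct domain *top = prune_domains(&smt);
        printf("attached bottom level: %s\n", top ? top->name : "none");
        return 0;
    }
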
6084 struct sched_domain ** __percpu sd; member
6108 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) in build_group_mask() argument
6110 const struct cpumask *span = sched_domain_span(sd); in build_group_mask()
6111 struct sd_data *sdd = sd->private; in build_group_mask()
6116 sibling = *per_cpu_ptr(sdd->sd, i); in build_group_mask()
6134 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
6137 const struct cpumask *span = sched_domain_span(sd); in build_overlap_sched_groups()
6139 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
6151 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
6173 build_group_mask(sd, sg); in build_overlap_sched_groups()
6198 sd->groups = groups; in build_overlap_sched_groups()
6210 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group() local
6211 struct sched_domain *child = sd->child; in get_group()
6233 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
6236 struct sd_data *sdd = sd->private; in build_sched_groups()
6237 const struct cpumask *span = sched_domain_span(sd); in build_sched_groups()
6241 get_group(cpu, sdd, &sd->groups); in build_sched_groups()
6242 atomic_inc(&sd->groups->ref); in build_sched_groups()
6291 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
6293 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
6300 } while (sg != sd->groups); in init_sched_groups_capacity()
6305 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
6326 static void set_domain_attribute(struct sched_domain *sd, in set_domain_attribute() argument
6338 if (request < sd->level) { in set_domain_attribute()
6340 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
6343 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
6358 free_percpu(d->sd); /* fall through */ in __free_domain_allocs()
6373 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
6374 if (!d->sd) in __visit_domain_allocation_hell()
6387 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
6389 struct sd_data *sdd = sd->private; in claim_allocations()
6391 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
6392 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
6431 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); in sd_init() local
6449 *sd = (struct sched_domain){ in sd_init()
6490 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
6491 sd->flags |= SD_PREFER_SIBLING; in sd_init()
6492 sd->imbalance_pct = 110; in sd_init()
6493 sd->smt_gain = 1178; /* ~15% */ in sd_init()
6495 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
6496 sd->imbalance_pct = 117; in sd_init()
6497 sd->cache_nice_tries = 1; in sd_init()
6498 sd->busy_idx = 2; in sd_init()
6501 } else if (sd->flags & SD_NUMA) { in sd_init()
6502 sd->cache_nice_tries = 2; in sd_init()
6503 sd->busy_idx = 3; in sd_init()
6504 sd->idle_idx = 2; in sd_init()
6506 sd->flags |= SD_SERIALIZE; in sd_init()
6508 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
6515 sd->flags |= SD_PREFER_SIBLING; in sd_init()
6516 sd->cache_nice_tries = 1; in sd_init()
6517 sd->busy_idx = 2; in sd_init()
6518 sd->idle_idx = 1; in sd_init()
6521 sd->private = &tl->data; in sd_init()
6523 return sd; in sd_init()
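
The sd_init() references above show how each topology level is tuned from its flags: SMT levels (shared CPU capacity) get a tight imbalance threshold, cache-sharing levels a slightly looser one, and NUMA levels get more cache-retention tries and serialized balancing. A standalone sketch using the constants visible in the listing; the flag values and struct are illustrative stand-ins:

    #include <stdio.h>

    #define MY_SHARE_CPUCAPACITY  0x1   /* hypothetical SD_* stand-ins */
    #define MY_SHARE_PKG          0x2
    #define MY_NUMA               0x4

    struct tuning {
        int imbalance_pct;
        int cache_nice_tries;
        int busy_idx, idle_idx;
    };

    static struct tuning tune_for_level(unsigned int flags)
    {
        struct tuning t = { .imbalance_pct = 125 };    /* common base before overrides */

        if (flags & MY_SHARE_CPUCAPACITY) {            /* SMT siblings */
            t.imbalance_pct = 110;
        } else if (flags & MY_SHARE_PKG) {             /* shared cache */
            t.imbalance_pct = 117;
            t.cache_nice_tries = 1;
            t.busy_idx = 2;
        } else if (flags & MY_NUMA) {                  /* cross-node */
            t.cache_nice_tries = 2;
            t.busy_idx = 3;
            t.idle_idx = 2;
        } else {                                       /* e.g. the DIE level */
            t.cache_nice_tries = 1;
            t.busy_idx = 2;
            t.idle_idx = 1;
        }
        return t;
    }

    int main(void)
    {
        struct tuning smt = tune_for_level(MY_SHARE_CPUCAPACITY);
        printf("SMT imbalance_pct: %d\n", smt.imbalance_pct);
        return 0;
    }
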
6855 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
6856 if (!sdd->sd) in __sdt_alloc()
6868 struct sched_domain *sd; in __sdt_alloc() local
6872 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), in __sdt_alloc()
6874 if (!sd) in __sdt_alloc()
6877 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
6909 struct sched_domain *sd; in __sdt_free() local
6911 if (sdd->sd) { in __sdt_free()
6912 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
6913 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
6914 free_sched_groups(sd->groups, 0); in __sdt_free()
6915 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
6923 free_percpu(sdd->sd); in __sdt_free()
6924 sdd->sd = NULL; in __sdt_free()
6936 struct sched_domain *sd = sd_init(tl, cpu); in build_sched_domain() local
6937 if (!sd) in build_sched_domain()
6940 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in build_sched_domain()
6942 sd->level = child->level + 1; in build_sched_domain()
6943 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
6944 child->parent = sd; in build_sched_domain()
6945 sd->child = child; in build_sched_domain()
6948 sched_domain_span(sd))) { in build_sched_domain()
6952 child->name, sd->name); in build_sched_domain()
6955 cpumask_or(sched_domain_span(sd), in build_sched_domain()
6956 sched_domain_span(sd), in build_sched_domain()
6961 set_domain_attribute(sd, attr); in build_sched_domain()
6963 return sd; in build_sched_domain()
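
build_sched_domain() above stacks one level on top of the previous one, intersecting that level's CPU mask with the map being built and widening the span if the child isn't fully covered. A simplified model of that linking step, using a plain bitmask in place of struct cpumask:

    #include <stdio.h>
    #include <stdlib.h>

    struct domain {
        struct domain *parent, *child;
        int level;
        unsigned long span;                 /* bitmask of CPUs, like sched_domain_span() */
    };

    static struct domain *build_level(unsigned long level_mask, unsigned long cpu_map,
                                      struct domain *child)
    {
        struct domain *sd = calloc(1, sizeof(*sd));

        if (!sd)
            return NULL;
        sd->span = level_mask & cpu_map;    /* ~cpumask_and(span, cpu_map, tl->mask(cpu)) */

        if (child) {
            sd->level = child->level + 1;
            child->parent = sd;
            sd->child = child;

            /* The parent must cover everything the child does. */
            if (child->span & ~sd->span) {
                fprintf(stderr, "warning: child span not covered, widening parent\n");
                sd->span |= child->span;
            }
        }
        return sd;
    }

    int main(void)
    {
        unsigned long cpu_map = 0xf;                             /* CPUs 0-3 */
        struct domain *smt = build_level(0x3, cpu_map, NULL);    /* CPUs 0-1 */
        struct domain *pkg = build_level(0xf, cpu_map, smt);     /* CPUs 0-3 */

        if (!smt || !pkg)
            return 1;
        printf("levels: %d under %d, top span 0x%lx\n",
               smt->level, pkg->level, pkg->span);
        free(pkg);
        free(smt);
        return 0;
    }
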
6974 struct sched_domain *sd; in build_sched_domains() local
6986 sd = NULL; in build_sched_domains()
6988 sd = build_sched_domain(tl, cpu_map, attr, sd, i); in build_sched_domains()
6990 *per_cpu_ptr(d.sd, i) = sd; in build_sched_domains()
6992 sd->flags |= SD_OVERLAP; in build_sched_domains()
6993 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
7000 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
7001 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
7002 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
7003 if (build_overlap_sched_groups(sd, i)) in build_sched_domains()
7006 if (build_sched_groups(sd, i)) in build_sched_domains()
7017 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
7018 claim_allocations(i, sd); in build_sched_domains()
7019 init_sched_groups_capacity(i, sd); in build_sched_domains()
7026 sd = *per_cpu_ptr(d.sd, i); in build_sched_domains()
7027 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
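
Finally, the build_sched_domains() references above show the overall pass structure: build every CPU's chain from the topology levels, then build groups and capacity per level, and only then attach the finished chain to each runqueue. A compact stand-in that models each pass trivially; the kernel interleaves this with RCU publication and error unwinding:

    #include <stdio.h>

    #define NR_CPUS 2
    #define NR_LEVELS 2

    struct domain {
        struct domain *parent;
        int span_weight;
        int capacity_ready;
    };

    static struct domain levels[NR_CPUS][NR_LEVELS];   /* static storage for the sketch */
    static struct domain *per_cpu_sd[NR_CPUS];         /* what d.sd holds in the kernel */
    static struct domain *rq_sd[NR_CPUS];              /* what rq->sd ends up pointing at */

    int main(void)
    {
        /* Pass 1: build each CPU's chain bottom-up (cf. build_sched_domain()). */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            for (int lvl = 0; lvl < NR_LEVELS; lvl++) {
                levels[cpu][lvl].parent =
                    (lvl + 1 < NR_LEVELS) ? &levels[cpu][lvl + 1] : NULL;
                levels[cpu][lvl].span_weight = (lvl == 0) ? 1 : NR_CPUS;
            }
            per_cpu_sd[cpu] = &levels[cpu][0];
        }

        /* Pass 2: per level, build groups and capacity (cf. build_sched_groups(),
         * init_sched_groups_capacity()); modeled here as a single flag. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            for (struct domain *sd = per_cpu_sd[cpu]; sd; sd = sd->parent)
                sd->capacity_ready = 1;

        /* Pass 3: attach the finished chain (cf. cpu_attach_domain()). */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            rq_sd[cpu] = per_cpu_sd[cpu];

        printf("cpu0 top-level span weight: %d\n", rq_sd[0]->parent->span_weight);
        return 0;
    }
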
7472 rq->sd = NULL; in sched_init()