block/kyber-iosched.c:211: unsigned int sched_domain, unsigned int type)
block/kyber-iosched.c:213: unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
block/kyber-iosched.c:214: atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
block/kyber-iosched.c:226: unsigned int sched_domain, unsigned int type,
block/kyber-iosched.c:229: unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
block/kyber-iosched.c:242: if (!kqd->latency_timeout[sched_domain])
block/kyber-iosched.c:243: kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
block/kyber-iosched.c:245: time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
block/kyber-iosched.c:248: kqd->latency_timeout[sched_domain] = 0;
block/kyber-iosched.c:256: memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
block/kyber-iosched.c:258: trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
block/kyber-iosched.c:266: unsigned int sched_domain, unsigned int depth)
block/kyber-iosched.c:268: depth = clamp(depth, 1U, kyber_depth[sched_domain]);
block/kyber-iosched.c:269: if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
block/kyber-iosched.c:270: sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
block/kyber-iosched.c:271: trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
block/kyber-iosched.c:279: unsigned int sched_domain;
block/kyber-iosched.c:288: for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
block/kyber-iosched.c:289: flush_latency_buckets(kqd, cpu_latency, sched_domain,
block/kyber-iosched.c:291: flush_latency_buckets(kqd, cpu_latency, sched_domain,
block/kyber-iosched.c:301: for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
block/kyber-iosched.c:304: p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
block/kyber-iosched.c:315: for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
block/kyber-iosched.c:319: p99 = calculate_percentile(kqd, sched_domain,
block/kyber-iosched.c:331: p99 = kqd->domain_p99[sched_domain];
block/kyber-iosched.c:332: kqd->domain_p99[sched_domain] = -1;
block/kyber-iosched.c:334: kqd->domain_p99[sched_domain] = p99;
block/kyber-iosched.c:349: orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
block/kyber-iosched.c:351: kyber_resize_domain(kqd, sched_domain, depth);
block/kyber-iosched.c:541: unsigned int sched_domain;
block/kyber-iosched.c:546: sched_domain = kyber_sched_domain(rq->cmd_flags);
block/kyber-iosched.c:547: sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
block/kyber-iosched.c:571: unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
block/kyber-iosched.c:572: struct list_head *rq_list = &kcq->rq_list[sched_domain];
block/kyber-iosched.c:594: unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
block/kyber-iosched.c:596: struct list_head *head = &kcq->rq_list[sched_domain];
block/kyber-iosched.c:603: sbitmap_set_bit(&khd->kcq_map[sched_domain],
block/kyber-iosched.c:618: unsigned int sched_domain, unsigned int type,
block/kyber-iosched.c:632: atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
block/kyber-iosched.c:639: unsigned int sched_domain;
block/kyber-iosched.c:642: sched_domain = kyber_sched_domain(rq->cmd_flags);
block/kyber-iosched.c:643: if (sched_domain == KYBER_OTHER)
block/kyber-iosched.c:647: target = kqd->latency_targets[sched_domain];
block/kyber-iosched.c:648: add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
block/kyber-iosched.c:650: add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
block/kyber-iosched.c:659: unsigned int sched_domain;
block/kyber-iosched.c:669: list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
block/kyber-iosched.c:678: unsigned int sched_domain,
block/kyber-iosched.c:683: .sched_domain = sched_domain,
block/kyber-iosched.c:687: sbitmap_for_each_set(&khd->kcq_map[sched_domain],
block/kyber-iosched.c:706: unsigned int sched_domain = khd->cur_domain;
block/kyber-iosched.c:707: struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
block/kyber-iosched.c:708: struct sbq_wait *wait = &khd->domain_wait[sched_domain];
block/kyber-iosched.c:721: &khd->wait_index[sched_domain]);
block/kyber-iosched.c:722: khd->domain_ws[sched_domain] = ws;
block/kyber-iosched.c:740: ws = khd->domain_ws[sched_domain];
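
In the block/kyber-iosched.c hits above, sched_domain is not the scheduler structure but an index into Kyber's per-domain state: completion latencies are accumulated in per-CPU buckets, flushed and turned into percentiles (the p90/p99 lines), and the domain's sbitmap token depth is resized when latency drifts from the target. The sketch below is a minimal user-space model of that feedback idea; the domain_stats, add_sample, percentile_bucket, and maybe_resize names, the bucket count, and the 500us target are assumptions made for illustration, not the kernel's actual definitions.

#include <stdio.h>

/* Hypothetical domain indices mirroring Kyber's READ/WRITE/DISCARD/OTHER split. */
enum { DOM_READ, DOM_WRITE, DOM_DISCARD, DOM_OTHER, DOM_COUNT };
#define NR_BUCKETS 8	/* assumed bucket count, not the kernel's constant */

struct domain_stats {
	unsigned int buckets[NR_BUCKETS];	/* histogram of completed-request latencies */
	unsigned int depth;			/* current number of tokens for this domain */
};

/* Record one latency sample: the bucket index grows with latency relative to the target. */
static void add_sample(struct domain_stats *d, unsigned int latency_us, unsigned int target_us)
{
	unsigned int bucket = latency_us * (NR_BUCKETS / 2) / target_us;

	if (bucket >= NR_BUCKETS)
		bucket = NR_BUCKETS - 1;
	d->buckets[bucket]++;
}

/* Find the bucket holding the requested percentile of all samples seen so far. */
static unsigned int percentile_bucket(const struct domain_stats *d, unsigned int percentile)
{
	unsigned int samples = 0, wanted, i;

	for (i = 0; i < NR_BUCKETS; i++)
		samples += d->buckets[i];
	wanted = samples * percentile / 100;
	for (i = 0; i < NR_BUCKETS - 1 && wanted >= d->buckets[i]; i++)
		wanted -= d->buckets[i];
	return i;
}

/* Halve the domain's depth when the high-percentile latency lands past the target band. */
static void maybe_resize(struct domain_stats *d, unsigned int p99_bucket)
{
	if (p99_bucket >= NR_BUCKETS / 2 && d->depth > 1)
		d->depth /= 2;	/* throttle this domain */
}

int main(void)
{
	struct domain_stats read_dom = { .depth = 64 };
	unsigned int samples[] = { 120, 450, 900, 2400, 3100, 150, 2800 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		add_sample(&read_dom, samples[i], 500 /* assumed 500us read target */);

	maybe_resize(&read_dom, percentile_bucket(&read_dom, 99));
	printf("read domain depth after adjustment: %u\n", read_dom.depth);
	return 0;
}

The point of the model is only the shape of the loop visible in the listing: flush buckets, take a percentile per domain, then resize that domain's token depth.
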
include/linux/arch_topology.h:19: struct sched_domain;

include/linux/sched/topology.h:73: struct sched_domain __rcu *parent; /* top domain must be null terminated */
include/linux/sched/topology.h:74: struct sched_domain __rcu *child; /* bottom domain must be null terminated */
include/linux/sched/topology.h:148: static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
include/linux/sched/topology.h:172: struct sched_domain *__percpu *sd;

kernel/sched/core.c:555: struct sched_domain *sd;
kernel/sched/core.c:2185: struct sched_domain *sd;

kernel/sched/deadline.c:1878: struct sched_domain *sd;

kernel/sched/debug.c:249: sd_alloc_ctl_domain_table(struct sched_domain *sd)
kernel/sched/debug.c:272: struct sched_domain *sd;

kernel/sched/fair.c:1752: struct sched_domain *sd;
kernel/sched/fair.c:5504: wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
kernel/sched/fair.c:5546: static int wake_affine(struct sched_domain *sd, struct task_struct *p,
kernel/sched/fair.c:5580: find_idlest_group(struct sched_domain *sd, struct task_struct *p,
kernel/sched/fair.c:5772: static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
kernel/sched/fair.c:5789: struct sched_domain *tmp;
kernel/sched/fair.c:5883: static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
kernel/sched/fair.c:5941: static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
kernel/sched/fair.c:5958: static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
kernel/sched/fair.c:5961: struct sched_domain *this_sd;
kernel/sched/fair.c:6016: struct sched_domain *sd;
kernel/sched/fair.c:6366: struct sched_domain *sd;
kernel/sched/fair.c:6471: struct sched_domain *tmp, *sd = NULL;
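
From include/linux/sched/topology.h onward the hits refer to the scheduler's struct sched_domain, which forms a per-CPU chain of topology levels linked through parent/child; the fair-class wakeup code listed above (wake_affine, find_idlest_*, select_idle_*) walks that chain looking for a level whose CPU span covers the CPUs it cares about. Below is a toy, user-space model of that upward walk; toy_sched_domain, spanning_domain, and the SMT/MC/NUMA spans are invented for the example and only approximate the real logic.

#include <stdio.h>
#include <stdint.h>

/* Toy model of the parent-linked sched_domain levels (say SMT -> MC -> NUMA). */
struct toy_sched_domain {
	const char *name;
	uint64_t span;			  /* bitmask of CPUs covered by this level */
	struct toy_sched_domain *parent;  /* NULL at the top, as in the kernel */
};

/* Walk upward from the smallest domain of one CPU until a level also spans
 * @other_cpu, roughly what the wakeup path does to pick an affine domain. */
static struct toy_sched_domain *
spanning_domain(struct toy_sched_domain *base, int other_cpu)
{
	struct toy_sched_domain *sd;

	for (sd = base; sd; sd = sd->parent)
		if (sd->span & (1ULL << other_cpu))
			return sd;
	return NULL;
}

int main(void)
{
	struct toy_sched_domain numa = { "NUMA", 0xff, NULL };
	struct toy_sched_domain mc   = { "MC",   0x0f, &numa };
	struct toy_sched_domain smt  = { "SMT",  0x03, &mc };

	/* CPU 0's base domain is the SMT pair {0,1}; CPU 6 only shows up at NUMA. */
	struct toy_sched_domain *sd = spanning_domain(&smt, 6);

	printf("first level spanning both CPUs: %s\n", sd ? sd->name : "none");
	return 0;
}
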
kernel/sched/fair.c:7123: struct sched_domain *sd;
kernel/sched/fair.c:7771: static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
kernel/sched/fair.c:7794: static void update_cpu_capacity(struct sched_domain *sd, int cpu)
kernel/sched/fair.c:7810: void update_group_capacity(struct sched_domain *sd, int cpu)
kernel/sched/fair.c:7812: struct sched_domain *child = sd->child;
kernel/sched/fair.c:7889: check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
kernel/sched/fair.c:7900: static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
kernel/sched/fair.c:8232: struct sched_domain *child = env->sd->child;
kernel/sched/fair.c:8751: struct sched_domain *sd = env->sd;
kernel/sched/fair.c:8777: struct sched_domain *sd = env->sd;
kernel/sched/fair.c:8830: struct sched_domain *sd, enum cpu_idle_type idle,
kernel/sched/fair.c:8834: struct sched_domain *sd_parent = sd->parent;
kernel/sched/fair.c:9104: get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
kernel/sched/fair.c:9119: update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
kernel/sched/fair.c:9143: struct sched_domain *sd;
kernel/sched/fair.c:9244: struct sched_domain *sd;
kernel/sched/fair.c:9404: struct sched_domain *sd;
kernel/sched/fair.c:9513: struct sched_domain *sd;
kernel/sched/fair.c:9543: struct sched_domain *sd;
kernel/sched/fair.c:9803: struct sched_domain *sd;

kernel/sched/rt.c:1637: struct sched_domain *sd;

kernel/sched/sched.h:926: struct sched_domain __rcu *sd;
kernel/sched/sched.h:1357: static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
kernel/sched/sched.h:1359: struct sched_domain *sd, *hsd = NULL;
kernel/sched/sched.h:1370: static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
kernel/sched/sched.h:1372: struct sched_domain *sd;
kernel/sched/sched.h:1382: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
kernel/sched/sched.h:1386: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
kernel/sched/sched.h:1387: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
kernel/sched/sched.h:1388: DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
kernel/sched/sched.h:1832: extern void update_group_capacity(struct sched_domain *sd, int cpu);
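
kernel/sched/sched.h declares per-CPU shortcut pointers (sd_llc, sd_numa, sd_asym_*) alongside highest_flag_domain()/lowest_flag_domain(), helpers that scan a CPU's domain chain for levels carrying a given SD_* flag and are used to populate those caches. The following sketch models the "keep climbing while the flag is set, remember the last match" scan in plain C; toy_domain, the TOY_* flags, and the hard-coded three levels are illustrative assumptions only.

#include <stdio.h>

#define TOY_SHARE_CACHE  0x1	/* stand-in for a flag meaning "these CPUs share a cache" */
#define TOY_NUMA	 0x2

struct toy_domain {
	const char *name;
	unsigned int flags;
	struct toy_domain *parent;
};

/* Return the highest (widest) domain that still carries @flag, mirroring the
 * idea behind the kernel's highest_flag_domain() helper used to cache sd_llc. */
static struct toy_domain *highest_flag_domain(struct toy_domain *base, unsigned int flag)
{
	struct toy_domain *sd, *hsd = NULL;

	for (sd = base; sd; sd = sd->parent) {
		if (!(sd->flags & flag))
			break;		/* in this model the flag is monotonic down the tree */
		hsd = sd;
	}
	return hsd;
}

int main(void)
{
	struct toy_domain numa = { "NUMA", TOY_NUMA, NULL };
	struct toy_domain mc   = { "MC",   TOY_SHARE_CACHE, &numa };
	struct toy_domain smt  = { "SMT",  TOY_SHARE_CACHE, &mc };

	struct toy_domain *llc = highest_flag_domain(&smt, TOY_SHARE_CACHE);

	printf("last-level-cache domain: %s\n", llc ? llc->name : "none");
	return 0;
}
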
kernel/sched/stats.c:25: struct sched_domain *sd;

kernel/sched/topology.c:28: static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
kernel/sched/topology.c:115: static void sched_domain_debug(struct sched_domain *sd, int cpu)
kernel/sched/topology.c:148: static int sd_degenerate(struct sched_domain *sd)
kernel/sched/topology.c:174: sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
kernel/sched/topology.c:578: static void destroy_sched_domain(struct sched_domain *sd)
kernel/sched/topology.c:594: struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
kernel/sched/topology.c:597: struct sched_domain *parent = sd->parent;
kernel/sched/topology.c:603: static void destroy_sched_domains(struct sched_domain *sd)
kernel/sched/topology.c:618: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
kernel/sched/topology.c:622: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
kernel/sched/topology.c:623: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
kernel/sched/topology.c:624: DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
kernel/sched/topology.c:630: struct sched_domain *sd;
kernel/sched/topology.c:661: cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
kernel/sched/topology.c:664: struct sched_domain *tmp;
kernel/sched/topology.c:668: struct sched_domain *parent = tmp->parent;
kernel/sched/topology.c:708: struct sched_domain * __percpu *sd;
kernel/sched/topology.c:840: build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
kernel/sched/topology.c:844: struct sched_domain *sibling;
kernel/sched/topology.c:877: build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
kernel/sched/topology.c:898: static void init_overlap_sched_group(struct sched_domain *sd,
kernel/sched/topology.c:927: build_overlap_sched_groups(struct sched_domain *sd, int cpu)
kernel/sched/topology.c:933: struct sched_domain *sibling;
kernel/sched/topology.c:1059: struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
kernel/sched/topology.c:1060: struct sched_domain *child = sd->child;
kernel/sched/topology.c:1102: build_sched_groups(struct sched_domain *sd, int cpu)
kernel/sched/topology.c:1147: static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
kernel/sched/topology.c:1196: static void set_domain_attribute(struct sched_domain *sd,
kernel/sched/topology.c:1246: d->sd = alloc_percpu(struct sched_domain *);
kernel/sched/topology.c:1261: static void claim_allocations(int cpu, struct sched_domain *sd)
kernel/sched/topology.c:1314: static struct sched_domain *
kernel/sched/topology.c:1317: struct sched_domain *child, int dflags, int cpu)
kernel/sched/topology.c:1320: struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
kernel/sched/topology.c:1341: *sd = (struct sched_domain){
kernel/sched/topology.c:1381: struct sched_domain *t = sd;
kernel/sched/topology.c:1758: sdd->sd = alloc_percpu(struct sched_domain *);
kernel/sched/topology.c:1775: struct sched_domain *sd;
kernel/sched/topology.c:1780: sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
kernel/sched/topology.c:1828: struct sched_domain *sd;
kernel/sched/topology.c:1855: static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
kernel/sched/topology.c:1857: struct sched_domain *child, int dflags, int cpu)
kernel/sched/topology.c:1859: struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
kernel/sched/topology.c:1988: struct sched_domain *sd;
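
The kernel/sched/topology.c hits cover building the domain tree (sd_init, build_sched_domain, build_sched_groups) and attaching it to a runqueue; during cpu_attach_domain() the code drops degenerate levels that add nothing over their child (sd_degenerate/sd_parent_degenerate). Below is a hedged, user-space sketch of that collapse step on a toy chain; the structure and field names are invented for the example, and the real checks also consider flags and group counts rather than just the span.

#include <stdio.h>
#include <stdint.h>

struct toy_domain {
	const char *name;
	uint64_t span;
	struct toy_domain *parent;
	struct toy_domain *child;
};

/* A parent that covers exactly the same CPUs as its child adds nothing here,
 * so unlink it; loosely the sd_parent_degenerate()/cpu_attach_domain() idea. */
static void collapse_degenerate_parents(struct toy_domain *sd)
{
	struct toy_domain *tmp;

	for (tmp = sd; tmp; tmp = tmp->parent) {
		struct toy_domain *parent = tmp->parent;

		if (parent && parent->span == tmp->span) {
			tmp->parent = parent->parent;	/* splice out the useless level */
			if (parent->parent)
				parent->parent->child = tmp;
		}
	}
}

int main(void)
{
	/* Three levels built bottom-up; the middle one spans the same CPUs as the base. */
	struct toy_domain top  = { "TOP",  0xff, NULL, NULL };
	struct toy_domain mid  = { "MID",  0x0f, &top, NULL };
	struct toy_domain base = { "BASE", 0x0f, &mid, NULL };

	top.child = &mid;
	mid.child = &base;

	collapse_degenerate_parents(&base);

	for (struct toy_domain *sd = &base; sd; sd = sd->parent)
		printf("%s\n", sd->name);	/* MID is gone: only BASE and TOP remain */
	return 0;
}
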