Searched refs:sched_domain (results 1 - 8 of 8), sorted by relevance
/linux-4.1.27/kernel/sched/
stats.c
      25  struct sched_domain *sd;    [show_schedstat()]

sched.h
     615  struct sched_domain *sd;
     783  * highest_flag_domain - Return highest sched_domain containing flag.
     786  * @flag: The flag to check for the highest sched_domain
     789  * Returns the highest sched_domain of a cpu which contains the given flag.
     791  static inline struct sched_domain *highest_flag_domain(int cpu, int flag)    [highest_flag_domain()]
     793  struct sched_domain *sd, *hsd = NULL;    [highest_flag_domain()]
     804  static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)    [lowest_flag_domain()]
     806  struct sched_domain *sd;    [lowest_flag_domain()]
     816  DECLARE_PER_CPU(struct sched_domain *, sd_llc);
     819  DECLARE_PER_CPU(struct sched_domain *, sd_numa);
     820  DECLARE_PER_CPU(struct sched_domain *, sd_busy);
     821  DECLARE_PER_CPU(struct sched_domain *, sd_asym);
    1243  extern void update_group_capacity(struct sched_domain *sd, int cpu);
    1394  unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)    [arch_scale_freq_capacity()]

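Note: the two helpers matched at lines 791 and 804 walk a CPU's domain chain bottom-up. The sketch below reconstructs how they work from the declarations above; for_each_domain() is the scheduler's own macro that starts at cpu_rq(cpu)->sd and follows ->parent under RCU.

/*
 * Sketch (reconstructed, not copied verbatim from 4.1 source): walk
 * the domain hierarchy from the CPU's lowest level upward.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;		/* flag must hold on every level below */
		hsd = sd;		/* remember the highest level seen so far */
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;		/* first (lowest) level with the flag */
	}

	return sd;			/* NULL if no level has the flag */
}

The per-CPU pointers declared at lines 816 - 821 (sd_llc, sd_numa, sd_busy, sd_asym) cache the results of such walks so hot paths need not repeat them; they are the DEFINE_PER_CPU counterparts matched in core.c below.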
core.c
     600  struct sched_domain *sd;    [get_nohz_timer_target()]
    1421  struct sched_domain *sd;    [ttwu_stat()]
    5057  .procname = "sched_domain",
    5123  sd_alloc_ctl_domain_table(struct sched_domain *sd)    [sd_alloc_ctl_domain_table()]
    5166  struct sched_domain *sd;    [sd_alloc_ctl_cpu_table()]
    5407  static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,    [sched_domain_debug_one()]
    5480  static void sched_domain_debug(struct sched_domain *sd, int cpu)    [sched_domain_debug()]
    5511  static int sd_degenerate(struct sched_domain *sd)    [sd_degenerate()]
    5536  sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)    [sd_parent_degenerate()]
    5698  struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);    [free_sched_domain()]
    5713  static void destroy_sched_domain(struct sched_domain *sd, int cpu)    [destroy_sched_domain()]
    5718  static void destroy_sched_domains(struct sched_domain *sd, int cpu)    [destroy_sched_domains()]
    5725  * Keep a special pointer to the highest sched_domain that has
    5733  DEFINE_PER_CPU(struct sched_domain *, sd_llc);
    5736  DEFINE_PER_CPU(struct sched_domain *, sd_numa);
    5737  DEFINE_PER_CPU(struct sched_domain *, sd_busy);
    5738  DEFINE_PER_CPU(struct sched_domain *, sd_asym);
    5742  struct sched_domain *sd;    [update_top_cache_domain()]
    5743  struct sched_domain *busy_sd = NULL;    [update_top_cache_domain()]
    5771  cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)    [cpu_attach_domain()]
    5774  struct sched_domain *tmp;    [cpu_attach_domain()]
    5778  struct sched_domain *parent = tmp->parent;    [cpu_attach_domain()]
    5827  struct sched_domain ** __percpu sd;
    5851  static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)    [build_group_mask()]
    5855  struct sched_domain *sibling;    [build_group_mask()]
    5877  build_overlap_sched_groups(struct sched_domain *sd, int cpu)    [build_overlap_sched_groups()]
    5883  struct sched_domain *sibling;    [build_overlap_sched_groups()]
    5927  * canonical balance cpu. Otherwise the sched_domain iteration    [for_each_cpu()]
    5953  struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);    [get_group()]
    5954  struct sched_domain *child = sd->child;    [get_group()]
    5973  * Assumes the sched_domain tree is fully constructed
    5976  build_sched_groups(struct sched_domain *sd, int cpu)    [build_sched_groups()]
    6034  static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)    [init_sched_groups_capacity()]
    6069  static void set_domain_attribute(struct sched_domain *sd,    [set_domain_attribute()]
    6116  d->sd = alloc_percpu(struct sched_domain *);    [__visit_domain_allocation_hell()]
    6126  * NULL the sd_data elements we've used to build the sched_domain and
    6130  static void claim_allocations(int cpu, struct sched_domain *sd)    [claim_allocations()]
    6171  static struct sched_domain *    [sd_init()]
    6174  struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);    [sd_init()]
    6192  *sd = (struct sched_domain){    [sd_init()]
    6595  sdd->sd = alloc_percpu(struct sched_domain *);    [for_each_sd_topology()]
    6608  struct sched_domain *sd;    [for_each_cpu()]
    6612  sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),    [for_each_cpu()]
    6649  struct sched_domain *sd;    [for_each_cpu()]
    6672  struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,    [build_sched_domain()]
    6674  struct sched_domain *child, int cpu)    [build_sched_domain()]
    6676  struct sched_domain *sd = sd_init(tl, cpu);    [build_sched_domain()]
    6714  struct sched_domain *sd;    [build_sched_domains()]

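Note: the cpu_attach_domain() matches (5771, 5774, 5778) together with sd_degenerate()/sd_parent_degenerate() show how redundant levels are spliced out of the chain before a domain tree is attached to a CPU. A sketch of that collapse step follows; the wrapper name trim_degenerate_domains() is invented for illustration (in 4.1 this loop lives inside cpu_attach_domain() itself), while sd_parent_degenerate() and destroy_sched_domain() are the functions listed above.

/*
 * Hypothetical wrapper around the degenerate-collapse loop run by
 * cpu_attach_domain(): any parent domain that adds no balancing
 * information over its child is unlinked from the parent/child chain
 * and freed (deferred through RCU, cf. free_sched_domain() at 5698).
 */
static void trim_degenerate_domains(struct sched_domain *sd, int cpu)
{
	struct sched_domain *tmp;

	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;

		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			/* Splice the parent out of the chain. */
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			destroy_sched_domain(parent, cpu);
		} else {
			tmp = tmp->parent;	/* keep this level, go up */
		}
	}
}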
fair.c
    1426  struct sched_domain *sd;    [task_numa_migrate()]
    4564  static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)    [wake_affine()]
    4642  find_idlest_group(struct sched_domain *sd, struct task_struct *p,    [find_idlest_group()]
    4745  * Try and locate an idle CPU in the sched_domain.
    4749  struct sched_domain *sd;    [select_idle_sibling()]
    4831  struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;    [select_task_rq_fair()]
    5414  struct sched_domain *sd;
    5957  * sd_lb_stats - Structure to store the statistics of a sched_domain
    5994  * @sd: The sched_domain whose load_idx is to be obtained.
    5999  static inline int get_sd_load_idx(struct sched_domain *sd,    [get_sd_load_idx()]
    6020  static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)    [default_scale_cpu_capacity()]
    6028  unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)    [arch_scale_cpu_capacity()]
    6060  static void update_cpu_capacity(struct sched_domain *sd, int cpu)    [update_cpu_capacity()]
    6084  void update_group_capacity(struct sched_domain *sd, int cpu)    [update_group_capacity()]
    6086  struct sched_domain *child = sd->child;    [update_group_capacity()]
    6153  check_cpu_capacity(struct rq *rq, struct sched_domain *sd)    [check_cpu_capacity()]
    6256  * @load_idx: Load index of sched_domain of this_cpu for load calc.
    6312  * @sds: sched_domain statistics
    6389  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
    6391  * @sds: variable to hold the statistics for this sched_domain.
    6395  struct sched_domain *child = env->sd->child;    [update_sd_lb_stats()]
    6488  * @sds: Statistics of the sched_domain which is to be packed
    6513  * amongst the groups of a sched_domain, during
    6516  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
    6583  * groups of a given sched_domain during load balance.
    6585  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
    6657  * find_busiest_group - Returns the busiest group within the sched_domain
    6842  struct sched_domain *sd = env->sd;    [need_active_balance()]
    6912  struct sched_domain *sd, enum cpu_idle_type idle,    [load_balance()]
    6916  struct sched_domain *sd_parent = sd->parent;    [load_balance()]
    7175  get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)    [get_sd_balance_interval()]
    7190  update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)    [update_next_balance()]
    7209  struct sched_domain *sd;    [idle_balance()]
    7318  struct sched_domain *sd;    [active_load_balance_cpu_stop()]
    7450  struct sched_domain *sd;    [set_cpu_sd_state_busy()]
    7467  struct sched_domain *sd;    [set_cpu_sd_state_idle()]
    7543  struct sched_domain *sd;    [rebalance_domains()]
    7690  struct sched_domain *sd;    [nohz_kick_needed()]
    7770  * load balance only within the local sched_domain hierarchy    [run_rebalance_domains()]

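Note: most of these hits share one traversal pattern: take rcu_read_lock(), iterate the hierarchy with for_each_domain(), and act at each level. The sketch below condenses the rebalance_domains() loop (line 7543) to show that pattern; the function name rebalance_sketch() is invented for illustration, and the real code's interval and serialization handling is omitted. The load_balance() parameters match the declaration visible at line 6912.

/*
 * Illustrative sketch of the traversal shared by rebalance_domains(),
 * idle_balance(), select_task_rq_fair(), etc.: walk from the CPU's
 * lowest domain to the top under RCU and try to balance at each level.
 */
static void rebalance_sketch(int cpu, enum cpu_idle_type idle)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *sd;
	int continue_balancing = 1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (!continue_balancing)
			break;		/* a lower level found nothing to move */

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;	/* balancing disabled at this level */

		if (load_balance(cpu, rq, sd, idle, &continue_balancing))
			idle = CPU_NOT_IDLE;	/* pulled work; treat CPU as busy */
	}
	rcu_read_unlock();
}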
deadline.c
    1237  struct sched_domain *sd;    [find_later_rq()]

rt.c
    1550  struct sched_domain *sd;    [find_lowest_rq()]

/linux-4.1.27/arch/arm/kernel/
topology.c
      45  unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)    [arch_scale_cpu_capacity()]

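Note: this ARM definition overrides the __weak generic version matched in fair.c (line 6028). The sketch below shows the pairing in simplified form: the generic body is reduced to its default return value (the real 4.1 code also special-cases SMT domains), and cpu_scale is the per-CPU capacity variable the ARM code maintains (an identifier not shown in this listing, so treat it as an assumption).

/* kernel/sched/fair.c: generic fallback, simplified. */
unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;	/* every CPU assumed full capacity */
}

/* arch/arm/kernel/topology.c: strong definition; the linker prefers
 * it over the __weak symbol, so no registration step is needed. */
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);	/* this core's relative capacity */
}

Because the override happens at link time, update_cpu_capacity() (fair.c line 6060) transparently picks up per-core capacities on asymmetric systems.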
/linux-4.1.27/include/linux/
sched.h
     955  struct sched_domain {    [struct]
     957  struct sched_domain *parent;    /* top domain must be null terminated */
     958  struct sched_domain *child;    /* bottom domain must be null terminated */
    1035  static inline struct cpumask *sched_domain_span(struct sched_domain *sd)    [sched_domain_span()]
    1055  struct sched_domain **__percpu sd;

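Note: these are the defining hits. The parent/child pointers at 957 - 958 make each CPU's domains a NULL-terminated chain from the smallest span (e.g. SMT siblings) up to the whole system, which is exactly what for_each_domain() and the flag-domain helpers above traverse. A condensed sketch of the layout, with all balancing parameters and statistics elided so only the members this search surfaces (plus the groups list they feed) remain:

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	/* ... balancing intervals, flags, and statistics elided ... */
	unsigned long span[0];		/* cpumask of all CPUs in this domain */
};

/* The span is stored as a trailing array; to_cpumask() just casts it. */
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}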
Completed in 463 milliseconds