Lines Matching refs:rd

2287 	return &cpu_rq(i)->rd->dl_bw;  in dl_bw_of()
2292 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus() local
2297 for_each_cpu_and(i, rd->span, cpu_active_mask) in dl_bw_cpus()
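
The first three hits are the deadline-bandwidth helpers: dl_bw_of() hands back the dl_bw accounting embedded in the CPU's root domain, and dl_bw_cpus() counts how many CPUs of that root domain's span are currently active. A minimal sketch assembled from the lines above (not a verbatim copy of the kernel source):

/* Sketch: per-root-domain deadline bandwidth helpers (simplified). */
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->rd->dl_bw;	/* dl_bw lives in the root domain */
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	/* Only CPUs of the span that are currently active carry bandwidth. */
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
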
3934 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
3942 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
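
The two __sched_setscheduler() hits belong to the SCHED_DEADLINE admission check: a task can only switch to the deadline class if its affinity covers the whole root-domain span and the root domain has non-zero deadline bandwidth. A hedged sketch of that condition, with the surrounding locking and error paths omitted (p and policy are the function's task and requested policy; the cpus_allowed field name is assumed for this kernel vintage):

/* Sketch of the SCHED_DEADLINE admission check (simplified). */
if (dl_policy(policy)) {
	cpumask_t *span = rq->rd->span;

	/*
	 * Refuse deadline scheduling if the task's affinity is narrower
	 * than the root domain, or if the domain has no DL bandwidth.
	 */
	if (!cpumask_subset(span, &p->cpus_allowed) ||
	    rq->rd->dl_bw.bw == 0)
		return -EPERM;
}
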
4457 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { in sched_setaffinity()
5087 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, in task_can_attach()
5489 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
5509 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
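
set_rq_online() and set_rq_offline() keep rd->online in sync with the runqueue's state and let each scheduling class react to the transition. A sketch of the online side, assuming the usual for_each_class() iterator; the offline side mirrors it, calling class->rq_offline() first and then clearing the CPU from rq->rd->online:

/* Sketch: mark this runqueue online in its root domain (simplified). */
static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		/* Give every scheduling class a chance to react. */
		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}
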
5535 if (rq->rd) { in migration_call()
5536 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5548 if (rq->rd) { in migration_call()
5549 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5824 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); in free_rootdomain() local
5826 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
5827 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
5828 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
5829 free_cpumask_var(rd->rto_mask); in free_rootdomain()
5830 free_cpumask_var(rd->online); in free_rootdomain()
5831 free_cpumask_var(rd->span); in free_rootdomain()
5832 kfree(rd); in free_rootdomain()
5835 static void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
5842 if (rq->rd) { in rq_attach_root()
5843 old_rd = rq->rd; in rq_attach_root()
5859 atomic_inc(&rd->refcount); in rq_attach_root()
5860 rq->rd = rd; in rq_attach_root()
5862 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
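
rq_attach_root() (the hits at 5835-5862) is where a runqueue is moved from one root domain to another: the old rd, if any, is detached and its refcount dropped, then the new rd is referenced and the CPU added to its span; the last user frees the old domain through the RCU callback free_rootdomain() listed above at 5824. A hedged sketch of that hand-over, leaving out the runqueue locking and the set_rq_online()/set_rq_offline() transitions of the real function (call_rcu_sched() is assumed here as the deferral mechanism):

/* Sketch of the root-domain hand-over in rq_attach_root() (simplified). */
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;

	if (rq->rd) {
		old_rd = rq->rd;
		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/* Keep old_rd around if other runqueues still reference it. */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;
	cpumask_set_cpu(rq->cpu, rd->span);

	/* The last user frees the old root domain after a grace period. */
	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
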
5872 static int init_rootdomain(struct root_domain *rd) in init_rootdomain() argument
5874 memset(rd, 0, sizeof(*rd)); in init_rootdomain()
5876 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
5878 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
5880 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
5882 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
5885 init_dl_bw(&rd->dl_bw); in init_rootdomain()
5886 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
5889 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
5894 free_cpumask_var(rd->rto_mask); in init_rootdomain()
5896 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
5898 free_cpumask_var(rd->online); in init_rootdomain()
5900 free_cpumask_var(rd->span); in init_rootdomain()
5920 struct root_domain *rd; in alloc_rootdomain() local
5922 rd = kmalloc(sizeof(*rd), GFP_KERNEL); in alloc_rootdomain()
5923 if (!rd) in alloc_rootdomain()
5926 if (init_rootdomain(rd) != 0) { in alloc_rootdomain()
5927 kfree(rd); in alloc_rootdomain()
5931 return rd; in alloc_rootdomain()
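
alloc_rootdomain() pairs a plain kmalloc() with init_rootdomain(), which zero-fills the structure, allocates the four cpumasks plus the cpudl/cpupri bookkeeping, and unwinds in reverse order on failure (the free_cpumask_var() hits at 5894-5900). A condensed sketch of that allocate-or-unwind shape; the goto label names are chosen for the sketch and need not match the source:

/* Sketch: build up a root domain, unwinding in reverse order on failure. */
static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;
	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}
	return rd;
}
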
6028 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
6065 rq_attach_root(rq, rd); in cpu_attach_domain()
6085 struct root_domain *rd; member
6355 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
6356 free_rootdomain(&d->rd->rcu); /* fall through */ in __free_domain_allocs()
6376 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
6377 if (!d->rd) in __visit_domain_allocation_hell()
7027 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
7473 rq->rd = NULL; in sched_init()