Lines matching refs: rd (references to the struct root_domain pointer rd in the scheduler core)

1992 	return &cpu_rq(i)->rd->dl_bw;  in dl_bw_of()
1997 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus() local
2002 for_each_cpu_and(i, rd->span, cpu_active_mask) in dl_bw_cpus()
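
dl_bw_of() hands back the deadline-bandwidth accounting of the root domain that CPU i belongs to, and dl_bw_cpus() counts how many CPUs of that domain's span are active, since the bandwidth available for admission scales with that count. A minimal user-space sketch of the span/active intersection count; the toy_ names and the 64-bit mask type are mine, not the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t toy_cpumask;		/* one bit per CPU, 64 CPUs max */

	/* Model of dl_bw_cpus(): only CPUs that are both in the root
	 * domain's span and currently active contribute capacity. */
	static int toy_dl_bw_cpus(toy_cpumask span, toy_cpumask active)
	{
		return __builtin_popcountll(span & active);
	}

	int main(void)
	{
		toy_cpumask span   = 0x0f;	/* root domain covers CPUs 0-3 */
		toy_cpumask active = 0x0b;	/* CPU 2 is offline */

		printf("%d active CPUs in span\n", toy_dl_bw_cpus(span, active));
		return 0;
	}
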
3566 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
3574 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
4074 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { in sched_setaffinity()
4694 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, in task_can_attach()
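
The next three hits are admission checks against the span: __sched_setscheduler() refuses to make a task SCHED_DEADLINE if its affinity does not cover the whole span or the domain's total deadline bandwidth is zero, sched_setaffinity() insists a deadline task's new mask still covers the span its bandwidth was admitted against, and task_can_attach() refuses a cpuset move whose CPUs share nothing with the span. A sketch of the two cpumask predicates involved, again on a toy 64-bit mask (helper names are mine):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t toy_cpumask;

	/* cpumask_subset(a, b): every CPU set in a is also set in b. */
	static bool toy_mask_subset(toy_cpumask a, toy_cpumask b)
	{
		return (a & ~b) == 0;
	}

	/* cpumask_intersects(a, b): a and b share at least one CPU. */
	static bool toy_mask_intersects(toy_cpumask a, toy_cpumask b)
	{
		return (a & b) != 0;
	}

	int main(void)
	{
		toy_cpumask rd_span  = 0x0f;	/* root domain: CPUs 0-3 */
		toy_cpumask new_mask = 0x03;	/* requested affinity: CPUs 0-1 */

		/* sched_setaffinity() model: the new mask must cover the
		 * whole span, so this request would be rejected (0). */
		printf("affinity ok: %d\n", toy_mask_subset(rd_span, new_mask));

		/* task_can_attach() model: a destination cpuset sharing no
		 * CPU with the span is refused outright (0). */
		printf("attach ok:   %d\n", toy_mask_intersects(rd_span, 0x30));
		return 0;
	}
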
5236 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
5256 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
5281 if (rq->rd) { in migration_call()
5282 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5294 if (rq->rd) { in migration_call()
5295 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
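
set_rq_online() and set_rq_offline() keep rd->online, the subset of the span that is actually up, in sync as CPUs come and go; on both hotplug transitions migration_call() first asserts that the CPU really belongs to the span. A toy model of that invariant (names and mask type are mine):

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t toy_cpumask;

	struct toy_root_domain {
		toy_cpumask span;	/* CPUs belonging to this root domain */
		toy_cpumask online;	/* subset of span that is currently up */
	};

	/* Models set_rq_online()/set_rq_offline(): hotplug only flips
	 * the online bit; span membership is asserted first, mirroring
	 * the BUG_ON() checks in migration_call(). */
	static void toy_set_rq_online(struct toy_root_domain *rd, int cpu)
	{
		assert(rd->span & (1ULL << cpu));
		rd->online |= 1ULL << cpu;
	}

	static void toy_set_rq_offline(struct toy_root_domain *rd, int cpu)
	{
		assert(rd->span & (1ULL << cpu));
		rd->online &= ~(1ULL << cpu);
	}

	int main(void)
	{
		struct toy_root_domain rd = { .span = 0x0f, .online = 0 };

		toy_set_rq_online(&rd, 2);
		toy_set_rq_offline(&rd, 2);
		return 0;
	}
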
5567 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); in free_rootdomain() local
5569 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
5570 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
5571 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
5572 free_cpumask_var(rd->rto_mask); in free_rootdomain()
5573 free_cpumask_var(rd->online); in free_rootdomain()
5574 free_cpumask_var(rd->span); in free_rootdomain()
5575 kfree(rd); in free_rootdomain()
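
free_rootdomain() is an RCU callback: it is handed only the embedded rcu_head and recovers the enclosing root_domain with container_of() before releasing the cpupri/cpudl state, the four cpumasks, and finally the structure itself. A self-contained sketch of that recovery step, with a stand-in rcu head since this runs in user space:

	#include <stddef.h>
	#include <stdlib.h>

	#define toy_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct toy_rcu_head {
		struct toy_rcu_head *next;	/* stand-in for struct rcu_head */
	};

	struct toy_root_domain {
		int refcount;
		struct toy_rcu_head rcu;	/* embedded, as in the kernel */
	};

	/* Models free_rootdomain(): the deferred-free callback receives
	 * only the embedded rcu head and recovers the enclosing root
	 * domain with container_of() before tearing it down. */
	static void toy_free_rootdomain(struct toy_rcu_head *rcu)
	{
		struct toy_root_domain *rd =
			toy_container_of(rcu, struct toy_root_domain, rcu);

		/* kernel order: cpupri, cpudl, each cpumask, then rd */
		free(rd);
	}

	int main(void)
	{
		struct toy_root_domain *rd = calloc(1, sizeof(*rd));

		if (rd)
			toy_free_rootdomain(&rd->rcu);
		return 0;
	}
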
5578 static void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
5585 if (rq->rd) { in rq_attach_root()
5586 old_rd = rq->rd; in rq_attach_root()
5602 atomic_inc(&rd->refcount); in rq_attach_root()
5603 rq->rd = rd; in rq_attach_root()
5605 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
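
rq_attach_root() re-homes a runqueue: it detaches from the old root domain (clearing the CPU from that span and dropping a reference, the last drop triggering the RCU free above), then takes a reference on the new domain and adds the CPU to its span. A simplified user-space model with C11 atomics; the runqueue locking and the RCU deferral are elided:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdlib.h>

	typedef uint64_t toy_cpumask;

	struct toy_root_domain {
		atomic_int refcount;
		toy_cpumask span;
	};

	struct toy_rq {
		int cpu;
		struct toy_root_domain *rd;
	};

	/* Models rq_attach_root(): move a runqueue to a new root domain
	 * while keeping refcounts and span masks consistent.  The kernel
	 * frees the old domain via call_rcu() once its count hits zero. */
	static void toy_rq_attach_root(struct toy_rq *rq,
				       struct toy_root_domain *rd)
	{
		struct toy_root_domain *old_rd = rq->rd;

		if (old_rd) {
			old_rd->span &= ~(1ULL << rq->cpu);
			if (atomic_fetch_sub(&old_rd->refcount, 1) == 1)
				free(old_rd);	/* last user gone */
		}

		atomic_fetch_add(&rd->refcount, 1);
		rq->rd = rd;
		rd->span |= 1ULL << rq->cpu;
	}

	int main(void)
	{
		struct toy_root_domain *rd = calloc(1, sizeof(*rd));
		struct toy_rq rq = { .cpu = 0, .rd = NULL };

		if (rd)
			toy_rq_attach_root(&rq, rd);	/* refcount 0 -> 1 */
		return 0;
	}
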
5615 static int init_rootdomain(struct root_domain *rd) in init_rootdomain() argument
5617 memset(rd, 0, sizeof(*rd)); in init_rootdomain()
5619 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
5621 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
5623 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
5625 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
5628 init_dl_bw(&rd->dl_bw); in init_rootdomain()
5629 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
5632 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
5637 free_cpumask_var(rd->rto_mask); in init_rootdomain()
5639 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
5641 free_cpumask_var(rd->online); in init_rootdomain()
5643 free_cpumask_var(rd->span); in init_rootdomain()
5663 struct root_domain *rd; in alloc_rootdomain() local
5665 rd = kmalloc(sizeof(*rd), GFP_KERNEL); in alloc_rootdomain()
5666 if (!rd) in alloc_rootdomain()
5669 if (init_rootdomain(rd) != 0) { in alloc_rootdomain()
5670 kfree(rd); in alloc_rootdomain()
5674 return rd; in alloc_rootdomain()
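
init_rootdomain() zeroes the structure, then allocates four cpumasks plus the cpudl/cpupri state, unwinding in reverse order through goto labels on failure; alloc_rootdomain() wraps that in a kmalloc and frees the shell if initialization fails. A compilable sketch of the same staged-unwind pattern (the cpudl/cpupri steps are omitted and all names are mine):

	#include <stdlib.h>

	struct toy_root_domain {
		unsigned long *span, *online, *dlo_mask, *rto_mask;
	};

	/* Models init_rootdomain()'s error handling: every allocation
	 * that succeeds gains a matching unwind label, so a later
	 * failure frees exactly what exists, in reverse order. */
	static int toy_init_rootdomain(struct toy_root_domain *rd)
	{
		if (!(rd->span = calloc(1, sizeof(*rd->span))))
			goto out;
		if (!(rd->online = calloc(1, sizeof(*rd->online))))
			goto free_span;
		if (!(rd->dlo_mask = calloc(1, sizeof(*rd->dlo_mask))))
			goto free_online;
		if (!(rd->rto_mask = calloc(1, sizeof(*rd->rto_mask))))
			goto free_dlo_mask;
		return 0;

	free_dlo_mask:
		free(rd->dlo_mask);
	free_online:
		free(rd->online);
	free_span:
		free(rd->span);
	out:
		return -1;
	}

	/* Models alloc_rootdomain(): allocate, initialize, undo on failure. */
	static struct toy_root_domain *toy_alloc_rootdomain(void)
	{
		struct toy_root_domain *rd = malloc(sizeof(*rd));

		if (!rd)
			return NULL;
		if (toy_init_rootdomain(rd) != 0) {
			free(rd);
			return NULL;
		}
		return rd;
	}

	int main(void)
	{
		struct toy_root_domain *rd = toy_alloc_rootdomain();

		return rd ? 0 : 1;
	}
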
5771 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
5808 rq_attach_root(rq, rd); in cpu_attach_domain()
5828 struct root_domain *rd; member
6098 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
6099 free_rootdomain(&d->rd->rcu); /* fall through */ in __free_domain_allocs()
6119 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
6120 if (!d->rd) in __visit_domain_allocation_hell()
6767 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
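
During a domain rebuild, __visit_domain_allocation_hell() allocates the candidate root domain up front, and __free_domain_allocs() releases it on the error path only when its refcount is still zero, meaning no runqueue was ever attached to it through cpu_attach_domain()/rq_attach_root(). A toy version of that guard (the kernel routes the actual free through RCU):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct toy_root_domain {
		atomic_int refcount;
	};

	/* Models __free_domain_allocs(): free the new root domain only
	 * if nothing attached to it, i.e. its refcount is still zero. */
	static void toy_free_domain_allocs(struct toy_root_domain *rd)
	{
		if (atomic_load(&rd->refcount) == 0)
			free(rd);
	}

	int main(void)
	{
		struct toy_root_domain *rd = calloc(1, sizeof(*rd));

		if (rd)
			toy_free_domain_allocs(rd);	/* refcount 0: freed */
		return 0;
	}
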
7215 rq->rd = NULL; in sched_init()