dl_b              275 kernel/sched/deadline.c 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
dl_b              279 kernel/sched/deadline.c 			raw_spin_lock(&dl_b->lock);
dl_b              280 kernel/sched/deadline.c 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
dl_b              282 kernel/sched/deadline.c 			raw_spin_unlock(&dl_b->lock);
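Recomposed, the four hits above (deadline.c lines 275-282) are the canonical bandwidth-release pattern: look up the dl_bw for the task's CPU, then drop the task's reserved bandwidth under the dl_bw lock. A sketch, with the wrapper name dl_release_task_bw invented for illustration; dl_bw_of() and dl_bw_cpus() resolve to the root domain's dl_bw and CPU count on SMP builds (per-runqueue dl_bw and a count of 1 on UP):

static void dl_release_task_bw(struct task_struct *p)
{
        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
        raw_spin_unlock(&dl_b->lock);
}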
dl_b              337 kernel/sched/deadline.c void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
dl_b              339 kernel/sched/deadline.c 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
dl_b              340 kernel/sched/deadline.c 	dl_b->dl_period = period;
dl_b              341 kernel/sched/deadline.c 	dl_b->dl_runtime = runtime;
dl_b              344 kernel/sched/deadline.c void init_dl_bw(struct dl_bw *dl_b)
dl_b              346 kernel/sched/deadline.c 	raw_spin_lock_init(&dl_b->lock);
dl_b              349 kernel/sched/deadline.c 		dl_b->bw = -1;
dl_b              351 kernel/sched/deadline.c 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
dl_b              353 kernel/sched/deadline.c 	dl_b->total_bw = 0;
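init_dl_bandwidth() and init_dl_bw() above seed the two control structures: dl_bandwidth carries the global period/runtime pair under its own lock, while each dl_bw gets its admission cap in bw (-1, meaning unlimited, when the global runtime is RUNTIME_INF) and starts with total_bw = 0. Bandwidths are fixed-point fractions of one CPU; a minimal runnable userspace model of to_ratio(), assuming the kernel's BW_SHIFT of 20:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20                     /* fractional bits, as in sched.h */
#define BW_UNIT  (1ULL << BW_SHIFT)     /* 1.0 CPU of bandwidth */

/* Model of to_ratio(): runtime/period in BW_SHIFT fixed point. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        /* Defaults: 950000us runtime per 1000000us period. */
        uint64_t bw = to_ratio(1000000, 950000);

        printf("bw = %llu (%.3f CPU)\n",
               (unsigned long long)bw, (double)bw / BW_UNIT);
        return 0;       /* prints: bw = 996147 (0.950 CPU) */
}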
dl_b              532 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_b              583 kernel/sched/deadline.c 	dl_b = &rq->rd->dl_bw;
dl_b              584 kernel/sched/deadline.c 	raw_spin_lock(&dl_b->lock);
dl_b              585 kernel/sched/deadline.c 	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
dl_b              586 kernel/sched/deadline.c 	raw_spin_unlock(&dl_b->lock);
dl_b              588 kernel/sched/deadline.c 	dl_b = &later_rq->rd->dl_bw;
dl_b              589 kernel/sched/deadline.c 	raw_spin_lock(&dl_b->lock);
dl_b              590 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
dl_b              591 kernel/sched/deadline.c 	raw_spin_unlock(&dl_b->lock);
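The block at lines 532-591 moves a task's bandwidth between root domains when the task itself has to move (here to a later_rq found in another domain): subtract from the old domain under its lock, then add to the new one under its lock. The two critical sections are sequential, never nested, so the two dl_bw locks impose no ordering on each other; the CPU count is cpumask_weight() of the respective rd->span because the root domain is already in hand, unlike the dl_bw_cpus() lookups elsewhere.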
dl_b             1297 kernel/sched/deadline.c 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
dl_b             1305 kernel/sched/deadline.c 		raw_spin_lock(&dl_b->lock);
dl_b             1306 kernel/sched/deadline.c 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
dl_b             1307 kernel/sched/deadline.c 		raw_spin_unlock(&dl_b->lock);
dl_b             2298 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_b             2304 kernel/sched/deadline.c 	dl_b = &rq->rd->dl_bw;
dl_b             2305 kernel/sched/deadline.c 	raw_spin_lock(&dl_b->lock);
dl_b             2307 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
dl_b             2309 kernel/sched/deadline.c 	raw_spin_unlock(&dl_b->lock);
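Lines 1297-1307 and 2298-2309 repeat the same pattern for other task transitions; every site that changes where a DEADLINE task's bandwidth is accounted uses the identical lock, __dl_sub()/__dl_add(), unlock sequence.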
dl_b             2471 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_b             2486 kernel/sched/deadline.c 		dl_b = dl_bw_of(cpu);
dl_b             2488 kernel/sched/deadline.c 		raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b             2489 kernel/sched/deadline.c 		if (new_bw < dl_b->total_bw)
dl_b             2491 kernel/sched/deadline.c 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
dl_b             2518 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_b             2533 kernel/sched/deadline.c 		dl_b = dl_bw_of(cpu);
dl_b             2535 kernel/sched/deadline.c 		raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b             2536 kernel/sched/deadline.c 		dl_b->bw = new_bw;
dl_b             2537 kernel/sched/deadline.c 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
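Lines 2471-2491 and 2518-2537 have the shape of the sysctl update path, sched_dl_global_validate() followed by sched_dl_do_global(): first every CPU's dl_bw is checked, under its lock with interrupts off, that the proposed per-CPU limit is not below bandwidth already admitted (new_bw < dl_b->total_bw fails with -EBUSY); only if validation passes everywhere is dl_b->bw overwritten. A condensed model of the two phases, with the function and parameter names invented for illustration (the kernel iterates CPUs, not an array of domains):

#include <stdbool.h>
#include <stdint.h>

struct dl_bw { uint64_t bw, total_bw; };        /* model, lock omitted */

/* Phase 1: refuse to lower the cap below already-admitted bandwidth. */
static bool dl_global_validate(const struct dl_bw *dom, int n, uint64_t new_bw)
{
        for (int i = 0; i < n; i++)
                if (new_bw < dom[i].total_bw)
                        return false;           /* -EBUSY in the kernel */
        return true;
}

/* Phase 2: apply the new cap everywhere. */
static void dl_do_global(struct dl_bw *dom, int n, uint64_t new_bw)
{
        for (int i = 0; i < n; i++)
                dom[i].bw = new_bw;
}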
dl_b             2555 kernel/sched/deadline.c 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
dl_b             2573 kernel/sched/deadline.c 	raw_spin_lock(&dl_b->lock);
dl_b             2576 kernel/sched/deadline.c 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
dl_b             2578 kernel/sched/deadline.c 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
dl_b             2579 kernel/sched/deadline.c 		__dl_add(dl_b, new_bw, cpus);
dl_b             2582 kernel/sched/deadline.c 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
dl_b             2590 kernel/sched/deadline.c 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
dl_b             2591 kernel/sched/deadline.c 		__dl_add(dl_b, new_bw, cpus);
dl_b             2602 kernel/sched/deadline.c 	raw_spin_unlock(&dl_b->lock);
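Lines 2555-2602 are the admission-control block executed when a task's scheduling parameters change (the shape of sched_dl_overflow()). Three cases are visible in the hits: a task entering SCHED_DEADLINE is tested with no old bandwidth, __dl_overflow(dl_b, cpus, 0, new_bw); a task already in the class is tested with its current reservation discounted, __dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw), and on success the old reservation is swapped for the new via __dl_sub()/__dl_add(). A self-contained model of the branching, assuming (as in the kernel) that leaving the class always succeeds; dl_admit() and its parameters are invented names:

#include <stdbool.h>
#include <stdint.h>

struct dl_bw { uint64_t bw, total_bw; };        /* model, lock omitted */

/* The inequality from sched.h lines 310-311. */
static bool dl_overflow(const struct dl_bw *b, int cpus,
                        uint64_t old_bw, uint64_t new_bw)
{
        return b->bw != (uint64_t)-1 &&
               b->bw * cpus < b->total_bw - old_bw + new_bw;
}

static int dl_admit(struct dl_bw *b, int cpus, bool was_dl, bool wants_dl,
                    uint64_t old_bw, uint64_t new_bw)
{
        int err = -16;                          /* -EBUSY */

        if (wants_dl && !was_dl && !dl_overflow(b, cpus, 0, new_bw)) {
                b->total_bw += new_bw;          /* __dl_add() */
                err = 0;
        } else if (wants_dl && was_dl &&
                   !dl_overflow(b, cpus, old_bw, new_bw)) {
                b->total_bw -= old_bw;          /* __dl_sub() */
                b->total_bw += new_bw;          /* __dl_add() */
                err = 0;
        } else if (!wants_dl && was_dl) {
                err = 0;                        /* leaving always succeeds */
        }
        return err;
}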
dl_b             2719 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_b             2727 kernel/sched/deadline.c 	dl_b = dl_bw_of(dest_cpu);
dl_b             2728 kernel/sched/deadline.c 	raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b             2730 kernel/sched/deadline.c 	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
dl_b             2740 kernel/sched/deadline.c 		__dl_add(dl_b, p->dl.dl_bw, cpus);
dl_b             2743 kernel/sched/deadline.c 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
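Lines 2719-2743 run the same test from the point of view of a destination CPU in another root domain (the cpuset-attach path): __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw) asks whether the destination domain can absorb the task's entire bandwidth as new load, and if it can, the bandwidth is reserved there immediately with __dl_add().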
dl_b             2773 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_b             2778 kernel/sched/deadline.c 	dl_b = dl_bw_of(cpu);
dl_b             2779 kernel/sched/deadline.c 	raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b             2781 kernel/sched/deadline.c 	overflow = __dl_overflow(dl_b, cpus, 0, 0);
dl_b             2782 kernel/sched/deadline.c 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
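The last deadline.c hit is the degenerate call __dl_overflow(dl_b, cpus, 0, 0): with nothing added or removed, it simply asks whether the bandwidth already admitted still fits under bw * cpus, which is exactly the question to answer when cpus is about to shrink, e.g. on CPU hotplug.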
dl_b              291 kernel/sched/sched.h static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
dl_b              294 kernel/sched/sched.h void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
dl_b              296 kernel/sched/sched.h 	dl_b->total_bw -= tsk_bw;
dl_b              297 kernel/sched/sched.h 	__dl_update(dl_b, (s32)tsk_bw / cpus);
dl_b              301 kernel/sched/sched.h void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
dl_b              303 kernel/sched/sched.h 	dl_b->total_bw += tsk_bw;
dl_b              304 kernel/sched/sched.h 	__dl_update(dl_b, -((s32)tsk_bw / cpus));
dl_b              308 kernel/sched/sched.h bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
dl_b              310 kernel/sched/sched.h 	return dl_b->bw != -1 &&
dl_b              311 kernel/sched/sched.h 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
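The sched.h hits above are the accounting core: __dl_sub()/__dl_add() keep total_bw in sync and hand each task's per-CPU share, (s32)tsk_bw / cpus with the sign flipped on add, to __dl_update(); __dl_overflow() is the single admission inequality, rejecting when bw * cpus < total_bw - old_bw + new_bw, with bw == -1 disabling the check entirely. A runnable arithmetic check of the inequality, reusing the BW_SHIFT = 20 fixed point from the earlier sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dl_bw { uint64_t bw, total_bw; };

/* sched.h lines 310-311, modeled verbatim. */
static bool dl_overflow(const struct dl_bw *b, int cpus,
                        uint64_t old_bw, uint64_t new_bw)
{
        return b->bw != (uint64_t)-1 &&
               b->bw * cpus < b->total_bw - old_bw + new_bw;
}

int main(void)
{
        /* Cap of 0.95 CPU each on 4 CPUs; 3.5 CPUs already admitted,
         * leaving 4 * 0.95 - 3.5 = 0.3 CPUs of headroom. */
        struct dl_bw b = { .bw = 996147, .total_bw = 3670016 };

        printf("add 0.25 CPU: overflow=%d\n",
               dl_overflow(&b, 4, 0, 262144));  /* 0: fits */
        printf("add 0.35 CPU: overflow=%d\n",
               dl_overflow(&b, 4, 0, 367002));  /* 1: rejected */
        return 0;
}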
dl_b              315 kernel/sched/sched.h extern void init_dl_bw(struct dl_bw *dl_b);
dl_b             1884 kernel/sched/sched.h extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
dl_b             2225 kernel/sched/sched.h void __dl_update(struct dl_bw *dl_b, s64 bw)
dl_b             2227 kernel/sched/sched.h 	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
dl_b             2240 kernel/sched/sched.h void __dl_update(struct dl_bw *dl_b, s64 bw)
dl_b             2242 kernel/sched/sched.h 	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
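__dl_update() is what gives the division by cpus its meaning, and the two container_of() hits show its two builds: the SMP variant recovers the enclosing root_domain, the UP variant the enclosing dl_rq. On SMP the (already sign-flipped) per-CPU share is spread across the active CPUs of the domain's span, feeding each dl_rq's extra_bw used by GRUB reclaiming; on UP the single dl_rq's extra_bw is adjusted directly. A sketch of the SMP body from memory of the same kernel era, not verbatim (the real function also asserts that sched-RCU is held):

static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
{
        struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
        int i;

        /* Spread the per-CPU share over every active CPU in the span. */
        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpu_rq(i)->dl.extra_bw += bw;
}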