dl_bw             510 include/linux/sched.h 	u64				dl_bw;		/* dl_runtime / dl_period		*/
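
The per-task dl_bw above stores dl_runtime / dl_period in 2^BW_SHIFT fixed point (BW_SHIFT is 20). For context, the struct dl_bw pool it is charged against, as found in kernel/sched/sched.h of this era (quoted as a reading aid, not a patch):

    struct dl_bw {
            raw_spinlock_t  lock;
            u64             bw;        /* max usable bandwidth, per CPU */
            u64             total_bw;  /* bandwidth currently allocated */
    };

bw defaults to 95% of BW_UNIT (from sysctl_sched_rt_runtime / sysctl_sched_rt_period); a value of (u64)-1 disables admission control.
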
dl_bw            4924 kernel/sched/core.c 			    rq->rd->dl_bw.bw == 0) {
dl_bw              47 kernel/sched/deadline.c static inline struct dl_bw *dl_bw_of(int i)
dl_bw              51 kernel/sched/deadline.c 	return &cpu_rq(i)->rd->dl_bw;
dl_bw              67 kernel/sched/deadline.c static inline struct dl_bw *dl_bw_of(int i)
dl_bw              69 kernel/sched/deadline.c 	return &cpu_rq(i)->dl.dl_bw;
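
dl_bw_of() is listed twice because it is built under different config options: with CONFIG_SMP the pool lives in the root domain shared by a group of CPUs, while UP builds keep it directly in the single dl_rq. A sketch with the guards restored (reconstructed, not quoted; the SMP variant also carries an RCU lockdep assertion):

    #ifdef CONFIG_SMP
    static inline struct dl_bw *dl_bw_of(int i)
    {
            return &cpu_rq(i)->rd->dl_bw;   /* per-root-domain pool */
    }
    #else
    static inline struct dl_bw *dl_bw_of(int i)
    {
            return &cpu_rq(i)->dl.dl_bw;    /* per-runqueue pool */
    }
    #endif
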
dl_bw              79 kernel/sched/deadline.c void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_bw              84 kernel/sched/deadline.c 	dl_rq->running_bw += dl_bw;
dl_bw              92 kernel/sched/deadline.c void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_bw              97 kernel/sched/deadline.c 	dl_rq->running_bw -= dl_bw;
dl_bw             106 kernel/sched/deadline.c void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_bw             111 kernel/sched/deadline.c 	dl_rq->this_bw += dl_bw;
dl_bw             116 kernel/sched/deadline.c void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_bw             121 kernel/sched/deadline.c 	dl_rq->this_bw -= dl_bw;
dl_bw             132 kernel/sched/deadline.c 		__add_rq_bw(dl_se->dl_bw, dl_rq);
dl_bw             139 kernel/sched/deadline.c 		__sub_rq_bw(dl_se->dl_bw, dl_rq);
dl_bw             146 kernel/sched/deadline.c 		__add_running_bw(dl_se->dl_bw, dl_rq);
dl_bw             153 kernel/sched/deadline.c 		__sub_running_bw(dl_se->dl_bw, dl_rq);
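
Of the two aggregates updated at lines 79-121, this_bw sums the dl_bw of every -deadline task queued on the rq, while running_bw sums only the non-blocked ("active") subset that GRUB reclaiming consults; the wrappers at lines 132-153 feed them dl_se->dl_bw. The add side looks roughly like this in kernels of this era (the sub side mirrors it):

    static inline
    void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
            u64 old = dl_rq->running_bw;

            lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
            dl_rq->running_bw += dl_bw;
            SCHED_WARN_ON(dl_rq->running_bw < old);            /* overflow  */
            SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); /* invariant */
            /* kick cpufreq (see the comment in kernel/sched/sched.h) */
            cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
    }
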
dl_bw             179 kernel/sched/deadline.c 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
dl_bw             275 kernel/sched/deadline.c 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
dl_bw             280 kernel/sched/deadline.c 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
dl_bw             344 kernel/sched/deadline.c void init_dl_bw(struct dl_bw *dl_b)
dl_bw             368 kernel/sched/deadline.c 	init_dl_bw(&dl_rq->dl_bw);
dl_bw             532 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_bw             583 kernel/sched/deadline.c 	dl_b = &rq->rd->dl_bw;
dl_bw             585 kernel/sched/deadline.c 	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
dl_bw             588 kernel/sched/deadline.c 	dl_b = &later_rq->rd->dl_bw;
dl_bw             590 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
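
Lines 532-590 sit in dl_task_offline_migration(): when its CPU goes offline, a -deadline task's bandwidth is returned to the old root domain's pool and claimed from the new one. The handover with the surrounding migration code stripped (a sketch, not the verbatim function):

    raw_spin_lock(&rq->rd->dl_bw.lock);
    __dl_sub(&rq->rd->dl_bw, p->dl.dl_bw, cpumask_weight(rq->rd->span));
    raw_spin_unlock(&rq->rd->dl_bw.lock);

    raw_spin_lock(&later_rq->rd->dl_bw.lock);
    __dl_add(&later_rq->rd->dl_bw, p->dl.dl_bw,
             cpumask_weight(later_rq->rd->span));
    raw_spin_unlock(&later_rq->rd->dl_bw.lock);
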
dl_bw            1157 kernel/sched/deadline.c 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
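
Line 1157 is the lower clamp in GRUB reclaiming: scaled runtime consumption may never drop below the task's own bandwidth divided by the global cap. Worked numbers, assuming RATIO_SHIFT == 8 and the default 95% cap (both match kernel/sched/sched.h of this era):

    /* bw_ratio ~= (1 / 0.95) << RATIO_SHIFT = 269                    */
    /* 10ms/100ms task: dl_bw = 0.1 << BW_SHIFT = 104857              */
    /* u_act_min = (104857 * 269) >> 8 = 110181 ~= (0.1 / 0.95) << 20 */
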
dl_bw            1297 kernel/sched/deadline.c 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
dl_bw            1306 kernel/sched/deadline.c 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
dl_bw            2248 kernel/sched/deadline.c 		struct dl_bw *src_dl_b;
dl_bw            2257 kernel/sched/deadline.c 		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
dl_bw            2298 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_bw            2304 kernel/sched/deadline.c 	dl_b = &rq->rd->dl_bw;
dl_bw            2307 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
dl_bw            2319 kernel/sched/deadline.c 	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
dl_bw            2320 kernel/sched/deadline.c 	rd->dl_bw.total_bw = 0;
dl_bw            2321 kernel/sched/deadline.c 	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
dl_bw            2471 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_bw            2518 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_bw            2555 kernel/sched/deadline.c 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
dl_bw            2565 kernel/sched/deadline.c 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
dl_bw            2578 kernel/sched/deadline.c 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
dl_bw            2582 kernel/sched/deadline.c 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
dl_bw            2590 kernel/sched/deadline.c 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
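
Lines 2555-2590 belong to sched_dl_overflow(), the admission test run on every policy change. A condensed sketch of its three arms (locking, the cpus computation and the dl_change_utilization() call are omitted; a reading aid, not the verbatim function):

    if (dl_policy(policy) && !task_has_dl_policy(p) &&
        !__dl_overflow(dl_b, cpus, 0, new_bw)) {
            __dl_add(dl_b, new_bw, cpus);       /* admit a new DL task */
            err = 0;
    } else if (dl_policy(policy) && task_has_dl_policy(p) &&
               !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
            __dl_sub(dl_b, p->dl.dl_bw, cpus);  /* change parameters */
            __dl_add(dl_b, new_bw, cpus);
            err = 0;
    } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
            __dl_sub(dl_b, p->dl.dl_bw, cpus);  /* leave SCHED_DEADLINE */
            err = 0;
    }
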
dl_bw            2623 kernel/sched/deadline.c 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
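
to_ratio(), called at line 2623, is defined in kernel/sched/core.c and produces the 2^BW_SHIFT fixed point used throughout this listing. Its body in kernels of this era is essentially:

    unsigned long to_ratio(u64 period, u64 runtime)
    {
            if (runtime == RUNTIME_INF)
                    return BW_UNIT;

            /* Returning 0 for a zero period is safe for all callers. */
            if (period == 0)
                    return 0;

            return div64_u64(runtime << BW_SHIFT, period);
    }

    /* Example: runtime = 10 ms, period = 100 ms:
     *   (10000000 << 20) / 100000000 = 104857 ~= 0.1 * 2^20
     */
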
dl_bw            2693 kernel/sched/deadline.c 	dl_se->dl_bw			= 0;
dl_bw            2719 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_bw            2730 kernel/sched/deadline.c 	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
dl_bw            2740 kernel/sched/deadline.c 		__dl_add(dl_b, p->dl.dl_bw, cpus);
dl_bw            2753 kernel/sched/deadline.c 	struct dl_bw *cur_dl_b;
dl_bw            2773 kernel/sched/deadline.c 	struct dl_bw *dl_b;
dl_bw             593 kernel/sched/debug.c 	struct dl_bw *dl_bw;
dl_bw             604 kernel/sched/debug.c 	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
dl_bw             606 kernel/sched/debug.c 	dl_bw = &dl_rq->dl_bw;
dl_bw             608 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
dl_bw             609 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
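
The two SEQ_printf() calls above surface the pool in /proc/sched_debug. A representative excerpt (values assume the default 95% cap and a single 10ms/100ms admitted task; the dl_rq header comes from the surrounding print_dl_rq() output):

    dl_rq[0]:
      .dl_bw->bw                     : 996147
      .dl_bw->total_bw               : 104857

996147 is to_ratio(1000000, 950000), i.e. 0.95 << BW_SHIFT rounded down.
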
dl_bw             291 kernel/sched/sched.h static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
dl_bw             294 kernel/sched/sched.h void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
dl_bw             301 kernel/sched/sched.h void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
dl_bw             308 kernel/sched/sched.h bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
dl_bw             315 kernel/sched/sched.h extern void init_dl_bw(struct dl_bw *dl_b);
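
The helpers declared at sched.h lines 294-308 are one-liners built on __dl_update(); their bodies in kernels of this era (quoted as a reading aid; newer trees differ):

    static inline
    void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
            dl_b->total_bw -= tsk_bw;
            __dl_update(dl_b, (s32)tsk_bw / cpus);
    }

    static inline
    void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
            dl_b->total_bw += tsk_bw;
            __dl_update(dl_b, -((s32)tsk_bw / cpus));
    }

    static inline bool
    __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
    {
            /* bw == -1 means admission control is disabled */
            return dl_b->bw != -1 &&
                   dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
    }
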
dl_bw             666 kernel/sched/sched.h 	struct dl_bw		dl_bw;
dl_bw             761 kernel/sched/sched.h 	struct dl_bw		dl_bw;
dl_bw            2225 kernel/sched/sched.h void __dl_update(struct dl_bw *dl_b, s64 bw)
dl_bw            2227 kernel/sched/sched.h 	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
dl_bw            2240 kernel/sched/sched.h void __dl_update(struct dl_bw *dl_b, s64 bw)
dl_bw            2242 kernel/sched/sched.h 	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
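
The container_of() calls at lines 2227 and 2242 resolve the same struct dl_bw pointer back to its CONFIG_SMP and UP containers; both variants then fold the signed bandwidth delta into extra_bw, the slack that GRUB reclaiming may consume. Simplified from this era's sources (the SMP version also asserts that sched-RCU is held):

    #ifdef CONFIG_SMP
    static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
    {
            struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
            int i;

            for_each_cpu_and(i, rd->span, cpu_active_mask)
                    cpu_rq(i)->dl.extra_bw += bw;
    }
    #else
    static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
    {
            struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

            dl->extra_bw += bw;
    }
    #endif
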
dl_bw             507 kernel/sched/topology.c 	init_dl_bw(&rd->dl_bw);