dl_rq              28 kernel/sched/deadline.c static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
dl_rq              30 kernel/sched/deadline.c 	return container_of(dl_rq, struct rq, dl);
dl_rq              33 kernel/sched/deadline.c static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
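The two helpers above translate between an embedded struct dl_rq and its enclosing per-CPU runqueue: rq_of_dl_rq() recovers the struct rq that contains the dl_rq as its dl member (see the sched.h line 889 entry near the end of this listing), while dl_rq_of_se() goes from a scheduling entity to the dl_rq it is queued on. A minimal user-space sketch of the container_of pattern behind rq_of_dl_rq(); the struct layouts here are invented for illustration and are not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq { unsigned long dl_nr_running; };
struct rq    { int cpu; struct dl_rq dl; };	/* dl_rq embedded as rq->dl */

static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
	struct rq rq = { .cpu = 3 };

	/* Round-trip: a pointer to rq.dl maps back to rq itself. */
	printf("cpu = %d\n", rq_of_dl_rq(&rq.dl)->cpu);
	return 0;
}
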
dl_rq              79 kernel/sched/deadline.c void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_rq              81 kernel/sched/deadline.c 	u64 old = dl_rq->running_bw;
dl_rq              83 kernel/sched/deadline.c 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq              84 kernel/sched/deadline.c 	dl_rq->running_bw += dl_bw;
dl_rq              85 kernel/sched/deadline.c 	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
dl_rq              86 kernel/sched/deadline.c 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
dl_rq              88 kernel/sched/deadline.c 	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
dl_rq              92 kernel/sched/deadline.c void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_rq              94 kernel/sched/deadline.c 	u64 old = dl_rq->running_bw;
dl_rq              96 kernel/sched/deadline.c 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq              97 kernel/sched/deadline.c 	dl_rq->running_bw -= dl_bw;
dl_rq              98 kernel/sched/deadline.c 	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
dl_rq              99 kernel/sched/deadline.c 	if (dl_rq->running_bw > old)
dl_rq             100 kernel/sched/deadline.c 		dl_rq->running_bw = 0;
dl_rq             102 kernel/sched/deadline.c 	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
dl_rq             106 kernel/sched/deadline.c void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_rq             108 kernel/sched/deadline.c 	u64 old = dl_rq->this_bw;
dl_rq             110 kernel/sched/deadline.c 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq             111 kernel/sched/deadline.c 	dl_rq->this_bw += dl_bw;
dl_rq             112 kernel/sched/deadline.c 	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
dl_rq             116 kernel/sched/deadline.c void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_rq             118 kernel/sched/deadline.c 	u64 old = dl_rq->this_bw;
dl_rq             120 kernel/sched/deadline.c 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq             121 kernel/sched/deadline.c 	dl_rq->this_bw -= dl_bw;
dl_rq             122 kernel/sched/deadline.c 	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
dl_rq             123 kernel/sched/deadline.c 	if (dl_rq->this_bw > old)
dl_rq             124 kernel/sched/deadline.c 		dl_rq->this_bw = 0;
dl_rq             125 kernel/sched/deadline.c 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
dl_rq             129 kernel/sched/deadline.c void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             132 kernel/sched/deadline.c 		__add_rq_bw(dl_se->dl_bw, dl_rq);
dl_rq             136 kernel/sched/deadline.c void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             139 kernel/sched/deadline.c 		__sub_rq_bw(dl_se->dl_bw, dl_rq);
dl_rq             143 kernel/sched/deadline.c void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             146 kernel/sched/deadline.c 		__add_running_bw(dl_se->dl_bw, dl_rq);
dl_rq             150 kernel/sched/deadline.c void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             153 kernel/sched/deadline.c 		__sub_running_bw(dl_se->dl_bw, dl_rq);
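The __add_*/__sub_* helpers and their add_*/sub_* wrappers above form the per-runqueue bandwidth accounting: this_bw accumulates the bandwidth of every deadline task attached to the runqueue, running_bw only that of the currently contending ones (hence the running_bw > this_bw warnings), and the wrappers guard the raw helpers with a condition elided from the listing. Because the counters are unsigned, the subtract paths detect wrap-around by comparing against the saved old value and clamp to zero instead of keeping a wrapped result. A stand-alone sketch of that clamp-on-underflow pattern, using mock types rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* Mock of the two bandwidth counters kept in struct dl_rq. */
struct dl_rq_mock {
	uint64_t running_bw;	/* bandwidth of contending tasks   */
	uint64_t this_bw;	/* bandwidth of all attached tasks */
};

/* Mirror of __sub_running_bw(): clamp to 0 on unsigned wrap-around. */
static void sub_running_bw_mock(uint64_t dl_bw, struct dl_rq_mock *dl_rq)
{
	uint64_t old = dl_rq->running_bw;

	dl_rq->running_bw -= dl_bw;
	if (dl_rq->running_bw > old)	/* wrapped, i.e. underflow */
		dl_rq->running_bw = 0;
}

int main(void)
{
	struct dl_rq_mock rq = { .running_bw = 100, .this_bw = 300 };

	sub_running_bw_mock(250, &rq);	/* would go negative ...     */
	printf("running_bw = %llu\n",	/* ... so it is clamped to 0 */
	       (unsigned long long)rq.running_bw);
	return 0;
}
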
dl_rq             241 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq             242 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
dl_rq             273 kernel/sched/deadline.c 			sub_running_bw(dl_se, dl_rq);
dl_rq             295 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq             305 kernel/sched/deadline.c 		add_rq_bw(dl_se, dl_rq);
dl_rq             326 kernel/sched/deadline.c 		add_running_bw(dl_se, dl_rq);
dl_rq             330 kernel/sched/deadline.c static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
dl_rq             334 kernel/sched/deadline.c 	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
dl_rq             356 kernel/sched/deadline.c void init_dl_rq(struct dl_rq *dl_rq)
dl_rq             358 kernel/sched/deadline.c 	dl_rq->root = RB_ROOT_CACHED;
dl_rq             362 kernel/sched/deadline.c 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
dl_rq             364 kernel/sched/deadline.c 	dl_rq->dl_nr_migratory = 0;
dl_rq             365 kernel/sched/deadline.c 	dl_rq->overloaded = 0;
dl_rq             366 kernel/sched/deadline.c 	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
dl_rq             368 kernel/sched/deadline.c 	init_dl_bw(&dl_rq->dl_bw);
dl_rq             371 kernel/sched/deadline.c 	dl_rq->running_bw = 0;
dl_rq             372 kernel/sched/deadline.c 	dl_rq->this_bw = 0;
dl_rq             373 kernel/sched/deadline.c 	init_dl_rq_bw_ratio(dl_rq);
dl_rq             408 kernel/sched/deadline.c static void update_dl_migration(struct dl_rq *dl_rq)
dl_rq             410 kernel/sched/deadline.c 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
dl_rq             411 kernel/sched/deadline.c 		if (!dl_rq->overloaded) {
dl_rq             412 kernel/sched/deadline.c 			dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq             413 kernel/sched/deadline.c 			dl_rq->overloaded = 1;
dl_rq             415 kernel/sched/deadline.c 	} else if (dl_rq->overloaded) {
dl_rq             416 kernel/sched/deadline.c 		dl_clear_overload(rq_of_dl_rq(dl_rq));
dl_rq             417 kernel/sched/deadline.c 		dl_rq->overloaded = 0;
dl_rq             421 kernel/sched/deadline.c static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             426 kernel/sched/deadline.c 		dl_rq->dl_nr_migratory++;
dl_rq             428 kernel/sched/deadline.c 	update_dl_migration(dl_rq);
dl_rq             431 kernel/sched/deadline.c static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             436 kernel/sched/deadline.c 		dl_rq->dl_nr_migratory--;
dl_rq             438 kernel/sched/deadline.c 	update_dl_migration(dl_rq);
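update_dl_migration() above maintains the runqueue's overload state with a simple hysteresis: the runqueue is flagged overloaded only while it holds more than one queued deadline task and at least one of them may migrate, and dl_set_overload()/dl_clear_overload() are invoked exactly on the transitions. A user-space mock of that logic, with the overload notifications replaced by prints:

#include <stdio.h>

/* Mock of the migration-related fields of struct dl_rq. */
struct dl_rq_mock {
	unsigned long dl_nr_migratory;
	unsigned long dl_nr_running;
	int overloaded;
};

static void update_dl_migration_mock(struct dl_rq_mock *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			printf("set overload\n");	/* dl_set_overload()   */
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		printf("clear overload\n");		/* dl_clear_overload() */
		dl_rq->overloaded = 0;
	}
}

int main(void)
{
	struct dl_rq_mock rq = { .dl_nr_migratory = 1, .dl_nr_running = 2 };

	update_dl_migration_mock(&rq);	/* -> set overload   */
	rq.dl_nr_running = 1;
	update_dl_migration_mock(&rq);	/* -> clear overload */
	return 0;
}
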
dl_rq             447 kernel/sched/deadline.c 	struct dl_rq *dl_rq = &rq->dl;
dl_rq             448 kernel/sched/deadline.c 	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
dl_rq             468 kernel/sched/deadline.c 		dl_rq->earliest_dl.next = p->dl.deadline;
dl_rq             472 kernel/sched/deadline.c 			       &dl_rq->pushable_dl_tasks_root, leftmost);
dl_rq             477 kernel/sched/deadline.c 	struct dl_rq *dl_rq = &rq->dl;
dl_rq             482 kernel/sched/deadline.c 	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
dl_rq             487 kernel/sched/deadline.c 			dl_rq->earliest_dl.next = rb_entry(next_node,
dl_rq             492 kernel/sched/deadline.c 	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
dl_rq             612 kernel/sched/deadline.c void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             617 kernel/sched/deadline.c void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq             657 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq             658 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
dl_rq             701 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq             702 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
dl_rq             887 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq             888 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
dl_rq            1334 kernel/sched/deadline.c static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
dl_rq            1336 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
dl_rq            1338 kernel/sched/deadline.c 	if (dl_rq->earliest_dl.curr == 0 ||
dl_rq            1339 kernel/sched/deadline.c 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
dl_rq            1340 kernel/sched/deadline.c 		dl_rq->earliest_dl.curr = deadline;
dl_rq            1345 kernel/sched/deadline.c static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
dl_rq            1347 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
dl_rq            1353 kernel/sched/deadline.c 	if (!dl_rq->dl_nr_running) {
dl_rq            1354 kernel/sched/deadline.c 		dl_rq->earliest_dl.curr = 0;
dl_rq            1355 kernel/sched/deadline.c 		dl_rq->earliest_dl.next = 0;
dl_rq            1358 kernel/sched/deadline.c 		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
dl_rq            1362 kernel/sched/deadline.c 		dl_rq->earliest_dl.curr = entry->deadline;
dl_rq            1369 kernel/sched/deadline.c static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
dl_rq            1370 kernel/sched/deadline.c static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
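inc_dl_deadline() and dec_dl_deadline() above keep dl_rq->earliest_dl.curr as a cache of the earliest queued deadline: a newly enqueued deadline replaces it only if dl_time_before() says it is earlier, and on dequeue the cache is refreshed from the leftmost rbtree node or zeroed once the runqueue empties. dl_time_before() is defined outside this listing; it is commonly implemented as a signed comparison of the unsigned difference so the result stays correct across u64 wrap-around, as in this stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is earlier than b", in the style of dl_time_before(). */
static int dl_time_before_mock(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap  = UINT64_MAX - 10;	/* clock about to wrap */
	uint64_t after_wrap = 5;		/* clock after wrapping */

	/* A plain '<' would get this backwards; the signed trick does not. */
	printf("%d\n", dl_time_before_mock(near_wrap, after_wrap));	/* 1 */
	printf("%d\n", dl_time_before_mock(after_wrap, near_wrap));	/* 0 */
	return 0;
}
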
dl_rq            1375 kernel/sched/deadline.c void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq            1381 kernel/sched/deadline.c 	dl_rq->dl_nr_running++;
dl_rq            1382 kernel/sched/deadline.c 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
dl_rq            1384 kernel/sched/deadline.c 	inc_dl_deadline(dl_rq, deadline);
dl_rq            1385 kernel/sched/deadline.c 	inc_dl_migration(dl_se, dl_rq);
dl_rq            1389 kernel/sched/deadline.c void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_rq            1394 kernel/sched/deadline.c 	WARN_ON(!dl_rq->dl_nr_running);
dl_rq            1395 kernel/sched/deadline.c 	dl_rq->dl_nr_running--;
dl_rq            1396 kernel/sched/deadline.c 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
dl_rq            1398 kernel/sched/deadline.c 	dec_dl_deadline(dl_rq, dl_se->deadline);
dl_rq            1399 kernel/sched/deadline.c 	dec_dl_migration(dl_se, dl_rq);
dl_rq            1404 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq            1405 kernel/sched/deadline.c 	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
dl_rq            1424 kernel/sched/deadline.c 	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
dl_rq            1426 kernel/sched/deadline.c 	inc_dl_tasks(dl_se, dl_rq);
dl_rq            1431 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_rq            1436 kernel/sched/deadline.c 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
dl_rq            1439 kernel/sched/deadline.c 	dec_dl_tasks(dl_se, dl_rq);
dl_rq            1766 kernel/sched/deadline.c 						   struct dl_rq *dl_rq)
dl_rq            1768 kernel/sched/deadline.c 	struct rb_node *left = rb_first_cached(&dl_rq->root);
dl_rq            1780 kernel/sched/deadline.c 	struct dl_rq *dl_rq = &rq->dl;
dl_rq            1788 kernel/sched/deadline.c 	dl_se = pick_next_dl_entity(rq, dl_rq);
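__enqueue_dl_entity() keeps the rbtree rooted at dl_rq->root ordered by absolute deadline with its leftmost node cached, so pick_next_dl_entity() only has to take rb_first_cached(), i.e. the earliest deadline, which is exactly the EDF rule. A stand-alone sketch of that selection rule; a sorted list stands in for the rbtree purely to keep the example short:

#include <stdint.h>
#include <stdio.h>

/* Toy deadline entity: the kernel keys its rbtree on this field. */
struct dl_entity_mock {
	uint64_t deadline;
	struct dl_entity_mock *next;
};

/* Insert in deadline order (earliest first), mirroring the rbtree walk. */
static void enqueue(struct dl_entity_mock **head, struct dl_entity_mock *se)
{
	while (*head && (*head)->deadline <= se->deadline)
		head = &(*head)->next;
	se->next = *head;
	*head = se;
}

/* pick_next: the front of the queue has the earliest deadline (EDF). */
static struct dl_entity_mock *pick_next(struct dl_entity_mock *head)
{
	return head;
}

int main(void)
{
	struct dl_entity_mock a = { .deadline = 300 };
	struct dl_entity_mock b = { .deadline = 100 };
	struct dl_entity_mock c = { .deadline = 200 };
	struct dl_entity_mock *q = NULL;

	enqueue(&q, &a);
	enqueue(&q, &b);
	enqueue(&q, &c);
	printf("next deadline = %llu\n",
	       (unsigned long long)pick_next(q)->deadline);	/* 100 */
	return 0;
}
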
dl_rq            2502 kernel/sched/deadline.c void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
dl_rq            2505 kernel/sched/deadline.c 		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
dl_rq            2506 kernel/sched/deadline.c 		dl_rq->extra_bw = 1 << BW_SHIFT;
dl_rq            2508 kernel/sched/deadline.c 		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
dl_rq            2510 kernel/sched/deadline.c 		dl_rq->extra_bw = to_ratio(global_rt_period(),
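init_dl_rq_bw_ratio() seeds two fixed-point values from the global rt bandwidth knobs: bw_ratio, roughly rt_period/rt_runtime shifted into RATIO_SHIFT precision, and extra_bw, roughly rt_runtime/rt_period in BW_SHIFT precision (in the kernel BW_SHIFT is 20 and RATIO_SHIFT is 8). A user-space sketch of the arithmetic, assuming to_ratio(period, runtime) scales runtime/period by 2^BW_SHIFT and using the default 950000us runtime per 1000000us period:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20	/* fixed-point precision of bandwidth values */
#define RATIO_SHIFT	8	/* coarser precision used for bw_ratio       */

/* Assumed shape of the kernel's to_ratio(): runtime/period << BW_SHIFT. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t rt_runtime = 950000;	/* default sched_rt_runtime_us */
	uint64_t rt_period  = 1000000;	/* default sched_rt_period_us  */

	/* period/runtime in RATIO_SHIFT fixed point (~1.05 -> ~269). */
	uint64_t bw_ratio = to_ratio(rt_runtime, rt_period) >>
			    (BW_SHIFT - RATIO_SHIFT);
	/* runtime/period in BW_SHIFT fixed point (~0.95 -> ~996147). */
	uint64_t extra_bw = to_ratio(rt_period, rt_runtime);

	printf("bw_ratio = %llu, extra_bw = %llu\n",
	       (unsigned long long)bw_ratio, (unsigned long long)extra_bw);
	return 0;
}
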
dl_rq             591 kernel/sched/debug.c void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
dl_rq             599 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
dl_rq             606 kernel/sched/debug.c 	dl_bw = &dl_rq->dl_bw;
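print_dl_rq() in kernel/sched/debug.c dumps the dl_rq fields one per line through a small stringification macro whose body appears above: #x supplies the printed field name and dl_rq->x its value. The same pattern in stand-alone form, with SEQ_printf() replaced by plain printf():

#include <stdio.h>

struct dl_rq_mock { unsigned long dl_nr_running, running_bw; };

/* #x stringifies the field name, dl_rq->x reads its value. */
#define P(x) \
	printf("  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

int main(void)
{
	struct dl_rq_mock mock = { .dl_nr_running = 2, .running_bw = 996147 };
	struct dl_rq_mock *dl_rq = &mock;

	P(dl_nr_running);
	P(running_bw);
	return 0;
}
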
dl_rq             889 kernel/sched/sched.h 	struct dl_rq		dl;
dl_rq            1887 kernel/sched/sched.h extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
dl_rq            2189 kernel/sched/sched.h extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
dl_rq            2201 kernel/sched/sched.h extern void init_dl_rq(struct dl_rq *dl_rq);
dl_rq            2242 kernel/sched/sched.h 	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);