dl_se             120 kernel/sched/cpudeadline.c 	const struct sched_dl_entity *dl_se = &p->dl;
dl_se             131 kernel/sched/cpudeadline.c 		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
dl_se              23 kernel/sched/deadline.c static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
dl_se              25 kernel/sched/deadline.c 	return container_of(dl_se, struct task_struct, dl);
dl_se              33 kernel/sched/deadline.c static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
dl_se              35 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
dl_se              41 kernel/sched/deadline.c static inline int on_dl_rq(struct sched_dl_entity *dl_se)
dl_se              43 kernel/sched/deadline.c 	return !RB_EMPTY_NODE(&dl_se->rb_node);
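
The three helpers above recover context from an embedded sched_dl_entity: dl_task_of() maps the entity back to its owning task_struct with container_of(), dl_rq_of_se() continues on to the runqueue, and on_dl_rq() tests rbtree membership. A minimal user-space sketch of the container_of() idiom, with illustrative stand-in types rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* Same pointer arithmetic as the kernel's container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_dl_entity { long deadline; };
    struct toy_task { int pid; struct toy_dl_entity dl; };

    static struct toy_task *toy_dl_task_of(struct toy_dl_entity *dl_se)
    {
            return container_of(dl_se, struct toy_task, dl);
    }

    int main(void)
    {
            struct toy_task t = { .pid = 42 };
            printf("%d\n", toy_dl_task_of(&t.dl)->pid);  /* prints 42 */
            return 0;
    }
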
dl_se             129 kernel/sched/deadline.c void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             131 kernel/sched/deadline.c 	if (!dl_entity_is_special(dl_se))
dl_se             132 kernel/sched/deadline.c 		__add_rq_bw(dl_se->dl_bw, dl_rq);
dl_se             136 kernel/sched/deadline.c void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             138 kernel/sched/deadline.c 	if (!dl_entity_is_special(dl_se))
dl_se             139 kernel/sched/deadline.c 		__sub_rq_bw(dl_se->dl_bw, dl_rq);
dl_se             143 kernel/sched/deadline.c void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             145 kernel/sched/deadline.c 	if (!dl_entity_is_special(dl_se))
dl_se             146 kernel/sched/deadline.c 		__add_running_bw(dl_se->dl_bw, dl_rq);
dl_se             150 kernel/sched/deadline.c void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             152 kernel/sched/deadline.c 	if (!dl_entity_is_special(dl_se))
dl_se             153 kernel/sched/deadline.c 		__sub_running_bw(dl_se->dl_bw, dl_rq);
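
All four wrappers above share one pattern: bandwidth accounting is skipped entirely for "special" entities (dl_entity_is_special(), the SCHED_FLAG_SUGOV case near the end of this listing), otherwise dl_se->dl_bw is forwarded to the raw __add_*/__sub_* helper. A toy model of those underlying counters, assuming overflow/underflow checks like the kernel's (types and names are stand-ins):

    #include <assert.h>
    #include <stdint.h>

    struct toy_dl_rq { uint64_t running_bw; };

    static void toy_add_running_bw(uint64_t dl_bw, struct toy_dl_rq *dl_rq)
    {
            uint64_t old = dl_rq->running_bw;

            dl_rq->running_bw += dl_bw;
            assert(dl_rq->running_bw >= old);   /* overflow would be a bug */
    }

    static void toy_sub_running_bw(uint64_t dl_bw, struct toy_dl_rq *dl_rq)
    {
            uint64_t old = dl_rq->running_bw;

            dl_rq->running_bw -= dl_bw;
            if (dl_rq->running_bw > old)        /* underflow: clamp to zero */
                    dl_rq->running_bw = 0;
    }
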
dl_se             239 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se             240 kernel/sched/deadline.c 	struct hrtimer *timer = &dl_se->inactive_timer;
dl_se             241 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se             249 kernel/sched/deadline.c 	if (dl_se->dl_runtime == 0)
dl_se             252 kernel/sched/deadline.c 	if (dl_entity_is_special(dl_se))
dl_se             255 kernel/sched/deadline.c 	WARN_ON(dl_se->dl_non_contending);
dl_se             257 kernel/sched/deadline.c 	zerolag_time = dl_se->deadline -
dl_se             258 kernel/sched/deadline.c 		 div64_long((dl_se->runtime * dl_se->dl_period),
dl_se             259 kernel/sched/deadline.c 			dl_se->dl_runtime);
dl_se             271 kernel/sched/deadline.c 	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
dl_se             273 kernel/sched/deadline.c 			sub_running_bw(dl_se, dl_rq);
dl_se             288 kernel/sched/deadline.c 	dl_se->dl_non_contending = 1;
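
Lines 257-259 above compute the GRUB "zero-lag time": the instant at which the remaining budget, consumed at the reserved rate dl_runtime/dl_period, would run out exactly at the deadline. The entity stays contending (its bandwidth stays in running_bw) until that instant, for which the inactive timer is armed. A worked example with invented numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t deadline   = 100000000;  /* absolute, ns */
            int64_t runtime    = 2000000;    /* 2 ms of budget left */
            int64_t dl_period  = 100000000;  /* 100 ms */
            int64_t dl_runtime = 10000000;   /* 10 ms reserved per period */

            /* zerolag = deadline - runtime * period / max runtime */
            int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime;

            printf("%lld\n", (long long)zerolag);  /* 80000000: 20 ms early */
            return 0;
    }
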
dl_se             293 kernel/sched/deadline.c static void task_contending(struct sched_dl_entity *dl_se, int flags)
dl_se             295 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se             301 kernel/sched/deadline.c 	if (dl_se->dl_runtime == 0)
dl_se             305 kernel/sched/deadline.c 		add_rq_bw(dl_se, dl_rq);
dl_se             307 kernel/sched/deadline.c 	if (dl_se->dl_non_contending) {
dl_se             308 kernel/sched/deadline.c 		dl_se->dl_non_contending = 0;
dl_se             316 kernel/sched/deadline.c 		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
dl_se             317 kernel/sched/deadline.c 			put_task_struct(dl_task_of(dl_se));
dl_se             326 kernel/sched/deadline.c 		add_running_bw(dl_se, dl_rq);
dl_se             332 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se             334 kernel/sched/deadline.c 	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
dl_se             421 kernel/sched/deadline.c static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             423 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
dl_se             431 kernel/sched/deadline.c static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             433 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
dl_se             612 kernel/sched/deadline.c void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             617 kernel/sched/deadline.c void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se             655 kernel/sched/deadline.c static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
dl_se             657 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se             660 kernel/sched/deadline.c 	WARN_ON(dl_se->dl_boosted);
dl_se             661 kernel/sched/deadline.c 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
dl_se             668 kernel/sched/deadline.c 	if (dl_se->dl_throttled)
dl_se             676 kernel/sched/deadline.c 	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
dl_se             677 kernel/sched/deadline.c 	dl_se->runtime = dl_se->dl_runtime;
dl_se             698 kernel/sched/deadline.c static void replenish_dl_entity(struct sched_dl_entity *dl_se,
dl_se             701 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se             710 kernel/sched/deadline.c 	if (dl_se->dl_deadline == 0) {
dl_se             711 kernel/sched/deadline.c 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se             712 kernel/sched/deadline.c 		dl_se->runtime = pi_se->dl_runtime;
dl_se             715 kernel/sched/deadline.c 	if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se             716 kernel/sched/deadline.c 		dl_se->runtime = 0;
dl_se             724 kernel/sched/deadline.c 	while (dl_se->runtime <= 0) {
dl_se             725 kernel/sched/deadline.c 		dl_se->deadline += pi_se->dl_period;
dl_se             726 kernel/sched/deadline.c 		dl_se->runtime += pi_se->dl_runtime;
dl_se             738 kernel/sched/deadline.c 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
dl_se             740 kernel/sched/deadline.c 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se             741 kernel/sched/deadline.c 		dl_se->runtime = pi_se->dl_runtime;
dl_se             744 kernel/sched/deadline.c 	if (dl_se->dl_yielded)
dl_se             745 kernel/sched/deadline.c 		dl_se->dl_yielded = 0;
dl_se             746 kernel/sched/deadline.c 	if (dl_se->dl_throttled)
dl_se             747 kernel/sched/deadline.c 		dl_se->dl_throttled = 0;
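
The loop at lines 724-726 is the heart of CBS replenishment: push the scheduling deadline forward one relative period at a time, granting one budget per period, until the runtime is positive again (a badly overdrawn budget can need several periods). A worked example with invented values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t deadline = 50;   /* absolute, arbitrary units */
            int64_t runtime  = -25;  /* budget overdrawn by 25 */
            const int64_t dl_period = 100, dl_runtime = 10;

            /* Same shape as the while loop above. */
            while (runtime <= 0) {
                    deadline += dl_period;
                    runtime  += dl_runtime;
            }

            printf("deadline=%lld runtime=%lld\n",   /* deadline=350 runtime=5 */
                   (long long)deadline, (long long)runtime);
            return 0;
    }
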
dl_se             774 kernel/sched/deadline.c static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
dl_se             797 kernel/sched/deadline.c 	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
dl_se             798 kernel/sched/deadline.c 	right = ((dl_se->deadline - t) >> DL_SCALE) *
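
The products at lines 797-798 implement the CBS wakeup test: the current (runtime, deadline) pair may be kept only if runtime / (deadline - t) <= dl_runtime / dl_deadline. Cross-multiplying avoids a division, and shifting each factor down by DL_SCALE (10 bits) trades a little precision for keeping the products within 64 bits. A standalone sketch with invented numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DL_SCALE 10  /* as in the kernel */

    /* True if keeping (runtime, deadline) at time t would exceed the
     * reserved density dl_runtime/dl_deadline.  The kernel compares via
     * dl_time_before() to stay safe across clock wraparound; a plain
     * comparison is enough for this sketch. */
    static bool toy_dl_entity_overflow(uint64_t dl_deadline, uint64_t dl_runtime,
                                       uint64_t deadline, uint64_t runtime,
                                       uint64_t t)
    {
            uint64_t left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
            uint64_t right = ((deadline - t) >> DL_SCALE) *
                             (dl_runtime >> DL_SCALE);

            return left > right;
    }

    int main(void)
    {
            /* 5 ms left with 20 ms to the deadline is a 25% request
             * against a 10%-density reservation: overflow. */
            printf("%d\n", toy_dl_entity_overflow(100000000ULL, 10000000ULL,
                                                  120000000ULL, 5000000ULL,
                                                  100000000ULL));  /* 1 */
            return 0;
    }
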
dl_se             823 kernel/sched/deadline.c update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
dl_se             825 kernel/sched/deadline.c 	u64 laxity = dl_se->deadline - rq_clock(rq);
dl_se             833 kernel/sched/deadline.c 	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
dl_se             835 kernel/sched/deadline.c 	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
dl_se             849 kernel/sched/deadline.c static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
dl_se             851 kernel/sched/deadline.c 	return dl_se->dl_deadline == dl_se->dl_period;
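
Line 835 is the revised wakeup rule for constrained tasks (dl_is_implicit() above draws the line: implicit means dl_deadline == dl_period): keep the old deadline but trim the budget to laxity * dl_density, where dl_density is the fixed-point ratio dl_runtime/dl_deadline with BW_SHIFT = 20 fractional bits. A worked example with invented timings (to_ratio() here follows the kernel's runtime << BW_SHIFT / period shape):

    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20

    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << BW_SHIFT) / period;  /* fixed-point fraction */
    }

    int main(void)
    {
            uint64_t dl_runtime  = 10000000;    /* 10 ms */
            uint64_t dl_deadline = 100000000;   /* 100 ms */
            uint64_t dl_density  = to_ratio(dl_deadline, dl_runtime);
            uint64_t laxity      = 40000000;    /* 40 ms to the old deadline */

            /* Scale the budget to the density so the old deadline can be
             * kept without exceeding the reserved density. */
            uint64_t runtime = (dl_density * laxity) >> BW_SHIFT;

            printf("%llu\n", (unsigned long long)runtime);  /* ~4 ms in ns */
            return 0;
    }
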
dl_se             884 kernel/sched/deadline.c static void update_dl_entity(struct sched_dl_entity *dl_se,
dl_se             887 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se             890 kernel/sched/deadline.c 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_se             891 kernel/sched/deadline.c 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
dl_se             893 kernel/sched/deadline.c 		if (unlikely(!dl_is_implicit(dl_se) &&
dl_se             894 kernel/sched/deadline.c 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_se             895 kernel/sched/deadline.c 			     !dl_se->dl_boosted)){
dl_se             896 kernel/sched/deadline.c 			update_dl_revised_wakeup(dl_se, rq);
dl_se             900 kernel/sched/deadline.c 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se             901 kernel/sched/deadline.c 		dl_se->runtime = pi_se->dl_runtime;
dl_se             905 kernel/sched/deadline.c static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
dl_se             907 kernel/sched/deadline.c 	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
dl_se             922 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se             923 kernel/sched/deadline.c 	struct hrtimer *timer = &dl_se->dl_timer;
dl_se             935 kernel/sched/deadline.c 	act = ns_to_ktime(dl_next_period(dl_se));
dl_se             980 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = container_of(timer,
dl_se             983 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
dl_se            1000 kernel/sched/deadline.c 	if (dl_se->dl_boosted)
dl_se            1007 kernel/sched/deadline.c 	if (!dl_se->dl_throttled)
dl_se            1028 kernel/sched/deadline.c 		replenish_dl_entity(dl_se, dl_se);
dl_se            1085 kernel/sched/deadline.c void init_dl_task_timer(struct sched_dl_entity *dl_se)
dl_se            1087 kernel/sched/deadline.c 	struct hrtimer *timer = &dl_se->dl_timer;
dl_se            1111 kernel/sched/deadline.c static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
dl_se            1113 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
dl_se            1114 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
dl_se            1116 kernel/sched/deadline.c 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_se            1117 kernel/sched/deadline.c 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
dl_se            1118 kernel/sched/deadline.c 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
dl_se            1120 kernel/sched/deadline.c 		dl_se->dl_throttled = 1;
dl_se            1121 kernel/sched/deadline.c 		if (dl_se->runtime > 0)
dl_se            1122 kernel/sched/deadline.c 			dl_se->runtime = 0;
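
Together with dl_next_period() at line 907 (next period start = deadline - dl_deadline + dl_period), the test at lines 1116-1117 bounds the window in which a constrained task that slept through its deadline must be throttled: fresh runtime may only be granted from the next period boundary on. A worked example with invented values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t dl_deadline = 40, dl_period = 100;  /* relative, ms */
            uint64_t deadline = 140;  /* absolute deadline of current job */
            uint64_t now = 160;       /* rq_clock() */

            /* Start of the next period, as dl_next_period() computes it. */
            uint64_t next_period = deadline - dl_deadline + dl_period;  /* 200 */

            /* Between the missed deadline and the next period boundary
             * the task is throttled rather than replenished. */
            if (deadline < now && now < next_period)
                    printf("throttle until %llu\n",
                           (unsigned long long)next_period);
            return 0;
    }
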
dl_se            1127 kernel/sched/deadline.c int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
dl_se            1129 kernel/sched/deadline.c 	return (dl_se->runtime <= 0);
dl_se            1153 kernel/sched/deadline.c static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
dl_se            1157 kernel/sched/deadline.c 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
dl_se            1182 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &curr->dl;
dl_se            1187 kernel/sched/deadline.c 	if (!dl_task(curr) || !on_dl_rq(dl_se))
dl_se            1201 kernel/sched/deadline.c 		if (unlikely(dl_se->dl_yielded))
dl_se            1215 kernel/sched/deadline.c 	if (dl_entity_is_special(dl_se))
dl_se            1225 kernel/sched/deadline.c 	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
dl_se            1237 kernel/sched/deadline.c 	dl_se->runtime -= scaled_delta_exec;
dl_se            1240 kernel/sched/deadline.c 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se            1241 kernel/sched/deadline.c 		dl_se->dl_throttled = 1;
dl_se            1244 kernel/sched/deadline.c 		if (dl_runtime_exceeded(dl_se) &&
dl_se            1245 kernel/sched/deadline.c 		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
dl_se            1246 kernel/sched/deadline.c 			dl_se->dl_overrun = 1;
dl_se            1249 kernel/sched/deadline.c 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
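
grub_reclaim() at line 1153 scales the observed execution time before it is subtracted from dl_se->runtime at line 1237, so a SCHED_FLAG_RECLAIM entity is charged less while other bandwidth sits inactive; u_act_min keeps any single entity from reclaiming past its share. A user-space model assuming this era's formula, delta * (BW_UNIT - u_inact - u_extra) with the active utilization floored at u_act_min (names and sample values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20
    #define BW_UNIT  (1 << BW_SHIFT)

    static uint64_t toy_grub_reclaim(uint64_t delta, uint64_t u_inact,
                                     uint64_t u_extra, uint64_t u_act_min)
    {
            uint64_t u_act;

            if (u_inact + u_extra > BW_UNIT - u_act_min)
                    u_act = u_act_min;  /* floor: cap the reclaiming */
            else
                    u_act = BW_UNIT - u_inact - u_extra;

            return (delta * u_act) >> BW_SHIFT;  /* time actually charged */
    }

    int main(void)
    {
            /* 20% inactive plus 10% unreservable: charge only ~70% of a
             * real 1 ms of execution time. */
            printf("%llu\n", (unsigned long long)
                   toy_grub_reclaim(1000000, BW_UNIT / 5, BW_UNIT / 10,
                                    BW_UNIT / 20));  /* 700000 */
            return 0;
    }
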
dl_se            1284 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = container_of(timer,
dl_se            1287 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
dl_se            1299 kernel/sched/deadline.c 		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
dl_se            1302 kernel/sched/deadline.c 			dl_se->dl_non_contending = 0;
dl_se            1312 kernel/sched/deadline.c 	if (dl_se->dl_non_contending == 0)
dl_se            1315 kernel/sched/deadline.c 	sub_running_bw(dl_se, &rq->dl);
dl_se            1316 kernel/sched/deadline.c 	dl_se->dl_non_contending = 0;
dl_se            1324 kernel/sched/deadline.c void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
dl_se            1326 kernel/sched/deadline.c 	struct hrtimer *timer = &dl_se->inactive_timer;
dl_se            1375 kernel/sched/deadline.c void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se            1377 kernel/sched/deadline.c 	int prio = dl_task_of(dl_se)->prio;
dl_se            1378 kernel/sched/deadline.c 	u64 deadline = dl_se->deadline;
dl_se            1385 kernel/sched/deadline.c 	inc_dl_migration(dl_se, dl_rq);
dl_se            1389 kernel/sched/deadline.c void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dl_se            1391 kernel/sched/deadline.c 	int prio = dl_task_of(dl_se)->prio;
dl_se            1398 kernel/sched/deadline.c 	dec_dl_deadline(dl_rq, dl_se->deadline);
dl_se            1399 kernel/sched/deadline.c 	dec_dl_migration(dl_se, dl_rq);
dl_se            1402 kernel/sched/deadline.c static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
dl_se            1404 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se            1410 kernel/sched/deadline.c 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
dl_se            1415 kernel/sched/deadline.c 		if (dl_time_before(dl_se->deadline, entry->deadline))
dl_se            1423 kernel/sched/deadline.c 	rb_link_node(&dl_se->rb_node, parent, link);
dl_se            1424 kernel/sched/deadline.c 	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
dl_se            1426 kernel/sched/deadline.c 	inc_dl_tasks(dl_se, dl_rq);
dl_se            1429 kernel/sched/deadline.c static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
dl_se            1431 kernel/sched/deadline.c 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
dl_se            1433 kernel/sched/deadline.c 	if (RB_EMPTY_NODE(&dl_se->rb_node))
dl_se            1436 kernel/sched/deadline.c 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
dl_se            1437 kernel/sched/deadline.c 	RB_CLEAR_NODE(&dl_se->rb_node);
dl_se            1439 kernel/sched/deadline.c 	dec_dl_tasks(dl_se, dl_rq);
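
__enqueue_dl_entity() keeps the dl_rq rbtree ordered by absolute deadline with the earliest-deadline node cached as leftmost, which is what the rb_leftmost tests above and pick_next_dl_entity() rely on. A minimal sketch of the same cached-rbtree insertion idiom, assuming the kernel's <linux/rbtree.h> API; struct item is an illustrative stand-in:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct item {
            u64 deadline;
            struct rb_node node;
    };

    static void item_insert(struct item *it, struct rb_root_cached *root)
    {
            struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
            bool leftmost = true;

            while (*link) {
                    struct item *entry = rb_entry(*link, struct item, node);

                    parent = *link;
                    if (it->deadline < entry->deadline) {
                            link = &parent->rb_left;
                    } else {
                            link = &parent->rb_right;
                            leftmost = false;  /* not the new minimum */
                    }
            }

            rb_link_node(&it->node, parent, link);
            rb_insert_color_cached(&it->node, root, leftmost);
    }
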
dl_se            1443 kernel/sched/deadline.c enqueue_dl_entity(struct sched_dl_entity *dl_se,
dl_se            1446 kernel/sched/deadline.c 	BUG_ON(on_dl_rq(dl_se));
dl_se            1454 kernel/sched/deadline.c 		task_contending(dl_se, flags);
dl_se            1455 kernel/sched/deadline.c 		update_dl_entity(dl_se, pi_se);
dl_se            1457 kernel/sched/deadline.c 		replenish_dl_entity(dl_se, pi_se);
dl_se            1459 kernel/sched/deadline.c 		  dl_time_before(dl_se->deadline,
dl_se            1460 kernel/sched/deadline.c 				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
dl_se            1461 kernel/sched/deadline.c 		setup_new_dl_entity(dl_se);
dl_se            1464 kernel/sched/deadline.c 	__enqueue_dl_entity(dl_se);
dl_se            1467 kernel/sched/deadline.c static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
dl_se            1469 kernel/sched/deadline.c 	__dequeue_dl_entity(dl_se);
dl_se            1779 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se;
dl_se            1788 kernel/sched/deadline.c 	dl_se = pick_next_dl_entity(rq, dl_rq);
dl_se            1789 kernel/sched/deadline.c 	BUG_ON(!dl_se);
dl_se            1790 kernel/sched/deadline.c 	p = dl_task_of(dl_se);
dl_se            2617 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se            2619 kernel/sched/deadline.c 	dl_se->dl_runtime = attr->sched_runtime;
dl_se            2620 kernel/sched/deadline.c 	dl_se->dl_deadline = attr->sched_deadline;
dl_se            2621 kernel/sched/deadline.c 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se            2622 kernel/sched/deadline.c 	dl_se->flags = attr->sched_flags;
dl_se            2623 kernel/sched/deadline.c 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se            2624 kernel/sched/deadline.c 	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
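
__setparam_dl() above derives the cached fixed-point ratios once at admission: dl_bw = runtime/period and dl_density = runtime/deadline, each with BW_SHIFT = 20 fractional bits, and a zero sched_period defaults to the deadline (the GNU ?: at line 2621). A worked example with invented parameters (to_ratio() again follows the kernel's runtime << BW_SHIFT / period shape):

    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20

    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
            uint64_t sched_runtime  = 10000000;  /* 10 ms */
            uint64_t sched_deadline = 50000000;  /* 50 ms */
            uint64_t sched_period   = 0;         /* 0 => default to deadline */

            uint64_t dl_period  = sched_period ?: sched_deadline;
            uint64_t dl_bw      = to_ratio(dl_period, sched_runtime);
            uint64_t dl_density = to_ratio(sched_deadline, sched_runtime);

            /* Both 209715 here, i.e. ~0.2 * BW_UNIT (1 << 20). */
            printf("bw=%llu density=%llu\n",
                   (unsigned long long)dl_bw, (unsigned long long)dl_density);
            return 0;
    }
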
dl_se            2629 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se            2632 kernel/sched/deadline.c 	attr->sched_runtime = dl_se->dl_runtime;
dl_se            2633 kernel/sched/deadline.c 	attr->sched_deadline = dl_se->dl_deadline;
dl_se            2634 kernel/sched/deadline.c 	attr->sched_period = dl_se->dl_period;
dl_se            2635 kernel/sched/deadline.c 	attr->sched_flags = dl_se->flags;
dl_se            2687 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se            2689 kernel/sched/deadline.c 	dl_se->dl_runtime		= 0;
dl_se            2690 kernel/sched/deadline.c 	dl_se->dl_deadline		= 0;
dl_se            2691 kernel/sched/deadline.c 	dl_se->dl_period		= 0;
dl_se            2692 kernel/sched/deadline.c 	dl_se->flags			= 0;
dl_se            2693 kernel/sched/deadline.c 	dl_se->dl_bw			= 0;
dl_se            2694 kernel/sched/deadline.c 	dl_se->dl_density		= 0;
dl_se            2696 kernel/sched/deadline.c 	dl_se->dl_throttled		= 0;
dl_se            2697 kernel/sched/deadline.c 	dl_se->dl_yielded		= 0;
dl_se            2698 kernel/sched/deadline.c 	dl_se->dl_non_contending	= 0;
dl_se            2699 kernel/sched/deadline.c 	dl_se->dl_overrun		= 0;
dl_se            2704 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
dl_se            2706 kernel/sched/deadline.c 	if (dl_se->dl_runtime != attr->sched_runtime ||
dl_se            2707 kernel/sched/deadline.c 	    dl_se->dl_deadline != attr->sched_deadline ||
dl_se            2708 kernel/sched/deadline.c 	    dl_se->dl_period != attr->sched_period ||
dl_se            2709 kernel/sched/deadline.c 	    dl_se->flags != attr->sched_flags)
dl_se             212 kernel/sched/sched.h static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
dl_se             215 kernel/sched/sched.h 	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
dl_se            1885 kernel/sched/sched.h extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
dl_se            1886 kernel/sched/sched.h extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);