Lines matching refs: p (all hits are in kernel/sched/deadline.c)
35 struct task_struct *p = dl_task_of(dl_se); in dl_rq_of_se() local
36 struct rq *rq = task_rq(p); in dl_rq_of_se()
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) in is_leftmost() argument
48 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
135 struct task_struct *p = dl_task_of(dl_se); in inc_dl_migration() local
137 if (p->nr_cpus_allowed > 1) in inc_dl_migration()
145 struct task_struct *p = dl_task_of(dl_se); in dec_dl_migration() local
147 if (p->nr_cpus_allowed > 1) in dec_dl_migration()
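Context for the two hits above: inc_dl_migration()/dec_dl_migration() keep a count of queued deadline tasks that are allowed on more than one CPU; with two or more tasks queued and at least one of them migratable, the runqueue is flagged overloaded so peer CPUs may pull from it. A minimal user-space sketch of that bookkeeping, with toy names standing in for the kernel's:

    #include <stdbool.h>

    struct toy_dl_rq {
        unsigned int dl_nr_running;     /* queued deadline tasks        */
        unsigned int dl_nr_migratory;   /* ...with nr_cpus_allowed > 1  */
        bool overloaded;
    };

    static void toy_update_overload(struct toy_dl_rq *rq)
    {
        /* Overloaded iff something is queued behind the running task
         * and at least one queued task is free to move. */
        rq->overloaded = rq->dl_nr_running > 1 && rq->dl_nr_migratory > 0;
    }

    static void toy_inc_migration(struct toy_dl_rq *rq, int nr_cpus_allowed)
    {
        if (nr_cpus_allowed > 1)    /* same gate as the lines above */
            rq->dl_nr_migratory++;
        toy_update_overload(rq);
    }

    static void toy_dec_migration(struct toy_dl_rq *rq, int nr_cpus_allowed)
    {
        if (nr_cpus_allowed > 1)
            rq->dl_nr_migratory--;
        toy_update_overload(rq);
    }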
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); in enqueue_pushable_dl_task()
171 if (dl_entity_preempt(&p->dl, &entry->dl)) in enqueue_pushable_dl_task()
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks; in enqueue_pushable_dl_task()
182 rb_link_node(&p->pushable_dl_tasks, parent, link); in enqueue_pushable_dl_task()
183 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); in enqueue_pushable_dl_task()
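The insert walk above keeps the pushable tree ordered by absolute deadline and caches the leftmost, i.e. most urgent, node. A toy unbalanced BST showing the same walk; the kernel uses a red-black tree and compares with dl_entity_preempt() rather than a plain "<":

    #include <stddef.h>
    #include <stdint.h>

    struct toy_node {
        uint64_t deadline;
        struct toy_node *left, *right;
    };

    static void toy_pushable_insert(struct toy_node **root,
                                    struct toy_node **leftmost,
                                    struct toy_node *n)
    {
        struct toy_node **link = root;
        int went_right = 0;

        n->left = n->right = NULL;
        while (*link) {
            if (n->deadline < (*link)->deadline) {
                link = &(*link)->left;      /* earlier deadline: go left */
            } else {
                link = &(*link)->right;
                went_right = 1;
            }
        }
        *link = n;
        if (!went_right)    /* never turned right: new cached leftmost */
            *leftmost = n;
    }

Caching the leftmost node is what lets dequeue_pushable_dl_task() (next hits) and pick_next_pushable_dl_task() find the most urgent pushable task in O(1).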
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
190 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) in dequeue_pushable_dl_task()
193 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) { in dequeue_pushable_dl_task()
196 next_node = rb_next(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
200 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); in dequeue_pushable_dl_task()
201 RB_CLEAR_NODE(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
223 static void dl_task_offline_migration(struct rq *rq, struct task_struct *p) in dl_task_offline_migration() argument
228 later_rq = find_lock_later_rq(p, rq); in dl_task_offline_migration()
238 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p)); in dl_task_offline_migration()
257 deactivate_task(rq, p, 0); in dl_task_offline_migration()
258 set_task_cpu(p, later_rq->cpu); in dl_task_offline_migration()
259 activate_task(later_rq, p, ENQUEUE_REPLENISH); in dl_task_offline_migration()
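dl_task_offline_migration() runs when the bandwidth timer fires on an rq whose CPU has gone offline: the task must leave. If find_lock_later_rq() cannot produce a destination, the fallback is any still-active CPU in the task's affinity mask; the usual deactivate / set_task_cpu / activate sequence then moves it, replenishing on arrival. A toy version of the fallback pick, assuming at most 64 CPUs so one word serves as the bitmask (and a GCC/Clang builtin for the bit scan):

    #include <stdint.h>

    /* Stand-in for cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p)):
     * first set bit of the intersection, or -1 if the masks are disjoint. */
    static int toy_any_active_allowed(uint64_t active_mask, uint64_t allowed_mask)
    {
        uint64_t both = active_mask & allowed_mask;

        return both ? __builtin_ctzll(both) : -1;
    }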
270 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
275 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
304 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
305 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
306 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
559 struct task_struct *p = dl_task_of(dl_se); in dl_task_timer() local
563 rq = task_rq_lock(p, &flags); in dl_task_timer()
578 if (!dl_task(p) || dl_se->dl_new || in dl_task_timer()
591 dl_task_offline_migration(rq, p); in dl_task_timer()
610 if (!task_on_rq_queued(p)) { in dl_task_timer()
615 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); in dl_task_timer()
617 check_preempt_curr_dl(rq, p, 0); in dl_task_timer()
629 task_rq_unlock(rq, p, &flags); in dl_task_timer()
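dl_task_timer() is the per-task bandwidth timer: it fires when a throttled task's next period begins and re-enqueues it with ENQUEUE_REPLENISH. The replenishment itself, roughly as in the kernel's replenish_dl_entity(), tops the budget up one period at a time and resets from "now" if the deadline has fallen into the past. Field names below are simplified:

    #include <stdint.h>

    struct toy_dl_entity {
        /* static parameters (admission control guarantees dl_runtime > 0) */
        uint64_t dl_runtime;    /* budget per period (ns)  */
        uint64_t dl_deadline;   /* relative deadline (ns)  */
        uint64_t dl_period;     /* period (ns)             */
        /* dynamic state */
        int64_t  runtime;       /* remaining budget, may be negative */
        uint64_t deadline;      /* current absolute deadline         */
    };

    static void toy_replenish(struct toy_dl_entity *dl, uint64_t now)
    {
        /* Pay back any overrun: advance one period at a time until the
         * budget is positive again. */
        while (dl->runtime <= 0) {
            dl->deadline += dl->dl_period;
            dl->runtime  += (int64_t)dl->dl_runtime;
        }
        /* So late that the deadline is still behind us: start a fresh
         * period from the current time instead. */
        if ((int64_t)(dl->deadline - now) < 0) {
            dl->deadline = now + dl->dl_deadline;
            dl->runtime  = (int64_t)dl->dl_runtime;
        }
    }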
896 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_dl() argument
898 struct task_struct *pi_task = rt_mutex_get_top_task(p); in enqueue_task_dl()
899 struct sched_dl_entity *pi_se = &p->dl; in enqueue_task_dl()
907 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) { in enqueue_task_dl()
909 } else if (!dl_prio(p->normal_prio)) { in enqueue_task_dl()
917 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH); in enqueue_task_dl()
927 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) in enqueue_task_dl()
930 enqueue_dl_entity(&p->dl, pi_se, flags); in enqueue_task_dl()
932 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_dl()
933 enqueue_pushable_dl_task(rq, p); in enqueue_task_dl()
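The interesting part of enqueue_task_dl() is the priority-inheritance choice visible above: while p is boosted through an rt_mutex whose top waiter is itself a deadline task, p is enqueued with the waiter's parameters (pi_se) instead of its own, so it inherits the blocked task's urgency. A condensed, hypothetical form of that selection:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_dl_params { uint64_t dl_runtime, dl_deadline, dl_period; };

    /* pi_params: parameters of the top waiter on the rt_mutex p holds,
     * or NULL if there is none; pi_is_dl: that waiter is a deadline task. */
    static const struct toy_dl_params *
    toy_params_to_enqueue_with(const struct toy_dl_params *own, bool boosted,
                               const struct toy_dl_params *pi_params,
                               bool pi_is_dl)
    {
        if (boosted && pi_params && pi_is_dl)
            return pi_params;   /* run on the donor's bandwidth */
        return own;
    }

Note also the early return above it: a throttled task is not re-enqueued unless the flags carry ENQUEUE_REPLENISH, because only the bandwidth timer is allowed to hand it a fresh budget.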
936 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in __dequeue_task_dl() argument
938 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
939 dequeue_pushable_dl_task(rq, p); in __dequeue_task_dl()
942 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_dl() argument
945 __dequeue_task_dl(rq, p, flags); in dequeue_task_dl()
960 struct task_struct *p = rq->curr; in yield_task_dl() local
968 if (p->dl.runtime > 0) { in yield_task_dl()
970 p->dl.runtime = 0; in yield_task_dl()
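yield_task_dl() models sched_yield() as exhausting the budget: runtime is forced to 0, the task throttles, and the bandwidth timer hands it a fresh budget at the start of its next period. Toy version, with a dl_yielded-style flag so accounting can tell a voluntary yield from a genuine overrun:

    #include <stdint.h>

    struct toy_yield_dl { int64_t runtime; int dl_yielded; };

    static void toy_yield(struct toy_yield_dl *dl)
    {
        if (dl->runtime > 0) {
            dl->dl_yielded = 1;   /* voluntary, not an overrun */
            dl->runtime = 0;      /* throttle until next replenishment */
        }
    }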
987 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) in select_task_rq_dl() argument
1011 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1012 (p->nr_cpus_allowed > 1)) { in select_task_rq_dl()
1013 int target = find_later_rq(p); in select_task_rq_dl()
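select_task_rq_dl() tries not to wake p onto a CPU whose current task is a deadline task that p cannot preempt and that cannot itself move away; in that case find_later_rq() looks for a CPU with a later (or no) earliest deadline. The gate condition as a standalone predicate, ignoring the boosting special case inside dl_entity_preempt():

    #include <stdbool.h>
    #include <stdint.h>

    static bool toy_should_find_later_rq(bool curr_is_dl,
                                         int curr_nr_cpus_allowed,
                                         uint64_t curr_deadline,
                                         int p_nr_cpus_allowed,
                                         uint64_t p_deadline)
    {
        return curr_is_dl &&
               (curr_nr_cpus_allowed < 2 ||                  /* curr is stuck  */
                !((int64_t)(p_deadline - curr_deadline) < 0)) && /* p won't win */
               p_nr_cpus_allowed > 1;                        /* p can move     */
    }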
1024 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) in check_preempt_equal_dl() argument
1038 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
1039 cpudl_find(&rq->rd->cpudl, p, NULL) != -1) in check_preempt_equal_dl()
1053 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, in check_preempt_curr_dl() argument
1056 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1066 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1068 check_preempt_equal_dl(rq, p); in check_preempt_curr_dl()
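All of these preemption decisions bottom out in dl_time_before(), a wrap-safe comparison of 64-bit nanosecond clocks: subtract and look at the sign. The one-liner below is the real kernel idiom; the demo around it is scaffolding:

    #include <assert.h>
    #include <stdint.h>

    static inline int toy_dl_time_before(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;    /* stays correct across u64 wrap */
    }

    int main(void)
    {
        assert(toy_dl_time_before(100, 200));
        assert(!toy_dl_time_before(200, 100));
        /* near the wrap point a plain "<" would invert this answer: */
        assert(toy_dl_time_before(UINT64_MAX - 5, 10));
        return 0;
    }

The equal-deadline branch above then falls through to check_preempt_equal_dl(), which only bothers rescheduling if cpudl_find() says p could run somewhere else instead.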
1073 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1075 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1078 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1097 struct task_struct *p; in pick_next_task_dl() local
1128 p = dl_task_of(dl_se); in pick_next_task_dl()
1129 p->se.exec_start = rq_clock_task(rq); in pick_next_task_dl()
1132 dequeue_pushable_dl_task(rq, p); in pick_next_task_dl()
1135 start_hrtick_dl(rq, p); in pick_next_task_dl()
1139 return p; in pick_next_task_dl()
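pick_next_task_dl() is EDF at its plainest: run the queued task with the earliest deadline, which the kernel reads off the cached leftmost rb-tree node, stamp its exec_start, and remove it from the pushable tree since the running task must never be pushed away. Toy version over an array instead of a tree:

    #include <stddef.h>
    #include <stdint.h>

    struct toy_picked { uint64_t deadline; uint64_t exec_start; };

    static struct toy_picked *toy_pick_next(struct toy_picked *q, size_t n,
                                            uint64_t now)
    {
        struct toy_picked *best = NULL;

        for (size_t i = 0; i < n; i++)
            if (!best || q[i].deadline < best->deadline)
                best = &q[i];
        if (best)
            best->exec_start = now; /* p->se.exec_start = rq_clock_task(rq) */
        return best;                /* NULL if nothing is queued */
    }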
1142 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) in put_prev_task_dl() argument
1146 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
1147 enqueue_pushable_dl_task(rq, p); in put_prev_task_dl()
1150 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) in task_tick_dl() argument
1159 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
1160 is_leftmost(p, &rq->dl)) in task_tick_dl()
1161 start_hrtick_dl(rq, p); in task_tick_dl()
1164 static void task_fork_dl(struct task_struct *p) in task_fork_dl() argument
1172 static void task_dead_dl(struct task_struct *p) in task_dead_dl() argument
1174 struct hrtimer *timer = &p->dl.dl_timer; in task_dead_dl()
1175 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_dead_dl()
1182 dl_b->total_bw -= p->dl.dl_bw; in task_dead_dl()
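task_dead_dl() returns the task's reserved bandwidth to the dl_bw pool. That dl_bw value is the task's utilization, runtime/period, stored in fixed point at admission time, much like the kernel's to_ratio(); the shift of 20 below is an assumption matching mainline of this era:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_BW_SHIFT 20

    static uint64_t toy_to_ratio(uint64_t period, uint64_t runtime)
    {
        return (runtime << TOY_BW_SHIFT) / period;
    }

    int main(void)
    {
        /* 10 ms of budget every 100 ms: 10% of one CPU, ~0.1 * 2^20. */
        printf("dl_bw = %llu\n",
               (unsigned long long)toy_to_ratio(100000000ULL, 10000000ULL));
        return 0;
    }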
1190 struct task_struct *p = rq->curr; in set_curr_task_dl() local
1192 p->se.exec_start = rq_clock_task(rq); in set_curr_task_dl()
1195 dequeue_pushable_dl_task(rq, p); in set_curr_task_dl()
1203 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) in pick_dl_task() argument
1205 if (!task_running(rq, p) && in pick_dl_task()
1206 cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) in pick_dl_task()
1216 struct task_struct *p = NULL; in pick_next_earliest_dl_task() local
1222 p = dl_task_of(dl_se); in pick_next_earliest_dl_task()
1224 if (pick_dl_task(rq, p, cpu)) in pick_next_earliest_dl_task()
1225 return p; in pick_next_earliest_dl_task()
1369 struct task_struct *p; in pick_next_pushable_dl_task() local
1374 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, in pick_next_pushable_dl_task()
1377 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
1378 BUG_ON(task_current(rq, p)); in pick_next_pushable_dl_task()
1379 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
1381 BUG_ON(!task_on_rq_queued(p)); in pick_next_pushable_dl_task()
1382 BUG_ON(!dl_task(p)); in pick_next_pushable_dl_task()
1384 return p; in pick_next_pushable_dl_task()
1479 struct task_struct *p; in pull_dl_task() local
1517 p = pick_next_earliest_dl_task(src_rq, this_cpu); in pull_dl_task()
1524 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
1526 dl_time_before(p->dl.deadline, in pull_dl_task()
1528 WARN_ON(p == src_rq->curr); in pull_dl_task()
1529 WARN_ON(!task_on_rq_queued(p)); in pull_dl_task()
1535 if (dl_time_before(p->dl.deadline, in pull_dl_task()
1541 deactivate_task(src_rq, p, 0); in pull_dl_task()
1542 set_task_cpu(p, this_cpu); in pull_dl_task()
1543 activate_task(this_rq, p, 0); in pull_dl_task()
1544 dmin = p->dl.deadline; in pull_dl_task()
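pull_dl_task() runs when this CPU may be about to schedule something less urgent: it scans the overloaded runqueues, and any pushable task whose deadline beats both the best found so far (dmin) and this rq's own earliest deadline gets migrated over, dmin tightening as the scan proceeds. A toy single-pick version of that scan; the kernel actually migrates every qualifying task as it finds one, not just the winner:

    #include <stdint.h>

    struct toy_src { uint64_t earliest_pushable_deadline; int has_task; };

    static int toy_pull_scan(const struct toy_src *src, int nr_src,
                             uint64_t this_rq_earliest)
    {
        uint64_t dmin = this_rq_earliest;
        int best = -1;

        for (int i = 0; i < nr_src; i++) {
            if (src[i].has_task &&
                src[i].earliest_pushable_deadline < dmin) {
                dmin = src[i].earliest_pushable_deadline;
                best = i;
            }
        }
        return best;    /* -1: nothing earlier than what we already have */
    }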
1564 static void task_woken_dl(struct rq *rq, struct task_struct *p) in task_woken_dl() argument
1566 if (!task_running(rq, p) && in task_woken_dl()
1569 p->nr_cpus_allowed > 1 && in task_woken_dl()
1572 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
1577 static void set_cpus_allowed_dl(struct task_struct *p, in set_cpus_allowed_dl() argument
1584 BUG_ON(!dl_task(p)); in set_cpus_allowed_dl()
1586 rq = task_rq(p); in set_cpus_allowed_dl()
1604 __dl_clear(src_dl_b, p->dl.dl_bw); in set_cpus_allowed_dl()
1612 if (!on_dl_rq(&p->dl)) in set_cpus_allowed_dl()
1621 if ((p->nr_cpus_allowed > 1) == (weight > 1)) in set_cpus_allowed_dl()
1628 if (!task_current(rq, p)) in set_cpus_allowed_dl()
1629 dequeue_pushable_dl_task(rq, p); in set_cpus_allowed_dl()
1633 if (!task_current(rq, p)) in set_cpus_allowed_dl()
1634 enqueue_pushable_dl_task(rq, p); in set_cpus_allowed_dl()
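set_cpus_allowed_dl() has two jobs visible above: hand the task's bandwidth back to the old root domain when the new mask moves it into a different exclusive cpuset, and fix up the pushable bookkeeping when the affinity change crosses the one-CPU boundary. The second part as a toy decision; the hooks are hypothetical stand-ins for {en,de}queue_pushable_dl_task(), and the migratory-counter update is omitted:

    #include <stdbool.h>

    static void toy_enqueue_pushable(void *rq, void *p) { (void)rq; (void)p; }
    static void toy_dequeue_pushable(void *rq, void *p) { (void)rq; (void)p; }

    static void toy_affinity_changed(void *rq, void *p, bool is_current,
                                     int old_nr_cpus, int new_nr_cpus)
    {
        /* Nothing to do unless we cross the "can migrate at all" line. */
        if ((old_nr_cpus > 1) == (new_nr_cpus > 1))
            return;
        /* The running task is never in the pushable tree, hence the
         * task_current() checks in the lines above. */
        if (is_current)
            return;
        if (new_nr_cpus > 1)
            toy_enqueue_pushable(rq, p);
        else
            toy_dequeue_pushable(rq, p);
    }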
1676 static void cancel_dl_timer(struct rq *rq, struct task_struct *p) in cancel_dl_timer() argument
1678 struct hrtimer *dl_timer = &p->dl.dl_timer; in cancel_dl_timer()
1681 lockdep_assert_held(&p->pi_lock); in cancel_dl_timer()
1699 static void switched_from_dl(struct rq *rq, struct task_struct *p) in switched_from_dl() argument
1702 cancel_dl_timer(rq, p); in switched_from_dl()
1703 __dl_clear_params(p); in switched_from_dl()
1710 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
1721 static void switched_to_dl(struct rq *rq, struct task_struct *p) in switched_to_dl() argument
1725 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_dl()
1727 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded && in switched_to_dl()
1728 push_dl_task(rq) && rq != task_rq(p)) in switched_to_dl()
1734 check_preempt_curr_dl(rq, p, 0); in switched_to_dl()
1745 static void prio_changed_dl(struct rq *rq, struct task_struct *p, in prio_changed_dl() argument
1748 if (task_on_rq_queued(p) || rq->curr == p) { in prio_changed_dl()
1764 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) && in prio_changed_dl()
1765 rq->curr == p) in prio_changed_dl()
1776 switched_to_dl(rq, p); in prio_changed_dl()