Lines matching refs:p (references to the task_struct pointer p in kernel/sched/deadline.c)

35 struct task_struct *p = dl_task_of(dl_se); in dl_rq_of_se() local
36 struct rq *rq = task_rq(p); in dl_rq_of_se()
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) in is_leftmost() argument
48 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
135 struct task_struct *p = dl_task_of(dl_se); in inc_dl_migration() local
137 if (p->nr_cpus_allowed > 1) in inc_dl_migration()
145 struct task_struct *p = dl_task_of(dl_se); in dec_dl_migration() local
147 if (p->nr_cpus_allowed > 1) in dec_dl_migration()
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); in enqueue_pushable_dl_task()
171 if (dl_entity_preempt(&p->dl, &entry->dl)) in enqueue_pushable_dl_task()
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks; in enqueue_pushable_dl_task()
182 rb_link_node(&p->pushable_dl_tasks, parent, link); in enqueue_pushable_dl_task()
183 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); in enqueue_pushable_dl_task()
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
190 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) in dequeue_pushable_dl_task()
193 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) { in dequeue_pushable_dl_task()
196 next_node = rb_next(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
200 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); in dequeue_pushable_dl_task()
201 RB_CLEAR_NODE(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
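
The enqueue_pushable_dl_task()/dequeue_pushable_dl_task() hits above show p being linked into the per-runqueue rbtree of "pushable" deadline tasks, ordered so the task with the earliest deadline sits leftmost, with dl_rq->pushable_dl_tasks_leftmost caching that node. Below is a minimal, self-contained user-space sketch of the same ordered-insert-with-cached-leftmost pattern (an unbalanced binary tree and illustrative names stand in for the kernel's rbtree and dl_entity_preempt()):

#include <stdio.h>
#include <stdbool.h>

struct dl_node {
    unsigned long long deadline;          /* absolute deadline, like p->dl.deadline */
    struct dl_node *left, *right;
};

struct pushable_root {
    struct dl_node *root;
    struct dl_node *leftmost;             /* cached earliest-deadline node */
};

/* wrap-safe "a before b", mirroring the kernel's dl_time_before() */
static bool dl_before(unsigned long long a, unsigned long long b)
{
    return (long long)(a - b) < 0;
}

static void push_enqueue(struct pushable_root *r, struct dl_node *n)
{
    struct dl_node **link = &r->root;
    bool leftmost = true;

    n->left = n->right = NULL;
    while (*link) {
        if (dl_before(n->deadline, (*link)->deadline)) {
            link = &(*link)->left;        /* still on the left spine */
        } else {
            link = &(*link)->right;
            leftmost = false;             /* took a right turn: not leftmost */
        }
    }
    *link = n;
    if (leftmost)
        r->leftmost = n;                  /* new earliest-deadline candidate */
}

int main(void)
{
    struct pushable_root r = { 0 };
    struct dl_node a = { .deadline = 200 }, b = { .deadline = 100 };

    push_enqueue(&r, &a);
    push_enqueue(&r, &b);
    printf("earliest deadline: %llu\n", r.leftmost->deadline); /* prints 100 */
    return 0;
}

The cached leftmost is what lets pick_next_pushable_dl_task() find its best candidate in O(1) instead of walking the tree.
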
237 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) in dl_task_offline_migration() argument
242 later_rq = find_lock_later_rq(p, rq); in dl_task_offline_migration()
252 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p)); in dl_task_offline_migration()
274 deactivate_task(rq, p, 0); in dl_task_offline_migration()
275 set_task_cpu(p, later_rq->cpu); in dl_task_offline_migration()
276 activate_task(later_rq, p, 0); in dl_task_offline_migration()
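
The dl_task_offline_migration() hits show the canonical three-step migration, done with both runqueue locks held (find_lock_later_rq() acquires them in a deadlock-safe order). Quoting the pattern from the matches above, with comments added:

deactivate_task(rq, p, 0);          /* dequeue p from the outgoing rq      */
set_task_cpu(p, later_rq->cpu);     /* repoint p at the destination CPU    */
activate_task(later_rq, p, 0);      /* enqueue p where it can actually run */

The same deactivate/set_task_cpu/activate sequence reappears in pull_dl_task() further down.
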
289 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
294 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
326 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
327 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
328 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
523 static int start_dl_timer(struct task_struct *p) in start_dl_timer() argument
525 struct sched_dl_entity *dl_se = &p->dl; in start_dl_timer()
527 struct rq *rq = task_rq(p); in start_dl_timer()
561 get_task_struct(p); in start_dl_timer()
586 struct task_struct *p = dl_task_of(dl_se); in dl_task_timer() local
590 rq = task_rq_lock(p, &flags); in dl_task_timer()
596 if (!dl_task(p)) { in dl_task_timer()
597 __dl_clear_params(p); in dl_task_timer()
642 if (!task_on_rq_queued(p)) { in dl_task_timer()
647 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); in dl_task_timer()
649 check_preempt_curr_dl(rq, p, 0); in dl_task_timer()
665 rq = dl_task_offline_migration(rq, p); in dl_task_timer()
683 task_rq_unlock(rq, p, &flags); in dl_task_timer()
689 put_task_struct(p); in dl_task_timer()
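
The start_dl_timer()/dl_task_timer() hits show the lifetime discipline around the replenishment hrtimer: get_task_struct(p) is taken before the timer is armed and put_task_struct(p) is dropped after the handler finishes, so p cannot be freed while a timer still points at it; the handler then re-enqueues with ENQUEUE_REPLENISH. A hedged, self-contained sketch of the constant-bandwidth-server (CBS) replenishment rule that ENQUEUE_REPLENISH requests (field names are illustrative, not the kernel's sched_dl_entity):

struct dl_params {
    long long runtime;                    /* remaining budget; <= 0 when throttled */
    unsigned long long deadline;          /* absolute scheduling deadline          */
    unsigned long long dl_runtime;        /* budget granted per period             */
    unsigned long long dl_period;         /* replenishment period                  */
};

static void replenish(struct dl_params *dl, unsigned long long now)
{
    /* Roll the deadline forward one period at a time, refilling the
     * budget, until the task is in the black again. */
    while (dl->runtime <= 0) {
        dl->deadline += dl->dl_period;
        dl->runtime  += (long long)dl->dl_runtime;
    }
    /* If the timer fired very late and the deadline is already in the
     * past, restart cleanly from "now". */
    if (dl->deadline < now) {
        dl->deadline = now + dl->dl_period;
        dl->runtime  = (long long)dl->dl_runtime;
    }
}
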
956 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_dl() argument
958 struct task_struct *pi_task = rt_mutex_get_top_task(p); in enqueue_task_dl()
959 struct sched_dl_entity *pi_se = &p->dl; in enqueue_task_dl()
967 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) { in enqueue_task_dl()
969 } else if (!dl_prio(p->normal_prio)) { in enqueue_task_dl()
977 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH); in enqueue_task_dl()
987 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) in enqueue_task_dl()
990 enqueue_dl_entity(&p->dl, pi_se, flags); in enqueue_task_dl()
992 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_dl()
993 enqueue_pushable_dl_task(rq, p); in enqueue_task_dl()
996 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in __dequeue_task_dl() argument
998 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
999 dequeue_pushable_dl_task(rq, p); in __dequeue_task_dl()
1002 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_dl() argument
1005 __dequeue_task_dl(rq, p, flags); in dequeue_task_dl()
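
Two guards are visible in the enqueue_task_dl() hits above: a throttled task (exhausted budget) must not be re-enqueued except by the replenishment timer (the ENQUEUE_REPLENISH check), and a task deadline-boosted through a rt_mutex runs on its donor's parameters (the pi_task/pi_se selection). A self-contained sketch of both decisions, with illustrative types and an illustrative flag value:

#include <stdbool.h>
#include <stddef.h>

#define ENQUEUE_REPLENISH 0x02            /* illustrative value, not the kernel's */

struct dl_entity {
    bool throttled;                       /* budget exhausted, timer pending */
    bool boosted;                         /* boosted through a rt_mutex      */
};

struct task {
    struct dl_entity dl;
    struct task *pi_donor;                /* top waiter donating its deadline */
};

/* Returns the entity whose (runtime, deadline) the enqueue should use,
 * or NULL when the enqueue must wait for the replenishment timer. */
static struct dl_entity *dl_enqueue_params(struct task *p, int flags)
{
    if (p->dl.throttled && !(flags & ENQUEUE_REPLENISH))
        return NULL;                      /* stay off the rq until replenished */
    if (p->dl.boosted && p->pi_donor)
        return &p->pi_donor->dl;          /* charge the donor's bandwidth */
    return &p->dl;
}
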
1020 struct task_struct *p = rq->curr; in yield_task_dl() local
1028 if (p->dl.runtime > 0) { in yield_task_dl()
1030 p->dl.runtime = 0; in yield_task_dl()
1047 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) in select_task_rq_dl() argument
1071 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1072 (p->nr_cpus_allowed > 1)) { in select_task_rq_dl()
1073 int target = find_later_rq(p); in select_task_rq_dl()
1076 (dl_time_before(p->dl.deadline, in select_task_rq_dl()
1087 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) in check_preempt_equal_dl() argument
1101 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
1102 cpudl_find(&rq->rd->cpudl, p, NULL) != -1) in check_preempt_equal_dl()
1114 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, in check_preempt_curr_dl() argument
1117 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1127 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1129 check_preempt_equal_dl(rq, p); in check_preempt_curr_dl()
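
The check_preempt_curr_dl()/check_preempt_equal_dl() hits encode plain EDF plus one SMP refinement: a waking task preempts the running one iff its absolute deadline is strictly earlier, and on an exact tie the current task is rescheduled only when it can migrate somewhere else while the waking task cannot (otherwise push/pull is left to sort it out). A self-contained sketch of that decision:

#include <stdbool.h>

enum preempt_action { RUN_LATER, PREEMPT_CURR };

static bool dl_time_before(unsigned long long a, unsigned long long b)
{
    return (long long)(a - b) < 0;        /* wrap-safe, as in the kernel */
}

static enum preempt_action
edf_check(unsigned long long waking_dl, unsigned long long curr_dl,
          bool curr_can_move, bool waking_can_move)
{
    if (dl_time_before(waking_dl, curr_dl))
        return PREEMPT_CURR;              /* strict EDF: earlier deadline wins */
    if (waking_dl == curr_dl && curr_can_move && !waking_can_move)
        return PREEMPT_CURR;              /* tie: evict the one that can migrate */
    return RUN_LATER;                     /* wait, or rely on push/pull */
}
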
1134 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1136 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1139 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1158 struct task_struct *p; in pick_next_task_dl() local
1197 p = dl_task_of(dl_se); in pick_next_task_dl()
1198 p->se.exec_start = rq_clock_task(rq); in pick_next_task_dl()
1201 dequeue_pushable_dl_task(rq, p); in pick_next_task_dl()
1204 start_hrtick_dl(rq, p); in pick_next_task_dl()
1208 return p; in pick_next_task_dl()
1211 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) in put_prev_task_dl() argument
1215 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
1216 enqueue_pushable_dl_task(rq, p); in put_prev_task_dl()
1219 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) in task_tick_dl() argument
1228 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
1229 is_leftmost(p, &rq->dl)) in task_tick_dl()
1230 start_hrtick_dl(rq, p); in task_tick_dl()
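
start_hrtick_dl(rq, p) arms a high-resolution timer for exactly p->dl.runtime nanoseconds of remaining budget, so throttling happens at the moment of exhaustion rather than at the next periodic tick (hence the hrtick_enabled()/is_leftmost() guards in task_tick_dl()). A user-space analogy using the standard Linux timerfd API; the budget value is an illustrative stand-in for p->dl.runtime:

#include <sys/timerfd.h>
#include <unistd.h>

/* Arm a one-shot CLOCK_MONOTONIC timer for the remaining budget (ns);
 * the fd becomes readable when the budget is exhausted. */
static int arm_budget_timer(long long runtime_ns)
{
    int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
    struct itimerspec its = {
        .it_value = {
            .tv_sec  = runtime_ns / 1000000000LL,
            .tv_nsec = runtime_ns % 1000000000LL,
        },                                /* it_interval zero: one-shot */
    };

    if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
        if (fd >= 0)
            close(fd);
        return -1;
    }
    return fd;
}
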
1233 static void task_fork_dl(struct task_struct *p) in task_fork_dl() argument
1241 static void task_dead_dl(struct task_struct *p) in task_dead_dl() argument
1243 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_dead_dl()
1250 dl_b->total_bw -= p->dl.dl_bw; in task_dead_dl()
1256 struct task_struct *p = rq->curr; in set_curr_task_dl() local
1258 p->se.exec_start = rq_clock_task(rq); in set_curr_task_dl()
1261 dequeue_pushable_dl_task(rq, p); in set_curr_task_dl()
1269 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) in pick_dl_task() argument
1271 if (!task_running(rq, p) && in pick_dl_task()
1272 cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) in pick_dl_task()
1282 struct task_struct *p = NULL; in pick_next_earliest_dl_task() local
1288 p = dl_task_of(dl_se); in pick_next_earliest_dl_task()
1290 if (pick_dl_task(rq, p, cpu)) in pick_next_earliest_dl_task()
1291 return p; in pick_next_earliest_dl_task()
1306 struct task_struct *p = NULL; in pick_earliest_pushable_dl_task() local
1313 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks); in pick_earliest_pushable_dl_task()
1315 if (pick_dl_task(rq, p, cpu)) in pick_earliest_pushable_dl_task()
1316 return p; in pick_earliest_pushable_dl_task()
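
pick_dl_task() is the eligibility filter used by both pick_next_earliest_dl_task() and pick_earliest_pushable_dl_task(): a candidate may be claimed for a CPU only if it is not running at this instant and its affinity mask allows that CPU. The same test in self-contained form, with a plain bitmask standing in for the kernel cpumask:

#include <stdbool.h>
#include <stdint.h>

struct cand {
    bool running;                         /* task_running(rq, p) analogue  */
    uint64_t cpus_allowed;                /* bit n set => CPU n is allowed */
};

static bool can_claim_for_cpu(const struct cand *p, int cpu)
{
    return !p->running &&
           (p->cpus_allowed & (UINT64_C(1) << cpu)) != 0;
}
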
1473 struct task_struct *p; in pick_next_pushable_dl_task() local
1478 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, in pick_next_pushable_dl_task()
1481 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
1482 BUG_ON(task_current(rq, p)); in pick_next_pushable_dl_task()
1483 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
1485 BUG_ON(!task_on_rq_queued(p)); in pick_next_pushable_dl_task()
1486 BUG_ON(!dl_task(p)); in pick_next_pushable_dl_task()
1488 return p; in pick_next_pushable_dl_task()
1583 struct task_struct *p; in pull_dl_task() local
1622 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
1629 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
1631 dl_time_before(p->dl.deadline, in pull_dl_task()
1633 WARN_ON(p == src_rq->curr); in pull_dl_task()
1634 WARN_ON(!task_on_rq_queued(p)); in pull_dl_task()
1640 if (dl_time_before(p->dl.deadline, in pull_dl_task()
1646 deactivate_task(src_rq, p, 0); in pull_dl_task()
1647 set_task_cpu(p, this_cpu); in pull_dl_task()
1648 activate_task(this_rq, p, 0); in pull_dl_task()
1649 dmin = p->dl.deadline; in pull_dl_task()
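
The pull_dl_task() hits implement the pull side of global EDF: when this CPU is about to run something late (or nothing), it scans the other runqueues' earliest pushable tasks and pulls the most urgent one, tightening dmin as it goes; the inner dl_time_before() check also refuses a task that is less urgent than what its source CPU is already running. A self-contained sketch of that selection rule, with an array standing in for the runqueues:

#include <stdbool.h>

struct fake_rq {
    bool has_pushable;
    unsigned long long pushable_dl;       /* earliest pushable task's deadline  */
    unsigned long long curr_dl;           /* deadline of the task running there */
};

static bool dl_time_before(unsigned long long a, unsigned long long b)
{
    return (long long)(a - b) < 0;
}

/* dmin starts as this CPU's earliest queued deadline (or ULLONG_MAX when
 * idle); returns the CPU to pull from, or -1 if no pull helps. */
static int pick_pull_src(const struct fake_rq *rqs, int nr_cpu, int this_cpu,
                         unsigned long long dmin)
{
    int best = -1;

    for (int cpu = 0; cpu < nr_cpu; cpu++) {
        if (cpu == this_cpu || !rqs[cpu].has_pushable)
            continue;
        if (!dl_time_before(rqs[cpu].pushable_dl, dmin))
            continue;                     /* would not run here any sooner */
        if (dl_time_before(rqs[cpu].curr_dl, rqs[cpu].pushable_dl))
            continue;                     /* source's current task is even more
                                             urgent, so p is not due there yet */
        best = cpu;
        dmin = rqs[cpu].pushable_dl;      /* tighten the bar for later CPUs */
    }
    return best;
}
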
1665 static void task_woken_dl(struct rq *rq, struct task_struct *p) in task_woken_dl() argument
1667 if (!task_running(rq, p) && in task_woken_dl()
1669 p->nr_cpus_allowed > 1 && in task_woken_dl()
1672 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
1677 static void set_cpus_allowed_dl(struct task_struct *p, in set_cpus_allowed_dl() argument
1683 BUG_ON(!dl_task(p)); in set_cpus_allowed_dl()
1685 rq = task_rq(p); in set_cpus_allowed_dl()
1703 __dl_clear(src_dl_b, p->dl.dl_bw); in set_cpus_allowed_dl()
1707 set_cpus_allowed_common(p, new_mask); in set_cpus_allowed_dl()
1742 static void switched_from_dl(struct rq *rq, struct task_struct *p) in switched_from_dl() argument
1750 if (!start_dl_timer(p)) in switched_from_dl()
1751 __dl_clear_params(p); in switched_from_dl()
1758 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
1768 static void switched_to_dl(struct rq *rq, struct task_struct *p) in switched_to_dl() argument
1770 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_dl()
1772 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
1776 check_preempt_curr_dl(rq, p, 0); in switched_to_dl()
1787 static void prio_changed_dl(struct rq *rq, struct task_struct *p, in prio_changed_dl() argument
1790 if (task_on_rq_queued(p) || rq->curr == p) { in prio_changed_dl()
1806 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
1817 switched_to_dl(rq, p); in prio_changed_dl()