Lines matching refs:curr (uses of the identifier 'curr' in kernel/sched/rt.c)

85 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_rt_rq()
164 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_tg_rt_entry()
268 return rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
490 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue() local
504 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
855 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
886 return rt_rq->highest_prio.curr; in rt_se_prio()
941 struct task_struct *curr = rq->curr; in update_curr_rt() local
942 struct sched_rt_entity *rt_se = &curr->rt; in update_curr_rt()
945 if (curr->sched_class != &rt_sched_class) in update_curr_rt()
948 delta_exec = rq_clock_task(rq) - curr->se.exec_start; in update_curr_rt()
952 schedstat_set(curr->se.statistics.exec_max, in update_curr_rt()
953 max(curr->se.statistics.exec_max, delta_exec)); in update_curr_rt()
955 curr->se.sum_exec_runtime += delta_exec; in update_curr_rt()
956 account_group_exec_runtime(curr, delta_exec); in update_curr_rt()
958 curr->se.exec_start = rq_clock_task(rq); in update_curr_rt()
959 cpuacct_charge(curr, delta_exec); in update_curr_rt()
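
The update_curr_rt() matches above show the usual exec-runtime accounting pattern: measure the slice that has run since exec_start, fold it into the running totals, then restart the window. Below is a minimal userspace sketch of that pattern, not kernel code; task_stats, now_ns() and update_curr() are hypothetical stand-ins for the kernel's sched_entity fields, rq_clock_task() and update_curr_rt().

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the per-task accounting fields matched above. */
struct task_stats {
	uint64_t exec_start;       /* when the current slice started */
	uint64_t sum_exec_runtime; /* total runtime accumulated */
	uint64_t exec_max;         /* longest single slice observed */
};

/* Stand-in for rq_clock_task(): a monotonic clock in nanoseconds. */
static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Mirrors the accounting steps matched in update_curr_rt(). */
static void update_curr(struct task_stats *st)
{
	uint64_t now = now_ns();
	uint64_t delta_exec = now - st->exec_start;

	if (st->exec_max < delta_exec)       /* schedstat exec_max = max(...) */
		st->exec_max = delta_exec;

	st->sum_exec_runtime += delta_exec;  /* accumulate the slice */
	st->exec_start = now;                /* restart the measurement window */
}

int main(void)
{
	struct task_stats st = { .exec_start = now_ns() };
	update_curr(&st);
	printf("slice=%llu ns total=%llu ns\n",
	       (unsigned long long)st.exec_max,
	       (unsigned long long)st.sum_exec_runtime);
	return 0;
}

The kernel version additionally charges the delta to cpuacct and the task's thread group (cpuacct_charge(), account_group_exec_runtime()), which this sketch omits.
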
1041 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1042 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1058 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1061 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1069 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1082 rt_rq->highest_prio.curr = in dec_rt_prio()
1087 rt_rq->highest_prio.curr = MAX_RT_PRIO; in dec_rt_prio()
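
The inc_rt_prio()/dec_rt_prio() matches show how rt_rq->highest_prio.curr is maintained: RT priorities are inverted (a smaller number is more urgent), enqueueing a more urgent task lowers the value, and when the last task leaves it is reset to the MAX_RT_PRIO sentinel. Here is a simplified, self-contained model of that bookkeeping, assuming a plain per-priority counter array in place of the kernel's priority array and bitmap; toy_rt_rq and its fields are hypothetical.

#include <stdio.h>

#define MAX_RT_PRIO 100  /* same sentinel the matches above reset to */

/* Toy model: how many queued tasks exist at each RT priority level. */
struct toy_rt_rq {
	unsigned int nr_running[MAX_RT_PRIO];
	unsigned int rt_nr_running;
	int highest_prio_curr;   /* models rt_rq->highest_prio.curr */
};

/* Enqueue: a more urgent (numerically smaller) priority lowers the value. */
static void inc_rt_prio(struct toy_rt_rq *rq, int prio)
{
	rq->nr_running[prio]++;
	rq->rt_nr_running++;
	if (prio < rq->highest_prio_curr)
		rq->highest_prio_curr = prio;
}

/* Dequeue: if the departing task was the most urgent, rescan; reset when empty. */
static void dec_rt_prio(struct toy_rt_rq *rq, int prio)
{
	rq->nr_running[prio]--;
	rq->rt_nr_running--;

	if (rq->rt_nr_running == 0) {
		rq->highest_prio_curr = MAX_RT_PRIO;
		return;
	}
	if (prio == rq->highest_prio_curr && rq->nr_running[prio] == 0) {
		int p;
		for (p = prio + 1; p < MAX_RT_PRIO; p++) {
			if (rq->nr_running[p]) {
				rq->highest_prio_curr = p;
				break;
			}
		}
	}
}

int main(void)
{
	struct toy_rt_rq rq = { .highest_prio_curr = MAX_RT_PRIO };
	inc_rt_prio(&rq, 10);
	inc_rt_prio(&rq, 5);
	dec_rt_prio(&rq, 5);
	printf("highest_prio.curr = %d\n", rq.highest_prio_curr); /* prints 10 */
	return 0;
}

The dec_rt_prio_smp()/cpupri_set() matches are the SMP side of the same update: whenever highest_prio.curr changes on an online runqueue, the new value is published to the root domain's cpupri map.
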
1310 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1319 struct task_struct *curr; in select_task_rq_rt() local
1329 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
1353 if (curr && unlikely(rt_task(curr)) && in select_task_rq_rt()
1354 (curr->nr_cpus_allowed < 2 || in select_task_rq_rt()
1355 curr->prio <= p->prio)) { in select_task_rq_rt()
1363 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
1378 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1379 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) in check_preempt_equal_prio()
1406 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1424 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1648 if (lowest_rq->rt.highest_prio.curr <= task->prio) { in find_lock_lowest_rq()
1679 if (lowest_rq->rt.highest_prio.curr > task->prio) in find_lock_lowest_rq()
1729 if (unlikely(next_task == rq->curr)) { in push_rt_task()
1739 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
1858 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) in find_next_push_cpu()
1999 this_rq->rt.highest_prio.curr) in pull_rt_task()
2019 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2020 WARN_ON(p == src_rq->curr); in pull_rt_task()
2031 if (p->prio < src_rq->curr->prio) in pull_rt_task()
2061 !test_tsk_need_resched(rq->curr) && in task_woken_rt()
2063 (dl_task(rq->curr) || rt_task(rq->curr)) && in task_woken_rt()
2064 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2065 rq->curr->prio <= p->prio)) in task_woken_rt()
2077 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in rq_online_rt()
2135 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_rt()
2140 if (p->prio < rq->curr->prio) in switched_to_rt()
2156 if (rq->curr == p) { in prio_changed_rt()
2169 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2182 if (p->prio < rq->curr->prio) in prio_changed_rt()
2244 struct task_struct *p = rq->curr; in set_curr_task_rt()
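
A pattern that recurs throughout the matches above (check_preempt_curr_rt(), push_rt_task(), pull_rt_task(), switched_to_rt(), prio_changed_rt(), task_woken_rt()) is the comparison p->prio < rq->curr->prio: because RT priorities are inverted, a strictly smaller prio means the waking or pulled task should preempt the current one, while curr->prio <= p->prio together with curr->nr_cpus_allowed < 2 argues for pushing p to another CPU instead. A small hedged sketch of just those two tests follows; toy_task, should_preempt() and should_try_push() are hypothetical helpers, not kernel functions, and the real conditions also check need_resched and p's own CPU affinity.

#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	const char *name;
	int prio;            /* 0..MAX_RT_PRIO-1, smaller is more urgent */
	int nr_cpus_allowed;
};

/* Models p->prio < rq->curr->prio: a more urgent waking task preempts. */
static bool should_preempt(const struct toy_task *p, const struct toy_task *curr)
{
	return p->prio < curr->prio;
}

/* Models the push-away condition seen in task_woken_rt()/select_task_rq_rt():
 * if curr is pinned or at least as urgent as p, try pushing p elsewhere. */
static bool should_try_push(const struct toy_task *p, const struct toy_task *curr)
{
	return curr->nr_cpus_allowed < 2 || curr->prio <= p->prio;
}

int main(void)
{
	struct toy_task curr = { "curr", 50, 4 };
	struct toy_task p    = { "p",    10, 4 };

	printf("preempt: %d, push: %d\n",
	       should_preempt(&p, &curr), should_try_push(&p, &curr));
	return 0;
}
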