task_rq   86 kernel/sched/core.c     rq = task_rq(p);
task_rq   88 kernel/sched/core.c     if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
task_rq  110 kernel/sched/core.c     rq = task_rq(p);
task_rq  129 kernel/sched/core.c     if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
task_rq 1563 kernel/sched/core.c     if (task_rq(p) == rq) {
task_rq 1588 kernel/sched/core.c     struct rq *rq = task_rq(p);
task_rq 1733 kernel/sched/core.c     lockdep_is_held(&task_rq(p)->lock)));
task_rq 1761 kernel/sched/core.c     src_rq = task_rq(p);
task_rq 1901 kernel/sched/core.c     rq = task_rq(p);
task_rq 2620 kernel/sched/core.c     atomic_dec(&task_rq(p)->nr_iowait);
task_rq 2634 kernel/sched/core.c     atomic_dec(&task_rq(p)->nr_iowait);
task_rq 3531 kernel/sched/core.c     struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
task_rq 5441 kernel/sched/core.c     if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
task_rq 5703 kernel/sched/core.c     p_rq = task_rq(p);
task_rq 5714 kernel/sched/core.c     if (task_rq(p) != p_rq) {
task_rq 6112 kernel/sched/core.c     if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
task_rq 6283 kernel/sched/core.c     if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
task_rq   36 kernel/sched/deadline.c struct rq *rq = task_rq(p);
task_rq  165 kernel/sched/deadline.c rq = task_rq(p);
task_rq  924 kernel/sched/deadline.c struct rq *rq = task_rq(p);
task_rq 1649 kernel/sched/deadline.c rq = task_rq(p);
task_rq 1894 kernel/sched/deadline.c if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
task_rq 1992 kernel/sched/deadline.c if (unlikely(task_rq(task) != rq ||
task_rq 2239 kernel/sched/deadline.c rq = task_rq(p);
task_rq  448 kernel/sched/fair.c     return &task_rq(p)->cfs;
task_rq  454 kernel/sched/fair.c     struct rq *rq = task_rq(p);
task_rq 1095 kernel/sched/fair.c     (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
task_rq 5140 kernel/sched/fair.c     SCHED_WARN_ON(task_rq(p) != rq);
task_rq 6571 kernel/sched/fair.c     lockdep_assert_held(&task_rq(p)->lock);
task_rq 7463 kernel/sched/fair.c     BUG_ON(task_rq(p) != rq);
task_rq 9993 kernel/sched/fair.c     update_overutilized_status(task_rq(curr));
task_rq  241 kernel/sched/rt.c       return task_rq(p);
task_rq 1649 kernel/sched/rt.c       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
task_rq 1743 kernel/sched/rt.c       if (unlikely(task_rq(task) != rq ||
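Several of the core.c hits above (lines 86/88 and 110/129) show the same idiom: read task_rq(p) without the lock, take the runqueue lock, then re-check that the task has not migrated in the meantime. The sketch below illustrates that re-check pattern only; it is not the literal kernel helper, and everything except task_rq(), task_on_rq_migrating() and the rq lock field is illustrative.

/*
 * Minimal sketch of the task_rq() re-check pattern visible in the
 * listing above. The function name is hypothetical; real callers use
 * the kernel's own locking helpers in kernel/sched/.
 */
static struct rq *lock_task_rq_sketch(struct task_struct *p, unsigned long *flags)
{
	struct rq *rq;

	for (;;) {
		rq = task_rq(p);			/* racy read of p's runqueue */
		raw_spin_lock_irqsave(&rq->lock, *flags);
		/*
		 * Re-check under the lock: p may have been moved to another
		 * runqueue between the unlocked read and taking the lock.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;			/* stable while rq->lock is held */
		raw_spin_unlock_irqrestore(&rq->lock, *flags);
	}
}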