kernel/sched/rt.c, references to lowest_rq in find_lock_lowest_rq():

1713:	struct rq *lowest_rq = NULL;
1723:		lowest_rq = cpu_rq(cpu);
1725:		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1731:			lowest_rq = NULL;
1736:		if (double_lock_balance(rq, lowest_rq)) {
1744:				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
1749:				double_unlock_balance(rq, lowest_rq);
1750:				lowest_rq = NULL;
1756:		if (lowest_rq->rt.highest_prio.curr > task->prio)
1760:		double_unlock_balance(rq, lowest_rq);
1761:		lowest_rq = NULL;
1764:	return lowest_rq;
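Read in order, these references show the shape of find_lock_lowest_rq(): pick a candidate runqueue (1723), bail out early if its best queued task is already at least as urgent as ours (1725-1731; RT priorities compare with a lower numeric value meaning higher priority), take both runqueue locks (1736), revalidate the task if the source lock had to be dropped along the way (1744-1750), recheck the target under the lock (1756), and otherwise unlock and retry (1760-1761). Below is a minimal userspace sketch of that find / double-lock / revalidate pattern, not the kernel code itself: the struct rq and struct task types, NR_CPUS, MAX_TRIES and the simplified find_lowest_rq() are all hypothetical stand-ins, and double_lock_balance() keeps only the essential behaviour of its kernel namesake, returning true when the already-held source lock had to be dropped to honour lock ordering.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define NR_CPUS   4	/* hypothetical; stands in for the real CPU count */
#define MAX_TRIES 3	/* analogue of the kernel's RT_MAX_TRIES */

/* Hypothetical stand-ins for struct rq and struct task_struct. */
struct rq {
	pthread_mutex_t lock;
	int highest_prio;	/* prio of best queued task; lower = more urgent */
};

struct task {
	int prio;
	struct rq *rq;		/* runqueue the task is currently queued on */
};

/* Zero-initialized mutexes are valid with glibc's pthreads. */
static struct rq runqueues[NR_CPUS];

/* Hypothetical: pick the rq whose best queued task is least urgent. */
static struct rq *find_lowest_rq(void)
{
	struct rq *best = NULL;

	for (int i = 0; i < NR_CPUS; i++)
		if (!best || runqueues[i].highest_prio > best->highest_prio)
			best = &runqueues[i];
	return best;
}

/*
 * Take both rq locks in a fixed (address) order. Returns true if the
 * already-held source lock had to be dropped first, meaning the caller
 * must revalidate everything it read under that lock.
 */
static bool double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (busiest < this_rq) {
		pthread_mutex_unlock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
		return true;
	}
	pthread_mutex_lock(&busiest->lock);
	return false;
}

static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
{
	(void)this_rq;		/* source lock stays held, as in the kernel */
	pthread_mutex_unlock(&busiest->lock);
}

/* Find a suitable target rq for task and return it locked, else NULL.
 * Called with rq->lock already held. */
static struct rq *find_lock_lowest_rq(struct task *task, struct rq *rq)
{
	for (int tries = 0; tries < MAX_TRIES; tries++) {
		struct rq *lowest_rq = find_lowest_rq();

		if (!lowest_rq || lowest_rq == rq)
			return NULL;

		/* Target already runs something as urgent: retrying won't help. */
		if (lowest_rq->highest_prio <= task->prio)
			return NULL;

		if (double_lock_balance(rq, lowest_rq)) {
			/* rq->lock was dropped: the task may have migrated away. */
			if (task->rq != rq) {
				double_unlock_balance(rq, lowest_rq);
				return NULL;
			}
		}

		/* Recheck under the lock; if still suitable, hand it back locked. */
		if (lowest_rq->highest_prio > task->prio)
			return lowest_rq;

		/* A more urgent task arrived meanwhile: unlock and try again. */
		double_unlock_balance(rq, lowest_rq);
	}
	return NULL;
}

The boolean returned by double_lock_balance() is the crux: whenever the source lock was dropped, every earlier conclusion about the task is stale. That is why the kernel rechecks the task's runqueue, CPU affinity, running state, scheduling class and queued state; the fragment at 1744 is one arm of exactly that compound test.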
kernel/sched/rt.c, references to lowest_rq in the caller, push_rt_task():

1795:	struct rq *lowest_rq;
1823:	lowest_rq = find_lock_lowest_rq(next_task, rq);
1824:	if (!lowest_rq) {
1858:	set_task_cpu(next_task, lowest_rq->cpu);
1859:	activate_task(lowest_rq, next_task, 0);
1862:	resched_curr(lowest_rq);
1864:	double_unlock_balance(rq, lowest_rq);
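Once find_lock_lowest_rq() returns a locked target, the call sites at 1858-1864 perform the actual migration: dequeue the task from the source, retarget its CPU, enqueue it on the destination, and ask the destination to reschedule in case the pushed task should preempt whatever runs there. A hedged sketch of that sequence, reusing the hypothetical types and helpers from the sketch above; push_task(), deactivate_task(), activate_task() and resched_curr() here are simplified stand-ins modelled on the kernel functions, not the functions themselves:

/* Simplified stand-ins for the kernel helpers used at these call sites. */
static void deactivate_task(struct rq *rq, struct task *p) { (void)rq; (void)p; /* dequeue */ }
static void activate_task(struct rq *rq, struct task *p)   { (void)rq; (void)p; /* enqueue */ }
static void resched_curr(struct rq *rq)                    { (void)rq; /* kick rq's CPU */ }

/*
 * Push one queued task toward a less busy runqueue. Called with
 * rq->lock held; returns 1 if a task was migrated. (Hypothetical
 * push_task(), modelled on push_rt_task() in kernel/sched/rt.c.)
 */
static int push_task(struct rq *rq, struct task *next_task)
{
	struct rq *lowest_rq = find_lock_lowest_rq(next_task, rq);

	if (!lowest_rq)
		return 0;	/* no suitable target, or we lost a race */

	/* Both locks held: move the task atomically w.r.t. both runqueues. */
	deactivate_task(rq, next_task);
	next_task->rq = lowest_rq;	/* analogue of set_task_cpu() */
	activate_task(lowest_rq, next_task);
	resched_curr(lowest_rq);	/* preempt the target if the new task is more urgent */

	double_unlock_balance(rq, lowest_rq);
	return 1;
}

Note that double_unlock_balance() releases only the target's lock; the caller still holds, and later releases, its own rq lock, which matches the locking contract visible at 1864.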