Searched refs: src_rq (Results 1 - 4 of 4) sorted by relevance

/linux-4.4.14/kernel/sched/
rt.c:1903 struct rq *rq, *src_rq; try_to_push_tasks() local
1913 src_rq = rq_of_rt_rq(rt_rq); try_to_push_tasks()
1930 rt_rq->push_cpu = src_rq->cpu; try_to_push_tasks()
1933 cpu = find_next_push_cpu(src_rq); try_to_push_tasks()
1967 struct rq *src_rq; pull_rt_task() local
1989 src_rq = cpu_rq(cpu); pull_rt_task()
1992 * Don't bother taking the src_rq->lock if the next highest pull_rt_task()
1995 * logically higher, the src_rq will push this task away. pull_rt_task()
1998 if (src_rq->rt.highest_prio.next >= pull_rt_task()
2007 double_lock_balance(this_rq, src_rq); pull_rt_task()
2013 p = pick_highest_pushable_task(src_rq, this_cpu); pull_rt_task()
2020 WARN_ON(p == src_rq->curr); pull_rt_task()
2031 if (p->prio < src_rq->curr->prio) pull_rt_task()
2036 deactivate_task(src_rq, p, 0); pull_rt_task()
2047 double_unlock_balance(this_rq, src_rq); pull_rt_task()
deadline.c:1585 struct rq *src_rq; pull_dl_task() local
1601 src_rq = cpu_rq(cpu); pull_dl_task()
1609 src_rq->dl.earliest_dl.next)) pull_dl_task()
1613 double_lock_balance(this_rq, src_rq); pull_dl_task()
1619 if (src_rq->dl.dl_nr_running <= 1) pull_dl_task()
1622 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); pull_dl_task()
1633 WARN_ON(p == src_rq->curr); pull_dl_task()
1641 src_rq->curr->dl.deadline)) pull_dl_task()
1646 deactivate_task(src_rq, p, 0); pull_dl_task()
1654 double_unlock_balance(this_rq, src_rq); pull_dl_task()
fair.c:1257 struct rq *src_rq = cpu_rq(env->src_cpu); task_numa_compare() local
1341 if (imp > env->best_imp && src_rq->nr_running == 1 && task_numa_compare()
5525 struct rq *src_rq; member in struct:lb_env
5555 lockdep_assert_held(&env->src_rq->lock); task_hot()
5576 delta = rq_clock_task(env->src_rq) - p->se.exec_start; task_hot()
5607 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) migrate_degrades_locality()
5644 lockdep_assert_held(&env->src_rq->lock); can_migrate_task()
5689 if (task_running(env->src_rq, p)) { can_migrate_task()
5722 lockdep_assert_held(&env->src_rq->lock); detach_task()
5724 deactivate_task(env->src_rq, p, 0); detach_task()
5730 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
5739 lockdep_assert_held(&env->src_rq->lock); detach_one_task()
5741 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { detach_one_task()
5769 struct list_head *tasks = &env->src_rq->cfs_tasks; detach_tasks()
5774 lockdep_assert_held(&env->src_rq->lock); detach_tasks()
5784 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) detach_tasks()
6900 (env->src_rq->cfs.h_nr_running == 1)) { need_active_balance()
6901 if ((check_cpu_capacity(env->src_rq, sd)) && need_active_balance()
7006 env.src_rq = busiest; load_balance()
7388 .src_rq = busiest_rq,
core.c:1308 struct rq *src_rq, *dst_rq; __migrate_swap_task() local
1310 src_rq = task_rq(p); __migrate_swap_task()
1313 deactivate_task(src_rq, p, 0); __migrate_swap_task()
1335 struct rq *src_rq, *dst_rq; migrate_swap_stop() local
1341 src_rq = cpu_rq(arg->src_cpu); migrate_swap_stop()
1346 double_rq_lock(src_rq, dst_rq); migrate_swap_stop()
1366 double_rq_unlock(src_rq, dst_rq); migrate_swap_stop()

Completed in 254 milliseconds