Lines matching refs:dest_cpu in kernel/sched/core.c (identifier cross-reference; each entry shows the source line number, the matching code, and its enclosing function)

1092 	int dest_cpu;  member
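
The "member" hit at line 1092 is the dest_cpu field of struct migration_arg, the small argument bundle handed to the migration stopper callback. The layout below is reconstructed from the { p, dest_cpu } initializers further down the listing, not shown in the search output itself:

struct migration_arg {
	struct task_struct	*task;		/* task to migrate */
	int			dest_cpu;	/* where to move it */
};
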
1104 static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu) in __migrate_task() argument
1106 if (unlikely(!cpu_active(dest_cpu))) in __migrate_task()
1110 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in __migrate_task()
1113 rq = move_queued_task(rq, p, dest_cpu); in __migrate_task()
1149 rq = __migrate_task(rq, p, arg->dest_cpu); in migration_cpu_stop()
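
Lines 1104-1113 are __migrate_task(), which migration_cpu_stop() (line 1149) calls from stopper context to actually move the task. Filling in the control flow between the matched lines gives roughly the following; the two early "return rq" bail-outs are inferred, since only the dest_cpu-bearing lines appear in the search:

static struct rq *__migrate_task(struct rq *rq, struct task_struct *p,
				 int dest_cpu)
{
	/* Destination CPU went inactive while the request was in flight. */
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed again after dest_cpu was chosen. */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);
	return rq;
}
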
1210 unsigned int dest_cpu; in __set_cpus_allowed_ptr() local
1238 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); in __set_cpus_allowed_ptr()
1240 struct migration_arg arg = { p, dest_cpu }; in __set_cpus_allowed_ptr()
1252 rq = move_queued_task(rq, p, dest_cpu); in __set_cpus_allowed_ptr()
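
__set_cpus_allowed_ptr() (lines 1210-1252) picks dest_cpu with cpumask_any_and() from the intersection of the new affinity mask and cpu_active_mask. A task that is currently running (or mid-wakeup) must be chased off its CPU through the stopper, while a task that is merely queued can be moved directly. A condensed sketch, with the rq locking and lockdep choreography elided:

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from the migration thread: drop the lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
	} else if (task_on_rq_queued(p)) {
		rq = move_queued_task(rq, p, dest_cpu);
	}
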
1552 int dest_cpu; in select_fallback_rq() local
1563 for_each_cpu(dest_cpu, nodemask) { in select_fallback_rq()
1564 if (!cpu_online(dest_cpu)) in select_fallback_rq()
1566 if (!cpu_active(dest_cpu)) in select_fallback_rq()
1568 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in select_fallback_rq()
1569 return dest_cpu; in select_fallback_rq()
1575 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { in select_fallback_rq()
1576 if (!cpu_online(dest_cpu)) in select_fallback_rq()
1578 if (!cpu_active(dest_cpu)) in select_fallback_rq()
1616 return dest_cpu; in select_fallback_rq()
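
select_fallback_rq() (lines 1552-1616) is the chooser of last resort when a task's selected CPU is unusable: it first scans the CPUs of the task's preferred NUMA node, then the whole affinity mask, skipping offline and inactive CPUs in both passes. The sketch below is reconstructed from the matched lines; the real function keeps retrying with progressively wider masks (cpuset fallback, then cpu_possible_mask, with a one-time warning), which is summarized here as a comment:

static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = cpumask_of_node(nid);
	int dest_cpu;

	/* Pass 1: an online, active CPU on the same node the task may use. */
	for_each_cpu(dest_cpu, nodemask) {
		if (!cpu_online(dest_cpu))
			continue;
		if (!cpu_active(dest_cpu))
			continue;
		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
			return dest_cpu;
	}

	/* Pass 2: any online, active CPU in the affinity mask. */
	for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
		if (!cpu_online(dest_cpu))
			continue;
		if (!cpu_active(dest_cpu))
			continue;
		return dest_cpu;
	}

	/* Real code: widen the mask (cpuset, then possible) and retry. */
	return dest_cpu;
}
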
2788 int dest_cpu; in sched_exec() local
2791 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); in sched_exec()
2792 if (dest_cpu == smp_processor_id()) in sched_exec()
2795 if (likely(cpu_active(dest_cpu))) { in sched_exec()
2796 struct migration_arg arg = { p, dest_cpu }; in sched_exec()
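
sched_exec() (lines 2788-2796) treats exec() as a cheap rebalancing point, since the task's memory footprint is at its minimum right then: it asks the task's scheduling class for a CPU using SD_BALANCE_EXEC, and if the answer is a different, still-active CPU, it migrates itself through the same migration_arg/stopper path. Close to the actual function of this era, modulo comments:

void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p),
						  SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
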
5089 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, in task_can_attach() local
5097 dl_b = dl_bw_of(dest_cpu); in task_can_attach()
5099 cpus = dl_bw_cpus(dest_cpu); in task_can_attach()
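
In task_can_attach() (lines 5089-5099) dest_cpu is not a migration target at all; cpumask_any_and() just picks a representative active CPU of the destination cpuset so the code can find the root domain's deadline-bandwidth pool. The admission test then checks that the pool can absorb the task's dl_bw. A condensed sketch, with error paths and the surrounding cpuset logic elided, using __dl_overflow()/__dl_add() as they looked in kernels of this vintage:

	unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
						cs_cpus_allowed);
	struct dl_bw *dl_b;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(dest_cpu);	/* root domain's DL bandwidth pool */
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cpus = dl_bw_cpus(dest_cpu);	/* CPUs sharing that pool */
	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
	if (overflow)
		ret = -EBUSY;
	else
		__dl_add(dl_b, p->dl.dl_bw);
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();
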
5231 int dest_cpu; in migrate_tasks() local
5292 dest_cpu = select_fallback_rq(dead_rq->cpu, next); in migrate_tasks()
5294 rq = __migrate_task(rq, next, dest_cpu); in migrate_tasks()
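
Finally, migrate_tasks() (lines 5231-5294) ties the pieces together during CPU hot-unplug: it drains the dead CPU's runqueue by repeatedly picking the next task, asking select_fallback_rq() for a new home, and pushing the task there with __migrate_task(). The core of that loop, with the pick_next_task() bookkeeping and locking details trimmed:

	for (;;) {
		/* Only the migration/stop thread itself is left: done. */
		if (rq->nr_running == 1)
			break;

		next = pick_next_task(rq, &fake_task);
		next->sched_class->put_prev_task(rq, next);

		/* Find a suitable destination and push @next there. */
		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
		rq = __migrate_task(rq, next, dest_cpu);

		/* __migrate_task() may have moved us to the destination rq. */
		if (rq != dead_rq) {
			raw_spin_unlock(&rq->lock);
			rq = dead_rq;
			raw_spin_lock(&rq->lock);
		}
	}
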