Lines Matching refs:dest_cpu
1162 int dest_cpu; member
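The first hit (the "member" annotation at 1162) is the dest_cpu field itself. All of these hits appear to come from the scheduler core (kernel/sched/core.c of roughly that era), though the listing does not name the file. Judging from the "{ p, dest_cpu }" initializers that show up further down in sched_exec() and set_cpus_allowed_ptr(), the member presumably belongs to the migration request structure handed to the stopper callback; a minimal sketch of that assumed layout:

        /*
         * Assumed layout, inferred from the "{ p, dest_cpu }" initializers
         * in the hits below: the task to move plus the CPU it should end up
         * on, passed to the stop-machine callback (migration_cpu_stop()).
         */
        struct migration_arg {
                struct task_struct      *task;
                int                     dest_cpu;
        };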
1310 int dest_cpu; in select_fallback_rq() local
1321 for_each_cpu(dest_cpu, nodemask) { in select_fallback_rq()
1322 if (!cpu_online(dest_cpu)) in select_fallback_rq()
1324 if (!cpu_active(dest_cpu)) in select_fallback_rq()
1326 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in select_fallback_rq()
1327 return dest_cpu; in select_fallback_rq()
1333 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { in select_fallback_rq()
1334 if (!cpu_online(dest_cpu)) in select_fallback_rq()
1336 if (!cpu_active(dest_cpu)) in select_fallback_rq()
1372 return dest_cpu; in select_fallback_rq()
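The select_fallback_rq() hits above (1310-1372) outline a two-pass search for somewhere to put a task whose chosen CPU has become unusable: first the CPUs of the original CPU's NUMA node, then the task's own affinity mask, skipping anything offline or not yet active. A condensed sketch of that control flow; the continue statements and the later passes that forcibly widen the allowed mask are not in the listing and are assumed here:

        /* Condensed sketch, not the full function. */
        static int select_fallback_rq(int cpu, struct task_struct *p)
        {
                const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
                int dest_cpu;

                /* Pass 1: prefer a usable CPU on the same NUMA node. */
                for_each_cpu(dest_cpu, nodemask) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
                                continue;
                        if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                                return dest_cpu;
                }

                /* Pass 2: any online, active CPU the task is allowed on. */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
                                continue;
                        return dest_cpu;
                }

                /*
                 * The real function retries with a forcibly widened affinity
                 * mask before giving up; that part is omitted from this sketch.
                 */
                return dest_cpu;
        }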
2422 int dest_cpu; in sched_exec() local
2425 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); in sched_exec()
2426 if (dest_cpu == smp_processor_id()) in sched_exec()
2429 if (likely(cpu_active(dest_cpu))) { in sched_exec()
2430 struct migration_arg arg = { p, dest_cpu }; in sched_exec()
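The sched_exec() hits (2422-2430) show exec-time balancing: the task's scheduling class is asked for a preferred CPU with SD_BALANCE_EXEC, and if that CPU differs from the current one and is active, the task is pushed there through the migration stopper. The pi_lock handling and the stop_one_cpu() call below are not in the listing and are assumed from the usual migration pattern:

        /* Condensed sketch: move the execing task to a better CPU. */
        void sched_exec(void)
        {
                struct task_struct *p = current;
                unsigned long flags;
                int dest_cpu;

                raw_spin_lock_irqsave(&p->pi_lock, flags);
                dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p),
                                                          SD_BALANCE_EXEC, 0);
                if (dest_cpu == smp_processor_id())
                        goto unlock;

                if (likely(cpu_active(dest_cpu))) {
                        struct migration_arg arg = { p, dest_cpu };

                        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                        /* Run migration_cpu_stop() on the task's current CPU. */
                        stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
                        return;
                }
        unlock:
                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
        }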
4696 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, in task_can_attach() local
4704 dl_b = dl_bw_of(dest_cpu); in task_can_attach()
4706 cpus = dl_bw_cpus(dest_cpu); in task_can_attach()
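In task_can_attach() (4696-4706) dest_cpu is not a migration target but a representative CPU: cpumask_any_and() picks some active CPU of the destination cpuset, and that CPU is then used to reach the SCHED_DEADLINE bandwidth accounting of its root domain. A sketch of the assumed admission check; the dl_task()/__dl_overflow() plumbing and the locking are reconstructions, not part of the listing:

        /*
         * Sketch: before a deadline task may be attached to a cpuset whose
         * CPUs belong to a different root domain, check that the destination
         * domain can absorb the task's bandwidth. Names not present in the
         * listing are assumed.
         */
        int task_can_attach(struct task_struct *p,
                            const struct cpumask *cs_cpus_allowed)
        {
                int ret = 0;

                if (dl_task(p) &&
                    !cpumask_intersects(task_rq(p)->rd->span, cs_cpus_allowed)) {
                        unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
                                                                cs_cpus_allowed);
                        struct dl_bw *dl_b = dl_bw_of(dest_cpu);
                        int cpus = dl_bw_cpus(dest_cpu);

                        /* Would adding p's bandwidth overflow the destination? */
                        if (__dl_overflow(dl_b, cpus, 0, p->dl.dl_bw))
                                ret = -EBUSY;
                        /* The real check also reserves the bandwidth under dl_b->lock. */
                }

                return ret;
        }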
4792 unsigned int dest_cpu; in set_cpus_allowed_ptr() local
4811 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); in set_cpus_allowed_ptr()
4813 struct migration_arg arg = { p, dest_cpu }; in set_cpus_allowed_ptr()
4820 rq = move_queued_task(p, dest_cpu); in set_cpus_allowed_ptr()
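set_cpus_allowed_ptr() (4792-4820) needs dest_cpu only when the new affinity mask no longer contains the CPU the task currently sits on: an arbitrary active CPU is taken from new_mask, and the task is either chased off through the migration stopper (if it is running or waking) or moved directly between runqueues (if it is merely queued). A condensed sketch; the early-out checks, locking and error handling around the four lines in the listing are assumed:

        /* Condensed sketch of changing a task's CPU affinity. */
        int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        {
                unsigned int dest_cpu;
                unsigned long flags;
                struct rq *rq;
                int ret = 0;

                rq = task_rq_lock(p, &flags);

                if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                        ret = -EINVAL;
                        goto out;
                }

                do_set_cpus_allowed(p, new_mask);

                /* Still allowed on its current CPU? Then nothing to migrate. */
                if (cpumask_test_cpu(task_cpu(p), new_mask))
                        goto out;

                dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
                if (task_running(rq, p) || p->state == TASK_WAKING) {
                        struct migration_arg arg = { p, dest_cpu };

                        /* A running task needs the stopper to push it away. */
                        task_rq_unlock(rq, p, &flags);
                        stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                        return 0;
                } else if (task_on_rq_queued(p)) {
                        rq = move_queued_task(p, dest_cpu);
                }
        out:
                task_rq_unlock(rq, p, &flags);
                return ret;
        }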
4839 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) in __migrate_task() argument
4844 if (unlikely(!cpu_active(dest_cpu))) in __migrate_task()
4856 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) in __migrate_task()
4864 rq = move_queued_task(p, dest_cpu); in __migrate_task()
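__migrate_task() (4839-4864) is the low-level move: it gives up if the destination went inactive, rechecks that dest_cpu is still in the task's affinity mask (the mask may have changed again since the decision was made), and only then pulls the task over to the destination runqueue. A condensed sketch with the locking and the "already moved" check assumed:

        /* Condensed sketch of the low-level migration step. */
        static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
        {
                struct rq *rq = cpu_rq(src_cpu);
                int ret = 0;

                if (unlikely(!cpu_active(dest_cpu)))
                        return ret;

                raw_spin_lock(&p->pi_lock);
                raw_spin_lock(&rq->lock);

                /* The task may already have been moved elsewhere. */
                if (task_cpu(p) != src_cpu)
                        goto done;

                /* Affinity may have changed again since dest_cpu was chosen. */
                if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                        goto fail;

                /* If it is not queued, the next wakeup will place it correctly. */
                if (task_on_rq_queued(p))
                        rq = move_queued_task(p, dest_cpu);
        done:
                ret = 1;
        fail:
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock(&p->pi_lock);
                return ret;
        }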
4941 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); in migration_cpu_stop()
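migration_cpu_stop() (4941) is the stopper callback the migration_arg is handed to: it runs on the task's CPU in stopper context and simply forwards to __migrate_task() with arg->dest_cpu. A minimal sketch of that assumed wrapper; the pending-wakeup flush is a reconstruction:

        /* Sketch of the stop-machine callback used by the paths above. */
        static int migration_cpu_stop(void *data)
        {
                struct migration_arg *arg = data;

                local_irq_disable();
                /* Flush pending remote wakeups so affinity applies to them too. */
                sched_ttwu_pending();
                __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
                local_irq_enable();

                return 0;
        }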
5007 int dest_cpu; in migrate_tasks() local
5040 dest_cpu = select_fallback_rq(dead_cpu, next); in migrate_tasks()
5043 __migrate_task(next, dead_cpu, dest_cpu); in migrate_tasks()
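Finally, migrate_tasks() (5007-5043) is the CPU-hotplug path: every task still left on the dying runqueue gets a new home from select_fallback_rq() and is pushed there with __migrate_task(). A condensed sketch of that loop; the pick_next_task()/fake_task bookkeeping for selecting the next victim is assumed, not shown in the listing:

        /* Sketch: drain a dead CPU's runqueue during hotplug. */
        static void migrate_tasks(unsigned int dead_cpu)
        {
                struct rq *rq = cpu_rq(dead_cpu);
                struct task_struct *next;
                int dest_cpu;

                for (;;) {
                        /* Only the stopper task left? Then we are done. */
                        if (rq->nr_running == 1)
                                break;

                        /* fake_task: assumed hotplug-only placeholder prev task. */
                        next = pick_next_task(rq, &fake_task);
                        next->sched_class->put_prev_task(rq, next);

                        /* Find a fallback CPU for @next, forcing one if needed. */
                        dest_cpu = select_fallback_rq(dead_cpu, next);

                        raw_spin_unlock(&rq->lock);
                        __migrate_task(next, dead_cpu, dest_cpu);
                        raw_spin_lock(&rq->lock);
                }
        }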