dest_cpu          145 arch/ia64/kernel/smp.c send_IPI_single (int dest_cpu, int op)
dest_cpu          147 arch/ia64/kernel/smp.c 	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
dest_cpu          148 arch/ia64/kernel/smp.c 	ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
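
Putting the three arch/ia64/kernel/smp.c hits back together, the sender plausibly reads as below; only the storage class and braces are assumed, the statements are verbatim from the hits:

static inline void
send_IPI_single (int dest_cpu, int op)
{
	/* Record which IPI operation the target CPU should service... */
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	/* ...then raise the actual interrupt on that CPU. */
	ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
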
dest_cpu          210 arch/parisc/kernel/smp.c send_IPI_single(int dest_cpu, enum ipi_message_type op)
dest_cpu          212 arch/parisc/kernel/smp.c 	BUG_ON(dest_cpu == NO_PROC_ID);
dest_cpu          214 arch/parisc/kernel/smp.c 	ipi_send(dest_cpu, op);
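
The parisc variant has the same shape; a sketch assembled from the hits, with the storage class assumed:

static void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	/* NO_PROC_ID means "no such CPU"; sending there is a hard bug. */
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}
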
dest_cpu          674 drivers/parisc/iosapic.c 	int dest_cpu;
dest_cpu          676 drivers/parisc/iosapic.c 	dest_cpu = cpu_check_affinity(d, dest);
dest_cpu          677 drivers/parisc/iosapic.c 	if (dest_cpu < 0)
dest_cpu          680 drivers/parisc/iosapic.c 	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
dest_cpu          681 drivers/parisc/iosapic.c 	vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
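
These drivers/parisc/iosapic.c hits come from the iosapic irq_chip's set-affinity path. A minimal sketch of the surrounding handler, assuming the usual ->irq_set_affinity() prototype; names outside the hits are assumptions:

static int iosapic_set_affinity_irq(struct irq_data *d,
				    const struct cpumask *dest, bool force)
{
	struct vector_info *vi = irq_data_get_irq_chip_data(d);	/* assumed */
	int dest_cpu;

	/* Reduce the requested mask to a single usable CPU. */
	dest_cpu = cpu_check_affinity(d, dest);
	if (dest_cpu < 0)
		return -1;

	/* Publish the effective single-CPU mask, then retarget the
	 * interrupt transaction address at the chosen CPU. */
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
	vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);

	/* IRT reprogramming under the iosapic lock elided. */
	return 0;
}
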
dest_cpu          191 include/trace/events/sched.h 	TP_PROTO(struct task_struct *p, int dest_cpu),
dest_cpu          193 include/trace/events/sched.h 	TP_ARGS(p, dest_cpu),
dest_cpu          200 include/trace/events/sched.h 		__field(	int,	dest_cpu		)
dest_cpu          208 include/trace/events/sched.h 		__entry->dest_cpu	= dest_cpu;
dest_cpu          213 include/trace/events/sched.h 		  __entry->orig_cpu, __entry->dest_cpu)
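
The include/trace/events/sched.h hits belong to the sched_migrate_task tracepoint. Roughly, with the comm/pid/prio fields and the full format string elided:

TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		/* comm/pid/prio fields elided */
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("orig_cpu=%d dest_cpu=%d",	/* abbreviated format */
		  __entry->orig_cpu, __entry->dest_cpu)
);
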
dest_cpu         1507 kernel/sched/core.c 	int dest_cpu;
dest_cpu         1520 kernel/sched/core.c 				 struct task_struct *p, int dest_cpu)
dest_cpu         1523 kernel/sched/core.c 	if (!is_cpu_allowed(p, dest_cpu))
dest_cpu         1527 kernel/sched/core.c 	rq = move_queued_task(rq, rf, p, dest_cpu);
dest_cpu         1565 kernel/sched/core.c 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
dest_cpu         1567 kernel/sched/core.c 			p->wake_cpu = arg->dest_cpu;
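
The kernel/sched/core.c hits around 1507-1527 are struct migration_arg and __migrate_task(); the 1565/1567 hits are migration_cpu_stop(), which either moves a still-queued task or just records arg->dest_cpu in p->wake_cpu for a task caught mid-wakeup. A sketch of the first two, assembled from the hits (clock update and locking details elided):

struct migration_arg {
	struct task_struct	*task;
	int			dest_cpu;
};

static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity may have changed since the migration was requested. */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	rq = move_queued_task(rq, rf, p, dest_cpu);
	return rq;
}
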
dest_cpu         1628 kernel/sched/core.c 	unsigned int dest_cpu;
dest_cpu         1655 kernel/sched/core.c 	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
dest_cpu         1656 kernel/sched/core.c 	if (dest_cpu >= nr_cpu_ids) {
dest_cpu         1678 kernel/sched/core.c 		struct migration_arg arg = { p, dest_cpu };
dest_cpu         1688 kernel/sched/core.c 		rq = move_queued_task(rq, &rf, p, dest_cpu);
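
The 1628-1688 hits sit in the set-cpus-allowed path. Its shape, sketched with locals, early exits and error paths elided; treat this as an approximation of the flow around the hits, not a verbatim excerpt:

	/* Any CPU that is both usable and in the new mask will do;
	 * >= nr_cpu_ids means the intersection was empty. */
	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Running task: hand off to the stopper thread. */
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
	} else if (task_on_rq_queued(p)) {
		/* Merely queued: move it directly under the rq lock. */
		rq = move_queued_task(rq, &rf, p, dest_cpu);
	}
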
dest_cpu         2031 kernel/sched/core.c 	int dest_cpu;
dest_cpu         2042 kernel/sched/core.c 		for_each_cpu(dest_cpu, nodemask) {
dest_cpu         2043 kernel/sched/core.c 			if (!cpu_active(dest_cpu))
dest_cpu         2045 kernel/sched/core.c 			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
dest_cpu         2046 kernel/sched/core.c 				return dest_cpu;
dest_cpu         2052 kernel/sched/core.c 		for_each_cpu(dest_cpu, p->cpus_ptr) {
dest_cpu         2053 kernel/sched/core.c 			if (!is_cpu_allowed(p, dest_cpu))
dest_cpu         2092 kernel/sched/core.c 	return dest_cpu;
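
The 2031-2092 hits are select_fallback_rq(): when a task's chosen CPU has gone away, try node-local CPUs first, then anything in its affinity mask. A skeleton sketch; the mask-widening fallback and the label are assumed:

	/* First pass: an active CPU on the same node. */
	for_each_cpu(dest_cpu, nodemask) {
		if (!cpu_active(dest_cpu))
			continue;
		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
			return dest_cpu;
	}

	for (;;) {
		/* Second pass: anything the task may run on at all. */
		for_each_cpu(dest_cpu, p->cpus_ptr) {
			if (!is_cpu_allowed(p, dest_cpu))
				continue;
			goto out;	/* label assumed */
		}
		/* Neither worked: widen the allowed mask and retry (elided). */
	}
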
dest_cpu         3494 kernel/sched/core.c 	int dest_cpu;
dest_cpu         3497 kernel/sched/core.c 	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
dest_cpu         3498 kernel/sched/core.c 	if (dest_cpu == smp_processor_id())
dest_cpu         3501 kernel/sched/core.c 	if (likely(cpu_active(dest_cpu))) {
dest_cpu         3502 kernel/sched/core.c 		struct migration_arg arg = { p, dest_cpu };
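
The 3494-3502 hits are sched_exec(): exec() is a cheap moment to rebalance, since the task is about to discard its address space anyway. A sketch assuming the usual pi_lock protocol around the hits:

void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p),
						  SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
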
dest_cpu         6235 kernel/sched/core.c 	int dest_cpu;
dest_cpu         6289 kernel/sched/core.c 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
dest_cpu         6290 kernel/sched/core.c 		rq = __migrate_task(rq, rf, next, dest_cpu);
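
The 6235-6290 hits are in the CPU-hotplug drain path (migrate_tasks()): each task still on the dying runqueue gets a fallback CPU and is pushed there. A minimal sketch of the loop body, with the task-picking helper name assumed:

		next = __pick_migrate_task(rq);	/* helper name assumed */
		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
		rq = __migrate_task(rq, rf, next, dest_cpu);
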
dest_cpu         2718 kernel/sched/deadline.c 	unsigned int dest_cpu;
dest_cpu         2724 kernel/sched/deadline.c 	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
dest_cpu         2727 kernel/sched/deadline.c 	dl_b = dl_bw_of(dest_cpu);
dest_cpu         2729 kernel/sched/deadline.c 	cpus = dl_bw_cpus(dest_cpu);
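
The kernel/sched/deadline.c hits are deadline-bandwidth admission when a task enters a cpuset: any active CPU of the set can stand in for the whole root domain, because all of its CPUs share one struct dl_bw. Sketched, with the overflow check assumed:

	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);

	rcu_read_lock_sched();
	dl_b = dl_bw_of(dest_cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cpus = dl_bw_cpus(dest_cpu);
	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);	/* assumed */
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();
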
dest_cpu         4454 kernel/sched/fair.c 				    int src_cpu, int dest_cpu)
dest_cpu         4459 kernel/sched/fair.c 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
dest_cpu         5109 kernel/sched/fair.c 				    int src_cpu, int dest_cpu)
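
The kernel/sched/fair.c hits are throttled_lb_pair(), which keeps load balancing from moving a task into or out of a CFS-bandwidth-throttled hierarchy; the second hit (5109) is most likely the !CONFIG_CFS_BANDWIDTH stub with the same signature. A sketch of the real version, with the body beyond the hits assumed:

static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	/* Refuse the pair if either end is under a throttled hierarchy. */
	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}
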