new_cpu            81 arch/ia64/kernel/irq.c 	int 		irq, new_cpu;
new_cpu           107 arch/ia64/kernel/irq.c 			new_cpu = cpumask_any(cpu_online_mask);
new_cpu           116 arch/ia64/kernel/irq.c 						       cpumask_of(new_cpu), false);
new_cpu           320 arch/powerpc/perf/imc-pmu.c static void nest_change_cpu_context(int old_cpu, int new_cpu)
new_cpu           324 arch/powerpc/perf/imc-pmu.c 	if (old_cpu < 0 || new_cpu < 0)
new_cpu           328 arch/powerpc/perf/imc-pmu.c 		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
new_cpu          1158 arch/x86/events/intel/uncore.c 				   int new_cpu)
new_cpu          1164 arch/x86/events/intel/uncore.c 	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
new_cpu          1172 arch/x86/events/intel/uncore.c 			box->cpu = new_cpu;
new_cpu          1178 arch/x86/events/intel/uncore.c 		if (new_cpu < 0)
new_cpu          1182 arch/x86/events/intel/uncore.c 		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
new_cpu          1183 arch/x86/events/intel/uncore.c 		box->cpu = new_cpu;
new_cpu          1188 arch/x86/events/intel/uncore.c 				  int old_cpu, int new_cpu)
new_cpu          1191 arch/x86/events/intel/uncore.c 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
new_cpu           195 arch/x86/hyperv/hv_init.c 	unsigned int new_cpu;
new_cpu           216 arch/x86/hyperv/hv_init.c 		new_cpu = cpumask_any_but(cpu_online_mask, cpu);
new_cpu           218 arch/x86/hyperv/hv_init.c 		re_ctrl.target_vp = hv_vp_index[new_cpu];
new_cpu           204 drivers/irqchip/irq-bcm6345-l1.c 	unsigned int new_cpu;
new_cpu           212 drivers/irqchip/irq-bcm6345-l1.c 	new_cpu = cpumask_any_and(&valid, cpu_online_mask);
new_cpu           213 drivers/irqchip/irq-bcm6345-l1.c 	if (new_cpu >= nr_cpu_ids)
new_cpu           216 drivers/irqchip/irq-bcm6345-l1.c 	dest = cpumask_of(new_cpu);
new_cpu           219 drivers/irqchip/irq-bcm6345-l1.c 	if (old_cpu != new_cpu) {
new_cpu           231 drivers/irqchip/irq-bcm6345-l1.c 	irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
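The irq-bcm6345-l1.c hits above show the common set_affinity shape: intersect the requested mask with the online mask, pick any CPU from the result, and only retarget the interrupt when the choice actually changed. Below is a minimal user-space sketch of that flow; NR_CPUS, the bitmask variables, pick_cpu() and reprogram_irq() are hypothetical stand-ins for the kernel's cpumask API and irq_chip callbacks, not the driver's real code.

/* Sketch: choose a new target CPU for an IRQ from (requested & online). */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static int pick_cpu(uint64_t mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1ULL << cpu))
			return cpu;
	return NR_CPUS;			/* like new_cpu >= nr_cpu_ids: no candidate */
}

static void reprogram_irq(int irq, int cpu)
{
	printf("irq %d now targets cpu %d\n", irq, cpu);
}

int main(void)
{
	uint64_t requested = 0x0c;	/* caller asked for CPUs 2-3 */
	uint64_t online    = 0x0b;	/* CPUs 0, 1 and 3 are online */
	int old_cpu = 2, irq = 17;

	uint64_t valid = requested & online;	/* the cpumask_any_and() step */
	int new_cpu = pick_cpu(valid);
	if (new_cpu >= NR_CPUS)
		return 1;			/* driver returns an error here */

	if (old_cpu != new_cpu)
		reprogram_irq(irq, new_cpu);	/* mask on old CPU, unmask on new */
	return 0;
}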
new_cpu           737 drivers/perf/thunderx2_pmu.c 	int new_cpu;
new_cpu           750 drivers/perf/thunderx2_pmu.c 	new_cpu = cpumask_any_and(
new_cpu           754 drivers/perf/thunderx2_pmu.c 	tx2_pmu->cpu = new_cpu;
new_cpu           755 drivers/perf/thunderx2_pmu.c 	if (new_cpu >= nr_cpu_ids)
new_cpu           757 drivers/perf/thunderx2_pmu.c 	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
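The imc-pmu.c, uncore.c and thunderx2_pmu.c hits share one hotplug pattern: when the CPU that collects for an uncore PMU goes offline, pick any other online CPU on the same node and hand the perf event context over. The sketch below mirrors that shape in user space; struct pmu_ctx, node_mask() and migrate_events() are invented placeholders for the kernel structures and for perf_pmu_migrate_context().

/* Sketch: reassign a per-node PMU owner when its CPU goes offline. */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

struct pmu_ctx {
	int node;
	int cpu;		/* CPU currently collecting for this PMU, -1 if none */
};

static uint64_t node_mask(int node)
{
	return node ? 0xf0 : 0x0f;	/* pretend CPUs 0-3 are node 0, 4-7 node 1 */
}

static void migrate_events(struct pmu_ctx *pmu, int from, int to)
{
	printf("moving events of node %d PMU from cpu %d to cpu %d\n",
	       pmu->node, from, to);
}

static void pmu_offline_cpu(struct pmu_ctx *pmu, int cpu, uint64_t online)
{
	if (pmu->cpu != cpu)
		return;			/* this CPU did not own the PMU */

	/* the cpumask_any_and(cpumask_of_node(...), cpu_online_mask) step */
	uint64_t candidates = node_mask(pmu->node) & online & ~(1ULL << cpu);
	int new_cpu = NR_CPUS;
	for (int c = 0; c < NR_CPUS; c++)
		if (candidates & (1ULL << c)) { new_cpu = c; break; }

	pmu->cpu = new_cpu < NR_CPUS ? new_cpu : -1;
	if (new_cpu >= NR_CPUS)
		return;			/* node has no online CPU left */
	migrate_events(pmu, cpu, new_cpu);
}

int main(void)
{
	struct pmu_ctx pmu = { .node = 1, .cpu = 5 };
	pmu_offline_cpu(&pmu, 5, 0xdf);	/* CPU 5 goes offline */
	return 0;
}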
new_cpu          10645 drivers/scsi/lpfc/lpfc_init.c 	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
new_cpu          10781 drivers/scsi/lpfc/lpfc_init.c 			new_cpu = start_cpu;
new_cpu          10783 drivers/scsi/lpfc/lpfc_init.c 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
new_cpu          10788 drivers/scsi/lpfc/lpfc_init.c 				new_cpu = cpumask_next(
new_cpu          10789 drivers/scsi/lpfc/lpfc_init.c 					new_cpu, cpu_present_mask);
new_cpu          10790 drivers/scsi/lpfc/lpfc_init.c 				if (new_cpu == nr_cpumask_bits)
new_cpu          10791 drivers/scsi/lpfc/lpfc_init.c 					new_cpu = first_cpu;
new_cpu          10804 drivers/scsi/lpfc/lpfc_init.c 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
new_cpu          10812 drivers/scsi/lpfc/lpfc_init.c 					cpu, cpup->irq, new_cpu, cpup->phys_id);
new_cpu          10832 drivers/scsi/lpfc/lpfc_init.c 			new_cpu = start_cpu;
new_cpu          10834 drivers/scsi/lpfc/lpfc_init.c 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
new_cpu          10838 drivers/scsi/lpfc/lpfc_init.c 				new_cpu = cpumask_next(
new_cpu          10839 drivers/scsi/lpfc/lpfc_init.c 					new_cpu, cpu_present_mask);
new_cpu          10840 drivers/scsi/lpfc/lpfc_init.c 				if (new_cpu == nr_cpumask_bits)
new_cpu          10841 drivers/scsi/lpfc/lpfc_init.c 					new_cpu = first_cpu;
new_cpu          10858 drivers/scsi/lpfc/lpfc_init.c 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
new_cpu          10865 drivers/scsi/lpfc/lpfc_init.c 					cpu, cpup->irq, new_cpu,
new_cpu          10923 drivers/scsi/lpfc/lpfc_init.c 		new_cpu = start_cpu;
new_cpu          10925 drivers/scsi/lpfc/lpfc_init.c 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
new_cpu          10931 drivers/scsi/lpfc/lpfc_init.c 			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
new_cpu          10932 drivers/scsi/lpfc/lpfc_init.c 			if (new_cpu == nr_cpumask_bits)
new_cpu          10933 drivers/scsi/lpfc/lpfc_init.c 				new_cpu = first_cpu;
new_cpu          10939 drivers/scsi/lpfc/lpfc_init.c 		new_cpu = start_cpu;
new_cpu          10941 drivers/scsi/lpfc/lpfc_init.c 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
new_cpu          10946 drivers/scsi/lpfc/lpfc_init.c 			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
new_cpu          10947 drivers/scsi/lpfc/lpfc_init.c 			if (new_cpu == nr_cpumask_bits)
new_cpu          10948 drivers/scsi/lpfc/lpfc_init.c 				new_cpu = first_cpu;
new_cpu          10957 drivers/scsi/lpfc/lpfc_init.c 		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
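The lpfc_init.c hits come from one affinity-assignment routine that repeatedly walks cpu_present_mask starting at start_cpu, wraps back to first_cpu when cpumask_next() runs off the end, and resumes the next search one CPU further along so assignments spread round-robin. The following is a heavily simplified user-space sketch of just that wrap-around walk; present[], next_present_cpu() and the queue loop are illustrative stand-ins, and the real code also applies per-CPU matching tests inside the inner loop.

/* Sketch: round-robin assignment over present CPUs with wrap-around. */
#include <stdio.h>

#define NR_CPUS 6

static const int present[NR_CPUS] = { 1, 1, 0, 1, 1, 1 };	/* CPU 2 absent */

static int next_present_cpu(int cpu)
{
	for (cpu++; cpu < NR_CPUS; cpu++)
		if (present[cpu])
			return cpu;
	return NR_CPUS;			/* mirrors reaching nr_cpumask_bits */
}

int main(void)
{
	int first_cpu = 0;		/* first present CPU */
	int start_cpu = 0;
	int assigned[4];

	for (int q = 0; q < 4; q++) {	/* assign four hardware queues */
		int new_cpu = start_cpu;

		if (!present[new_cpu])
			new_cpu = next_present_cpu(new_cpu);
		if (new_cpu == NR_CPUS)
			new_cpu = first_cpu;	/* wrap around to the first CPU */

		assigned[q] = new_cpu;

		/* resume the next search one CPU further along */
		start_cpu = next_present_cpu(new_cpu);
		if (start_cpu == NR_CPUS)
			start_cpu = first_cpu;
	}

	for (int q = 0; q < 4; q++)
		printf("queue %d -> cpu %d\n", q, assigned[q]);
	return 0;
}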
new_cpu          1485 kernel/sched/core.c 				   struct task_struct *p, int new_cpu)
new_cpu          1491 kernel/sched/core.c 	set_task_cpu(p, new_cpu);
new_cpu          1494 kernel/sched/core.c 	rq = cpu_rq(new_cpu);
new_cpu          1497 kernel/sched/core.c 	BUG_ON(task_cpu(p) != new_cpu);
new_cpu          1702 kernel/sched/core.c void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
new_cpu          1738 kernel/sched/core.c 	WARN_ON_ONCE(!cpu_online(new_cpu));
new_cpu          1741 kernel/sched/core.c 	trace_sched_migrate_task(p, new_cpu);
new_cpu          1743 kernel/sched/core.c 	if (task_cpu(p) != new_cpu) {
new_cpu          1745 kernel/sched/core.c 			p->sched_class->migrate_task_rq(p, new_cpu);
new_cpu          1751 kernel/sched/core.c 	__set_task_cpu(p, new_cpu);
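The kernel/sched/core.c hits show set_task_cpu()'s ordering: sanity-check the destination, and only when the CPU actually changes let the task's scheduling class react before the task's CPU field is updated. Below is a minimal sketch of that shape with an invented struct task and callback; the real function also updates NUMA and wakee statistics and emits a tracepoint.

/* Sketch: notify the scheduling class, then record the task's new CPU. */
#include <assert.h>
#include <stdio.h>

struct task {
	int cpu;
	void (*migrate_task_rq)(struct task *t, int new_cpu);
};

static void fair_migrate_task_rq(struct task *t, int new_cpu)
{
	/* in the kernel this detaches load tracking from the old runqueue */
	printf("class hook: preparing move from cpu %d to cpu %d\n",
	       t->cpu, new_cpu);
}

static void set_task_cpu(struct task *t, int new_cpu, int cpu_online)
{
	assert(cpu_online);		/* stands in for WARN_ON_ONCE(!cpu_online(new_cpu)) */

	if (t->cpu != new_cpu) {	/* nothing to do for a same-CPU "move" */
		if (t->migrate_task_rq)
			t->migrate_task_rq(t, new_cpu);
	}
	t->cpu = new_cpu;		/* the __set_task_cpu() step */
}

int main(void)
{
	struct task t = { .cpu = 1, .migrate_task_rq = fair_migrate_task_rq };
	set_task_cpu(&t, 3, 1);
	return 0;
}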
new_cpu           152 kernel/sched/cpudeadline.c 	int old_idx, new_cpu;
new_cpu           167 kernel/sched/cpudeadline.c 		new_cpu = cp->elements[cp->size - 1].cpu;
new_cpu           169 kernel/sched/cpudeadline.c 		cp->elements[old_idx].cpu = new_cpu;
new_cpu           171 kernel/sched/cpudeadline.c 		cp->elements[new_cpu].idx = old_idx;
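The cpudeadline.c hits show the heap-removal trick: when a CPU leaves the deadline max-heap, the last element is copied into the freed slot and the cpu-to-index map is updated so the heap stays dense. The sketch below reproduces that move with a separate idx_of[] array standing in for the kernel's elements[cpu].idx field; the heapify step the kernel runs afterwards is reduced to a comment.

/* Sketch: remove a CPU from a dense heap by moving the last element in. */
#include <stdio.h>

#define NR_CPUS 8

struct elem { int cpu; long dl; };

static struct elem heap[NR_CPUS];
static int idx_of[NR_CPUS];		/* cpu -> position in heap, -1 if absent */
static int size;

static void cpudl_clear(int cpu)
{
	int old_idx = idx_of[cpu];
	if (old_idx == -1)
		return;			/* CPU was not in the heap */

	int new_cpu = heap[size - 1].cpu;	/* last element's CPU */
	size--;
	heap[old_idx] = heap[size];		/* fill the hole */
	idx_of[new_cpu] = old_idx;		/* keep the reverse map valid */
	idx_of[cpu] = -1;
	/* the kernel now sifts heap[old_idx] up or down to restore heap order */
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		idx_of[i] = -1;
	/* three CPUs with deadlines, largest at the root */
	heap[0] = (struct elem){ .cpu = 2, .dl = 300 }; idx_of[2] = 0;
	heap[1] = (struct elem){ .cpu = 0, .dl = 100 }; idx_of[0] = 1;
	heap[2] = (struct elem){ .cpu = 5, .dl = 200 }; idx_of[5] = 2;
	size = 3;

	cpudl_clear(0);
	printf("slot 1 now holds cpu %d, size %d\n", heap[1].cpu, size);
	return 0;
}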
new_cpu          1642 kernel/sched/deadline.c static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
new_cpu          2703 kernel/sched/fair.c static void update_scan_period(struct task_struct *p, int new_cpu)
new_cpu          2706 kernel/sched/fair.c 	int dst_nid = cpu_to_node(new_cpu);
new_cpu          2750 kernel/sched/fair.c static inline void update_scan_period(struct task_struct *p, int new_cpu)
new_cpu          5775 kernel/sched/fair.c 	int new_cpu = cpu;
new_cpu          5803 kernel/sched/fair.c 		new_cpu = find_idlest_group_cpu(group, p, cpu);
new_cpu          5804 kernel/sched/fair.c 		if (new_cpu == cpu) {
new_cpu          5811 kernel/sched/fair.c 		cpu = new_cpu;
new_cpu          5822 kernel/sched/fair.c 	return new_cpu;
new_cpu          6473 kernel/sched/fair.c 	int new_cpu = prev_cpu;
new_cpu          6481 kernel/sched/fair.c 			new_cpu = find_energy_efficient_cpu(p, prev_cpu);
new_cpu          6482 kernel/sched/fair.c 			if (new_cpu >= 0)
new_cpu          6483 kernel/sched/fair.c 				return new_cpu;
new_cpu          6484 kernel/sched/fair.c 			new_cpu = prev_cpu;
new_cpu          6503 kernel/sched/fair.c 				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
new_cpu          6517 kernel/sched/fair.c 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
new_cpu          6521 kernel/sched/fair.c 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
new_cpu          6528 kernel/sched/fair.c 	return new_cpu;
new_cpu          6538 kernel/sched/fair.c static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
new_cpu          6592 kernel/sched/fair.c 	update_scan_period(p, new_cpu);
new_cpu          1742 kernel/sched/sched.h 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
new_cpu          1370 kernel/workqueue.c 	int new_cpu;
new_cpu          1383 kernel/workqueue.c 	new_cpu = __this_cpu_read(wq_rr_cpu_last);
new_cpu          1384 kernel/workqueue.c 	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
new_cpu          1385 kernel/workqueue.c 	if (unlikely(new_cpu >= nr_cpu_ids)) {
new_cpu          1386 kernel/workqueue.c 		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
new_cpu          1387 kernel/workqueue.c 		if (unlikely(new_cpu >= nr_cpu_ids))
new_cpu          1390 kernel/workqueue.c 	__this_cpu_write(wq_rr_cpu_last, new_cpu);
new_cpu          1392 kernel/workqueue.c 	return new_cpu;
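The kernel/workqueue.c hits are wq_select_unbound_cpu()'s round-robin: remember the last CPU handed out, advance to the next allowed online CPU, wrap to the first one when the mask runs out, and fall back to the submitting CPU if no allowed CPU is online. The user-space sketch below keeps the cursor and masks as plain variables instead of per-CPU data and cpumasks.

/* Sketch: round-robin CPU selection with a remembered cursor and wrap. */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static int rr_last = NR_CPUS - 1;	/* stands in for the per-CPU wq_rr_cpu_last */

static int next_cpu_and(int after, uint64_t a, uint64_t b)
{
	for (int cpu = after + 1; cpu < NR_CPUS; cpu++)
		if ((a & b) & (1ULL << cpu))
			return cpu;
	return NR_CPUS;			/* like cpumask_next_and() running off the end */
}

static int select_unbound_cpu(int local_cpu, uint64_t allowed, uint64_t online)
{
	int new_cpu = next_cpu_and(rr_last, allowed, online);
	if (new_cpu >= NR_CPUS) {
		new_cpu = next_cpu_and(-1, allowed, online);	/* wrap to the first */
		if (new_cpu >= NR_CPUS)
			return local_cpu;	/* nothing allowed is online */
	}
	rr_last = new_cpu;
	return new_cpu;
}

int main(void)
{
	uint64_t allowed = 0x2a;	/* CPUs 1, 3 and 5 */
	uint64_t online  = 0xff;

	for (int i = 0; i < 5; i++)	/* five queued work items */
		printf("work %d -> cpu %d\n", i,
		       select_unbound_cpu(0, allowed, online));
	return 0;
}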
new_cpu          1534 tools/perf/builtin-sched.c 	bool new_cpu = false;
new_cpu          1547 tools/perf/builtin-sched.c 			new_cpu = true;
new_cpu          1652 tools/perf/builtin-sched.c 	if (sched->map.comp && new_cpu)
new_cpu           696 virt/kvm/arm/vgic/vgic.c 			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
new_cpu           700 virt/kvm/arm/vgic/vgic.c 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
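The vgic.c hits show the last pattern in this listing: when an interrupt's target vCPU changes, its node is unlinked from the old vCPU's ap_list and appended to the new vCPU's list. The sketch below uses a tiny intrusive list to illustrate the list_del()/list_add_tail() move; the struct names echo the kernel's but are simplified, and the real code also takes both vCPUs' ap_list locks in a fixed order before touching the lists.

/* Sketch: move an interrupt node from one vCPU's list to another's. */
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

struct vgic_cpu { struct list_node ap_list_head; };
struct vgic_irq { int intid; struct list_node ap_list; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

int main(void)
{
	struct vgic_cpu old_cpu, new_cpu;
	struct vgic_irq irq = { .intid = 42 };

	list_init(&old_cpu.ap_list_head);
	list_init(&new_cpu.ap_list_head);
	list_add_tail(&irq.ap_list, &old_cpu.ap_list_head);

	/* retarget: unlink from the old vCPU, queue on the new one */
	list_del(&irq.ap_list);
	list_add_tail(&irq.ap_list, &new_cpu.ap_list_head);

	printf("old list empty: %d, irq %d now on new list: %d\n",
	       old_cpu.ap_list_head.next == &old_cpu.ap_list_head,
	       irq.intid,
	       new_cpu.ap_list_head.next == &irq.ap_list);
	return 0;
}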