prev_cpu           46 arch/powerpc/include/asm/kvm_book3s_64.h 	short prev_cpu[NR_CPUS];
prev_cpu          756 arch/powerpc/include/asm/kvm_host.h 	int prev_cpu;
prev_cpu         2365 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.prev_cpu = -1;
prev_cpu         2562 arch/powerpc/kvm/book3s_hv.c 	int prev_cpu;
prev_cpu         2568 arch/powerpc/kvm/book3s_hv.c 		prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
prev_cpu         2570 arch/powerpc/kvm/book3s_hv.c 		prev_cpu = vcpu->arch.prev_cpu;
prev_cpu         2584 arch/powerpc/kvm/book3s_hv.c 	if (prev_cpu != pcpu) {
prev_cpu         2585 arch/powerpc/kvm/book3s_hv.c 		if (prev_cpu >= 0 &&
prev_cpu         2586 arch/powerpc/kvm/book3s_hv.c 		    cpu_first_thread_sibling(prev_cpu) !=
prev_cpu         2588 arch/powerpc/kvm/book3s_hv.c 			radix_flush_cpu(kvm, prev_cpu, vcpu);
prev_cpu         2590 arch/powerpc/kvm/book3s_hv.c 			nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
prev_cpu         2592 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.prev_cpu = pcpu;
prev_cpu          585 arch/powerpc/kvm/book3s_hv_nested.c 	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
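The Book3S HV hits above share one pattern: each vCPU (and each nested-guest vCPU slot) remembers the physical CPU it last ran on, initialised to -1, and when the vCPU next runs on a pcpu belonging to a different core the stale guest TLB entries left on the old CPU are flushed (radix_flush_cpu) before prev_cpu is updated. Below is a minimal userspace sketch of that bookkeeping; THREADS_PER_CORE, flush_guest_tlb_on() and struct vcpu_model are stand-ins invented for the model, not kernel interfaces.

#include <stdio.h>

#define THREADS_PER_CORE 8	/* assumption: SMT8, purely for the model      */
#define NO_CPU (-1)		/* mirrors the -1 "never ran here" initialiser */

/* stand-in for cpu_first_thread_sibling(): first SMT thread of cpu's core */
static int first_thread_sibling(int cpu)
{
	return cpu - (cpu % THREADS_PER_CORE);
}

/* stand-in for radix_flush_cpu(): the model only logs the flush */
static void flush_guest_tlb_on(int cpu)
{
	printf("flush guest TLB entries left on pcpu %d\n", cpu);
}

struct vcpu_model {
	int prev_cpu;	/* last physical CPU this vCPU ran on, or NO_CPU */
};

/*
 * Model of the "did we move to another core?" check: flush the old CPU
 * only when the new pcpu belongs to a different core, then remember pcpu.
 */
static void note_switch_to_pcpu(struct vcpu_model *v, int pcpu)
{
	if (v->prev_cpu == pcpu)
		return;
	if (v->prev_cpu != NO_CPU &&
	    first_thread_sibling(v->prev_cpu) != first_thread_sibling(pcpu))
		flush_guest_tlb_on(v->prev_cpu);
	v->prev_cpu = pcpu;
}

int main(void)
{
	struct vcpu_model v = { .prev_cpu = NO_CPU };

	note_switch_to_pcpu(&v, 3);	/* first run: nothing to flush           */
	note_switch_to_pcpu(&v, 5);	/* same core (CPUs 0-7): no flush        */
	note_switch_to_pcpu(&v, 12);	/* different core: flush stale pcpu 5    */
	printf("vCPU last ran on pcpu %d\n", v.prev_cpu);
	return 0;
}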
prev_cpu          193 arch/sparc/kernel/cpumap.c 	int n, id, cpu, prev_cpu, last_cpu, level;
prev_cpu          204 arch/sparc/kernel/cpumap.c 	prev_cpu = cpu = cpumask_first(cpu_online_mask);
prev_cpu          268 arch/sparc/kernel/cpumap.c 					    (cpu == last_cpu) ? cpu : prev_cpu;
prev_cpu          290 arch/sparc/kernel/cpumap.c 		prev_cpu = cpu;
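The sparc cpumap.c hits show a different use of the name: a walk over cpu_online_mask that keeps the previously visited CPU, so that when a grouping boundary is crossed the node being closed is attributed to prev_cpu rather than the CPU that opens the next group (see the `(cpu == last_cpu) ? cpu : prev_cpu` hit). A toy sketch of that walk, with a plain array standing in for the cpumask and an invented core_of() grouping:

#include <stdio.h>

/* assumption: a toy list of online CPU ids standing in for cpu_online_mask */
static const int online[] = { 0, 1, 2, 5, 6, 7 };
#define N_ONLINE (sizeof(online) / sizeof(online[0]))

/* stand-in grouping: which "core" a cpu belongs to (4 threads per core) */
static int core_of(int cpu) { return cpu / 4; }

int main(void)
{
	int prev_cpu, cpu, last_cpu;
	size_t i;

	last_cpu = online[N_ONLINE - 1];
	prev_cpu = cpu = online[0];	/* like cpumask_first(cpu_online_mask) */

	for (i = 0; i < N_ONLINE; i++) {
		cpu = online[i];
		if (core_of(cpu) != core_of(prev_cpu))
			/* crossing a core boundary: the group that just ended
			 * extends to prev_cpu, not to the current cpu.        */
			printf("core %d ends at cpu %d\n",
			       core_of(prev_cpu), prev_cpu);
		prev_cpu = cpu;		/* remember this cpu for the next step */
	}
	/* the final group is closed by last_cpu itself */
	printf("core %d ends at cpu %d\n", core_of(last_cpu), last_cpu);
	return 0;
}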
prev_cpu          171 arch/x86/include/asm/trace/irq_vectors.h 		 unsigned int prev_cpu),
prev_cpu          173 arch/x86/include/asm/trace/irq_vectors.h 	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
prev_cpu          180 arch/x86/include/asm/trace/irq_vectors.h 		__field(	unsigned int,	prev_cpu	)
prev_cpu          188 arch/x86/include/asm/trace/irq_vectors.h 		__entry->prev_cpu	= prev_cpu;
prev_cpu          194 arch/x86/include/asm/trace/irq_vectors.h 		  __entry->prev_vector, __entry->prev_cpu)
prev_cpu          201 arch/x86/include/asm/trace/irq_vectors.h 		 unsigned int prev_cpu),				\
prev_cpu          202 arch/x86/include/asm/trace/irq_vectors.h 	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL);	\
prev_cpu           31 arch/x86/kernel/apic/vector.c 	unsigned int		prev_cpu;
prev_cpu          163 arch/x86/kernel/apic/vector.c 		apicd->prev_cpu = apicd->cpu;
prev_cpu          341 arch/x86/kernel/apic/vector.c 			   apicd->prev_cpu);
prev_cpu          352 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
prev_cpu          353 arch/x86/kernel/apic/vector.c 	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
prev_cpu          620 arch/x86/kernel/apic/vector.c 		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
prev_cpu          837 arch/x86/kernel/apic/vector.c 	unsigned int cpu = apicd->prev_cpu;
prev_cpu          896 arch/x86/kernel/apic/vector.c 	cpu = apicd->prev_cpu;
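In the x86 vector code (and the tracepoint that reports it), prev_cpu pairs with prev_vector to describe the target an interrupt is being moved away from: the current (cpu, vector) is saved before the new target is installed, and only once the interrupt is known to arrive on the new CPU is the stale vector on prev_cpu marked shut down and returned to the matrix allocator. The sketch below models just that two-phase hand-over in plain C; struct irq_desc_model and both helpers are invented for the illustration and do not mirror the real apic_chip_data or irq_matrix API.

#include <stdio.h>
#include <stdbool.h>

#define NO_VECTOR 0	/* toy "unused" marker, not the kernel's encoding */

/* toy per-interrupt descriptor mirroring the prev_* bookkeeping fields */
struct irq_desc_model {
	unsigned int cpu, vector;		/* currently programmed target   */
	unsigned int prev_cpu, prev_vector;	/* stale target awaiting cleanup */
	bool move_in_progress;
};

/* Phase 1: install a new (cpu, vector) target, remembering the old one. */
static void install_new_target(struct irq_desc_model *d,
			       unsigned int cpu, unsigned int vector)
{
	if (d->vector != NO_VECTOR) {
		d->prev_cpu = d->cpu;	/* like apicd->prev_cpu = apicd->cpu */
		d->prev_vector = d->vector;
		d->move_in_progress = true;
	}
	d->cpu = cpu;
	d->vector = vector;
}

/* Phase 2: once the IRQ has fired on the new CPU, release the stale vector. */
static void cleanup_prev_target(struct irq_desc_model *d)
{
	if (!d->move_in_progress)
		return;
	printf("freeing vector %u on previous cpu %u\n",
	       d->prev_vector, d->prev_cpu);
	d->prev_vector = NO_VECTOR;
	d->move_in_progress = false;
}

int main(void)
{
	struct irq_desc_model d = { .cpu = 0, .vector = 33 };

	install_new_target(&d, 2, 41);	/* retarget: old (0, 33) becomes prev */
	cleanup_prev_target(&d);	/* later: the stale target is torn down */
	return 0;
}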
prev_cpu          727 kernel/sched/fair.c static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
prev_cpu         5480 kernel/sched/fair.c wake_affine_idle(int this_cpu, int prev_cpu, int sync)
prev_cpu         5494 kernel/sched/fair.c 	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
prev_cpu         5495 kernel/sched/fair.c 		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
prev_cpu         5505 kernel/sched/fair.c 		   int this_cpu, int prev_cpu, int sync)
prev_cpu         5526 kernel/sched/fair.c 	this_eff_load *= capacity_of(prev_cpu);
prev_cpu         5528 kernel/sched/fair.c 	prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
prev_cpu         5547 kernel/sched/fair.c 		       int this_cpu, int prev_cpu, int sync)
prev_cpu         5552 kernel/sched/fair.c 		target = wake_affine_idle(this_cpu, prev_cpu, sync);
prev_cpu         5555 kernel/sched/fair.c 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
prev_cpu         5559 kernel/sched/fair.c 		return prev_cpu;
prev_cpu         5773 kernel/sched/fair.c 				  int cpu, int prev_cpu, int sd_flag)
prev_cpu         5778 kernel/sched/fair.c 		return prev_cpu;
prev_cpu         6211 kernel/sched/fair.c static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
prev_cpu         6218 kernel/sched/fair.c 	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
prev_cpu         6360 kernel/sched/fair.c static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
prev_cpu         6365 kernel/sched/fair.c 	int cpu, best_energy_cpu = prev_cpu;
prev_cpu         6379 kernel/sched/fair.c 	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
prev_cpu         6408 kernel/sched/fair.c 			if (cpu == prev_cpu) {
prev_cpu         6409 kernel/sched/fair.c 				prev_delta = compute_energy(p, prev_cpu, pd);
prev_cpu         6426 kernel/sched/fair.c 		if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
prev_cpu         6448 kernel/sched/fair.c 	return prev_cpu;
prev_cpu         6469 kernel/sched/fair.c select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
prev_cpu         6473 kernel/sched/fair.c 	int new_cpu = prev_cpu;
prev_cpu         6481 kernel/sched/fair.c 			new_cpu = find_energy_efficient_cpu(p, prev_cpu);
prev_cpu         6484 kernel/sched/fair.c 			new_cpu = prev_cpu;
prev_cpu         6487 kernel/sched/fair.c 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
prev_cpu         6501 kernel/sched/fair.c 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
prev_cpu         6502 kernel/sched/fair.c 			if (cpu != prev_cpu)
prev_cpu         6503 kernel/sched/fair.c 				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
prev_cpu         6517 kernel/sched/fair.c 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
prev_cpu         6521 kernel/sched/fair.c 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
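In kernel/sched/fair.c, prev_cpu is the CPU a waking task last ran on and threads through the whole wakeup path: select_task_rq_fair() starts from it, the energy-aware and slow paths fall back to it, and wake_affine_idle() prefers it when it shares a cache with the waking CPU and is idle. The sketch below models only that wake_affine_idle() fast path, with toy stand-ins for available_idle_cpu() and cpus_share_cache(); where the real code falls through to a load-weight comparison, the sketch simply keeps prev_cpu.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS_MODEL 8		/* toy machine: two 4-CPU LLC domains        */

static bool cpu_idle[NR_CPUS_MODEL] = {
	[1] = true, [5] = true,	/* assumption: CPUs 1 and 5 are idle         */
};

/* stand-in for available_idle_cpu() */
static bool idle_cpu_model(int cpu) { return cpu_idle[cpu]; }

/* stand-in for cpus_share_cache(): same LLC if in the same group of four */
static bool share_cache_model(int a, int b) { return a / 4 == b / 4; }

/*
 * Model of the wake_affine_idle() fast path visible above: if the waking
 * CPU is idle and shares a cache with prev_cpu, prefer prev_cpu when it is
 * idle too (cache-hot), otherwise take the waking CPU itself.
 */
static int pick_wake_cpu(int this_cpu, int prev_cpu)
{
	if (idle_cpu_model(this_cpu) && share_cache_model(this_cpu, prev_cpu))
		return idle_cpu_model(prev_cpu) ? prev_cpu : this_cpu;
	/* no affine win decided here: keep prev_cpu (the real code goes on
	 * to the load-weight comparison in wake_affine_weight())            */
	return prev_cpu;
}

int main(void)
{
	/* waker on CPU 1 (idle), task last ran on CPU 2 (busy, same LLC) */
	printf("wake on cpu %d\n", pick_wake_cpu(1, 2));	/* -> 1 */
	/* waker on CPU 1 (idle), task last ran on CPU 5 (idle, other LLC) */
	printf("wake on cpu %d\n", pick_wake_cpu(1, 5));	/* -> 5 */
	return 0;
}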