tcpu  156 arch/s390/kvm/diag.c                  struct kvm_vcpu *tcpu;
tcpu  166 arch/s390/kvm/diag.c                  tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
tcpu  167 arch/s390/kvm/diag.c                  if (tcpu)
tcpu  168 arch/s390/kvm/diag.c                  kvm_vcpu_yield_to(tcpu);
tcpu  779 arch/x86/platform/uv/tlb_uv.c         int tcpu;
tcpu  789 arch/x86/platform/uv/tlb_uv.c         for_each_present_cpu(tcpu) {
tcpu  790 arch/x86/platform/uv/tlb_uv.c         tbcp = &per_cpu(bau_control, tcpu);
tcpu  990 arch/x86/platform/uv/tlb_uv.c         int tcpu;
tcpu  998 arch/x86/platform/uv/tlb_uv.c         for_each_present_cpu(tcpu) {
tcpu  999 arch/x86/platform/uv/tlb_uv.c         tbcp = &per_cpu(bau_control, tcpu);
tcpu  754 arch/x86/platform/uv/uv_nmi.c         int tcpu;
tcpu  764 arch/x86/platform/uv/uv_nmi.c         for_each_online_cpu(tcpu) {
tcpu  765 arch/x86/platform/uv/uv_nmi.c         if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
tcpu  767 arch/x86/platform/uv/uv_nmi.c         else if (tcpu == cpu)
tcpu  768 arch/x86/platform/uv/uv_nmi.c         uv_nmi_dump_state_cpu(tcpu, regs);
tcpu  770 arch/x86/platform/uv/uv_nmi.c         uv_nmi_trigger_dump(tcpu);
tcpu   12 drivers/media/pci/b2c2/flexcop-dma.c  u8 *tcpu;
tcpu   20 drivers/media/pci/b2c2/flexcop-dma.c  tcpu = pci_alloc_consistent(pdev, size, &tdma);
tcpu   21 drivers/media/pci/b2c2/flexcop-dma.c  if (tcpu != NULL) {
tcpu   23 drivers/media/pci/b2c2/flexcop-dma.c  dma->cpu_addr0 = tcpu;
tcpu   25 drivers/media/pci/b2c2/flexcop-dma.c  dma->cpu_addr1 = tcpu + size/2;
tcpu 1297 drivers/xen/events/events_base.c      static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
tcpu 1310 drivers/xen/events/events_base.c      bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
tcpu 1324 drivers/xen/events/events_base.c      bind_evtchn_to_cpu(evtchn, tcpu);
tcpu 1335 drivers/xen/events/events_base.c      unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
tcpu 1336 drivers/xen/events/events_base.c      int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
tcpu 1339 drivers/xen/events/events_base.c      irq_data_update_effective_affinity(data, cpumask_of(tcpu));
tcpu 1345 drivers/xen/events/events_base.c      int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
tcpu 1349 drivers/xen/events/events_base.c      return set_affinity_irq(d, cpumask_of(tcpu), false);
tcpu   63 include/xen/events.h                  int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
tcpu   85 mm/percpu-vm.c                        unsigned int cpu, tcpu;
tcpu  105 mm/percpu-vm.c                        for_each_possible_cpu(tcpu) {
tcpu  106 mm/percpu-vm.c                        if (tcpu == cpu)
tcpu  109 mm/percpu-vm.c                        __free_page(pages[pcpu_page_idx(tcpu, i)]);
tcpu  216 mm/percpu-vm.c                        unsigned int cpu, tcpu;
tcpu  232 mm/percpu-vm.c                        for_each_possible_cpu(tcpu) {
tcpu  233 mm/percpu-vm.c                        if (tcpu == cpu)
tcpu  235 mm/percpu-vm.c                        __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
tcpu 2541 mm/percpu.c                           unsigned int cpu, tcpu;
tcpu 2573 mm/percpu.c                           for_each_possible_cpu(tcpu) {
tcpu 2574 mm/percpu.c                           if (cpu == tcpu)
tcpu 2576 mm/percpu.c                           if (group_map[tcpu] == group && cpu_distance_fn &&
tcpu 2577 mm/percpu.c                           (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
tcpu 2578 mm/percpu.c                           cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
tcpu 3955 net/core/dev.c                        u32 tcpu;
tcpu 4000 net/core/dev.c                        tcpu = rflow->cpu;
tcpu 4013 net/core/dev.c                        if (unlikely(tcpu != next_cpu) &&
tcpu 4014 net/core/dev.c                        (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
tcpu 4015 net/core/dev.c                        ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
tcpu 4017 net/core/dev.c                        tcpu = next_cpu;
tcpu 4021 net/core/dev.c                        if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
tcpu 4023 net/core/dev.c                        cpu = tcpu;
tcpu 4031 net/core/dev.c                        tcpu = map->cpus[reciprocal_scale(hash, map->len)];
tcpu 4032 net/core/dev.c                        if (cpu_online(tcpu)) {
tcpu 4033 net/core/dev.c                        cpu = tcpu;
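
Read as a group, these hits show the tree's recurring use of tcpu as a "target CPU"
scratch variable. The mm/percpu-vm.c entries are the clearest instance: tcpu walks
the possible CPUs to roll back per-CPU work when the allocation for one CPU fails.
Below is a minimal kernel-style sketch of that rollback idiom, assuming a flat
pages[] array; pcpu_page_idx() and NR_UNIT_PAGES here are hypothetical stand-ins
inferred from the listing, not the real mm/percpu-vm.c definitions.

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/mm_types.h>

    #define NR_UNIT_PAGES 4  /* hypothetical pages-per-CPU unit size */

    /* Hypothetical flat index: one pages[] slot per (cpu, page) pair. */
    static int pcpu_page_idx(unsigned int tcpu, int page_idx)
    {
            return tcpu * NR_UNIT_PAGES + page_idx;
    }

    /*
     * Roll back a partially completed per-CPU page allocation: free the
     * pages of every CPU that was fully populated before "cpu", the CPU
     * whose own allocation failed.  Iteration order must match the order
     * the allocator used, which is why the same for_each_possible_cpu()
     * walk appears in both the allocate and free paths of mm/percpu-vm.c.
     */
    static void pcpu_rollback_pages(struct page **pages, int page_start,
                                    int page_end, unsigned int cpu)
    {
            unsigned int tcpu;
            int i;

            for_each_possible_cpu(tcpu) {
                    if (tcpu == cpu)
                            break;  /* nothing was allocated past here */
                    for (i = page_start; i < page_end; i++)
                            __free_page(pages[pcpu_page_idx(tcpu, i)]);
            }
    }

The same naming convention carries through the other hits: in
drivers/xen/events/events_base.c and net/core/dev.c, tcpu is the target CPU an
event channel or receive flow is being steered to, checked with cpu_online()
before it is adopted as the new destination.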