this_cpu 526 arch/alpha/kernel/smp.c int this_cpu = smp_processor_id();
this_cpu 527 arch/alpha/kernel/smp.c unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
this_cpu 532 arch/alpha/kernel/smp.c this_cpu, *pending_ipis, regs->pc));
this_cpu 559 arch/alpha/kernel/smp.c this_cpu, which);
this_cpu 567 arch/alpha/kernel/smp.c cpu_data[this_cpu].ipi_count++;
this_cpu 655 arch/alpha/kernel/smp.c int cpu, this_cpu = smp_processor_id();
this_cpu 657 arch/alpha/kernel/smp.c if (!cpu_online(cpu) || cpu == this_cpu)
this_cpu 702 arch/alpha/kernel/smp.c int cpu, this_cpu = smp_processor_id();
this_cpu 704 arch/alpha/kernel/smp.c if (!cpu_online(cpu) || cpu == this_cpu)
this_cpu 756 arch/alpha/kernel/smp.c int cpu, this_cpu = smp_processor_id();
this_cpu 758 arch/alpha/kernel/smp.c if (!cpu_online(cpu) || cpu == this_cpu)
this_cpu 149 arch/arm/common/bL_switcher.c unsigned int mpidr, this_cpu, that_cpu;
this_cpu 155 arch/arm/common/bL_switcher.c this_cpu = smp_processor_id();
this_cpu 159 arch/arm/common/bL_switcher.c BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
this_cpu 164 arch/arm/common/bL_switcher.c that_cpu = bL_switcher_cpu_pairing[this_cpu];
this_cpu 170 arch/arm/common/bL_switcher.c this_cpu, ob_mpidr, ib_mpidr);
this_cpu 172 arch/arm/common/bL_switcher.c this_cpu = smp_processor_id();
this_cpu 180 arch/arm/common/bL_switcher.c ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
this_cpu 227 arch/arm/common/bL_switcher.c cpu_logical_map(this_cpu) = ib_mpidr;
this_cpu 237 arch/arm/common/bL_switcher.c pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
this_cpu 37 arch/arm/include/asm/mmu_context.h void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
this_cpu 40 arch/arm/include/asm/mmu_context.h static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
this_cpu 166 arch/arm/kernel/smp_tlb.c int this_cpu;
this_cpu 172 arch/arm/kernel/smp_tlb.c this_cpu = get_cpu();
this_cpu 173 arch/arm/kernel/smp_tlb.c a15_erratum_get_cpumask(this_cpu, mm, &mask);
this_cpu 51 arch/arm/mm/context.c void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
this_cpu 61 arch/arm/mm/context.c if (cpu == this_cpu)
this_cpu 716 arch/arm64/kernel/smp.c unsigned int this_cpu;
this_cpu 720 arch/arm64/kernel/smp.c this_cpu = smp_processor_id();
this_cpu 721 arch/arm64/kernel/smp.c store_cpu_topology(this_cpu);
this_cpu 722 arch/arm64/kernel/smp.c numa_store_cpu_info(this_cpu);
this_cpu 723 arch/arm64/kernel/smp.c numa_add_cpu(this_cpu);
this_cpu 5208 arch/ia64/kernel/perfmon.c int this_cpu = smp_processor_id();
this_cpu 5243 arch/ia64/kernel/perfmon.c pfm_stats[this_cpu].pfm_smpl_handler_calls++;
this_cpu 5266 arch/ia64/kernel/perfmon.c pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
this_cpu 5408 arch/ia64/kernel/perfmon.c int this_cpu = smp_processor_id();
this_cpu 5411 arch/ia64/kernel/perfmon.c pfm_stats[this_cpu].pfm_ovfl_intr_count++;
this_cpu 5443 arch/ia64/kernel/perfmon.c pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
this_cpu 5455 arch/ia64/kernel/perfmon.c this_cpu, task_pid_nr(task));
this_cpu 5460 arch/ia64/kernel/perfmon.c this_cpu,
this_cpu 5471 arch/ia64/kernel/perfmon.c int this_cpu;
this_cpu 5475 arch/ia64/kernel/perfmon.c this_cpu = get_cpu();
this_cpu 5477 arch/ia64/kernel/perfmon.c min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
this_cpu 5478 arch/ia64/kernel/perfmon.c max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
this_cpu 5492 arch/ia64/kernel/perfmon.c if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
this_cpu 5493 arch/ia64/kernel/perfmon.c if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
this_cpu 5495 arch/ia64/kernel/perfmon.c pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
this_cpu 6612 arch/ia64/kernel/perfmon.c int i, this_cpu;
this_cpu 6616 arch/ia64/kernel/perfmon.c this_cpu = smp_processor_id();
this_cpu 6627 arch/ia64/kernel/perfmon.c this_cpu,
this_cpu 6636 arch/ia64/kernel/perfmon.c printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
this_cpu 6641 arch/ia64/kernel/perfmon.c this_cpu,
this_cpu 6655 arch/ia64/kernel/perfmon.c printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
this_cpu 6660 arch/ia64/kernel/perfmon.c printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
this_cpu 6665 arch/ia64/kernel/perfmon.c this_cpu,
this_cpu 216 arch/ia64/kernel/process.c unsigned int this_cpu = smp_processor_id();
this_cpu 224 arch/ia64/kernel/process.c ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
this_cpu 99 arch/ia64/kernel/smp.c int this_cpu = get_cpu();
this_cpu 129 arch/ia64/kernel/smp.c this_cpu, which);
this_cpu 121 arch/parisc/kernel/smp.c int this_cpu = smp_processor_id();
this_cpu 122 arch/parisc/kernel/smp.c struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
this_cpu 127 arch/parisc/kernel/smp.c spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
this_cpu 145 arch/parisc/kernel/smp.c smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
this_cpu 149 arch/parisc/kernel/smp.c smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
this_cpu 155 arch/parisc/kernel/smp.c smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
this_cpu 161 arch/parisc/kernel/smp.c smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
this_cpu 165 arch/parisc/kernel/smp.c smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
this_cpu 170 arch/parisc/kernel/smp.c smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
this_cpu 175 arch/parisc/kernel/smp.c this_cpu, which);
this_cpu 63 arch/powerpc/kernel/dbell.c int this_cpu = get_cpu();
this_cpu 66 arch/powerpc/kernel/dbell.c if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
this_cpu 598 arch/powerpc/kernel/fadump.c int old_cpu, this_cpu;
this_cpu 610 arch/powerpc/kernel/fadump.c this_cpu = smp_processor_id();
this_cpu 611 arch/powerpc/kernel/fadump.c old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);
this_cpu 110 arch/s390/kernel/machine_kexec.c int this_cpu, cpu;
this_cpu 114 arch/s390/kernel/machine_kexec.c this_cpu = smp_find_processor_id(stap());
this_cpu 116 arch/s390/kernel/machine_kexec.c if (cpu == this_cpu)
this_cpu 65 arch/s390/kernel/processor.c int cpu, this_cpu;
this_cpu 67 arch/s390/kernel/processor.c this_cpu = smp_processor_id();
this_cpu 70 arch/s390/kernel/processor.c cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
this_cpu 592 arch/sparc/kernel/chmc.c unsigned long ret, this_cpu;
this_cpu 596 arch/sparc/kernel/chmc.c this_cpu = real_hard_smp_processor_id();
this_cpu 598 arch/sparc/kernel/chmc.c if (p->portid == this_cpu) {
this_cpu 250 arch/sparc/kernel/entry.h void sun4v_register_mondo_queues(int this_cpu);
this_cpu 1000 arch/sparc/kernel/irq_64.c void notrace sun4v_register_mondo_queues(int this_cpu)
this_cpu 1002 arch/sparc/kernel/irq_64.c struct trap_per_cpu *tb = &trap_block[this_cpu];
this_cpu 70 arch/sparc/kernel/nmi.c int this_cpu = smp_processor_id();
this_cpu 77 arch/sparc/kernel/nmi.c panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
this_cpu 79 arch/sparc/kernel/nmi.c WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
this_cpu 205 arch/sparc/kernel/process_64.c int this_cpu)
this_cpu 211 arch/sparc/kernel/process_64.c rp = &global_cpu_snapshot[this_cpu].reg;
this_cpu 257 arch/sparc/kernel/process_64.c int this_cpu, cpu;
this_cpu 264 arch/sparc/kernel/process_64.c this_cpu = raw_smp_processor_id();
this_cpu 268 arch/sparc/kernel/process_64.c if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
this_cpu 269 arch/sparc/kernel/process_64.c __global_reg_self(tp, regs, this_cpu);
this_cpu 276 arch/sparc/kernel/process_64.c if (exclude_self && cpu == this_cpu)
this_cpu 285 arch/sparc/kernel/process_64.c (cpu == this_cpu ? '*' : ' '), cpu,
this_cpu 322 arch/sparc/kernel/process_64.c static void __global_pmu_self(int this_cpu)
this_cpu 330 arch/sparc/kernel/process_64.c pp = &global_cpu_snapshot[this_cpu].pmu;
this_cpu 356 arch/sparc/kernel/process_64.c int this_cpu, cpu;
this_cpu 362 arch/sparc/kernel/process_64.c this_cpu = raw_smp_processor_id();
this_cpu 364 arch/sparc/kernel/process_64.c __global_pmu_self(this_cpu);
this_cpu 374 arch/sparc/kernel/process_64.c (cpu == this_cpu ? '*' : ' '), cpu,
this_cpu 650 arch/sparc/kernel/smp_64.c int this_cpu, tot_cpus, prev_sent, i, rem;
this_cpu 660 arch/sparc/kernel/smp_64.c this_cpu = smp_processor_id();
this_cpu 767 arch/sparc/kernel/smp_64.c this_cpu, ecpuerror_id - 1);
this_cpu 770 arch/sparc/kernel/smp_64.c this_cpu, enocpu_id - 1);
this_cpu 777 arch/sparc/kernel/smp_64.c this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
this_cpu 783 arch/sparc/kernel/smp_64.c this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
this_cpu 792 arch/sparc/kernel/smp_64.c int this_cpu, i, cnt;
this_cpu 809 arch/sparc/kernel/smp_64.c this_cpu = smp_processor_id();
this_cpu 810 arch/sparc/kernel/smp_64.c tb = &trap_block[this_cpu];
this_cpu 823 arch/sparc/kernel/smp_64.c if (i == this_cpu || !cpu_online(i))
this_cpu 942 arch/sparc/kernel/smp_64.c int this_cpu;
this_cpu 951 arch/sparc/kernel/smp_64.c this_cpu = get_cpu();
this_cpu 953 arch/sparc/kernel/smp_64.c if (cpu == this_cpu) {
this_cpu 1545 arch/sparc/kernel/smp_64.c int this_cpu = smp_processor_id();
this_cpu 1547 arch/sparc/kernel/smp_64.c sunhv_migrate_hvcons_irq(this_cpu);
this_cpu 1550 arch/sparc/kernel/smp_64.c if (cpu == this_cpu)
this_cpu 224 arch/sparc/mm/init_64.c static inline void set_dcache_dirty(struct page *page, int this_cpu)
this_cpu 226 arch/sparc/mm/init_64.c unsigned long mask = this_cpu;
this_cpu 293 arch/sparc/mm/init_64.c int this_cpu = get_cpu();
this_cpu 298 arch/sparc/mm/init_64.c if (cpu == this_cpu)
this_cpu 478 arch/sparc/mm/init_64.c int this_cpu;
this_cpu 490 arch/sparc/mm/init_64.c this_cpu = get_cpu();
this_cpu 498 arch/sparc/mm/init_64.c if (dirty_cpu == this_cpu)
this_cpu 502 arch/sparc/mm/init_64.c set_dcache_dirty(page, this_cpu);
this_cpu 217 arch/x86/hyperv/hv_apic.c unsigned int this_cpu = smp_processor_id();
this_cpu 222 arch/x86/hyperv/hv_apic.c cpumask_clear_cpu(this_cpu, &new_mask);
this_cpu 130 arch/x86/kernel/apic/apic_numachip.c unsigned int this_cpu = smp_processor_id();
this_cpu 134 arch/x86/kernel/apic/apic_numachip.c if (cpu != this_cpu)
this_cpu 141 arch/x86/kernel/apic/apic_numachip.c unsigned int this_cpu = smp_processor_id();
this_cpu 145 arch/x86/kernel/apic/apic_numachip.c if (cpu != this_cpu)
this_cpu 205 arch/x86/kernel/apic/ipi.c unsigned int this_cpu = smp_processor_id();
this_cpu 213 arch/x86/kernel/apic/ipi.c if (query_cpu == this_cpu)
this_cpu 271 arch/x86/kernel/apic/ipi.c unsigned int this_cpu = smp_processor_id();
this_cpu 277 arch/x86/kernel/apic/ipi.c if (query_cpu == this_cpu)
this_cpu 48 arch/x86/kernel/apic/x2apic_phys.c unsigned long this_cpu;
this_cpu 55 arch/x86/kernel/apic/x2apic_phys.c this_cpu = smp_processor_id();
this_cpu 57 arch/x86/kernel/apic/x2apic_phys.c if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
this_cpu 562 arch/x86/kernel/apic/x2apic_uv_x.c unsigned int this_cpu = smp_processor_id();
this_cpu 566 arch/x86/kernel/apic/x2apic_uv_x.c if (cpu != this_cpu)
this_cpu 573 arch/x86/kernel/apic/x2apic_uv_x.c unsigned int this_cpu = smp_processor_id();
this_cpu 577 arch/x86/kernel/apic/x2apic_uv_x.c if (cpu != this_cpu)
this_cpu 112 arch/x86/kernel/cpu/common.c static const struct cpu_dev *this_cpu = &default_cpu;
this_cpu 554 arch/x86/kernel/cpu/common.c if (!this_cpu)
this_cpu 557 arch/x86/kernel/cpu/common.c info = this_cpu->legacy_models;
this_cpu 692 arch/x86/kernel/cpu/common.c if (this_cpu->legacy_cache_size)
this_cpu 693 arch/x86/kernel/cpu/common.c l2size = this_cpu->legacy_cache_size(c, l2size);
this_cpu 716 arch/x86/kernel/cpu/common.c if (this_cpu->c_detect_tlb)
this_cpu 717 arch/x86/kernel/cpu/common.c this_cpu->c_detect_tlb(c);
this_cpu 786 arch/x86/kernel/cpu/common.c this_cpu = cpu_devs[i];
this_cpu 787 arch/x86/kernel/cpu/common.c c->x86_vendor = this_cpu->c_x86_vendor;
this_cpu 796 arch/x86/kernel/cpu/common.c this_cpu = &default_cpu;
this_cpu 1262 arch/x86/kernel/cpu/common.c if (this_cpu->c_early_init)
this_cpu 1263 arch/x86/kernel/cpu/common.c this_cpu->c_early_init(c);
this_cpu 1268 arch/x86/kernel/cpu/common.c if (this_cpu->c_bsp_init)
this_cpu 1269 arch/x86/kernel/cpu/common.c this_cpu->c_bsp_init(c);
this_cpu 1496 arch/x86/kernel/cpu/common.c if (this_cpu->c_identify)
this_cpu 1497 arch/x86/kernel/cpu/common.c this_cpu->c_identify(c);
this_cpu 1516 arch/x86/kernel/cpu/common.c if (this_cpu->c_init)
this_cpu 1517 arch/x86/kernel/cpu/common.c this_cpu->c_init(c);
this_cpu 1656 arch/x86/kernel/cpu/common.c vendor = this_cpu->c_vendor;
this_cpu 152 arch/x86/kernel/cpu/mce/therm_throt.c unsigned int this_cpu = smp_processor_id();
this_cpu 155 arch/x86/kernel/cpu/mce/therm_throt.c struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
this_cpu 192 arch/x86/kernel/cpu/mce/therm_throt.c this_cpu,
this_cpu 199 arch/x86/kernel/cpu/mce/therm_throt.c pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
this_cpu 208 arch/x86/kernel/cpu/mce/therm_throt.c unsigned int this_cpu = smp_processor_id();
this_cpu 209 arch/x86/kernel/cpu/mce/therm_throt.c struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
this_cpu 495 arch/x86/kernel/kvm.c unsigned int this_cpu = smp_processor_id();
this_cpu 500 arch/x86/kernel/kvm.c cpumask_clear_cpu(this_cpu, &new_mask);
this_cpu 314 arch/x86/kernel/process.c unsigned int this_cpu = smp_processor_id();
this_cpu 332 arch/x86/kernel/process.c for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
this_cpu 333 arch/x86/kernel/process.c if (cpu == this_cpu)
this_cpu 193 arch/x86/kernel/tsc.c unsigned int cpu, this_cpu = smp_processor_id();
this_cpu 198 arch/x86/kernel/tsc.c if (cpu != this_cpu) {
this_cpu 12 arch/x86/lib/msr-smp.c int this_cpu = raw_smp_processor_id();
this_cpu 15 arch/x86/lib/msr-smp.c reg = per_cpu_ptr(rv->msrs, this_cpu);
this_cpu 26 arch/x86/lib/msr-smp.c int this_cpu = raw_smp_processor_id();
this_cpu 29 arch/x86/lib/msr-smp.c reg = per_cpu_ptr(rv->msrs, this_cpu);
this_cpu 104 arch/x86/lib/msr-smp.c int this_cpu;
this_cpu 111 arch/x86/lib/msr-smp.c this_cpu = get_cpu();
this_cpu 113 arch/x86/lib/msr-smp.c if (cpumask_test_cpu(this_cpu, mask))
this_cpu 247 arch/x86/xen/smp.c unsigned int this_cpu = smp_processor_id();
this_cpu 254 arch/x86/xen/smp.c if (this_cpu == cpu)
this_cpu 333 drivers/cpufreq/acpi-cpufreq.c int this_cpu;
this_cpu 335 drivers/cpufreq/acpi-cpufreq.c this_cpu = get_cpu();
this_cpu 336 drivers/cpufreq/acpi-cpufreq.c if (cpumask_test_cpu(this_cpu, mask))
this_cpu 346 drivers/cpuidle/coupled.c static void cpuidle_coupled_poke_others(int this_cpu,
this_cpu 352 drivers/cpuidle/coupled.c if (cpu != this_cpu && cpu_online(cpu))
this_cpu 26 drivers/cpuidle/cpuidle-ux500.c int this_cpu = smp_processor_id();
this_cpu 49 drivers/cpuidle/cpuidle-ux500.c if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
this_cpu 1327 drivers/gpu/drm/i915/i915_request.c unsigned int this_cpu;
this_cpu 1329 drivers/gpu/drm/i915/i915_request.c if (time_after(local_clock_us(&this_cpu), timeout))
this_cpu 1332 drivers/gpu/drm/i915/i915_request.c return this_cpu != cpu;
this_cpu 721 drivers/net/ethernet/cavium/liquidio/lio_core.c int this_cpu = smp_processor_id();
this_cpu 726 drivers/net/ethernet/cavium/liquidio/lio_core.c droq->cpu_id == this_cpu) {
this_cpu 570 fs/eventpoll.c int this_cpu = get_cpu();
this_cpu 573 fs/eventpoll.c ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
this_cpu 164 include/linux/sched/topology.h bool cpus_share_cache(int this_cpu, int that_cpu);
this_cpu 213 include/linux/sched/topology.h static inline bool cpus_share_cache(int this_cpu, int that_cpu)
this_cpu 279 init/calibrate.c int this_cpu = smp_processor_id();
this_cpu 281 init/calibrate.c if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
this_cpu 282 init/calibrate.c lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
this_cpu 306 init/calibrate.c per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
this_cpu 248 kernel/debug/debug_core.c int this_cpu = raw_smp_processor_id();
this_cpu 254 kernel/debug/debug_core.c if (cpu == this_cpu)
this_cpu 563 kernel/debug/kdb/kdb_io.c int this_cpu, old_cpu;
this_cpu 574 kernel/debug/kdb/kdb_io.c this_cpu = smp_processor_id();
this_cpu 576 kernel/debug/kdb/kdb_io.c old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu);
this_cpu 577 kernel/debug/kdb/kdb_io.c if (old_cpu == -1 || old_cpu == this_cpu)
this_cpu 964 kernel/kexec_core.c int old_cpu, this_cpu;
this_cpu 971 kernel/kexec_core.c this_cpu = raw_smp_processor_id();
this_cpu 972 kernel/kexec_core.c old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
this_cpu 173 kernel/panic.c int old_cpu, this_cpu;
this_cpu 200 kernel/panic.c this_cpu = raw_smp_processor_id();
this_cpu 201 kernel/panic.c old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
this_cpu 203 kernel/panic.c if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
this_cpu 2384 kernel/sched/core.c bool cpus_share_cache(int this_cpu, int that_cpu)
this_cpu 2386 kernel/sched/core.c return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
this_cpu 1880 kernel/sched/deadline.c int this_cpu = smp_processor_id();
this_cpu 1915 kernel/sched/deadline.c if (!cpumask_test_cpu(this_cpu, later_mask))
this_cpu 1916 kernel/sched/deadline.c this_cpu = -1;
this_cpu 1927 kernel/sched/deadline.c if (this_cpu != -1 &&
this_cpu 1928 kernel/sched/deadline.c cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
this_cpu 1930 kernel/sched/deadline.c return this_cpu;
this_cpu 1953 kernel/sched/deadline.c if (this_cpu != -1)
this_cpu 1954 kernel/sched/deadline.c return this_cpu;
this_cpu 2136 kernel/sched/deadline.c int this_cpu = this_rq->cpu, cpu;
this_cpu 2152 kernel/sched/deadline.c if (this_cpu == cpu)
this_cpu 2176 kernel/sched/deadline.c p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
this_cpu 2201 kernel/sched/deadline.c set_task_cpu(p, this_cpu);
this_cpu 969 kernel/sched/debug.c unsigned int this_cpu = raw_smp_processor_id();
this_cpu 972 kernel/sched/debug.c t0 = cpu_clock(this_cpu);
this_cpu 973 kernel/sched/debug.c t1 = cpu_clock(this_cpu);
this_cpu 5480 kernel/sched/fair.c wake_affine_idle(int this_cpu, int prev_cpu, int sync)
this_cpu 5494 kernel/sched/fair.c if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
this_cpu 5495 kernel/sched/fair.c return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
this_cpu 5497 kernel/sched/fair.c if (sync && cpu_rq(this_cpu)->nr_running == 1)
this_cpu 5498 kernel/sched/fair.c return this_cpu;
this_cpu 5505 kernel/sched/fair.c int this_cpu, int prev_cpu, int sync)
this_cpu 5510 kernel/sched/fair.c this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
this_cpu 5516 kernel/sched/fair.c return this_cpu;
this_cpu 5532 kernel/sched/fair.c prev_eff_load *= capacity_of(this_cpu);
this_cpu 5543 kernel/sched/fair.c return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
this_cpu 5547 kernel/sched/fair.c int this_cpu, int prev_cpu, int sync)
this_cpu 5552 kernel/sched/fair.c target = wake_affine_idle(this_cpu, prev_cpu, sync);
this_cpu 5555 kernel/sched/fair.c target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
this_cpu 5581 kernel/sched/fair.c int this_cpu, int sd_flag)
this_cpu 5604 kernel/sched/fair.c local_group = cpumask_test_cpu(this_cpu,
this_cpu 5714 kernel/sched/fair.c find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
this_cpu 5719 kernel/sched/fair.c int least_loaded_cpu = this_cpu;
this_cpu 8829 kernel/sched/fair.c static int load_balance(int this_cpu, struct rq *this_rq,
this_cpu 8842 kernel/sched/fair.c .dst_cpu = this_cpu,
this_cpu 9012 kernel/sched/fair.c if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
this_cpu 9026 kernel/sched/fair.c busiest->push_cpu = this_cpu;
this_cpu 9632 kernel/sched/fair.c int this_cpu = this_rq->cpu;
this_cpu 9656 kernel/sched/fair.c if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
this_cpu 9696 kernel/sched/fair.c update_blocked_averages(this_cpu);
this_cpu 9731 kernel/sched/fair.c int this_cpu = this_rq->cpu;
this_cpu 9734 kernel/sched/fair.c if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
this_cpu 9738 kernel/sched/fair.c atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
this_cpu 9743 kernel/sched/fair.c flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
this_cpu 9754 kernel/sched/fair.c int this_cpu = this_rq->cpu;
this_cpu 9760 kernel/sched/fair.c if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
this_cpu 9802 kernel/sched/fair.c int this_cpu = this_rq->cpu;
this_cpu 9817 kernel/sched/fair.c if (!cpu_active(this_cpu))
this_cpu 9844 kernel/sched/fair.c update_blocked_averages(this_cpu);
this_cpu 9846 kernel/sched/fair.c for_each_domain(this_cpu, sd) {
this_cpu 9859 kernel/sched/fair.c t0 = sched_clock_cpu(this_cpu);
this_cpu 9861 kernel/sched/fair.c pulled_task = load_balance(this_cpu, this_rq,
this_cpu 9865 kernel/sched/fair.c domain_cost = sched_clock_cpu(this_cpu) - t0;
this_cpu 1639 kernel/sched/rt.c int this_cpu = smp_processor_id();
this_cpu 1667 kernel/sched/rt.c if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu 1668 kernel/sched/rt.c this_cpu = -1; /* Skip this_cpu opt if not among lowest */
this_cpu 1679 kernel/sched/rt.c if (this_cpu != -1 &&
this_cpu 1680 kernel/sched/rt.c cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
this_cpu 1682 kernel/sched/rt.c return this_cpu;
this_cpu 1700 kernel/sched/rt.c if (this_cpu != -1)
this_cpu 1701 kernel/sched/rt.c return this_cpu;
this_cpu 2051 kernel/sched/rt.c int this_cpu = this_rq->cpu, cpu;
this_cpu 2079 kernel/sched/rt.c if (this_cpu == cpu)
this_cpu 2106 kernel/sched/rt.c p = pick_highest_pushable_task(src_rq, this_cpu);
this_cpu 2130 kernel/sched/rt.c set_task_cpu(p, this_cpu);
this_cpu 276 kernel/smp.c int this_cpu;
this_cpu 283 kernel/smp.c this_cpu = get_cpu();
this_cpu 291 kernel/smp.c WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
this_cpu 416 kernel/smp.c int cpu, next_cpu, this_cpu = smp_processor_id();
this_cpu 424 kernel/smp.c WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
this_cpu 437 kernel/smp.c if (cpu == this_cpu)
this_cpu 446 kernel/smp.c if (next_cpu == this_cpu)
this_cpu 458 kernel/smp.c __cpumask_clear_cpu(this_cpu, cfd->cpumask);
this_cpu 97 kernel/trace/trace_clock.c int this_cpu;
this_cpu 102 kernel/trace/trace_clock.c this_cpu = raw_smp_processor_id();
this_cpu 103 kernel/trace/trace_clock.c now = sched_clock_cpu(this_cpu);
this_cpu 132 kernel/watchdog_hld.c int this_cpu = smp_processor_id();
this_cpu 139 kernel/watchdog_hld.c this_cpu);
this_cpu 40 lib/nmi_backtrace.c int i, this_cpu = get_cpu();
this_cpu 53 lib/nmi_backtrace.c cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
this_cpu 61 lib/nmi_backtrace.c if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
this_cpu 66 lib/nmi_backtrace.c this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
this_cpu 14 lib/smp_processor_id.c int this_cpu = raw_smp_processor_id();
this_cpu 26 lib/smp_processor_id.c if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
this_cpu 52 lib/smp_processor_id.c return this_cpu;
this_cpu 32 net/netfilter/nft_counter.c struct nft_counter *this_cpu;
this_cpu 36 net/netfilter/nft_counter.c this_cpu = this_cpu_ptr(priv->counter);
this_cpu 41 net/netfilter/nft_counter.c this_cpu->bytes += pkt->skb->len;
this_cpu 42 net/netfilter/nft_counter.c this_cpu->packets++;
this_cpu 61 net/netfilter/nft_counter.c struct nft_counter *this_cpu;
this_cpu 68 net/netfilter/nft_counter.c this_cpu = this_cpu_ptr(cpu_stats);
this_cpu 70 net/netfilter/nft_counter.c this_cpu->packets =
this_cpu 74 net/netfilter/nft_counter.c this_cpu->bytes =
this_cpu 107 net/netfilter/nft_counter.c struct nft_counter *this_cpu;
this_cpu 110 net/netfilter/nft_counter.c this_cpu = this_cpu_ptr(priv->counter);
this_cpu 111 net/netfilter/nft_counter.c this_cpu->packets -= total->packets;
this_cpu 112 net/netfilter/nft_counter.c this_cpu->bytes -= total->bytes;
this_cpu 119 net/netfilter/nft_counter.c struct nft_counter *this_cpu;
this_cpu 128 net/netfilter/nft_counter.c this_cpu = per_cpu_ptr(priv->counter, cpu);
this_cpu 131 net/netfilter/nft_counter.c bytes = this_cpu->bytes;
this_cpu 132 net/netfilter/nft_counter.c packets = this_cpu->packets;
this_cpu 232 net/netfilter/nft_counter.c struct nft_counter *this_cpu;
this_cpu 242 net/netfilter/nft_counter.c this_cpu = this_cpu_ptr(cpu_stats);
this_cpu 243 net/netfilter/nft_counter.c this_cpu->packets = total.packets;
this_cpu 244 net/netfilter/nft_counter.c this_cpu->bytes = total.bytes;
this_cpu 1213 tools/perf/bench/numa.c int this_cpu;
this_cpu 1222 tools/perf/bench/numa.c this_cpu = g->threads[task_nr].curr_cpu;
this_cpu 1223 tools/perf/bench/numa.c if (this_cpu < g->p.nr_cpus/2)
this_cpu 1532 tools/perf/builtin-sched.c int i, this_cpu = sample->cpu;
this_cpu 1538 tools/perf/builtin-sched.c BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
this_cpu 1540 tools/perf/builtin-sched.c if (this_cpu > sched->max_cpu)
this_cpu 1541 tools/perf/builtin-sched.c sched->max_cpu = this_cpu;
this_cpu 1545 tools/perf/builtin-sched.c if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
this_cpu 1546 tools/perf/builtin-sched.c sched->map.comp_cpus[cpus_nr++] = this_cpu;
this_cpu 1552 tools/perf/builtin-sched.c timestamp0 = sched->cpu_last_switched[this_cpu];
this_cpu 1553 tools/perf/builtin-sched.c sched->cpu_last_switched[this_cpu] = timestamp;
this_cpu 1574 tools/perf/builtin-sched.c sched->curr_thread[this_cpu] = thread__get(sched_in);
this_cpu 1620 tools/perf/builtin-sched.c if (cpu != this_cpu)
this_cpu 1636 tools/perf/builtin-sched.c if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
this_cpu 1653 tools/perf/builtin-sched.c color_fprintf(stdout, color, " (CPU %d)", this_cpu);
this_cpu 1669 tools/perf/builtin-sched.c int this_cpu = sample->cpu, err = 0;
this_cpu 1673 tools/perf/builtin-sched.c if (sched->curr_pid[this_cpu] != (u32)-1) {
this_cpu 1678 tools/perf/builtin-sched.c if (sched->curr_pid[this_cpu] != prev_pid)
this_cpu 1685 tools/perf/builtin-sched.c sched->curr_pid[this_cpu] = next_pid;
this_cpu 2918 tools/perf/builtin-sched.c int this_cpu = sample->cpu;
this_cpu 2920 tools/perf/builtin-sched.c if (this_cpu > sched->max_cpu)
this_cpu 2921 tools/perf/builtin-sched.c sched->max_cpu = this_cpu;
this_cpu 145 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c int this_cpu;
this_cpu 147 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c this_cpu = sched_getcpu();
this_cpu 150 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
this_cpu 156 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c tmp = cpuidle_state_name(this_cpu, num);
this_cpu 165 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c tmp = cpuidle_state_desc(this_cpu, num);
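
The call sites above cluster around a few recurring idioms. The most common is fetching the current CPU id, and the listing shows three primitives in use: smp_processor_id() where preemption is already disabled (IPI and interrupt handlers such as arch/alpha/kernel/smp.c:526), get_cpu()/put_cpu() where the caller must pin itself first (arch/arm/kernel/smp_tlb.c:172, fs/eventpoll.c:570), and raw_smp_processor_id() where a possibly stale answer is acceptable (kernel/panic.c:200). A minimal sketch of the pinned variant follows; demo_counter and demo_update_this_cpu are illustrative names, not symbols from the files above.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU datum, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update_this_cpu(void)
{
        int this_cpu = get_cpu();       /* disables preemption */

        /* Safe: this task cannot migrate until put_cpu(). */
        per_cpu(demo_counter, this_cpu)++;

        put_cpu();                      /* re-enables preemption */
}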
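
A second idiom is shared by kernel/panic.c:201, kernel/kexec_core.c:972, kernel/debug/kdb/kdb_io.c:576 and arch/powerpc/kernel/fadump.c:611: a compare-and-exchange against a sentinel elects exactly one CPU as owner of a one-shot path, while re-entry on the winning CPU is tolerated. A sketch of that pattern, with an assumed atomic_t owner variable (demo_owner_cpu and DEMO_CPU_INVALID are illustrative, not kernel names):

#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/types.h>

#define DEMO_CPU_INVALID        -1

static atomic_t demo_owner_cpu = ATOMIC_INIT(DEMO_CPU_INVALID);

/* Returns true on the first CPU to arrive, and on recursion on that CPU. */
static bool demo_claim_exclusive(void)
{
        /* raw_: these paths may run on any CPU and tolerate staleness. */
        int this_cpu = raw_smp_processor_id();
        int old_cpu = atomic_cmpxchg(&demo_owner_cpu,
                                     DEMO_CPU_INVALID, this_cpu);

        return old_cpu == DEMO_CPU_INVALID || old_cpu == this_cpu;
}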
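
Finally, the net/netfilter/nft_counter.c entries show the per-CPU statistics idiom: the hot path bumps this CPU's private copy through this_cpu_ptr(), and a reader later folds all copies together with per_cpu_ptr(). The sketch below assumes the hot-path caller already runs with preemption (or bottom halves) disabled, and omits the synchronization the real code adds for 64-bit counters on 32-bit machines; struct demo_counter and both functions are illustrative.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_counter {
        u64 bytes;
        u64 packets;
};

/* Hot path: touch only this CPU's copy; caller has preemption disabled. */
static void demo_count(struct demo_counter __percpu *counters,
                       unsigned int len)
{
        struct demo_counter *this_cpu = this_cpu_ptr(counters);

        this_cpu->bytes += len;
        this_cpu->packets++;
}

/* Slow path: sum every CPU's copy into *total. */
static void demo_fold(struct demo_counter __percpu *counters,
                      struct demo_counter *total)
{
        int cpu;

        total->bytes = 0;
        total->packets = 0;
        for_each_possible_cpu(cpu) {
                struct demo_counter *c = per_cpu_ptr(counters, cpu);

                total->bytes += c->bytes;
                total->packets += c->packets;
        }
}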