per_cpu            81 arch/alpha/kernel/irq.c 		seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
per_cpu            94 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
per_cpu           119 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
per_cpu           176 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
per_cpu           186 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
per_cpu            52 arch/arc/include/asm/mmu_context.h #define asid_cpu(cpu)		per_cpu(asid_cache, cpu)
per_cpu           697 arch/arc/kernel/setup.c 	    register_cpu(&per_cpu(cpu_topology, cpu), cpu);
per_cpu            38 arch/arm/include/asm/smp_plat.h 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
per_cpu            24 arch/arm/include/asm/system_misc.h 	harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
per_cpu          1182 arch/arm/kernel/setup.c 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
per_cpu          1251 arch/arm/kernel/setup.c 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
per_cpu          1257 arch/arm/kernel/setup.c 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
per_cpu          1258 arch/arm/kernel/setup.c 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
per_cpu           371 arch/arm/kernel/smp.c 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
per_cpu           458 arch/arm/kernel/smp.c 		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
per_cpu           615 arch/arm/kernel/smp.c 	per_cpu(cpu_completion, cpu) = completion;
per_cpu           621 arch/arm/kernel/smp.c 	complete(per_cpu(cpu_completion, cpu));
per_cpu           768 arch/arm/kernel/smp.c 	if (!per_cpu(l_p_j_ref, first)) {
per_cpu           770 arch/arm/kernel/smp.c 			per_cpu(l_p_j_ref, cpu) =
per_cpu           771 arch/arm/kernel/smp.c 				per_cpu(cpu_data, cpu).loops_per_jiffy;
per_cpu           772 arch/arm/kernel/smp.c 			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
per_cpu           787 arch/arm/kernel/smp.c 		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
per_cpu           788 arch/arm/kernel/smp.c 				    per_cpu(l_p_j_ref_freq, first), freq->new);
per_cpu           790 arch/arm/kernel/smp.c 			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
per_cpu           228 arch/arm/kernel/smp_twd.c 	if (per_cpu(percpu_setup_called, cpu)) {
per_cpu           234 arch/arm/kernel/smp_twd.c 	per_cpu(percpu_setup_called, cpu) = true;
per_cpu            36 arch/arm/mach-alpine/alpine_cpu_pm.c 	       &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr);
per_cpu            22 arch/arm/mach-alpine/alpine_cpu_resume.h 	struct al_cpu_resume_regs_per_cpu per_cpu[];
per_cpu            70 arch/arm/mach-bcm/platsmp-brcmstb.c 	return per_cpu(per_cpu_sw_state, cpu);
per_cpu            76 arch/arm/mach-bcm/platsmp-brcmstb.c 	per_cpu(per_cpu_sw_state, cpu) = val;
per_cpu           120 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
per_cpu           131 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
per_cpu           185 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
per_cpu           228 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
per_cpu           310 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
per_cpu           368 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	pm_info = &per_cpu(omap4_pm_info, 0x0);
per_cpu           392 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	pm_info = &per_cpu(omap4_pm_info, 0x1);
per_cpu           195 arch/arm/mach-omap2/omap-wakeupgen.c 		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
per_cpu           203 arch/arm/mach-omap2/omap-wakeupgen.c 		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
per_cpu           263 arch/arm/mach-qcom/platsmp.c 	if (!per_cpu(cold_boot_done, cpu)) {
per_cpu           266 arch/arm/mach-qcom/platsmp.c 			per_cpu(cold_boot_done, cpu) = true;
per_cpu            67 arch/arm/mm/context.c 		asid = per_cpu(active_asids, cpu).counter;
per_cpu            69 arch/arm/mm/context.c 			asid = per_cpu(reserved_asids, cpu);
per_cpu           144 arch/arm/mm/context.c 		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
per_cpu           153 arch/arm/mm/context.c 			asid = per_cpu(reserved_asids, i);
per_cpu           155 arch/arm/mm/context.c 		per_cpu(reserved_asids, i) = asid;
per_cpu           180 arch/arm/mm/context.c 		if (per_cpu(reserved_asids, cpu) == asid) {
per_cpu           182 arch/arm/mm/context.c 			per_cpu(reserved_asids, cpu) = newasid;
per_cpu           255 arch/arm/mm/context.c 	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
per_cpu           271 arch/arm/mm/context.c 	atomic64_set(&per_cpu(active_asids, cpu), asid);
per_cpu            45 arch/arm/mm/proc-v7-bugs.c 	if (per_cpu(harden_branch_predictor_fn, cpu))
per_cpu            55 arch/arm/mm/proc-v7-bugs.c 		per_cpu(harden_branch_predictor_fn, cpu) =
per_cpu            62 arch/arm/mm/proc-v7-bugs.c 		per_cpu(harden_branch_predictor_fn, cpu) =
per_cpu            90 arch/arm/mm/proc-v7-bugs.c 			per_cpu(harden_branch_predictor_fn, cpu) =
per_cpu           101 arch/arm/mm/proc-v7-bugs.c 			per_cpu(harden_branch_predictor_fn, cpu) =
per_cpu           147 arch/arm/xen/enlighten.c 	if (per_cpu(xen_vcpu, cpu) != NULL)
per_cpu           159 arch/arm/xen/enlighten.c 	per_cpu(xen_vcpu, cpu) = vcpup;
per_cpu           363 arch/arm/xen/enlighten.c 		per_cpu(xen_vcpu_id, cpu) = cpu;
per_cpu           150 arch/arm64/kernel/cpu_errata.c 		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
per_cpu           151 arch/arm64/kernel/cpu_errata.c 			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
per_cpu            54 arch/arm64/kernel/cpuidle.c 	struct acpi_processor *pr = per_cpu(processors, cpu);
per_cpu           133 arch/arm64/kernel/cpuinfo.c 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
per_cpu           255 arch/arm64/kernel/cpuinfo.c 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
per_cpu           275 arch/arm64/kernel/cpuinfo.c 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
per_cpu           293 arch/arm64/kernel/cpuinfo.c 		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
per_cpu           391 arch/arm64/kernel/cpuinfo.c 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
per_cpu          1406 arch/arm64/kernel/fpsimd.c 	per_cpu(fpsimd_last_state.st, cpu) = NULL;
per_cpu            47 arch/arm64/kernel/irq.c 		per_cpu(irq_stack_ptr, cpu) = p;
per_cpu            59 arch/arm64/kernel/irq.c 		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
per_cpu            43 arch/arm64/kernel/sdei.c 	p = per_cpu(*ptr, cpu);
per_cpu            45 arch/arm64/kernel/sdei.c 		per_cpu(*ptr, cpu) = NULL;
per_cpu            67 arch/arm64/kernel/sdei.c 	per_cpu(*ptr, cpu) = p;
per_cpu           384 arch/arm64/kernel/setup.c 		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
per_cpu           739 arch/arm64/kernel/smp.c 		per_cpu(cpu_number, cpu) = cpu;
per_cpu            89 arch/arm64/mm/context.c 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
per_cpu            98 arch/arm64/mm/context.c 			asid = per_cpu(reserved_asids, i);
per_cpu           100 arch/arm64/mm/context.c 		per_cpu(reserved_asids, i) = asid;
per_cpu           125 arch/arm64/mm/context.c 		if (per_cpu(reserved_asids, cpu) == asid) {
per_cpu           127 arch/arm64/mm/context.c 			per_cpu(reserved_asids, cpu) = newasid;
per_cpu           207 arch/arm64/mm/context.c 	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
per_cpu           210 arch/arm64/mm/context.c 	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
per_cpu           225 arch/arm64/mm/context.c 	atomic64_set(&per_cpu(active_asids, cpu), asid);
per_cpu            97 arch/c6x/kernel/setup.c 	p = &per_cpu(cpu_data, smp_processor_id());
per_cpu           412 arch/c6x/kernel/setup.c 	struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);
per_cpu            85 arch/hexagon/kernel/smp.c 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
per_cpu           102 arch/hexagon/kernel/smp.c 		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
per_cpu           115 arch/hexagon/kernel/time.c 		&per_cpu(clock_events, cpu);
per_cpu           130 arch/hexagon/kernel/time.c 	struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
per_cpu           242 arch/ia64/include/asm/processor.h #define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
per_cpu            45 arch/ia64/include/asm/topology.h #define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
per_cpu           112 arch/ia64/include/asm/uv/uv_hub.h #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
per_cpu            42 arch/ia64/kernel/crash.c 	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
per_cpu           141 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = irq;
per_cpu           171 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = -1;
per_cpu           241 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = -1;
per_cpu           247 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = irq;
per_cpu           580 arch/ia64/kernel/perfmon.c #define pfm_get_cpu_data(a,b)		per_cpu(a, b)
per_cpu           957 arch/ia64/kernel/setup.c 		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
per_cpu           147 arch/ia64/kernel/smp.c 	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
per_cpu           387 arch/ia64/kernel/smpboot.c 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
per_cpu           569 arch/ia64/kernel/smpboot.c 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
per_cpu           578 arch/ia64/kernel/smpboot.c 	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
per_cpu           579 arch/ia64/kernel/smpboot.c 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
per_cpu           583 arch/ia64/kernel/smpboot.c 	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
per_cpu           594 arch/ia64/kernel/smpboot.c 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
per_cpu           679 arch/ia64/kernel/smpboot.c 		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
per_cpu           718 arch/ia64/kernel/smpboot.c 						&per_cpu(cpu_sibling_map, cpu));
per_cpu           720 arch/ia64/kernel/smpboot.c 						&per_cpu(cpu_sibling_map, i));
per_cpu           743 arch/ia64/kernel/smpboot.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
per_cpu           751 arch/ia64/kernel/smpboot.c 		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
per_cpu            63 arch/ia64/mm/contig.c 		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
per_cpu           408 arch/ia64/mm/discontig.c 		per_cpu(ia64_cpu_info, cpu).node_data =
per_cpu           535 arch/ia64/mm/discontig.c 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
per_cpu           102 arch/ia64/mm/tlb.c 			per_cpu(ia64_need_tlb_flush, i) = 1;
per_cpu           398 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_num, cpu) = 8;
per_cpu           401 arch/ia64/mm/tlb.c 	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
per_cpu           402 arch/ia64/mm/tlb.c 	if (per_cpu(ia64_tr_num, cpu) >
per_cpu           404 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_num, cpu) =
per_cpu           406 arch/ia64/mm/tlb.c 	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
per_cpu           408 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
per_cpu           473 arch/ia64/mm/tlb.c 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
per_cpu           485 arch/ia64/mm/tlb.c 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
per_cpu           496 arch/ia64/mm/tlb.c 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
per_cpu           517 arch/ia64/mm/tlb.c 	if (i >= per_cpu(ia64_tr_num, cpu))
per_cpu           521 arch/ia64/mm/tlb.c 	if (i > per_cpu(ia64_tr_used, cpu))
per_cpu           522 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_used, cpu) = i;
per_cpu           564 arch/ia64/mm/tlb.c 	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
per_cpu           585 arch/ia64/mm/tlb.c 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
per_cpu           590 arch/ia64/mm/tlb.c 	per_cpu(ia64_tr_used, cpu) = i;
per_cpu           178 arch/microblaze/kernel/setup.c 	per_cpu(KM, 0) = 0x1;	/* We start in kernel mode */
per_cpu           179 arch/microblaze/kernel/setup.c 	per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
per_cpu           300 arch/mips/cavium-octeon/octeon-irq.c 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
per_cpu           306 arch/mips/cavium-octeon/octeon-irq.c 		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
per_cpu           315 arch/mips/cavium-octeon/octeon-irq.c 		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
per_cpu           403 arch/mips/cavium-octeon/octeon-irq.c 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
per_cpu           405 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
per_cpu           407 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
per_cpu           436 arch/mips/cavium-octeon/octeon-irq.c 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
per_cpu           438 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
per_cpu           440 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
per_cpu           476 arch/mips/cavium-octeon/octeon-irq.c 		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
per_cpu           480 arch/mips/cavium-octeon/octeon-irq.c 		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
per_cpu           624 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
per_cpu           631 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
per_cpu           654 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
per_cpu           661 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
per_cpu           808 arch/mips/cavium-octeon/octeon-irq.c 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
per_cpu           812 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
per_cpu           814 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
per_cpu           859 arch/mips/cavium-octeon/octeon-irq.c 			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
per_cpu           872 arch/mips/cavium-octeon/octeon-irq.c 			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
per_cpu          1072 arch/mips/cavium-octeon/octeon-irq.c 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
per_cpu          1075 arch/mips/cavium-octeon/octeon-irq.c 	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
per_cpu          1095 arch/mips/cavium-octeon/octeon-irq.c 	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
per_cpu          2401 arch/mips/cavium-octeon/octeon-irq.c 	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
per_cpu          2518 arch/mips/cavium-octeon/octeon-irq.c 	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
per_cpu          2679 arch/mips/cavium-octeon/octeon-irq.c 	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
per_cpu          2698 arch/mips/cavium-octeon/octeon-irq.c 	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
per_cpu          2708 arch/mips/cavium-octeon/octeon-irq.c 		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
per_cpu           315 arch/mips/cavium-octeon/smp.c 	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
per_cpu           353 arch/mips/cavium-octeon/smp.c 	per_cpu(cpu_state, cpu) = CPU_DEAD;
per_cpu           101 arch/mips/kernel/cevt-bcm1480.c 	struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
per_cpu           102 arch/mips/kernel/cevt-bcm1480.c 	struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
per_cpu           103 arch/mips/kernel/cevt-bcm1480.c 	unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
per_cpu           152 arch/mips/kernel/cevt-r4k.c 		cd = &per_cpu(mips_clockevent_device, cpu);
per_cpu           272 arch/mips/kernel/cevt-r4k.c 	cd = &per_cpu(mips_clockevent_device, cpu);
per_cpu           100 arch/mips/kernel/cevt-sb1250.c 	struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
per_cpu           101 arch/mips/kernel/cevt-sb1250.c 	struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
per_cpu           102 arch/mips/kernel/cevt-sb1250.c 	unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
per_cpu           251 arch/mips/kernel/mips-cm.c 		spin_lock_init(&per_cpu(cm_core_lock, cpu));
per_cpu           299 arch/mips/kernel/mips-cm.c 		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
per_cpu           300 arch/mips/kernel/mips-cm.c 				  per_cpu(cm_core_lock_flags, curr_core));
per_cpu           320 arch/mips/kernel/mips-cm.c 		spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
per_cpu           321 arch/mips/kernel/mips-cm.c 				       per_cpu(cm_core_lock_flags, curr_core));
per_cpu            75 arch/mips/kernel/mips-cpc.c 		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
per_cpu            98 arch/mips/kernel/mips-cpc.c 	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu            99 arch/mips/kernel/mips-cpc.c 			  per_cpu(cpc_core_lock_flags, curr_core));
per_cpu           118 arch/mips/kernel/mips-cpc.c 	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
per_cpu           119 arch/mips/kernel/mips-cpc.c 			       per_cpu(cpc_core_lock_flags, curr_core));
per_cpu           123 arch/mips/kernel/pm-cps.c 	entry = per_cpu(nc_asm_enter, core)[state];
per_cpu           159 arch/mips/kernel/pm-cps.c 	core_ready_count = per_cpu(ready_count, core);
per_cpu           167 arch/mips/kernel/pm-cps.c 	coupled_barrier(&per_cpu(pm_barrier, core), online);
per_cpu           643 arch/mips/kernel/pm-cps.c 		if (per_cpu(nc_asm_enter, core)[state])
per_cpu           655 arch/mips/kernel/pm-cps.c 		per_cpu(nc_asm_enter, core)[state] = entry_fn;
per_cpu           658 arch/mips/kernel/pm-cps.c 	if (!per_cpu(ready_count, core)) {
per_cpu           664 arch/mips/kernel/pm-cps.c 		per_cpu(ready_count, core) = core_rc;
per_cpu           711 arch/mips/kernel/process.c 		csd = &per_cpu(backtrace_csd, cpu);
per_cpu           326 arch/mips/kernel/smp-bmips.c 	per_cpu(ipi_action_mask, cpu) |= action;
per_cpu           338 arch/mips/kernel/smp-bmips.c 	per_cpu(ipi_action_mask, cpu) = 0;
per_cpu           709 arch/mips/kernel/smp.c 		count = &per_cpu(tick_broadcast_count, cpu);
per_cpu           710 arch/mips/kernel/smp.c 		csd = &per_cpu(tick_broadcast_csd, cpu);
per_cpu           721 arch/mips/kernel/smp.c 	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
per_cpu           730 arch/mips/kernel/smp.c 		csd = &per_cpu(tick_broadcast_csd, cpu);
per_cpu            21 arch/mips/kernel/topology.c 		struct cpu *c = &per_cpu(cpu_devices, i);
per_cpu           183 arch/mips/loongson64/loongson-3/hpet.c 		cd = &per_cpu(hpet_clockevent_device, cpu);
per_cpu           232 arch/mips/loongson64/loongson-3/hpet.c 	cd = &per_cpu(hpet_clockevent_device, cpu);
per_cpu           312 arch/mips/loongson64/loongson-3/smp.c 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
per_cpu           388 arch/mips/loongson64/loongson-3/smp.c 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
per_cpu           443 arch/mips/loongson64/loongson-3/smp.c 	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
per_cpu           688 arch/mips/loongson64/loongson-3/smp.c 	state_addr = &per_cpu(cpu_state, cpu);
per_cpu            23 arch/mips/math-emu/me-debugfs.c 		ps = &per_cpu(fpuemustats, cpu);
per_cpu            86 arch/mips/mm/context.c 			mmid = per_cpu(reserved_mmids, cpu);
per_cpu            89 arch/mips/mm/context.c 		per_cpu(reserved_mmids, cpu) = mmid;
per_cpu           115 arch/mips/mm/context.c 		if (per_cpu(reserved_mmids, cpu) == mmid) {
per_cpu           117 arch/mips/mm/context.c 			per_cpu(reserved_mmids, cpu) = newmmid;
per_cpu            53 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
per_cpu            63 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
per_cpu           190 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
per_cpu           232 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
per_cpu           257 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
per_cpu            60 arch/mips/sgi-ip27/ip27-timer.c 	struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
per_cpu            93 arch/mips/sgi-ip27/ip27-timer.c 	struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
per_cpu            94 arch/mips/sgi-ip27/ip27-timer.c 	unsigned char *name = per_cpu(hub_rt_name, cpu);
per_cpu          1109 arch/nds32/kernel/perf_event_cpu.c 	struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
per_cpu            69 arch/openrisc/kernel/time.c 		&per_cpu(clockevent_openrisc_timer, cpu);
per_cpu           114 arch/openrisc/kernel/time.c 		&per_cpu(clockevent_openrisc_timer, cpu);
per_cpu            76 arch/parisc/kernel/irq.c 	per_cpu(local_ack_eiem, cpu) &= ~mask;
per_cpu            79 arch/parisc/kernel/irq.c 	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
per_cpu            91 arch/parisc/kernel/irq.c 	per_cpu(local_ack_eiem, cpu) |= mask;
per_cpu            94 arch/parisc/kernel/irq.c 	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
per_cpu           146 arch/parisc/kernel/irq.c #define irq_stats(x)		(&per_cpu(irq_stat, x))
per_cpu           341 arch/parisc/kernel/irq.c 	return per_cpu(cpu_data, cpu).txn_addr;
per_cpu           353 arch/parisc/kernel/irq.c 		(!per_cpu(cpu_data, next_cpu).txn_addr ||
per_cpu           423 arch/parisc/kernel/irq.c 	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
per_cpu           426 arch/parisc/kernel/irq.c 	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
per_cpu           442 arch/parisc/kernel/irq.c 	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
per_cpu           474 arch/parisc/kernel/irq.c 	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
per_cpu           521 arch/parisc/kernel/irq.c 	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
per_cpu           541 arch/parisc/kernel/irq.c 			   per_cpu(cpu_data, cpu).hpa);
per_cpu           559 arch/parisc/kernel/irq.c 	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
per_cpu           524 arch/parisc/kernel/perf.c 	cpu_device = per_cpu(cpu_data, 0).dev;
per_cpu           526 arch/parisc/kernel/perf.c 		per_cpu(cpu_data, 0).dev->name);
per_cpu           159 arch/parisc/kernel/processor.c 	p = &per_cpu(cpu_data, cpuid);
per_cpu           337 arch/parisc/kernel/processor.c 		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu           338 arch/parisc/kernel/processor.c 		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
per_cpu           378 arch/parisc/kernel/processor.c 		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
per_cpu           405 arch/parisc/kernel/setup.c 		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu           406 arch/parisc/kernel/setup.c 		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
per_cpu           122 arch/parisc/kernel/smp.c 	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
per_cpu           127 arch/parisc/kernel/smp.c 		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
per_cpu           190 arch/parisc/kernel/smp.c 	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
per_cpu           191 arch/parisc/kernel/smp.c 	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
per_cpu           322 arch/parisc/kernel/smp.c 	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
per_cpu           380 arch/parisc/kernel/smp.c 	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
per_cpu           400 arch/parisc/kernel/smp.c 		spin_lock_init(&per_cpu(ipi_lock, cpu));
per_cpu            68 arch/parisc/kernel/time.c 	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
per_cpu           163 arch/parisc/kernel/time.c 	per_cpu(cpu_data, cpu).it_value = next_tick;
per_cpu           255 arch/parisc/kernel/time.c 		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
per_cpu           261 arch/parisc/kernel/time.c 			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
per_cpu            78 arch/parisc/kernel/topology.c 	p = &per_cpu(cpu_data, cpuid);
per_cpu            80 arch/parisc/kernel/topology.c 		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
per_cpu           118 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_sibling_map, cpu);
per_cpu           123 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_core_map, cpu);
per_cpu           128 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_l2_cache_map, cpu);
per_cpu           133 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_smallcore_map, cpu);
per_cpu           138 arch/powerpc/include/asm/topology.h #define topology_sibling_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
per_cpu           139 arch/powerpc/include/asm/topology.h #define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
per_cpu           496 arch/powerpc/kernel/cacheinfo.c 	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
per_cpu           498 arch/powerpc/kernel/cacheinfo.c 	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
per_cpu           880 arch/powerpc/kernel/cacheinfo.c 	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
per_cpu           886 arch/powerpc/kernel/cacheinfo.c 	per_cpu(cache_dir_pcpu, cpu_id) = NULL;
per_cpu            70 arch/powerpc/kernel/iommu.c 		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
per_cpu           515 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
per_cpu           520 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
per_cpu           525 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
per_cpu           530 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
per_cpu           535 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
per_cpu           540 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
per_cpu           547 arch/powerpc/kernel/irq.c 					per_cpu(irq_stat, j).hmi_exceptions);
per_cpu           553 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
per_cpu           559 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
per_cpu           567 arch/powerpc/kernel/irq.c 			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
per_cpu           580 arch/powerpc/kernel/irq.c 	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
per_cpu           582 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
per_cpu           583 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).pmu_irqs;
per_cpu           584 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).mce_exceptions;
per_cpu           585 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).spurious_irqs;
per_cpu           586 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
per_cpu           587 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
per_cpu           588 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).sreset_irqs;
per_cpu           590 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
per_cpu           593 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
per_cpu           235 arch/powerpc/kernel/setup-common.c 	pvr = per_cpu(cpu_pvr, cpu_id);
per_cpu           248 arch/powerpc/kernel/smp.c 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
per_cpu           634 arch/powerpc/kernel/smp.c 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
per_cpu           636 arch/powerpc/kernel/smp.c 	per_cpu(next_tlbcam_idx, id)
per_cpu           791 arch/powerpc/kernel/smp.c 	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
per_cpu           813 arch/powerpc/kernel/smp.c 			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
per_cpu           831 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
per_cpu           857 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
per_cpu           859 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
per_cpu           861 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
per_cpu           944 arch/powerpc/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_DEAD;
per_cpu           954 arch/powerpc/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
per_cpu           959 arch/powerpc/kernel/smp.c 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
per_cpu           964 arch/powerpc/kernel/smp.c 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
per_cpu          1174 arch/powerpc/kernel/smp.c 	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
per_cpu            56 arch/powerpc/kernel/sysfs.c 	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
per_cpu            66 arch/powerpc/kernel/sysfs.c 	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
per_cpu            82 arch/powerpc/kernel/sysfs.c 		per_cpu(smt_snooze_delay, cpu) = snooze;
per_cpu           738 arch/powerpc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
per_cpu           829 arch/powerpc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
per_cpu          1054 arch/powerpc/kernel/sysfs.c 		struct cpu *c = &per_cpu(cpu_devices, cpu);
per_cpu           995 arch/powerpc/kernel/time.c 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
per_cpu           119 arch/powerpc/kernel/watchdog.c 		 cpu, tb, per_cpu(wd_timer_tb, cpu),
per_cpu           120 arch/powerpc/kernel/watchdog.c 		 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
per_cpu           242 arch/powerpc/kernel/watchdog.c 	per_cpu(wd_timer_tb, cpu) = tb;
per_cpu           264 arch/powerpc/kernel/watchdog.c 	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
per_cpu           275 arch/powerpc/kernel/watchdog.c 			 cpu, tb, per_cpu(wd_timer_tb, cpu),
per_cpu           276 arch/powerpc/kernel/watchdog.c 			 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
per_cpu           319 arch/powerpc/kernel/watchdog.c 	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
per_cpu           320 arch/powerpc/kernel/watchdog.c 		per_cpu(wd_timer_tb, cpu) = tb;
per_cpu          2702 arch/powerpc/kvm/book3s_hv.c 		struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
per_cpu          2730 arch/powerpc/kvm/book3s_hv.c 		lp = &per_cpu(preempted_vcores, vc->pcpu);
per_cpu           319 arch/powerpc/mm/mem.c 	per_cpu(next_tlbcam_idx, smp_processor_id()) =
per_cpu           499 arch/powerpc/oprofile/op_model_cell.c 			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
per_cpu           502 arch/powerpc/oprofile/op_model_cell.c 			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
per_cpu           517 arch/powerpc/oprofile/op_model_cell.c 					      per_cpu(pmc_values,
per_cpu           777 arch/powerpc/oprofile/op_model_cell.c 			per_cpu(pmc_values, j)[i] = 0;
per_cpu           826 arch/powerpc/oprofile/op_model_cell.c 			per_cpu(pmc_values, cpu)[i] = reset_value[i];
per_cpu          2266 arch/powerpc/perf/core-book3s.c 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
per_cpu           702 arch/powerpc/perf/core-fsl-emb.c 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
per_cpu           317 arch/powerpc/perf/imc-pmu.c 	return per_cpu(local_nest_imc_refc, cpu);
per_cpu           850 arch/powerpc/perf/imc-pmu.c 	u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
per_cpu           866 arch/powerpc/perf/imc-pmu.c 		per_cpu(thread_imc_mem, cpu_id) = local_mem;
per_cpu           936 arch/powerpc/perf/imc-pmu.c 		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
per_cpu          1023 arch/powerpc/perf/imc-pmu.c 	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());
per_cpu          1097 arch/powerpc/perf/imc-pmu.c 	u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
per_cpu          1110 arch/powerpc/perf/imc-pmu.c 		per_cpu(trace_imc_mem, cpu_id) = local_mem;
per_cpu          1150 arch/powerpc/perf/imc-pmu.c 	return (u64)per_cpu(trace_imc_mem, smp_processor_id());
per_cpu          1405 arch/powerpc/perf/imc-pmu.c 				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
per_cpu          1448 arch/powerpc/perf/imc-pmu.c 		if (per_cpu(thread_imc_mem, i))
per_cpu          1449 arch/powerpc/perf/imc-pmu.c 			free_pages((u64)per_cpu(thread_imc_mem, i), order);
per_cpu          1459 arch/powerpc/perf/imc-pmu.c 		if (per_cpu(trace_imc_mem, i))
per_cpu          1460 arch/powerpc/perf/imc-pmu.c 			free_pages((u64)per_cpu(trace_imc_mem, i), order);
per_cpu            79 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
per_cpu            95 arch/powerpc/platforms/cell/cpufreq_spudemand.c 		affected_info = &per_cpu(spu_gov_info, i);
per_cpu           110 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
per_cpu           118 arch/powerpc/platforms/cell/cpufreq_spudemand.c 		info = &per_cpu(spu_gov_info, i);
per_cpu           159 arch/powerpc/platforms/cell/interrupt.c 	return per_cpu(cpu_iic, cpu).target_id;
per_cpu           174 arch/powerpc/platforms/cell/interrupt.c 	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
per_cpu           283 arch/powerpc/platforms/cell/interrupt.c 	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
per_cpu           127 arch/powerpc/platforms/powernv/rng.c 		if (per_cpu(powernv_rng, cpu) == NULL ||
per_cpu           129 arch/powerpc/platforms/powernv/rng.c 			per_cpu(powernv_rng, cpu) = rng;
per_cpu           153 arch/powerpc/platforms/powernv/subcore.c 		while(per_cpu(split_state, i).step < step)
per_cpu           184 arch/powerpc/platforms/powernv/subcore.c 		per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
per_cpu           218 arch/powerpc/platforms/powernv/subcore.c 		split_core_secondary_loop(&per_cpu(split_state, cpu).step);
per_cpu           250 arch/powerpc/platforms/powernv/subcore.c 	per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED;
per_cpu           308 arch/powerpc/platforms/powernv/subcore.c 			while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED)
per_cpu           343 arch/powerpc/platforms/powernv/subcore.c 		state = &per_cpu(split_state, cpu);
per_cpu            78 arch/powerpc/platforms/powernv/vas.c 			per_cpu(cpu_vas_id, cpu) = vasid;
per_cpu           109 arch/powerpc/platforms/powernv/vas.c 		vasid = per_cpu(cpu_vas_id, smp_processor_id());
per_cpu           130 arch/powerpc/platforms/powernv/vas.c 			return per_cpu(cpu_vas_id, cpu);
per_cpu           179 arch/powerpc/platforms/ps3/interrupt.c 	pd = &per_cpu(ps3_private, cpu);
per_cpu           683 arch/powerpc/platforms/ps3/interrupt.c 	struct ps3_private *pd = &per_cpu(ps3_private, cpu);
per_cpu           693 arch/powerpc/platforms/ps3/interrupt.c 	struct ps3_private *pd = &per_cpu(ps3_private, cpu);
per_cpu           718 arch/powerpc/platforms/ps3/interrupt.c 		dump_bmp(&per_cpu(ps3_private, 0));
per_cpu           719 arch/powerpc/platforms/ps3/interrupt.c 		dump_bmp(&per_cpu(ps3_private, 1));
per_cpu           725 arch/powerpc/platforms/ps3/interrupt.c 		dump_bmp(&per_cpu(ps3_private, 0));
per_cpu           726 arch/powerpc/platforms/ps3/interrupt.c 		dump_bmp(&per_cpu(ps3_private, 1));
per_cpu           749 arch/powerpc/platforms/ps3/interrupt.c 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
per_cpu            40 arch/powerpc/platforms/ps3/smp.c 	virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
per_cpu            54 arch/powerpc/platforms/ps3/smp.c 		unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
per_cpu            95 arch/powerpc/platforms/ps3/smp.c 	unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
per_cpu            83 arch/powerpc/platforms/pseries/dtl.c 	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
per_cpu           103 arch/powerpc/platforms/pseries/dtl.c 	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
per_cpu           119 arch/powerpc/platforms/pseries/dtl.c 	return per_cpu(dtl_rings, dtl->cpu).write_index;
per_cpu           366 arch/powerpc/platforms/pseries/dtl.c 		struct dtl *dtl = &per_cpu(cpu_dtl, i);
per_cpu            63 arch/powerpc/platforms/pseries/hotplug-cpu.c 	return per_cpu(current_state, cpu);
per_cpu            68 arch/powerpc/platforms/pseries/hotplug-cpu.c 	per_cpu(current_state, cpu) = state;
per_cpu            73 arch/powerpc/platforms/pseries/hotplug-cpu.c 	return per_cpu(preferred_offline_state, cpu);
per_cpu            78 arch/powerpc/platforms/pseries/hotplug-cpu.c 	per_cpu(preferred_offline_state, cpu) = state;
per_cpu            83 arch/powerpc/platforms/pseries/hotplug-cpu.c 	per_cpu(preferred_offline_state, cpu) = default_offline_state;
per_cpu           155 arch/powerpc/platforms/pseries/hvCall_inst.c 						 per_cpu(hcall_stats, cpu),
per_cpu           406 arch/powerpc/platforms/pseries/lpar.c 	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
per_cpu           413 arch/powerpc/platforms/pseries/lpar.c 	per_cpu(dtl_entry_ridx, cpu) = 0;
per_cpu           416 arch/powerpc/platforms/pseries/lpar.c 	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
per_cpu           425 arch/powerpc/platforms/pseries/lpar.c 	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
per_cpu           240 arch/powerpc/sysdev/xive/common.c 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
per_cpu           466 arch/powerpc/sysdev/xive/common.c 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
per_cpu           490 arch/powerpc/sysdev/xive/common.c 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
per_cpu           571 arch/powerpc/sysdev/xive/common.c 			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
per_cpu          1079 arch/powerpc/sysdev/xive/common.c 	xc = per_cpu(xive_cpu, cpu);
per_cpu          1152 arch/powerpc/sysdev/xive/common.c 	xc = per_cpu(xive_cpu, cpu);
per_cpu          1336 arch/powerpc/sysdev/xive/common.c 	xc = per_cpu(xive_cpu, cpu);
per_cpu          1350 arch/powerpc/sysdev/xive/common.c 		per_cpu(xive_cpu, cpu) = xc;
per_cpu            76 arch/s390/kernel/diag.c 			stat = &per_cpu(diag_stat, cpu);
per_cpu            55 arch/s390/kernel/idle.c 	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
per_cpu            73 arch/s390/kernel/idle.c 	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
per_cpu            98 arch/s390/kernel/idle.c 	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
per_cpu           181 arch/s390/kernel/irq.c 				   per_cpu(irq_stat, cpu).irqs[irq]);
per_cpu           806 arch/s390/kernel/perf_cpum_sf.c 		cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
per_cpu           868 arch/s390/kernel/perf_cpum_sf.c 			cpuhw = &per_cpu(cpu_hw_sf, cpu);
per_cpu          1813 arch/s390/kernel/perf_cpum_sf.c 		struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
per_cpu           144 arch/s390/kernel/processor.c 		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);
per_cpu          1132 arch/s390/kernel/smp.c 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
per_cpu          1138 arch/s390/kernel/smp.c 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
per_cpu          1153 arch/s390/kernel/smp.c 	per_cpu(cpu_device, cpu) = c;
per_cpu           168 arch/s390/kernel/time.c 	cd = &per_cpu(comparators, cpu);
per_cpu           177 arch/s390/pci/pci_irq.c 		cpu_data = &per_cpu(irq_data, cpu);
per_cpu            24 arch/sh/kernel/cpu/sh2/smp-j2.c 	volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
per_cpu           116 arch/sh/kernel/cpu/sh2/smp-j2.c 	pmsg = &per_cpu(j2_ipi_messages, cpu);
per_cpu           311 arch/sh/kernel/hw_breakpoint.c 		bp = per_cpu(bp_per_reg[i], cpu);
per_cpu           354 arch/sh/kernel/perf_event.c 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
per_cpu            79 arch/sh/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
per_cpu            89 arch/sh/kernel/smp.c 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
per_cpu           200 arch/sh/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
per_cpu           218 arch/sh/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
per_cpu            55 arch/sh/kernel/topology.c 		struct cpu *c = &per_cpu(cpu_devices, i);
per_cpu            29 arch/sparc/include/asm/cpudata_32.h #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
per_cpu            35 arch/sparc/include/asm/cpudata_64.h #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
per_cpu            86 arch/sparc/include/asm/mmu_context_64.h 	per_cpu(per_cpu_secondary_mm, cpu) = mm;
per_cpu            50 arch/sparc/include/asm/topology_64.h #define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
per_cpu            43 arch/sparc/kernel/iommu-common.c 		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
per_cpu           293 arch/sparc/kernel/leon_kernel.c 	ce = &per_cpu(sparc32_clockevent, cpu);
per_cpu           300 arch/sparc/kernel/leon_smp.c 		work = &per_cpu(leon_ipi_work, cpu);
per_cpu           314 arch/sparc/kernel/leon_smp.c 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
per_cpu           325 arch/sparc/kernel/leon_smp.c 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
per_cpu           336 arch/sparc/kernel/leon_smp.c 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
per_cpu            61 arch/sparc/kernel/nmi.c 			if (per_cpu(nmi_touch, cpu) != 1)
per_cpu            62 arch/sparc/kernel/nmi.c 				per_cpu(nmi_touch, cpu) = 1;
per_cpu           149 arch/sparc/kernel/nmi.c 	per_cpu(wd_enabled, cpu) = 0;
per_cpu           187 arch/sparc/kernel/nmi.c 		if (!per_cpu(wd_enabled, cpu))
per_cpu          1284 arch/sparc/kernel/pci_sun4v.c 			per_cpu(iommu_batch, i).pglist = (u64 *) page;
per_cpu          1303 arch/sparc/kernel/smp_64.c 		cpumask_clear(&per_cpu(cpu_sibling_map, i));
per_cpu          1305 arch/sparc/kernel/smp_64.c 			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
per_cpu          1312 arch/sparc/kernel/smp_64.c 				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
per_cpu          1384 arch/sparc/kernel/smp_64.c 	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
per_cpu          1385 arch/sparc/kernel/smp_64.c 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
per_cpu          1386 arch/sparc/kernel/smp_64.c 	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
per_cpu          1468 arch/sparc/kernel/smp_64.c 	per_cpu(poke, cpu) = true;
per_cpu          1471 arch/sparc/kernel/smp_64.c 		per_cpu(poke, cpu) = false;
per_cpu           201 arch/sparc/kernel/sun4d_smp.c 		work = &per_cpu(sun4d_ipi_work, cpu);
per_cpu           239 arch/sparc/kernel/sun4d_smp.c 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
per_cpu           250 arch/sparc/kernel/sun4d_smp.c 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
per_cpu           261 arch/sparc/kernel/sun4d_smp.c 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
per_cpu           384 arch/sparc/kernel/sun4d_smp.c 	ce = &per_cpu(sparc32_clockevent, cpu);
per_cpu           249 arch/sparc/kernel/sun4m_smp.c 	ce = &per_cpu(sparc32_clockevent, cpu);
per_cpu            23 arch/sparc/kernel/sysfs.c 	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
per_cpu           116 arch/sparc/kernel/sysfs.c 		ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
per_cpu           210 arch/sparc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
per_cpu           224 arch/sparc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
per_cpu           266 arch/sparc/kernel/sysfs.c 		struct cpu *c = &per_cpu(cpu_devices, cpu);
per_cpu           214 arch/sparc/kernel/time_32.c 	struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
per_cpu           642 arch/sparc/kernel/time_64.c 	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
per_cpu           660 arch/sparc/kernel/time_64.c 		ft = &per_cpu(sparc64_freq_table, cpu);
per_cpu           721 arch/sparc/kernel/time_64.c 	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
per_cpu           812 arch/sparc/mm/init_64.c 		mm = per_cpu(per_cpu_secondary_mm, cpu);
per_cpu           508 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
per_cpu           524 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
per_cpu           538 arch/x86/events/amd/core.c 		nb = per_cpu(cpu_hw_events, i).amd_nb;
per_cpu           560 arch/x86/events/amd/core.c 	cpuhw = &per_cpu(cpu_hw_events, cpu);
per_cpu           410 arch/x86/events/amd/uncore.c 		uncore->id = per_cpu(cpu_llc_id, cpu);
per_cpu          1229 arch/x86/events/core.c 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
per_cpu          1366 arch/x86/events/core.c 	cpuc = &per_cpu(cpu_hw_events, cpu);
per_cpu          1394 arch/x86/events/core.c 		prev_left = per_cpu(pmc_prev_left[idx], cpu);
per_cpu          1580 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
per_cpu          1599 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
per_cpu           147 arch/x86/events/intel/bts.c 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
per_cpu           186 arch/x86/events/intel/bts.c 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
per_cpu          3690 arch/x86/events/intel/core.c 	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
per_cpu          3708 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
per_cpu          3739 arch/x86/events/intel/core.c 			pc = per_cpu(cpu_hw_events, i).shared_regs;
per_cpu          3758 arch/x86/events/intel/core.c 			sibling = &per_cpu(cpu_hw_events, i);
per_cpu          3812 arch/x86/events/intel/core.c 	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
per_cpu          5209 arch/x86/events/intel/core.c 		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
per_cpu           265 arch/x86/events/intel/ds.c 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
per_cpu           277 arch/x86/events/intel/ds.c 	if (!per_cpu(cpu_hw_events, cpu).ds)
per_cpu           359 arch/x86/events/intel/ds.c 		per_cpu(insn_buffer, cpu) = insn_buff;
per_cpu           380 arch/x86/events/intel/ds.c 	kfree(per_cpu(insn_buffer, cpu));
per_cpu           381 arch/x86/events/intel/ds.c 	per_cpu(insn_buffer, cpu) = NULL;
per_cpu           439 arch/x86/events/intel/ds.c 	per_cpu(cpu_hw_events, cpu).ds = ds;
per_cpu           445 arch/x86/events/intel/ds.c 	per_cpu(cpu_hw_events, cpu).ds = NULL;
per_cpu            57 arch/x86/include/asm/desc.h 	return per_cpu(gdt_page, cpu).gdt;
per_cpu           601 arch/x86/include/asm/percpu.h 		&per_cpu(_name, _cpu))
per_cpu           619 arch/x86/include/asm/percpu.h #define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
per_cpu            46 arch/x86/include/asm/preempt.h 	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
per_cpu           166 arch/x86/include/asm/processor.h #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
per_cpu           403 arch/x86/include/asm/processor.h 	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
per_cpu            34 arch/x86/include/asm/smp.h 	return per_cpu(cpu_llc_shared_map, cpu);
per_cpu           159 arch/x86/include/asm/smp.h #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
per_cpu           160 arch/x86/include/asm/smp.h #define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)
per_cpu            96 arch/x86/include/asm/stackprotector.h 	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
per_cpu           114 arch/x86/include/asm/topology.h #define topology_die_cpumask(cpu)		(per_cpu(cpu_die_map, cpu))
per_cpu           115 arch/x86/include/asm/topology.h #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
per_cpu           116 arch/x86/include/asm/topology.h #define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
per_cpu           198 arch/x86/include/asm/uv/uv_hub.h #define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))
per_cpu           847 arch/x86/include/asm/uv/uv_hub.h #define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
per_cpu           771 arch/x86/kernel/acpi/boot.c 	set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
per_cpu           774 arch/x86/kernel/acpi/boot.c 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
per_cpu           216 arch/x86/kernel/apb_timer.c 	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
per_cpu           329 arch/x86/kernel/apb_timer.c 		adev = &per_cpu(cpu_apbt_dev, i);
per_cpu            11 arch/x86/kernel/apic/apic_common.c 	return per_cpu(x86_cpu_to_apicid, cpu);
per_cpu            32 arch/x86/kernel/apic/apic_common.c 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
per_cpu            96 arch/x86/kernel/apic/apic_numachip.c 	int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu            54 arch/x86/kernel/apic/bigsmp_32.c 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
per_cpu           179 arch/x86/kernel/apic/ipi.c 	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
per_cpu           196 arch/x86/kernel/apic/ipi.c 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
per_cpu           215 arch/x86/kernel/apic/ipi.c 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
per_cpu           309 arch/x86/kernel/apic/ipi.c 		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
per_cpu           172 arch/x86/kernel/apic/vector.c 	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
per_cpu           173 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, newcpu)[newvec] = desc;
per_cpu           343 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
per_cpu           352 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
per_cpu           850 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
per_cpu            30 arch/x86/kernel/apic/x2apic_cluster.c 	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
per_cpu            55 arch/x86/kernel/apic/x2apic_cluster.c 		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
per_cpu            59 arch/x86/kernel/apic/x2apic_cluster.c 			dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);
per_cpu            95 arch/x86/kernel/apic/x2apic_cluster.c 	return per_cpu(x86_cpu_to_logical_apicid, cpu);
per_cpu           111 arch/x86/kernel/apic/x2apic_cluster.c 		cmsk = per_cpu(cluster_masks, cpu);
per_cpu           126 arch/x86/kernel/apic/x2apic_cluster.c 	if (per_cpu(cluster_masks, cpu))
per_cpu           150 arch/x86/kernel/apic/x2apic_cluster.c 	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
per_cpu           157 arch/x86/kernel/apic/x2apic_cluster.c 	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
per_cpu           161 arch/x86/kernel/apic/x2apic_cluster.c 	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
per_cpu            38 arch/x86/kernel/apic/x2apic_phys.c 	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu            59 arch/x86/kernel/apic/x2apic_phys.c 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
per_cpu           547 arch/x86/kernel/apic/x2apic_uv_x.c 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu          1382 arch/x86/kernel/apic/x2apic_uv_x.c 		apicid = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu          1512 arch/x86/kernel/apic/x2apic_uv_x.c 		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu           376 arch/x86/kernel/cpu/amd.c 		per_cpu(cpu_llc_id, cpu) = node_id;
per_cpu           401 arch/x86/kernel/cpu/amd.c 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
per_cpu           406 arch/x86/kernel/cpu/amd.c 	return per_cpu(cpu_llc_id, cpu);
per_cpu           425 arch/x86/kernel/cpu/amd.c 		node = per_cpu(cpu_llc_id, cpu);
per_cpu            69 arch/x86/kernel/cpu/aperfmperf.c 	s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
per_cpu            93 arch/x86/kernel/cpu/aperfmperf.c 	return per_cpu(samples.khz, cpu);
per_cpu           131 arch/x86/kernel/cpu/aperfmperf.c 		return per_cpu(samples.khz, cpu);
per_cpu           136 arch/x86/kernel/cpu/aperfmperf.c 	return per_cpu(samples.khz, cpu);
per_cpu           660 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = node_id;
per_cpu           666 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
per_cpu           682 arch/x86/kernel/cpu/cacheinfo.c 			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
per_cpu           700 arch/x86/kernel/cpu/cacheinfo.c 	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
per_cpu           848 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = l2_id;
per_cpu           855 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = l3_id;
per_cpu           867 arch/x86/kernel/cpu/cacheinfo.c 	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
per_cpu           868 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
per_cpu          1601 arch/x86/kernel/cpu/common.c 	tss = &per_cpu(cpu_tss_rw, cpu);
per_cpu          1866 arch/x86/kernel/cpu/common.c 	t = &per_cpu(cpu_tss_rw, cpu);
per_cpu          1953 arch/x86/kernel/cpu/common.c 	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
per_cpu           102 arch/x86/kernel/cpu/hygon.c 		per_cpu(cpu_llc_id, cpu) = node_id;
per_cpu           125 arch/x86/kernel/cpu/hygon.c 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
per_cpu           137 arch/x86/kernel/cpu/hygon.c 		node = per_cpu(cpu_llc_id, cpu);
per_cpu           225 arch/x86/kernel/cpu/mce/amd.c 		per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
per_cpu           484 arch/x86/kernel/cpu/mce/amd.c 	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
per_cpu           496 arch/x86/kernel/cpu/mce/amd.c 	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
per_cpu           528 arch/x86/kernel/cpu/mce/amd.c 		per_cpu(bank_map, cpu) |= (1 << bank);
per_cpu          1020 arch/x86/kernel/cpu/mce/amd.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
per_cpu          1023 arch/x86/kernel/cpu/mce/amd.c 		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
per_cpu          1210 arch/x86/kernel/cpu/mce/amd.c 	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
per_cpu          1305 arch/x86/kernel/cpu/mce/amd.c 	struct device *dev = per_cpu(mce_device, cpu);
per_cpu          1325 arch/x86/kernel/cpu/mce/amd.c 			per_cpu(threshold_banks, cpu)[bank] = b;
per_cpu          1360 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu)[bank] = b;
per_cpu          1380 arch/x86/kernel/cpu/mce/amd.c 	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
per_cpu          1409 arch/x86/kernel/cpu/mce/amd.c 	b = per_cpu(threshold_banks, cpu)[bank];
per_cpu          1419 arch/x86/kernel/cpu/mce/amd.c 			per_cpu(threshold_banks, cpu)[bank] = NULL;
per_cpu          1437 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu)[bank] = NULL;
per_cpu          1444 arch/x86/kernel/cpu/mce/amd.c 	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
per_cpu          1445 arch/x86/kernel/cpu/mce/amd.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
per_cpu          1449 arch/x86/kernel/cpu/mce/amd.c 	kfree(per_cpu(threshold_banks, cpu));
per_cpu          1450 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu) = NULL;
per_cpu          1461 arch/x86/kernel/cpu/mce/amd.c 	bp = per_cpu(threshold_banks, cpu);
per_cpu          1465 arch/x86/kernel/cpu/mce/amd.c 	bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
per_cpu          1470 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu) = bp;
per_cpu          1472 arch/x86/kernel/cpu/mce/amd.c 	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
per_cpu          1473 arch/x86/kernel/cpu/mce/amd.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
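The mce/amd.c hits show a second recurring pattern: the per-CPU variable is only a pointer, and the real storage (one slot per MCE bank) is kcalloc()ed on demand and released on teardown. A hedged sketch of that allocate/index/free lifecycle, with invented names (my_banks, my_num_banks, struct my_bank) standing in for threshold_banks and mce_num_banks:

    #include <linux/percpu.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct my_bank { unsigned int thresh; };

    /* Pointer per CPU; the array behind it is allocated lazily. */
    static DEFINE_PER_CPU(struct my_bank **, my_banks);
    static DEFINE_PER_CPU(unsigned int, my_num_banks);

    static int my_banks_alloc(unsigned int cpu)
    {
            struct my_bank **bp;

            if (per_cpu(my_banks, cpu))
                    return 0;               /* already set up */

            bp = kcalloc(per_cpu(my_num_banks, cpu), sizeof(*bp), GFP_KERNEL);
            if (!bp)
                    return -ENOMEM;

            per_cpu(my_banks, cpu) = bp;
            return 0;
    }

    static void my_banks_free(unsigned int cpu)
    {
            kfree(per_cpu(my_banks, cpu));
            per_cpu(my_banks, cpu) = NULL;
    }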
per_cpu           911 arch/x86/kernel/cpu/mce/core.c 		int severity = mce_severity(&per_cpu(mces_seen, cpu),
per_cpu           917 arch/x86/kernel/cpu/mce/core.c 			m = &per_cpu(mces_seen, cpu);
per_cpu           947 arch/x86/kernel/cpu/mce/core.c 		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
per_cpu          1466 arch/x86/kernel/cpu/mce/core.c 		del_timer_sync(&per_cpu(mce_timer, cpu));
per_cpu          2123 arch/x86/kernel/cpu/mce/core.c 	if (bank >= per_cpu(mce_num_banks, s->id))
per_cpu          2126 arch/x86/kernel/cpu/mce/core.c 	b = &per_cpu(mce_banks_array, s->id)[bank];
per_cpu          2144 arch/x86/kernel/cpu/mce/core.c 	if (bank >= per_cpu(mce_num_banks, s->id))
per_cpu          2147 arch/x86/kernel/cpu/mce/core.c 	b = &per_cpu(mce_banks_array, s->id)[bank];
per_cpu          2277 arch/x86/kernel/cpu/mce/core.c 	dev = per_cpu(mce_device, cpu);
per_cpu          2299 arch/x86/kernel/cpu/mce/core.c 	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
per_cpu          2305 arch/x86/kernel/cpu/mce/core.c 	per_cpu(mce_device, cpu) = dev;
per_cpu          2322 arch/x86/kernel/cpu/mce/core.c 	struct device *dev = per_cpu(mce_device, cpu);
per_cpu          2331 arch/x86/kernel/cpu/mce/core.c 	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
per_cpu          2336 arch/x86/kernel/cpu/mce/core.c 	per_cpu(mce_device, cpu) = NULL;
per_cpu           110 arch/x86/kernel/cpu/mce/inject.c 	struct mce *i = &per_cpu(injectm, m->extcpu);
per_cpu           239 arch/x86/kernel/cpu/mce/inject.c 			struct mce *mcpu = &per_cpu(injectm, cpu);
per_cpu           146 arch/x86/kernel/cpu/mce/intel.c 	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
per_cpu           149 arch/x86/kernel/cpu/mce/intel.c 	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
per_cpu           104 arch/x86/kernel/cpu/mce/therm_throt.c 			      per_cpu(thermal_state, cpu).event.name);	\
per_cpu           155 arch/x86/kernel/cpu/mce/therm_throt.c 	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
per_cpu           209 arch/x86/kernel/cpu/mce/therm_throt.c 	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
per_cpu          2906 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
per_cpu          2962 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
per_cpu          2963 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
per_cpu           143 arch/x86/kernel/espfix_64.c 	if (likely(per_cpu(espfix_stack, cpu)))
per_cpu           204 arch/x86/kernel/espfix_64.c 	per_cpu(espfix_stack, cpu) = addr;
per_cpu           205 arch/x86/kernel/espfix_64.c 	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
per_cpu           547 arch/x86/kernel/hpet.c 	per_cpu(cpu_hpet_channel, cpu) = hc;
per_cpu           583 arch/x86/kernel/hpet.c 	struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
per_cpu           589 arch/x86/kernel/hpet.c 	per_cpu(cpu_hpet_channel, cpu) = NULL;
per_cpu           481 arch/x86/kernel/hw_breakpoint.c 		bp = per_cpu(bp_per_reg[i], cpu);
per_cpu            72 arch/x86/kernel/ioport.c 	tss = &per_cpu(cpu_tss_rw, get_cpu());
per_cpu            55 arch/x86/kernel/irq.c #define irq_stats(x)		(&per_cpu(irq_stat, x))
per_cpu           131 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
per_cpu           135 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
per_cpu           215 arch/x86/kernel/irq.c 	sum += per_cpu(mce_exception_count, cpu);
per_cpu           216 arch/x86/kernel/irq.c 	sum += per_cpu(mce_poll_count, cpu);
per_cpu           117 arch/x86/kernel/irq_32.c 	if (per_cpu(hardirq_stack_ptr, cpu))
per_cpu           129 arch/x86/kernel/irq_32.c 	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
per_cpu           130 arch/x86/kernel/irq_32.c 	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
per_cpu            50 arch/x86/kernel/irq_64.c 	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
per_cpu            62 arch/x86/kernel/irq_64.c 	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
per_cpu            69 arch/x86/kernel/irq_64.c 	if (per_cpu(hardirq_stack_ptr, cpu))
per_cpu            92 arch/x86/kernel/irqinit.c 		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
per_cpu           173 arch/x86/kernel/itmt.c 	return per_cpu(sched_core_priority, cpu);
per_cpu           203 arch/x86/kernel/itmt.c 		per_cpu(sched_core_priority, cpu) = smt_prio;
per_cpu           283 arch/x86/kernel/kvm.c 	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
per_cpu           385 arch/x86/kernel/kvm.c 	src = &per_cpu(steal_time, cpu);
per_cpu           425 arch/x86/kernel/kvm.c 		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
per_cpu           426 arch/x86/kernel/kvm.c 		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
per_cpu           427 arch/x86/kernel/kvm.c 		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
per_cpu           461 arch/x86/kernel/kvm.c 		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu           524 arch/x86/kernel/kvm.c 			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
per_cpu           597 arch/x86/kernel/kvm.c 		src = &per_cpu(steal_time, cpu);
per_cpu           764 arch/x86/kernel/kvm.c 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
per_cpu           799 arch/x86/kernel/kvm.c 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
per_cpu           268 arch/x86/kernel/kvmclock.c 	if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
per_cpu           286 arch/x86/kernel/kvmclock.c 	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
per_cpu           293 arch/x86/kernel/kvmclock.c 	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
per_cpu           304 arch/x86/kernel/kvmclock.c 	per_cpu(hv_clock_per_cpu, cpu) = p;
per_cpu           117 arch/x86/kernel/process.c 		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
per_cpu           336 arch/x86/kernel/process.c 		if (!per_cpu(ssb_state, cpu).shared_state)
per_cpu           340 arch/x86/kernel/process.c 		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
per_cpu           224 arch/x86/kernel/setup_percpu.c 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
per_cpu           225 arch/x86/kernel/setup_percpu.c 		per_cpu(cpu_number, cpu) = cpu;
per_cpu           236 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_apicid, cpu) =
per_cpu           238 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_bios_cpu_apicid, cpu) =
per_cpu           240 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_acpiid, cpu) =
per_cpu           244 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_logical_apicid, cpu) =
per_cpu           248 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_node_map, cpu) =
per_cpu           281 arch/x86/kernel/smpboot.c 	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
per_cpu           442 arch/x86/kernel/smpboot.c 		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
per_cpu           486 arch/x86/kernel/smpboot.c 	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
per_cpu           490 arch/x86/kernel/smpboot.c 	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
per_cpu          1009 arch/x86/kernel/smpboot.c 	per_cpu(current_task, cpu) = idle;
per_cpu          1018 arch/x86/kernel/smpboot.c 	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
per_cpu          1173 arch/x86/kernel/smpboot.c 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
per_cpu          1332 arch/x86/kernel/smpboot.c 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
per_cpu          1333 arch/x86/kernel/smpboot.c 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
per_cpu          1334 arch/x86/kernel/smpboot.c 		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
per_cpu          1335 arch/x86/kernel/smpboot.c 		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
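The smpboot.c lines just above allocate a per-CPU cpumask for every possible CPU during SMP bring-up (the same sequence reappears in arch/x86/xen/smp_pv.c further down). A minimal sketch of that allocation step, assuming a hypothetical my_sibling_map in place of cpu_sibling_map:

    #include <linux/init.h>
    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    /* Hypothetical per-CPU sibling mask, mirroring cpu_sibling_map above. */
    static DEFINE_PER_CPU(cpumask_var_t, my_sibling_map);

    static void __init my_smp_prepare_cpus(void)
    {
            unsigned int i;

            /* One zeroed mask per possible CPU; with CONFIG_CPUMASK_OFFSTACK
             * this allocates, otherwise it only clears the embedded mask. */
            for_each_possible_cpu(i)
                    zalloc_cpumask_var(&per_cpu(my_sibling_map, i), GFP_KERNEL);
    }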
per_cpu           142 arch/x86/kernel/topology.c 		per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
per_cpu           144 arch/x86/kernel/topology.c 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
per_cpu           150 arch/x86/kernel/topology.c 	unregister_cpu(&per_cpu(cpu_devices, num).cpu);
per_cpu           157 arch/x86/kernel/topology.c 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
per_cpu           952 arch/x86/kernel/tsc.c 		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
per_cpu           953 arch/x86/kernel/tsc.c 		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
per_cpu           925 arch/x86/kvm/svm.c 	sd = per_cpu(svm_data, me);
per_cpu           987 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
per_cpu           992 arch/x86/kvm/svm.c 	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
per_cpu          1018 arch/x86/kvm/svm.c 	per_cpu(svm_data, cpu) = sd;
per_cpu          1783 arch/x86/kvm/svm.c 		sd = per_cpu(svm_data, cpu);
per_cpu          2288 arch/x86/kvm/svm.c 		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
per_cpu          2315 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
per_cpu          5053 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
per_cpu          5060 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
per_cpu          5086 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
per_cpu           656 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
per_cpu           669 arch/x86/kvm/vmx/vmx.c 	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu           670 arch/x86/kvm/vmx/vmx.c 		per_cpu(current_vmcs, cpu) = NULL;
per_cpu          1309 arch/x86/kvm/vmx/vmx.c 			 &per_cpu(loaded_vmcss_on_cpu, cpu));
per_cpu          1313 arch/x86/kvm/vmx/vmx.c 	prev = per_cpu(current_vmcs, cpu);
per_cpu          1315 arch/x86/kvm/vmx/vmx.c 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
per_cpu          2226 arch/x86/kvm/vmx/vmx.c 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
per_cpu          2263 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
per_cpu          2588 arch/x86/kvm/vmx/vmx.c 		free_vmcs(per_cpu(vmxarea, cpu));
per_cpu          2589 arch/x86/kvm/vmx/vmx.c 		per_cpu(vmxarea, cpu) = NULL;
per_cpu          2619 arch/x86/kvm/vmx/vmx.c 		per_cpu(vmxarea, cpu) = vmcs;
per_cpu          5313 arch/x86/kvm/vmx/vmx.c 	spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
per_cpu          5314 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
per_cpu          5321 arch/x86/kvm/vmx/vmx.c 	spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
per_cpu          7342 arch/x86/kvm/vmx/vmx.c 		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
per_cpu          7344 arch/x86/kvm/vmx/vmx.c 		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
per_cpu          7377 arch/x86/kvm/vmx/vmx.c 		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
per_cpu          7379 arch/x86/kvm/vmx/vmx.c 			      &per_cpu(blocked_vcpu_on_cpu,
per_cpu          7381 arch/x86/kvm/vmx/vmx.c 		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
per_cpu          8042 arch/x86/kvm/vmx/vmx.c 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
per_cpu          8043 arch/x86/kvm/vmx/vmx.c 		INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
per_cpu          8044 arch/x86/kvm/vmx/vmx.c 		spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
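The vmx.c hits ending here show per-CPU containers rather than scalars: each CPU owns a list of loaded VMCSs plus a spinlock, both initialised in a for_each_possible_cpu() loop at module init. A sketch of that pattern under hypothetical names (my_loaded_list, my_loaded_lock, struct my_item):

    #include <linux/percpu.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/cpumask.h>

    struct my_item { struct list_head node; };

    /* Hypothetical per-CPU list and lock, mirroring loaded_vmcss_on_cpu
     * and blocked_vcpu_on_cpu_lock above. */
    static DEFINE_PER_CPU(struct list_head, my_loaded_list);
    static DEFINE_PER_CPU(spinlock_t, my_loaded_lock);

    static void my_percpu_init(void)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    INIT_LIST_HEAD(&per_cpu(my_loaded_list, cpu));
                    spin_lock_init(&per_cpu(my_loaded_lock, cpu));
            }
    }

    static void my_add_on(struct my_item *it, int cpu)
    {
            spin_lock(&per_cpu(my_loaded_lock, cpu));
            list_add(&it->node, &per_cpu(my_loaded_list, cpu));
            spin_unlock(&per_cpu(my_loaded_lock, cpu));
    }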
per_cpu          7004 arch/x86/kvm/x86.c 		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
per_cpu            67 arch/x86/mm/cpu_entry_area.c 	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
per_cpu            97 arch/x86/mm/cpu_entry_area.c 	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
per_cpu           164 arch/x86/mm/cpu_entry_area.c 	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
per_cpu           168 arch/x86/mm/cpu_entry_area.c 	per_cpu(cpu_entry_area, cpu) = cea;
per_cpu            97 arch/x86/mm/numa.c 	per_cpu(x86_cpu_to_node_map, cpu) = node;
per_cpu           794 arch/x86/mm/numa.c 	return per_cpu(x86_cpu_to_node_map, cpu);
per_cpu           813 arch/x86/mm/numa.c 	return per_cpu(x86_cpu_to_node_map, cpu);
per_cpu           459 arch/x86/mm/pti.c 		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
per_cpu           660 arch/x86/mm/tlb.c 	return !per_cpu(cpu_tlbstate.is_lazy, cpu);
per_cpu           156 arch/x86/oprofile/nmi_int.c 		kfree(per_cpu(cpu_msrs, i).multiplex);
per_cpu           157 arch/x86/oprofile/nmi_int.c 		per_cpu(cpu_msrs, i).multiplex = NULL;
per_cpu           158 arch/x86/oprofile/nmi_int.c 		per_cpu(switch_index, i) = 0;
per_cpu           172 arch/x86/oprofile/nmi_int.c 		per_cpu(cpu_msrs, i).multiplex =
per_cpu           174 arch/x86/oprofile/nmi_int.c 		if (!per_cpu(cpu_msrs, i).multiplex)
per_cpu           197 arch/x86/oprofile/nmi_int.c 	per_cpu(switch_index, cpu) = 0;
per_cpu           229 arch/x86/oprofile/nmi_int.c 	int si = per_cpu(switch_index, cpu);
per_cpu           230 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
per_cpu           238 arch/x86/oprofile/nmi_int.c 		per_cpu(switch_index, cpu) = 0;
per_cpu           240 arch/x86/oprofile/nmi_int.c 		per_cpu(switch_index, cpu) = si;
per_cpu           285 arch/x86/oprofile/nmi_int.c 	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
per_cpu           286 arch/x86/oprofile/nmi_int.c 	       per_cpu(cpu_msrs, 0).multiplex,
per_cpu           307 arch/x86/oprofile/nmi_int.c 		kfree(per_cpu(cpu_msrs, i).counters);
per_cpu           308 arch/x86/oprofile/nmi_int.c 		per_cpu(cpu_msrs, i).counters = NULL;
per_cpu           309 arch/x86/oprofile/nmi_int.c 		kfree(per_cpu(cpu_msrs, i).controls);
per_cpu           310 arch/x86/oprofile/nmi_int.c 		per_cpu(cpu_msrs, i).controls = NULL;
per_cpu           322 arch/x86/oprofile/nmi_int.c 		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
per_cpu           324 arch/x86/oprofile/nmi_int.c 		if (!per_cpu(cpu_msrs, i).counters)
per_cpu           326 arch/x86/oprofile/nmi_int.c 		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
per_cpu           328 arch/x86/oprofile/nmi_int.c 		if (!per_cpu(cpu_msrs, i).controls)
per_cpu           345 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
per_cpu           352 arch/x86/oprofile/nmi_int.c 	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
per_cpu           377 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
per_cpu           386 arch/x86/oprofile/nmi_int.c 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
per_cpu           458 arch/x86/oprofile/nmi_int.c 	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
per_cpu           466 arch/x86/oprofile/nmi_int.c 		memcpy(per_cpu(cpu_msrs, cpu).counters,
per_cpu           467 arch/x86/oprofile/nmi_int.c 		       per_cpu(cpu_msrs, 0).counters,
per_cpu           470 arch/x86/oprofile/nmi_int.c 		memcpy(per_cpu(cpu_msrs, cpu).controls,
per_cpu           471 arch/x86/oprofile/nmi_int.c 		       per_cpu(cpu_msrs, 0).controls,
per_cpu           148 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu           163 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu           195 arch/x86/platform/uv/tlb_uv.c 			return per_cpu(x86_cpu_to_apicid, cpu);
per_cpu           373 arch/x86/platform/uv/tlb_uv.c 	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
per_cpu           790 arch/x86/platform/uv/tlb_uv.c 			tbcp = &per_cpu(bau_control, tcpu);
per_cpu           999 arch/x86/platform/uv/tlb_uv.c 			tbcp = &per_cpu(bau_control, tcpu);
per_cpu          1113 arch/x86/platform/uv/tlb_uv.c 	bcp = &per_cpu(bau_control, cpu);
per_cpu          1144 arch/x86/platform/uv/tlb_uv.c 	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
per_cpu          1288 arch/x86/platform/uv/tlb_uv.c 	bcp = &per_cpu(bau_control, smp_processor_id());
per_cpu          1418 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          1531 arch/x86/platform/uv/tlb_uv.c 			stat = &per_cpu(ptcstats, cpu);
per_cpu          1631 arch/x86/platform/uv/tlb_uv.c 	bcp = &per_cpu(bau_control, cpu);
per_cpu          1638 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          1784 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          1817 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          1830 arch/x86/platform/uv/tlb_uv.c 	bcp = &per_cpu(bau_control, smp_processor_id());
per_cpu          1913 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          1917 arch/x86/platform/uv/tlb_uv.c 		bcp->statp			= &per_cpu(ptcstats, cpu);
per_cpu          1953 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          2031 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
per_cpu          2219 arch/x86/platform/uv/tlb_uv.c 		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
per_cpu          2264 arch/x86/platform/uv/tlb_uv.c 		free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
per_cpu           323 arch/x86/platform/uv/uv_time.c 	struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
per_cpu           191 arch/x86/xen/enlighten.c 		per_cpu(xen_vcpu, cpu) =
per_cpu           195 arch/x86/xen/enlighten.c 		per_cpu(xen_vcpu, cpu) = NULL;
per_cpu           219 arch/x86/xen/enlighten.c 		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
per_cpu           224 arch/x86/xen/enlighten.c 		vcpup = &per_cpu(xen_vcpu_info, cpu);
per_cpu           250 arch/x86/xen/enlighten.c 			per_cpu(xen_vcpu, cpu) = vcpup;
per_cpu           257 arch/x86/xen/enlighten.c 	return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
per_cpu           150 arch/x86/xen/enlighten_hvm.c 		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
per_cpu           152 arch/x86/xen/enlighten_hvm.c 		per_cpu(xen_vcpu_id, cpu) = cpu;
per_cpu           519 arch/x86/xen/enlighten_pv.c 	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
per_cpu           975 arch/x86/xen/enlighten_pv.c 		per_cpu(xen_vcpu_id, cpu) = cpu;
per_cpu          1260 arch/x86/xen/enlighten_pv.c 	per_cpu(xen_vcpu_id, 0) = 0;
per_cpu          1287 arch/x86/xen/enlighten_pv.c 	xen_initial_gdt = &per_cpu(gdt_page, 0);
per_cpu          1417 arch/x86/xen/enlighten_pv.c 	if (per_cpu(xen_vcpu, cpu) == NULL)
per_cpu          1021 arch/x86/xen/mmu_pv.c 			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
per_cpu          1037 arch/x86/xen/mmu_pv.c 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
per_cpu           547 arch/x86/xen/pmu.c 	per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
per_cpu           548 arch/x86/xen/pmu.c 	per_cpu(xenpmu_shared, cpu).flags = 0;
per_cpu           579 arch/x86/xen/pmu.c 	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
per_cpu           580 arch/x86/xen/pmu.c 	per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
per_cpu            35 arch/x86/xen/smp.c 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
per_cpu            36 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
per_cpu            37 arch/x86/xen/smp.c 		per_cpu(xen_resched_irq, cpu).irq = -1;
per_cpu            38 arch/x86/xen/smp.c 		kfree(per_cpu(xen_resched_irq, cpu).name);
per_cpu            39 arch/x86/xen/smp.c 		per_cpu(xen_resched_irq, cpu).name = NULL;
per_cpu            41 arch/x86/xen/smp.c 	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
per_cpu            42 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
per_cpu            43 arch/x86/xen/smp.c 		per_cpu(xen_callfunc_irq, cpu).irq = -1;
per_cpu            44 arch/x86/xen/smp.c 		kfree(per_cpu(xen_callfunc_irq, cpu).name);
per_cpu            45 arch/x86/xen/smp.c 		per_cpu(xen_callfunc_irq, cpu).name = NULL;
per_cpu            47 arch/x86/xen/smp.c 	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
per_cpu            48 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
per_cpu            49 arch/x86/xen/smp.c 		per_cpu(xen_debug_irq, cpu).irq = -1;
per_cpu            50 arch/x86/xen/smp.c 		kfree(per_cpu(xen_debug_irq, cpu).name);
per_cpu            51 arch/x86/xen/smp.c 		per_cpu(xen_debug_irq, cpu).name = NULL;
per_cpu            53 arch/x86/xen/smp.c 	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
per_cpu            54 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
per_cpu            56 arch/x86/xen/smp.c 		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
per_cpu            57 arch/x86/xen/smp.c 		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
per_cpu            58 arch/x86/xen/smp.c 		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
per_cpu            76 arch/x86/xen/smp.c 	per_cpu(xen_resched_irq, cpu).irq = rc;
per_cpu            77 arch/x86/xen/smp.c 	per_cpu(xen_resched_irq, cpu).name = resched_name;
per_cpu            88 arch/x86/xen/smp.c 	per_cpu(xen_callfunc_irq, cpu).irq = rc;
per_cpu            89 arch/x86/xen/smp.c 	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
per_cpu            97 arch/x86/xen/smp.c 	per_cpu(xen_debug_irq, cpu).irq = rc;
per_cpu            98 arch/x86/xen/smp.c 	per_cpu(xen_debug_irq, cpu).name = debug_name;
per_cpu           109 arch/x86/xen/smp.c 	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
per_cpu           110 arch/x86/xen/smp.c 	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
per_cpu            44 arch/x86/xen/smp_hvm.c 		per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
per_cpu           100 arch/x86/xen/smp_pv.c 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
per_cpu           101 arch/x86/xen/smp_pv.c 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
per_cpu           102 arch/x86/xen/smp_pv.c 		per_cpu(xen_irq_work, cpu).irq = -1;
per_cpu           103 arch/x86/xen/smp_pv.c 		kfree(per_cpu(xen_irq_work, cpu).name);
per_cpu           104 arch/x86/xen/smp_pv.c 		per_cpu(xen_irq_work, cpu).name = NULL;
per_cpu           107 arch/x86/xen/smp_pv.c 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
per_cpu           108 arch/x86/xen/smp_pv.c 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
per_cpu           109 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).irq = -1;
per_cpu           110 arch/x86/xen/smp_pv.c 		kfree(per_cpu(xen_pmu_irq, cpu).name);
per_cpu           111 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).name = NULL;
per_cpu           129 arch/x86/xen/smp_pv.c 	per_cpu(xen_irq_work, cpu).irq = rc;
per_cpu           130 arch/x86/xen/smp_pv.c 	per_cpu(xen_irq_work, cpu).name = callfunc_name;
per_cpu           140 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).irq = rc;
per_cpu           141 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
per_cpu           254 arch/x86/xen/smp_pv.c 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
per_cpu           255 arch/x86/xen/smp_pv.c 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
per_cpu           256 arch/x86/xen/smp_pv.c 		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
per_cpu           257 arch/x86/xen/smp_pv.c 		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
per_cpu           353 arch/x86/xen/smp_pv.c 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
per_cpu           382 arch/x86/xen/smp_pv.c 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
per_cpu            25 arch/x86/xen/spinlock.c 	int irq = per_cpu(lock_kicker_irq, cpu);
per_cpu            74 arch/x86/xen/spinlock.c 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
per_cpu            75 arch/x86/xen/spinlock.c 	     cpu, per_cpu(lock_kicker_irq, cpu));
per_cpu            87 arch/x86/xen/spinlock.c 		per_cpu(lock_kicker_irq, cpu) = irq;
per_cpu            88 arch/x86/xen/spinlock.c 		per_cpu(irq_name, cpu) = name;
per_cpu            99 arch/x86/xen/spinlock.c 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
per_cpu           100 arch/x86/xen/spinlock.c 	per_cpu(lock_kicker_irq, cpu) = -1;
per_cpu           101 arch/x86/xen/spinlock.c 	kfree(per_cpu(irq_name, cpu));
per_cpu           102 arch/x86/xen/spinlock.c 	per_cpu(irq_name, cpu) = NULL;
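The xen/spinlock.c (and, above it, xen/smp.c) hits are the per-CPU resource-lifecycle variant of the idiom: an IRQ number and its kmalloc()ed name are stashed per CPU when the event channel is bound, so CPU-offline can unbind and free exactly what was set up. A hedged sketch with invented names (my_kick_irq, my_irq_name) in place of lock_kicker_irq and irq_name:

    #include <linux/percpu.h>
    #include <linux/slab.h>

    /* Hypothetical per-CPU IRQ bookkeeping, mirroring lock_kicker_irq/irq_name. */
    static DEFINE_PER_CPU(int, my_kick_irq) = -1;
    static DEFINE_PER_CPU(char *, my_irq_name);

    static void my_irq_record(unsigned int cpu, int irq, char *name)
    {
            /* Remember what was bound so teardown can undo it. */
            per_cpu(my_kick_irq, cpu) = irq;
            per_cpu(my_irq_name, cpu) = name;
    }

    static void my_irq_forget(unsigned int cpu)
    {
            per_cpu(my_kick_irq, cpu) = -1;
            kfree(per_cpu(my_irq_name, cpu));
            per_cpu(my_irq_name, cpu) = NULL;
    }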
per_cpu           320 arch/x86/xen/time.c 	evt = &per_cpu(xen_clock_events, cpu).evt;
per_cpu           330 arch/x86/xen/time.c 	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
per_cpu            35 arch/xtensa/include/asm/mmu_context.h #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
per_cpu            63 arch/xtensa/kernel/irq.c 		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
per_cpu           414 arch/xtensa/kernel/setup.c 		struct cpu *cpu = &per_cpu(cpu_data, i);
per_cpu           413 arch/xtensa/kernel/smp.c 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
per_cpu           452 arch/xtensa/kernel/smp.c 					per_cpu(ipi_data, cpu).ipi_count[i]);
per_cpu           139 arch/xtensa/kernel/time.c 	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
per_cpu           370 arch/xtensa/kernel/traps.c 			per_cpu(exc_table, cpu).type[cause] = (handler);\
per_cpu           377 arch/xtensa/kernel/traps.c 	void *previous = per_cpu(exc_table, 0).default_handler[cause];
per_cpu            90 block/blk-softirq.c 	list_splice_init(&per_cpu(blk_cpu_done, cpu),
per_cpu           152 block/blk-softirq.c 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
per_cpu           400 drivers/acpi/acpi_processor.c 	if (per_cpu(processor_device_array, pr->id) != NULL &&
per_cpu           401 drivers/acpi/acpi_processor.c 	    per_cpu(processor_device_array, pr->id) != device) {
per_cpu           412 drivers/acpi/acpi_processor.c 	per_cpu(processor_device_array, pr->id) = device;
per_cpu           413 drivers/acpi/acpi_processor.c 	per_cpu(processors, pr->id) = pr;
per_cpu           437 drivers/acpi/acpi_processor.c 	per_cpu(processors, pr->id) = NULL;
per_cpu           471 drivers/acpi/acpi_processor.c 	per_cpu(processor_device_array, pr->id) = NULL;
per_cpu           472 drivers/acpi/acpi_processor.c 	per_cpu(processors, pr->id) = NULL;
per_cpu           328 drivers/acpi/cppc_acpi.c 				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
per_cpu           448 drivers/acpi/cppc_acpi.c 		cpc_ptr = per_cpu(cpc_desc_ptr, i);
per_cpu           473 drivers/acpi/cppc_acpi.c 			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
per_cpu           506 drivers/acpi/cppc_acpi.c 			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
per_cpu           820 drivers/acpi/cppc_acpi.c 	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
per_cpu           862 drivers/acpi/cppc_acpi.c 	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
per_cpu           867 drivers/acpi/cppc_acpi.c 		per_cpu(cpc_desc_ptr, pr->id) = NULL;
per_cpu           902 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
per_cpu           915 drivers/acpi/cppc_acpi.c 	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
per_cpu           971 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
per_cpu          1016 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
per_cpu          1061 drivers/acpi/cppc_acpi.c 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
per_cpu          1062 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
per_cpu          1103 drivers/acpi/cppc_acpi.c 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
per_cpu          1108 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
per_cpu          1193 drivers/acpi/cppc_acpi.c 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
per_cpu          1196 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
per_cpu          1273 drivers/acpi/cppc_acpi.c 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
per_cpu          1275 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
per_cpu          1412 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
per_cpu          1415 drivers/acpi/cppc_acpi.c 	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
per_cpu           102 drivers/acpi/processor_driver.c 	struct acpi_processor *pr = per_cpu(processors, cpu);
per_cpu           131 drivers/acpi/processor_driver.c 	struct acpi_processor *pr = per_cpu(processors, cpu);
per_cpu           675 drivers/acpi/processor_idle.c 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
per_cpu           757 drivers/acpi/processor_idle.c 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
per_cpu           767 drivers/acpi/processor_idle.c 			cx = per_cpu(acpi_cstate[index], dev->cpu);
per_cpu           774 drivers/acpi/processor_idle.c 				cx = per_cpu(acpi_cstate[index], dev->cpu);
per_cpu           797 drivers/acpi/processor_idle.c 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
per_cpu           830 drivers/acpi/processor_idle.c 		per_cpu(acpi_cstate[count], dev->cpu) = cx;
per_cpu          1356 drivers/acpi/processor_idle.c 	dev = per_cpu(acpi_cpuidle_device, pr->id);
per_cpu          1395 drivers/acpi/processor_idle.c 			_pr = per_cpu(processors, cpu);
per_cpu          1398 drivers/acpi/processor_idle.c 			dev = per_cpu(acpi_cpuidle_device, cpu);
per_cpu          1408 drivers/acpi/processor_idle.c 			_pr = per_cpu(processors, cpu);
per_cpu          1413 drivers/acpi/processor_idle.c 				dev = per_cpu(acpi_cpuidle_device, cpu);
per_cpu          1459 drivers/acpi/processor_idle.c 		per_cpu(acpi_cpuidle_device, pr->id) = dev;
per_cpu          1479 drivers/acpi/processor_idle.c 	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
per_cpu           145 drivers/acpi/processor_perflib.c 	pr = per_cpu(processors, cpu);
per_cpu           165 drivers/acpi/processor_perflib.c 		struct acpi_processor *pr = per_cpu(processors, cpu);
per_cpu           185 drivers/acpi/processor_perflib.c 		struct acpi_processor *pr = per_cpu(processors, cpu);
per_cpu           595 drivers/acpi/processor_perflib.c 		pr = per_cpu(processors, i);
per_cpu           614 drivers/acpi/processor_perflib.c 		pr = per_cpu(processors, i);
per_cpu           634 drivers/acpi/processor_perflib.c 		pr = per_cpu(processors, i);
per_cpu           660 drivers/acpi/processor_perflib.c 			match_pr = per_cpu(processors, j);
per_cpu           688 drivers/acpi/processor_perflib.c 			match_pr = per_cpu(processors, j);
per_cpu           705 drivers/acpi/processor_perflib.c 		pr = per_cpu(processors, i);
per_cpu           736 drivers/acpi/processor_perflib.c 	pr = per_cpu(processors, cpu);
per_cpu           769 drivers/acpi/processor_perflib.c 	pr = per_cpu(processors, cpu);
per_cpu            40 drivers/acpi/processor_thermal.c 	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
per_cpu           106 drivers/acpi/processor_thermal.c 		pr = per_cpu(processors, i);
per_cpu           133 drivers/acpi/processor_thermal.c 		struct acpi_processor *pr = per_cpu(processors, cpu);
per_cpu           153 drivers/acpi/processor_thermal.c 		struct acpi_processor *pr = per_cpu(processors, policy->cpu);
per_cpu            73 drivers/acpi/processor_throttling.c 		pr = per_cpu(processors, i);
per_cpu            94 drivers/acpi/processor_throttling.c 		pr = per_cpu(processors, i);
per_cpu           120 drivers/acpi/processor_throttling.c 			match_pr = per_cpu(processors, j);
per_cpu           153 drivers/acpi/processor_throttling.c 			match_pr = per_cpu(processors, j);
per_cpu           175 drivers/acpi/processor_throttling.c 		pr = per_cpu(processors, i);
per_cpu           218 drivers/acpi/processor_throttling.c 	pr = per_cpu(processors, cpu);
per_cpu          1127 drivers/acpi/processor_throttling.c 			match_pr = per_cpu(processors, i);
per_cpu            35 drivers/base/arch_topology.c 		per_cpu(freq_scale, i) = scale;
per_cpu            42 drivers/base/arch_topology.c 	per_cpu(cpu_scale, cpu) = capacity;
per_cpu            25 drivers/base/cacheinfo.c #define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
per_cpu           351 drivers/base/cacheinfo.c #define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))
per_cpu           357 drivers/base/cacheinfo.c #define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
per_cpu            82 drivers/base/cpu.c 	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
per_cpu           390 drivers/base/cpu.c 	per_cpu(cpu_sys_devices, num) = &cpu->dev;
per_cpu           401 drivers/base/cpu.c 		return per_cpu(cpu_sys_devices, cpu);
per_cpu           513 drivers/base/cpu.c 		if (register_cpu(&per_cpu(cpu_devices, i), i))
per_cpu           274 drivers/base/power/domain_governor.c 		dev = per_cpu(cpuidle_devices, cpu);
per_cpu           550 drivers/clocksource/arm_arch_timer.c 			per_cpu(timer_unstable_counter_workaround, i) = wa;
per_cpu            45 drivers/cpufreq/amd_freq_sensitivity.c 	struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
per_cpu            90 drivers/cpufreq/arm_big_little.c 		cpu_freq = per_cpu(cpu_last_req_freq, j);
per_cpu            92 drivers/cpufreq/arm_big_little.c 		if ((cluster == per_cpu(physical_cluster, j)) &&
per_cpu           105 drivers/cpufreq/arm_big_little.c 	u32 cur_cluster = per_cpu(physical_cluster, cpu);
per_cpu           121 drivers/cpufreq/arm_big_little.c 		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
per_cpu           124 drivers/cpufreq/arm_big_little.c 		return per_cpu(cpu_last_req_freq, cpu);
per_cpu           140 drivers/cpufreq/arm_big_little.c 		prev_rate = per_cpu(cpu_last_req_freq, cpu);
per_cpu           141 drivers/cpufreq/arm_big_little.c 		per_cpu(cpu_last_req_freq, cpu) = rate;
per_cpu           142 drivers/cpufreq/arm_big_little.c 		per_cpu(physical_cluster, cpu) = new_cluster;
per_cpu           171 drivers/cpufreq/arm_big_little.c 			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
per_cpu           172 drivers/cpufreq/arm_big_little.c 			per_cpu(physical_cluster, cpu) = old_cluster;
per_cpu           219 drivers/cpufreq/arm_big_little.c 	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
per_cpu           476 drivers/cpufreq/arm_big_little.c 			per_cpu(physical_cluster, cpu) = cur_cluster;
per_cpu           479 drivers/cpufreq/arm_big_little.c 		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
per_cpu           493 drivers/cpufreq/arm_big_little.c 		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
per_cpu           182 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
per_cpu          1267 drivers/cpufreq/cpufreq.c 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
per_cpu          1309 drivers/cpufreq/cpufreq.c 	policy = per_cpu(cpufreq_cpu_data, cpu);
per_cpu          1369 drivers/cpufreq/cpufreq.c 			per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu          1527 drivers/cpufreq/cpufreq.c 	policy = per_cpu(cpufreq_cpu_data, cpu);
per_cpu          1610 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
per_cpu           103 drivers/cpufreq/cpufreq_governor.c 			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
per_cpu           138 drivers/cpufreq/cpufreq_governor.c 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
per_cpu           332 drivers/cpufreq/cpufreq_governor.c 		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
per_cpu           368 drivers/cpufreq/cpufreq_governor.c 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
per_cpu           383 drivers/cpufreq/cpufreq_governor.c 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
per_cpu           524 drivers/cpufreq/cpufreq_governor.c 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
per_cpu            36 drivers/cpufreq/cpufreq_userspace.c 	if (!per_cpu(cpu_is_managed, policy->cpu))
per_cpu            80 drivers/cpufreq/cpufreq_userspace.c 	per_cpu(cpu_is_managed, policy->cpu) = 1;
per_cpu            93 drivers/cpufreq/cpufreq_userspace.c 	per_cpu(cpu_is_managed, policy->cpu) = 0;
per_cpu          2599 drivers/cpufreq/intel_pstate.c 		struct acpi_processor *pr = per_cpu(processors, i);
per_cpu          2643 drivers/cpufreq/intel_pstate.c 		struct acpi_processor *pr = per_cpu(processors, i);
per_cpu           257 drivers/cpufreq/pcc-cpufreq.c 	pr = per_cpu(processors, cpu);
per_cpu           935 drivers/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
per_cpu          1083 drivers/cpufreq/powernow-k8.c 		per_cpu(powernow_data, cpu) = data;
per_cpu          1097 drivers/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
per_cpu          1108 drivers/cpufreq/powernow-k8.c 		per_cpu(powernow_data, cpu) = NULL;
per_cpu          1123 drivers/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
per_cpu           398 drivers/cpufreq/powernv-cpufreq.c 	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
per_cpu          1073 drivers/cpufreq/powernv-cpufreq.c 			per_cpu(chip_info, cpu) =  &chips[i];
per_cpu            40 drivers/cpufreq/sh-cpufreq.c 	return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
per_cpu            48 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
per_cpu            92 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
per_cpu           111 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
per_cpu           142 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
per_cpu           261 drivers/cpufreq/speedstep-centrino.c 	per_cpu(centrino_model, policy->cpu) = model;
per_cpu           296 drivers/cpufreq/speedstep-centrino.c 	if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
per_cpu           297 drivers/cpufreq/speedstep-centrino.c 	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
per_cpu           298 drivers/cpufreq/speedstep-centrino.c 	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
per_cpu           303 drivers/cpufreq/speedstep-centrino.c 	if ((!per_cpu(centrino_model, cpu)) ||
per_cpu           304 drivers/cpufreq/speedstep-centrino.c 	    (!per_cpu(centrino_model, cpu)->op_points))
per_cpu           309 drivers/cpufreq/speedstep-centrino.c 		per_cpu(centrino_model, cpu)->op_points[i].frequency
per_cpu           312 drivers/cpufreq/speedstep-centrino.c 		if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
per_cpu           313 drivers/cpufreq/speedstep-centrino.c 			return per_cpu(centrino_model, cpu)->
per_cpu           317 drivers/cpufreq/speedstep-centrino.c 		return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
per_cpu           367 drivers/cpufreq/speedstep-centrino.c 		per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
per_cpu           369 drivers/cpufreq/speedstep-centrino.c 	if (!per_cpu(centrino_cpu, policy->cpu)) {
per_cpu           398 drivers/cpufreq/speedstep-centrino.c 	policy->freq_table = per_cpu(centrino_model, policy->cpu)->op_points;
per_cpu           407 drivers/cpufreq/speedstep-centrino.c 	if (!per_cpu(centrino_model, cpu))
per_cpu           410 drivers/cpufreq/speedstep-centrino.c 	per_cpu(centrino_model, cpu) = NULL;
per_cpu           433 drivers/cpufreq/speedstep-centrino.c 	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
per_cpu           439 drivers/cpufreq/speedstep-centrino.c 	op_points = &per_cpu(centrino_model, cpu)->op_points[index];
per_cpu           333 drivers/cpuidle/coupled.c 	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
per_cpu           652 drivers/cpuidle/coupled.c 		other_dev = per_cpu(cpuidle_devices, cpu);
per_cpu           675 drivers/cpuidle/coupled.c 	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
per_cpu           749 drivers/cpuidle/coupled.c 	dev = per_cpu(cpuidle_devices, cpu);
per_cpu           765 drivers/cpuidle/coupled.c 	dev = per_cpu(cpuidle_devices, cpu);
per_cpu           157 drivers/cpuidle/cpuidle-arm.c 		dev = per_cpu(cpuidle_devices, cpu);
per_cpu           108 drivers/cpuidle/cpuidle-cps.c 		device = &per_cpu(cpuidle_dev, cpu);
per_cpu           159 drivers/cpuidle/cpuidle-cps.c 		device = &per_cpu(cpuidle_dev, cpu);
per_cpu           166 drivers/cpuidle/cpuidle-powernv.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
per_cpu           178 drivers/cpuidle/cpuidle-powernv.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
per_cpu           108 drivers/cpuidle/cpuidle-psci.c 	per_cpu(psci_power_state, cpu) = psci_states;
per_cpu           228 drivers/cpuidle/cpuidle-psci.c 		dev = per_cpu(cpuidle_devices, cpu);
per_cpu           193 drivers/cpuidle/cpuidle-pseries.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
per_cpu           205 drivers/cpuidle/cpuidle-pseries.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
per_cpu           549 drivers/cpuidle/cpuidle.c 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
per_cpu           577 drivers/cpuidle/cpuidle.c 	per_cpu(cpuidle_devices, dev->cpu) = dev;
per_cpu           672 drivers/cpuidle/cpuidle.c 		device = &per_cpu(cpuidle_dev, cpu);
per_cpu           704 drivers/cpuidle/cpuidle.c 		device = &per_cpu(cpuidle_dev, cpu);
per_cpu            37 drivers/cpuidle/driver.c 	return per_cpu(cpuidle_drivers, cpu);
per_cpu            57 drivers/cpuidle/driver.c 		per_cpu(cpuidle_drivers, cpu) = NULL;
per_cpu            81 drivers/cpuidle/driver.c 		per_cpu(cpuidle_drivers, cpu) = drv;
per_cpu           138 drivers/cpuidle/governors/ladder.c 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
per_cpu           558 drivers/cpuidle/governors/menu.c 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
per_cpu           444 drivers/crypto/caam/qi.c 	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
per_cpu           516 drivers/crypto/caam/qi.c 		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
per_cpu           643 drivers/crypto/caam/qi.c 	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
per_cpu           704 drivers/crypto/caam/qi.c 		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
per_cpu           731 drivers/crypto/nx/nx-842-powernv.c 				per_cpu(cpu_txwin, i) = txwin;
per_cpu           736 drivers/crypto/nx/nx-842-powernv.c 		if (!per_cpu(cpu_txwin, i)) {
per_cpu           954 drivers/crypto/nx/nx-842-powernv.c 		txwin = per_cpu(cpu_txwin, i);
per_cpu           958 drivers/crypto/nx/nx-842-powernv.c 		per_cpu(cpu_txwin, i) = 0;
per_cpu           158 drivers/crypto/padlock-aes.c 		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
per_cpu           159 drivers/crypto/padlock-aes.c 		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
per_cpu           160 drivers/crypto/padlock-aes.c 			per_cpu(paes_last_cword, cpu) = NULL;
per_cpu           172 drivers/crypto/padlock-aes.c 	if (cword != per_cpu(paes_last_cword, cpu))
per_cpu           182 drivers/crypto/padlock-aes.c 	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
per_cpu           396 drivers/firmware/psci/psci_checker.c 		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
per_cpu           391 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
per_cpu           423 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
per_cpu           442 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
per_cpu           460 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
per_cpu           574 drivers/hwtracing/coresight/coresight-cpu-debug.c 	if (per_cpu(debug_drvdata, drvdata->cpu)) {
per_cpu           591 drivers/hwtracing/coresight/coresight-cpu-debug.c 	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
per_cpu           626 drivers/hwtracing/coresight/coresight-cpu-debug.c 	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
per_cpu           635 drivers/hwtracing/coresight/coresight-cpu-debug.c 	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
per_cpu           245 drivers/hwtracing/coresight/coresight-etm-perf.c 		csdev = per_cpu(csdev_src, cpu);
per_cpu           299 drivers/hwtracing/coresight/coresight-etm-perf.c 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
per_cpu           347 drivers/hwtracing/coresight/coresight-etm-perf.c 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
per_cpu           501 drivers/hwtracing/coresight/coresight-etm-perf.c 		per_cpu(csdev_src, cpu) = csdev;
per_cpu           504 drivers/hwtracing/coresight/coresight-etm-perf.c 		per_cpu(csdev_src, cpu) = NULL;
per_cpu           707 drivers/hwtracing/coresight/coresight-platform.c 		pr = per_cpu(processors, i);
per_cpu           802 drivers/hwtracing/coresight/coresight.c 		per_cpu(tracer_path, cpu) = path;
per_cpu           842 drivers/hwtracing/coresight/coresight.c 		path = per_cpu(tracer_path, cpu);
per_cpu           843 drivers/hwtracing/coresight/coresight.c 		per_cpu(tracer_path, cpu) = NULL;
per_cpu           214 drivers/infiniband/sw/siw/siw_main.c 		usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
per_cpu           225 drivers/infiniband/sw/siw/siw_main.c 		atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
per_cpu           234 drivers/infiniband/sw/siw/siw_main.c 	atomic_dec(&per_cpu(siw_use_cnt, cpu));
per_cpu          1195 drivers/infiniband/sw/siw/siw_qp_tx.c 	wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
per_cpu          1203 drivers/infiniband/sw/siw/siw_qp_tx.c 	struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
per_cpu          1264 drivers/infiniband/sw/siw/siw_qp_tx.c 	llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);
per_cpu          1266 drivers/infiniband/sw/siw/siw_qp_tx.c 	wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);
per_cpu           265 drivers/irqchip/irq-csky-mpintc.c 		per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
per_cpu           266 drivers/irqchip/irq-csky-mpintc.c 		writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
per_cpu           967 drivers/irqchip/irq-gic-v3.c 	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
per_cpu           971 drivers/irqchip/irq-gic-v3.c 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
per_cpu           104 drivers/irqchip/irq-ompic.c 		set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
per_cpu           122 drivers/irqchip/irq-ompic.c 	unsigned long *pending_ops = &per_cpu(ops, cpu);
per_cpu           146 drivers/leds/trigger/ledtrig-cpu.c 		struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
per_cpu            77 drivers/oprofile/cpu_buffer.c 		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
per_cpu           104 drivers/oprofile/cpu_buffer.c 		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
per_cpu           124 drivers/oprofile/cpu_buffer.c 		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
per_cpu            64 drivers/oprofile/cpu_buffer.h 	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
per_cpu            39 drivers/oprofile/nmi_timer_int.c 	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
per_cpu            46 drivers/oprofile/nmi_timer_int.c 		per_cpu(nmi_timer_events, cpu) = event;
per_cpu            57 drivers/oprofile/nmi_timer_int.c 	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
per_cpu           107 drivers/oprofile/nmi_timer_int.c 		event = per_cpu(nmi_timer_events, cpu);
per_cpu           111 drivers/oprofile/nmi_timer_int.c 		per_cpu(nmi_timer_events, cpu) = NULL;
per_cpu            43 drivers/oprofile/oprofile_perf.c 		if (per_cpu(perf_events, cpu)[id] == event)
per_cpu            79 drivers/oprofile/oprofile_perf.c 	if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
per_cpu            96 drivers/oprofile/oprofile_perf.c 	per_cpu(perf_events, cpu)[event] = pevent;
per_cpu           103 drivers/oprofile/oprofile_perf.c 	struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
per_cpu           107 drivers/oprofile/oprofile_perf.c 		per_cpu(perf_events, cpu)[event] = NULL;
per_cpu           262 drivers/oprofile/oprofile_perf.c 			event = per_cpu(perf_events, cpu)[id];
per_cpu           267 drivers/oprofile/oprofile_perf.c 		kfree(per_cpu(perf_events, cpu));
per_cpu           301 drivers/oprofile/oprofile_perf.c 		per_cpu(perf_events, cpu) = kcalloc(num_counters,
per_cpu           303 drivers/oprofile/oprofile_perf.c 		if (!per_cpu(perf_events, cpu)) {
per_cpu            26 drivers/oprofile/oprofile_stats.c 		cpu_buf = &per_cpu(op_cpu_buffer, i);
per_cpu            54 drivers/oprofile/oprofile_stats.c 		cpu_buf = &per_cpu(op_cpu_buffer, i);
per_cpu            58 drivers/oprofile/timer_int.c 	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
per_cpu           540 drivers/perf/arm_pmu.c 		if (per_cpu(cpu_irq, cpu) == irq)
per_cpu           549 drivers/perf/arm_pmu.c 	if (per_cpu(cpu_irq, cpu) == 0)
per_cpu           551 drivers/perf/arm_pmu.c 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
per_cpu           559 drivers/perf/arm_pmu.c 	per_cpu(cpu_irq, cpu) = 0;
per_cpu           595 drivers/perf/arm_pmu.c 	per_cpu(cpu_irq, cpu) = irq;
per_cpu           606 drivers/perf/arm_pmu.c 	return per_cpu(hw_events->irq, cpu);
per_cpu           625 drivers/perf/arm_pmu.c 	per_cpu(cpu_armpmu, cpu) = pmu;
per_cpu           654 drivers/perf/arm_pmu.c 	per_cpu(cpu_armpmu, cpu) = NULL;
per_cpu           161 drivers/perf/arm_pmu_acpi.c 		per_cpu(pmu_irqs, cpu) = irq;
per_cpu           169 drivers/perf/arm_pmu_acpi.c 		irq = per_cpu(pmu_irqs, cpu);
per_cpu           180 drivers/perf/arm_pmu_acpi.c 			if (per_cpu(pmu_irqs, irq_cpu) == irq)
per_cpu           181 drivers/perf/arm_pmu_acpi.c 				per_cpu(pmu_irqs, irq_cpu) = 0;
per_cpu           195 drivers/perf/arm_pmu_acpi.c 		pmu = per_cpu(probed_pmus, cpu);
per_cpu           227 drivers/perf/arm_pmu_acpi.c 		int other_irq = per_cpu(hw_events->irq, cpu);
per_cpu           259 drivers/perf/arm_pmu_acpi.c 	if (per_cpu(probed_pmus, cpu))
per_cpu           262 drivers/perf/arm_pmu_acpi.c 	irq = per_cpu(pmu_irqs, cpu);
per_cpu           268 drivers/perf/arm_pmu_acpi.c 	per_cpu(probed_pmus, cpu) = pmu;
per_cpu           272 drivers/perf/arm_pmu_acpi.c 		per_cpu(hw_events->irq, cpu) = irq;
per_cpu           306 drivers/perf/arm_pmu_acpi.c 		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
per_cpu            55 drivers/perf/arm_pmu_platform.c 		per_cpu(hw_events->irq, cpu) = irq;
per_cpu           148 drivers/perf/arm_pmu_platform.c 		if (per_cpu(hw_events->irq, cpu)) {
per_cpu           153 drivers/perf/arm_pmu_platform.c 		per_cpu(hw_events->irq, cpu) = irq;
per_cpu           166 drivers/perf/arm_pmu_platform.c 		int irq = per_cpu(hw_events->irq, cpu);
per_cpu           184 drivers/perf/arm_pmu_platform.c 		int irq = per_cpu(hw_events->irq, cpu);
per_cpu           133 drivers/powercap/idle_inject.c 	ii_dev = per_cpu(idle_inject_device, cpu);
per_cpu           303 drivers/powercap/idle_inject.c 		if (per_cpu(idle_inject_device, cpu)) {
per_cpu           308 drivers/powercap/idle_inject.c 		per_cpu(idle_inject_device, cpu) = ii_dev;
per_cpu           317 drivers/powercap/idle_inject.c 		per_cpu(idle_inject_device, cpu_rb) = NULL;
per_cpu           340 drivers/powercap/idle_inject.c 		per_cpu(idle_inject_device, cpu) = NULL;
per_cpu          2626 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p = &per_cpu(bnx2fc_percpu, cpu);
per_cpu          2650 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p = &per_cpu(bnx2fc_percpu, cpu);
per_cpu          2735 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		p = &per_cpu(bnx2fc_percpu, cpu);
per_cpu          1010 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	fps = &per_cpu(bnx2fc_percpu, cpu);
per_cpu          1918 drivers/scsi/bnx2i/bnx2i_hwi.c 	p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
per_cpu           416 drivers/scsi/bnx2i/bnx2i_init.c 	p = &per_cpu(bnx2i_percpu, cpu);
per_cpu           438 drivers/scsi/bnx2i/bnx2i_init.c 	p = &per_cpu(bnx2i_percpu, cpu);
per_cpu           495 drivers/scsi/bnx2i/bnx2i_init.c 		p = &per_cpu(bnx2i_percpu, cpu);
per_cpu          1492 drivers/scsi/bnx2i/bnx2i_iscsi.c 			p = &per_cpu(bnx2i_percpu, cpu);
per_cpu          1417 drivers/scsi/fcoe/fcoe.c 	fps = &per_cpu(fcoe_percpu, cpu);
per_cpu          2344 drivers/scsi/fcoe/fcoe.c 		pp = &per_cpu(fcoe_percpu, cpu);
per_cpu          1221 drivers/scsi/qedi/qedi_main.c 	p = &per_cpu(qedi_percpu, cpu);
per_cpu          2725 drivers/scsi/qedi/qedi_main.c 		p = &per_cpu(qedi_percpu, cpu);
per_cpu           599 drivers/soc/fsl/qbman/bman.c 	portal = &per_cpu(bman_affine_portal, c->cpu);
per_cpu          1366 drivers/soc/fsl/qbman/qman.c 	portal = &per_cpu(qman_affine_portal, c->cpu);
per_cpu           262 drivers/soc/qcom/spm.c 	per_cpu(qcom_idle_ops, cpu) = fns;
per_cpu           270 drivers/soc/qcom/spm.c 	return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
per_cpu           365 drivers/soc/qcom/spm.c 	per_cpu(cpu_spm_drv, cpu) = drv;
per_cpu            52 drivers/xen/events/events_2l.c 	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
per_cpu            53 drivers/xen/events/events_2l.c 	set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
per_cpu           150 drivers/xen/events/events_2l.c 		per_cpu(cpu_evtchn_mask, cpu)[idx] &
per_cpu           269 drivers/xen/events/events_2l.c 	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
per_cpu           281 drivers/xen/events/events_2l.c 		v = per_cpu(xen_vcpu, i);
per_cpu           290 drivers/xen/events/events_2l.c 	v = per_cpu(xen_vcpu, cpu);
per_cpu           354 drivers/xen/events/events_2l.c 		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
per_cpu           203 drivers/xen/events/events_base.c 	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
per_cpu           217 drivers/xen/events/events_base.c 	per_cpu(virq_to_irq, cpu)[virq] = irq;
per_cpu           264 drivers/xen/events/events_base.c 	return per_cpu(virq_to_irq, cpu)[virq];
per_cpu           620 drivers/xen/events/events_base.c 			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
per_cpu           623 drivers/xen/events/events_base.c 			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
per_cpu           878 drivers/xen/events/events_base.c 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
per_cpu           969 drivers/xen/events/events_base.c 	irq = per_cpu(virq_to_irq, cpu)[virq];
per_cpu          1211 drivers/xen/events/events_base.c 	irq = per_cpu(ipi_to_irq, cpu)[vector];
per_cpu          1456 drivers/xen/events/events_base.c 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
per_cpu          1481 drivers/xen/events/events_base.c 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
per_cpu           105 drivers/xen/events/events_fifo.c 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
per_cpu           287 drivers/xen/events/events_fifo.c 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
per_cpu           333 drivers/xen/events/events_fifo.c 	control_block = per_cpu(cpu_control_block, cpu);
per_cpu           354 drivers/xen/events/events_fifo.c 		void *control_block = per_cpu(cpu_control_block, cpu);
per_cpu           367 drivers/xen/events/events_fifo.c 			per_cpu(cpu_control_block, cpu) = NULL;
per_cpu           411 drivers/xen/events/events_fifo.c 	per_cpu(cpu_control_block, cpu) = control_block;
per_cpu           422 drivers/xen/events/events_fifo.c 	if (!per_cpu(cpu_control_block, cpu))
per_cpu            81 drivers/xen/time.c 		res->time[i] += per_cpu(old_runstate_time, cpu)[i];
per_cpu           122 drivers/xen/time.c 				per_cpu(old_runstate_time, cpu)[i] +=
per_cpu           149 drivers/xen/time.c 	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
per_cpu           164 drivers/xen/time.c 	area.addr.v = &per_cpu(xen_runstate, cpu);
per_cpu           471 drivers/xen/xen-acpi-processor.c 		_pr = per_cpu(processors, i /* APIC ID */);
per_cpu           553 drivers/xen/xen-acpi-processor.c 		pr = per_cpu(processors, i);
per_cpu          3350 fs/buffer.c    		tot += per_cpu(bh_accounting, i).nr;
per_cpu          3382 fs/buffer.c    	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
per_cpu          3388 fs/buffer.c    	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
per_cpu          3389 fs/buffer.c    	per_cpu(bh_accounting, cpu).nr = 0;
per_cpu           145 fs/dcache.c    		sum += per_cpu(nr_dentry, i);
per_cpu           154 fs/dcache.c    		sum += per_cpu(nr_dentry_unused, i);
per_cpu           164 fs/dcache.c    		sum += per_cpu(nr_dentry_negative, i);
per_cpu           127 fs/fscache/main.c 		init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
per_cpu            85 fs/inode.c     		sum += per_cpu(nr_inodes, i);
per_cpu            94 fs/inode.c     		sum += per_cpu(nr_unused, i);
per_cpu           929 fs/nfsd/filecache.c 		hits += per_cpu(nfsd_file_cache_hits, i);
per_cpu            23 include/linux/arch_topology.h 	return per_cpu(cpu_scale, cpu);
per_cpu            33 include/linux/arch_topology.h 	return per_cpu(freq_scale, cpu);
per_cpu            22 include/linux/irq_cpustat.h #define __IRQ_STAT(cpu, member)	(per_cpu(irq_stat.member, cpu))
per_cpu            49 include/linux/kernel_stat.h #define kstat_cpu(cpu) per_cpu(kstat, cpu)
per_cpu            50 include/linux/kernel_stat.h #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
per_cpu           615 include/linux/memcontrol.h 		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
per_cpu           697 include/linux/memcontrol.h 		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
per_cpu            95 include/linux/topology.h 	return per_cpu(numa_node, cpu);
per_cpu           109 include/linux/topology.h 	per_cpu(numa_node, cpu) = node;
per_cpu           161 include/linux/topology.h 	return per_cpu(_numa_mem_, cpu);
per_cpu           168 include/linux/topology.h 	per_cpu(_numa_mem_, cpu) = node;
per_cpu            17 include/xen/xen-ops.h 	return per_cpu(xen_vcpu_id, cpu);
per_cpu           281 init/calibrate.c 	if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
per_cpu           282 init/calibrate.c 		lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
per_cpu           306 init/calibrate.c 	per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
per_cpu           193 kernel/context_tracking.c 	if (!per_cpu(context_tracking.active, cpu)) {
per_cpu           194 kernel/context_tracking.c 		per_cpu(context_tracking.active, cpu) = true;
per_cpu           257 kernel/debug/debug_core.c 		csd = &per_cpu(kgdb_roundup_csd, cpu);
per_cpu          4377 kernel/events/core.c 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
per_cpu          8541 kernel/events/core.c 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
per_cpu          8561 kernel/events/core.c 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
per_cpu          10327 kernel/events/core.c 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
per_cpu          12115 kernel/events/core.c 		swhash = &per_cpu(swevent_htable, cpu);
per_cpu          12117 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
per_cpu          12119 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
per_cpu          12120 kernel/events/core.c 		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
per_cpu          12123 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
per_cpu          12125 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
per_cpu          12131 kernel/events/core.c 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
per_cpu           562 kernel/events/hw_breakpoint.c 		per_cpu(*cpu_events, cpu) = bp;
per_cpu           583 kernel/events/hw_breakpoint.c 		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));
per_cpu           155 kernel/fork.c  		total += per_cpu(process_counts, cpu);
per_cpu           113 kernel/irq_work.c 		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
per_cpu            77 kernel/locking/lock_events.c 		sum += per_cpu(lockevents[id], cpu);
per_cpu           237 kernel/locking/lockdep.c 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
per_cpu           264 kernel/locking/lockdep.c 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
per_cpu           213 kernel/locking/lockdep_internals.h 		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
per_cpu           234 kernel/locking/lockdep_internals.h 		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
per_cpu           118 kernel/locking/percpu-rwsem.c 		__sum += per_cpu(var, cpu);				\
per_cpu            52 kernel/locking/qspinlock_stat.h 		sum += per_cpu(lockevents[id], cpu);
per_cpu            60 kernel/locking/qspinlock_stat.h 			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
per_cpu            64 kernel/locking/qspinlock_stat.h 			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
per_cpu           112 kernel/locking/qspinlock_stat.h 	per_cpu(pv_kick_time, cpu) = start;
per_cpu           181 kernel/power/energy_model.c 	return READ_ONCE(per_cpu(em_data, cpu));
per_cpu           217 kernel/power/energy_model.c 		if (READ_ONCE(per_cpu(em_data, cpu))) {
per_cpu           249 kernel/printk/printk_safe.c 		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
per_cpu           251 kernel/printk/printk_safe.c 		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
per_cpu           395 kernel/printk/printk_safe.c 		s = &per_cpu(safe_print_seq, cpu);
per_cpu           399 kernel/printk/printk_safe.c 		s = &per_cpu(nmi_print_seq, cpu);
per_cpu           244 kernel/profile.c 	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
per_cpu           252 kernel/profile.c 	j = per_cpu(cpu_profile_flip, get_cpu());
per_cpu           256 kernel/profile.c 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
per_cpu           275 kernel/profile.c 	i = per_cpu(cpu_profile_flip, get_cpu());
per_cpu           279 kernel/profile.c 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
per_cpu           295 kernel/profile.c 	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
per_cpu           343 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[i]) {
per_cpu           344 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
per_cpu           345 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
per_cpu           357 kernel/profile.c 	per_cpu(cpu_profile_flip, cpu) = 0;
per_cpu           360 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[i])
per_cpu           368 kernel/profile.c 		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
per_cpu          1411 kernel/rcu/rcutorture.c 			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
per_cpu          1412 kernel/rcu/rcutorture.c 			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
per_cpu          2398 kernel/rcu/rcutorture.c 			per_cpu(rcu_torture_count, cpu)[i] = 0;
per_cpu          2399 kernel/rcu/rcutorture.c 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
per_cpu           345 kernel/rcu/tree.c 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
per_cpu          1055 kernel/rcu/tree.c 	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
per_cpu          2428 kernel/rcu/tree.c 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
per_cpu          2488 kernel/rcu/tree.c 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
per_cpu          3132 kernel/rcu/tree.c 	if (per_cpu(rcu_cpu_started, cpu))
per_cpu          3135 kernel/rcu/tree.c 	per_cpu(rcu_cpu_started, cpu) = 1;
per_cpu          3197 kernel/rcu/tree.c 	per_cpu(rcu_cpu_started, cpu) = 0;
per_cpu           264 kernel/rcu/tree_stall.h 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
per_cpu           105 kernel/sched/clock.c 	return &per_cpu(sched_clock_data, cpu);
per_cpu           168 kernel/sched/clock.c 		per_cpu(sched_clock_data, cpu) = *scd;
per_cpu          2386 kernel/sched/core.c 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
per_cpu          6591 kernel/sched/core.c 		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
per_cpu          6593 kernel/sched/core.c 		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
per_cpu            39 kernel/sched/cpufreq.c 	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
per_cpu            43 kernel/sched/cpufreq.c 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
per_cpu            59 kernel/sched/cpufreq.c 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
per_cpu           507 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
per_cpu           847 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
per_cpu           855 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
per_cpu           519 kernel/sched/deadline.c 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
per_cpu           524 kernel/sched/deadline.c 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
per_cpu          2290 kernel/sched/deadline.c 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
per_cpu          1767 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
per_cpu          5833 kernel/sched/fair.c 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
per_cpu          5842 kernel/sched/fair.c 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
per_cpu          6044 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_llc, target));
per_cpu          9451 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
per_cpu          9466 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
per_cpu          9487 kernel/sched/fair.c 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
per_cpu          9516 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
per_cpu          9546 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
per_cpu           366 kernel/sched/rt.c 	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
per_cpu           371 kernel/sched/rt.c 	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
per_cpu          2208 kernel/sched/rt.c 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
per_cpu          1051 kernel/sched/sched.h #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
per_cpu          2266 kernel/sched/sched.h 	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
per_cpu           355 kernel/sched/topology.c 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
per_cpu           641 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu           642 kernel/sched/topology.c 	per_cpu(sd_llc_size, cpu) = size;
per_cpu           643 kernel/sched/topology.c 	per_cpu(sd_llc_id, cpu) = id;
per_cpu           644 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
per_cpu           647 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
per_cpu           650 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
per_cpu           653 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
per_cpu          2169 kernel/sched/topology.c 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
per_cpu            45 kernel/smp.c   	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
per_cpu            67 kernel/smp.c   	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
per_cpu            95 kernel/smp.c   		init_llist_head(&per_cpu(call_single_queue, i));
per_cpu           179 kernel/smp.c   	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
per_cpu           473 kernel/smp.c   		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
per_cpu            32 kernel/smpboot.c 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
per_cpu            42 kernel/smpboot.c 	per_cpu(idle_threads, smp_processor_id()) = current;
per_cpu            53 kernel/smpboot.c 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
per_cpu            60 kernel/smpboot.c 			per_cpu(idle_threads, cpu) = tsk;
per_cpu           337 kernel/smpboot.c 	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
per_cpu           355 kernel/smpboot.c 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
per_cpu           359 kernel/smpboot.c 	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {
per_cpu           364 kernel/smpboot.c 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
per_cpu           410 kernel/smpboot.c 	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
per_cpu           428 kernel/smpboot.c 	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
per_cpu           433 kernel/smpboot.c 	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
per_cpu           441 kernel/smpboot.c 	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
per_cpu           445 kernel/smpboot.c 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
per_cpu           448 kernel/smpboot.c 		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
per_cpu           472 kernel/smpboot.c 		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
per_cpu           477 kernel/smpboot.c 	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
per_cpu           580 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).tail =
per_cpu           581 kernel/softirq.c 			&per_cpu(tasklet_vec, cpu).head;
per_cpu           582 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).tail =
per_cpu           583 kernel/softirq.c 			&per_cpu(tasklet_hi_vec, cpu).head;
per_cpu           632 kernel/softirq.c 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
per_cpu           637 kernel/softirq.c 				per_cpu(tasklet_vec, cpu).tail = i;
per_cpu           650 kernel/softirq.c 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
per_cpu           651 kernel/softirq.c 		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
per_cpu           652 kernel/softirq.c 		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
per_cpu           653 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).head = NULL;
per_cpu           654 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
per_cpu           658 kernel/softirq.c 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
per_cpu           659 kernel/softirq.c 		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
per_cpu           660 kernel/softirq.c 		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
per_cpu           661 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
per_cpu           662 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
per_cpu            79 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           390 kernel/stop_machine.c 		work = &per_cpu(cpu_stopper.stop_work, cpu);
per_cpu           487 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           499 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           535 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           549 kernel/stop_machine.c 	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
per_cpu           554 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           561 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           582 kernel/stop_machine.c 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
per_cpu           305 kernel/taskstats.c 			listeners = &per_cpu(listener_array, cpu);
per_cpu           323 kernel/taskstats.c 		listeners = &per_cpu(listener_array, cpu);
per_cpu           702 kernel/taskstats.c 		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
per_cpu           703 kernel/taskstats.c 		init_rwsem(&(per_cpu(listener_array, i).sem));
per_cpu           395 kernel/time/clockevents.c 	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
per_cpu           729 kernel/time/clockevents.c 		&per_cpu(tick_cpu_device, dev->id);
per_cpu           743 kernel/time/clockevents.c 	return &per_cpu(tick_cpu_device, dev->id);
per_cpu           753 kernel/time/clockevents.c 		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
per_cpu           203 kernel/time/hrtimer.c 		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
per_cpu          1990 kernel/time/hrtimer.c 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
per_cpu          2057 kernel/time/hrtimer.c 	old_base = &per_cpu(hrtimer_bases, scpu);
per_cpu           299 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
per_cpu           626 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
per_cpu           874 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
per_cpu            63 kernel/time/tick-common.c 	return &per_cpu(tick_cpu_device, cpu);
per_cpu           345 kernel/time/tick-common.c 	td = &per_cpu(tick_cpu_device, cpu);
per_cpu           429 kernel/time/tick-common.c 	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
per_cpu            42 kernel/time/tick-sched.c 	return &per_cpu(tick_cpu_sched, cpu);
per_cpu           262 kernel/time/tick-sched.c 	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
per_cpu           568 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu           609 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu          1360 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
per_cpu          1379 kernel/time/tick-sched.c 		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
per_cpu           132 kernel/time/timer_list.c 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
per_cpu           486 kernel/trace/fgraph.c 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
per_cpu           491 kernel/trace/fgraph.c 		ret_stack = per_cpu(idle_ret_stack, cpu);
per_cpu           499 kernel/trace/fgraph.c 			per_cpu(idle_ret_stack, cpu) = ret_stack;
per_cpu           649 kernel/trace/ftrace.c 	stat = &per_cpu(ftrace_profile_stats, cpu);
per_cpu           970 kernel/trace/ftrace.c 		stat = &per_cpu(ftrace_profile_stats, cpu);
per_cpu           690 kernel/trace/trace.c 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
per_cpu           699 kernel/trace/trace.c 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
per_cpu           709 kernel/trace/trace.c 		mutex_init(&per_cpu(cpu_access_lock, cpu));
per_cpu          2409 kernel/trace/trace.c 		per_cpu(trace_buffered_event, cpu) = event;
per_cpu          2414 kernel/trace/trace.c 		    per_cpu(trace_buffered_event, cpu))
per_cpu          2466 kernel/trace/trace.c 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
per_cpu          2467 kernel/trace/trace.c 		per_cpu(trace_buffered_event, cpu) = NULL;
per_cpu          3372 kernel/trace/trace_events.c 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
per_cpu          3391 kernel/trace/trace_events.c 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
per_cpu           113 kernel/trace/trace_irqsoff.c 	if (likely(!per_cpu(tracing_cpu, cpu)))
per_cpu           167 kernel/trace/trace_irqsoff.c 		per_cpu(tracing_cpu, cpu) = 0;
per_cpu           382 kernel/trace/trace_irqsoff.c 	if (per_cpu(tracing_cpu, cpu))
per_cpu           400 kernel/trace/trace_irqsoff.c 	per_cpu(tracing_cpu, cpu) = 1;
per_cpu           415 kernel/trace/trace_irqsoff.c 	if (unlikely(per_cpu(tracing_cpu, cpu)))
per_cpu           416 kernel/trace/trace_irqsoff.c 		per_cpu(tracing_cpu, cpu) = 0;
per_cpu           303 kernel/watchdog.c 		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
per_cpu           228 kernel/watchdog_hld.c 		struct perf_event *event = per_cpu(dead_event, cpu);
per_cpu           236 kernel/watchdog_hld.c 		per_cpu(dead_event, cpu) = NULL;
per_cpu           253 kernel/watchdog_hld.c 		struct perf_event *event = per_cpu(watchdog_ev, cpu);
per_cpu           275 kernel/watchdog_hld.c 		struct perf_event *event = per_cpu(watchdog_ev, cpu);
per_cpu           379 kernel/workqueue.c 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
per_cpu           380 kernel/workqueue.c 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
per_cpu          4159 kernel/workqueue.c 				per_cpu(cpu_worker_pools, cpu);
per_cpu          5356 kernel/workqueue.c static DEVICE_ATTR_RO(per_cpu);
per_cpu          5724 kernel/workqueue.c 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
per_cpu          5756 kernel/workqueue.c 				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
per_cpu          5784 kernel/workqueue.c 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
per_cpu          1009 lib/debugobjects.c 		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
per_cpu          1367 lib/debugobjects.c 		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
per_cpu           195 lib/irq_poll.c 	list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
per_cpu           208 lib/irq_poll.c 		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
per_cpu          1593 lib/radix-tree.c 	rtp = &per_cpu(radix_tree_preloads, cpu);
per_cpu           186 lib/random32.c 		struct rnd_state *state = &per_cpu(net_rand_state, i);
per_cpu           205 lib/random32.c 		struct rnd_state *state = &per_cpu(net_rand_state, i);
per_cpu            48 mm/kasan/tags.c 		per_cpu(prng_state, cpu) = (u32)get_cycles();
per_cpu            77 mm/kmemleak-test.c 		per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
per_cpu            79 mm/kmemleak-test.c 			per_cpu(kmemleak_test_pointer, i));
per_cpu           841 mm/memcontrol.c 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
per_cpu          2283 mm/memcontrol.c 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
per_cpu          2311 mm/memcontrol.c 	stock = &per_cpu(memcg_stock, cpu);
per_cpu          3481 mm/memcontrol.c 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
per_cpu          3496 mm/memcontrol.c 				stat[i] += per_cpu(
per_cpu          3516 mm/memcontrol.c 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
per_cpu          1505 mm/memory-failure.c 		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
per_cpu          5806 mm/page_alloc.c 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
per_cpu           486 mm/slab.c      	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
per_cpu           512 mm/slab.c      	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
per_cpu          1081 mm/slab.c      	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
per_cpu          1083 mm/slab.c      	per_cpu(slab_reap_work, cpu).work.func = NULL;
per_cpu           296 mm/swap.c      	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
per_cpu           304 mm/swap.c      	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
per_cpu           591 mm/swap.c      	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
per_cpu           596 mm/swap.c      	pvec = &per_cpu(lru_rotate_pvecs, cpu);
per_cpu           606 mm/swap.c      	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
per_cpu           610 mm/swap.c      	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
per_cpu           614 mm/swap.c      	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
per_cpu           726 mm/swap.c      		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
per_cpu           728 mm/swap.c      		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
per_cpu           729 mm/swap.c      		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
per_cpu           730 mm/swap.c      		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
per_cpu           731 mm/swap.c      		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
per_cpu           732 mm/swap.c      		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
per_cpu           741 mm/swap.c      		flush_work(&per_cpu(lru_add_drain_work, cpu));
per_cpu           138 mm/swap_slots.c 	cache = &per_cpu(swp_slots, cpu);
per_cpu           176 mm/swap_slots.c 	cache = &per_cpu(swp_slots, cpu);
per_cpu          1535 mm/vmalloc.c   	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
per_cpu          1680 mm/vmalloc.c   		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
per_cpu          1916 mm/vmalloc.c   		vbq = &per_cpu(vmap_block_queue, i);
per_cpu          1919 mm/vmalloc.c   		p = &per_cpu(vfree_deferred, i);
per_cpu           119 mm/vmstat.c    		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
per_cpu           147 mm/vmstat.c    	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
per_cpu          1900 mm/vmstat.c    		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
per_cpu          1942 mm/vmstat.c    	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
per_cpu          1242 mm/zsmalloc.c  	area = &per_cpu(zs_map_area, cpu);
per_cpu          1250 mm/zsmalloc.c  	area = &per_cpu(zs_map_area, cpu);
per_cpu           383 mm/zswap.c     	per_cpu(zswap_dstmem, cpu) = dst;
per_cpu           391 mm/zswap.c     	dst = per_cpu(zswap_dstmem, cpu);
per_cpu           393 mm/zswap.c     	per_cpu(zswap_dstmem, cpu) = NULL;
per_cpu          3935 net/core/dev.c 			per_cpu(softnet_data, next_cpu).input_queue_head;
per_cpu          4015 net/core/dev.c 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
per_cpu          4070 net/core/dev.c 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
per_cpu          4165 net/core/dev.c 	sd = &per_cpu(softnet_data, cpu);
per_cpu          9833 net/core/dev.c 	sd = &per_cpu(softnet_data, cpu);
per_cpu          9834 net/core/dev.c 	oldsd = &per_cpu(softnet_data, oldcpu);
per_cpu          10192 net/core/dev.c 		struct softnet_data *sd = &per_cpu(softnet_data, i);
per_cpu           992 net/core/drop_monitor.c 		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
per_cpu          1023 net/core/drop_monitor.c 		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
per_cpu          1053 net/core/drop_monitor.c 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
per_cpu          1104 net/core/drop_monitor.c 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
per_cpu          1347 net/core/drop_monitor.c 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
per_cpu          1391 net/core/drop_monitor.c 		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
per_cpu          1600 net/core/drop_monitor.c 	data = &per_cpu(dm_cpu_data, cpu);
per_cpu          1608 net/core/drop_monitor.c 	data = &per_cpu(dm_cpu_data, cpu);
per_cpu          1620 net/core/drop_monitor.c 	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
per_cpu          1628 net/core/drop_monitor.c 	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
per_cpu           125 net/core/net-procfs.c 			sd = &per_cpu(softnet_data, *pos);
per_cpu           137 net/core/sysctl_net_core.c 			sd = &per_cpu(softnet_data, i);
per_cpu           169 net/core/sysctl_net_core.c 			sd = &per_cpu(softnet_data, i);
per_cpu           607 net/ipv4/netfilter/arp_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
per_cpu           748 net/ipv4/netfilter/ip_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
per_cpu           259 net/ipv4/route.c 		return &per_cpu(rt_cache_stat, cpu);
per_cpu           272 net/ipv4/route.c 		return &per_cpu(rt_cache_stat, cpu);
per_cpu          1539 net/ipv4/route.c 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
per_cpu          3468 net/ipv4/route.c 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
per_cpu          3737 net/ipv4/tcp.c 		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
per_cpu          3747 net/ipv4/tcp.c 			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
per_cpu          3749 net/ipv4/tcp.c 		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
per_cpu          3758 net/ipv4/tcp.c 		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
per_cpu           907 net/ipv4/tcp_output.c 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
per_cpu           765 net/ipv6/netfilter/ip6_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
per_cpu           355 net/netfilter/nft_ct.c 		ct = per_cpu(nft_ct_pcpu_template, cpu);
per_cpu           359 net/netfilter/nft_ct.c 		per_cpu(nft_ct_pcpu_template, cpu) = NULL;
per_cpu           380 net/netfilter/nft_ct.c 		per_cpu(nft_ct_pcpu_template, cpu) = tmp;
per_cpu          1400 net/netfilter/x_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
per_cpu          1894 net/netfilter/x_tables.c 		seqcount_init(&per_cpu(xt_recseq, i));
per_cpu            97 net/rds/ib_stats.c 		src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
per_cpu            90 net/rds/page.c 	rem = &per_cpu(rds_page_remainders, get_cpu());
per_cpu           124 net/rds/page.c 		rem = &per_cpu(rds_page_remainders, get_cpu());
per_cpu           160 net/rds/page.c 		rem = &per_cpu(rds_page_remainders, cpu);
per_cpu           961 net/rds/rds.h  	per_cpu(which, get_cpu()).member++;		\
per_cpu           966 net/rds/rds.h  	per_cpu(which, get_cpu()).member += count;	\
per_cpu           130 net/rds/stats.c 		src = (uint64_t *)&(per_cpu(rds_stats, cpu));
per_cpu            64 net/rds/tcp_stats.c 		src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
per_cpu           803 net/xfrm/xfrm_input.c 		trans = &per_cpu(xfrm_trans_tasklet, i);
per_cpu          1541 security/apparmor/lsm.c 			kfree(per_cpu(aa_buffers, i).buf[j]);
per_cpu          1542 security/apparmor/lsm.c 			per_cpu(aa_buffers, i).buf[j] = NULL;
per_cpu          1565 security/apparmor/lsm.c 			per_cpu(aa_buffers, i).buf[j] = buffer;
per_cpu          1509 security/selinux/selinuxfs.c 		return &per_cpu(avc_cache_stats, cpu);
per_cpu           131 tools/perf/util/auxtrace.c 				   bool per_cpu)
per_cpu           135 tools/perf/util/auxtrace.c 	if (per_cpu) {
per_cpu           445 tools/perf/util/auxtrace.h 				   bool per_cpu);
per_cpu           718 tools/perf/util/auxtrace.h 				   bool per_cpu);
per_cpu           942 tools/perf/util/evsel.c 	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
per_cpu          1049 tools/perf/util/evsel.c 	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
per_cpu           160 tools/perf/util/mmap.c 					  bool per_cpu __maybe_unused)
per_cpu          1522 virt/kvm/arm/arm.c 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
per_cpu          1553 virt/kvm/arm/arm.c 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
per_cpu          1590 virt/kvm/arm/arm.c 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
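
The entries above all follow the same accessor pattern. As a minimal sketch (the variable and function names below are hypothetical and not taken from any file listed; only DEFINE_PER_CPU(), per_cpu() and for_each_possible_cpu() are the real kernel interfaces), a per-CPU variable is declared once and another CPU's instance is then addressed with per_cpu(var, cpu), typically while iterating the possible CPUs:

	/*
	 * Illustrative sketch only: the common pattern behind the entries
	 * above -- declare a per-CPU counter, then fold every CPU's copy
	 * into one total, as e.g. fs/dcache.c and fs/inode.c do.
	 */
	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	static DEFINE_PER_CPU(unsigned long, example_count);	/* hypothetical per-CPU variable */

	static unsigned long example_total(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(example_count, cpu);	/* read cpu's instance */

		return sum;
	}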