
Searched refs: per_cpu (Results 1 – 200 of 368) sorted by relevance
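All of the hits below go through the kernel's per-CPU variable accessor: per_cpu(var, cpu) names the copy of a DEFINE_PER_CPU variable that belongs to a given CPU. As a minimal sketch of the pattern these results illustrate (the variable and function names here are made up for illustration, not taken from any hit below):

    #include <linux/percpu.h>

    /* One instance of this counter exists for every possible CPU. */
    static DEFINE_PER_CPU(unsigned long, demo_hit_count);

    /* per_cpu() is the remote-access form: it can read or write any
     * CPU's copy, so the caller must cope with concurrent updates by
     * the owning CPU (here we only tolerate a slightly stale sum). */
    static unsigned long demo_sum_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(demo_hit_count, cpu);
            return sum;
    }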


/linux-4.4.14/arch/blackfin/mm/
sram-alloc.c:78 per_cpu(free_l1_ssram_head, cpu).next = in l1sram_init()
80 if (!per_cpu(free_l1_ssram_head, cpu).next) { in l1sram_init()
85 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; in l1sram_init()
86 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; in l1sram_init()
87 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; in l1sram_init()
88 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; in l1sram_init()
90 per_cpu(used_l1_ssram_head, cpu).next = NULL; in l1sram_init()
93 spin_lock_init(&per_cpu(l1sram_lock, cpu)); in l1sram_init()
106 per_cpu(free_l1_data_A_sram_head, cpu).next = in l1_data_sram_init()
108 if (!per_cpu(free_l1_data_A_sram_head, cpu).next) { in l1_data_sram_init()
[all …]
/linux-4.4.14/arch/x86/kernel/apic/
x2apic_cluster.c:23 return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16; in x2apic_cluster()
54 cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu); in __x2apic_send_IPI_mask()
60 dest |= per_cpu(x86_cpu_to_logical_apicid, i); in __x2apic_send_IPI_mask()
110 dest = per_cpu(x86_cpu_to_logical_apicid, i); in x2apic_cpu_mask_to_apicid_and()
123 dest |= per_cpu(x86_cpu_to_logical_apicid, i); in x2apic_cpu_mask_to_apicid_and()
136 per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR); in init_x2apic_ldr()
138 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu)); in init_x2apic_ldr()
142 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); in init_x2apic_ldr()
143 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); in init_x2apic_ldr()
159 if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu), in update_clusterinfo()
[all …]
ipi.c:33 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, in default_send_IPI_mask_sequence_phys()
52 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, in default_send_IPI_mask_allbutself_phys()
145 if (per_cpu(x86_cpu_to_apicid, i) == apic_id) in convert_apicid_to_cpu()
vector.c:179 if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) in __assign_irq_vector()
189 per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); in __assign_irq_vector()
262 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; in clear_irq_vector()
279 if (per_cpu(vector_irq, cpu)[vector] != desc) in clear_irq_vector()
281 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; in clear_irq_vector()
462 per_cpu(vector_irq, cpu)[vector] = desc; in __setup_vector_irq()
466 desc = per_cpu(vector_irq, cpu)[vector]; in __setup_vector_irq()
472 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; in __setup_vector_irq()
492 per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq); in setup_vector_irq()
744 per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED; in irq_force_complete_move()
bigsmp_32.c:45 id = per_cpu(x86_bios_cpu_apicid, cpu); in calculate_ldr()
78 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); in bigsmp_cpu_present_to_apicid()
x2apic_phys.c:54 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), in __x2apic_send_IPI_mask()
/linux-4.4.14/arch/x86/xen/
smp.c:120 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free()
121 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free()
122 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
123 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
124 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
126 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free()
127 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free()
128 per_cpu(xen_callfunc_irq, cpu).irq = -1; in xen_smp_intr_free()
129 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
130 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free()
[all …]
spinlock.c:256 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); in xen_unlock_kick()
283 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
284 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
296 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu()
297 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu()
308 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); in xen_uninit_lock_cpu()
309 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu()
310 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu()
311 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
time.c:101 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; in xen_vcpu_stolen()
108 area.addr.v = &per_cpu(xen_runstate, cpu); in xen_setup_runstate_info()
400 evt = &per_cpu(xen_clock_events, cpu).evt; in xen_teardown_timer()
410 struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); in xen_setup_timer()
pmu.c:539 per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; in xen_pmu_init()
540 per_cpu(xenpmu_shared, cpu).flags = 0; in xen_pmu_init()
568 free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0); in xen_pmu_finish()
569 per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL; in xen_pmu_finish()
enlighten.c:202 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) in xen_vcpu_setup()
206 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; in xen_vcpu_setup()
214 vcpup = &per_cpu(xen_vcpu_info, cpu); in xen_vcpu_setup()
235 per_cpu(xen_vcpu, cpu) = vcpup; in xen_vcpu_setup()
682 struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i]; in load_TLS_descriptor()
1606 xen_initial_gdt = &per_cpu(gdt_page, 0); in xen_start_kernel()
1620 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; in xen_start_kernel()
1766 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; in xen_hvm_init_shared_info()
/linux-4.4.14/kernel/
profile.c:240 per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu); in __profile_flip_buffers()
248 j = per_cpu(cpu_profile_flip, get_cpu()); in profile_flip_buffers()
252 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; in profile_flip_buffers()
271 i = per_cpu(cpu_profile_flip, get_cpu()); in profile_discard_flip_buffers()
275 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers()
291 hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)]; in do_profile_hits()
340 per_cpu(cpu_profile_flip, cpu) = 0; in profile_cpu_callback()
341 if (!per_cpu(cpu_profile_hits, cpu)[1]) { in profile_cpu_callback()
347 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); in profile_cpu_callback()
349 if (!per_cpu(cpu_profile_hits, cpu)[0]) { in profile_cpu_callback()
[all …]
smpboot.c:30 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_thread_get()
40 per_cpu(idle_threads, smp_processor_id()) = current; in idle_thread_set_boot_cpu()
51 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_init()
58 per_cpu(idle_threads, cpu) = tsk; in idle_init()
381 return atomic_read(&per_cpu(cpu_hotplug_state, cpu)); in cpu_report_state()
399 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare()
403 switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) { in cpu_check_up_prepare()
408 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare()
454 (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE); in cpu_set_state_online()
472 if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD) in cpu_wait_death()
[all …]
softirq.c:639 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
640 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
641 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
642 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
691 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { in tasklet_kill_immediate()
696 per_cpu(tasklet_vec, cpu).tail = i; in tasklet_kill_immediate()
709 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
711 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); in takeover_tasklets()
712 per_cpu(tasklet_vec, cpu).head = NULL; in takeover_tasklets()
[all …]
stop_machine.c:86 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_queue_work()
330 work = &per_cpu(cpu_stopper.stop_work, cpu); in queue_stop_cpus_work()
421 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_should_run()
433 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stopper_thread()
474 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in stop_machine_park()
488 sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu)); in cpu_stop_create()
493 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_park()
500 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in stop_machine_unpark()
521 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_init()
watchdog.c:248 per_cpu(watchdog_touch_ts, cpu) = 0; in touch_all_softlockup_watchdogs()
575 struct perf_event *event = per_cpu(watchdog_ev, cpu); in watchdog_nmi_enable()
638 per_cpu(watchdog_ev, cpu) = event; in watchdog_nmi_enable()
640 perf_event_enable(per_cpu(watchdog_ev, cpu)); in watchdog_nmi_enable()
647 struct perf_event *event = per_cpu(watchdog_ev, cpu); in watchdog_nmi_disable()
651 per_cpu(watchdog_ev, cpu) = NULL; in watchdog_nmi_disable()
695 ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); in watchdog_park_threads()
714 kthread_unpark(per_cpu(softlockup_watchdog, cpu)); in watchdog_unpark_threads()
context_tracking.c:192 if (!per_cpu(context_tracking.active, cpu)) { in context_tracking_cpu_set()
193 per_cpu(context_tracking.active, cpu) = true; in context_tracking_cpu_set()
smp.c:40 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in hotplug_cfd()
95 init_llist_head(&per_cpu(call_single_queue, i)); in call_function_init()
181 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) in generic_exec_single()
456 llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); in smp_call_function_many()
taskstats.c:306 listeners = &per_cpu(listener_array, cpu); in add_del_listener()
324 listeners = &per_cpu(listener_array, cpu); in add_del_listener()
688 INIT_LIST_HEAD(&(per_cpu(listener_array, i).list)); in taskstats_init_early()
689 init_rwsem(&(per_cpu(listener_array, i).sem)); in taskstats_init_early()
/linux-4.4.14/arch/x86/oprofile/
nmi_int.c:156 kfree(per_cpu(cpu_msrs, i).multiplex); in nmi_shutdown_mux()
157 per_cpu(cpu_msrs, i).multiplex = NULL; in nmi_shutdown_mux()
158 per_cpu(switch_index, i) = 0; in nmi_shutdown_mux()
172 per_cpu(cpu_msrs, i).multiplex = in nmi_setup_mux()
174 if (!per_cpu(cpu_msrs, i).multiplex) in nmi_setup_mux()
197 per_cpu(switch_index, cpu) = 0; in nmi_cpu_setup_mux()
229 int si = per_cpu(switch_index, cpu); in nmi_cpu_switch()
230 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); in nmi_cpu_switch()
238 per_cpu(switch_index, cpu) = 0; in nmi_cpu_switch()
240 per_cpu(switch_index, cpu) = si; in nmi_cpu_switch()
[all …]
/linux-4.4.14/arch/s390/include/asm/
topology.h:25 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
26 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
28 (&per_cpu(cpu_topology, cpu).thread_mask)
29 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
30 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
31 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
32 #define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
64 return per_cpu(cpu_topology, cpu).node_id; in cpu_to_node()
/linux-4.4.14/drivers/cpufreq/
speedstep-centrino.c:259 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table()
294 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock()
295 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock()
296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock()
301 if ((!per_cpu(centrino_model, cpu)) || in extract_clock()
302 (!per_cpu(centrino_model, cpu)->op_points)) in extract_clock()
307 per_cpu(centrino_model, cpu)->op_points[i].frequency in extract_clock()
310 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) in extract_clock()
311 return per_cpu(centrino_model, cpu)-> in extract_clock()
315 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; in extract_clock()
[all …]
arm_big_little.c:88 cpu_freq = per_cpu(cpu_last_req_freq, j); in find_cluster_maxfreq()
90 if ((cluster == per_cpu(physical_cluster, j)) && in find_cluster_maxfreq()
103 u32 cur_cluster = per_cpu(physical_cluster, cpu); in clk_get_cpu_rate()
119 pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, in bL_cpufreq_get_rate()
122 return per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_get_rate()
138 prev_rate = per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_set_rate()
139 per_cpu(cpu_last_req_freq, cpu) = rate; in bL_cpufreq_set_rate()
140 per_cpu(physical_cluster, cpu) = new_cluster; in bL_cpufreq_set_rate()
169 per_cpu(cpu_last_req_freq, cpu) = prev_rate; in bL_cpufreq_set_rate()
170 per_cpu(physical_cluster, cpu) = old_cluster; in bL_cpufreq_set_rate()
[all …]
sh-cpufreq.c:35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; in sh_cpufreq_get()
46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_target()
83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); in sh_cpufreq_verify()
102 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_cpu_init()
144 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_cpu_exit()
cpufreq_userspace.c:38 if (!per_cpu(cpu_is_managed, policy->cpu)) in cpufreq_set()
64 per_cpu(cpu_is_managed, cpu) = 1; in cpufreq_governor_userspace()
71 per_cpu(cpu_is_managed, cpu) = 0; in cpufreq_governor_userspace()
cpufreq_ondemand.c:42 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); in ondemand_powersave_bias_init_cpu()
83 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in generic_powersave_bias_target()
157 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); in od_check_cpu()
199 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in od_dbs_timer()
267 dbs_info = &per_cpu(od_cpu_dbs_info, cpu); in update_sampling_rate()
314 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in store_io_is_busy()
353 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in store_sampling_down_factor()
384 dbs_info = &per_cpu(od_cpu_dbs_info, j); in store_ignore_nice_load()
550 shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared; in od_set_powersave_bias()
cpufreq_conservative.c:62 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); in cs_check_cpu()
134 &per_cpu(cs_cpu_dbs_info, freq->cpu); in dbs_cpufreq_notifier()
246 dbs_info = &per_cpu(cs_cpu_dbs_info, j); in store_ignore_nice_load()
cpufreq_governor.h:114 return &per_cpu(_dbs_info, cpu).cdbs; \
119 return &per_cpu(_dbs_info, cpu); \
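The trailing backslashes in the two cpufreq_governor.h hits are macro continuation lines: the header stamps out per-governor accessor functions around per_cpu. A hedged sketch of that accessor-generating idiom, modeled on the hit lines above rather than copied from the 4.4 header:

    /* Each governor supplies its own DEFINE_PER_CPU dbs_info variable;
     * the macro then defines typed lookup helpers for it. */
    #define define_get_cpu_dbs_routines(_dbs_info)                  \
    static struct cpu_dbs_info *get_cpu_cdbs(int cpu)               \
    {                                                               \
            return &per_cpu(_dbs_info, cpu).cdbs;                   \
    }                                                               \
                                                                    \
    static void *get_cpu_dbs_info_s(int cpu)                        \
    {                                                               \
            return &per_cpu(_dbs_info, cpu);                        \
    }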
/linux-4.4.14/arch/arm/mm/
context.c:70 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
72 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
147 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
156 asid = per_cpu(reserved_asids, i); in flush_context()
158 per_cpu(reserved_asids, i) = asid; in flush_context()
183 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
185 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
258 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
274 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
/linux-4.4.14/arch/mips/kernel/
mips-cpc.c:58 spin_lock_init(&per_cpu(cpc_core_lock, cpu)); in mips_cpc_probe()
76 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_lock_other()
77 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_lock_other()
90 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_unlock_other()
91 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_unlock_other()
mips-cm.c:257 spin_lock_init(&per_cpu(cm_core_lock, cpu)); in mips_cm_probe()
269 spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), in mips_cm_lock_other()
270 per_cpu(cm_core_lock_flags, curr_core)); in mips_cm_lock_other()
293 spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), in mips_cm_unlock_other()
294 per_cpu(cm_core_lock_flags, curr_core)); in mips_cm_unlock_other()
cevt-sb1250.c:113 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); in sb1250_clockevent_init()
114 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); in sb1250_clockevent_init()
115 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); in sb1250_clockevent_init()
cevt-bcm1480.c:114 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); in sb1480_clockevent_init()
115 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); in sb1480_clockevent_init()
116 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); in sb1480_clockevent_init()
pm-cps.c:132 entry = per_cpu(nc_asm_enter, core)[state]; in cps_pm_enter_state()
168 core_ready_count = per_cpu(ready_count, core); in cps_pm_enter_state()
176 coupled_barrier(&per_cpu(pm_barrier, core), online); in cps_pm_enter_state()
632 if (per_cpu(nc_asm_enter, core)[state]) in cps_gen_core_entries()
644 per_cpu(nc_asm_enter, core)[state] = entry_fn; in cps_gen_core_entries()
647 if (!per_cpu(ready_count, core)) { in cps_gen_core_entries()
653 per_cpu(ready_count_alloc, core) = core_rc; in cps_gen_core_entries()
658 per_cpu(ready_count, core) = core_rc; in cps_gen_core_entries()
smp.c:469 count = &per_cpu(tick_broadcast_count, cpu); in tick_broadcast()
470 csd = &per_cpu(tick_broadcast_csd, cpu); in tick_broadcast()
481 atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); in tick_broadcast_callee()
490 csd = &per_cpu(tick_broadcast_csd, cpu); in tick_broadcast_init()
cevt-r4k.c:75 cd = &per_cpu(mips_clockevent_device, cpu); in c0_compare_interrupt()
195 cd = &per_cpu(mips_clockevent_device, cpu); in r4k_clockevent_init()
topology.c:20 struct cpu *c = &per_cpu(cpu_devices, i); in topology_init()
smp-bmips.c:320 per_cpu(ipi_action_mask, cpu) |= action; in bmips43xx_send_ipi_single()
332 per_cpu(ipi_action_mask, cpu) = 0; in bmips43xx_ipi_interrupt()
/linux-4.4.14/arch/powerpc/kernel/
irq.c:361 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); in arch_show_interrupts()
366 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); in arch_show_interrupts()
371 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); in arch_show_interrupts()
376 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); in arch_show_interrupts()
381 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); in arch_show_interrupts()
388 per_cpu(irq_stat, j).hmi_exceptions); in arch_show_interrupts()
396 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); in arch_show_interrupts()
409 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; in arch_irq_stat_cpu()
411 sum += per_cpu(irq_stat, cpu).pmu_irqs; in arch_irq_stat_cpu()
412 sum += per_cpu(irq_stat, cpu).mce_exceptions; in arch_irq_stat_cpu()
[all …]
smp.c:216 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_data()
223 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_message_pass()
352 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); in smp_store_cpu_info()
354 per_cpu(next_tlbcam_idx, id) in smp_store_cpu_info()
376 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
378 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
430 if (per_cpu(cpu_state, cpu) == CPU_DEAD) in generic_cpu_die()
439 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
449 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up()
454 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
sysfs.c:53 per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; in store_smt_snooze_delay()
63 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); in show_smt_snooze_delay()
79 per_cpu(smt_snooze_delay, cpu) = snooze; in setup_smt_snooze_delay()
708 struct cpu *c = &per_cpu(cpu_devices, cpu); in register_cpu_online()
790 struct cpu *c = &per_cpu(cpu_devices, cpu); in unregister_cpu_online()
1033 struct cpu *c = &per_cpu(cpu_devices, cpu); in topology_init()
cacheinfo.c:499 WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL); in cacheinfo_create_cache_dir()
501 per_cpu(cache_dir_pcpu, cpu_id) = cache_dir; in cacheinfo_create_cache_dir()
856 cache_dir = per_cpu(cache_dir_pcpu, cpu_id); in cacheinfo_cpu_offline()
862 per_cpu(cache_dir_pcpu, cpu_id) = NULL; in cacheinfo_cpu_offline()
/linux-4.4.14/arch/ia64/mm/
tlb.c:93 per_cpu(ia64_need_tlb_flush, i) = 1; in wrap_mmu_context()
368 per_cpu(ia64_tr_num, cpu) = 8; in ia64_tlb_init()
371 per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; in ia64_tlb_init()
372 if (per_cpu(ia64_tr_num, cpu) > in ia64_tlb_init()
374 per_cpu(ia64_tr_num, cpu) = in ia64_tlb_init()
376 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { in ia64_tlb_init()
378 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; in ia64_tlb_init()
442 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); in ia64_itr_entry()
454 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); in ia64_itr_entry()
465 for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { in ia64_itr_entry()
[all …]
/linux-4.4.14/arch/arm64/mm/
context.c:58 asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); in flush_context()
67 asid = per_cpu(reserved_asids, i); in flush_context()
69 per_cpu(reserved_asids, i) = asid; in flush_context()
94 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
96 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
167 && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
181 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
/linux-4.4.14/arch/x86/kernel/
setup_percpu.c:223 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); in setup_per_cpu_areas()
224 per_cpu(cpu_number, cpu) = cpu; in setup_per_cpu_areas()
235 per_cpu(x86_cpu_to_apicid, cpu) = in setup_per_cpu_areas()
237 per_cpu(x86_bios_cpu_apicid, cpu) = in setup_per_cpu_areas()
241 per_cpu(x86_cpu_to_logical_apicid, cpu) = in setup_per_cpu_areas()
245 per_cpu(irq_stack_ptr, cpu) = in setup_per_cpu_areas()
246 per_cpu(irq_stack_union.irq_stack, cpu) + in setup_per_cpu_areas()
250 per_cpu(x86_cpu_to_node_map, cpu) = in setup_per_cpu_areas()
irq_32.c:114 if (per_cpu(hardirq_stack, cpu)) in irq_ctx_init()
120 per_cpu(hardirq_stack, cpu) = irqstk; in irq_ctx_init()
125 per_cpu(softirq_stack, cpu) = irqstk; in irq_ctx_init()
128 cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); in irq_ctx_init()
topology.c:140 per_cpu(cpu_devices, num).cpu.hotpluggable = 1; in arch_register_cpu()
142 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); in arch_register_cpu()
148 unregister_cpu(&per_cpu(cpu_devices, num).cpu); in arch_unregister_cpu()
155 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); in arch_register_cpu()
dumpstack_64.c:44 unsigned long end = per_cpu(orig_ist, cpu).ist[k]; in in_exception_stack()
157 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); in dump_trace()
261 irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); in show_stack_log_lvl()
262 irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); in show_stack_log_lvl()
irq.c:57 #define irq_stats(x) (&per_cpu(irq_stat, x))
134 seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); in arch_show_interrupts()
138 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); in arch_show_interrupts()
196 sum += per_cpu(mce_exception_count, cpu); in arch_irq_stat_cpu()
197 sum += per_cpu(mce_poll_count, cpu); in arch_irq_stat_cpu()
416 IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) in check_irq_vectors_for_cpu_disable()
smpboot.c:311 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && in match_smt()
327 if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && in match_llc()
328 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) in match_llc()
812 per_cpu(current_task, cpu) = idle; in common_cpu_up()
817 per_cpu(cpu_current_top_of_stack, cpu) = in common_cpu_up()
1179 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); in native_smp_prepare_cpus()
1180 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); in native_smp_prepare_cpus()
1181 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); in native_smp_prepare_cpus()
espfix_64.c:149 if (likely(per_cpu(espfix_stack, cpu))) in init_espfix_ap()
206 per_cpu(espfix_stack, cpu) = addr; in init_espfix_ap()
207 per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page in init_espfix_ap()
irqinit.c:63 if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) in vector_used_by_percpu_irq()
97 per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i); in init_IRQ()
dumpstack_32.c:29 void *irq = per_cpu(hardirq_stack, cpu); in is_hardirq_stack()
36 void *irq = per_cpu(softirq_stack, cpu); in is_softirq_stack()
tsc.c:125 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); in cyc2ns_write_begin()
146 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); in cyc2ns_write_end()
191 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); in cyc2ns_init()
890 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; in tsc_restore_sched_clock_state()
891 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; in tsc_restore_sched_clock_state()
kvm.c:306 struct kvm_steal_time *st = &per_cpu(steal_time, cpu); in kvm_register_steal_time()
408 src = &per_cpu(steal_time, cpu); in kvm_steal_clock()
583 apicid = per_cpu(x86_cpu_to_apicid, cpu); in kvm_kick_cpu()
843 const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); in kvm_unlock_kick()
apb_timer.c:222 struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); in apbt_cpuhp_notify()
342 adev = &per_cpu(cpu_apbt_dev, i); in apbt_time_init()
ioport.c:57 tss = &per_cpu(cpu_tss, get_cpu()); in sys_ioperm()
/linux-4.4.14/arch/parisc/kernel/
irq.c:89 per_cpu(local_ack_eiem, cpu) &= ~mask; in cpu_ack_irq()
92 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_ack_irq()
104 per_cpu(local_ack_eiem, cpu) |= mask; in cpu_eoi_irq()
107 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_eoi_irq()
156 #define irq_stats(x) (&per_cpu(irq_stat, x))
345 return per_cpu(cpu_data, cpu).txn_addr; in txn_affinity_addr()
357 (!per_cpu(cpu_data, next_cpu).txn_addr || in txn_alloc_addr()
423 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; in stack_overflow_check()
426 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); in stack_overflow_check()
442 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); in stack_overflow_check()
[all …]
smp.c:124 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); in ipi_interrupt()
129 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); in ipi_interrupt()
191 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); in ipi_send()
192 spinlock_t *lock = &per_cpu(ipi_lock, cpu); in ipi_send()
319 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); in smp_boot_one_cpu()
377 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; in smp_prepare_boot_cpu()
397 spin_lock_init(&per_cpu(ipi_lock, cpu)); in smp_prepare_cpus()
processor.c:81 p = &per_cpu(cpu_data, cpunum); in init_percpu_prof()
172 p = &per_cpu(cpu_data, cpuid); in processor_probe()
316 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; in init_per_cpu()
317 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; in init_per_cpu()
356 const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); in show_cpuinfo()
time.c:65 struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); in timer_interrupt()
224 per_cpu(cpu_data, cpu).it_value = next_tick; in start_cpu_itimer()
topology.c:32 register_cpu(&per_cpu(cpu_devices, num), num); in topology_init()
setup.c:402 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; in start_parisc()
403 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; in start_parisc()
/linux-4.4.14/drivers/oprofile/
oprofile_perf.c:42 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler()
78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter()
95 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter()
102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter()
106 per_cpu(perf_events, cpu)[event] = NULL; in op_destroy_counter()
261 event = per_cpu(perf_events, cpu)[id]; in oprofile_perf_exit()
266 kfree(per_cpu(perf_events, cpu)); in oprofile_perf_exit()
300 per_cpu(perf_events, cpu) = kcalloc(num_counters, in oprofile_perf_init()
302 if (!per_cpu(perf_events, cpu)) { in oprofile_perf_init()
nmi_timer_int.c:38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_start_cpu()
45 per_cpu(nmi_timer_events, cpu) = event; in nmi_timer_start_cpu()
56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_stop_cpu()
114 event = per_cpu(nmi_timer_events, cpu); in nmi_timer_shutdown()
118 per_cpu(nmi_timer_events, cpu) = NULL; in nmi_timer_shutdown()
oprofile_stats.c:26 cpu_buf = &per_cpu(op_cpu_buffer, i); in oprofile_reset_stats()
54 cpu_buf = &per_cpu(op_cpu_buffer, i); in oprofile_create_stats_files()
cpu_buffer.c:75 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); in alloc_cpu_buffers()
102 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); in start_cpu_work()
122 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); in flush_cpu_work()
cpu_buffer.h:64 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); in op_cpu_buffer_reset()
timer_int.c:58 struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu); in __oprofile_hrtimer_stop()
/linux-4.4.14/arch/s390/oprofile/
hwsampler.c:86 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_stop()
116 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_deactivate()
142 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_enable_activate()
170 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_qsi()
207 cb = &per_cpu(sampler_cpu_buffer, cpu); in init_all_cpu_buffers()
218 cb = &per_cpu(sampler_cpu_buffer, cpu); in prepare_cpu_buffers()
251 cb = &per_cpu(sampler_cpu_buffer, cpu); in allocate_sdbt()
338 cb = &per_cpu(sampler_cpu_buffer, cpu); in deallocate_sdbt()
382 cb = &per_cpu(sampler_cpu_buffer, cpu); in start_sampling()
419 cb = &per_cpu(sampler_cpu_buffer, cpu); in stop_sampling()
[all …]
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
mce_amd.c:283 per_cpu(bank_map, cpu) |= (1 << bank); in mce_amd_feature_init()
394 if (!(per_cpu(bank_map, cpu) & (1 << bank))) in amd_threshold_interrupt()
613 if (per_cpu(threshold_banks, cpu)[bank]->blocks) { in allocate_threshold_blocks()
615 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); in allocate_threshold_blocks()
617 per_cpu(threshold_banks, cpu)[bank]->blocks = b; in allocate_threshold_blocks()
621 per_cpu(threshold_banks, cpu)[bank]->kobj, in allocate_threshold_blocks()
679 struct device *dev = per_cpu(mce_device, cpu); in threshold_create_bank()
696 per_cpu(threshold_banks, cpu)[bank] = b; in threshold_create_bank()
717 per_cpu(threshold_banks, cpu)[bank] = b; in threshold_create_bank()
752 per_cpu(threshold_banks, cpu) = bp; in threshold_create_device()
[all …]
mce-inject.c:34 struct mce *i = &per_cpu(injectm, m->extcpu); in inject_mce()
164 struct mce *mcpu = &per_cpu(injectm, cpu); in raise_mce()
therm_throt.c:101 per_cpu(thermal_state, cpu).event.name); \
157 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); in therm_throt_process()
214 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); in thresh_event_valid()
mce_intel.c:143 if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE) in mce_intel_hcpu_update()
146 per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; in mce_intel_hcpu_update()
mce.c:754 int severity = mce_severity(&per_cpu(mces_seen, cpu), in mce_reign()
760 m = &per_cpu(mces_seen, cpu); in mce_reign()
790 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); in mce_reign()
1319 del_timer_sync(&per_cpu(mce_timer, cpu)); in mce_timer_delete_all()
1649 per_cpu(mce_next_interval, cpu) = iv; in mce_start_timer()
2337 per_cpu(mce_device, cpu) = dev; in mce_device_create()
2354 struct device *dev = per_cpu(mce_device, cpu); in mce_device_remove()
2368 per_cpu(mce_device, cpu) = NULL; in mce_device_remove()
2408 struct timer_list *t = &per_cpu(mce_timer, cpu); in mce_cpu_callback()
/linux-4.4.14/arch/arm/kernel/
smp.c:339 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info()
417 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done()
574 per_cpu(cpu_completion, cpu) = completion; in register_ipi_completion()
580 complete(per_cpu(cpu_completion, cpu)); in ipi_complete()
714 if (!per_cpu(l_p_j_ref, cpu)) { in cpufreq_callback()
715 per_cpu(l_p_j_ref, cpu) = in cpufreq_callback()
716 per_cpu(cpu_data, cpu).loops_per_jiffy; in cpufreq_callback()
717 per_cpu(l_p_j_ref_freq, cpu) = freq->old; in cpufreq_callback()
729 per_cpu(cpu_data, cpu).loops_per_jiffy = in cpufreq_callback()
730 cpufreq_scale(per_cpu(l_p_j_ref, cpu), in cpufreq_callback()
[all …]
topology.c:47 return per_cpu(cpu_scale, cpu); in arch_scale_cpu_capacity()
52 per_cpu(cpu_scale, cpu) = capacity; in set_capacity_scale()
smp_twd.c:281 if (per_cpu(percpu_setup_called, cpu)) { in twd_timer_setup()
287 per_cpu(percpu_setup_called, cpu) = true; in twd_timer_setup()
setup.c:1022 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); in topology_init()
1091 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id(); in c_show()
1097 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), in c_show()
1098 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); in c_show()
/linux-4.4.14/arch/tile/kernel/
smpboot.c:97 per_cpu(boot_sp, cpu) = 0; in smp_prepare_cpus()
98 per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; in smp_prepare_cpus()
109 per_cpu(boot_sp, cpu) = task_ksp0(idle); in smp_prepare_cpus()
110 per_cpu(boot_pc, cpu) = idle->thread.pc; in smp_prepare_cpus()
228 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in __cpu_up()
tlb.c:42 asid->asid = per_cpu(current_asid, cpu); in flush_tlb_mm()
/linux-4.4.14/arch/arm/mach-omap2/
omap-mpuss-lowpower.c:121 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in set_cpu_wakeup_addr()
132 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in scu_pwrst_prepare()
186 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in l2x0_pwrst_prepare()
229 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); in omap4_enter_lowpower()
312 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); in omap4_hotplug_cpu()
373 pm_info = &per_cpu(omap4_pm_info, 0x0); in omap4_mpuss_init()
393 pm_info = &per_cpu(omap4_pm_info, 0x1); in omap4_mpuss_init()
/linux-4.4.14/arch/powerpc/platforms/powernv/
subcore.c:156 while(per_cpu(split_state, i).step < step) in wait_for_sync_step()
187 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; in unsplit_core()
221 split_core_secondary_loop(&per_cpu(split_state, cpu).step); in split_core()
253 per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED; in cpu_do_split()
311 while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED) in cpu_update_split_mode()
346 state = &per_cpu(split_state, cpu); in set_subcores_per_core()
rng.c:94 if (per_cpu(powernv_rng, cpu) == NULL || in rng_init_per_cpu()
96 per_cpu(powernv_rng, cpu) = rng; in rng_init_per_cpu()
/linux-4.4.14/include/linux/
topology.h:84 return per_cpu(numa_node, cpu); in cpu_to_node()
98 per_cpu(numa_node, cpu) = node; in set_cpu_numa_node()
150 return per_cpu(_numa_mem_, cpu); in cpu_to_mem()
157 per_cpu(_numa_mem_, cpu) = node; in set_cpu_numa_mem()
kernel_stat.h:49 #define kstat_cpu(cpu) per_cpu(kstat, cpu)
50 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
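Both headers wrap per_cpu in one-line accessors rather than exposing the variable directly. A simplified sketch of that wrapper pattern, reconstructed from the hits above (the real topology.h guards these definitions behind config options such as CONFIG_USE_PERCPU_NUMA_NODE_ID):

    DECLARE_PER_CPU(int, numa_node);

    /* Reads and writes go through named helpers so callers never
     * touch the per-CPU variable itself. */
    static inline int cpu_to_node(int cpu)
    {
            return per_cpu(numa_node, cpu);
    }

    static inline void set_cpu_numa_node(int cpu, int node)
    {
            per_cpu(numa_node, cpu) = node;
    }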
/linux-4.4.14/arch/metag/kernel/
smp.c:329 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info()
364 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); in secondary_start_kernel()
366 if (!per_cpu(pTBI, cpu)) in secondary_start_kernel()
408 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done()
431 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); in smp_prepare_boot_cpu()
433 if (!per_cpu(pTBI, cpu)) in smp_prepare_boot_cpu()
449 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in send_ipi_message()
505 seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); in show_ipi_list()
524 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in do_IPI()
setup.c:290 per_cpu(pTBI, cpu) = _pTBI; in setup_arch()
292 if (!per_cpu(pTBI, cpu)) in setup_arch()
481 lpj = per_cpu(cpu_data, i).loops_per_jiffy; in show_cpuinfo()
578 return per_cpu(pTBI, cpu); in pTBI_get()
traps.c:754 PTBI _pTBI = per_cpu(pTBI, cpu); in traps_save_context()
770 PTBI _pTBI = per_cpu(pTBI, cpu); in traps_restore_context()
786 return per_cpu(trigger_mask, cpu); in _get_trigger_mask()
798 per_cpu(trigger_mask, cpu) = mask; in set_trigger_mask()
837 PTBI _pTBI = per_cpu(pTBI, cpu); in trap_init()
857 PTBI _pTBI = per_cpu(pTBI, cpu); in tbi_startup_interrupt()
873 PTBI _pTBI = per_cpu(pTBI, cpu); in tbi_shutdown_interrupt()
topology.c:54 struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i); in topology_init()
/linux-4.4.14/arch/m32r/kernel/
smp.c:669 if (--per_cpu(prof_counter, cpu_id) <= 0) { in smp_local_timer_interrupt()
678 per_cpu(prof_counter, cpu_id) in smp_local_timer_interrupt()
679 = per_cpu(prof_multiplier, cpu_id); in smp_local_timer_interrupt()
680 if (per_cpu(prof_counter, cpu_id) in smp_local_timer_interrupt()
681 != per_cpu(prof_old_multiplier, cpu_id)) in smp_local_timer_interrupt()
683 per_cpu(prof_old_multiplier, cpu_id) in smp_local_timer_interrupt()
684 = per_cpu(prof_counter, cpu_id); in smp_local_timer_interrupt()
/linux-4.4.14/arch/ia64/kernel/
smpboot.c:388 per_cpu(cpu_state, cpuid) = CPU_ONLINE; in smp_callin()
570 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; in smp_prepare_boot_cpu()
579 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) in clear_cpu_sibling_map()
580 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); in clear_cpu_sibling_map()
584 per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; in clear_cpu_sibling_map()
595 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); in remove_siblinginfo()
685 if (per_cpu(cpu_state, cpu) == CPU_DEAD) in __cpu_die()
724 &per_cpu(cpu_sibling_map, cpu)); in set_cpu_sibling_map()
726 &per_cpu(cpu_sibling_map, i)); in set_cpu_sibling_map()
749 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in __cpu_up()
[all …]
irq_ia64.c:142 per_cpu(vector_irq, cpu)[vector] = irq; in __bind_irq_vector()
172 per_cpu(vector_irq, cpu)[vector] = -1; in __clear_irq_vector()
242 per_cpu(vector_irq, cpu)[vector] = -1; in __setup_vector_irq()
248 per_cpu(vector_irq, cpu)[vector] = irq; in __setup_vector_irq()
/linux-4.4.14/drivers/xen/events/
events_fifo.c:105 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in init_control_block()
287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in consume_one_event()
333 control_block = per_cpu(cpu_control_block, cpu); in __evtchn_fifo_handle_events()
354 void *control_block = per_cpu(cpu_control_block, cpu); in evtchn_fifo_resume()
367 per_cpu(cpu_control_block, cpu) = NULL; in evtchn_fifo_resume()
412 per_cpu(cpu_control_block, cpu) = control_block; in evtchn_fifo_alloc_control_block()
430 if (!per_cpu(cpu_control_block, cpu)) in evtchn_fifo_cpu_notification()
events_2l.c:51 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); in evtchn_2l_bind_to_cpu()
52 set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); in evtchn_2l_bind_to_cpu()
149 per_cpu(cpu_evtchn_mask, cpu)[idx] & in active_evtchns()
268 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); in xen_debug_interrupt()
280 v = per_cpu(xen_vcpu, i); in xen_debug_interrupt()
289 v = per_cpu(xen_vcpu, cpu); in xen_debug_interrupt()
353 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * in evtchn_2l_resume()
events_base.c:203 per_cpu(ipi_to_irq, cpu)[ipi] = irq; in xen_irq_info_ipi_setup()
217 per_cpu(virq_to_irq, cpu)[virq] = irq; in xen_irq_info_virq_setup()
264 return per_cpu(virq_to_irq, cpu)[virq]; in irq_from_virq()
628 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; in __unbind_from_irq()
631 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; in __unbind_from_irq()
888 irq = per_cpu(ipi_to_irq, cpu)[ipi]; in bind_ipi_to_irq()
979 irq = per_cpu(virq_to_irq, cpu)[virq]; in bind_virq_to_irq()
1220 irq = per_cpu(ipi_to_irq, cpu)[vector]; in xen_send_IPI_one()
1454 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) in restore_cpu_virqs()
1479 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) in restore_cpu_ipis()
/linux-4.4.14/drivers/acpi/
processor_idle.c:707 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_play_dead()
789 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter()
799 cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter()
806 cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter()
829 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter_freeze()
886 per_cpu(acpi_cstate[count], dev->cpu) = cx; in acpi_processor_setup_cpuidle_cx()
982 dev = per_cpu(acpi_cpuidle_device, pr->id); in acpi_processor_hotplug()
1024 _pr = per_cpu(processors, cpu); in acpi_processor_cst_has_changed()
1027 dev = per_cpu(acpi_cpuidle_device, cpu); in acpi_processor_cst_has_changed()
1037 _pr = per_cpu(processors, cpu); in acpi_processor_cst_has_changed()
[all …]
acpi_processor.c:402 if (per_cpu(processor_device_array, pr->id) != NULL && in acpi_processor_add()
403 per_cpu(processor_device_array, pr->id) != device) { in acpi_processor_add()
414 per_cpu(processor_device_array, pr->id) = device; in acpi_processor_add()
415 per_cpu(processors, pr->id) = pr; in acpi_processor_add()
439 per_cpu(processors, pr->id) = NULL; in acpi_processor_add()
473 per_cpu(processor_device_array, pr->id) = NULL; in acpi_processor_remove()
474 per_cpu(processors, pr->id) = NULL; in acpi_processor_remove()
processor_perflib.c:91 pr = per_cpu(processors, policy->cpu); in acpi_processor_ppc_notifier()
195 pr = per_cpu(processors, cpu); in acpi_processor_get_bios_limit()
616 pr = per_cpu(processors, i); in acpi_processor_preregister_performance()
635 pr = per_cpu(processors, i); in acpi_processor_preregister_performance()
654 pr = per_cpu(processors, i); in acpi_processor_preregister_performance()
680 match_pr = per_cpu(processors, j); in acpi_processor_preregister_performance()
708 match_pr = per_cpu(processors, j); in acpi_processor_preregister_performance()
725 pr = per_cpu(processors, i); in acpi_processor_preregister_performance()
756 pr = per_cpu(processors, cpu); in acpi_processor_register_performance()
789 pr = per_cpu(processors, cpu); in acpi_processor_unregister_performance()
cppc_acpi.c:218 cpc_ptr = per_cpu(cpc_desc_ptr, i); in acpi_get_psd_map()
241 match_cpc_ptr = per_cpu(cpc_desc_ptr, j); in acpi_get_psd_map()
272 match_cpc_ptr = per_cpu(cpc_desc_ptr, j); in acpi_get_psd_map()
506 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; in acpi_cppc_processor_probe()
544 cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); in acpi_cppc_processor_exit()
583 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); in cppc_get_perf_caps()
646 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); in cppc_get_perf_ctrs()
703 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); in cppc_set_perf()
processor_throttling.c:86 pr = per_cpu(processors, i); in acpi_processor_update_tsd_coord()
107 pr = per_cpu(processors, i); in acpi_processor_update_tsd_coord()
133 match_pr = per_cpu(processors, j); in acpi_processor_update_tsd_coord()
166 match_pr = per_cpu(processors, j); in acpi_processor_update_tsd_coord()
188 pr = per_cpu(processors, i); in acpi_processor_update_tsd_coord()
231 pr = per_cpu(processors, cpu); in acpi_processor_throttling_notifier()
1133 match_pr = per_cpu(processors, i); in acpi_processor_set_throttling()
/linux-4.4.14/arch/powerpc/platforms/ps3/
smp.c:52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; in ps3_smp_message_pass()
66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); in ps3_smp_probe()
107 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); in ps3_smp_cleanup_cpu()
interrupt.c:191 pd = &per_cpu(ps3_private, cpu); in ps3_virq_setup()
695 struct ps3_private *pd = &per_cpu(ps3_private, cpu); in ps3_register_ipi_debug_brk()
705 struct ps3_private *pd = &per_cpu(ps3_private, cpu); in ps3_register_ipi_irq()
730 dump_bmp(&per_cpu(ps3_private, 0)); in ps3_get_irq()
731 dump_bmp(&per_cpu(ps3_private, 1)); in ps3_get_irq()
737 dump_bmp(&per_cpu(ps3_private, 0)); in ps3_get_irq()
738 dump_bmp(&per_cpu(ps3_private, 1)); in ps3_get_irq()
761 struct ps3_private *pd = &per_cpu(ps3_private, cpu); in ps3_init_IRQ()
/linux-4.4.14/arch/mn10300/kernel/
cevt-mn10300.c:57 cd = &per_cpu(mn10300_clockevent_device, cpu); in timer_interrupt()
83 cd = &per_cpu(mn10300_clockevent_device, cpu); in init_clockevents()
108 iact = &per_cpu(timer_irq, cpu); in init_clockevents()
/linux-4.4.14/arch/sparc/kernel/
nmi.c:60 if (per_cpu(nmi_touch, cpu) != 1) in touch_nmi_watchdog()
61 per_cpu(nmi_touch, cpu) = 1; in touch_nmi_watchdog()
150 per_cpu(wd_enabled, cpu) = 0; in report_broken_nmi()
185 if (!per_cpu(wd_enabled, cpu)) in check_nmi_watchdog()
sun4d_smp.c:200 work = &per_cpu(sun4d_ipi_work, cpu); in smp4d_ipi_init()
238 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_single()
249 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_mask_one()
260 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_resched()
383 ce = &per_cpu(sparc32_clockevent, cpu); in smp4d_percpu_timer_interrupt()
sysfs.c:22 struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
135 ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); in write_mmustat_enable()
226 struct cpu *c = &per_cpu(cpu_devices, cpu); in register_cpu_online()
239 struct cpu *c = &per_cpu(cpu_devices, cpu); in unregister_cpu_online()
306 struct cpu *c = &per_cpu(cpu_devices, cpu); in topology_init()
leon_smp.c:299 work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_init()
313 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_single()
324 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_mask_one()
335 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_resched()
time_64.c:623 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); in sparc64_get_clock_tick()
638 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); in sparc64_cpufreq_notifier()
698 struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); in timer_interrupt()
smp_64.c:1262 cpumask_clear(&per_cpu(cpu_sibling_map, i)); in smp_fill_in_sib_core_maps()
1264 cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); in smp_fill_in_sib_core_maps()
1271 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); in smp_fill_in_sib_core_maps()
1343 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) in __cpu_disable()
1344 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); in __cpu_disable()
1345 cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); in __cpu_disable()
/linux-4.4.14/arch/powerpc/platforms/pseries/
hotplug-cpu.c:66 return per_cpu(current_state, cpu); in get_cpu_current_state()
71 per_cpu(current_state, cpu) = state; in set_cpu_current_state()
76 return per_cpu(preferred_offline_state, cpu); in get_preferred_offline_state()
81 per_cpu(preferred_offline_state, cpu) = state; in set_preferred_offline_state()
86 per_cpu(preferred_offline_state, cpu) = default_offline_state; in set_default_offline_state()
dtl.c:104 struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); in dtl_start()
125 struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); in dtl_stop()
141 return per_cpu(dtl_rings, dtl->cpu).write_index; in dtl_current_index()
379 struct dtl *dtl = &per_cpu(cpu_dtl, i); in dtl_init()
/linux-4.4.14/kernel/trace/
trace_stack.c:211 if (per_cpu(trace_active, cpu)++ != 0) in stack_trace_call()
219 per_cpu(trace_active, cpu)--; in stack_trace_call()
265 per_cpu(trace_active, cpu)++; in stack_max_size_write()
271 per_cpu(trace_active, cpu)--; in stack_max_size_write()
310 per_cpu(trace_active, cpu)++; in t_start()
327 per_cpu(trace_active, cpu)--; in t_stop()
trace_irqsoff.c:108 if (likely(!per_cpu(tracing_cpu, cpu))) in func_prolog_dec()
162 per_cpu(tracing_cpu, cpu) = 0; in irqsoff_display_graph()
366 if (per_cpu(tracing_cpu, cpu)) in start_critical_timing()
384 per_cpu(tracing_cpu, cpu) = 1; in start_critical_timing()
399 if (unlikely(per_cpu(tracing_cpu, cpu))) in stop_critical_timing()
400 per_cpu(tracing_cpu, cpu) = 0; in stop_critical_timing()
/linux-4.4.14/Documentation/locking/
lglock.txt:14 distributed over all CPUs as per_cpu elements.
17 as per_cpu elements but can be mostly handled by CPU local actions
24 - very fast access to the local per_cpu data
25 - reasonably fast access to specific per_cpu data on a different
34 Basically it is an array of per_cpu spinlocks with the
100 locality aware spinlock. lg_local_* behaves like a per_cpu
105 access to protected per_cpu object on this CPU
109 access to protected per_cpu object on other CPU cpu
113 access all protected per_cpu objects on all CPUs
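Taken together, these excerpts describe lglock as an array of per_cpu spinlocks: lg_local_* takes only the calling CPU's lock, lg_local_lock_cpu targets one specific other CPU, and lg_global_* takes every CPU's lock. A minimal usage sketch under that reading (the lock name and call sites are illustrative, assuming the include/linux/lglock.h API that 4.4 still ships):

    #include <linux/lglock.h>

    static DEFINE_STATIC_LGLOCK(demo_lglock);

    static void touch_local_object(void)
    {
            /* Cheap path: only this CPU's spinlock is taken. */
            lg_local_lock(&demo_lglock);
            /* ... modify the per_cpu object owned by this CPU ... */
            lg_local_unlock(&demo_lglock);
    }

    static void scan_all_objects(void)
    {
            /* Slow path: acquires the spinlock of every CPU. */
            lg_global_lock(&demo_lglock);
            /* ... walk the protected per_cpu objects on all CPUs ... */
            lg_global_unlock(&demo_lglock);
    }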
/linux-4.4.14/arch/s390/kernel/
idle.c:54 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); in show_idle_count()
71 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); in show_idle_time()
89 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); in arch_cpu_idle_time()
topology.c:99 topo = &per_cpu(cpu_topology, lcpu + i); in add_cpus_to_mask()
256 topo = &per_cpu(cpu_topology, cpu); in update_cpu_masks()
431 return &per_cpu(cpu_topology, cpu).thread_mask; in cpu_thread_mask()
437 return &per_cpu(cpu_topology, cpu).core_mask; in cpu_coregroup_mask()
442 return &per_cpu(cpu_topology, cpu).book_mask; in cpu_book_mask()
processor.c:83 struct cpuid *id = &per_cpu(cpu_id, n); in show_cpuinfo()
/linux-4.4.14/arch/powerpc/platforms/cell/
cpufreq_spudemand.c:95 info = &per_cpu(spu_gov_info, cpu); in spu_gov_govern()
113 affected_info = &per_cpu(spu_gov_info, i); in spu_gov_govern()
130 info = &per_cpu(spu_gov_info, i); in spu_gov_govern()
interrupt.c:172 return per_cpu(cpu_iic, cpu).target_id; in iic_get_target_id()
187 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); in iic_message_pass()
302 struct iic *iic = &per_cpu(cpu_iic, hw_cpu); in init_one_iic()
/linux-4.4.14/scripts/gdb/linux/
cpus.py:36 def per_cpu(var_ptr, cpu): function
115 return per_cpu(var_ptr, cpu)
132 return per_cpu(var_ptr, cpu).dereference()
/linux-4.4.14/arch/x86/kernel/cpu/
perf_event_intel_rapl.c:535 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_exit()
593 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_prepare()
629 per_cpu(rapl_pmu, cpu) = pmu; in rapl_cpu_prepare()
630 per_cpu(rapl_pmu_to_free, cpu) = NULL; in rapl_cpu_prepare()
637 struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); in rapl_cpu_kfree()
641 per_cpu(rapl_pmu_to_free, cpu) = NULL; in rapl_cpu_kfree()
646 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_dying()
651 per_cpu(rapl_pmu, cpu) = NULL; in rapl_cpu_dying()
653 per_cpu(rapl_pmu_to_free, cpu) = pmu; in rapl_cpu_dying()
perf_event_intel_ds.c:251 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in init_debug_store_on_cpu()
263 if (!per_cpu(cpu_hw_events, cpu).ds) in fini_debug_store_on_cpu()
273 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in alloc_pebs_buffer()
295 per_cpu(insn_buffer, cpu) = ibuffer; in alloc_pebs_buffer()
310 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in release_pebs_buffer()
315 kfree(per_cpu(insn_buffer, cpu)); in release_pebs_buffer()
316 per_cpu(insn_buffer, cpu) = NULL; in release_pebs_buffer()
324 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in alloc_bts_buffer()
353 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in release_bts_buffer()
371 per_cpu(cpu_hw_events, cpu).ds = ds; in alloc_ds_buffer()
[all …]
perf_event_amd.c:368 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in amd_pmu_cpu_prepare()
384 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in amd_pmu_cpu_starting()
398 nb = per_cpu(cpu_hw_events, i).amd_nb; in amd_pmu_cpu_starting()
420 cpuhw = &per_cpu(cpu_hw_events, cpu); in amd_pmu_cpu_dead()
amd.c:337 per_cpu(cpu_llc_id, cpu) = node_id; in amd_get_topology()
363 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; in amd_detect_cmp()
376 per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; in amd_detect_cmp()
384 id = per_cpu(cpu_llc_id, cpu); in amd_get_nb_id()
405 node = per_cpu(cpu_llc_id, cpu); in srat_detect_node()
common.c:377 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); in load_percpu_segment()
996 tss = &per_cpu(cpu_tss, cpu); in enable_sep_cpu()
1347 t = &per_cpu(cpu_tss, cpu); in cpu_init()
1348 oist = &per_cpu(orig_ist, cpu); in cpu_init()
1386 char *estacks = per_cpu(exception_stacks, cpu); in cpu_init()
1393 per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; in cpu_init()
1431 struct tss_struct *t = &per_cpu(cpu_tss, cpu); in cpu_init()
/linux-4.4.14/net/rds/
page.c:119 rem = &per_cpu(rds_page_remainders, get_cpu()); in rds_page_remainder_alloc()
153 rem = &per_cpu(rds_page_remainders, get_cpu()); in rds_page_remainder_alloc()
188 rem = &per_cpu(rds_page_remainders, cpu); in rds_page_remainder_cpu_notify()
iw_stats.c:85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); in rds_iw_stats_info_copy()
ib_stats.c:93 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); in rds_ib_stats_info_copy()
tcp_stats.c:64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); in rds_tcp_stats_info_copy()
stats.c:127 src = (uint64_t *)&(per_cpu(rds_stats, cpu)); in rds_stats_info()
/linux-4.4.14/drivers/soc/qcom/
spm.c:185 struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu); in qcom_cpu_spc()
202 return per_cpu(qcom_idle_ops, cpu)[index](cpu); in qcom_idle_enter()
266 per_cpu(qcom_idle_ops, cpu) = fns; in qcom_cpuidle_init()
274 return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO; in qcom_cpuidle_init()
369 per_cpu(cpu_spm_drv, cpu) = drv; in spm_dev_probe()
/linux-4.4.14/mm/
swap.c:525 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); in activate_page_drain()
533 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0; in need_activate_page_drain()
809 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); in lru_add_drain_cpu()
814 pvec = &per_cpu(lru_rotate_pvecs, cpu); in lru_add_drain_cpu()
824 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); in lru_add_drain_cpu()
881 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); in lru_add_drain_all()
883 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || in lru_add_drain_all()
884 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || in lru_add_drain_all()
885 pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || in lru_add_drain_all()
894 flush_work(&per_cpu(lru_add_drain_work, cpu)); in lru_add_drain_all()
kmemleak-test.c:89 per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); in kmemleak_test_init()
91 per_cpu(kmemleak_test_pointer, i)); in kmemleak_test_init()
quicklist.c:96 ql = per_cpu(quicklist, cpu); in quicklist_total_size()
/linux-4.4.14/arch/x86/platform/uv/
tlb_uv.c:136 bcp = &per_cpu(bau_control, cpu); in set_bau_on()
151 bcp = &per_cpu(bau_control, cpu); in set_bau_off()
183 return per_cpu(x86_cpu_to_apicid, cpu); in uvhub_to_first_apicid()
361 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id()); in do_reset()
767 tbcp = &per_cpu(bau_control, tcpu); in disable_for_period()
975 tbcp = &per_cpu(bau_control, tcpu); in check_enable()
1094 bcp = &per_cpu(bau_control, cpu); in uv_flush_tlb_others()
1125 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); in uv_flush_tlb_others()
1254 bcp = &per_cpu(bau_control, smp_processor_id()); in uv_bau_message_interrupt()
1384 bcp = &per_cpu(bau_control, cpu); in ptc_seq_show()
[all …]
/linux-4.4.14/arch/powerpc/include/asm/
Dtopology.h90 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
91 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
Dsmp.h98 return per_cpu(cpu_sibling_map, cpu); in cpu_sibling_mask()
103 return per_cpu(cpu_core_map, cpu); in cpu_core_mask()
/linux-4.4.14/arch/x86/include/asm/
Dtopology.h126 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
127 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
Dsmp.h42 return per_cpu(cpu_llc_shared_map, cpu); in cpu_llc_shared_mask()
162 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
Dstackprotector.h89 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); in setup_stack_canary_segment()
Dpreempt.h36 per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
/linux-4.4.14/arch/mips/cavium-octeon/
Docteon-irq.c262 raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); in octeon_irq_ciu_enable()
268 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); in octeon_irq_ciu_enable()
277 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); in octeon_irq_ciu_enable()
365 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); in octeon_irq_ciu_disable_all()
367 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); in octeon_irq_ciu_disable_all()
369 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); in octeon_irq_ciu_disable_all()
398 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); in octeon_irq_ciu_enable_all()
400 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); in octeon_irq_ciu_enable_all()
402 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); in octeon_irq_ciu_enable_all()
438 set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); in octeon_irq_ciu_enable_v2()
[all …]
Dsmp.c257 while (per_cpu(cpu_state, cpu) != CPU_DEAD) in octeon_cpu_die()
295 per_cpu(cpu_state, cpu) = CPU_DEAD; in play_dead()
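
The Octeon hotplug code (and the sh and Loongson variants further down) uses a per-CPU state word as a handshake: the dying CPU stores CPU_DEAD into its own slot, and the surviving CPU spins until it observes that store. A sketch with hypothetical names:

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <asm/processor.h>      /* cpu_relax() */

    enum { DEMO_ONLINE, DEMO_DEAD };

    static DEFINE_PER_CPU(int, demo_cpu_state);

    /* Dying CPU: publish the state in our own per-CPU slot. */
    static void demo_play_dead(void)
    {
            per_cpu(demo_cpu_state, smp_processor_id()) = DEMO_DEAD;
            /* ... enter a low-power wait ... */
    }

    /* Survivor: poll the dead CPU's slot, as octeon_cpu_die() does. */
    static void demo_cpu_die(unsigned int cpu)
    {
            while (per_cpu(demo_cpu_state, cpu) != DEMO_DEAD)
                    cpu_relax();
    }
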
/linux-4.4.14/arch/alpha/kernel/
Dtime.c93 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in rtc_timer_interrupt()
118 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in init_rtc_clockevent()
175 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in qemu_timer_interrupt()
185 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in init_qemu_clockevent()
Dirq.c80 seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); in arch_show_interrupts()
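
The alpha timer code keeps a full clock_event_device per CPU; the interrupt handler looks up its own copy via smp_processor_id() and hands the tick to whatever handler the clockevents core installed. A sketch of that handler shape, hypothetical names:

    #include <linux/percpu.h>
    #include <linux/clockchips.h>
    #include <linux/interrupt.h>
    #include <linux/smp.h>

    /* One tick device per CPU, like per_cpu(cpu_ce, cpu) above. */
    static DEFINE_PER_CPU(struct clock_event_device, demo_ce);

    static irqreturn_t demo_timer_interrupt(int irq, void *dev_id)
    {
            int cpu = smp_processor_id();
            struct clock_event_device *ce = &per_cpu(demo_ce, cpu);

            ce->event_handler(ce);  /* set by the clockevents core */
            return IRQ_HANDLED;
    }
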
/linux-4.4.14/arch/ia64/sn/kernel/sn2/
Dsn2_smp.c494 stat = &per_cpu(ptcstats, cpu); in sn2_ptc_seq_show()
498 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, in sn2_ptc_seq_show()
499 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, in sn2_ptc_seq_show()
500 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, in sn2_ptc_seq_show()
504 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); in sn2_ptc_seq_show()
522 memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); in sn2_ptc_proc_write()
/linux-4.4.14/drivers/cpuidle/
Ddriver.c35 return per_cpu(cpuidle_drivers, cpu); in __cpuidle_get_cpu_driver()
55 per_cpu(cpuidle_drivers, cpu) = NULL; in __cpuidle_unset_driver()
79 per_cpu(cpuidle_drivers, cpu) = drv; in __cpuidle_set_driver()
Dcpuidle-cps.c112 device = &per_cpu(cpuidle_dev, cpu); in cps_cpuidle_unregister()
164 device = &per_cpu(cpuidle_dev, cpu); in cps_cpuidle_init()
Dcoupled.c343 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); in cpuidle_coupled_poke()
662 other_dev = per_cpu(cpuidle_devices, cpu); in cpuidle_coupled_register_device()
685 csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); in cpuidle_coupled_register_device()
782 dev = per_cpu(cpuidle_devices, cpu); in cpuidle_coupled_cpu_notify()
Dcpuidle.c434 per_cpu(cpuidle_devices, dev->cpu) = NULL; in __cpuidle_unregister_device()
461 per_cpu(cpuidle_devices, dev->cpu) = dev; in __cpuidle_register_device()
556 device = &per_cpu(cpuidle_dev, cpu); in cpuidle_unregister()
588 device = &per_cpu(cpuidle_dev, cpu); in cpuidle_register()
Dcpuidle-arm.c154 dev = per_cpu(cpuidle_devices, cpu); in arm_idle_init()
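
cpuidle keeps registries of plain pointers in per-CPU slots: registration claims the slot for each CPU a driver serves, and lookup just reads it back. A sketch of the claim/lookup pair with hypothetical names, assuming the caller serializes as cpuidle's driver lock does:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    struct demo_driver {
            const char *name;
    };

    static DEFINE_PER_CPU(struct demo_driver *, demo_drivers);

    static int demo_set_driver(struct demo_driver *drv,
                               const struct cpumask *mask)
    {
            int cpu;

            for_each_cpu(cpu, mask)
                    if (per_cpu(demo_drivers, cpu))
                            return -EBUSY;  /* slot already claimed */
            for_each_cpu(cpu, mask)
                    per_cpu(demo_drivers, cpu) = drv;
            return 0;
    }

    static struct demo_driver *demo_get_driver(int cpu)
    {
            return per_cpu(demo_drivers, cpu);
    }
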
/linux-4.4.14/arch/mips/sgi-ip27/
Dip27-timer.c75 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); in hub_rt_counter_handler()
107 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); in hub_rt_clock_event_init()
108 unsigned char *name = per_cpu(hub_rt_name, cpu); in hub_rt_clock_event_init()
/linux-4.4.14/drivers/leds/trigger/
Dledtrig-cpu.c129 struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); in ledtrig_cpu_init()
152 struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); in ledtrig_cpu_exit()
/linux-4.4.14/init/
Dcalibrate.c280 if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { in calibrate_delay()
281 lpj = per_cpu(cpu_loops_per_jiffy, this_cpu); in calibrate_delay()
305 per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj; in calibrate_delay()
/linux-4.4.14/arch/sh/kernel/
Dsmp.c80 per_cpu(cpu_state, cpu) = CPU_ONLINE; in smp_prepare_boot_cpu()
90 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { in native_cpu_die()
204 per_cpu(cpu_state, cpu) = CPU_ONLINE; in start_secondary()
222 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in __cpu_up()
Dlocaltimer.c44 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); in local_timer_setup()
Dtopology.c58 struct cpu *c = &per_cpu(cpu_devices, i); in topology_init()
/linux-4.4.14/drivers/clocksource/
Dmetag_generic.c96 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); in arch_timer_setup()
97 char *name = per_cpu(local_clockevent_name, cpu); in arch_timer_setup()
/linux-4.4.14/arch/microblaze/kernel/
Dsetup.c189 per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ in machine_early_init()
190 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; in machine_early_init()
/linux-4.4.14/kernel/locking/
Dpercpu-rwsem.c117 sum += per_cpu(*brw->fast_read_ctr, cpu); in clear_fast_ctr()
118 per_cpu(*brw->fast_read_ctr, cpu) = 0; in clear_fast_ctr()
/linux-4.4.14/lib/
Dnmi_backtrace.c78 s = &per_cpu(nmi_print_seq, cpu); in nmi_trigger_all_cpu_backtrace()
103 s = &per_cpu(nmi_print_seq, cpu); in nmi_trigger_all_cpu_backtrace()
Drandom32.c185 struct rnd_state *state = &per_cpu(net_rand_state, i); in prandom_seed()
204 struct rnd_state *state = &per_cpu(net_rand_state, i); in prandom_init()
/linux-4.4.14/block/
Dblk-softirq.c92 list_splice_init(&per_cpu(blk_cpu_done, cpu), in blk_cpu_notify()
180 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); in blk_softirq_init()
Dblk-iopoll.c200 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), in blk_iopoll_cpu_notify()
218 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); in blk_iopoll_setup()
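
The block softirq code pairs a per-CPU list head, initialized for every possible CPU at boot, with a hotplug notifier that splices a dead CPU's backlog onto the local list. A sketch, hypothetical names (the real notifier runs with interrupts disabled):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/list.h>

    static DEFINE_PER_CPU(struct list_head, demo_done);

    static void demo_lists_init(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    INIT_LIST_HEAD(&per_cpu(demo_done, cpu));
    }

    /* CPU_DEAD: steal the dead CPU's pending work onto this CPU. */
    static void demo_take_dead_work(int dead_cpu)
    {
            list_splice_init(&per_cpu(demo_done, dead_cpu),
                             this_cpu_ptr(&demo_done));
    }
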
/linux-4.4.14/arch/hexagon/kernel/
Dsmp.c97 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in handle_ipi()
114 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in send_ipi()
Dtime.c128 &per_cpu(clock_events, cpu); in setup_percpu_clockdev()
143 struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu); in ipi_timer()
/linux-4.4.14/kernel/time/
Dtick-common.c59 return &per_cpu(tick_cpu_device, cpu); in tick_get_device()
307 td = &per_cpu(tick_cpu_device, cpu); in tick_check_new_device()
391 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); in tick_shutdown()
Dtimer_stats.c248 lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); in timer_stats_update_stats()
351 raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); in sync_access()
413 raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); in init_timer_stats()
Dtick-sched.c46 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched()
232 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu()
492 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_idle_time_us()
533 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_iowait_time_us()
1125 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in tick_cancel_sched_timer()
1144 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); in tick_clock_notify()
Dclockevents.c400 return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; in __clockevents_try_unbind()
714 &per_cpu(tick_cpu_device, dev->id); in tick_get_tick_dev()
728 return &per_cpu(tick_cpu_device, dev->id); in tick_get_tick_dev()
738 struct device *dev = &per_cpu(tick_percpu_dev, cpu); in tick_init_sysfs()
/linux-4.4.14/arch/blackfin/mach-common/
Dsmp.c135 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in ipi_timer()
183 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); in bfin_ipi_init()
197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); in send_ipi()
/linux-4.4.14/arch/arm/mach-alpine/
Dalpine_cpu_resume.h31 struct al_cpu_resume_regs_per_cpu per_cpu[]; member
Dalpine_cpu_pm.c45 &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr); in alpine_cpu_wakeup()
/linux-4.4.14/drivers/base/
Dcpu.c79 per_cpu(cpu_sys_devices, logical_cpu) = NULL; in unregister_cpu()
375 per_cpu(cpu_sys_devices, num) = &cpu->dev; in register_cpu()
385 return per_cpu(cpu_sys_devices, cpu); in get_cpu_device()
495 if (register_cpu(&per_cpu(cpu_devices, i), i)) in cpu_dev_register_generic()
Dcacheinfo.c33 #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
219 #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
225 #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
/linux-4.4.14/arch/sparc/include/asm/
Dcpudata_32.h28 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
Dcpudata_64.h33 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
Dtopology_64.h47 #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
/linux-4.4.14/arch/tile/include/asm/
Dhardirq.h41 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
/linux-4.4.14/drivers/crypto/
Dpadlock-aes.c157 if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || in aes_set_key()
158 &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) in aes_set_key()
159 per_cpu(paes_last_cword, cpu) = NULL; in aes_set_key()
171 if (cword != per_cpu(paes_last_cword, cpu)) in padlock_reset_key()
181 per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; in padlock_store_cword()
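
padlock-aes uses a per-CPU slot as a one-entry cache: each CPU remembers the last key control word it loaded into the hardware and skips the reload when it matches, and key teardown scrubs any CPU still pointing at the dying key. A sketch of both halves, hypothetical names:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(const void *, demo_last_key);

    static void demo_use_key(const void *key)
    {
            int cpu = get_cpu();

            if (per_cpu(demo_last_key, cpu) != key) {
                    /* ... reload hardware key state here ... */
                    per_cpu(demo_last_key, cpu) = key;
            }
            put_cpu();
    }

    /* On key destruction, drop stale per-CPU references. */
    static void demo_forget_key(const void *key)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    if (per_cpu(demo_last_key, cpu) == key)
                            per_cpu(demo_last_key, cpu) = NULL;
    }
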
/linux-4.4.14/arch/ia64/include/asm/sn/
Dpda.h66 #define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
/linux-4.4.14/arch/arm64/kernel/
Dcpuinfo.c110 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); in c_show()
255 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0); in cpuinfo_store_boot_cpu()
/linux-4.4.14/drivers/thermal/
Dx86_pkg_temp_thermal.c379 &per_cpu(pkg_temp_thermal_threshold_work, cpu), in pkg_temp_thermal_platform_thermal_notify()
536 INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu), in get_core_online()
548 &per_cpu(pkg_temp_thermal_threshold_work, cpu)); in put_core_offline()
638 &per_cpu(pkg_temp_thermal_threshold_work, i)); in pkg_temp_thermal_exit()
/linux-4.4.14/arch/mips/loongson64/loongson-3/
Dsmp.c303 per_cpu(cpu_state, cpu) = CPU_ONLINE; in loongson3_init_secondary()
378 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; in loongson3_prepare_cpus()
433 while (per_cpu(cpu_state, cpu) != CPU_DEAD) in loongson3_cpu_die()
587 state_addr = &per_cpu(cpu_state, cpu); in play_dead()
Dhpet.c182 cd = &per_cpu(hpet_clockevent_device, cpu); in hpet_irq_handler()
231 cd = &per_cpu(hpet_clockevent_device, cpu); in setup_hpet_timer()
/linux-4.4.14/kernel/rcu/
Dtree_trace.c125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu), in print_one_rcu_data()
147 per_cpu(rcu_cpu_has_work, rdp->cpu), in print_one_rcu_data()
148 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, in print_one_rcu_data()
150 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); in print_one_rcu_data()
/linux-4.4.14/arch/ia64/include/asm/
Dtopology.h56 #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
/linux-4.4.14/arch/arm/include/asm/
Dsmp_plat.h37 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); in smp_cpuid_part()
/linux-4.4.14/arch/arm/xen/
Denlighten.c94 if (per_cpu(xen_vcpu, cpu) != NULL) in xen_percpu_init()
105 per_cpu(xen_vcpu, cpu) = vcpup; in xen_percpu_init()
/linux-4.4.14/arch/mips/math-emu/
Dme-debugfs.c22 ps = &per_cpu(fpuemustats, cpu); in fpuemu_stat_get()
/linux-4.4.14/drivers/scsi/bnx2i/
Dbnx2i_init.c425 p = &per_cpu(bnx2i_percpu, cpu); in bnx2i_percpu_thread_create()
446 p = &per_cpu(bnx2i_percpu, cpu); in bnx2i_percpu_thread_destroy()
536 p = &per_cpu(bnx2i_percpu, cpu); in bnx2i_mod_init()
/linux-4.4.14/kernel/sched/
Dsched.h704 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1751 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); in irq_time_read()
1752 irq_time = per_cpu(cpu_softirq_time, cpu) + in irq_time_read()
1753 per_cpu(cpu_hardirq_time, cpu); in irq_time_read()
1754 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); in irq_time_read()
1769 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); in irq_time_read()
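
The first irq_time_read() variant above handles 32-bit kernels, where a 64-bit per-CPU counter cannot be loaded atomically: each CPU's counters are guarded by a per-CPU seqcount, and remote readers retry until they see a consistent value. A sketch of that read loop, hypothetical names:

    #include <linux/percpu.h>
    #include <linux/seqlock.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(seqcount_t, demo_time_seq);
    static DEFINE_PER_CPU(u64, demo_time);

    static u64 demo_time_read(int cpu)
    {
            unsigned int seq;
            u64 t;

            do {
                    seq = read_seqcount_begin(&per_cpu(demo_time_seq, cpu));
                    t = per_cpu(demo_time, cpu);
            } while (read_seqcount_retry(&per_cpu(demo_time_seq, cpu), seq));

            return t;
    }

The writer side would bump its own CPU's seqcount around updates with write_seqcount_begin()/write_seqcount_end().
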
/linux-4.4.14/arch/blackfin/kernel/
Dtime-ts.c308 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in bfin_coretmr_interrupt()
328 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in bfin_coretmr_clockevent_init()
/linux-4.4.14/arch/arm/mach-bcm/
Dplatsmp-brcmstb.c70 return per_cpu(per_cpu_sw_state, cpu); in per_cpu_sw_state_rd()
76 per_cpu(per_cpu_sw_state, cpu) = val; in per_cpu_sw_state_wr()
/linux-4.4.14/arch/arm/mach-qcom/
Dplatsmp.c277 if (!per_cpu(cold_boot_done, cpu)) { in qcom_boot_secondary()
280 per_cpu(cold_boot_done, cpu) = true; in qcom_boot_secondary()
/linux-4.4.14/arch/arc/include/asm/
Dmmu_context.h54 #define asid_cpu(cpu) per_cpu(asid_cache, cpu)
/linux-4.4.14/arch/cris/arch-v10/mm/
Dfault.c48 pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id()); in handle_mmu_bus_fault()
/linux-4.4.14/arch/mn10300/include/asm/
Dmmu_context.h150 per_cpu(cpu_tlbstate, cpu).active_mm = next; in switch_mm()
/linux-4.4.14/arch/xtensa/include/asm/
Dmmu_context.h34 #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
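
A recurring convention across these arch headers (cpu_data(), cpu_rq(), asid_cpu(), cpu_asid_cache(), __IRQ_STAT()) is to hide the per_cpu() access behind a subsystem-local macro so callers never name the underlying variable. A minimal sketch of the convention, with a hypothetical variable:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_asid_cache);
    #define demo_asid(cpu)  per_cpu(demo_asid_cache, (cpu))
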
